For an H.264 stream, changing the I-frame interval (the GOP length) requires decoding the stream and re-encoding it. The code below decodes an input H.264 file and re-encodes it with libx264 using a GOP of 20.
FFmpeg version: 4.3

#include <cstdio>
#include <cstdint>
#include <iostream>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libavutil/dict.h>
}
static void openEncoder(int width, int height, AVCodecContext** enc_ctx)
{
//use the libx264 encoder
AVCodec * pCodec = avcodec_find_encoder_by_name("libx264");
if(nullptr == pCodec)
{
printf("avcodec_find_encoder_by_name fail.\n");
return;
}
//allocate the encoder context
*enc_ctx = avcodec_alloc_context3(pCodec);
if(nullptr == *enc_ctx)
{
printf("avcodec_alloc_context3(pCodec) fail.\n");
return;
}
//profile/level (carried in the stream's SPS/PPS)
(*enc_ctx)->profile = FF_PROFILE_H264_HIGH;
(*enc_ctx)->level = 50;//level 5.0
//resolution
(*enc_ctx)->width = width;
(*enc_ctx)->height = height;
//GOP: one I frame every 20 frames
(*enc_ctx)->gop_size = 20;
(*enc_ctx)->keyint_min = 15;//minimum interval for automatically inserted I frames. OPTION
//B frames: none
(*enc_ctx)->max_b_frames = 0;//OPTION
(*enc_ctx)->has_b_frames = 0;//OPTION
//reference frames
(*enc_ctx)->refs = 3;//OPTION
//input pixel format
(*enc_ctx)->pix_fmt = AV_PIX_FMT_YUV420P;
//bitrate
(*enc_ctx)->bit_rate = 1000000;
//frame rate
(*enc_ctx)->time_base = (AVRational){1,25};//interval between two frames
(*enc_ctx)->framerate = (AVRational){25,1};//25 frames per second
if(avcodec_open2((*enc_ctx),pCodec,nullptr) < 0)
{
printf("avcodec_open2 fail.\n");
}
return;
}
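// Illustrative variant (an assumption, not part of the original post): the GOP
// parameters can also be handed to libx264 through an AVDictionary when the
// encoder is opened. "x264-params" with keyint/min-keyint/scenecut is a libx264
// option string; scenecut=0 stops x264 from inserting extra I frames on scene
// changes, so the keyframe interval stays exactly at gop_size.
static int openWithX264Options(AVCodecContext *enc_ctx, AVCodec *codec)
{
    AVDictionary *opts = nullptr;
    av_dict_set(&opts, "preset", "medium", 0);
    av_dict_set(&opts, "x264-params", "keyint=20:min-keyint=15:scenecut=0", 0);
    int ret = avcodec_open2(enc_ctx, codec, &opts);
    av_dict_free(&opts); // entries not consumed by the encoder are freed here
    return ret;
}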
static AVFrame* create_frame(int width,int height)
{
AVFrame *frame = av_frame_alloc();
frame->width = width;
frame->height = height;
frame->format = AV_PIX_FMT_YUV420P;
//allocate the frame's data buffers, 32-byte aligned
av_frame_get_buffer(frame, 32);
return frame;
}
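// Usage sketch for create_frame (a hypothetical helper, not used by the code
// below): filling the allocated frame with one YUV420P picture read from a raw
// .yuv file before handing it to the encoder. Rows are copied one line at a
// time because linesize can be larger than width due to alignment padding.
static int read_yuv420p_frame(FILE *yuvFile, AVFrame *frame)
{
    if (av_frame_make_writable(frame) < 0)
        return -1;
    // plane 0 = Y (width x height), planes 1/2 = U/V (width/2 x height/2)
    for (int p = 0; p < 3; ++p)
    {
        int w = (p == 0) ? frame->width  : frame->width  / 2;
        int h = (p == 0) ? frame->height : frame->height / 2;
        for (int y = 0; y < h; ++y)
        {
            if (fread(frame->data[p] + y * frame->linesize[p], 1, w, yuvFile) != (size_t)w)
                return -1; // short read: end of file or I/O error
        }
    }
    return 0;
}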
static void encode(AVCodecContext *encCtx,AVFrame *frame,AVPacket *newpkt,FILE *outPutFile)
{
int nRet = 0;
if (frame)
printf("send frame to encoder ,pts=%lld \n",frame->pts);
//送原始数据给编码器进行编码
nRet = avcodec_send_frame(encCtx,frame);
if(nRet < 0)
{
printf("avcodec_send_frame fail.\n");
return;
}
//fetch the encoded packets from the encoder
while(nRet >= 0)
{
nRet = avcodec_receive_packet(encCtx,newpkt);
if(nRet == AVERROR(EAGAIN) || nRet == AVERROR_EOF)
{
return;//the encoder needs more input, or it has been fully drained
}
else if(nRet < 0)
{
printf("avcodec_receive_packet fail.\n");
return;
}
fwrite(newpkt->data,1,newpkt->size,outPutFile);
av_packet_unref(newpkt);
}
}
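// Draining sketch (an addition, not spelled out in the original post): sending a
// null frame switches the encoder into flush mode, and encode() above then keeps
// receiving packets until avcodec_receive_packet returns AVERROR_EOF, so the
// packets still buffered inside libx264 reach the output file.
static void flush_encoder(AVCodecContext *encCtx, AVPacket *newpkt, FILE *outPutFile)
{
    encode(encCtx, nullptr, newpkt, outPutFile); // null frame == end of stream
}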
#define YUVFORMAT_YUV420P AV_PIX_FMT_YUV420P
static void test()
{
int nRet = 0;
const char *pInFileName = "D:/videos/gop30.h264";
const char *pOutFileName = "D:/output/gop20.h264";
FILE *fp = fopen(pOutFileName, "wb+");
if(nullptr == fp)
{
printf("Could not open output file.\n");
return;
}
AVDictionary *pDic = nullptr;
AVFormatContext *pInFmtCtx = nullptr;
nRet = avformat_open_input(&pInFmtCtx,pInFileName,nullptr,&pDic);
if( nRet < 0)
{
printf("Could not open input file.");
return;
}
avformat_find_stream_info(pInFmtCtx, nullptr);
printf("===========Input Information==========\n");
av_dump_format(pInFmtCtx, 0, pInFileName, 0);
printf("======================================\n");
//find the video stream and its decoder
int videoStreamIndex = -1;
for(unsigned int i = 0; i < pInFmtCtx->nb_streams; ++i)
{
if(AVMEDIA_TYPE_VIDEO == pInFmtCtx->streams[i]->codecpar->codec_type)
{
videoStreamIndex = i;
break;
}
}
if(videoStreamIndex < 0)
{
printf("No video stream found.\n");
return;
}
AVStream * in_stream = pInFmtCtx->streams[videoStreamIndex];
AVCodec *pInCodec = avcodec_find_decoder(in_stream->codecpar->codec_id);
if(nullptr == pInCodec)
{
printf("avcodec_find_decoder fail.");
return;
}
AVCodecContext* pInCodecCtx = avcodec_alloc_context3(pInCodec);//allocate a decoder context; the stream parameters are copied into it below
nRet = avcodec_parameters_to_context(pInCodecCtx, in_stream->codecpar);
if(nRet < 0)
{
printf("avcodec_parameters_to_context fail.");
return;
}
//open the decoder
if(avcodec_open2(pInCodecCtx, pInCodec, nullptr) < 0)
{
printf("Error: Can't open codec!\n");
return ;
}
printf("width = %d\n", pInCodecCtx->width);
printf("height = %d\n", pInCodecCtx->height);
AVFrame *pFrameYUV = av_frame_alloc();
AVPacket *packet = av_packet_alloc();//av_packet_alloc already returns an initialized packet, no av_init_packet needed
//how many bytes one picture of this format and size needs
int bytes_num = av_image_get_buffer_size(YUVFORMAT_YUV420P, pInCodecCtx->width, pInCodecCtx->height,1);
//allocate a buffer to hold the picture data
uint8_t* out_buffer = (uint8_t*)av_malloc(bytes_num);
//av_frame_alloc above only allocates the AVFrame struct itself; the pixel
//buffers it points to are not allocated yet. av_image_fill_arrays associates
//the av_malloc'ed memory with the frame's data/linesize members (it replaces
//the deprecated avpicture_fill).
av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer, YUVFORMAT_YUV420P, pInCodecCtx->width, pInCodecCtx->height, 1);
AVFrame *pFrame = av_frame_alloc();
AVPacket *newpkt = av_packet_alloc();
int64_t pts = 0;
AVCodecContext *encCtx = nullptr;
//open the encoder with the same resolution as the decoded input
openEncoder(pInCodecCtx->width, pInCodecCtx->height, &encCtx);
while(av_read_frame(pInFmtCtx, packet) >= 0)
{
if( videoStreamIndex == packet->stream_index)
{
//feed the compressed packet to the decoder
if(avcodec_send_packet(pInCodecCtx, packet) < 0)
{
std::cout<<"avcodec_send_packet fail"<<std::endl;
goto __end;
}
//pull out every frame the decoder can return for this packet and re-encode it
while(avcodec_receive_frame(pInCodecCtx, pFrame) == 0)
{
//pts counted in the encoder time_base of 1/25 s, i.e. one tick per frame
pts += 1;
pFrame->pts = pts;
encode(encCtx, pFrame, newpkt, fp);
}
}
av_packet_unref(packet);
}
fflush(fp);
//flush the decoder: when av_read_frame reaches end of file the decoder may
//still be holding a few frames; sending a null packet puts it into flush
//mode, and avcodec_receive_frame drains the remaining frames
avcodec_send_packet(pInCodecCtx, nullptr);
while(avcodec_receive_frame(pInCodecCtx, pFrame) == 0)
{
pts += 1;
pFrame->pts = pts;
encode(encCtx, pFrame, newpkt, fp);
}
//flush the encoder too, so its buffered packets are written to the file
encode(encCtx, nullptr, newpkt, fp);
__end:
fflush(fp);
fclose(fp);
av_frame_free(&pFrame);
av_frame_free(&pFrameYUV);
av_packet_free(&packet);
av_packet_free(&newpkt);
av_freep(&out_buffer);
avcodec_free_context(&encCtx);
avcodec_free_context(&pInCodecCtx);
avformat_close_input(&pInFmtCtx);
}
int main()
{
test();
std::cout<<"changeGOP end"<<std::endl;
return 0;
}
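// Verification sketch (a hypothetical check, not in the original post): re-open
// the re-encoded file and count the packets between keyframes; with the settings
// above the spacing should come out as 20. AV_PKT_FLAG_KEY marks packets that
// carry a keyframe.
static void check_gop(const char *path)
{
    AVFormatContext *fmt = nullptr;
    if (avformat_open_input(&fmt, path, nullptr, nullptr) < 0)
        return;
    avformat_find_stream_info(fmt, nullptr);
    AVPacket *pkt = av_packet_alloc();
    int sinceLastKey = 0;
    while (av_read_frame(fmt, pkt) >= 0)
    {
        if (pkt->flags & AV_PKT_FLAG_KEY)
        {
            printf("key frame, %d packets since the previous one\n", sinceLastKey);
            sinceLastKey = 0;
        }
        ++sinceLastKey;
        av_packet_unref(pkt);
    }
    av_packet_free(&pkt);
    avformat_close_input(&fmt);
}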