基于FFMPEG的视频编码器。该编码器实现了YUV420P的像素数据编码为H.264的压缩编码数据,或编码成其他文件也是类似的操作。
主要操作是读取原来的YUV数据压缩到文件中并且写入文件头尾,
存在的两个问题暂时先记录
1、avcodec_receive_packet的时候总是提示EAGAIN错误,而使用旧的avcodec_encode_video2却可以编码
2、ffmpeg自带的exmple中的encode_video.c要如何运行,
相关函数和要注意的地方
第1步、就是得到AVOutputFormat* fmt;输出格式
有两种方法
//Method1.
pFormatCtx = avformat_alloc_context();
//av_guess_format在注册输出格式列表中返回与所提供的参数最匹配的输出格式,如果没有匹配则返回NULL。
//AVOutputFormat *av_guess_format(const char *short_name, const char *filename, const char *mime_type);
//从前往后去匹配 如果为NULL则选择后一个参数
fmt = av_guess_format(NULL, out_file, NULL);//根据文件名去匹配输出格式AVOutputFormat
pFormatCtx->oformat = fmt;
//Method 2.
//为输出格式分配一个AVFormatContext。
//int avformat_alloc_output_context2(AVFormatContext **ctx, AVOutputFormat *oformat,const char *format_name, const char *filename);
//oformat用于分配上下文的格式,如果为NULL则用format_name、filename代替
//format_name用于分配上下文的输出格式的名称,如果为NULL则使用filename文件名代替
//filename用于分配上下文的文件名
avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, out_file);
fmt = pFormatCtx->oformat;
第2步、实例化AVIOContext与输出文件进行绑定
//创建并初始化一个AVIOContext,用于访问url指示的资源。
原型;传入双重指针,填充AVIOContext
int avio_open(AVIOContext **s, const char *url, int flags);
使用
if (avio_open(&pFormatCtx->pb, out_file, AVIO_FLAG_READ_WRITE) < 0) {
printf("Failed to open output file! \n");
return -1;
}
第3步、创建AVStream 视频流
//原型
向媒体文件添加一个新流。
s media file handle,c可以传入特殊指定 否则默认AVFormatContext里面的
AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c);
//使用
video_st = avformat_new_stream(pFormatCtx, 0);
第4步、给AVCodecContext编码器上下文属性赋值,并创建打开编码器
AVCodecContext* pCodecCtx;
AVCodec* pCodec;
pCodecCtx = video_st->codec;
//pCodecCtx->codec_id =AV_CODEC_ID_HEVC;
pCodecCtx->codec_id = fmt->video_codec;
pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
pCodecCtx->width = in_w;
pCodecCtx->height = in_h;
pCodecCtx->bit_rate = 400000;
pCodecCtx->gop_size = 250;
pCodecCtx->time_base.num = 1;
pCodecCtx->time_base.den = 25;
//查找具有匹配编解码器ID的已注册编码器。
pCodec = avcodec_find_encoder(pCodecCtx->codec_id);
if (!pCodec) {
printf("Can not find encoder! \n");
return -1;
}
if (avcodec_open2(pCodecCtx, pCodec, ¶m) < 0) {
printf("Failed to open encoder! \n");
return -1;
}
第5步、给读取的帧申请缓存,布置data大小
//存储一帧像素数据缓冲区
pFrame = av_frame_alloc();
picture_size = av_image_get_buffer_size(pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, 1);
pFrame_buf = (uint8_t *)av_malloc(picture_size);
av_image_fill_arrays(pFrame->data, pFrame->linesize, pFrame_buf,AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);
pFrame->width = pCodecCtx->width;
pFrame->height = pCodecCtx->height;
pFrame->format = pCodecCtx->pix_fmt;
第6步、读取写入了
//分配流私有数据并将流头写入输出媒体文件。
avformat_write_header(pFormatCtx, NULL);
for (int i = 0; i<framenum; i++)
{
//从文件中读取YUV数据
if (fread(picture_buf, 1, y_size * 3 / 2, in_file) <= 0) {
printf("Failed to read raw data! \n");
getchar();
return -1;
}
else if (feof(in_file)) {
break;
}
//给AVFrame赋值
pFrame->data[0] = picture_buf; // Y
pFrame->data[1] = picture_buf + y_size; // U
pFrame->data[2] = picture_buf + y_size * 5 / 4; // V y_size+y_size/4个位置的U
pFrame->pts = i*(video_st->time_base.den) / ((video_st->time_base.num) * 25);
int got_picture = 0;
//成功 编码
int ret = avcodec_encode_video2(pCodecCtx, pkt, pFrame, &got_picture);
if (ret < 0) {
printf("Failed to encode! \n");
return -1;
}
if (got_picture == 1) {
printf("Succeed to encode frame: %5d\tsize:%5d\n", framecnt, pkt->size);
framecnt++;
pkt->stream_index = video_st->index;
//写入到文件
ret = av_write_frame(pFormatCtx, pkt);
av_free_packet(pkt);
}
}
//最后还要将缓冲中的写入
//Flush Encoder
ret = flush_encoder(pFormatCtx, 0);
if (ret < 0) {
printf("Flushing encoder failed\n");
getchar();
return -1;
}
//Write file trailer
av_write_trailer(pFormatCtx);
//将avcodec_encode_video2替换成新的API后avcodec_receive_packet总是提示EAGAIN。原因:EAGAIN并非错误,只表示编码器还需要更多输入帧(有B帧/lookahead时会先缓冲若干帧),应继续send后续帧并在最后send NULL冲刷;另外下面代码用 ret == 1 判断成功是错误的,avcodec_receive_packet成功时返回0,所以即使有包输出也被丢弃了。
/* 失败 avcodec_receive_packet总是返回EAGAIN导致没有包出来
//Encode
ret = avcodec_send_frame(pCodecCtx, pFrame);
if (ret < 0) {
printf("Failed to encode! \n");
getchar();
return -1;
}
while (ret >= 0) {
ret = avcodec_receive_packet(pCodecCtx, pkt);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
{
av_free_packet(pkt);
break;
}
else if (ret < 0) {
fprintf(stderr, "Error during encoding\n");
exit(1);
}
if (ret == 1) {
printf("Succeed to encode frame: %5d\tsize:%5d\n", framecnt, pkt->size);
framecnt++;
pkt->stream_index = video_st->index;
ret = av_write_frame(pFormatCtx, pkt);
av_free_packet(pkt);
}
}
*/
实践代码
/*
 * Drain all packets still buffered inside the encoder of the given stream
 * and mux them into fmt_ctx.  Must be called after the last real frame has
 * been submitted and before av_write_trailer().
 *
 * fmt_ctx      - open output format context (header already written)
 * stream_index - index of the video stream whose encoder is flushed
 *
 * Returns 0 on success (encoder fully drained) or a negative AVERROR code.
 */
int flush_encoder(AVFormatContext *fmt_ctx, unsigned int stream_index) {
	int ret = 0;
	int got_frame;
	AVPacket enc_pkt;
	/* Encoders without the DELAY capability buffer nothing: no flush needed. */
	if (!(fmt_ctx->streams[stream_index]->codec->codec->capabilities &
		CODEC_CAP_DELAY))
		return 0;
	while (1) {
		av_init_packet(&enc_pkt);
		enc_pkt.data = NULL;	/* let the encoder allocate the payload */
		enc_pkt.size = 0;
		/* Passing a NULL frame signals end-of-stream: each call pops one
		 * buffered packet until got_frame comes back 0. */
		ret = avcodec_encode_video2(fmt_ctx->streams[stream_index]->codec, &enc_pkt,
			NULL, &got_frame);
		if (ret < 0)
			break;
		if (!got_frame) {	/* encoder fully drained */
			ret = 0;
			break;
		}
		printf("Flush Encoder: Succeed to encode 1 frame!\tsize:%5d\n", enc_pkt.size);
		/* mux encoded frame */
		ret = av_write_frame(fmt_ctx, &enc_pkt);
		/* av_write_frame() does NOT take ownership of the packet data;
		 * unref here or every flushed packet leaks. */
		av_packet_unref(&enc_pkt);
		if (ret < 0)
			break;
	}
	return ret;
}
/*
 * Encode raw YUV420P frames read from a file into H.264 (or whatever format
 * av_guess_format derives from the output filename), writing a complete
 * container: header, encoded packets, trailer.
 *
 * Uses the legacy avcodec_encode_video2() API.  NOTE(review): the earlier
 * attempt with avcodec_send_frame()/avcodec_receive_packet() "failed" for
 * two reasons: (1) EAGAIN from avcodec_receive_packet() is NORMAL and just
 * means "feed more input frames" (the encoder buffers frames for B-frames /
 * lookahead), and (2) the success check was `ret == 1` while the function
 * actually returns 0 on success, so any packet that did come out was dropped.
 */
int main(int argc, char* argv[])
{
	AVFormatContext* pFormatCtx = NULL;
	AVOutputFormat* fmt;
	AVStream* video_st;
	AVCodecContext* pCodecCtx;
	AVCodec* pCodec;
	AVPacket* pkt;
	uint8_t* picture_buf;
	uint8_t* pFrame_buf;
	AVFrame* pFrame;
	int picture_size;
	int y_size;
	int framecnt = 0;
	int ret;
	FILE *in_file = fopen("output240X128.yuv", "rb"); //Input raw YUV data
	int in_w = 240, in_h = 128;	//Input data's width and height
	int framenum = 100;	//Frames to encode
	const char* out_file = "ds.h264";	//Output Filepath
	if (!in_file) {
		printf("Failed to open input file! \n");
		return -1;
	}
	av_register_all();
	/* Allocate an output AVFormatContext; the muxer is guessed from the
	 * output filename (equivalent to av_guess_format + alloc by hand). */
	avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, out_file);
	if (!pFormatCtx) {
		printf("Failed to alloc output context! \n");
		return -1;
	}
	fmt = pFormatCtx->oformat;
	//Open output URL
	/* Create the AVIOContext bound to the output file. */
	if (avio_open(&pFormatCtx->pb, out_file, AVIO_FLAG_READ_WRITE) < 0) {
		printf("Failed to open output file! \n");
		return -1;
	}
	video_st = avformat_new_stream(pFormatCtx, NULL);
	if (video_st == NULL) {
		return -1;
	}
	//Param that must set
	/* video_st->codec is deprecated but required by the old encode API. */
	pCodecCtx = video_st->codec;
	pCodecCtx->codec_id = fmt->video_codec;
	pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
	pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
	pCodecCtx->width = in_w;
	pCodecCtx->height = in_h;
	pCodecCtx->bit_rate = 400000;
	pCodecCtx->gop_size = 250;
	pCodecCtx->time_base.num = 1;
	pCodecCtx->time_base.den = 25;
	//H264 quantizer range
	pCodecCtx->qmin = 10;
	pCodecCtx->qmax = 51;
	/* NOTE(review): "zerolatency" below disables B-frames/lookahead, so this
	 * setting is effectively overridden for H.264. */
	pCodecCtx->max_b_frames = 3;
	// Set encoder private options
	AVDictionary *param = 0;
	//H.264
	if (pCodecCtx->codec_id == AV_CODEC_ID_H264) {
		av_dict_set(&param, "preset", "slow", 0);
		/* zerolatency removes encoder buffering: packets come out for every
		 * frame, which is also why the old one-in/one-out API appears to
		 * "work" while the new API would need draining. */
		av_dict_set(&param, "tune", "zerolatency", 0);
	}
	//H.265
	if (pCodecCtx->codec_id == AV_CODEC_ID_H265) {
		av_dict_set(&param, "preset", "ultrafast", 0);
		av_dict_set(&param, "tune", "zero-latency", 0);
	}
	//Show some Information
	av_dump_format(pFormatCtx, 0, out_file, 1);
	/* Find a registered encoder matching the codec id. */
	pCodec = avcodec_find_encoder(pCodecCtx->codec_id);
	if (!pCodec) {
		printf("Can not find encoder! \n");
		return -1;
	}
	if (avcodec_open2(pCodecCtx, pCodec, &param) < 0) {
		printf("Failed to open encoder! \n");
		return -1;
	}
	/* Buffers for one frame of pixel data. */
	pFrame = av_frame_alloc();
	picture_size = av_image_get_buffer_size(pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, 1);
	picture_buf = (uint8_t *)av_malloc(picture_size);
	pFrame_buf = (uint8_t *)av_malloc(picture_size);
	av_image_fill_arrays(pFrame->data, pFrame->linesize, pFrame_buf, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);
	pFrame->width = pCodecCtx->width;
	pFrame->height = pCodecCtx->height;
	pFrame->format = pCodecCtx->pix_fmt;
	//Write File Header
	avformat_write_header(pFormatCtx, NULL);
	pkt = av_packet_alloc();
	y_size = pCodecCtx->width * pCodecCtx->height;
	/* YUV420P: 4 Y samples share one U and one V -> 1.5 bytes per pixel. */
	size_t frame_bytes = (size_t)y_size * 3 / 2;
	for (int i = 0; i < framenum; i++)
	{
		fflush(stdout);
		/* Read exactly one full frame; stop cleanly at EOF (or a trailing
		 * partial frame), fail hard only on a genuine read error.  The old
		 * `<= 0` check would have encoded a garbage partial frame. */
		if (fread(picture_buf, 1, frame_bytes, in_file) != frame_bytes) {
			if (ferror(in_file)) {
				printf("Failed to read raw data! \n");
				getchar();
				return -1;
			}
			break;
		}
		/* Point the frame planes into the freshly read packed buffer. */
		pFrame->data[0] = picture_buf;              // Y
		pFrame->data[1] = picture_buf + y_size;     // U starts after Y plane
		pFrame->data[2] = picture_buf + y_size * 5 / 4; // V starts after U (y_size/4)
		pFrame->pts = i * (video_st->time_base.den) / ((video_st->time_base.num) * 25);
		int got_picture = 0;
		//Encode
		int ret = avcodec_encode_video2(pCodecCtx, pkt, pFrame, &got_picture);
		if (ret < 0) {
			printf("Failed to encode! \n");
			return -1;
		}
		if (got_picture == 1) {
			printf("Succeed to encode frame: %5d\tsize:%5d\n", framecnt, pkt->size);
			framecnt++;
			pkt->stream_index = video_st->index;
			ret = av_write_frame(pFormatCtx, pkt);
			/* av_write_frame does not take ownership: unref, or leak. */
			av_packet_unref(pkt);
		}
	}
	//Flush Encoder
	ret = flush_encoder(pFormatCtx, 0);
	if (ret < 0) {
		printf("Flushing encoder failed\n");
		getchar();
		return -1;
	}
	//Write file trailer
	av_write_trailer(pFormatCtx);
	//Clean
	if (video_st) {
		avcodec_close(video_st->codec);
		av_frame_free(&pFrame);
		av_free(picture_buf);
		av_free(pFrame_buf);	/* was leaked before */
	}
	av_packet_free(&pkt);	/* was leaked before */
	avio_close(pFormatCtx->pb);
	avformat_free_context(pFormatCtx);
	fclose(in_file);	/* was commented out before */
	getchar();
	return 0;
}
ffmpeg-4.0.2\doc\examples\encode_video.c代码
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
/*
 * Push one frame into the encoder (NULL flushes it) and write every packet
 * the encoder is ready to return into outfile.  Exits the process on any
 * encoding error; EAGAIN/EOF from the receive side end the drain normally.
 */
static void encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt,
                   FILE *outfile)
{
    int err;

    /* Submit the frame (a NULL frame enters flush mode). */
    if (frame)
        printf("Send frame %3"PRId64"\n", frame->pts);
    err = avcodec_send_frame(enc_ctx, frame);
    if (err < 0) {
        fprintf(stderr, "Error sending a frame for encoding\n");
        exit(1);
    }

    /* Drain all packets currently available from the encoder. */
    for (;;) {
        err = avcodec_receive_packet(enc_ctx, pkt);
        if (err == AVERROR(EAGAIN) || err == AVERROR_EOF)
            return;
        if (err < 0) {
            fprintf(stderr, "Error during encoding\n");
            exit(1);
        }
        printf("Write packet %3"PRId64" (size=%5d)\n", pkt->pts, pkt->size);
        fwrite(pkt->data, 1, pkt->size, outfile);
        av_packet_unref(pkt);
    }
}
/*
 * Encode one second (25 frames) of a synthetic moving gradient with the
 * encoder named on the command line, writing the raw elementary stream to
 * the given output file.  Usage: prog <output file> <codec name>.
 */
int main(int argc, char **argv)
{
    const char *filename, *codec_name;
    const AVCodec *codec;
    AVCodecContext *enc = NULL;
    FILE *out_fp;
    AVFrame *frame;
    AVPacket *pkt;
    int err;
    /* MPEG end-of-sequence start code so players see a complete file. */
    uint8_t endcode[] = { 0, 0, 1, 0xb7 };

    if (argc <= 2) {
        fprintf(stderr, "Usage: %s <output file> <codec name>\n", argv[0]);
        exit(0);
    }
    filename = argv[1];
    codec_name = argv[2];

    /* Look the encoder up by name (e.g. "mpeg1video", "libx264"). */
    codec = avcodec_find_encoder_by_name(codec_name);
    if (!codec) {
        fprintf(stderr, "Codec '%s' not found\n", codec_name);
        exit(1);
    }
    enc = avcodec_alloc_context3(codec);
    if (!enc) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }
    pkt = av_packet_alloc();
    if (!pkt)
        exit(1);

    /* Sample parameters; resolution must be a multiple of two. */
    enc->bit_rate = 400000;
    enc->width = 352;
    enc->height = 288;
    /* 25 frames per second. */
    enc->time_base = (AVRational){1, 25};
    enc->framerate = (AVRational){25, 1};
    /* One intra frame every ten frames; a frame whose pict_type is forced
     * to AV_PICTURE_TYPE_I becomes an I frame regardless of gop_size. */
    enc->gop_size = 10;
    enc->max_b_frames = 1;
    enc->pix_fmt = AV_PIX_FMT_YUV420P;
    if (codec->id == AV_CODEC_ID_H264)
        av_opt_set(enc->priv_data, "preset", "slow", 0);

    err = avcodec_open2(enc, codec, NULL);
    if (err < 0) {
        fprintf(stderr, "Could not open codec: %s\n", av_err2str(err));
        exit(1);
    }
    out_fp = fopen(filename, "wb");
    if (!out_fp) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    frame->format = enc->pix_fmt;
    frame->width  = enc->width;
    frame->height = enc->height;
    err = av_frame_get_buffer(frame, 32);
    if (err < 0) {
        fprintf(stderr, "Could not allocate the video frame data\n");
        exit(1);
    }

    /* Encode 1 second of video. */
    for (int frame_no = 0; frame_no < 25; frame_no++) {
        fflush(stdout);
        /* The encoder may still hold references; make the data writable. */
        if (av_frame_make_writable(frame) < 0)
            exit(1);
        /* Fill the Y plane with a drifting diagonal gradient. */
        for (int row = 0; row < enc->height; row++) {
            for (int col = 0; col < enc->width; col++) {
                frame->data[0][row * frame->linesize[0] + col] = col + row + frame_no * 3;
            }
        }
        /* Cb and Cr planes are half-resolution in YUV420P. */
        for (int row = 0; row < enc->height / 2; row++) {
            for (int col = 0; col < enc->width / 2; col++) {
                frame->data[1][row * frame->linesize[1] + col] = 128 + row + frame_no * 2;
                frame->data[2][row * frame->linesize[2] + col] = 64 + col + frame_no * 5;
            }
        }
        frame->pts = frame_no;
        /* Encode this image. */
        encode(enc, frame, pkt, out_fp);
    }

    /* Flush any delayed packets out of the encoder. */
    encode(enc, NULL, pkt, out_fp);
    /* Append the sequence end code so the result is a real MPEG file. */
    fwrite(endcode, 1, sizeof(endcode), out_fp);
    fclose(out_fp);

    avcodec_free_context(&enc);
    av_frame_free(&frame);
    av_packet_free(&pkt);
    return 0;
}