I've been learning FFmpeg for a while now. With no one around to ask, I've basically had to search online whenever a problem came up, and by now I can just about get my work tasks done. This post is a summary, meant to deepen my understanding and make review easier.
The FFmpeg work in my projects mainly covers:
1. Pulling the camera's video stream on the host machine and displaying it (the host software is developed in Qt).
2. Saving the video stream (only H.264 encoding works so far; H.265 keeps failing and I haven't figured out why).
3. Adding timestamps (one time watermark per second; the main difficulties were efficiency and fonts).
4. Video playback (the basic features of a video player).
5. Pulling the camera stream on the host, where the decoded frames must be overlaid with intelligent-detection information (in the current project, the trajectories of objects thrown from high-rises). This means using only FFmpeg's decoder, drawing on the decoded data with OpenCV, and then sending the result to the Qt client for display.
Many details had to be worked out along the way, and I owe their solutions to the blogs of 雷霄骅 and 云天之巅. 雷霄骅's blog in particular took me from zero to finishing the basic tasks; he fully deserves his reputation.
This post summarizes point 5 above: using only FFmpeg's decoder. The decoding flow is shown in the figure below.
FFmpeg functions used:
avcodec_find_decoder(): look up the decoder.
avcodec_alloc_context3(): allocate an AVCodecContext.
avcodec_open2(): open the decoder.
av_parser_init(): initialize the AVCodecParserContext.
av_parser_parse2(): parse the raw stream into complete packets.
avcodec_send_packet() + avcodec_receive_frame(): decode frame data. (Because of FFmpeg API changes, these replace the avcodec_decode_video2() that 雷霄骅 used; see the sketch below.)
sws_scale(): pixel-format conversion, turning decoded frames into data a Qt image can display.
雷霄骅's related blog post: https://blog.csdn.net/leixiaohua1020/article/details/42181571
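avcodec_decode_video2() was deprecated and later removed from FFmpeg, so here is a minimal sketch of the replacement pattern, assuming an already-opened decoder. The names codec_ctx, pkt, frame and processFrame are placeholders, not code from this project. One packet can yield zero or more frames, so avcodec_receive_frame() should be called in a loop:

extern "C" {
#include <libavcodec/avcodec.h>
}

// Minimal sketch: decode one parsed packet with the send/receive API.
// codec_ctx is an opened AVCodecContext*, pkt a parsed AVPacket*, frame a
// reusable AVFrame*; processFrame is a hypothetical per-frame callback.
static int decodePacket(AVCodecContext* codec_ctx, AVPacket* pkt, AVFrame* frame,
                        void (*processFrame)(AVFrame*))
{
    int ret = avcodec_send_packet(codec_ctx, pkt);
    if (ret < 0)
        return ret;                   // a real error, not just "need more data"
    for (;;)
    {
        ret = avcodec_receive_frame(codec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;                 // needs more input / fully drained
        if (ret < 0)
            return ret;               // decode error
        processFrame(frame);          // handle each decoded frame
    }
}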
The key decoding code follows.
Overall decoding workflow: the network camera sends, over a socket, a video stream that carries high-rise-littering detection data. After the socket data is received, the H.265 payload is separated out and fed to the decoder; the decoded frames are converted into Qt images, the detection data (e.g. the object's trajectory) is drawn on top, and the result is displayed.
Code:
// Initialization: allocate the packet, frames, decoder and parser
bool DecodeThread::initVideoObjects()
{
    // av_packet_alloc() already returns an initialized packet, so the
    // deprecated av_init_packet() is not needed
    m_avPacket = av_packet_alloc();
    m_avFrameInput = av_frame_alloc();
    m_avFramePicture = av_frame_alloc();
    m_pCodec = avcodec_find_decoder(m_CodecId);
    if (!m_pCodec)
    {
        CDiagnosis::GetInstance()->printMsg(QString::fromLocal8Bit("Codec not found."));
        return false;
    }
    m_avCodecCtx = avcodec_alloc_context3(m_pCodec);
    if (!m_avCodecCtx)
    {
        CDiagnosis::GetInstance()->printMsg(QString::fromLocal8Bit("Could not allocate video codec context."));
        return false;
    }
    m_pCodecParserCtx = av_parser_init(m_CodecId);
    if (!m_pCodecParserCtx)
    {
        CDiagnosis::GetInstance()->printMsg(QString::fromLocal8Bit("Could not allocate video parser context."));
        return false;
    }
    if (avcodec_open2(m_avCodecCtx, m_pCodec, NULL) < 0)
    {
        CDiagnosis::GetInstance()->printMsg(QString::fromLocal8Bit("Could not open codec."));
        return false;
    }
    m_bFfmpegInited = true;
    return true;
}
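One caveat: the early returns above do not free what has already been allocated, so freeObjects() (sketched at the end of this post) has to cope with a partially initialized state.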
// Initialization of the image objects used for Qt display
bool DecodeThread::initImageObjects()
{
    AVPixelFormat srcFormat = m_avCodecCtx->pix_fmt; // typically AV_PIX_FMT_YUV420P
    AVPixelFormat dstFormat = AV_PIX_FMT_RGB32;
    // Allocate a buffer large enough to hold one RGB32 frame
    int nBytes = av_image_get_buffer_size(dstFormat, m_nVideoWidth, m_nVideoHeight, 1);
    m_pPicBuffer = (uint8_t*)av_malloc(nBytes * sizeof(uint8_t));
    av_image_fill_arrays(m_avFramePicture->data, m_avFramePicture->linesize, m_pPicBuffer, dstFormat, m_nVideoWidth, m_nVideoHeight, 1);
    m_pSwsContext = sws_getContext(m_nVideoWidth, m_nVideoHeight, srcFormat,
                                   m_nVideoWidth, m_nVideoHeight, dstFormat,
                                   SWS_FAST_BILINEAR, NULL, NULL, NULL);
    return true;
}
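Since the source and destination dimensions passed to sws_getContext() are identical, sws_scale() performs only the YUV420P-to-RGB32 pixel-format conversion here, with no actual scaling, so the fast SWS_FAST_BILINEAR flag costs nothing in quality.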
// The core decoding function
void DecodeThread::decodeData()
{
    QString szTime = QTime::currentTime().toString();
    QString szMsg = szTime.append(QString::fromLocal8Bit(" : decode thread started"));
    CDiagnosis::GetInstance()->printMsg(szMsg);
    int first_time = 1;
    while (!m_bStop)
    {
        if (!m_bIsInit) // lazy initialization, driven by this flag
        {
            if (m_bIsFirstData)
                continue;
            bool bInitSuccess = initVideoObjects();
            if (!bInitSuccess)
            {
                continue;
            }
            m_bIsInit = true;
            continue;
        }
        m_pDataMutex->lock();
        if (m_qDataArray.size() <= 0)
        {
            // Release the lock before sleeping so the producer is not blocked
            m_pDataMutex->unlock();
            CUtilityMethod::Sleep(200);
            continue;
        }
        QByteArray data = m_qDataArray.dequeue();
        m_pDataMutex->unlock();
        int nDataSize = data.size();
        if (nDataSize <= 0)
        {
            CDiagnosis::GetInstance()->printMsg(QString::fromLocal8Bit("Data length is not positive; waiting for the next packet"));
            CUtilityMethod::Sleep(200);
            continue;
        }
        while (nDataSize > 0)
        {
            // The parser may read slightly past the end of the input, so the
            // buffer needs AV_INPUT_BUFFER_PADDING_SIZE zeroed padding bytes
            const int nBufferSize = nDataSize + AV_INPUT_BUFFER_PADDING_SIZE;
            unsigned char* pData = new unsigned char[nBufferSize];
            memset(pData, 0, nBufferSize);
            memcpy_s(pData, nBufferSize, data.constData(), nDataSize);
            int nLength = av_parser_parse2(m_pCodecParserCtx, m_avCodecCtx,
                                           &m_avPacket->data, &m_avPacket->size,
                                           pData, nDataSize,
                                           AV_NOPTS_VALUE, AV_NOPTS_VALUE, AV_NOPTS_VALUE);
            nDataSize -= nLength;
            data.remove(0, nLength);
            if (m_avPacket->size == 0)
            {
                // The parser has not assembled a complete packet yet
                delete[] pData;
                continue;
            }
            switch (m_pCodecParserCtx->pict_type)
            {
            case AV_PICTURE_TYPE_I: /*CDiagnosis::GetInstance()->printMsg(QString::fromLocal8Bit("Got an I frame"));*/ break;
            case AV_PICTURE_TYPE_P: /*CDiagnosis::GetInstance()->printMsg(QString::fromLocal8Bit("Got a P frame"));*/ break;
            case AV_PICTURE_TYPE_B: CDiagnosis::GetInstance()->printMsg(QString::fromLocal8Bit("Got a B frame")); break;
            default: CDiagnosis::GetInstance()->printMsg(QString::fromLocal8Bit("Got an unknown frame type")); break;
            }
            // Note: one send can in principle yield several frames; see the
            // send/receive sketch near the top of this post
            avcodec_send_packet(m_avCodecCtx, m_avPacket);
            int ret = avcodec_receive_frame(m_avCodecCtx, m_avFrameInput);
            if (ret < 0)
            {
                if (ret == AVERROR(EAGAIN)) // decoder needs more input first
                {
                    delete[] pData;
                    continue;
                }
                CDiagnosis::GetInstance()->printMsg(QString::fromLocal8Bit("Decode error"));
                delete[] pData;
                return;
            }
            else
            {
                if (first_time)
                {
                    // The real dimensions are only known after the first
                    // successful decode, so the image objects are created here
                    m_nVideoWidth = m_avCodecCtx->width;
                    m_nVideoHeight = m_avCodecCtx->height;
                    initImageObjects();
                    first_time = 0;
                }
                sws_scale(m_pSwsContext, (const uint8_t* const*)m_avFrameInput->data, m_avFrameInput->linesize,
                          0, m_nVideoHeight, m_avFramePicture->data, m_avFramePicture->linesize);
                if (m_qDrawData.size() <= 0)
                {
                    delete[] pData;
                    continue;
                }
                m_pHeadMutex->lock();
                output_str* pTemp = m_qDrawData.dequeue();
                m_pHeadMutex->unlock();
                // Wrap the RGB32 buffer in a QImage, passing the stride
                // explicitly since linesize may exceed width * 4
                QImage image(m_avFramePicture->data[0], m_nVideoWidth, m_nVideoHeight,
                             m_avFramePicture->linesize[0], QImage::Format_RGB32);
                if (pTemp->data_size <= 0)
                {
                    // Nothing to draw: show the decoded frame as-is
                    emit receiveImage(image);
                }
                else
                {
                    // Wrap the same pixels in a cv::Mat (no copy), draw the
                    // detection overlay, then hand the result back to Qt
                    cv::Mat matImage = cv::Mat(image.height(), image.width(), CV_8UC4,
                                               (void*)image.constBits(), image.bytesPerLine());
                    cv::Mat newImage = show(matImage, *pTemp);
                    QImage ShowImage((const uchar*)newImage.data, newImage.cols, newImage.rows,
                                     newImage.step, QImage::Format_RGB32);
                    if (!ShowImage.isNull())
                    {
                        emit receiveImage(ShowImage);
                    }
                }
                delete pTemp;
            }
            delete[] pData;
        }
        // av_packet_unref() is all that is needed per iteration; the packet
        // itself is freed once, in freeObjects()
        av_packet_unref(m_avPacket);
    }
    freeObjects();
    emit stopSignal();
}
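freeObjects() is not shown in the listing above. As a reference, here is a minimal sketch of what the cleanup can look like, assuming the member names used in this post (a guess at a matching routine, not verbatim project code):

// Hypothetical sketch of freeObjects(): release everything allocated in
// initVideoObjects()/initImageObjects(), safely even after a partial init
void DecodeThread::freeObjects()
{
    if (m_pSwsContext)     { sws_freeContext(m_pSwsContext); m_pSwsContext = NULL; }
    if (m_pCodecParserCtx) { av_parser_close(m_pCodecParserCtx); m_pCodecParserCtx = NULL; }
    if (m_avCodecCtx)      avcodec_free_context(&m_avCodecCtx); // also closes the codec
    if (m_avFrameInput)    av_frame_free(&m_avFrameInput);
    if (m_avFramePicture)  av_frame_free(&m_avFramePicture);
    if (m_avPacket)        av_packet_free(&m_avPacket);
    if (m_pPicBuffer)      av_freep(&m_pPicBuffer);
    m_bFfmpegInited = false;
    m_bIsInit = false;
}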