Pulling an RTMP live stream with FFmpeg in C++

//Author: shibaorong
//Date: 2021.5.23

#include <iostream>
#include <string>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>


#ifdef __cplusplus
extern "C"
{
#endif
#include <libavformat/avformat.h>
#include <libavutil/frame.h>
#include <libavutil/mem.h>
#include <libswscale/swscale.h>
#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>

#ifdef __cplusplus
}
#endif

class FfmpegStreamChr{
     public:
     AVFormatContext* format_context_;
     AVStream * video_st;
     AVStream * out_stream;
     AVCodec* codec_;
     AVCodecContext* codec_context_;
     AVFrame* yuv_frame_;              //decoded frame (image-conversion path)
     AVFormatContext* pOutFmtContext;  //output (muxer) context
     AVPacket* packet_;
     cv::Mat TempImg;                  //BGR image buffer (see the sketch after main())
     SwsContext* y2r_sws_context_;     //YUV -> RGB conversion context
     uchar** rgb_data_;                //destination planes for sws_scale
     int* rgb_line_size_;              //destination line sizes for sws_scale
     int video_stream_index_;
     int audio_stream_index_;
     int duration;
     int width_;
     int height_;
     int bg_pts;
     int ret;
     int video_frame_size;
     int audio_frame_size;
     int video_frame_count;
     int audio_frame_count;
     //bool pts_first;
     bool Decode(const char *,const char *);
     //bool DecodeToImage();
     //Called periodically by FFmpeg during blocking I/O; returning non-zero aborts
     //the current operation. This version only logs and never aborts (a timeout
     //variant is sketched after main()).
     static int custom_interrupt_callback(void *){
        std::cout<<"interrupt check"<<std::endl;
        return 0;
     };
     FfmpegStreamChr(){
         this->format_context_=avformat_alloc_context();
         this->format_context_->interrupt_callback.callback=custom_interrupt_callback;
         this->format_context_->interrupt_callback.opaque=this;
         rgb_data_=new uchar*[8];      //matches AVFrame::data[8]
         rgb_line_size_=new int[8];    //matches AVFrame::linesize[8]
         packet_=av_packet_alloc();
         video_frame_size=0;
         audio_frame_size=0;
         video_frame_count=0;
         audio_frame_count=0;
     }

};

bool FfmpegStreamChr::Decode(const char *pstrFilename,const char* out_file){

    avformat_network_init();

    //URL of the input stream (RTMP address)
    std::string tempfile=pstrFilename;

    if(avformat_open_input(&format_context_,tempfile.c_str(),NULL,NULL)<0)return false;
    
    //Read packets from the input to retrieve stream information
    if(avformat_find_stream_info(format_context_,nullptr)<0)return false;

    //Dump input format information
    av_dump_format(format_context_,0,tempfile.c_str(),0);

    ret=avformat_alloc_output_context2(&pOutFmtContext,NULL,NULL,out_file);
    if(ret<0){
        printf("avformat_alloc_output_context2 filed:%d\n",ret);
        return false;
    }

    for(int i=0;i<format_context_->nb_streams;i++){
        video_st=format_context_->streams[i];
        //Record the video and audio stream indices
        if(video_st->codecpar->codec_type==AVMEDIA_TYPE_VIDEO){
            video_stream_index_=i;
        }
        if(video_st->codecpar->codec_type==AVMEDIA_TYPE_AUDIO){
            audio_stream_index_=i;
        }

        //Find the decoder matching this stream
        codec_=avcodec_find_decoder(video_st->codecpar->codec_id);
        out_stream=avformat_new_stream(pOutFmtContext,codec_);

        //Allocate a codec context and copy the stream parameters into it
        AVCodecContext* codec_ctx=avcodec_alloc_context3(codec_);
        ret=avcodec_parameters_to_context(codec_ctx,video_st->codecpar);
        if(ret<0){
            printf("Failed to copy in_stream codecpar to codec context\n");
            return false;
        }

        codec_ctx->codec_tag=0;
        codec_ctx->codec_tag = 0;
        if (pOutFmtContext->oformat->flags & AVFMT_GLOBALHEADER)
            codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

        ret=avcodec_parameters_from_context(out_stream->codecpar,codec_ctx);
        if(ret<0){
            printf("Failed to copy codec context to out_stream codecpar context\n");
            return false;
        }

        //The temporary codec context was only needed to carry the parameters; free it to avoid a leak
        avcodec_free_context(&codec_ctx);
    }

    av_dump_format(pOutFmtContext, 0, out_file, 1);
    ret = avio_open(&pOutFmtContext->pb, out_file, AVIO_FLAG_WRITE);
    if(ret < 0){
        printf("avio_open failed\n");
        return false;
    }

    ret = avformat_write_header(pOutFmtContext, NULL);
    if( ret < 0){
        printf("avformat_write_header failed\n");
        return false;
    }

    while(1){
        //Read the next packet from the input
        ret=av_read_frame(format_context_,packet_);
        if(ret<0)break;
        video_st=format_context_->streams[packet_->stream_index];
        out_stream=pOutFmtContext->streams[packet_->stream_index];

        av_packet_rescale_ts(packet_,video_st->time_base,out_stream->time_base);

        if(packet_->stream_index==video_stream_index_){
            video_frame_size+=packet_->size;
            printf("recv %5d video frame %5d-%5d\n", ++video_frame_count, packet_->size, video_frame_size);
        }

        if(packet_->stream_index==audio_stream_index_){
            audio_frame_size+=packet_->size;
            printf("recv %5d audio frame %5d-%5d\n", ++audio_frame_count, packet_->size, audio_frame_size);
        }

        ret=av_interleaved_write_frame(pOutFmtContext,packet_);
        if(ret<0){
            printf("av_interleaved_write_frame failed\n");
            break;
        }
        av_packet_unref(packet_);
    }

    av_write_trailer(pOutFmtContext);
    av_packet_free(&packet_);

    avformat_close_input(&format_context_);
    avio_close(pOutFmtContext->pb);
    avformat_free_context(pOutFmtContext);
    printf("................end\n");
    return true;

}


int main()
{
    const char* s="rtmp://58.200.131.2:1935/livetv/hunantv";
    const char* o="outfile.flv";
    FfmpegStreamChr f;
    bool res=f.Decode(s,o);
    std::cout<<res<<std::endl;
    return 0;
}
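
The interrupt callback registered in the constructor always returns 0, so it never actually cancels a stalled network call. FFmpeg aborts the current blocking operation (avformat_open_input, av_read_frame, ...) as soon as this callback returns non-zero. Below is a minimal sketch of a timeout-based variant; the TimeoutGuard struct, its timeout_ms field, and the use of std::chrono are assumptions for illustration, not part of the original class.

#include <chrono>

struct TimeoutGuard {
    std::chrono::steady_clock::time_point start;  //reset before each blocking call
    int timeout_ms;                                //hypothetical limit, e.g. 5000
};

//Return 1 to make FFmpeg abort the current blocking operation, 0 to continue.
static int timeout_interrupt_callback(void* opaque){
    auto* guard = static_cast<TimeoutGuard*>(opaque);
    auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::steady_clock::now() - guard->start).count();
    return elapsed > guard->timeout_ms ? 1 : 0;
}

//Registration would mirror the constructor:
//  format_context_->interrupt_callback.callback = timeout_interrupt_callback;
//  format_context_->interrupt_callback.opaque   = &guard;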
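
libavutil/dict.h is included at the top but never used: avformat_open_input is called with NULL options. Demuxer and network options can be passed through an AVDictionary instead; the sketch below only shows the mechanism, and the particular option values are illustrative choices for a low-latency pull, not values taken from the original post.

    AVDictionary* opts = nullptr;
    av_dict_set(&opts, "fflags", "nobuffer", 0);          //reduce input-side buffering
    av_dict_set(&opts, "probesize", "1000000", 0);        //probe at most ~1 MB
    av_dict_set(&opts, "analyzeduration", "1000000", 0);  //analyze at most ~1 s (microseconds)
    if (avformat_open_input(&format_context_, tempfile.c_str(), NULL, &opts) < 0) {
        av_dict_free(&opts);
        return false;
    }
    av_dict_free(&opts);  //any options the demuxer did not consume remain in opts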
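
Several members of FfmpegStreamChr (codec_context_, yuv_frame_, y2r_sws_context_, rgb_data_, rgb_line_size_, TempImg) are never touched by Decode; they belong to the commented-out DecodeToImage path. The sketch below shows one way a decoded frame could be converted to a BGR cv::Mat with libswscale; it assumes FFmpeg 4.x, a codec context already opened with avcodec_open2, and a frame allocated with av_frame_alloc, so it is an illustration rather than the original author's implementation.

//Decode one packet and convert the resulting frame to a BGR cv::Mat.
bool DecodePacketToImage(AVCodecContext* codec_ctx, AVPacket* pkt,
                         AVFrame* frame, cv::Mat& image){
    if (avcodec_send_packet(codec_ctx, pkt) < 0) return false;
    if (avcodec_receive_frame(codec_ctx, frame) < 0) return false;  //may simply need more packets

    int w = frame->width, h = frame->height;
    SwsContext* sws = sws_getContext(w, h, (AVPixelFormat)frame->format,
                                     w, h, AV_PIX_FMT_BGR24,
                                     SWS_BILINEAR, nullptr, nullptr, nullptr);
    if (!sws) return false;

    image.create(h, w, CV_8UC3);
    uint8_t* dst_data[4] = { image.data, nullptr, nullptr, nullptr };
    int dst_linesize[4]  = { static_cast<int>(image.step), 0, 0, 0 };
    sws_scale(sws, frame->data, frame->linesize, 0, h, dst_data, dst_linesize);
    sws_freeContext(sws);
    return true;
}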

 
