#include "stdafx.h"
#include <stdio.h>
#include <stdlib.h>
extern "C"
{
#include <SDL2/SDL.h>
#include "libavutil/opt.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/imgutils.h"
#include "libavutil/mathematics.h"
#include "libavutil/samplefmt.h"
#include "libavutil/time.h"
#include "libavutil/fifo.h"
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavformat/avio.h"
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
}
#include <memory>
#include <windows.h>
//#include "sdlplayer.h"
#include "CGSDLRender.h"
using namespace ChunGen::Client::Player;
#pragma warning(disable :4996)
// Forward declaration of the demo entry point implemented below.
int mymain();
// Windows (TCHAR) console entry: delegates all work to mymain().
// Command-line arguments are ignored; the input path is the global `spath`.
int _tmain(int argc, _TCHAR* argv[]) {
mymain();
return 0;
}
// Indices of the video/audio streams inside `ic`; filled in mymain()
// after avformat_find_stream_info.
int videoIndex;
int audioIndex;
// NOTE(review): mFormat, iformat_opts and INBUF_SIZE are never used in the
// visible code — presumably leftovers; confirm before removing.
AVInputFormat mFormat;
AVDictionary* iformat_opts;
using namespace std;
#define INBUF_SIZE 4096
// One-time FFmpeg global setup: register codecs/demuxers/filters,
// initialize networking, and restrict logging to errors.
// (The *_register_all calls are required on FFmpeg < 4.0, which this
// file targets — it also uses avcodec_decode_video2 etc.)
void Init()
{
av_register_all();
avfilter_register_all();
avformat_network_init();
av_log_set_level(AV_LOG_ERROR);
}
// Demuxer context for the input opened in mymain(); shared by all helpers.
AVFormatContext *ic = NULL;
// Wall-clock time (av_gettime) of the most recent packet-read attempt.
int64_t lastReadPacktTime;
std::shared_ptr <AVPacket> readPacketFromSource()
{
std::shared_ptr<AVPacket> packet(static_cast<AVPacket*>(av_malloc(sizeof(AVPacket))), [&](AVPacket *p) { av_packet_free(&p); av_freep(&p); });
av_init_packet(packet.get());
lastReadPacktTime = av_gettime();
int ret = av_read_frame(ic, packet.get());
if (ret >= 0)
{
return packet;
}
else
{
return nullptr;
}
}
// Decodes one video packet into `frame` using the stream's codec context
// (legacy avcodec_decode_video2 API, consistent with the rest of the file).
// Returns true only when the decoder succeeded AND produced a complete frame.
bool videoDecode(AVPacket* packet, AVFrame *frame)
{
	int gotFrame = 0;
	auto hr = avcodec_decode_video2(ic->streams[videoIndex]->codec, frame, &gotFrame, packet);
	// (Removed an unused read of codec->pix_fmt that the original kept here.)
	return hr >= 0 && gotFrame != 0;
}
int initVideoDecodeContext()
{
auto codecId = ic->streams[videoIndex]->codec->codec_id;
auto codec = avcodec_find_decoder(codecId);
if (!codec)
{
return -1;
}
int ret = avcodec_open2(ic->streams[videoIndex]->codec, codec, NULL);
return ret;
}
// State owned by init_Sws(): source/destination frames, their pixel
// buffers, and the single YUV420P -> RGB32 conversion context.
AVFrame *m_pFrameRGB, *m_pFrameYUV;
uint8_t *m_rgbBuffer, *m_yuvBuffer;
struct SwsContext *m_img_convert_ctx;
// Allocates the two global frames, their pixel buffers, and one
// YUV420P -> RGB32 conversion context for a w x h picture.
// sws_getContext must be called exactly once (here); release the context
// with sws_freeContext during teardown or it leaks.
void init_Sws(int w, int h)
{
	// One frame object per side of the conversion.
	m_pFrameYUV = av_frame_alloc();
	m_pFrameRGB = av_frame_alloc();

	// RGB32 destination buffer, sized by FFmpeg itself so it matches the
	// layout avpicture_fill expects. If the resolution can change, size
	// these for the largest expected picture.
	const int rgbBytes = avpicture_get_size(AV_PIX_FMT_RGB32, w, h);
	m_rgbBuffer = (uint8_t *)av_malloc(rgbBytes * sizeof(uint8_t));

	// Raw YUV420P: full luma plane + two quarter-size chroma planes.
	const int yuvBytes = w * h * 3 / 2;
	m_yuvBuffer = (uint8_t *)av_malloc(yuvBytes);

	// Single reusable conversion context.
	m_img_convert_ctx = sws_getContext(w, h, AV_PIX_FMT_YUV420P,
		w, h, AV_PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);

	// Wire m_pFrameRGB's data/linesize pointers into the RGB buffer.
	avpicture_fill((AVPicture *)m_pFrameRGB, m_rgbBuffer, AV_PIX_FMT_RGB32, w, h);
}
// NOTE(review): global dump-file handle; only referenced from commented-out
// code in mymain() — confirm before removing.
FILE *f;
// Dumps one grayscale plane to `filename` as raw bytes.
// buf   - top-left of the plane
// wrap  - stride (bytes per row in memory)
// xsize - visible width in bytes, ysize - number of rows
static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
	char *filename)
{
	// "wb": payload is binary — text mode would translate 0x0A bytes on
	// Windows and corrupt the image. (Original opened with "w".)
	FILE *out = fopen(filename, "wb");
	if (!out)
		return;                      // cannot write; bail out quietly
	//fprintf(out, "P5\n%d %d\n%d\n", xsize, ysize, 255);
	for (int i = 0; i < ysize; i++)
		fwrite(buf + i * wrap, 1, xsize, out);
	fclose(out);
}
//Refresh Event
// Custom SDL event posted by the refresh thread to pace frame display.
#define REFRESH_EVENT (SDL_USEREVENT + 1)
// Set non-zero to make refresh_video() return; read by its loop condition.
int thread_exit = 0;
int refresh_video(void *opaque) {
while (thread_exit == 0) {
SDL_Event event;
event.type = REFRESH_EVENT;
SDL_PushEvent(&event);
SDL_Delay(40);
}
return 0;
}
//char* spath = "E:\\a1.mp4";
//char* spath = "D:\\game\\three.js-master\\examples\\textures\\sintel.mp4";
//char* spath = "D:\\迅雷下载\\[阳光电影www.ygdy8.com].爱的成人式.BD.720p.中文字幕.rmvb";
// Hard-coded input file played by mymain().
char* spath = "E:\\a1.mp4";
// dts of the previously read packet; used to compute inter-frame delay.
int64_t lastDts;
// Milliseconds subtracted from the computed inter-frame delay.
#define DelayTime 5
int mymain()
{
printf("--->\n");
int scan_all_pmts_set = 0;
/* register all codecs, demux and protocols */
Init();
ic = avformat_alloc_context();
int ret;
if (!ic) {
av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
ret = AVERROR(ENOMEM);
printf("alloc err %d\n", ret);
}
/*
if (!av_dict_get(iformat_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
av_dict_set(&iformat_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
scan_all_pmts_set = 1;
}
*/
int err = avformat_open_input(&ic, spath, nullptr, nullptr);
if (err < 0) {
printf("open err err=%d\n", err);
}
printf("come 2\n");
ret = avformat_find_stream_info(ic, nullptr);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Find input file stream inform failed\n");
}
else
{
for (int i = 0; i < ic->nb_streams; i++)
{
if (ic->streams[i]->codec->codec_type == AVMediaType::AVMEDIA_TYPE_VIDEO)
{
videoIndex = i;
}
else if (ic->streams[i]->codec->codec_type == AVMediaType::AVMEDIA_TYPE_AUDIO)
{
audioIndex = i;
}
}
//playState = CGPlayState::POpen;
//av_log(NULL, AV_LOG_FATAL, "Open input file %s success\n", inputUrl.c_str());
}
int ret1 = initVideoDecodeContext();
printf("ret1 = %d\n", ret1);
std::shared_ptr<CGSDLRender> sdlRender = std::make_shared<CGSDLRender>();//???????
ret = initVideoDecodeContext();
if (ret < 0) return ret;
sdlRender->InitVideo(0);
sdlRender->CreateVideoSurface(ic->streams[videoIndex]->codec->width, ic->streams[videoIndex]->codec->height);
int w = ic->streams[videoIndex]->codec->width;
int h = ic->streams[videoIndex]->codec->height;
AVFrame * videoFrame = av_frame_alloc();
//f = fopen("new.rgb", "wb+");
SwsContext* swsCtx = NULL;
//swsCtx= sws_getContext(width,)
int gL = 0;
SDL_Thread *refresh_thread = SDL_CreateThread(refresh_video, NULL, NULL);
SDL_Event event;
for (int i = 0; i <5700; i++) {
auto packet = readPacketFromSource();
if (packet) {
if (packet->stream_index == videoIndex)
if (videoDecode(packet.get(), videoFrame))
{
//sws_scale
//playVideo(videoFrame);
int j = 0;
j++;
printf("%d---\n", i);
//char buf[1024];
//sprintf(buf, "pgmdir\\%s-%d.pgm", "file", i);
AVFrame * frame = videoFrame;
SDL_WaitEvent(&event);
if (event.type == REFRESH_EVENT) {
sdlRender->Display((uint8_t**)frame->data, frame->linesize);
}
else if (event.type == SDL_QUIT) {
break;
}
//---------------
uint8_t *dst_data[4];
int dst_linesize[4];
int dst_bufsize;
struct SwsContext *pSwsCtx;
pSwsCtx = sws_getContext(w, h, ic->streams[videoIndex]->codec->pix_fmt,
w, h, AV_PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);
if ((ret = av_image_alloc(dst_data, dst_linesize,
w, h, AV_PIX_FMT_RGB24, 1)) < 0) {
fprintf(stderr, "Could not allocate destination image\n");
return -1;
}
dst_bufsize = ret;
sws_scale(pSwsCtx, (const uint8_t * const*)frame->data,
frame->linesize, 0, h, dst_data, dst_linesize);
//fwrite(dst_data[0], 1, dst_bufsize, f);
int g = 0;
}
if (lastDts >= 0)
{
auto diff = packet->dts - lastDts;
int duration = diff * 1000 / (ic->streams[videoIndex]->time_base.den
/ ic->streams[videoIndex]->time_base.num);
if (duration > DelayTime && duration < 1000)
{
//SDL_Delay(duration - DelayTime);
//Sleep(duration - DelayTime);
printf("SDL_Delay %d \n", duration - DelayTime);
}
}
lastDts = packet->dts;
}
else {
break;
}
}
av_frame_free(&videoFrame);
//fclose(f);
system("pause");
return 0;
}
/* NOTE(review): the lines below are "related articles" text scraped from the
   blog page this file was copied from — they are not C++ and broke the build.
   Preserved verbatim inside this comment.

相关文章
- 10-26Linear regression with one variable - Cost function intuition I
- 10-26JOKER.ONE的多签是什么?
- 10-26odoo10 many2one字段下拉更多选项时自定义排序方法
- 10-26joker.one的价值属性是什么?joker.one的去中心化
- 10-26Plus One 解答
- 10-26《嵌入式 - 语音识别TWen-ASR-ONE开发笔记》第3章 TWen-ASR-ONE 多线程和消息队列
- 10-26来自Google Play的Android App拒绝
- 10-26Google Fuchsia OS 可能的第一款手机曝光!华为在 Honor Play 上启动新操作
- 10-26org.hibernate.HibernateException: No Hibernate Session bound to thread, and configuration does not allow creation of non-transactional one here
- 10-26(one) 条件判断的总结
*/