1. On Linux we capture camera data through the V4L2 framework. Different cameras output different pixel formats, and since YUV420P is the most widely used format, we convert the captured frames to YUV420P.
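As a quick size check: a YUV420P frame stores a full-resolution Y plane plus U and V planes subsampled 2x2 in both directions, so one frame occupies width * height * 3/2 bytes. A minimal sketch using libavutil's av_image_get_buffer_size() (the 1280x960 resolution matches the target size hard-coded in the capture code below):

/* Minimal sketch: size of one YUV420P frame via libavutil.
 * For the 1280x960 target used below: 1280 * 960 * 3/2 = 1843200 bytes. */
#include <stdio.h>
#include <libavutil/imgutils.h>

int main(void)
{
    int size = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, 1280, 960, 1);
    printf("one YUV420P frame: %d bytes\n", size); /* prints 1843200 */
    return 0;
}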
pic_data_transform.c
/*=============================================================================
 * Desc: use ffmpeg to read one frame from a v4l2 device and convert
 *       its pixel format to YUV420P
 *=============================================================================*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>

#include "avformat.h"
#include "avcodec.h"
#include "avdevice.h"
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>

char *input_name = "video4linux2";
char *file_name  = "/dev/video0";
char *out_file   = "yuv420.yuv";

void captureOneFrame(void)
{
    AVFormatContext *fmtCtx = NULL;
    AVInputFormat   *inputFmt;
    AVPacket        *packet;
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
    struct SwsContext *sws_ctx;
    FILE *fp;
    int i;
    int videoindex;
    enum AVPixelFormat dst_pix_fmt = AV_PIX_FMT_YUV420P;
    uint8_t *src_data[4];
    uint8_t *dst_data[4];
    int src_linesize[4];
    int dst_linesize[4];
    int src_bufsize;
    int dst_bufsize;
    int dst_w = 1280;
    int dst_h = 960;

    fp = fopen(out_file, "wb");
    if (fp == NULL) {
        printf("open frame data file failed\n");
        return;
    }

    /* locate the v4l2 input format, then open the camera device */
    inputFmt = av_find_input_format(input_name);
    if (inputFmt == NULL) {
        printf("can not find input format\n");
        return;
    }
    if (avformat_open_input(&fmtCtx, file_name, inputFmt, NULL) < 0) {
        printf("can not open input file\n");
        return;
    }
    av_dump_format(fmtCtx, 0, file_name, 0);

    /* find the first video stream */
    videoindex = -1;
    for (i = 0; i < fmtCtx->nb_streams; i++) {
        if (fmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoindex = i;
            break;
        }
    }
    if (videoindex == -1) {
        printf("Didn't find a video stream.\n");
        return;
    }

    pCodecCtx = fmtCtx->streams[videoindex]->codec;
    /* not used below: the raw v4l2 frames are converted without decoding */
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);

    printf("picture width  = %d\n", pCodecCtx->width);
    printf("picture height = %d\n", pCodecCtx->height);
    printf("Pixel Format   = %d\n", pCodecCtx->pix_fmt);

    /* converter: camera format -> YUV420P at dst_w x dst_h */
    sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                             dst_w, dst_h, dst_pix_fmt,
                             SWS_BILINEAR, NULL, NULL, NULL);

    src_bufsize = av_image_alloc(src_data, src_linesize,
                                 pCodecCtx->width, pCodecCtx->height,
                                 pCodecCtx->pix_fmt, 16);
    dst_bufsize = av_image_alloc(dst_data, dst_linesize,
                                 dst_w, dst_h, dst_pix_fmt, 1);

    packet = (AVPacket *)av_malloc(sizeof(AVPacket));

    int loop = 1000;
    // while (loop--) {        /* uncomment to grab many frames */
    if (av_read_frame(fmtCtx, packet) >= 0) {
        /* the raw camera frame is packed in a single plane (e.g. YUYV),
         * so it can be copied into plane 0 as-is */
        memcpy(src_data[0], packet->data, packet->size);
        sws_scale(sws_ctx, (const uint8_t * const *)src_data, src_linesize,
                  0, pCodecCtx->height, dst_data, dst_linesize);
        fwrite(dst_data[0], 1, dst_bufsize, fp);
    }
    // }

    fclose(fp);
    av_free_packet(packet);
    av_freep(&dst_data[0]);
    av_freep(&src_data[0]);
    sws_freeContext(sws_ctx);
    avformat_close_input(&fmtCtx);
}

int main(void)
{
    avcodec_register_all();
    avdevice_register_all();
    captureOneFrame();
    return 0;
}
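Note that the listing above relies on the old FFmpeg API: AVStream->codec, av_free_packet() and avcodec_register_all() are deprecated or removed in FFmpeg 4.x and later (av_free_packet() becomes av_packet_unref(), and the register calls disappear entirely). A minimal sketch of the stream-probing step against the newer codecpar API, assuming fmtCtx was opened with avformat_open_input() exactly as in captureOneFrame():

/* A minimal sketch, assuming FFmpeg 4.x or newer: the per-stream
 * AVCodecContext (AVStream->codec) was removed, so the stream is
 * probed through AVStream->codecpar instead. */
#include <stdio.h>
#include <libavformat/avformat.h>

static int find_video_stream(AVFormatContext *fmtCtx)
{
    for (unsigned int i = 0; i < fmtCtx->nb_streams; i++) {
        AVCodecParameters *par = fmtCtx->streams[i]->codecpar;
        if (par->codec_type == AVMEDIA_TYPE_VIDEO) {
            printf("picture width  = %d\n", par->width);
            printf("picture height = %d\n", par->height);
            printf("Pixel Format   = %d\n", par->format); /* enum AVPixelFormat value */
            return (int)i;
        }
    }
    return -1; /* no video stream found */
}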
makefile
OUT_APP      = test
INCLUDE_PATH = /usr/local/ffmpeg/include/
INCLUDE = -I$(INCLUDE_PATH) -I$(INCLUDE_PATH)libavutil/ -I$(INCLUDE_PATH)libavdevice/ \
          -I$(INCLUDE_PATH)libavcodec/ -I$(INCLUDE_PATH)libswresample \
          -I$(INCLUDE_PATH)libavfilter/ -I$(INCLUDE_PATH)libavformat \
          -I$(INCLUDE_PATH)libswscale/

LIB_PATH    = /usr/local/ffmpeg/lib/
FFMPEG_LIBS = -L$(LIB_PATH) -lavformat -lavutil -lavdevice -lavcodec -lswresample -lavfilter -lswscale
SDL_LIBS    =
LIBS        = $(FFMPEG_LIBS) $(SDL_LIBS)

COMPILE_OPTS = $(INCLUDE)
C            = c
OBJ          = o
C_COMPILER   = cc
C_FLAGS      = $(COMPILE_OPTS) $(CPPFLAGS) $(CFLAGS)

LINK      = cc -o
LINK_OPTS = -lz -lm -lpthread
LINK_OBJ  = pic_data_transform.o

.$(C).$(OBJ):
	$(C_COMPILER) -c -g $(C_FLAGS) $<

$(OUT_APP): $(LINK_OBJ)
	$(LINK) $@ $(LINK_OBJ) $(LIBS) $(LINK_OPTS)

clean:
	-rm -rf *.$(OBJ) $(OUT_APP) core *.core *~ picture
Build and run log
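A typical build-and-verify session, assuming FFmpeg is installed under /usr/local/ffmpeg as the makefile expects (the ffplay command replays the raw dump at the 1280x960 YUV420P size hard-coded in the program; the LD_LIBRARY_PATH export is only needed when linking against shared FFmpeg libraries):

make
export LD_LIBRARY_PATH=/usr/local/ffmpeg/lib:$LD_LIBRARY_PATH
./test
ffplay -f rawvideo -pixel_format yuv420p -video_size 1280x960 yuv420.yuv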