https://blog.csdn.net/jxcr1984/article/details/52766524
Reposted from: http://blog.csdn.net/leixiaohua1020/article/details/14214577
/*
 * Lei Xiaohua (雷霄骅)
 * leixiaohua1020@126.com
 * Communication University of China / Digital TV Technology
 */
/**
 * Audio Video Frame.
 * New fields can be added to the end of AVFRAME with minor version
 * bumps. Similarly fields that are marked as to be only accessed by
 * av_opt_ptr() can be reordered. This allows 2 forks to add fields
 * without breaking compatibility with each other.
 * Removal, reordering and changes in the remaining cases require
 * a major version bump.
 * sizeof(AVFrame) must not be used outside libavcodec.
 */
typedef struct AVFrame {
#define AV_NUM_DATA_POINTERS 8
    /** Picture data
     * pointer to the picture/channel planes.
     * This might be different from the first allocated byte
     * - encoding: Set by user
     * - decoding: set by AVCodecContext.get_buffer()
     */
    uint8_t *data[AV_NUM_DATA_POINTERS];

    /**
     * Size, in bytes, of the data for each picture/channel plane.
     *
     * For audio, only linesize[0] may be set. For planar audio, each channel
     * plane must be the same size.
     *
     * - encoding: Set by user
     * - decoding: set by AVCodecContext.get_buffer()
     */
    int linesize[AV_NUM_DATA_POINTERS];

    /**
     * pointers to the data planes/channels.
     *
     * For video, this should simply point to data[].
     *
     * For planar audio, each channel has a separate data pointer, and
     * linesize[0] contains the size of each channel buffer.
     * For packed audio, there is just one data pointer, and linesize[0]
     * contains the total size of the buffer for all channels.
     *
     * Note: Both data and extended_data will always be set by get_buffer(),
     * but for planar audio with more channels that can fit in data,
     * extended_data must be used by the decoder in order to access all
     * channels.
     *
     * encoding: unused
     * decoding: set by AVCodecContext.get_buffer()
     */
    uint8_t **extended_data;

    /** Width and height
     * width and height of the video frame
     * - encoding: unused
     * - decoding: Read by user.
     */
    int width, height;

    /**
     * number of audio samples (per channel) described by this frame
     * - encoding: Set by user
     * - decoding: Set by libavcodec
     */
    int nb_samples;

    /**
     * format of the frame, -1 if unknown or unset
     * Values correspond to enum AVPixelFormat for video frames,
     * enum AVSampleFormat for audio)
     * - encoding: unused
     * - decoding: Read by user.
     */
    int format;

    /** Whether this is a keyframe
     * 1 -> keyframe, 0-> not
     * - encoding: Set by libavcodec.
     * - decoding: Set by libavcodec.
     */
    int key_frame;

    /** Frame type (I, B, P)
     * Picture type of the frame, see ?_TYPE below.
     * - encoding: Set by libavcodec. for coded_picture (and set by user for input).
     * - decoding: Set by libavcodec.
     */
    enum AVPictureType pict_type;

    /**
     * pointer to the first allocated byte of the picture. Can be used in get_buffer/release_buffer.
     * This isn't used by libavcodec unless the default get/release_buffer() is used.
     * - encoding:
     * - decoding:
     */
    uint8_t *base[AV_NUM_DATA_POINTERS];

    /**
     * sample aspect ratio for the video frame, 0/1 if unknown/unspecified
     * - encoding: unused
     * - decoding: Read by user.
     */
    AVRational sample_aspect_ratio;

    /**
     * presentation timestamp in time_base units (time when frame should be shown to user)
     * If AV_NOPTS_VALUE then frame_rate = 1/time_base will be assumed.
     * - encoding: MUST be set by user.
     * - decoding: Set by libavcodec.
     */
    int64_t pts;

    /**
     * reordered pts from the last AVPacket that has been input into the decoder
     * - encoding: unused
     * - decoding: Read by user.
     */
    int64_t pkt_pts;

    /**
     * dts from the last AVPacket that has been input into the decoder
     * - encoding: unused
     * - decoding: Read by user.
     */
    int64_t pkt_dts;

    /**
     * picture number in bitstream order
     * - encoding: set by
     * - decoding: Set by libavcodec.
     */
    int coded_picture_number;

    /**
     * picture number in display order
     * - encoding: set by
     * - decoding: Set by libavcodec.
     */
    int display_picture_number;

    /**
     * quality (between 1 (good) and FF_LAMBDA_MAX (bad))
     * - encoding: Set by libavcodec. for coded_picture (and set by user for input).
     * - decoding: Set by libavcodec.
     */
    int quality;

    /**
     * is this picture used as reference
     * The values for this are the same as the MpegEncContext.picture_structure
     * variable, that is 1->top field, 2->bottom field, 3->frame/both fields.
     * Set to 4 for delayed, non-reference frames.
     * - encoding: unused
     * - decoding: Set by libavcodec. (before get_buffer() call)).
     */
    int reference;

    /**
     * QP table
     * - encoding: unused
     * - decoding: Set by libavcodec.
     */
    int8_t *qscale_table;

    /**
     * QP store stride
     * - encoding: unused
     * - decoding: Set by libavcodec.
     */
    int qstride;

    /**
     *
     */
    int qscale_type;

    /** Skipped-macroblock table
     * mbskip_table[mb]>=1 if MB didn't change
     * stride= mb_width = (width+15)>>4
     * - encoding: unused
     * - decoding: Set by libavcodec.
     */
    uint8_t *mbskip_table;

    /**
     * motion vector table
     * @code
     * example:
     * int mv_sample_log2= 4 - motion_subsample_log2;
     * int mb_width= (width+15)>>4;
     * int mv_stride= (mb_width << mv_sample_log2) + 1;
     * motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y];
     * @endcode
     * - encoding: Set by user.
     * - decoding: Set by libavcodec.
     */
    int16_t (*motion_val[2])[2];

    /**
     * macroblock type table
     * mb_type_base + mb_width + 2
     * - encoding: Set by user.
     * - decoding: Set by libavcodec.
     */
    uint32_t *mb_type;

    /**
     * DCT coefficients
     * - encoding: unused
     * - decoding: Set by libavcodec.
     */
    short *dct_coeff;

    /** Reference frame lists
     * motion reference frame index
     * the order in which these are stored can depend on the codec.
     * - encoding: Set by user.
     * - decoding: Set by libavcodec.
     */
    int8_t *ref_index[2];

    /**
     * for some private data of the user
     * - encoding: unused
     * - decoding: Set by user.
     */
    void *opaque;

    /**
     * error
     * - encoding: Set by libavcodec. if flags&CODEC_FLAG_PSNR.
     * - decoding: unused
     */
    uint64_t error[AV_NUM_DATA_POINTERS];

    /**
     * type of the buffer (to keep track of who has to deallocate data[*])
     * - encoding: Set by the one who allocates it.
     * - decoding: Set by the one who allocates it.
     * Note: User allocated (direct rendering) & internal buffers cannot coexist currently.
     */
    int type;

    /**
     * When decoding, this signals how much the picture must be delayed.
     * extra_delay = repeat_pict / (2*fps)
     * - encoding: unused
     * - decoding: Set by libavcodec.
     */
    int repeat_pict;

    /**
     * The content of the picture is interlaced.
     * - encoding: Set by user.
     * - decoding: Set by libavcodec. (default 0)
     */
    int interlaced_frame;

    /**
     * If the content is interlaced, is top field displayed first.
     * - encoding: Set by user.
     * - decoding: Set by libavcodec.
     */
    int top_field_first;

    /**
     * Tell user application that palette has changed from previous frame.
     * - encoding: ??? (no palette-enabled encoder yet)
     * - decoding: Set by libavcodec. (default 0).
     */
    int palette_has_changed;

    /**
     * codec suggestion on buffer type if != 0
     * - encoding: unused
     * - decoding: Set by libavcodec. (before get_buffer() call)).
     */
    int buffer_hints;

    /**
     * Pan scan.
     * - encoding: Set by user.
     * - decoding: Set by libavcodec.
     */
    AVPanScan *pan_scan;

    /**
     * reordered opaque 64bit (generally an integer or a double precision float
     * PTS but can be anything).
     * The user sets AVCodecContext.reordered_opaque to represent the input at
     * that time,
     * the decoder reorders values as needed and sets AVFrame.reordered_opaque
     * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque
     * @deprecated in favor of pkt_pts
     * - encoding: unused
     * - decoding: Read by user.
     */
    int64_t reordered_opaque;

    /**
     * hardware accelerator private data (FFmpeg-allocated)
     * - encoding: unused
     * - decoding: Set by libavcodec
     */
    void *hwaccel_picture_private;

    /**
     * the AVCodecContext which ff_thread_get_buffer() was last called on
     * - encoding: Set by libavcodec.
     * - decoding: Set by libavcodec.
     */
    struct AVCodecContext *owner;

    /**
     * used by multithreading to store frame-specific info
     * - encoding: Set by libavcodec.
     * - decoding: Set by libavcodec.
     */
    void *thread_opaque;

    /**
     * log2 of the size of the block which a single vector in motion_val represents:
     * (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2)
     * - encoding: unused
     * - decoding: Set by libavcodec.
     */
    uint8_t motion_subsample_log2;

    /** (Audio) sample rate
     * Sample rate of the audio data.
     *
     * - encoding: unused
     * - decoding: read by user
     */
    int sample_rate;

    /**
     * Channel layout of the audio data.
     *
     * - encoding: unused
     * - decoding: read by user.
     */
    uint64_t channel_layout;

    /**
     * frame timestamp estimated using various heuristics, in stream time base
     * Code outside libavcodec should access this field using:
     * av_frame_get_best_effort_timestamp(frame)
     * - encoding: unused
     * - decoding: set by libavcodec, read by user.
     */
    int64_t best_effort_timestamp;

    /**
     * reordered pos from the last AVPacket that has been input into the decoder
     * Code outside libavcodec should access this field using:
     * av_frame_get_pkt_pos(frame)
     * - encoding: unused
     * - decoding: Read by user.
     */
    int64_t pkt_pos;

    /**
     * duration of the corresponding packet, expressed in
     * AVStream->time_base units, 0 if unknown.
     * Code outside libavcodec should access this field using:
     * av_frame_get_pkt_duration(frame)
     * - encoding: unused
     * - decoding: Read by user.
     */
    int64_t pkt_duration;

    /**
     * metadata.
     * Code outside libavcodec should access this field using:
     * av_frame_get_metadata(frame)
     * - encoding: Set by user.
     * - decoding: Set by libavcodec.
     */
    AVDictionary *metadata;

    /**
     * decode error flags of the frame, set to a combination of
     * FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there
     * were errors during the decoding.
     * Code outside libavcodec should access this field using:
     * av_frame_get_decode_error_flags(frame)
     * - encoding: unused
     * - decoding: set by libavcodec, read by user.
     */
    int decode_error_flags;
#define FF_DECODE_ERROR_INVALID_BITSTREAM 1
#define FF_DECODE_ERROR_MISSING_REFERENCE 2

    /**
     * number of audio channels, only used for audio.
     * Code outside libavcodec should access this field using:
     * av_frame_get_channels(frame)
     * - encoding: unused
     * - decoding: Read by user.
     */
    int64_t channels;
} AVFrame;
The AVFrame structure holds decoded data. It is generally used to store raw (uncompressed) data — YUV or RGB for video, PCM for audio — together with some related information. For example, during decoding it also carries the macroblock type table, the QP table, the motion vector table and so on, and during encoding it stores related data as well. AVFrame is therefore a very important structure when using FFmpeg for bitstream analysis.
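To make the role of AVFrame concrete, here is a minimal decode-loop sketch. It uses the send/receive API of newer FFmpeg releases (avcodec_send_packet()/avcodec_receive_frame()); the struct listed above is from an older libavcodec, but the fields read here exist in both. The helper name consume_packet() and the reduced error handling are illustrative only.

#include <stdio.h>
#include <libavcodec/avcodec.h>

/* Feed one compressed packet to the decoder and print the basic
 * properties of every raw frame (AVFrame) that comes out. */
static void consume_packet(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame)
{
    if (avcodec_send_packet(dec_ctx, pkt) < 0)
        return;                              /* decoder refused the packet */
    while (avcodec_receive_frame(dec_ctx, frame) == 0) {
        /* The decoder has filled data[]/linesize[] plus the metadata fields. */
        printf("type=%c %dx%d pts=%lld key_frame=%d\n",
               av_get_picture_type_char(frame->pict_type),
               frame->width, frame->height,
               (long long)frame->pts, frame->key_frame);
        av_frame_unref(frame);               /* release the buffers for reuse */
    }
}

The most important fields are summarized below.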
uint8_t *data[AV_NUM_DATA_POINTERS]: the decoded raw data
int linesize[AV_NUM_DATA_POINTERS]: size in bytes of one "row" of data in each plane. Note: this is not necessarily the image width; it is usually larger than the width because of alignment padding (see the dump sketch after this list)
int width, height: width and height of the video frame (e.g. 1920x1080)
int nb_samples: an audio AVFrame may hold many samples; this field records how many samples (per channel) it contains
int format: format of the decoded raw data (e.g. YUV420P, YV12, RGB24, ...); for video the values come from enum AVPixelFormat, e.g.
// if (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUVJ420P)
int key_frame: whether this frame is a keyframe (1) or not (0)
enum AVPictureType pict_type: picture type of the frame (I, B, P, ...)
AVRational sample_aspect_ratio: sample aspect ratio (e.g. 16:9, 4:3)
int64_t pts: presentation timestamp
int coded_picture_number: picture number in coding (bitstream) order
int display_picture_number: picture number in display order
int8_t *qscale_table: QP table
uint8_t *mbskip_table: skipped-macroblock table
int16_t (*motion_val[2])[2]: motion vector table
uint32_t *mb_type: macroblock type table
short *dct_coeff: DCT coefficients
int8_t *ref_index[2]: motion-estimation reference frame lists
int interlaced_frame: whether the content is interlaced
uint8_t motion_subsample_log2: log2 of the size of the block that a single motion vector covers (4 -> 16x16, 3 -> 8x8, ...)
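As noted for linesize above, each row of a plane is normally padded beyond the visible width, so raw pixels have to be copied row by row rather than in one block. The sketch below dumps a decoded YUV420P frame to a raw YUV file; it assumes a current libavutil (AVFrame in libavutil/frame.h), the helper name write_yuv420p() is illustrative, and file handling is simplified.

#include <stdio.h>
#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>

static void write_yuv420p(const AVFrame *frame, FILE *out)
{
    if (frame->format != AV_PIX_FMT_YUV420P &&
        frame->format != AV_PIX_FMT_YUVJ420P)
        return;

    /* Y plane: full resolution, copied one padded row (linesize[0] bytes) at a time. */
    for (int y = 0; y < frame->height; y++)
        fwrite(frame->data[0] + y * frame->linesize[0], 1, frame->width, out);

    /* U and V planes: half resolution in both directions for 4:2:0. */
    for (int p = 1; p <= 2; p++)
        for (int y = 0; y < frame->height / 2; y++)
            fwrite(frame->data[p] + y * frame->linesize[p], 1, frame->width / 2, out);
}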
For video, the values of AVFrame.format come from enum AVPixelFormat (see https://blog.csdn.net/leixiaohua1020/article/details/42134965):
enum AVPixelFormat {
    AV_PIX_FMT_NONE = -1,
    AV_PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
    AV_PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
    AV_PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB...
    AV_PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR...
    AV_PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
    AV_PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
    AV_PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
    AV_PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
    AV_PIX_FMT_GRAY8, ///< Y , 8bpp
    AV_PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb
    AV_PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb
    AV_PIX_FMT_PAL8, ///< 8 bit with PIX_FMT_RGB32 palette
    AV_PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range
    AV_PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range
    AV_PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range
#if FF_API_XVMC
    AV_PIX_FMT_XVMC_MPEG2_MC, ///< XVideo Motion Acceleration via common packet passing
    AV_PIX_FMT_XVMC_MPEG2_IDCT,
#define AV_PIX_FMT_XVMC AV_PIX_FMT_XVMC_MPEG2_IDCT
#endif /* FF_API_XVMC */
    AV_PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
    AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
    AV_PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
    AV_PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
    AV_PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
    AV_PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
    AV_PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
    AV_PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
    AV_PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
    AV_PIX_FMT_NV21, ///< as above, but U and V bytes are swapped

    AV_PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
    AV_PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
    AV_PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
    AV_PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...

    AV_PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian
    AV_PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian
    AV_PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
    AV_PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range
    AV_PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
#if FF_API_VDPAU
    AV_PIX_FMT_VDPAU_H264, ///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
    AV_PIX_FMT_VDPAU_MPEG1, ///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
    AV_PIX_FMT_VDPAU_MPEG2, ///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
    AV_PIX_FMT_VDPAU_WMV3, ///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
    AV_PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
#endif
    AV_PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian
    AV_PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian

    AV_PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
    AV_PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
    AV_PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0
    AV_PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0

    AV_PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
    AV_PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
    AV_PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1
    AV_PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1

    AV_PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
    AV_PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers
    AV_PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers

    AV_PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    AV_PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    AV_PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    AV_PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
#if FF_API_VDPAU
    AV_PIX_FMT_VDPAU_MPEG4, ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
#endif
    AV_PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer

    AV_PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0
    AV_PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0
    AV_PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1
    AV_PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1
    AV_PIX_FMT_GRAY8A, ///< 8bit gray, 8bit alpha
    AV_PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian
    AV_PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian

    /**
     * The following 12 formats have the disadvantage of needing 1 format for each bit depth.
     * Notice that each 9/10 bits sample is stored in 16 bits with extra padding.
     * If you want to support multiple bit depths, then using AV_PIX_FMT_YUV420P16* with the bpp stored separately is better.
     */
    AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    AV_PIX_FMT_YUV420P10BE, ///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    AV_PIX_FMT_YUV420P10LE, ///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    AV_PIX_FMT_YUV422P10BE, ///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_YUV422P10LE, ///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    AV_PIX_FMT_YUV444P10BE, ///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    AV_PIX_FMT_YUV444P10LE, ///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_VDA_VLD, ///< hardware decoding through VDA

#ifdef AV_PIX_FMT_ABI_GIT_MASTER
    AV_PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
    AV_PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
    AV_PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
    AV_PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
#endif
    AV_PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp
    AV_PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big-endian
    AV_PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little-endian
    AV_PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big-endian
    AV_PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little-endian
    AV_PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big-endian
    AV_PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little-endian

    /**
     * duplicated pixel formats for compatibility with libav.
     * FFmpeg supports these formats since May 8 2012 and Jan 28 2012 (commits f9ca1ac7 and 143a5c55)
     * Libav added them Oct 12 2012 with incompatible values (commit 6d5600e85)
     */
    AV_PIX_FMT_YUVA422P_LIBAV, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
    AV_PIX_FMT_YUVA444P_LIBAV, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)

    AV_PIX_FMT_YUVA420P9BE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian
    AV_PIX_FMT_YUVA420P9LE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian
    AV_PIX_FMT_YUVA422P9BE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian
    AV_PIX_FMT_YUVA422P9LE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian
    AV_PIX_FMT_YUVA444P9BE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
    AV_PIX_FMT_YUVA444P9LE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
    AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
    AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
    AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
    AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
    AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
    AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)

    AV_PIX_FMT_VDPAU, ///< HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface

    AV_PIX_FMT_XYZ12LE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0
    AV_PIX_FMT_XYZ12BE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0
    AV_PIX_FMT_NV16, ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
    AV_PIX_FMT_NV20LE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_NV20BE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian

    /**
     * duplicated pixel formats for compatibility with libav.
     * FFmpeg supports these formats since Sat Sep 24 06:01:45 2011 +0200 (commits 9569a3c9f41387a8c7d1ce97d8693520477a66c3)
     * also see Fri Nov 25 01:38:21 2011 +0100 92afb431621c79155fcb7171d26f137eb1bee028
     * Libav added them Sun Mar 16 23:05:47 2014 +0100 with incompatible values (commit 1481d24c3a0abf81e1d7a514547bd5305232be30)
     */
    AV_PIX_FMT_RGBA64BE_LIBAV, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
    AV_PIX_FMT_RGBA64LE_LIBAV, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
    AV_PIX_FMT_BGRA64BE_LIBAV, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
    AV_PIX_FMT_BGRA64LE_LIBAV, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian

    AV_PIX_FMT_YVYU422, ///< packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb

#ifndef AV_PIX_FMT_ABI_GIT_MASTER
    AV_PIX_FMT_RGBA64BE=0x123, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
    AV_PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
    AV_PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
    AV_PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
#endif
    AV_PIX_FMT_0RGB=0x123+4, ///< packed RGB 8:8:8, 32bpp, 0RGB0RGB...
    AV_PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGB0RGB0...
    AV_PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, 0BGR0BGR...
    AV_PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGR0BGR0...
    AV_PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
    AV_PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)

    AV_PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    AV_PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    AV_PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    AV_PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    AV_PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    AV_PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    AV_PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    AV_PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    AV_PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big-endian
    AV_PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little-endian
    AV_PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big-endian
    AV_PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little-endian
    AV_PIX_FMT_GBRAP, ///< planar GBRA 4:4:4:4 32bpp
    AV_PIX_FMT_GBRAP16BE, ///< planar GBRA 4:4:4:4 64bpp, big-endian
    AV_PIX_FMT_GBRAP16LE, ///< planar GBRA 4:4:4:4 64bpp, little-endian
    AV_PIX_FMT_YUVJ411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of PIX_FMT_YUV411P and setting color_range

    AV_PIX_FMT_BAYER_BGGR8, ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples
    AV_PIX_FMT_BAYER_RGGB8, ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples
    AV_PIX_FMT_BAYER_GBRG8, ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples
    AV_PIX_FMT_BAYER_GRBG8, ///< bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples
    AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian
    AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian
    AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian
    AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian
    AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian
    AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian
    AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian
    AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian
#if !FF_API_XVMC
    AV_PIX_FMT_XVMC, ///< XVideo Motion Acceleration via common packet passing
#endif /* !FF_API_XVMC */

    AV_PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions

#if FF_API_PIX_FMT
#include "old_pix_fmts.h"
#endif
};
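Rather than memorizing the bits-per-pixel and chroma subsampling of every entry above, code can query them at run time through libavutil's pixel-format descriptors. A small sketch assuming only the public pixdesc API (the helper name describe() is illustrative):

#include <stdio.h>
#include <libavutil/pixdesc.h>

static void describe(enum AVPixelFormat fmt)
{
    const AVPixFmtDescriptor *d = av_pix_fmt_desc_get(fmt);
    if (!d)
        return;
    /* For AV_PIX_FMT_YUV420P this prints: 3 components, 12 bpp,
     * 2x2 chroma subsampling, planar. */
    printf("%s: components=%d bpp=%d chroma_subsampling=%dx%d %s\n",
           d->name, d->nb_components, av_get_bits_per_pixel(d),
           1 << d->log2_chroma_w, 1 << d->log2_chroma_h,
           (d->flags & AV_PIX_FMT_FLAG_PLANAR) ? "planar" : "packed");
}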
For comparison, x264 uses its own colorspace constants (X264_CSP_*). The function below, from x264's frame code, maps the colorspace supplied by the caller to the one x264 actually uses internally: 4:2:0 input is handled as NV12, 4:2:2 as NV16, and 4:4:4 or RGB as I444.

static int x264_frame_internal_csp( int external_csp )
{
    switch( external_csp & X264_CSP_MASK )
    {
        case X264_CSP_NV12:
        case X264_CSP_NV21:
        case X264_CSP_I420:
        case X264_CSP_YV12:
            return X264_CSP_NV12;
        case X264_CSP_NV16:
        case X264_CSP_I422:
        case X264_CSP_YV16:
        case X264_CSP_V210:
            return X264_CSP_NV16;
        case X264_CSP_I444:
        case X264_CSP_YV24:
        case X264_CSP_BGR:
        case X264_CSP_BGRA:
        case X264_CSP_RGB:
            return X264_CSP_I444;
        default:
            return X264_CSP_NONE;
    }
}
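From the caller's side this mapping is transparent: an application just sets x264_param_t.i_csp to the layout of its own buffers, and x264 repacks the planes internally (for example I420 input becomes NV12, per the switch above). A minimal sketch against the public x264.h API; the helper name open_encoder_i420(), the chosen preset and profile are illustrative, and error handling is omitted.

#include <stddef.h>
#include <x264.h>

static x264_t *open_encoder_i420(int width, int height)
{
    x264_param_t param;
    x264_param_default_preset(&param, "medium", NULL);
    param.i_csp    = X264_CSP_I420;   /* caller's buffers are I420; x264 converts to NV12 internally */
    param.i_width  = width;
    param.i_height = height;
    x264_param_apply_profile(&param, "high");
    return x264_encoder_open(&param); /* NULL on failure */
}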