FFmpeg structure analysis: AVFrame (decoded data)

https://blog.csdn.net/jxcr1984/article/details/52766524

Reposted from: http://blog.csdn.net/leixiaohua1020/article/details/14214577

/*
 * 雷霄骅 Lei Xiaohua
 * leixiaohua1020@126.com
 * 中国传媒大学/数字电视技术
 * Communication University of China / Digital TV Technology
 */
/**
 * Audio Video Frame.
 * New fields can be added to the end of AVFRAME with minor version
 * bumps. Similarly fields that are marked as to be only accessed by
 * av_opt_ptr() can be reordered. This allows 2 forks to add fields
 * without breaking compatibility with each other.
 * Removal, reordering and changes in the remaining cases require
 * a major version bump.
 * sizeof(AVFrame) must not be used outside libavcodec.
 */
typedef struct AVFrame {
#define AV_NUM_DATA_POINTERS 8
    /** Picture data.
     * pointer to the picture/channel planes.
     * This might be different from the first allocated byte
     * - encoding: Set by user
     * - decoding: set by AVCodecContext.get_buffer()
     */
    uint8_t *data[AV_NUM_DATA_POINTERS];

    /**
     * Size, in bytes, of the data for each picture/channel plane.
     *
     * For audio, only linesize[0] may be set. For planar audio, each channel
     * plane must be the same size.
     *
     * - encoding: Set by user
     * - decoding: set by AVCodecContext.get_buffer()
     */
    int linesize[AV_NUM_DATA_POINTERS];

    /**
     * pointers to the data planes/channels.
     *
     * For video, this should simply point to data[].
     *
     * For planar audio, each channel has a separate data pointer, and
     * linesize[0] contains the size of each channel buffer.
     * For packed audio, there is just one data pointer, and linesize[0]
     * contains the total size of the buffer for all channels.
     *
     * Note: Both data and extended_data will always be set by get_buffer(),
     * but for planar audio with more channels that can fit in data,
     * extended_data must be used by the decoder in order to access all
     * channels.
     *
     * encoding: unused
     * decoding: set by AVCodecContext.get_buffer()
     */
    uint8_t **extended_data;

    /** Width and height.
     * width and height of the video frame
     * - encoding: unused
     * - decoding: Read by user.
     */
    int width, height;

    /**
     * number of audio samples (per channel) described by this frame
     * - encoding: Set by user
     * - decoding: Set by libavcodec
     */
    int nb_samples;

    /**
     * format of the frame, -1 if unknown or unset
     * Values correspond to enum AVPixelFormat for video frames,
     * enum AVSampleFormat for audio)
     * - encoding: unused
     * - decoding: Read by user.
     */
    int format;

    /** Keyframe flag.
     * 1 -> keyframe, 0-> not
     * - encoding: Set by libavcodec.
     * - decoding: Set by libavcodec.
     */
    int key_frame;

    /** Frame type (I, B, P).
     * Picture type of the frame, see ?_TYPE below.
     * - encoding: Set by libavcodec. for coded_picture (and set by user for input).
     * - decoding: Set by libavcodec.
     */
    enum AVPictureType pict_type;

    /**
     * pointer to the first allocated byte of the picture. Can be used in get_buffer/release_buffer.
     * This isn't used by libavcodec unless the default get/release_buffer() is used.
     * - encoding:
     * - decoding:
     */
    uint8_t *base[AV_NUM_DATA_POINTERS];

    /**
     * sample aspect ratio for the video frame, 0/1 if unknown/unspecified
     * - encoding: unused
     * - decoding: Read by user.
     */
    AVRational sample_aspect_ratio;

    /**
     * presentation timestamp in time_base units (time when frame should be shown to user)
     * If AV_NOPTS_VALUE then frame_rate = 1/time_base will be assumed.
     * - encoding: MUST be set by user.
     * - decoding: Set by libavcodec.
     */
    int64_t pts;

    /**
     * reordered pts from the last AVPacket that has been input into the decoder
     * - encoding: unused
     * - decoding: Read by user.
     */
    int64_t pkt_pts;

    /**
     * dts from the last AVPacket that has been input into the decoder
     * - encoding: unused
     * - decoding: Read by user.
     */
    int64_t pkt_dts;

    /**
     * picture number in bitstream order
     * - encoding: set by
     * - decoding: Set by libavcodec.
     */
    int coded_picture_number;
    /**
     * picture number in display order
     * - encoding: set by
     * - decoding: Set by libavcodec.
     */
    int display_picture_number;

    /**
     * quality (between 1 (good) and FF_LAMBDA_MAX (bad))
     * - encoding: Set by libavcodec. for coded_picture (and set by user for input).
     * - decoding: Set by libavcodec.
     */
    int quality;

    /**
     * is this picture used as reference
     * The values for this are the same as the MpegEncContext.picture_structure
     * variable, that is 1->top field, 2->bottom field, 3->frame/both fields.
     * Set to 4 for delayed, non-reference frames.
     * - encoding: unused
     * - decoding: Set by libavcodec. (before get_buffer() call)).
     */
    int reference;

    /** QP table.
     * - encoding: unused
     * - decoding: Set by libavcodec.
     */
    int8_t *qscale_table;
    /**
     * QP store stride
     * - encoding: unused
     * - decoding: Set by libavcodec.
     */
    int qstride;

    /**
     *
     */
    int qscale_type;

    /** Skipped-macroblock table.
     * mbskip_table[mb]>=1 if MB didn't change
     * stride= mb_width = (width+15)>>4
     * - encoding: unused
     * - decoding: Set by libavcodec.
     */
    uint8_t *mbskip_table;

    /** Motion vector table.
     * @code
     * example:
     * int mv_sample_log2= 4 - motion_subsample_log2;
     * int mb_width= (width+15)>>4;
     * int mv_stride= (mb_width << mv_sample_log2) + 1;
     * motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y];
     * @endcode
     * - encoding: Set by user.
     * - decoding: Set by libavcodec.
     */
    int16_t (*motion_val[2])[2];

    /** Macroblock type table.
     * mb_type_base + mb_width + 2
     * - encoding: Set by user.
     * - decoding: Set by libavcodec.
     */
    uint32_t *mb_type;

    /** DCT coefficients.
     * - encoding: unused
     * - decoding: Set by libavcodec.
     */
    short *dct_coeff;

    /** Reference frame list.
     * motion reference frame index
     * the order in which these are stored can depend on the codec.
     * - encoding: Set by user.
     * - decoding: Set by libavcodec.
     */
    int8_t *ref_index[2];

    /**
     * for some private data of the user
     * - encoding: unused
     * - decoding: Set by user.
     */
    void *opaque;

    /**
     * error
     * - encoding: Set by libavcodec. if flags&CODEC_FLAG_PSNR.
     * - decoding: unused
     */
    uint64_t error[AV_NUM_DATA_POINTERS];

    /**
     * type of the buffer (to keep track of who has to deallocate data[*])
     * - encoding: Set by the one who allocates it.
     * - decoding: Set by the one who allocates it.
     * Note: User allocated (direct rendering) & internal buffers cannot coexist currently.
     */
    int type;

    /**
     * When decoding, this signals how much the picture must be delayed.
     * extra_delay = repeat_pict / (2*fps)
     * - encoding: unused
     * - decoding: Set by libavcodec.
     */
    int repeat_pict;

    /**
     * The content of the picture is interlaced.
     * - encoding: Set by user.
     * - decoding: Set by libavcodec. (default 0)
     */
    int interlaced_frame;

    /**
     * If the content is interlaced, is top field displayed first.
     * - encoding: Set by user.
     * - decoding: Set by libavcodec.
     */
    int top_field_first;

    /**
     * Tell user application that palette has changed from previous frame.
     * - encoding: ??? (no palette-enabled encoder yet)
     * - decoding: Set by libavcodec. (default 0).
     */
    int palette_has_changed;

    /**
     * codec suggestion on buffer type if != 0
     * - encoding: unused
     * - decoding: Set by libavcodec. (before get_buffer() call)).
     */
    int buffer_hints;

    /**
     * Pan scan.
     * - encoding: Set by user.
     * - decoding: Set by libavcodec.
     */
    AVPanScan *pan_scan;

    /**
     * reordered opaque 64bit (generally an integer or a double precision float
     * PTS but can be anything).
     * The user sets AVCodecContext.reordered_opaque to represent the input at
     * that time,
     * the decoder reorders values as needed and sets AVFrame.reordered_opaque
     * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque
     * @deprecated in favor of pkt_pts
     * - encoding: unused
     * - decoding: Read by user.
     */
    int64_t reordered_opaque;

    /**
     * hardware accelerator private data (FFmpeg-allocated)
     * - encoding: unused
     * - decoding: Set by libavcodec
     */
    void *hwaccel_picture_private;

    /**
     * the AVCodecContext which ff_thread_get_buffer() was last called on
     * - encoding: Set by libavcodec.
     * - decoding: Set by libavcodec.
     */
    struct AVCodecContext *owner;

    /**
     * used by multithreading to store frame-specific info
     * - encoding: Set by libavcodec.
     * - decoding: Set by libavcodec.
     */
    void *thread_opaque;

    /**
     * log2 of the size of the block which a single vector in motion_val represents:
     * (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2)
     * - encoding: unused
     * - decoding: Set by libavcodec.
     */
    uint8_t motion_subsample_log2;

    /** (Audio) sample rate.
     * Sample rate of the audio data.
     *
     * - encoding: unused
     * - decoding: read by user
     */
    int sample_rate;

    /**
     * Channel layout of the audio data.
     *
     * - encoding: unused
     * - decoding: read by user.
     */
    uint64_t channel_layout;

    /**
     * frame timestamp estimated using various heuristics, in stream time base
     * Code outside libavcodec should access this field using:
     * av_frame_get_best_effort_timestamp(frame)
     * - encoding: unused
     * - decoding: set by libavcodec, read by user.
     */
    int64_t best_effort_timestamp;

    /**
     * reordered pos from the last AVPacket that has been input into the decoder
     * Code outside libavcodec should access this field using:
     * av_frame_get_pkt_pos(frame)
     * - encoding: unused
     * - decoding: Read by user.
     */
    int64_t pkt_pos;

    /**
     * duration of the corresponding packet, expressed in
     * AVStream->time_base units, 0 if unknown.
     * Code outside libavcodec should access this field using:
     * av_frame_get_pkt_duration(frame)
     * - encoding: unused
     * - decoding: Read by user.
     */
    int64_t pkt_duration;

    /**
     * metadata.
     * Code outside libavcodec should access this field using:
     * av_frame_get_metadata(frame)
     * - encoding: Set by user.
     * - decoding: Set by libavcodec.
     */
    AVDictionary *metadata;

    /**
     * decode error flags of the frame, set to a combination of
     * FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there
     * were errors during the decoding.
     * Code outside libavcodec should access this field using:
     * av_frame_get_decode_error_flags(frame)
     * - encoding: unused
     * - decoding: set by libavcodec, read by user.
     */
    int decode_error_flags;
#define FF_DECODE_ERROR_INVALID_BITSTREAM 1
#define FF_DECODE_ERROR_MISSING_REFERENCE 2

    /**
     * number of audio channels, only used for audio.
     * Code outside libavcodec should access this field using:
     * av_frame_get_channels(frame)
     * - encoding: unused
     * - decoding: Read by user.
     */
    int64_t channels;
} AVFrame;

The AVFrame structure holds decoded data. It is normally used to store raw (uncompressed) data, i.e. YUV or RGB for video and PCM for audio, together with a number of related fields.

For example, during decoding it also carries the macroblock type table, the QP table, the motion vector table and similar per-frame data, and during encoding related data is stored in it as well. AVFrame is therefore a very important structure when using FFmpeg for bitstream analysis.
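To show where an AVFrame appears in practice, here is a minimal, hedged sketch that decodes one video packet and reads a few of the fields discussed below. It assumes a build in which av_frame_alloc() and the legacy avcodec_decode_video2() coexist (true for FFmpeg 2.x, the era this header comes from); the function and variable names are illustrative only.

#include <stdio.h>
#include <inttypes.h>
#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>

/* Decode a single packet into a frame and print a few AVFrame fields.
 * 'dec_ctx' is an opened AVCodecContext, 'pkt' a demuxed AVPacket. */
static int decode_one(AVCodecContext *dec_ctx, AVPacket *pkt)
{
    AVFrame *frame = av_frame_alloc();   /* decoded picture lands here */
    int got_picture = 0;
    int ret;

    if (!frame)
        return -1;
    ret = avcodec_decode_video2(dec_ctx, frame, &got_picture, pkt);
    if (ret >= 0 && got_picture) {
        printf("frame %dx%d, format=%d, key_frame=%d, pkt_pts=%" PRId64 "\n",
               frame->width, frame->height, frame->format,
               frame->key_frame, frame->pkt_pts);
    }
    av_frame_free(&frame);
    return ret;
}

In current FFmpeg releases the deprecated avcodec_decode_video2() is replaced by the avcodec_send_packet()/avcodec_receive_frame() pair, but the AVFrame fields read above are filled in the same way.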

uint8_t *data[AV_NUM_DATA_POINTERS]: the decoded raw data (one pointer per picture/channel plane)

int linesize[AV_NUM_DATA_POINTERS]: the size in bytes of one "row" of data in each plane. Note: this is not necessarily the image width; because of alignment padding it is usually larger than the width (see the sketch after this list)

int width, height: width and height of the video frame (e.g. 1920x1080)

int nb_samples: number of audio samples (per channel) described by this frame; a single audio AVFrame may cover more than one coded audio frame, and this field records how many samples it holds

int format: format of the decoded raw data (YUV420P, YV12, YUV422P, RGB24, ...); for video it takes values from enum AVPixelFormat, e.g.

// if (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUVJ420P)

int key_frame: whether this frame is a keyframe

enum AVPictureType pict_type: frame type (I, B, P, ...)

AVRational sample_aspect_ratio: aspect ratio (16:9, 4:3, ...)

int64_t pts: presentation timestamp

int coded_picture_number: picture number in coded (bitstream) order

int display_picture_number: picture number in display order

int8_t *qscale_table: QP table

uint8_t *mbskip_table: skipped-macroblock table

int16_t (*motion_val[2])[2]: motion vector table

uint32_t *mb_type: macroblock type table

short *dct_coeff: DCT coefficients

int8_t *ref_index[2]: motion estimation reference frame list

int interlaced_frame: whether the frame is interlaced

uint8_t motion_subsample_log2: log2 of the block size covered by a single motion vector (4 -> 16x16, 3 -> 8x8, 2 -> 4x4, 1 -> 2x2)
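To make the linesize/width distinction above concrete, the sketch below copies the luma (Y) plane of a YUV420P frame into a tightly packed buffer; the destination buffer and its assumed size (width*height bytes) are choices made for the example, not anything prescribed by the API.

#include <stdint.h>
#include <string.h>
#include <libavutil/frame.h>

/* Copy the Y plane of a YUV420P AVFrame into a packed buffer of
 * frame->width * frame->height bytes. linesize[0] may be larger than
 * width because of alignment, so each row is copied individually. */
static void copy_y_plane(const AVFrame *frame, uint8_t *dst)
{
    int y;
    for (y = 0; y < frame->height; y++)
        memcpy(dst + y * frame->width,                  /* packed destination row */
               frame->data[0] + y * frame->linesize[0], /* strided source row */
               frame->width);
}

For YUV420P the chroma planes data[1] and data[2] are handled the same way, but with width/2 columns, height/2 rows and linesize[1]/linesize[2] as the strides.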

For video, AVFrame.format takes its values from enum AVPixelFormat (see also https://blog.csdn.net/leixiaohua1020/article/details/42134965):

enum AVPixelFormat {
    AV_PIX_FMT_NONE = -1,
    AV_PIX_FMT_YUV420P,   ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
    AV_PIX_FMT_YUYV422,   ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
    AV_PIX_FMT_RGB24,     ///< packed RGB 8:8:8, 24bpp, RGBRGB...
    AV_PIX_FMT_BGR24,     ///< packed RGB 8:8:8, 24bpp, BGRBGR...
    AV_PIX_FMT_YUV422P,   ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
    AV_PIX_FMT_YUV444P,   ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
    AV_PIX_FMT_YUV410P,   ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
    AV_PIX_FMT_YUV411P,   ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
    AV_PIX_FMT_GRAY8,     ///< Y, 8bpp
    AV_PIX_FMT_MONOWHITE, ///< Y, 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb
    AV_PIX_FMT_MONOBLACK, ///< Y, 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb
    AV_PIX_FMT_PAL8,      ///< 8 bit with PIX_FMT_RGB32 palette
    AV_PIX_FMT_YUVJ420P,  ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range
    AV_PIX_FMT_YUVJ422P,  ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range
    AV_PIX_FMT_YUVJ444P,  ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range
#if FF_API_XVMC
    AV_PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing
    AV_PIX_FMT_XVMC_MPEG2_IDCT,
#define AV_PIX_FMT_XVMC AV_PIX_FMT_XVMC_MPEG2_IDCT
#endif /* FF_API_XVMC */
    AV_PIX_FMT_UYVY422,   ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
    AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
    AV_PIX_FMT_BGR8,      ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
    AV_PIX_FMT_BGR4,      ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
    AV_PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
    AV_PIX_FMT_RGB8,      ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
    AV_PIX_FMT_RGB4,      ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
    AV_PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
    AV_PIX_FMT_NV12,      ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
    AV_PIX_FMT_NV21,      ///< as above, but U and V bytes are swapped

    AV_PIX_FMT_ARGB,      ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
    AV_PIX_FMT_RGBA,      ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
    AV_PIX_FMT_ABGR,      ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
    AV_PIX_FMT_BGRA,      ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...

    AV_PIX_FMT_GRAY16BE,  ///< Y, 16bpp, big-endian
    AV_PIX_FMT_GRAY16LE,  ///< Y, 16bpp, little-endian
    AV_PIX_FMT_YUV440P,   ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
    AV_PIX_FMT_YUVJ440P,  ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range
    AV_PIX_FMT_YUVA420P,  ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
#if FF_API_VDPAU
    AV_PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
    AV_PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
    AV_PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
    AV_PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
    AV_PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
#endif
    AV_PIX_FMT_RGB48BE,   ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian
    AV_PIX_FMT_RGB48LE,   ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian

    AV_PIX_FMT_RGB565BE,  ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
    AV_PIX_FMT_RGB565LE,  ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
    AV_PIX_FMT_RGB555BE,  ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0
    AV_PIX_FMT_RGB555LE,  ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0

    AV_PIX_FMT_BGR565BE,  ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
    AV_PIX_FMT_BGR565LE,  ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
    AV_PIX_FMT_BGR555BE,  ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1
    AV_PIX_FMT_BGR555LE,  ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1

    AV_PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
    AV_PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers
    AV_PIX_FMT_VAAPI_VLD,  ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers

    AV_PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    AV_PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    AV_PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    AV_PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
#if FF_API_VDPAU
    AV_PIX_FMT_VDPAU_MPEG4, ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
#endif
    AV_PIX_FMT_DXVA2_VLD,   ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer

    AV_PIX_FMT_RGB444LE,  ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0
    AV_PIX_FMT_RGB444BE,  ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0
    AV_PIX_FMT_BGR444LE,  ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1
    AV_PIX_FMT_BGR444BE,  ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1
    AV_PIX_FMT_GRAY8A,    ///< 8bit gray, 8bit alpha
    AV_PIX_FMT_BGR48BE,   ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian
    AV_PIX_FMT_BGR48LE,   ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian

    /**
     * The following 12 formats have the disadvantage of needing 1 format for each bit depth.
     * Notice that each 9/10 bits sample is stored in 16 bits with extra padding.
     * If you want to support multiple bit depths, then using AV_PIX_FMT_YUV420P16* with the bpp stored separately is better.
     */
    AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    AV_PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    AV_PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    AV_PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    AV_PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    AV_PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_VDA_VLD,    ///< hardware decoding through VDA

#ifdef AV_PIX_FMT_ABI_GIT_MASTER
    AV_PIX_FMT_RGBA64BE,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
    AV_PIX_FMT_RGBA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
    AV_PIX_FMT_BGRA64BE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
    AV_PIX_FMT_BGRA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
#endif
    AV_PIX_FMT_GBRP,      ///< planar GBR 4:4:4 24bpp
    AV_PIX_FMT_GBRP9BE,   ///< planar GBR 4:4:4 27bpp, big-endian
    AV_PIX_FMT_GBRP9LE,   ///< planar GBR 4:4:4 27bpp, little-endian
    AV_PIX_FMT_GBRP10BE,  ///< planar GBR 4:4:4 30bpp, big-endian
    AV_PIX_FMT_GBRP10LE,  ///< planar GBR 4:4:4 30bpp, little-endian
    AV_PIX_FMT_GBRP16BE,  ///< planar GBR 4:4:4 48bpp, big-endian
    AV_PIX_FMT_GBRP16LE,  ///< planar GBR 4:4:4 48bpp, little-endian

    /**
     * duplicated pixel formats for compatibility with libav.
     * FFmpeg supports these formats since May 8 2012 and Jan 28 2012 (commits f9ca1ac7 and 143a5c55)
     * Libav added them Oct 12 2012 with incompatible values (commit 6d5600e85)
     */
    AV_PIX_FMT_YUVA422P_LIBAV, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
    AV_PIX_FMT_YUVA444P_LIBAV, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)

    AV_PIX_FMT_YUVA420P9BE,  ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian
    AV_PIX_FMT_YUVA420P9LE,  ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian
    AV_PIX_FMT_YUVA422P9BE,  ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian
    AV_PIX_FMT_YUVA422P9LE,  ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian
    AV_PIX_FMT_YUVA444P9BE,  ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
    AV_PIX_FMT_YUVA444P9LE,  ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
    AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
    AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
    AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
    AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
    AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
    AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)

    AV_PIX_FMT_VDPAU,     ///< HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface

    AV_PIX_FMT_XYZ12LE,   ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0
    AV_PIX_FMT_XYZ12BE,   ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0
    AV_PIX_FMT_NV16,      ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
    AV_PIX_FMT_NV20LE,    ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_NV20BE,    ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian

    /**
     * duplicated pixel formats for compatibility with libav.
     * FFmpeg supports these formats since Sat Sep 24 06:01:45 2011 +0200 (commits 9569a3c9f41387a8c7d1ce97d8693520477a66c3)
     * also see Fri Nov 25 01:38:21 2011 +0100 92afb431621c79155fcb7171d26f137eb1bee028
     * Libav added them Sun Mar 16 23:05:47 2014 +0100 with incompatible values (commit 1481d24c3a0abf81e1d7a514547bd5305232be30)
     */
    AV_PIX_FMT_RGBA64BE_LIBAV, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
    AV_PIX_FMT_RGBA64LE_LIBAV, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
    AV_PIX_FMT_BGRA64BE_LIBAV, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
    AV_PIX_FMT_BGRA64LE_LIBAV, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian

    AV_PIX_FMT_YVYU422,   ///< packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb

#ifndef AV_PIX_FMT_ABI_GIT_MASTER
    AV_PIX_FMT_RGBA64BE=0x123, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
    AV_PIX_FMT_RGBA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
    AV_PIX_FMT_BGRA64BE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
    AV_PIX_FMT_BGRA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
#endif
    AV_PIX_FMT_0RGB=0x123+4, ///< packed RGB 8:8:8, 32bpp, 0RGB0RGB...
    AV_PIX_FMT_RGB0,      ///< packed RGB 8:8:8, 32bpp, RGB0RGB0...
    AV_PIX_FMT_0BGR,      ///< packed BGR 8:8:8, 32bpp, 0BGR0BGR...
    AV_PIX_FMT_BGR0,      ///< packed BGR 8:8:8, 32bpp, BGR0BGR0...
    AV_PIX_FMT_YUVA444P,  ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
    AV_PIX_FMT_YUVA422P,  ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)

    AV_PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    AV_PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    AV_PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    AV_PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    AV_PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    AV_PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    AV_PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    AV_PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    AV_PIX_FMT_GBRP12BE,    ///< planar GBR 4:4:4 36bpp, big-endian
    AV_PIX_FMT_GBRP12LE,    ///< planar GBR 4:4:4 36bpp, little-endian
    AV_PIX_FMT_GBRP14BE,    ///< planar GBR 4:4:4 42bpp, big-endian
    AV_PIX_FMT_GBRP14LE,    ///< planar GBR 4:4:4 42bpp, little-endian
    AV_PIX_FMT_GBRAP,       ///< planar GBRA 4:4:4:4 32bpp
    AV_PIX_FMT_GBRAP16BE,   ///< planar GBRA 4:4:4:4 64bpp, big-endian
    AV_PIX_FMT_GBRAP16LE,   ///< planar GBRA 4:4:4:4 64bpp, little-endian
    AV_PIX_FMT_YUVJ411P,    ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of PIX_FMT_YUV411P and setting color_range

    AV_PIX_FMT_BAYER_BGGR8,    ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples */
    AV_PIX_FMT_BAYER_RGGB8,    ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples */
    AV_PIX_FMT_BAYER_GBRG8,    ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples */
    AV_PIX_FMT_BAYER_GRBG8,    ///< bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples */
    AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian */
    AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian */
    AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian */
    AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian */
    AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian */
    AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian */
    AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian */
    AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian */
#if !FF_API_XVMC
    AV_PIX_FMT_XVMC,///< XVideo Motion Acceleration via common packet passing
#endif /* !FF_API_XVMC */

    AV_PIX_FMT_NB,        ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions

#if FF_API_PIX_FMT
#include "old_pix_fmts.h"
#endif
};
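Since AVFrame.format is only an int holding one of the enum values above, libavutil's pixel-format descriptors are a convenient way to print it in readable form. The following sketch is not from the original posts; it only relies on the standard libavutil helpers av_get_pix_fmt_name() and av_pix_fmt_desc_get().

#include <stdio.h>
#include <libavutil/frame.h>
#include <libavutil/pixdesc.h>

/* Print the name and chroma subsampling of the pixel format stored in a frame. */
static void describe_frame_format(const AVFrame *frame)
{
    enum AVPixelFormat fmt = (enum AVPixelFormat)frame->format;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);

    if (!desc) {
        printf("unknown pixel format %d\n", frame->format);
        return;
    }
    /* e.g. "yuv420p: 3 component(s), chroma subsampling 1x1 (log2)" */
    printf("%s: %d component(s), chroma subsampling %dx%d (log2)\n",
           av_get_pix_fmt_name(fmt), desc->nb_components,
           desc->log2_chroma_w, desc->log2_chroma_h);
}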
For comparison, x264 classifies its colorspaces in a similar way: x264_frame_internal_csp() maps every externally supplied colorspace onto one of the formats the encoder works with internally.

static int x264_frame_internal_csp( int external_csp )
{
    switch( external_csp & X264_CSP_MASK )
    {
        case X264_CSP_NV12:
        case X264_CSP_NV21:
        case X264_CSP_I420:
        case X264_CSP_YV12:
            return X264_CSP_NV12;
        case X264_CSP_NV16:
        case X264_CSP_I422:
        case X264_CSP_YV16:
        case X264_CSP_V210:
            return X264_CSP_NV16;
        case X264_CSP_I444:
        case X264_CSP_YV24:
        case X264_CSP_BGR:
        case X264_CSP_BGRA:
        case X264_CSP_RGB:
            return X264_CSP_I444;
        default:
            return X264_CSP_NONE;
    }
}
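A wrapper that feeds decoded AVFrames to x264 has to perform the reverse classification first, mapping AVPixelFormat values to x264 colorspace constants. The sketch below is an illustrative assumption of such a mapping (a simplified stand-in for what a real libx264 wrapper does), not code from either original post.

#include <stdint.h>
#include <libavutil/pixfmt.h>
#include <x264.h>

/* Map a decoded AVFrame's pixel format to the x264 colorspace constant
 * that x264_picture_t.img.i_csp expects. The JPEG (full-range) variants
 * share the same memory layout as their limited-range counterparts. */
static int avpixfmt_to_x264_csp(enum AVPixelFormat fmt)
{
    switch (fmt) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUVJ420P: return X264_CSP_I420;
    case AV_PIX_FMT_NV12:     return X264_CSP_NV12;
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUVJ422P: return X264_CSP_I422;
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUVJ444P: return X264_CSP_I444;
    case AV_PIX_FMT_BGR24:    return X264_CSP_BGR;
    case AV_PIX_FMT_RGB24:    return X264_CSP_RGB;
    default:                  return X264_CSP_NONE;
    }
}

Formats not handled here would first be converted with libswscale before being handed to the encoder.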