author     Anton Khirnov <anton@khirnov.net>    2012-10-17 11:51:01 +0200
committer  Anton Khirnov <anton@khirnov.net>    2013-03-08 07:36:15 +0100
commit     7ecc2d403ce5c7b6ea3b1f368dccefd105209c7e (patch)
tree       cf6a6275abb9737b551b7cbea0b61b7935a7116b /libavutil
parent     ad0c9f2d5d81e22207c6ccecc426bf7306acc327 (diff)
download   ffmpeg-7ecc2d403ce5c7b6ea3b1f368dccefd105209c7e.tar.gz
Move AVFrame from lavc to lavu.
Add AVBuffer-based reference counting API to it.
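
The reference counting added here is built on the AVBuffer API: a frame counts as reference-counted once AVFrame.buf[0] is set, which av_frame_get_buffer() takes care of. Below is a minimal sketch of the intended allocation lifecycle; it is not part of the commit, and the pixel format constant and the 640x480 dimensions are only illustrative.

    #include <libavutil/frame.h>
    #include <libavutil/pixfmt.h>

    /* Illustrative only: allocate a reference-counted video frame, use it,
     * then release the data and the struct. */
    static int alloc_video_frame(void)
    {
        AVFrame *frame = av_frame_alloc();
        int ret;

        if (!frame)
            return AVERROR(ENOMEM);

        /* format, width and height must be set before av_frame_get_buffer() */
        frame->format = AV_PIX_FMT_YUV420P;
        frame->width  = 640;
        frame->height = 480;

        /* allocates AVBufferRefs into frame->buf[] and fills frame->data[] */
        ret = av_frame_get_buffer(frame, 32);
        if (ret < 0) {
            av_frame_free(&frame);
            return ret;
        }

        /* ... fill frame->data[] / frame->linesize[] ... */

        av_frame_unref(frame);   /* drop the buffers; the struct could be reused */
        av_frame_free(&frame);   /* free the struct itself; frame becomes NULL */
        return 0;
    }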
Diffstat (limited to 'libavutil')
-rw-r--r--  libavutil/Makefile |   2
-rw-r--r--  libavutil/frame.c  | 401
-rw-r--r--  libavutil/frame.h  | 429
3 files changed, 832 insertions, 0 deletions
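
Sharing frame data under the new API means taking extra references on the underlying buffers rather than copying planes. A hedged sketch of how av_frame_clone() (equivalently av_frame_alloc() plus av_frame_ref()) might be used on a frame allocated as in the previous example:

    #include <libavutil/frame.h>

    /* Illustrative only: create a second reference to the data of an
     * already allocated, reference-counted frame.  No plane data is
     * copied; the new frame just takes references on src's AVBuffers. */
    static AVFrame *share_frame(AVFrame *src)
    {
        AVFrame *copy = av_frame_clone(src);

        if (!copy)
            return NULL;   /* allocation or av_frame_ref() failed */

        /* While both frames are alive, av_frame_is_writable() returns 0
         * for either of them, so neither side may modify the payload
         * in place. */

        return copy;       /* release with av_frame_free(&copy) when done */
    }

If src is not reference counted (src->buf[0] is NULL), av_frame_ref() instead allocates new buffers and copies the data, as the frame.c hunk below shows.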
diff --git a/libavutil/Makefile b/libavutil/Makefile index ef29d5c4b5..582fc31951 100644 --- a/libavutil/Makefile +++ b/libavutil/Makefile @@ -20,6 +20,7 @@ HEADERS = adler32.h \ eval.h \ fifo.h \ file.h \ + frame.h \ hmac.h \ imgutils.h \ intfloat.h \ @@ -70,6 +71,7 @@ OBJS = adler32.o \ fifo.o \ file.o \ float_dsp.o \ + frame.o \ hmac.o \ imgutils.o \ intfloat_readwrite.o \ diff --git a/libavutil/frame.c b/libavutil/frame.c new file mode 100644 index 0000000000..e84052eda0 --- /dev/null +++ b/libavutil/frame.c @@ -0,0 +1,401 @@ +/* + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "audioconvert.h" +#include "buffer.h" +#include "common.h" +#include "frame.h" +#include "imgutils.h" +#include "mem.h" +#include "samplefmt.h" + +static void get_frame_defaults(AVFrame *frame) +{ + if (frame->extended_data != frame->data) + av_freep(&frame->extended_data); + + memset(frame, 0, sizeof(*frame)); + + frame->pts = AV_NOPTS_VALUE; + frame->key_frame = 1; + frame->sample_aspect_ratio = (AVRational){ 0, 1 }; + frame->format = -1; /* unknown */ + frame->extended_data = frame->data; +} + +AVFrame *av_frame_alloc(void) +{ + AVFrame *frame = av_mallocz(sizeof(*frame)); + + if (!frame) + return NULL; + + get_frame_defaults(frame); + + return frame; +} + +void av_frame_free(AVFrame **frame) +{ + if (!frame || !*frame) + return; + + av_frame_unref(*frame); + av_freep(frame); +} + +static int get_video_buffer(AVFrame *frame, int align) +{ + const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format); + int ret, i; + + if (!desc) + return AVERROR(EINVAL); + + if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0) + return ret; + + if (!frame->linesize[0]) { + ret = av_image_fill_linesizes(frame->linesize, frame->format, + frame->width); + if (ret < 0) + return ret; + + for (i = 0; i < 4 && frame->linesize[i]; i++) + frame->linesize[i] = FFALIGN(frame->linesize[i], align); + } + + for (i = 0; i < 4 && frame->linesize[i]; i++) { + int h = frame->height; + if (i == 1 || i == 2) + h = -((-h) >> desc->log2_chroma_h); + + frame->buf[i] = av_buffer_alloc(frame->linesize[i] * h); + if (!frame->buf[i]) + goto fail; + + frame->data[i] = frame->buf[i]->data; + } + if (desc->flags & PIX_FMT_PAL || desc->flags & PIX_FMT_PSEUDOPAL) { + av_buffer_unref(&frame->buf[1]); + frame->buf[1] = av_buffer_alloc(1024); + if (!frame->buf[1]) + goto fail; + frame->data[1] = frame->buf[1]->data; + } + + frame->extended_data = frame->data; + + return 0; +fail: + av_frame_unref(frame); + return AVERROR(ENOMEM); +} + +static int get_audio_buffer(AVFrame *frame, int align) +{ + int channels = av_get_channel_layout_nb_channels(frame->channel_layout); + int planar = av_sample_fmt_is_planar(frame->format); + int planes = planar ? 
channels : 1; + int ret, i; + + if (!frame->linesize[0]) { + ret = av_samples_get_buffer_size(&frame->linesize[0], channels, + frame->nb_samples, frame->format, + align); + if (ret < 0) + return ret; + } + + if (planes > AV_NUM_DATA_POINTERS) { + frame->extended_data = av_mallocz(planes * + sizeof(*frame->extended_data)); + frame->extended_buf = av_mallocz((planes - AV_NUM_DATA_POINTERS) * + sizeof(*frame->extended_buf)); + if (!frame->extended_data || !frame->extended_buf) { + av_freep(&frame->extended_data); + av_freep(&frame->extended_buf); + return AVERROR(ENOMEM); + } + frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS; + } else + frame->extended_data = frame->data; + + for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) { + frame->buf[i] = av_buffer_alloc(frame->linesize[0]); + if (!frame->buf[i]) { + av_frame_unref(frame); + return AVERROR(ENOMEM); + } + frame->extended_data[i] = frame->data[i] = frame->buf[i]->data; + } + for (i = 0; i < planes - AV_NUM_DATA_POINTERS; i++) { + frame->extended_buf[i] = av_buffer_alloc(frame->linesize[0]); + if (!frame->extended_buf[i]) { + av_frame_unref(frame); + return AVERROR(ENOMEM); + } + frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data; + } + return 0; + +} + +int av_frame_get_buffer(AVFrame *frame, int align) +{ + if (frame->format < 0) + return AVERROR(EINVAL); + + if (frame->width > 0 && frame->height > 0) + return get_video_buffer(frame, align); + else if (frame->nb_samples > 0 && frame->channel_layout) + return get_audio_buffer(frame, align); + + return AVERROR(EINVAL); +} + +int av_frame_ref(AVFrame *dst, AVFrame *src) +{ + int i, ret = 0; + + dst->format = src->format; + dst->width = src->width; + dst->height = src->height; + dst->channel_layout = src->channel_layout; + dst->nb_samples = src->nb_samples; + + ret = av_frame_copy_props(dst, src); + if (ret < 0) + return ret; + + /* duplicate the frame data if it's not refcounted */ + if (!src->buf[0]) { + ret = av_frame_get_buffer(dst, 32); + if (ret < 0) + return ret; + + if (src->nb_samples) { + int ch = av_get_channel_layout_nb_channels(src->channel_layout); + av_samples_copy(dst->extended_data, src->extended_data, 0, 0, + dst->nb_samples, ch, dst->format); + } else { + av_image_copy(dst->data, dst->linesize, src->data, src->linesize, + dst->format, dst->width, dst->height); + } + return 0; + } + + /* ref the buffers */ + for (i = 0; i < FF_ARRAY_ELEMS(src->buf) && src->buf[i]; i++) { + dst->buf[i] = av_buffer_ref(src->buf[i]); + if (!dst->buf[i]) { + ret = AVERROR(ENOMEM); + goto fail; + } + } + + if (src->extended_buf) { + dst->extended_buf = av_mallocz(sizeof(*dst->extended_buf) * + src->nb_extended_buf); + if (!dst->extended_buf) { + ret = AVERROR(ENOMEM); + goto fail; + } + dst->nb_extended_buf = src->nb_extended_buf; + + for (i = 0; i < src->nb_extended_buf; i++) { + dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]); + if (!dst->extended_buf[i]) { + ret = AVERROR(ENOMEM); + goto fail; + } + } + } + + /* duplicate extended data */ + if (src->extended_data != src->data) { + int ch = av_get_channel_layout_nb_channels(src->channel_layout); + + if (!ch) { + ret = AVERROR(EINVAL); + goto fail; + } + + dst->extended_data = av_malloc(sizeof(*dst->extended_data) * ch); + if (!dst->extended_data) { + ret = AVERROR(ENOMEM); + goto fail; + } + memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch); + } else + dst->extended_data = dst->data; + + memcpy(dst->data, src->data, sizeof(src->data)); + memcpy(dst->linesize, 
src->linesize, sizeof(src->linesize)); + + return 0; + +fail: + av_frame_unref(dst); + return ret; +} + +AVFrame *av_frame_clone(AVFrame *src) +{ + AVFrame *ret = av_frame_alloc(); + + if (!ret) + return NULL; + + if (av_frame_ref(ret, src) < 0) + av_frame_free(&ret); + + return ret; +} + +void av_frame_unref(AVFrame *frame) +{ + int i; + + for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++) + av_buffer_unref(&frame->buf[i]); + for (i = 0; i < frame->nb_extended_buf; i++) + av_buffer_unref(&frame->extended_buf[i]); + av_freep(&frame->extended_buf); + get_frame_defaults(frame); +} + +void av_frame_move_ref(AVFrame *dst, AVFrame *src) +{ + *dst = *src; + if (src->extended_data == src->data) + dst->extended_data = dst->data; + memset(src, 0, sizeof(*src)); + get_frame_defaults(src); +} + +int av_frame_is_writable(AVFrame *frame) +{ + int i, ret = 1; + + /* assume non-refcounted frames are not writable */ + if (!frame->buf[0]) + return 0; + + for (i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++) + ret &= !!av_buffer_is_writable(frame->buf[i]); + for (i = 0; i < frame->nb_extended_buf; i++) + ret &= !!av_buffer_is_writable(frame->extended_buf[i]); + + return ret; +} + +int av_frame_make_writable(AVFrame *frame) +{ + AVFrame tmp; + int ret; + + if (!frame->buf[0]) + return AVERROR(EINVAL); + + if (av_frame_is_writable(frame)) + return 0; + + memset(&tmp, 0, sizeof(tmp)); + tmp.format = frame->format; + tmp.width = frame->width; + tmp.height = frame->height; + tmp.channel_layout = frame->channel_layout; + tmp.nb_samples = frame->nb_samples; + ret = av_frame_get_buffer(&tmp, 32); + if (ret < 0) + return ret; + + if (tmp.nb_samples) { + int ch = av_get_channel_layout_nb_channels(tmp.channel_layout); + av_samples_copy(tmp.extended_data, frame->extended_data, 0, 0, + frame->nb_samples, ch, frame->format); + } else { + av_image_copy(tmp.data, tmp.linesize, frame->data, frame->linesize, + frame->format, frame->width, frame->height); + } + + ret = av_frame_copy_props(&tmp, frame); + if (ret < 0) { + av_frame_unref(&tmp); + return ret; + } + + av_frame_unref(frame); + + *frame = tmp; + if (tmp.data == tmp.extended_data) + frame->extended_data = frame->data; + + return 0; +} + +int av_frame_copy_props(AVFrame *dst, const AVFrame *src) +{ + dst->key_frame = src->key_frame; + dst->pict_type = src->pict_type; + dst->sample_aspect_ratio = src->sample_aspect_ratio; + dst->pts = src->pts; + dst->interlaced_frame = src->interlaced_frame; + dst->top_field_first = src->top_field_first; + dst->sample_rate = src->sample_rate; + dst->opaque = src->opaque; + dst->pkt_pts = src->pkt_pts; + dst->pkt_dts = src->pkt_dts; + dst->quality = src->quality; + dst->coded_picture_number = src->coded_picture_number; + dst->display_picture_number = src->display_picture_number; + + return 0; +} + +AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane) +{ + uint8_t *data; + int planes, i; + + if (frame->nb_samples) { + int channels = av_get_channel_layout_nb_channels(frame->channel_layout); + if (!channels) + return NULL; + planes = av_sample_fmt_is_planar(frame->format) ? 
channels : 1; + } else + planes = 4; + + if (plane < 0 || plane >= planes || !frame->extended_data[plane]) + return NULL; + data = frame->extended_data[plane]; + + for (i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++) { + AVBufferRef *buf = frame->buf[i]; + if (data >= buf->data && data < buf->data + buf->size) + return buf; + } + for (i = 0; i < frame->nb_extended_buf; i++) { + AVBufferRef *buf = frame->extended_buf[i]; + if (data >= buf->data && data < buf->data + buf->size) + return buf; + } + return NULL; +} diff --git a/libavutil/frame.h b/libavutil/frame.h new file mode 100644 index 0000000000..ed410ba122 --- /dev/null +++ b/libavutil/frame.h @@ -0,0 +1,429 @@ +/* + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_FRAME_H +#define AVUTIL_FRAME_H + +#include <stdint.h> + +#include "libavcodec/version.h" + +#include "avutil.h" +#include "buffer.h" +#include "rational.h" +#include "samplefmt.h" + + +/** + * This structure describes decoded (raw) audio or video data. + * + * AVFrame must be allocated using av_frame_alloc(). Not that this only + * allocates the AVFrame itself, the buffers for the data must be managed + * through other means (see below). + * AVFrame must be freed with av_frame_free(). + * + * AVFrame is typically allocated once and then reused multiple times to hold + * different data (e.g. a single AVFrame to hold frames received from a + * decoder). In such a case, av_frame_unref() will free any references held by + * the frame and reset it to its original clean state before it + * is reused again. + * + * The data described by an AVFrame is usually reference counted through the + * AVBuffer API. The underlying buffer references are stored in AVFrame.buf / + * AVFrame.extended_buf. An AVFrame is considered to be reference counted if at + * least one reference is set, i.e. if AVFrame.buf[0] != NULL. In such a case, + * every single data plane must be contained in one of the buffers in + * AVFrame.buf or AVFrame.extended_buf. + * There may be a single buffer for all the data, or one separate buffer for + * each plane, or anything in between. + * + * sizeof(AVFrame) is not a part of the public ABI, so new fields may be added + * to the end with a minor bump. + */ +typedef struct AVFrame { +#define AV_NUM_DATA_POINTERS 8 + /** + * pointer to the picture/channel planes. + * This might be different from the first allocated byte + */ + uint8_t *data[AV_NUM_DATA_POINTERS]; + + /** + * For video, size in bytes of each picture line. + * For audio, size in bytes of each plane. + * + * For audio, only linesize[0] may be set. For planar audio, each channel + * plane must be the same size. + */ + int linesize[AV_NUM_DATA_POINTERS]; + + /** + * pointers to the data planes/channels. + * + * For video, this should simply point to data[]. 
+ * + * For planar audio, each channel has a separate data pointer, and + * linesize[0] contains the size of each channel buffer. + * For packed audio, there is just one data pointer, and linesize[0] + * contains the total size of the buffer for all channels. + * + * Note: Both data and extended_data should always be set in a valid frame, + * but for planar audio with more channels that can fit in data, + * extended_data must be used in order to access all channels. + */ + uint8_t **extended_data; + + /** + * width and height of the video frame + */ + int width, height; + + /** + * number of audio samples (per channel) described by this frame + */ + int nb_samples; + + /** + * format of the frame, -1 if unknown or unset + * Values correspond to enum AVPixelFormat for video frames, + * enum AVSampleFormat for audio) + */ + int format; + + /** + * 1 -> keyframe, 0-> not + */ + int key_frame; + + /** + * Picture type of the frame. + */ + enum AVPictureType pict_type; + + uint8_t *base[AV_NUM_DATA_POINTERS]; + + /** + * Sample aspect ratio for the video frame, 0/1 if unknown/unspecified. + */ + AVRational sample_aspect_ratio; + + /** + * Presentation timestamp in time_base units (time when frame should be shown to user). + */ + int64_t pts; + + /** + * PTS copied from the AVPacket that was decoded to produce this frame. + */ + int64_t pkt_pts; + + /** + * DTS copied from the AVPacket that triggered returning this frame. + */ + int64_t pkt_dts; + + /** + * picture number in bitstream order + */ + int coded_picture_number; + /** + * picture number in display order + */ + int display_picture_number; + + /** + * quality (between 1 (good) and FF_LAMBDA_MAX (bad)) + */ + int quality; + + int reference; + + /** + * QP table + */ + int8_t *qscale_table; + /** + * QP store stride + */ + int qstride; + + int qscale_type; + + /** + * mbskip_table[mb]>=1 if MB didn't change + * stride= mb_width = (width+15)>>4 + */ + uint8_t *mbskip_table; + + /** + * motion vector table + * @code + * example: + * int mv_sample_log2= 4 - motion_subsample_log2; + * int mb_width= (width+15)>>4; + * int mv_stride= (mb_width << mv_sample_log2) + 1; + * motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y]; + * @endcode + */ + int16_t (*motion_val[2])[2]; + + /** + * macroblock type table + * mb_type_base + mb_width + 2 + */ + uint32_t *mb_type; + + /** + * DCT coefficients + */ + short *dct_coeff; + + /** + * motion reference frame index + * the order in which these are stored can depend on the codec. + */ + int8_t *ref_index[2]; + + /** + * for some private data of the user + */ + void *opaque; + + /** + * error + */ + uint64_t error[AV_NUM_DATA_POINTERS]; + + int type; + + /** + * When decoding, this signals how much the picture must be delayed. + * extra_delay = repeat_pict / (2*fps) + */ + int repeat_pict; + + /** + * The content of the picture is interlaced. + */ + int interlaced_frame; + + /** + * If the content is interlaced, is top field displayed first. + */ + int top_field_first; + + /** + * Tell user application that palette has changed from previous frame. + */ + int palette_has_changed; + + int buffer_hints; + + /** + * Pan scan. + */ + struct AVPanScan *pan_scan; + + /** + * reordered opaque 64bit (generally an integer or a double precision float + * PTS but can be anything). 
+ * The user sets AVCodecContext.reordered_opaque to represent the input at + * that time, + * the decoder reorders values as needed and sets AVFrame.reordered_opaque + * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque + * @deprecated in favor of pkt_pts + */ + int64_t reordered_opaque; + +#if FF_API_AVFRAME_LAVC + /** + * @deprecated this field is unused + */ + attribute_deprecated void *hwaccel_picture_private; +#endif + + struct AVCodecContext *owner; + void *thread_opaque; + + /** + * log2 of the size of the block which a single vector in motion_val represents: + * (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2) + */ + uint8_t motion_subsample_log2; + + /** + * Sample rate of the audio data. + */ + int sample_rate; + + /** + * Channel layout of the audio data. + */ + uint64_t channel_layout; + + /** + * AVBuffer references backing the data for this frame. If all elements of + * this array are NULL, then this frame is not reference counted. + * + * There may be at most one AVBuffer per data plane, so for video this array + * always contains all the references. For planar audio with more than + * AV_NUM_DATA_POINTERS channels, there may be more buffers than can fit in + * this array. Then the extra AVBufferRef pointers are stored in the + * extended_buf array. + */ + AVBufferRef *buf[AV_NUM_DATA_POINTERS]; + + /** + * For planar audio which requires more than AV_NUM_DATA_POINTERS + * AVBufferRef pointers, this array will hold all the references which + * cannot fit into AVFrame.buf. + * + * Note that this is different from AVFrame.extended_data, which always + * contains all the pointers. This array only contains the extra pointers, + * which cannot fit into AVFrame.buf. + * + * This array is always allocated using av_malloc() by whoever constructs + * the frame. It is freed in av_frame_unref(). + */ + AVBufferRef **extended_buf; + /** + * Number of elements in extended_buf. + */ + int nb_extended_buf; +} AVFrame; + +/** + * Allocate an AVFrame and set its fields to default values. The resulting + * struct must be freed using av_frame_free(). + * + * @return An AVFrame filled with default values or NULL on failure. + * + * @note this only allocates the AVFrame itself, not the data buffers. Those + * must be allocated through other means, e.g. with av_frame_get_buffer() or + * manually. + */ +AVFrame *av_frame_alloc(void); + +/** + * Free the frame and any dynamically allocated objects in it, + * e.g. extended_data. If the frame is reference counted, it will be + * unreferenced first. + * + * @param frame frame to be freed. The pointer will be set to NULL. + */ +void av_frame_free(AVFrame **frame); + +/** + * Setup a new reference to the data described by an given frame. + * + * Copy frame properties from src to dst and create a new reference for each + * AVBufferRef from src. + * + * If src is not reference counted, new buffers are allocated and the data is + * copied. + * + * @return 0 on success, a negative AVERROR on error + */ +int av_frame_ref(AVFrame *dst, AVFrame *src); + +/** + * Create a new frame that references the same data as src. + * + * This is a shortcut for av_frame_alloc()+av_frame_ref(). + * + * @return newly created AVFrame on success, NULL on error. + */ +AVFrame *av_frame_clone(AVFrame *src); + +/** + * Unreference all the buffers referenced by frame and reset the frame fields. + */ +void av_frame_unref(AVFrame *frame); + +/** + * Move everythnig contained in src to dst and reset src. 
+ */ +void av_frame_move_ref(AVFrame *dst, AVFrame *src); + +/** + * Allocate new buffer(s) for audio or video data. + * + * The following fields must be set on frame before calling this function: + * - format (pixel format for video, sample format for audio) + * - width and height for video + * - nb_samples and channel_layout for audio + * + * This function will fill AVFrame.data and AVFrame.buf arrays and, if + * necessary, allocate and fill AVFrame.extended_data and AVFrame.extended_buf. + * For planar formats, one buffer will be allocated for each plane. + * + * @param frame frame in which to store the new buffers. + * @param align required buffer size alignment + * + * @return 0 on success, a negative AVERROR on error. + */ +int av_frame_get_buffer(AVFrame *frame, int align); + +/** + * Check if the frame data is writable. + * + * @return A positive value if the frame data is writable (which is true if and + * only if each of the underlying buffers has only one reference, namely the one + * stored in this frame). Return 0 otherwise. + * + * If 1 is returned the answer is valid until av_buffer_ref() is called on any + * of the underlying AVBufferRefs (e.g. through av_frame_ref() or directly). + * + * @see av_frame_make_writable(), av_buffer_is_writable() + */ +int av_frame_is_writable(AVFrame *frame); + +/** + * Ensure that the frame data is writable, avoiding data copy if possible. + * + * Do nothing if the frame is writable, allocate new buffers and copy the data + * if it is not. + * + * @return 0 on success, a negative AVERROR on error. + * + * @see av_frame_is_writable(), av_buffer_is_writable(), + * av_buffer_make_writable() + */ +int av_frame_make_writable(AVFrame *frame); + +/** + * Copy only "metadata" fields from src to dst. + * + * Metadata for the purpose of this function are those fields that do not affect + * the data layout in the buffers. E.g. pts, sample rate (for audio) or sample + * aspect ratio (for video), but not width/height or channel layout. + */ +int av_frame_copy_props(AVFrame *dst, const AVFrame *src); + +/** + * Get the buffer reference a given data plane is stored in. + * + * @param plane index of the data plane of interest in frame->extended_data. + * + * @return the buffer reference that contains the plane or NULL if the input + * frame is not valid. + */ +AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane); + +#endif /* AVUTIL_FRAME_H */ |
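
For audio the same entry points apply, and av_frame_make_writable() provides copy-on-write behaviour once buffers are shared. The sketch below is not part of the commit; AV_SAMPLE_FMT_FLTP and AV_CH_LAYOUT_STEREO come from libavutil's samplefmt.h and channel_layout.h, and the 1024-sample count is arbitrary.

    #include <libavutil/channel_layout.h>
    #include <libavutil/frame.h>
    #include <libavutil/samplefmt.h>

    /* Illustrative only: allocate a planar-float stereo frame, share it,
     * then make the original safe to modify again. */
    static int touch_audio_frame(void)
    {
        AVFrame *frame = av_frame_alloc();
        AVFrame *ref   = av_frame_alloc();
        int ret = AVERROR(ENOMEM);

        if (!frame || !ref)
            goto end;

        /* nb_samples and channel_layout (plus format) select the audio
         * path in av_frame_get_buffer() */
        frame->format         = AV_SAMPLE_FMT_FLTP;
        frame->channel_layout = AV_CH_LAYOUT_STEREO;
        frame->nb_samples     = 1024;

        ret = av_frame_get_buffer(frame, 0);   /* 0 = default alignment */
        if (ret < 0)
            goto end;

        ret = av_frame_ref(ref, frame);        /* data is now shared */
        if (ret < 0)
            goto end;

        /* frame is no longer writable: this allocates fresh buffers for it
         * and copies the samples, leaving ref untouched */
        ret = av_frame_make_writable(frame);
        if (ret < 0)
            goto end;

        /* frame->extended_data[0] and [1] may now be modified freely */

    end:
        av_frame_free(&ref);
        av_frame_free(&frame);
        return ret;
    }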