| author | Stefano Sabatini <stefano.sabatini-lala@poste.it> | 2011-08-18 16:21:47 +0200 |
|---|---|---|
| committer | Stefano Sabatini <stefano.sabatini-lala@poste.it> | 2011-09-06 18:47:04 +0200 |
| commit | c4415f6ec980d1a5f3ddda79395258150747e97f (patch) | |
| tree | d46d96fc5ec9633224cdd2bf9100ad6ad131e7fb /libavfilter/vsink_buffer.c | |
| parent | be7eed72c89368de70dbf8749eca1dac7443e51a (diff) | |
| download | ffmpeg-c4415f6ec980d1a5f3ddda79395258150747e97f.tar.gz | |
lavfi: unify asink_buffer and vsink_buffer API
The new API is more generic (no distinction between audio/video for
pulling frames), and avoids code duplication.
A backward compatibility layer is kept to avoid ABI breaks in the tools
(only for the video binary interface; the audio interface was never used
in the tools).
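
For context, a minimal sketch of how an application might drain frames through the unified call is shown below. The helper name pull_from_sink is illustrative, and the header location (vsink_buffer.h, where the new prototypes live at this point in the tree) is an assumption rather than something shown by this diff.

```c
#include <libavfilter/avfilter.h>
#include <libavfilter/vsink_buffer.h>  /* assumed location of the new av_buffersink_* prototypes */

/* Illustrative helper: pull every currently obtainable frame out of a
 * buffersink/abuffersink instance. The same call works for video and
 * audio sinks, which is the point of the unified API; the old
 * av_vsink_buffer_get_video_buffer_ref() survives only as a wrapper. */
static int pull_from_sink(AVFilterContext *sink_ctx)
{
    AVFilterBufferRef *ref;
    int ret;

    /* Passing AV_BUFFERSINK_FLAG_PEEK instead of 0 would return the next
     * frame without removing it from the sink's FIFO. */
    while ((ret = av_buffersink_get_buffer_ref(sink_ctx, &ref, 0)) >= 0) {
        /* ... consume ref->data / ref->video / ref->audio here ... */
        avfilter_unref_buffer(ref);
    }
    return ret;
}
```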
Diffstat (limited to 'libavfilter/vsink_buffer.c')
-rw-r--r-- | libavfilter/vsink_buffer.c | 180
1 file changed, 152 insertions, 28 deletions
diff --git a/libavfilter/vsink_buffer.c b/libavfilter/vsink_buffer.c
index 52e362e672..4ae561c7a7 100644
--- a/libavfilter/vsink_buffer.c
+++ b/libavfilter/vsink_buffer.c
@@ -27,33 +27,60 @@
 #include "avfilter.h"
 #include "vsink_buffer.h"
 
+AVBufferSinkParams *av_buffersink_params_alloc(void)
+{
+    static const int pixel_fmts[] = { -1 };
+    AVBufferSinkParams *params = av_malloc(sizeof(AVBufferSinkParams));
+    if (!params)
+        return NULL;
+
+    params->pixel_fmts = pixel_fmts;
+    return params;
+}
+
+AVABufferSinkParams *av_abuffersink_params_alloc(void)
+{
+    static const int sample_fmts[] = { -1 };
+    static const int packing_fmts[] = { -1 };
+    static const int64_t channel_layouts[] = { -1 };
+    AVABufferSinkParams *params = av_malloc(sizeof(AVABufferSinkParams));
+
+    if (!params)
+        return NULL;
+
+    params->sample_fmts = sample_fmts;
+    params->channel_layouts = channel_layouts;
+    params->packing_fmts = packing_fmts;
+    return params;
+}
+
 typedef struct {
-    AVFifoBuffer *fifo;          ///< FIFO buffer of video frame references
-    enum PixelFormat *pix_fmts;  ///< accepted pixel formats, must be terminated with -1
+    AVFifoBuffer *fifo;                      ///< FIFO buffer of video frame references
+
+    /* only used for video */
+    const enum PixelFormat *pixel_fmts;      ///< list of accepted pixel formats, must be terminated with -1
+
+    /* only used for audio */
+    const enum AVSampleFormat *sample_fmts;  ///< list of accepted sample formats, terminated by AV_SAMPLE_FMT_NONE
+    const int64_t *channel_layouts;          ///< list of accepted channel layouts, terminated by -1
+    const int *packing_fmts;                 ///< list of accepted packing formats, terminated by -1
 } BufferSinkContext;
 
 #define FIFO_INIT_SIZE 8
 
-static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
+static av_cold int common_init(AVFilterContext *ctx)
 {
     BufferSinkContext *buf = ctx->priv;
 
-    if (!opaque) {
-        av_log(ctx, AV_LOG_ERROR, "No opaque field provided, which is required.\n");
-        return AVERROR(EINVAL);
-    }
-
     buf->fifo = av_fifo_alloc(FIFO_INIT_SIZE*sizeof(AVFilterBufferRef *));
     if (!buf->fifo) {
         av_log(ctx, AV_LOG_ERROR, "Failed to allocate fifo\n");
         return AVERROR(ENOMEM);
     }
-
-    buf->pix_fmts = opaque;
     return 0;
 }
 
-static av_cold void uninit(AVFilterContext *ctx)
+static av_cold void common_uninit(AVFilterContext *ctx)
 {
     BufferSinkContext *buf = ctx->priv;
     AVFilterBufferRef *picref;
@@ -88,21 +115,13 @@ static void end_frame(AVFilterLink *inlink)
                           &inlink->cur_buf, sizeof(AVFilterBufferRef *), NULL);
 }
 
-static int query_formats(AVFilterContext *ctx)
-{
-    BufferSinkContext *buf = ctx->priv;
-
-    avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(buf->pix_fmts));
-    return 0;
-}
-
-int av_vsink_buffer_get_video_buffer_ref(AVFilterContext *ctx,
-                                         AVFilterBufferRef **picref, int flags)
+int av_buffersink_get_buffer_ref(AVFilterContext *ctx,
+                                 AVFilterBufferRef **bufref, int flags)
 {
     BufferSinkContext *buf = ctx->priv;
     AVFilterLink *inlink = ctx->inputs[0];
     int ret;
-    *picref = NULL;
+    *bufref = NULL;
 
     /* no picref available, fetch it from the filterchain */
     if (!av_fifo_size(buf->fifo)) {
@@ -113,11 +132,50 @@ int av_vsink_buffer_get_video_buffer_ref(AVFilterContext *ctx,
     if (!av_fifo_size(buf->fifo))
         return AVERROR(EINVAL);
 
-    if (flags & AV_VSINK_BUF_FLAG_PEEK)
-        *picref = *((AVFilterBufferRef **)av_fifo_peek2(buf->fifo, 0));
+    if (flags & AV_BUFFERSINK_FLAG_PEEK)
+        *bufref = *((AVFilterBufferRef **)av_fifo_peek2(buf->fifo, 0));
     else
-        av_fifo_generic_read(buf->fifo, picref, sizeof(*picref), NULL);
+        av_fifo_generic_read(buf->fifo, bufref, sizeof(*bufref), NULL);
+
+    return 0;
+}
+
+#if FF_API_OLD_VSINK_API
+int av_vsink_buffer_get_video_buffer_ref(AVFilterContext *ctx,
+                                         AVFilterBufferRef **picref, int flags)
+{
+    return av_buffersink_get_buffer_ref(ctx, picref, flags);
+}
+#endif
+
+#if CONFIG_BUFFERSINK_FILTER
+
+static av_cold int vsink_init(AVFilterContext *ctx, const char *args, void *opaque)
+{
+    BufferSinkContext *buf = ctx->priv;
+    av_unused AVBufferSinkParams *params;
+
+    if (!opaque) {
+        av_log(ctx, AV_LOG_ERROR,
+               "No opaque field provided\n");
+        return AVERROR(EINVAL);
+    } else {
+#if FF_API_OLD_VSINK_API
+        buf->pixel_fmts = (const enum PixelFormats *)opaque;
+#else
+        params = (AVBufferSinkParams *)opaque;
+        buf->pixel_fmts = params->pixel_fmts;
+#endif
+    }
+
+    return common_init(ctx);
+}
+
+static int vsink_query_formats(AVFilterContext *ctx)
+{
+    BufferSinkContext *buf = ctx->priv;
+    avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(buf->pixel_fmts));
     return 0;
 }
 
@@ -125,10 +183,10 @@ AVFilter avfilter_vsink_buffersink = {
     .name        = "buffersink",
     .description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."),
     .priv_size   = sizeof(BufferSinkContext),
-    .init        = init,
-    .uninit      = uninit,
+    .init        = vsink_init,
+    .uninit      = common_uninit,
 
-    .query_formats = query_formats,
+    .query_formats = vsink_query_formats,
 
     .inputs      = (AVFilterPad[]) {{ .name          = "default",
                                       .type          = AVMEDIA_TYPE_VIDEO,
@@ -137,3 +195,69 @@ AVFilter avfilter_vsink_buffersink = {
                                     { .name = NULL }},
     .outputs     = (AVFilterPad[]) {{ .name = NULL }},
 };
+
+#endif /* CONFIG_BUFFERSINK_FILTER */
+
+#if CONFIG_ABUFFERSINK_FILTER
+
+static void filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
+{
+    end_frame(link);
+}
+
+static av_cold int asink_init(AVFilterContext *ctx, const char *args, void *opaque)
+{
+    BufferSinkContext *buf = ctx->priv;
+    AVABufferSinkParams *params;
+
+    if (!opaque) {
+        av_log(ctx, AV_LOG_ERROR,
+               "No opaque field provided, an AVABufferSinkParams struct is required\n");
+        return AVERROR(EINVAL);
+    } else
+        params = (AVABufferSinkParams *)opaque;
+
+    buf->sample_fmts     = params->sample_fmts;
+    buf->channel_layouts = params->channel_layouts;
+    buf->packing_fmts    = params->packing_fmts;
+
+    return common_init(ctx);
+}
+
+static int asink_query_formats(AVFilterContext *ctx)
+{
+    BufferSinkContext *buf = ctx->priv;
+    AVFilterFormats *formats = NULL;
+
+    if (!(formats = avfilter_make_format_list(buf->sample_fmts)))
+        return AVERROR(ENOMEM);
+    avfilter_set_common_sample_formats(ctx, formats);
+
+    if (!(formats = avfilter_make_format64_list(buf->channel_layouts)))
+        return AVERROR(ENOMEM);
+    avfilter_set_common_channel_layouts(ctx, formats);
+
+    if (!(formats = avfilter_make_format_list(buf->packing_fmts)))
+        return AVERROR(ENOMEM);
+    avfilter_set_common_packing_formats(ctx, formats);
+
+    return 0;
+}
+
+AVFilter avfilter_asink_abuffersink = {
+    .name        = "abuffersink",
+    .description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."),
+    .init        = asink_init,
+    .uninit      = common_uninit,
+    .priv_size   = sizeof(BufferSinkContext),
+    .query_formats = asink_query_formats,
+
+    .inputs      = (AVFilterPad[]) {{ .name           = "default",
                                       .type           = AVMEDIA_TYPE_AUDIO,
+                                      .filter_samples = filter_samples,
+                                      .min_perms      = AV_PERM_READ, },
+                                    { .name = NULL }},
+    .outputs     = (AVFilterPad[]) {{ .name = NULL }},
+};
+
+#endif /* CONFIG_ABUFFERSINK_FILTER */
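
The new abuffersink filter always expects its opaque init argument to be an AVABufferSinkParams struct, as asink_init() above shows. Below is a rough sketch of how an application might instantiate it in an existing graph; the helper name, the chosen sample format, channel layout and packing constant (AVFILTER_PACKED, from libavfilter's packing-format negotiation of this era), the header location, and the assumption that avfilter_register_all() has already been called are all illustrative, not part of the commit.

```c
#include <libavfilter/avfilter.h>
#include <libavfilter/vsink_buffer.h>  /* assumed location of av_abuffersink_params_alloc() */
#include <libavutil/audioconvert.h>
#include <libavutil/mem.h>
#include <libavutil/samplefmt.h>

/* Illustrative helper: add an "abuffersink" instance to an existing graph,
 * constrained to packed signed 16-bit stereo. asink_init() only stores the
 * list pointers from the params struct, so the lists must outlive the
 * filter, while the struct itself can be freed right away. */
static int create_audio_sink(AVFilterGraph *graph, AVFilterContext **sink_ctx)
{
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
    static const int64_t channel_layouts[]         = { AV_CH_LAYOUT_STEREO, -1 };
    static const int packing_fmts[]                = { AVFILTER_PACKED, -1 };
    AVABufferSinkParams *params;
    int ret;

    params = av_abuffersink_params_alloc();
    if (!params)
        return AVERROR(ENOMEM);
    params->sample_fmts     = sample_fmts;
    params->channel_layouts = channel_layouts;
    params->packing_fmts    = packing_fmts;

    /* avfilter_register_all() is assumed to have been called already. */
    ret = avfilter_graph_create_filter(sink_ctx, avfilter_get_by_name("abuffersink"),
                                       "out", NULL, params, graph);
    av_freep(&params);
    return ret;
}
```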