author | Paul B Mahol <onemda@gmail.com> | 2019-05-03 20:17:28 +0200 |
---|---|---|
committer | Paul B Mahol <onemda@gmail.com> | 2019-05-05 19:51:44 +0200 |
commit | 73f234fdf8d096ba2ab2cfd548b316937bf706b6 (patch) | |
tree | a098b321475dd37cf0cd68dbbf3f1e45f813bdc1 /libavfilter | |
parent | fcc01ba36a7a35d4e962f0e2a2a4739e890ba5cf (diff) | |
download | ffmpeg-73f234fdf8d096ba2ab2cfd548b316937bf706b6.tar.gz | |
avfilter/f_interleave: switch to activate
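Context for the change (not part of the commit message): an AVFilter.activate callback replaces the per-pad filter_frame()/request_frame() callbacks with a single entry point that pulls frames itself through the ff_inlink_*()/ff_outlink_*() helpers from libavfilter/filters.h. The sketch below only illustrates the general shape of such a callback for a hypothetical single-input pass-through filter; the function name and comments are assumptions for illustration, not code from this commit.

#include "avfilter.h"
#include "filters.h"
#include "internal.h"

/* Minimal activate() sketch for a hypothetical 1-in/1-out pass-through
 * filter; it would be wired up with .activate = passthrough_activate. */
static int passthrough_activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *frame;
    int ret;

    /* Propagate EOF/error posted on the output back to the input. */
    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    /* If a frame is queued on the input link, send it downstream. */
    ret = ff_inlink_consume_frame(inlink, &frame);
    if (ret < 0)
        return ret;
    if (ret > 0)
        return ff_filter_frame(outlink, frame);

    /* No frame queued: forward the input's EOF, or ask for more input
     * if the output wants a frame; both macros return from activate(). */
    FF_FILTER_FORWARD_STATUS(inlink, outlink);
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}

The new interleave activate() in the diff below follows the same pattern, generalized to N dynamic inputs: forward status back to all inputs, consume the queued frame with the smallest rescaled pts, request frames from inputs whose queues are empty, and set EOF on the output only once every input has reached EOF.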
Diffstat (limited to 'libavfilter')
-rw-r--r-- | libavfilter/f_interleave.c | 137 |
1 file changed, 63 insertions, 74 deletions
diff --git a/libavfilter/f_interleave.c b/libavfilter/f_interleave.c
index d8a73b52e5..06f4cda798 100644
--- a/libavfilter/f_interleave.c
+++ b/libavfilter/f_interleave.c
@@ -27,12 +27,9 @@
 #include "libavutil/avstring.h"
 #include "libavutil/opt.h"
 
-#define FF_INTERNAL_FIELDS 1
-#include "framequeue.h"
-
 #include "avfilter.h"
-#include "bufferqueue.h"
 #include "formats.h"
+#include "filters.h"
 #include "internal.h"
 #include "audio.h"
 #include "video.h"
@@ -40,7 +37,7 @@
 typedef struct InterleaveContext {
     const AVClass *class;
     int nb_inputs;
-    struct FFBufQueue *queues;
+    int64_t pts;
 } InterleaveContext;
 
 #define OFFSET(x) offsetof(InterleaveContext, x)
@@ -52,58 +49,78 @@ static const AVOption filt_name##_options[] = {                     \
    { NULL }                                                         \
 }
 
-inline static int push_frame(AVFilterContext *ctx)
+static int activate(AVFilterContext *ctx)
 {
+    AVFilterLink *outlink = ctx->outputs[0];
     InterleaveContext *s = ctx->priv;
-    AVFrame *frame;
-    int i, queue_idx = -1;
-    int64_t pts_min = INT64_MAX;
+    int64_t q_pts, pts = INT64_MAX;
+    int i, nb_eofs = 0, input_idx = -1;
+
+    FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);
 
-    /* look for oldest frame */
     for (i = 0; i < ctx->nb_inputs; i++) {
-        struct FFBufQueue *q = &s->queues[i];
+        if (!ff_outlink_get_status(ctx->inputs[i]) &&
+            !ff_inlink_queued_frames(ctx->inputs[i]))
+            break;
+    }
 
-        if (!q->available && !ctx->inputs[i]->status_out)
-            return 0;
-        if (q->available) {
-            frame = ff_bufqueue_peek(q, 0);
-            if (frame->pts < pts_min) {
-                pts_min = frame->pts;
-                queue_idx = i;
+    if (i == ctx->nb_inputs) {
+        for (i = 0; i < ctx->nb_inputs; i++) {
+            AVFrame *frame;
+
+            if (ff_outlink_get_status(ctx->inputs[i]))
+                continue;
+
+            frame = ff_inlink_peek_frame(ctx->inputs[i], 0);
+            if (frame->pts == AV_NOPTS_VALUE) {
+                int ret;
+
+                av_log(ctx, AV_LOG_WARNING,
+                       "NOPTS value for input frame cannot be accepted, frame discarded\n");
+                ret = ff_inlink_consume_frame(ctx->inputs[i], &frame);
+                if (ret < 0)
+                    return ret;
+                av_frame_free(&frame);
+                return AVERROR_INVALIDDATA;
+            }
+
+            q_pts = av_rescale_q(frame->pts, ctx->inputs[i]->time_base, AV_TIME_BASE_Q);
+            if (q_pts < pts) {
+                pts = q_pts;
+                input_idx = i;
             }
         }
-    }
 
-    /* all inputs are closed */
-    if (queue_idx < 0)
-        return AVERROR_EOF;
+        if (input_idx >= 0) {
+            AVFrame *frame;
+            int ret;
 
-    frame = ff_bufqueue_get(&s->queues[queue_idx]);
-    av_log(ctx, AV_LOG_DEBUG, "queue:%d -> frame time:%f\n",
-           queue_idx, frame->pts * av_q2d(AV_TIME_BASE_Q));
-    return ff_filter_frame(ctx->outputs[0], frame);
-}
+            ret = ff_inlink_consume_frame(ctx->inputs[input_idx], &frame);
+            if (ret < 0)
+                return ret;
 
-static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
-{
-    AVFilterContext *ctx = inlink->dst;
-    InterleaveContext *s = ctx->priv;
-    unsigned in_no = FF_INLINK_IDX(inlink);
+            frame->pts = s->pts = pts;
+            return ff_filter_frame(outlink, frame);
+        }
+    }
 
-    if (frame->pts == AV_NOPTS_VALUE) {
-        av_log(ctx, AV_LOG_WARNING,
-               "NOPTS value for input frame cannot be accepted, frame discarded\n");
-        av_frame_free(&frame);
-        return AVERROR_INVALIDDATA;
+    for (i = 0; i < ctx->nb_inputs; i++) {
+        if (ff_inlink_queued_frames(ctx->inputs[i]))
+            continue;
+        if (ff_outlink_frame_wanted(outlink) &&
+            !ff_outlink_get_status(ctx->inputs[i])) {
+            ff_inlink_request_frame(ctx->inputs[i]);
+            return 0;
+        }
+        nb_eofs++;
    }
 
-    /* queue frame */
-    frame->pts = av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q);
-    av_log(ctx, AV_LOG_DEBUG, "frame pts:%f -> queue idx:%d available:%d\n",
-           frame->pts * av_q2d(AV_TIME_BASE_Q), in_no, s->queues[in_no].available);
-    ff_bufqueue_add(ctx, &s->queues[in_no], frame);
+    if (nb_eofs == ctx->nb_inputs) {
+        ff_outlink_set_status(outlink, AVERROR_EOF, s->pts);
+        return 0;
+    }
 
-    return push_frame(ctx);
+    return FFERROR_NOT_READY;
 }
 
 static av_cold int init(AVFilterContext *ctx)
@@ -112,10 +129,6 @@ static av_cold int init(AVFilterContext *ctx)
     const AVFilterPad *outpad = &ctx->filter->outputs[0];
     int i, ret;
 
-    s->queues = av_calloc(s->nb_inputs, sizeof(s->queues[0]));
-    if (!s->queues)
-        return AVERROR(ENOMEM);
-
     for (i = 0; i < s->nb_inputs; i++) {
         AVFilterPad inpad = { 0 };
 
@@ -123,7 +136,6 @@
         if (!inpad.name)
             return AVERROR(ENOMEM);
         inpad.type = outpad->type;
-        inpad.filter_frame = filter_frame;
 
         switch (outpad->type) {
         case AVMEDIA_TYPE_VIDEO:
@@ -144,14 +156,8 @@ static av_cold int init(AVFilterContext *ctx)
 
 static av_cold void uninit(AVFilterContext *ctx)
 {
-    InterleaveContext *s = ctx->priv;
-    int i;
-
-    for (i = 0; i < ctx->nb_inputs; i++) {
-        ff_bufqueue_discard_all(&s->queues[i]);
-        av_freep(&s->queues[i]);
+    for (int i = 0; i < ctx->nb_inputs; i++)
         av_freep(&ctx->input_pads[i].name);
-    }
 }
 
 static int config_output(AVFilterLink *outlink)
@@ -190,23 +196,6 @@ static int config_output(AVFilterLink *outlink)
     return 0;
 }
 
-static int request_frame(AVFilterLink *outlink)
-{
-    AVFilterContext *ctx = outlink->src;
-    InterleaveContext *s = ctx->priv;
-    int i, ret;
-
-    for (i = 0; i < ctx->nb_inputs; i++) {
-        if (!s->queues[i].available && !ctx->inputs[i]->status_out) {
-            ret = ff_request_frame(ctx->inputs[i]);
-            if (ret != AVERROR_EOF)
-                return ret;
-        }
-    }
-
-    return push_frame(ctx);
-}
-
 #if CONFIG_INTERLEAVE_FILTER
 
 DEFINE_OPTIONS(interleave, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
@@ -217,7 +206,6 @@ static const AVFilterPad interleave_outputs[] = {
         .name          = "default",
         .type          = AVMEDIA_TYPE_VIDEO,
         .config_props  = config_output,
-        .request_frame = request_frame,
     },
     { NULL }
 };
@@ -228,6 +216,7 @@ AVFilter ff_vf_interleave = {
     .priv_size   = sizeof(InterleaveContext),
     .init        = init,
     .uninit      = uninit,
+    .activate    = activate,
     .outputs     = interleave_outputs,
     .priv_class  = &interleave_class,
     .flags       = AVFILTER_FLAG_DYNAMIC_INPUTS,
@@ -245,7 +234,6 @@ static const AVFilterPad ainterleave_outputs[] = {
         .name          = "default",
         .type          = AVMEDIA_TYPE_AUDIO,
         .config_props  = config_output,
-        .request_frame = request_frame,
     },
     { NULL }
 };
@@ -256,6 +244,7 @@ AVFilter ff_af_ainterleave = {
     .priv_size   = sizeof(InterleaveContext),
     .init        = init,
     .uninit      = uninit,
+    .activate    = activate,
     .outputs     = ainterleave_outputs,
     .priv_class  = &ainterleave_class,
     .flags       = AVFILTER_FLAG_DYNAMIC_INPUTS,
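A worked example (not from the commit) of the comparison the new activate() performs: each input's head-frame pts is rescaled to AV_TIME_BASE_Q (microseconds) with av_rescale_q() before the smallest value is chosen, so inputs with different time bases interleave in presentation order. The standalone program below uses made-up time bases and pts values purely for illustration.

#include <inttypes.h>
#include <stdio.h>
#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

int main(void)
{
    /* Hypothetical head frames queued on two inputs. */
    AVRational tb0 = { 1, 25 };  /* input 0: 25 fps video time base */
    AVRational tb1 = { 1, 50 };  /* input 1: finer time base */

    int64_t pts0 = av_rescale_q(3, tb0, AV_TIME_BASE_Q);  /* 3/25 s -> 120000 us */
    int64_t pts1 = av_rescale_q(2, tb1, AV_TIME_BASE_Q);  /* 2/50 s ->  40000 us */

    /* activate() consumes the frame with the smaller rescaled pts first,
     * so input 1's frame is sent downstream before input 0's. */
    printf("input0: %"PRId64" us, input1: %"PRId64" us\n", pts0, pts1);
    return 0;
}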