author    Nicolas George <george@nsup.org>    2016-01-03 15:44:42 +0100
committer Nicolas George <george@nsup.org>    2016-12-18 10:38:52 +0100
commit    02aa0701ae0dc2def8db640c9e3c06dc1b5de70c (patch)
tree      d36bc5207cb7b5a5cbfd1a8ac9c1dbae90255020
parent    62b11db0a08cbb8c338e413a0d1707a8c81ae24e (diff)
download  ffmpeg-02aa0701ae0dc2def8db640c9e3c06dc1b5de70c.tar.gz
lavfi: make filter_frame non-recursive.
A lot of changes happen at the same time:
- Add a framequeue fifo to AVFilterLink.
- Split AVFilterLink.status into status_in and status_out; this requires
changes to the few filters and programs that use it directly
(f_interleave, split, filtfmts).
- Add a ready field to AVFilterContext, marking whether the filter needs
activating and with what priority.
- Add flags to mark blocked links.
- Change ff_filter_frame() to enqueue the frame.
- Change all filtering functions to update the ready field and the
blocked flags.
- Update ff_filter_graph_run_once() to use the ready field.
- buffersrc: always push the frame immediately.
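The heart of the new push model is the rewritten ff_filter_graph_run_once(): instead of scanning the graph for frame_wanted flags and recursing through request_frame(), it picks the filter with the highest ready priority and activates it once. A condensed sketch of the new scheduling loop, taken from the patch below (comments added):

    int ff_filter_graph_run_once(AVFilterGraph *graph)
    {
        AVFilterContext *filter;
        unsigned i;

        /* Pick the filter with the highest activation priority;
           the patch notes that a priority queue may eventually replace this scan. */
        av_assert0(graph->nb_filters);
        filter = graph->filters[0];
        for (i = 1; i < graph->nb_filters; i++)
            if (graph->filters[i]->ready > filter->ready)
                filter = graph->filters[i];
        if (!filter->ready)
            return AVERROR(EAGAIN);
        return ff_filter_activate(filter);
    }

    int ff_filter_activate(AVFilterContext *filter)
    {
        int ret;

        /* Clear the flag first: processing may mark the filter ready again. */
        filter->ready = 0;
        ret = ff_filter_activate_default(filter);
        if (ret == FFERROR_NOT_READY)
            ret = 0;
        return ret;
    }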
 libavfilter/avfilter.c         | 457
 libavfilter/avfilter.h         |  71
 libavfilter/avfiltergraph.c    |  53
 libavfilter/buffersink.c       |  21
 libavfilter/buffersrc.c        |   6
 libavfilter/f_interleave.c     |   8
 libavfilter/internal.h         |   6
 libavfilter/split.c            |   5
 libavfilter/tests/filtfmts.c   |   3
 libavfilter/vf_extractplanes.c |   6
 10 files changed, 493 insertions(+), 143 deletions(-)
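The key behavioral change is in ff_filter_frame(): it no longer calls the destination's filter_frame() callback directly, it only queues the frame on the link and raises the destination's priority, leaving the actual processing to a later activation. Reduced to its essentials (the existing consistency checks are elided here), the new body is roughly:

    int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
    {
        int ret;

        /* ... consistency checks elided ... */
        link->frame_blocked_in = link->frame_wanted_out = 0;
        link->frame_count_in++;
        /* A new frame on an input may let the filter produce output. */
        filter_unblock(link->dst);
        ret = ff_framequeue_add(&link->fifo, frame);
        if (ret < 0) {
            av_frame_free(&frame);
            return ret;
        }
        /* 300 is the highest of the three priorities used by the patch
           (100: frame wanted, 200: status change, 300: frames queued). */
        ff_filter_set_ready(link->dst, 300);
        return 0;
    }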
diff --git a/libavfilter/avfilter.c b/libavfilter/avfilter.c index 6f30e3bcfe..c2a84136b3 100644 --- a/libavfilter/avfilter.c +++ b/libavfilter/avfilter.c @@ -34,6 +34,9 @@ #include "libavutil/rational.h" #include "libavutil/samplefmt.h" +#define FF_INTERNAL_FIELDS 1 +#include "framequeue.h" + #include "audio.h" #include "avfilter.h" #include "formats.h" @@ -135,6 +138,10 @@ int avfilter_link(AVFilterContext *src, unsigned srcpad, { AVFilterLink *link; + av_assert0(src->graph); + av_assert0(dst->graph); + av_assert0(src->graph == dst->graph); + if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad || src->outputs[srcpad] || dst->inputs[dstpad]) return AVERROR(EINVAL); @@ -160,6 +167,7 @@ int avfilter_link(AVFilterContext *src, unsigned srcpad, link->type = src->output_pads[srcpad].type; av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1); link->format = -1; + ff_framequeue_init(&link->fifo, &src->graph->internal->frame_queues); return 0; } @@ -170,6 +178,7 @@ void avfilter_link_free(AVFilterLink **link) return; av_frame_free(&(*link)->partial_buf); + ff_framequeue_free(&(*link)->fifo); ff_video_frame_pool_uninit((FFVideoFramePool**)&(*link)->video_frame_pool); av_freep(link); @@ -180,16 +189,46 @@ int avfilter_link_get_channels(AVFilterLink *link) return link->channels; } +static void ff_filter_set_ready(AVFilterContext *filter, unsigned priority) +{ + filter->ready = FFMAX(filter->ready, priority); +} + +/** + * Clear frame_blocked_in on all outputs. + * This is necessary whenever something changes on input. + */ +static void filter_unblock(AVFilterContext *filter) +{ + unsigned i; + + for (i = 0; i < filter->nb_outputs; i++) + filter->outputs[i]->frame_blocked_in = 0; +} + + void ff_avfilter_link_set_in_status(AVFilterLink *link, int status, int64_t pts) { - ff_avfilter_link_set_out_status(link, status, pts); + if (link->status_in == status) + return; + av_assert0(!link->status_in); + link->status_in = status; + link->status_in_pts = pts; + link->frame_wanted_out = 0; + link->frame_blocked_in = 0; + filter_unblock(link->dst); + ff_filter_set_ready(link->dst, 200); } void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts) { - link->status = status; - link->frame_wanted_in = link->frame_wanted_out = 0; - ff_update_link_current_pts(link, pts); + av_assert0(!link->frame_wanted_out); + av_assert0(!link->status_out); + link->status_out = status; + if (pts != AV_NOPTS_VALUE) + ff_update_link_current_pts(link, pts); + filter_unblock(link->dst); + ff_filter_set_ready(link->src, 200); } void avfilter_link_set_closed(AVFilterLink *link, int closed) @@ -370,10 +409,23 @@ int ff_request_frame(AVFilterLink *link) { FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1); - if (link->status) - return link->status; - link->frame_wanted_in = 1; + if (link->status_out) + return link->status_out; + if (link->status_in) { + if (ff_framequeue_queued_frames(&link->fifo)) { + av_assert1(!link->frame_wanted_out); + av_assert1(link->dst->ready >= 300); + return 0; + } else { + /* Acknowledge status change. Filters using ff_request_frame() will + handle the change automatically. Filters can also check the + status directly but none do yet. 
*/ + ff_avfilter_link_set_out_status(link, link->status_in, link->status_in_pts); + return link->status_out; + } + } link->frame_wanted_out = 1; + ff_filter_set_ready(link->src, 100); return 0; } @@ -382,22 +434,17 @@ int ff_request_frame_to_filter(AVFilterLink *link) int ret = -1; FF_TPRINTF_START(NULL, request_frame_to_filter); ff_tlog_link(NULL, link, 1); - link->frame_wanted_in = 0; + /* Assume the filter is blocked, let the method clear it if not */ + link->frame_blocked_in = 1; if (link->srcpad->request_frame) ret = link->srcpad->request_frame(link); else if (link->src->inputs[0]) ret = ff_request_frame(link->src->inputs[0]); - if (ret == AVERROR_EOF && link->partial_buf) { - AVFrame *pbuf = link->partial_buf; - link->partial_buf = NULL; - ret = ff_filter_frame_framed(link, pbuf); - ff_avfilter_link_set_in_status(link, AVERROR_EOF, AV_NOPTS_VALUE); - link->frame_wanted_out = 0; - return ret; - } if (ret < 0) { - if (ret != AVERROR(EAGAIN) && ret != link->status) + if (ret != AVERROR(EAGAIN) && ret != link->status_in) ff_avfilter_link_set_in_status(link, ret, AV_NOPTS_VALUE); + if (ret == AVERROR_EOF) + ret = 0; } return ret; } @@ -1056,11 +1103,6 @@ static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame) AVFilterCommand *cmd= link->dst->command_queue; int64_t pts; - if (link->status) { - av_frame_free(&frame); - return link->status; - } - if (!(filter_frame = dst->filter_frame)) filter_frame = default_filter_frame; @@ -1142,52 +1184,9 @@ fail: return ret; } -static int ff_filter_frame_needs_framing(AVFilterLink *link, AVFrame *frame) -{ - int insamples = frame->nb_samples, inpos = 0, nb_samples; - AVFrame *pbuf = link->partial_buf; - int nb_channels = av_frame_get_channels(frame); - int ret = 0; - - /* Handle framing (min_samples, max_samples) */ - while (insamples) { - if (!pbuf) { - AVRational samples_tb = { 1, link->sample_rate }; - pbuf = ff_get_audio_buffer(link, link->partial_buf_size); - if (!pbuf) { - av_log(link->dst, AV_LOG_WARNING, - "Samples dropped due to memory allocation failure.\n"); - return 0; - } - av_frame_copy_props(pbuf, frame); - pbuf->pts = frame->pts; - if (pbuf->pts != AV_NOPTS_VALUE) - pbuf->pts += av_rescale_q(inpos, samples_tb, link->time_base); - pbuf->nb_samples = 0; - } - nb_samples = FFMIN(insamples, - link->partial_buf_size - pbuf->nb_samples); - av_samples_copy(pbuf->extended_data, frame->extended_data, - pbuf->nb_samples, inpos, - nb_samples, nb_channels, link->format); - inpos += nb_samples; - insamples -= nb_samples; - pbuf->nb_samples += nb_samples; - if (pbuf->nb_samples >= link->min_samples) { - ret = ff_filter_frame_framed(link, pbuf); - pbuf = NULL; - } else { - if (link->frame_wanted_out) - link->frame_wanted_in = 1; - } - } - av_frame_free(&frame); - link->partial_buf = pbuf; - return ret; -} - int ff_filter_frame(AVFilterLink *link, AVFrame *frame) { + int ret; FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1); /* Consistency checks */ @@ -1220,23 +1219,329 @@ int ff_filter_frame(AVFilterLink *link, AVFrame *frame) } } - link->frame_wanted_out = 0; + link->frame_blocked_in = link->frame_wanted_out = 0; link->frame_count_in++; - /* Go directly to actual filtering if possible */ - if (link->type == AVMEDIA_TYPE_AUDIO && - link->min_samples && - (link->partial_buf || - frame->nb_samples < link->min_samples || - frame->nb_samples > link->max_samples)) { - return ff_filter_frame_needs_framing(link, frame); - } else { - return ff_filter_frame_framed(link, frame); + 
filter_unblock(link->dst); + ret = ff_framequeue_add(&link->fifo, frame); + if (ret < 0) { + av_frame_free(&frame); + return ret; } + ff_filter_set_ready(link->dst, 300); + return 0; + error: av_frame_free(&frame); return AVERROR_PATCHWELCOME; } +static int samples_ready(AVFilterLink *link) +{ + return ff_framequeue_queued_frames(&link->fifo) && + (ff_framequeue_queued_samples(&link->fifo) >= link->min_samples || + link->status_in); +} + +static int take_samples(AVFilterLink *link, unsigned min, unsigned max, + AVFrame **rframe) +{ + AVFrame *frame0, *frame, *buf; + unsigned nb_samples, nb_frames, i, p; + int ret; + + /* Note: this function relies on no format changes and must only be + called with enough samples. */ + av_assert1(samples_ready(link)); + frame0 = frame = ff_framequeue_peek(&link->fifo, 0); + if (frame->nb_samples >= min && frame->nb_samples < max) { + *rframe = ff_framequeue_take(&link->fifo); + return 0; + } + nb_frames = 0; + nb_samples = 0; + while (1) { + if (nb_samples + frame->nb_samples > max) { + if (nb_samples < min) + nb_samples = max; + break; + } + nb_samples += frame->nb_samples; + nb_frames++; + if (nb_frames == ff_framequeue_queued_frames(&link->fifo)) + break; + frame = ff_framequeue_peek(&link->fifo, nb_frames); + } + + buf = ff_get_audio_buffer(link, nb_samples); + if (!buf) + return AVERROR(ENOMEM); + ret = av_frame_copy_props(buf, frame0); + if (ret < 0) { + av_frame_free(&buf); + return ret; + } + buf->pts = frame0->pts; + + p = 0; + for (i = 0; i < nb_frames; i++) { + frame = ff_framequeue_take(&link->fifo); + av_samples_copy(buf->extended_data, frame->extended_data, p, 0, + frame->nb_samples, link->channels, link->format); + p += frame->nb_samples; + } + if (p < nb_samples) { + unsigned n = nb_samples - p; + frame = ff_framequeue_peek(&link->fifo, 0); + av_samples_copy(buf->extended_data, frame->extended_data, p, 0, n, + link->channels, link->format); + frame->nb_samples -= n; + av_samples_copy(frame->extended_data, frame->extended_data, 0, n, + frame->nb_samples, link->channels, link->format); + if (frame->pts != AV_NOPTS_VALUE) + frame->pts += av_rescale_q(n, av_make_q(1, link->sample_rate), link->time_base); + ff_framequeue_update_peeked(&link->fifo, 0); + ff_framequeue_skip_samples(&link->fifo, n); + } + + *rframe = buf; + return 0; +} + +int ff_filter_frame_to_filter(AVFilterLink *link) +{ + AVFrame *frame; + AVFilterContext *dst = link->dst; + int ret; + + av_assert1(ff_framequeue_queued_frames(&link->fifo)); + if (link->min_samples) { + int min = link->min_samples; + if (link->status_in) + min = FFMIN(min, ff_framequeue_queued_samples(&link->fifo)); + ret = take_samples(link, min, link->max_samples, &frame); + if (ret < 0) + return ret; + } else { + frame = ff_framequeue_take(&link->fifo); + } + /* The filter will soon have received a new frame, that may allow it to + produce one or more: unblock its outputs. */ + filter_unblock(dst); + ret = ff_filter_frame_framed(link, frame); + if (ret < 0 && ret != link->status_out) { + ff_avfilter_link_set_out_status(link, ret, AV_NOPTS_VALUE); + } else { + /* Run once again, to see if several frames were available, or if + the input status has also changed, or any other reason. 
*/ + ff_filter_set_ready(dst, 300); + } + return ret; +} + +static int forward_status_change(AVFilterContext *filter, AVFilterLink *in) +{ + unsigned out = 0, progress = 0; + int ret; + + av_assert0(!in->status_out); + if (!filter->nb_outputs) { + /* not necessary with the current API and sinks */ + return 0; + } + while (!in->status_out) { + if (!filter->outputs[out]->status_in) { + progress++; + ret = ff_request_frame_to_filter(filter->outputs[out]); + if (ret < 0) + return ret; + } + if (++out == filter->nb_outputs) { + if (!progress) { + /* Every output already closed: input no longer interesting + (example: overlay in shortest mode, other input closed). */ + ff_avfilter_link_set_out_status(in, in->status_in, in->status_in_pts); + return 0; + } + progress = 0; + out = 0; + } + } + ff_filter_set_ready(filter, 200); + return 0; +} + +#define FFERROR_NOT_READY FFERRTAG('N','R','D','Y') + +static int ff_filter_activate_default(AVFilterContext *filter) +{ + unsigned i; + + for (i = 0; i < filter->nb_inputs; i++) { + if (samples_ready(filter->inputs[i])) { + return ff_filter_frame_to_filter(filter->inputs[i]); + } + } + for (i = 0; i < filter->nb_inputs; i++) { + if (filter->inputs[i]->status_in && !filter->inputs[i]->status_out) { + av_assert1(!ff_framequeue_queued_frames(&filter->inputs[i]->fifo)); + return forward_status_change(filter, filter->inputs[i]); + } + } + for (i = 0; i < filter->nb_outputs; i++) { + if (filter->outputs[i]->frame_wanted_out && + !filter->outputs[i]->frame_blocked_in) { + return ff_request_frame_to_filter(filter->outputs[i]); + } + } + return FFERROR_NOT_READY; +} + +/* + Filter scheduling and activation + + When a filter is activated, it must: + - if possible, output a frame; + - else, if relevant, forward the input status change; + - else, check outputs for wanted frames and forward the requests. + + The following AVFilterLink fields are used for activation: + + - frame_wanted_out: + + This field indicates if a frame is needed on this input of the + destination filter. A positive value indicates that a frame is needed + to process queued frames or internal data or to satisfy the + application; a zero value indicates that a frame is not especially + needed but could be processed anyway; a negative value indicates that a + frame would just be queued. + + It is set by filters using ff_request_frame() or ff_request_no_frame(), + when requested by the application through a specific API or when it is + set on one of the outputs. + + It is cleared when a frame is sent from the source using + ff_filter_frame(). + + It is also cleared when a status change is sent from the source using + ff_avfilter_link_set_in_status(). + + - frame_blocked_in: + + This field means that the source filter can not generate a frame as is. + Its goal is to avoid repeatedly calling the request_frame() method on + the same link. + + It is set by the framework on all outputs of a filter before activating it. + + It is automatically cleared by ff_filter_frame(). + + It is also automatically cleared by ff_avfilter_link_set_in_status(). + + It is also cleared on all outputs (using filter_unblock()) when + something happens on an input: processing a frame or changing the + status. + + - fifo: + + Contains the frames queued on a filter input. If it contains frames and + frame_wanted_out is not set, then the filter can be activated. If that + result in the filter not able to use these frames, the filter must set + frame_wanted_out to ask for more frames. 
+ + - status_in and status_in_pts: + + Status (EOF or error code) of the link and timestamp of the status + change (in link time base, same as frames) as seen from the input of + the link. The status change is considered happening after the frames + queued in fifo. + + It is set by the source filter using ff_avfilter_link_set_in_status(). + + - status_out: + + Status of the link as seen from the output of the link. The status + change is considered having already happened. + + It is set by the destination filter using + ff_avfilter_link_set_out_status(). + + Filters are activated according to the ready field, set using the + ff_filter_set_ready(). Eventually, a priority queue will be used. + ff_filter_set_ready() is called whenever anything could cause progress to + be possible. Marking a filter ready when it is not is not a problem, + except for the small overhead it causes. + + Conditions that cause a filter to be marked ready are: + + - frames added on an input link; + + - changes in the input or output status of an input link; + + - requests for a frame on an output link; + + - after any actual processing using the legacy methods (filter_frame(), + and request_frame() to acknowledge status changes), to run once more + and check if enough input was present for several frames. + + Exemples of scenarios to consider: + + - buffersrc: activate if frame_wanted_out to notify the application; + activate when the application adds a frame to push it immediately. + + - testsrc: activate only if frame_wanted_out to produce and push a frame. + + - concat (not at stitch points): can process a frame on any output. + Activate if frame_wanted_out on output to forward on the corresponding + input. Activate when a frame is present on input to process it + immediately. + + - framesync: needs at least one frame on each input; extra frames on the + wrong input will accumulate. When a frame is first added on one input, + set frame_wanted_out<0 on it to avoid getting more (would trigger + testsrc) and frame_wanted_out>0 on the other to allow processing it. + + Activation of old filters: + + In order to activate a filter implementing the legacy filter_frame() and + request_frame() methods, perform the first possible of the following + actions: + + - If an input has frames in fifo and frame_wanted_out == 0, dequeue a + frame and call filter_frame(). + + Ratinale: filter frames as soon as possible instead of leaving them + queued; frame_wanted_out < 0 is not possible since the old API does not + set it nor provides any similar feedback; frame_wanted_out > 0 happens + when min_samples > 0 and there are not enough samples queued. + + - If an input has status_in set but not status_out, try to call + request_frame() on one of the outputs in the hope that it will trigger + request_frame() on the input with status_in and acknowledge it. This is + awkward and fragile, filters with several inputs or outputs should be + updated to direct activation as soon as possible. + + - If an output has frame_wanted_out > 0 and not frame_blocked_in, call + request_frame(). + + Rationale: checking frame_blocked_in is necessary to avoid requesting + repeatedly on a blocked input if another is not blocked (example: + [buffersrc1][testsrc1][buffersrc2][testsrc2]concat=v=2). + + TODO: respect needs_fifo and remove auto-inserted fifos. 
+ + */ + +int ff_filter_activate(AVFilterContext *filter) +{ + int ret; + + filter->ready = 0; + ret = ff_filter_activate_default(filter); + if (ret == FFERROR_NOT_READY) + ret = 0; + return ret; +} + const AVClass *avfilter_get_class(void) { return &avfilter_class; diff --git a/libavfilter/avfilter.h b/libavfilter/avfilter.h index d21b1445f0..828b270b6c 100644 --- a/libavfilter/avfilter.h +++ b/libavfilter/avfilter.h @@ -368,6 +368,13 @@ struct AVFilterContext { * Overrides global number of threads set per filter graph. */ int nb_threads; + + /** + * Ready status of the filter. + * A non-0 value means that the filter needs activating; + * a higher value suggests a more urgent activation. + */ + unsigned ready; }; /** @@ -509,18 +516,6 @@ struct AVFilterLink { int max_samples; /** - * Link status. - * If not zero, all attempts of filter_frame or request_frame - * will fail with the corresponding code, and if necessary the reference - * will be destroyed. - * If request_frame returns an error, the status is set on the - * corresponding link. - * It can be set also be set by either the source or the destination - * filter. - */ - int status; - - /** * Number of channels. */ int channels; @@ -541,13 +536,6 @@ struct AVFilterLink { void *video_frame_pool; /** - * True if a frame is currently wanted on the input of this filter. - * Set when ff_request_frame() is called by the output, - * cleared when the request is handled or forwarded. - */ - int frame_wanted_in; - - /** * True if a frame is currently wanted on the output of this filter. * Set when ff_request_frame() is called by the output, * cleared when a frame is filtered. @@ -559,6 +547,51 @@ struct AVFilterLink { * AVHWFramesContext describing the frames. */ AVBufferRef *hw_frames_ctx; + +#ifndef FF_INTERNAL_FIELDS + + /** + * Internal structure members. + * The fields below this limit are internal for libavfilter's use + * and must in no way be accessed by applications. + */ + char reserved[0xF000]; + +#else /* FF_INTERNAL_FIELDS */ + + /** + * Queue of frames waiting to be filtered. + */ + FFFrameQueue fifo; + + /** + * If set, the source filter can not generate a frame as is. + * The goal is to avoid repeatedly calling the request_frame() method on + * the same link. + */ + int frame_blocked_in; + + /** + * Link input status. + * If not zero, all attempts of filter_frame will fail with the + * corresponding code. + */ + int status_in; + + /** + * Timestamp of the input status change. + */ + int64_t status_in_pts; + + /** + * Link output status. + * If not zero, all attempts of request_frame will fail with the + * corresponding code. 
+ */ + int status_out; + +#endif /* FF_INTERNAL_FIELDS */ + }; /** diff --git a/libavfilter/avfiltergraph.c b/libavfilter/avfiltergraph.c index 3af698d4be..6b5a6f34ec 100644 --- a/libavfilter/avfiltergraph.c +++ b/libavfilter/avfiltergraph.c @@ -32,6 +32,9 @@ #include "libavutil/opt.h" #include "libavutil/pixdesc.h" +#define FF_INTERNAL_FIELDS 1 +#include "framequeue.h" + #include "avfilter.h" #include "formats.h" #include "internal.h" @@ -87,6 +90,7 @@ AVFilterGraph *avfilter_graph_alloc(void) ret->av_class = &filtergraph_class; av_opt_set_defaults(ret); + ff_framequeue_global_init(&ret->internal->frame_queues); return ret; } @@ -1377,10 +1381,10 @@ void ff_avfilter_graph_update_heap(AVFilterGraph *graph, AVFilterLink *link) heap_bubble_down(graph, link, link->age_index); } - int avfilter_graph_request_oldest(AVFilterGraph *graph) { AVFilterLink *oldest = graph->sink_links[0]; + int64_t frame_count; int r; while (graph->sink_links_count) { @@ -1400,7 +1404,8 @@ int avfilter_graph_request_oldest(AVFilterGraph *graph) if (!graph->sink_links_count) return AVERROR_EOF; av_assert1(oldest->age_index >= 0); - while (oldest->frame_wanted_out) { + frame_count = oldest->frame_count_out; + while (frame_count == oldest->frame_count_out) { r = ff_filter_graph_run_once(graph); if (r < 0) return r; @@ -1408,41 +1413,17 @@ int avfilter_graph_request_oldest(AVFilterGraph *graph) return 0; } -static AVFilterLink *graph_run_once_find_filter(AVFilterGraph *graph) -{ - unsigned i, j; - AVFilterContext *f; - - /* TODO: replace scanning the graph with a priority list */ - for (i = 0; i < graph->nb_filters; i++) { - f = graph->filters[i]; - for (j = 0; j < f->nb_outputs; j++) - if (f->outputs[j]->frame_wanted_in) - return f->outputs[j]; - } - for (i = 0; i < graph->nb_filters; i++) { - f = graph->filters[i]; - for (j = 0; j < f->nb_outputs; j++) - if (f->outputs[j]->frame_wanted_out) - return f->outputs[j]; - } - return NULL; -} - int ff_filter_graph_run_once(AVFilterGraph *graph) { - AVFilterLink *link; - int ret; - - link = graph_run_once_find_filter(graph); - if (!link) { - av_log(NULL, AV_LOG_WARNING, "Useless run of a filter graph\n"); + AVFilterContext *filter; + unsigned i; + + av_assert0(graph->nb_filters); + filter = graph->filters[0]; + for (i = 1; i < graph->nb_filters; i++) + if (graph->filters[i]->ready > filter->ready) + filter = graph->filters[i]; + if (!filter->ready) return AVERROR(EAGAIN); - } - ret = ff_request_frame_to_filter(link); - if (ret == AVERROR_EOF) - /* local EOF will be forwarded through request_frame() / - set_status() until it reaches the sink */ - ret = 0; - return ret < 0 ? 
ret : 1; + return ff_filter_activate(filter); } diff --git a/libavfilter/buffersink.c b/libavfilter/buffersink.c index 2feb56dee9..7b7b47747d 100644 --- a/libavfilter/buffersink.c +++ b/libavfilter/buffersink.c @@ -31,6 +31,9 @@ #include "libavutil/mathematics.h" #include "libavutil/opt.h" +#define FF_INTERNAL_FIELDS 1 +#include "framequeue.h" + #include "audio.h" #include "avfilter.h" #include "buffersink.h" @@ -129,18 +132,26 @@ int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFr { BufferSinkContext *buf = ctx->priv; AVFilterLink *inlink = ctx->inputs[0]; - int ret; + int peek_in_framequeue = 0, ret; + int64_t frame_count; AVFrame *cur_frame; /* no picref available, fetch it from the filterchain */ while (!av_fifo_size(buf->fifo)) { - if (inlink->status) - return inlink->status; - if (flags & AV_BUFFERSINK_FLAG_NO_REQUEST) + /* if peek_in_framequeue is true later, then ff_request_frame() and + the ff_filter_graph_run_once() loop will take a frame from it and + move it to the internal fifo, ending the global loop */ + av_assert0(!peek_in_framequeue); + if (inlink->status_out) + return inlink->status_out; + peek_in_framequeue = ff_framequeue_queued_frames(&inlink->fifo) && + ff_framequeue_queued_samples(&inlink->fifo) >= inlink->min_samples; + if ((flags & AV_BUFFERSINK_FLAG_NO_REQUEST) && !peek_in_framequeue) return AVERROR(EAGAIN); if ((ret = ff_request_frame(inlink)) < 0) return ret; - while (inlink->frame_wanted_out) { + frame_count = inlink->frame_count_out; + while (frame_count == inlink->frame_count_out) { ret = ff_filter_graph_run_once(ctx->graph); if (ret < 0) return ret; diff --git a/libavfilter/buffersrc.c b/libavfilter/buffersrc.c index 9294811d36..1314397a32 100644 --- a/libavfilter/buffersrc.c +++ b/libavfilter/buffersrc.c @@ -184,6 +184,7 @@ static int av_buffersrc_add_frame_internal(AVFilterContext *ctx, if (!frame) { s->eof = 1; + ff_avfilter_link_set_in_status(ctx->outputs[0], AVERROR_EOF, AV_NOPTS_VALUE); return 0; } else if (s->eof) return AVERROR(EINVAL); @@ -235,9 +236,8 @@ static int av_buffersrc_add_frame_internal(AVFilterContext *ctx, return ret; } - if ((flags & AV_BUFFERSRC_FLAG_PUSH)) - if ((ret = ctx->output_pads[0].request_frame(ctx->outputs[0])) < 0) - return ret; + if ((ret = ctx->output_pads[0].request_frame(ctx->outputs[0])) < 0) + return ret; return 0; } diff --git a/libavfilter/f_interleave.c b/libavfilter/f_interleave.c index 422f2bfb29..b9192e9b14 100644 --- a/libavfilter/f_interleave.c +++ b/libavfilter/f_interleave.c @@ -26,6 +26,10 @@ #include "libavutil/avassert.h" #include "libavutil/avstring.h" #include "libavutil/opt.h" + +#define FF_INTERNAL_FIELDS 1 +#include "framequeue.h" + #include "avfilter.h" #include "bufferqueue.h" #include "formats.h" @@ -59,7 +63,7 @@ inline static int push_frame(AVFilterContext *ctx) for (i = 0; i < ctx->nb_inputs; i++) { struct FFBufQueue *q = &s->queues[i]; - if (!q->available && !ctx->inputs[i]->status) + if (!q->available && !ctx->inputs[i]->status_out) return 0; if (q->available) { frame = ff_bufqueue_peek(q, 0); @@ -190,7 +194,7 @@ static int request_frame(AVFilterLink *outlink) int i, ret; for (i = 0; i < ctx->nb_inputs; i++) { - if (!s->queues[i].available && !ctx->inputs[i]->status) { + if (!s->queues[i].available && !ctx->inputs[i]->status_out) { ret = ff_request_frame(ctx->inputs[i]); if (ret != AVERROR_EOF) return ret; diff --git a/libavfilter/internal.h b/libavfilter/internal.h index 3856012aa9..a8b69fd7f1 100644 --- a/libavfilter/internal.h +++ b/libavfilter/internal.h @@ 
-29,6 +29,7 @@ #include "avfiltergraph.h" #include "formats.h" #include "framepool.h" +#include "framequeue.h" #include "thread.h" #include "version.h" #include "video.h" @@ -147,6 +148,7 @@ struct AVFilterPad { struct AVFilterGraphInternal { void *thread; avfilter_execute_func *thread_execute; + FFFrameQueueGlobal frame_queues; }; struct AVFilterInternal { @@ -336,6 +338,8 @@ int ff_request_frame(AVFilterLink *link); int ff_request_frame_to_filter(AVFilterLink *link); +int ff_filter_frame_to_filter(AVFilterLink *link); + #define AVFILTER_DEFINE_CLASS(fname) \ static const AVClass fname##_class = { \ .class_name = #fname, \ @@ -376,6 +380,8 @@ int ff_filter_frame(AVFilterLink *link, AVFrame *frame); */ AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name); +int ff_filter_activate(AVFilterContext *filter); + /** * Remove a filter from a graph; */ diff --git a/libavfilter/split.c b/libavfilter/split.c index 6630087a58..b85a221353 100644 --- a/libavfilter/split.c +++ b/libavfilter/split.c @@ -30,6 +30,9 @@ #include "libavutil/mem.h" #include "libavutil/opt.h" +#define FF_INTERNAL_FIELDS 1 +#include "framequeue.h" + #include "avfilter.h" #include "audio.h" #include "formats.h" @@ -78,7 +81,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame) for (i = 0; i < ctx->nb_outputs; i++) { AVFrame *buf_out; - if (ctx->outputs[i]->status) + if (ctx->outputs[i]->status_in) continue; buf_out = av_frame_clone(frame); if (!buf_out) { diff --git a/libavfilter/tests/filtfmts.c b/libavfilter/tests/filtfmts.c index 46a2d9447d..f59199c9a9 100644 --- a/libavfilter/tests/filtfmts.c +++ b/libavfilter/tests/filtfmts.c @@ -25,6 +25,9 @@ #include "libavutil/pixdesc.h" #include "libavutil/samplefmt.h" +#define FF_INTERNAL_FIELDS 1 +#include "libavfilter/framequeue.h" + #include "libavfilter/avfilter.h" #include "libavfilter/formats.h" diff --git a/libavfilter/vf_extractplanes.c b/libavfilter/vf_extractplanes.c index f1a0443dd1..65bba33d90 100644 --- a/libavfilter/vf_extractplanes.c +++ b/libavfilter/vf_extractplanes.c @@ -22,6 +22,10 @@ #include "libavutil/imgutils.h" #include "libavutil/opt.h" #include "libavutil/pixdesc.h" + +#define FF_INTERNAL_FIELDS 1 +#include "libavfilter/framequeue.h" + #include "avfilter.h" #include "drawutils.h" #include "internal.h" @@ -283,7 +287,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame) const int idx = s->map[i]; AVFrame *out; - if (outlink->status) + if (outlink->status_in) continue; out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
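Usage note: because a request can now be satisfied from a link's framequeue without frame_wanted_out ever being raised, callers that used to spin on that flag (buffersink, avfilter_graph_request_oldest) instead run the graph until the link's frame_count_out advances. A hypothetical caller-side sketch of the pattern, with link and graph standing for the caller's own variables:

    int64_t frame_count;
    int ret;

    if ((ret = ff_request_frame(link)) < 0)
        return ret;
    /* Run ready filters until one of them actually delivers a frame on this link. */
    frame_count = link->frame_count_out;
    while (frame_count == link->frame_count_out) {
        ret = ff_filter_graph_run_once(graph);
        if (ret < 0)
            return ret;
    }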