author     Anton Khirnov <anton@khirnov.net>  2023-12-13 18:26:21 +0100
committer  Anton Khirnov <anton@khirnov.net>  2023-12-14 20:16:53 +0100
commit     84201d8af6255ddfbeffa38afd51f0ecf742390b (patch)
tree       11c05facaea1fa998a59857602d6952a8d8e72f4
parent     06d5dc11db257786dd18d13f39dd063ef8c1fa7d (diff)
download   ffmpeg-84201d8af6255ddfbeffa38afd51f0ecf742390b.tar.gz
fftools/ffmpeg_filter: move FilterGraph.graph to FilterGraphThread
The AVFilterGraph is fully owned by the filtering thread and should never be accessed outside of it.
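In short, the graph handle moves from the shared FilterGraph into the per-thread FilterGraphThread state, so allocation, configuration and teardown all happen on the filtering thread. A minimal sketch of that ownership pattern follows; the struct layout is simplified and hypothetical, only the public libavfilter calls are real.

    /* Sketch only: per-thread state owns the graph; nothing outside the
     * filtering thread ever dereferences fgt->graph. */
    #include <libavfilter/avfilter.h>
    #include <libavutil/error.h>

    typedef struct FilterGraphThread {
        AVFilterGraph *graph;   /* owned by the filter thread after this patch */
        /* ... other per-thread fields elided ... */
    } FilterGraphThread;

    /* Called from the filter thread whenever the graph must be (re)built. */
    static int fgt_configure(FilterGraphThread *fgt)
    {
        avfilter_graph_free(&fgt->graph);
        fgt->graph = avfilter_graph_alloc();
        if (!fgt->graph)
            return AVERROR(ENOMEM);

        /* ... create buffersrc/buffersink filters against fgt->graph ... */

        return avfilter_graph_config(fgt->graph, NULL);
    }

    /* Called from the filter thread on shutdown (cf. fg_thread_uninit below). */
    static void fgt_uninit(FilterGraphThread *fgt)
    {
        avfilter_graph_free(&fgt->graph);
    }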
-rw-r--r--  fftools/ffmpeg.h        |   2
-rw-r--r--  fftools/ffmpeg_filter.c | 120
2 files changed, 64 insertions(+), 58 deletions(-)
diff --git a/fftools/ffmpeg.h b/fftools/ffmpeg.h
index 7f84f3266d..de79bb1cbe 100644
--- a/fftools/ffmpeg.h
+++ b/fftools/ffmpeg.h
@@ -327,8 +327,6 @@ typedef struct FilterGraph {
const AVClass *class;
int index;
- AVFilterGraph *graph;
-
InputFilter **inputs;
int nb_inputs;
OutputFilter **outputs;
diff --git a/fftools/ffmpeg_filter.c b/fftools/ffmpeg_filter.c
index 21f823be12..002bdb143d 100644
--- a/fftools/ffmpeg_filter.c
+++ b/fftools/ffmpeg_filter.c
@@ -81,6 +81,8 @@ static const FilterGraphPriv *cfgp_from_cfg(const FilterGraph *fg)
// data that is local to the filter thread and not visible outside of it
typedef struct FilterGraphThread {
+ AVFilterGraph *graph;
+
AVFrame *frame;
// Temporary buffer for output frames, since on filtergraph reset
@@ -851,7 +853,6 @@ void fg_free(FilterGraph **pfg)
return;
fgp = fgp_from_fg(fg);
- avfilter_graph_free(&fg->graph);
for (int j = 0; j < fg->nb_inputs; j++) {
InputFilter *ifilter = fg->inputs[j];
InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
@@ -1214,7 +1215,8 @@ static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
return 0;
}
-static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
+static int configure_output_video_filter(FilterGraph *fg, AVFilterGraph *graph,
+ OutputFilter *ofilter, AVFilterInOut *out)
{
OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
OutputStream *ost = ofilter->ost;
@@ -1229,7 +1231,7 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
ret = avfilter_graph_create_filter(&ofp->filter,
avfilter_get_by_name("buffersink"),
- name, NULL, NULL, fg->graph);
+ name, NULL, NULL, graph);
if (ret < 0)
return ret;
@@ -1249,7 +1251,7 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
snprintf(name, sizeof(name), "scaler_out_%d_%d",
ost->file_index, ost->index);
if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
- name, args, NULL, fg->graph)) < 0)
+ name, args, NULL, graph)) < 0)
return ret;
if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
return ret;
@@ -1268,7 +1270,7 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
ret = avfilter_graph_create_filter(&filter,
avfilter_get_by_name("format"),
- "format", pix_fmts, NULL, fg->graph);
+ "format", pix_fmts, NULL, graph);
av_bprint_finalize(&bprint, NULL);
if (ret < 0)
return ret;
@@ -1293,7 +1295,8 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
return 0;
}
-static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
+static int configure_output_audio_filter(FilterGraph *fg, AVFilterGraph *graph,
+ OutputFilter *ofilter, AVFilterInOut *out)
{
OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
OutputStream *ost = ofilter->ost;
@@ -1307,7 +1310,7 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
ret = avfilter_graph_create_filter(&ofp->filter,
avfilter_get_by_name("abuffersink"),
- name, NULL, NULL, fg->graph);
+ name, NULL, NULL, graph);
if (ret < 0)
return ret;
if ((ret = av_opt_set_int(ofp->filter, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
@@ -1321,7 +1324,7 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
\
ret = avfilter_graph_create_filter(&filt_ctx, \
avfilter_get_by_name(filter_name), \
- filter_name, arg, NULL, fg->graph); \
+ filter_name, arg, NULL, graph); \
if (ret < 0) \
goto fail; \
\
@@ -1362,7 +1365,7 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
ost->file_index, ost->index);
ret = avfilter_graph_create_filter(&format,
avfilter_get_by_name("aformat"),
- name, args.str, NULL, fg->graph);
+ name, args.str, NULL, graph);
if (ret < 0)
goto fail;
@@ -1401,8 +1404,8 @@ fail:
return ret;
}
-static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter,
- AVFilterInOut *out)
+static int configure_output_filter(FilterGraph *fg, AVFilterGraph *graph,
+ OutputFilter *ofilter, AVFilterInOut *out)
{
if (!ofilter->ost) {
av_log(fg, AV_LOG_FATAL, "Filter %s has an unconnected output\n", ofilter->name);
@@ -1410,8 +1413,8 @@ static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter,
}
switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
- case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
- case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
+ case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, graph, ofilter, out);
+ case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, graph, ofilter, out);
default: av_assert0(0); return 0;
}
}
@@ -1445,8 +1448,8 @@ static void sub2video_prepare(InputFilterPriv *ifp)
ifp->sub2video.initialize = 1;
}
-static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
- AVFilterInOut *in)
+static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph *graph,
+ InputFilter *ifilter, AVFilterInOut *in)
{
InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
@@ -1499,7 +1502,7 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
if ((ret = avfilter_graph_create_filter(&ifp->filter, buffer_filt, name,
- args.str, NULL, fg->graph)) < 0)
+ args.str, NULL, graph)) < 0)
goto fail;
par->hw_frames_ctx = ifp->hw_frames_ctx;
ret = av_buffersrc_parameters_set(ifp->filter, par);
@@ -1575,8 +1578,8 @@ fail:
return ret;
}
-static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
- AVFilterInOut *in)
+static int configure_input_audio_filter(FilterGraph *fg, AVFilterGraph *graph,
+ InputFilter *ifilter, AVFilterInOut *in)
{
InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
AVFilterContext *last_filter;
@@ -1611,7 +1614,7 @@ static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
if ((ret = avfilter_graph_create_filter(&ifp->filter, abuffer_filt,
name, args.str, NULL,
- fg->graph)) < 0)
+ graph)) < 0)
return ret;
last_filter = ifp->filter;
@@ -1634,24 +1637,24 @@ static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
return 0;
}
-static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
- AVFilterInOut *in)
+static int configure_input_filter(FilterGraph *fg, AVFilterGraph *graph,
+ InputFilter *ifilter, AVFilterInOut *in)
{
switch (ifp_from_ifilter(ifilter)->type) {
- case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
- case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
+ case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, graph, ifilter, in);
+ case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, graph, ifilter, in);
default: av_assert0(0); return 0;
}
}
-static void cleanup_filtergraph(FilterGraph *fg)
+static void cleanup_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
{
int i;
for (i = 0; i < fg->nb_outputs; i++)
ofp_from_ofilter(fg->outputs[i])->filter = NULL;
for (i = 0; i < fg->nb_inputs; i++)
ifp_from_ifilter(fg->inputs[i])->filter = NULL;
- avfilter_graph_free(&fg->graph);
+ avfilter_graph_free(&fgt->graph);
}
static int filter_is_buffersrc(const AVFilterContext *f)
@@ -1678,9 +1681,9 @@ static int graph_is_meta(AVFilterGraph *graph)
return 1;
}
-static int sub2video_frame(InputFilter *ifilter, AVFrame *frame);
+static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer);
-static int configure_filtergraph(FilterGraph *fg, const FilterGraphThread *fgt)
+static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
{
FilterGraphPriv *fgp = fgp_from_fg(fg);
AVBufferRef *hw_device;
@@ -1689,27 +1692,28 @@ static int configure_filtergraph(FilterGraph *fg, const FilterGraphThread *fgt)
int have_input_eof = 0;
const char *graph_desc = fgp->graph_desc;
- cleanup_filtergraph(fg);
- if (!(fg->graph = avfilter_graph_alloc()))
+ cleanup_filtergraph(fg, fgt);
+ fgt->graph = avfilter_graph_alloc();
+ if (!fgt->graph)
return AVERROR(ENOMEM);
if (simple) {
OutputStream *ost = fg->outputs[0]->ost;
if (filter_nbthreads) {
- ret = av_opt_set(fg->graph, "threads", filter_nbthreads, 0);
+ ret = av_opt_set(fgt->graph, "threads", filter_nbthreads, 0);
if (ret < 0)
goto fail;
} else {
const AVDictionaryEntry *e = NULL;
e = av_dict_get(ost->encoder_opts, "threads", NULL, 0);
if (e)
- av_opt_set(fg->graph, "threads", e->value, 0);
+ av_opt_set(fgt->graph, "threads", e->value, 0);
}
if (av_dict_count(ost->sws_dict)) {
ret = av_dict_get_string(ost->sws_dict,
- &fg->graph->scale_sws_opts,
+ &fgt->graph->scale_sws_opts,
'=', ':');
if (ret < 0)
goto fail;
@@ -1720,20 +1724,20 @@ static int configure_filtergraph(FilterGraph *fg, const FilterGraphThread *fgt)
ret = av_dict_get_string(ost->swr_opts, &args, '=', ':');
if (ret < 0)
goto fail;
- av_opt_set(fg->graph, "aresample_swr_opts", args, 0);
+ av_opt_set(fgt->graph, "aresample_swr_opts", args, 0);
av_free(args);
}
} else {
- fg->graph->nb_threads = filter_complex_nbthreads;
+ fgt->graph->nb_threads = filter_complex_nbthreads;
}
hw_device = hw_device_for_filter();
- if ((ret = graph_parse(fg->graph, graph_desc, &inputs, &outputs, hw_device)) < 0)
+ if ((ret = graph_parse(fgt->graph, graph_desc, &inputs, &outputs, hw_device)) < 0)
goto fail;
for (cur = inputs, i = 0; cur; cur = cur->next, i++)
- if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0) {
+ if ((ret = configure_input_filter(fg, fgt->graph, fg->inputs[i], cur)) < 0) {
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);
goto fail;
@@ -1741,7 +1745,7 @@ static int configure_filtergraph(FilterGraph *fg, const FilterGraphThread *fgt)
avfilter_inout_free(&inputs);
for (cur = outputs, i = 0; cur; cur = cur->next, i++) {
- ret = configure_output_filter(fg, fg->outputs[i], cur);
+ ret = configure_output_filter(fg, fgt->graph, fg->outputs[i], cur);
if (ret < 0) {
avfilter_inout_free(&outputs);
goto fail;
@@ -1750,11 +1754,11 @@ static int configure_filtergraph(FilterGraph *fg, const FilterGraphThread *fgt)
avfilter_inout_free(&outputs);
if (fgp->disable_conversions)
- avfilter_graph_set_auto_convert(fg->graph, AVFILTER_AUTO_CONVERT_NONE);
- if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
+ avfilter_graph_set_auto_convert(fgt->graph, AVFILTER_AUTO_CONVERT_NONE);
+ if ((ret = avfilter_graph_config(fgt->graph, NULL)) < 0)
goto fail;
- fgp->is_meta = graph_is_meta(fg->graph);
+ fgp->is_meta = graph_is_meta(fgt->graph);
/* limit the lists of allowed formats to the ones selected, to
* make sure they stay the same if the filtergraph is reconfigured later */
@@ -1792,7 +1796,7 @@ static int configure_filtergraph(FilterGraph *fg, const FilterGraphThread *fgt)
AVFrame *tmp;
while (av_fifo_read(ifp->frame_queue, &tmp, 1) >= 0) {
if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
- sub2video_frame(&ifp->ifilter, tmp);
+ sub2video_frame(&ifp->ifilter, tmp, !fgt->graph);
} else {
ret = av_buffersrc_add_frame(ifp->filter, tmp);
}
@@ -1815,14 +1819,14 @@ static int configure_filtergraph(FilterGraph *fg, const FilterGraphThread *fgt)
if (have_input_eof) {
// make sure the EOF propagates to the end of the graph
- ret = avfilter_graph_request_oldest(fg->graph);
+ ret = avfilter_graph_request_oldest(fgt->graph);
if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
goto fail;
}
return 0;
fail:
- cleanup_filtergraph(fg);
+ cleanup_filtergraph(fg, fgt);
return ret;
}
@@ -1895,17 +1899,18 @@ int filtergraph_is_simple(const FilterGraph *fg)
return fgp->is_simple;
}
-static void send_command(FilterGraph *fg, double time, const char *target,
+static void send_command(FilterGraph *fg, AVFilterGraph *graph,
+ double time, const char *target,
const char *command, const char *arg, int all_filters)
{
int ret;
- if (!fg->graph)
+ if (!graph)
return;
if (time < 0) {
char response[4096];
- ret = avfilter_graph_send_command(fg->graph, target, command, arg,
+ ret = avfilter_graph_send_command(graph, target, command, arg,
response, sizeof(response),
all_filters ? 0 : AVFILTER_CMD_FLAG_ONE);
fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s",
@@ -1913,7 +1918,7 @@ static void send_command(FilterGraph *fg, double time, const char *target,
} else if (!all_filters) {
fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
} else {
- ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
+ ret = avfilter_graph_queue_command(graph, target, command, arg, 0, time);
if (ret < 0)
fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
}
@@ -2399,7 +2404,7 @@ static int read_frames(FilterGraph *fg, FilterGraphThread *fgt,
int did_step = 0;
// graph not configured, just select the input to request
- if (!fg->graph) {
+ if (!fgt->graph) {
for (int i = 0; i < fg->nb_inputs; i++) {
InputFilterPriv *ifp = ifp_from_ifilter(fg->inputs[i]);
if (ifp->format < 0 && !fgt->eof_in[i]) {
@@ -2418,7 +2423,7 @@ static int read_frames(FilterGraph *fg, FilterGraphThread *fgt,
while (fgp->nb_outputs_done < fg->nb_outputs) {
int ret;
- ret = avfilter_graph_request_oldest(fg->graph);
+ ret = avfilter_graph_request_oldest(fgt->graph);
if (ret == AVERROR(EAGAIN)) {
fgt->next_in = choose_input(fg, fgt);
break;
@@ -2476,12 +2481,12 @@ static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb
sub2video_push_ref(ifp, pts2);
}
-static int sub2video_frame(InputFilter *ifilter, AVFrame *frame)
+static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
{
InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
int ret;
- if (!ifilter->graph->graph) {
+ if (buffer) {
AVFrame *tmp;
if (!frame)
@@ -2597,7 +2602,7 @@ static int send_frame(FilterGraph *fg, FilterGraphThread *fgt,
break;
}
- if (!ifp->ist->reinit_filters && fg->graph)
+ if (!ifp->ist->reinit_filters && fgt->graph)
need_reinit = 0;
if (!!ifp->hw_frames_ctx != !!frame->hw_frames_ctx ||
@@ -2618,7 +2623,7 @@ static int send_frame(FilterGraph *fg, FilterGraphThread *fgt,
}
/* (re)init the graph if possible, otherwise buffer the frame and return */
- if (need_reinit || !fg->graph) {
+ if (need_reinit || !fgt->graph) {
AVFrame *tmp = av_frame_alloc();
if (!tmp)
@@ -2634,7 +2639,7 @@ static int send_frame(FilterGraph *fg, FilterGraphThread *fgt,
return ret;
}
- ret = fg->graph ? read_frames(fg, fgt, tmp) : 0;
+ ret = fgt->graph ? read_frames(fg, fgt, tmp) : 0;
av_frame_free(&tmp);
if (ret < 0)
return ret;
@@ -2700,6 +2705,8 @@ static void fg_thread_uninit(FilterGraphThread *fgt)
av_freep(&fgt->eof_in);
av_freep(&fgt->eof_out);
+ avfilter_graph_free(&fgt->graph);
+
memset(fgt, 0, sizeof(*fgt));
}
@@ -2783,7 +2790,7 @@ static void *filter_thread(void *arg)
av_assert0(o == FRAME_OPAQUE_SEND_COMMAND && fgt.frame->buf[0]);
fc = (FilterCommand*)fgt.frame->buf[0]->data;
- send_command(fg, fc->time, fc->target, fc->command, fc->arg,
+ send_command(fg, fgt.graph, fc->time, fc->target, fc->command, fc->arg,
fc->all_filters);
av_frame_unref(fgt.frame);
continue;
@@ -2795,7 +2802,8 @@ static void *filter_thread(void *arg)
if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
int hb_frame = input_status >= 0 && o == FRAME_OPAQUE_SUB_HEARTBEAT;
- ret = sub2video_frame(ifilter, (fgt.frame->buf[0] || hb_frame) ? fgt.frame : NULL);
+ ret = sub2video_frame(ifilter, (fgt.frame->buf[0] || hb_frame) ? fgt.frame : NULL,
+ !fgt.graph);
} else if (fgt.frame->buf[0]) {
ret = send_frame(fg, &fgt, ifilter, fgt.frame);
} else {