author     Anton Khirnov <anton@khirnov.net>   2024-08-06 08:50:21 +0200
committer  Anton Khirnov <anton@khirnov.net>   2024-08-15 19:34:27 +0200
commit     42cbf66fffed5a8bba2f14b5247ce60d788b9e01 (patch)
tree       4b744e56f719ffc0455331703e844bf1299b8db9
parent     a23d565ea7d41e61f160578f9714a23e695f3bfd (diff)
lavfi: move AVFilterLink.{frame,sample}_count_{in,out} to FilterLink
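
Below is a minimal sketch (not part of this patch) of the access pattern the change establishes: code that needs the counters obtains the FilterLink view of a link via ff_filter_link() and reads frame_count_in/out and sample_count_in/out from it, rather than from the public AVFilterLink. The filter_frame() body and the log message are illustrative only.

#include <inttypes.h>

#include "avfilter.h"
#include "filters.h"

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterLink *outlink = inlink->dst->outputs[0];
    /* the counters now live on FilterLink, not on the public AVFilterLink */
    FilterLink *inl = ff_filter_link(inlink);

    av_log(inlink->dst, AV_LOG_DEBUG,
           "frames in:%"PRId64" out:%"PRId64" samples in:%"PRId64"\n",
           inl->frame_count_in, inl->frame_count_out, inl->sample_count_in);

    return ff_filter_frame(outlink, frame);
}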
-rw-r--r--  libavfilter/af_adrc.c | 3
-rw-r--r--  libavfilter/af_afftdn.c | 3
-rw-r--r--  libavfilter/af_ashowinfo.c | 4
-rw-r--r--  libavfilter/af_dynaudnorm.c | 3
-rw-r--r--  libavfilter/af_volume.c | 4
-rw-r--r--  libavfilter/asrc_sine.c | 3
-rw-r--r--  libavfilter/avf_showfreqs.c | 5
-rw-r--r--  libavfilter/avfilter.c | 16
-rw-r--r--  libavfilter/avfilter.h | 10
-rw-r--r--  libavfilter/avfiltergraph.c | 4
-rw-r--r--  libavfilter/f_graphmonitor.c | 24
-rw-r--r--  libavfilter/f_latency.c | 6
-rw-r--r--  libavfilter/f_loop.c | 6
-rw-r--r--  libavfilter/f_metadata.c | 6
-rw-r--r--  libavfilter/f_segment.c | 10
-rw-r--r--  libavfilter/f_select.c | 4
-rw-r--r--  libavfilter/f_sendcmd.c | 4
-rw-r--r--  libavfilter/f_streamselect.c | 3
-rw-r--r--  libavfilter/filters.h | 10
-rw-r--r--  libavfilter/qrencode.c | 3
-rw-r--r--  libavfilter/vf_bbox.c | 4
-rw-r--r--  libavfilter/vf_blackdetect.c | 4
-rw-r--r--  libavfilter/vf_blend.c | 3
-rw-r--r--  libavfilter/vf_blockdetect.c | 5
-rw-r--r--  libavfilter/vf_blurdetect.c | 5
-rw-r--r--  libavfilter/vf_crop.c | 4
-rw-r--r--  libavfilter/vf_datascope.c | 4
-rw-r--r--  libavfilter/vf_delogo.c | 4
-rw-r--r--  libavfilter/vf_detelecine.c | 3
-rw-r--r--  libavfilter/vf_drawtext.c | 9
-rw-r--r--  libavfilter/vf_eq.c | 3
-rw-r--r--  libavfilter/vf_fade.c | 10
-rw-r--r--  libavfilter/vf_fftfilt.c | 4
-rw-r--r--  libavfilter/vf_fieldhint.c | 17
-rw-r--r--  libavfilter/vf_fieldmatch.c | 7
-rw-r--r--  libavfilter/vf_find_rect.c | 5
-rw-r--r--  libavfilter/vf_framestep.c | 3
-rw-r--r--  libavfilter/vf_freezeframes.c | 8
-rw-r--r--  libavfilter/vf_geq.c | 5
-rw-r--r--  libavfilter/vf_hue.c | 3
-rw-r--r--  libavfilter/vf_libplacebo.c | 6
-rw-r--r--  libavfilter/vf_overlay.c | 4
-rw-r--r--  libavfilter/vf_overlay_cuda.c | 3
-rw-r--r--  libavfilter/vf_perspective.c | 7
-rw-r--r--  libavfilter/vf_quirc.c | 3
-rw-r--r--  libavfilter/vf_rotate.c | 4
-rw-r--r--  libavfilter/vf_scale.c | 12
-rw-r--r--  libavfilter/vf_scale_npp.c | 8
-rw-r--r--  libavfilter/vf_showinfo.c | 3
-rw-r--r--  libavfilter/vf_swaprect.c | 4
-rw-r--r--  libavfilter/vf_telecine.c | 3
-rw-r--r--  libavfilter/vf_tinterlace.c | 7
-rw-r--r--  libavfilter/vf_vignette.c | 3
-rw-r--r--  libavfilter/vf_weave.c | 3
-rw-r--r--  libavfilter/vf_zoompan.c | 9
-rw-r--r--  libavfilter/vsrc_mptestsrc.c | 5
56 files changed, 209 insertions, 116 deletions
diff --git a/libavfilter/af_adrc.c b/libavfilter/af_adrc.c
index 7a7d5e0370..e11db05f70 100644
--- a/libavfilter/af_adrc.c
+++ b/libavfilter/af_adrc.c
@@ -363,6 +363,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
+ FilterLink *outl = ff_filter_link(outlink);
AudioDRCContext *s = ctx->priv;
AVFrame *out;
int ret;
@@ -373,7 +374,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
goto fail;
}
- s->var_values[VAR_SN] = outlink->sample_count_in;
+ s->var_values[VAR_SN] = outl->sample_count_in;
s->var_values[VAR_T] = s->var_values[VAR_SN] * (double)1/outlink->sample_rate;
s->in = in;
diff --git a/libavfilter/af_afftdn.c b/libavfilter/af_afftdn.c
index a2e6ca6107..fd6b2b2685 100644
--- a/libavfilter/af_afftdn.c
+++ b/libavfilter/af_afftdn.c
@@ -355,8 +355,9 @@ static void process_frame(AVFilterContext *ctx,
double *prior, double *prior_band_excit, int track_noise)
{
AVFilterLink *outlink = ctx->outputs[0];
+ FilterLink *outl = ff_filter_link(outlink);
const double *abs_var = dnch->abs_var;
- const double ratio = outlink->frame_count_out ? s->ratio : 1.0;
+ const double ratio = outl->frame_count_out ? s->ratio : 1.0;
const double rratio = 1. - ratio;
const int *bin2band = s->bin2band;
double *noisy_data = dnch->noisy_data;
diff --git a/libavfilter/af_ashowinfo.c b/libavfilter/af_ashowinfo.c
index de3c81f90b..9ca97c609f 100644
--- a/libavfilter/af_ashowinfo.c
+++ b/libavfilter/af_ashowinfo.c
@@ -38,6 +38,7 @@
#include "audio.h"
#include "avfilter.h"
+#include "filters.h"
#include "internal.h"
typedef struct AShowInfoContext {
@@ -173,6 +174,7 @@ static void dump_unknown(AVFilterContext *ctx, AVFrameSideData *sd)
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
+ FilterLink *inl = ff_filter_link(inlink);
AVFilterContext *ctx = inlink->dst;
AShowInfoContext *s = ctx->priv;
char chlayout_str[128];
@@ -203,7 +205,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
"n:%"PRId64" pts:%s pts_time:%s "
"fmt:%s channels:%d chlayout:%s rate:%d nb_samples:%d "
"checksum:%08"PRIX32" ",
- inlink->frame_count_out,
+ inl->frame_count_out,
av_ts2str(buf->pts), av_ts2timestr(buf->pts, &inlink->time_base),
av_get_sample_fmt_name(buf->format), buf->ch_layout.nb_channels, chlayout_str,
buf->sample_rate, buf->nb_samples,
diff --git a/libavfilter/af_dynaudnorm.c b/libavfilter/af_dynaudnorm.c
index 846d62584b..a3d3c47d17 100644
--- a/libavfilter/af_dynaudnorm.c
+++ b/libavfilter/af_dynaudnorm.c
@@ -725,6 +725,7 @@ static void perform_compression(DynamicAudioNormalizerContext *s, AVFrame *frame
static int analyze_frame(AVFilterContext *ctx, AVFilterLink *outlink, AVFrame **frame)
{
+ FilterLink *outl = ff_filter_link(outlink);
DynamicAudioNormalizerContext *s = ctx->priv;
AVFrame *analyze_frame;
@@ -780,7 +781,7 @@ static int analyze_frame(AVFilterContext *ctx, AVFilterLink *outlink, AVFrame **
analyze_frame = *frame;
}
- s->var_values[VAR_SN] = outlink->sample_count_in;
+ s->var_values[VAR_SN] = outl->sample_count_in;
s->var_values[VAR_T] = s->var_values[VAR_SN] * (double)1/outlink->sample_rate;
if (s->channels_coupled) {
diff --git a/libavfilter/af_volume.c b/libavfilter/af_volume.c
index b3dd57c5e5..827415fc1c 100644
--- a/libavfilter/af_volume.c
+++ b/libavfilter/af_volume.c
@@ -35,6 +35,7 @@
#include "audio.h"
#include "avfilter.h"
+#include "filters.h"
#include "formats.h"
#include "internal.h"
#include "af_volume.h"
@@ -328,6 +329,7 @@ static int process_command(AVFilterContext *ctx, const char *cmd, const char *ar
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
+ FilterLink *inl = ff_filter_link(inlink);
AVFilterContext *ctx = inlink->dst;
VolumeContext *vol = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
@@ -380,7 +382,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
}
vol->var_values[VAR_PTS] = TS2D(buf->pts);
vol->var_values[VAR_T ] = TS2T(buf->pts, inlink->time_base);
- vol->var_values[VAR_N ] = inlink->frame_count_out;
+ vol->var_values[VAR_N ] = inl->frame_count_out;
#if FF_API_FRAME_PKT
FF_DISABLE_DEPRECATION_WARNINGS
diff --git a/libavfilter/asrc_sine.c b/libavfilter/asrc_sine.c
index 72a24cce65..bbfe37ba53 100644
--- a/libavfilter/asrc_sine.c
+++ b/libavfilter/asrc_sine.c
@@ -207,10 +207,11 @@ static av_cold int config_props(AVFilterLink *outlink)
static int activate(AVFilterContext *ctx)
{
AVFilterLink *outlink = ctx->outputs[0];
+ FilterLink *outl = ff_filter_link(outlink);
SineContext *sine = ctx->priv;
AVFrame *frame;
double values[VAR_VARS_NB] = {
- [VAR_N] = outlink->frame_count_in,
+ [VAR_N] = outl->frame_count_in,
[VAR_PTS] = sine->pts,
[VAR_T] = sine->pts * av_q2d(outlink->time_base),
[VAR_TB] = av_q2d(outlink->time_base),
diff --git a/libavfilter/avf_showfreqs.c b/libavfilter/avf_showfreqs.c
index f5b86a7f17..245d62dec4 100644
--- a/libavfilter/avf_showfreqs.c
+++ b/libavfilter/avf_showfreqs.c
@@ -298,6 +298,7 @@ static inline void plot_freq(ShowFreqsContext *s, int ch,
double a, int f, uint8_t fg[4], int *prev_y,
AVFrame *out, AVFilterLink *outlink)
{
+ FilterLink *outl = ff_filter_link(outlink);
const int w = s->w;
const float min = s->minamp;
const float avg = s->avg_data[ch][f];
@@ -337,12 +338,12 @@ static inline void plot_freq(ShowFreqsContext *s, int ch,
switch (s->avg) {
case 0:
- y = s->avg_data[ch][f] = !outlink->frame_count_in ? y : FFMIN(0, y);
+ y = s->avg_data[ch][f] = !outl->frame_count_in ? y : FFMIN(0, y);
break;
case 1:
break;
default:
- s->avg_data[ch][f] = avg + y * (y - avg) / (FFMIN(outlink->frame_count_in + 1, s->avg) * (float)y);
+ s->avg_data[ch][f] = avg + y * (y - avg) / (FFMIN(outl->frame_count_in + 1, s->avg) * (float)y);
y = av_clip(s->avg_data[ch][f], 0, outlink->h - 1);
break;
}
diff --git a/libavfilter/avfilter.c b/libavfilter/avfilter.c
index 66dda6584d..9a01bc1c4e 100644
--- a/libavfilter/avfilter.c
+++ b/libavfilter/avfilter.c
@@ -991,6 +991,7 @@ static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
{
+ FilterLink *l = ff_filter_link(link);
int (*filter_frame)(AVFilterLink *, AVFrame *);
AVFilterContext *dstctx = link->dst;
AVFilterPad *dst = link->dstpad;
@@ -1012,7 +1013,7 @@ static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
(dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
filter_frame = default_filter_frame;
ret = filter_frame(link, frame);
- link->frame_count_out++;
+ l->frame_count_out++;
return ret;
fail:
@@ -1058,8 +1059,8 @@ int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
}
li->frame_blocked_in = link->frame_wanted_out = 0;
- link->frame_count_in++;
- link->sample_count_in += frame->nb_samples;
+ li->l.frame_count_in++;
+ li->l.sample_count_in += frame->nb_samples;
filter_unblock(link->dst);
ret = ff_framequeue_add(&li->fifo, frame);
if (ret < 0) {
@@ -1163,7 +1164,7 @@ static int ff_filter_frame_to_filter(AVFilterLink *link)
filter_unblock(dst);
/* AVFilterPad.filter_frame() expect frame_count_out to have the value
before the frame; ff_filter_frame_framed() will re-increment it. */
- link->frame_count_out--;
+ li->l.frame_count_out--;
ret = ff_filter_frame_framed(link, frame);
if (ret < 0 && ret != li->status_out) {
link_set_out_status(link, ret, AV_NOPTS_VALUE);
@@ -1444,8 +1445,8 @@ static void consume_update(FilterLinkInternal *li, const AVFrame *frame)
ff_inlink_process_commands(link, frame);
if (link == link->dst->inputs[0])
link->dst->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
- link->frame_count_out++;
- link->sample_count_out += frame->nb_samples;
+ li->l.frame_count_out++;
+ li->l.sample_count_out += frame->nb_samples;
}
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
@@ -1552,6 +1553,7 @@ int ff_inlink_process_commands(AVFilterLink *link, const AVFrame *frame)
int ff_inlink_evaluate_timeline_at_frame(AVFilterLink *link, const AVFrame *frame)
{
+ FilterLink *l = ff_filter_link(link);
AVFilterContext *dstctx = link->dst;
int64_t pts = frame->pts;
#if FF_API_FRAME_PKT
@@ -1563,7 +1565,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
if (!dstctx->enable_str)
return 1;
- dstctx->var_values[VAR_N] = link->frame_count_out;
+ dstctx->var_values[VAR_N] = l->frame_count_out;
dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
dstctx->var_values[VAR_W] = link->w;
dstctx->var_values[VAR_H] = link->h;
diff --git a/libavfilter/avfilter.h b/libavfilter/avfilter.h
index 3498514459..176498cdb4 100644
--- a/libavfilter/avfilter.h
+++ b/libavfilter/avfilter.h
@@ -601,16 +601,6 @@ struct AVFilterLink {
struct AVFilterGraph *graph;
/**
- * Number of past frames sent through the link.
- */
- int64_t frame_count_in, frame_count_out;
-
- /**
- * Number of past samples sent through the link.
- */
- int64_t sample_count_in, sample_count_out;
-
- /**
* True if a frame is currently wanted on the output of this filter.
* Set when ff_request_frame() is called by the output,
* cleared when a frame is filtered.
diff --git a/libavfilter/avfiltergraph.c b/libavfilter/avfiltergraph.c
index e589ac2415..18a3f54759 100644
--- a/libavfilter/avfiltergraph.c
+++ b/libavfilter/avfiltergraph.c
@@ -1402,8 +1402,8 @@ int avfilter_graph_request_oldest(AVFilterGraph *graph)
return AVERROR_EOF;
av_assert1(!oldest->dst->filter->activate);
av_assert1(oldesti->age_index >= 0);
- frame_count = oldest->frame_count_out;
- while (frame_count == oldest->frame_count_out) {
+ frame_count = oldesti->l.frame_count_out;
+ while (frame_count == oldesti->l.frame_count_out) {
r = ff_filter_graph_run_once(graph);
if (r == AVERROR(EAGAIN) &&
!oldest->frame_wanted_out && !oldesti->frame_blocked_in &&
diff --git a/libavfilter/f_graphmonitor.c b/libavfilter/f_graphmonitor.c
index 72ca5cea15..c02a205e7e 100644
--- a/libavfilter/f_graphmonitor.c
+++ b/libavfilter/f_graphmonitor.c
@@ -308,33 +308,33 @@ static int draw_items(AVFilterContext *ctx,
drawtext(out, xpos, ypos, buffer, len, frames > 0 ? frames >= 10 ? frames >= 50 ? s->red : s->yellow : s->green : s->white);
xpos += len * 8;
}
- if ((flags & FLAG_FCIN) && (!(mode & MODE_NOZERO) || l->frame_count_in)) {
- len = snprintf(buffer, sizeof(buffer)-1, " | in: %"PRId64, l->frame_count_in);
+ if ((flags & FLAG_FCIN) && (!(mode & MODE_NOZERO) || fl->frame_count_in)) {
+ len = snprintf(buffer, sizeof(buffer)-1, " | in: %"PRId64, fl->frame_count_in);
drawtext(out, xpos, ypos, buffer, len, s->white);
xpos += len * 8;
}
- if ((flags & FLAG_FCOUT) && (!(mode & MODE_NOZERO) || l->frame_count_out)) {
- len = snprintf(buffer, sizeof(buffer)-1, " | out: %"PRId64, l->frame_count_out);
+ if ((flags & FLAG_FCOUT) && (!(mode & MODE_NOZERO) || fl->frame_count_out)) {
+ len = snprintf(buffer, sizeof(buffer)-1, " | out: %"PRId64, fl->frame_count_out);
drawtext(out, xpos, ypos, buffer, len, s->white);
xpos += len * 8;
}
- if ((flags & FLAG_FC_DELTA) && (!(mode & MODE_NOZERO) || (l->frame_count_in - l->frame_count_out))) {
- len = snprintf(buffer, sizeof(buffer)-1, " | delta: %"PRId64, l->frame_count_in - l->frame_count_out);
+ if ((flags & FLAG_FC_DELTA) && (!(mode & MODE_NOZERO) || (fl->frame_count_in - fl->frame_count_out))) {
+ len = snprintf(buffer, sizeof(buffer)-1, " | delta: %"PRId64, fl->frame_count_in - fl->frame_count_out);
drawtext(out, xpos, ypos, buffer, len, s->white);
xpos += len * 8;
}
- if ((flags & FLAG_SCIN) && (!(mode & MODE_NOZERO) || l->sample_count_in)) {
- len = snprintf(buffer, sizeof(buffer)-1, " | sin: %"PRId64, l->sample_count_in);
+ if ((flags & FLAG_SCIN) && (!(mode & MODE_NOZERO) || fl->sample_count_in)) {
+ len = snprintf(buffer, sizeof(buffer)-1, " | sin: %"PRId64, fl->sample_count_in);
drawtext(out, xpos, ypos, buffer, len, s->white);
xpos += len * 8;
}
- if ((flags & FLAG_SCOUT) && (!(mode & MODE_NOZERO) || l->sample_count_out)) {
- len = snprintf(buffer, sizeof(buffer)-1, " | sout: %"PRId64, l->sample_count_out);
+ if ((flags & FLAG_SCOUT) && (!(mode & MODE_NOZERO) || fl->sample_count_out)) {
+ len = snprintf(buffer, sizeof(buffer)-1, " | sout: %"PRId64, fl->sample_count_out);
drawtext(out, xpos, ypos, buffer, len, s->white);
xpos += len * 8;
}
- if ((flags & FLAG_SC_DELTA) && (!(mode & MODE_NOZERO) || (l->sample_count_in - l->sample_count_out))) {
- len = snprintf(buffer, sizeof(buffer)-1, " | sdelta: %"PRId64, l->sample_count_in - l->sample_count_out);
+ if ((flags & FLAG_SC_DELTA) && (!(mode & MODE_NOZERO) || (fl->sample_count_in - fl->sample_count_out))) {
+ len = snprintf(buffer, sizeof(buffer)-1, " | sdelta: %"PRId64, fl->sample_count_in - fl->sample_count_out);
drawtext(out, xpos, ypos, buffer, len, s->white);
xpos += len * 8;
}
diff --git a/libavfilter/f_latency.c b/libavfilter/f_latency.c
index a39c3c7d24..5315545873 100644
--- a/libavfilter/f_latency.c
+++ b/libavfilter/f_latency.c
@@ -46,6 +46,7 @@ static int activate(AVFilterContext *ctx)
{
LatencyContext *s = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
+ FilterLink *inl = ff_filter_link(inlink);
AVFilterLink *outlink = ctx->outputs[0];
FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
@@ -53,14 +54,15 @@ static int activate(AVFilterContext *ctx)
if (!ctx->is_disabled && ctx->inputs[0]->src &&
ctx->inputs[0]->src->nb_inputs > 0) {
AVFilterLink *prevlink = ctx->inputs[0]->src->inputs[0];
+ FilterLink *prevl = ff_filter_link(prevlink);
int64_t delta = 0;
switch (prevlink->type) {
case AVMEDIA_TYPE_AUDIO:
- delta = prevlink->sample_count_in - inlink->sample_count_out;
+ delta = prevl->sample_count_in - inl->sample_count_out;
break;
case AVMEDIA_TYPE_VIDEO:
- delta = prevlink->frame_count_in - inlink->frame_count_out;
+ delta = prevl->frame_count_in - inl->frame_count_out;
break;
}
diff --git a/libavfilter/f_loop.c b/libavfilter/f_loop.c
index 9c9c03fb05..51cf2dbf4d 100644
--- a/libavfilter/f_loop.c
+++ b/libavfilter/f_loop.c
@@ -144,6 +144,7 @@ static int push_samples(AVFilterContext *ctx, int nb_samples)
static int afilter_frame(AVFilterLink *inlink, AVFrame *frame)
{
+ FilterLink *inl = ff_filter_link(inlink);
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
LoopContext *s = ctx->priv;
@@ -158,7 +159,7 @@ static int afilter_frame(AVFilterLink *inlink, AVFrame *frame)
int drain = 0;
if (s->start < 0)
- s->start = inlink->sample_count_out - written;
+ s->start = inl->sample_count_out - written;
ret = av_audio_fifo_write(s->fifo, (void **)frame->extended_data, written);
if (ret < 0)
@@ -374,6 +375,7 @@ static int push_frame(AVFilterContext *ctx)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
+ FilterLink *inl = ff_filter_link(inlink);
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
FilterLink *outl = ff_filter_link(outlink);
@@ -381,7 +383,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
int64_t duration;
int ret = 0;
- if (((s->start >= 0 && inlink->frame_count_out >= s->start) ||
+ if (((s->start >= 0 && inl->frame_count_out >= s->start) ||
(s->time_pts != AV_NOPTS_VALUE &&
frame->pts >= s->time_pts)) &&
s->size > 0 && s->loop != 0) {
diff --git a/libavfilter/f_metadata.c b/libavfilter/f_metadata.c
index b6d548612b..e18d477850 100644
--- a/libavfilter/f_metadata.c
+++ b/libavfilter/f_metadata.c
@@ -36,6 +36,7 @@
#include "libavformat/avio.h"
#include "avfilter.h"
#include "audio.h"
+#include "filters.h"
#include "internal.h"
#include "video.h"
@@ -303,6 +304,7 @@ static av_cold void uninit(AVFilterContext *ctx)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
+ FilterLink *inl = ff_filter_link(inlink);
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
MetadataContext *s = ctx->priv;
@@ -336,14 +338,14 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
case METADATA_PRINT:
if (!s->key && e) {
s->print(ctx, "frame:%-4"PRId64" pts:%-7s pts_time:%s\n",
- inlink->frame_count_out, av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base));
+ inl->frame_count_out, av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base));
s->print(ctx, "%s=%s\n", e->key, e->value);
while ((e = av_dict_iterate(*metadata, e)) != NULL) {
s->print(ctx, "%s=%s\n", e->key, e->value);
}
} else if (e && e->value && (!s->value || (e->value && s->compare(s, e->value, s->value)))) {
s->print(ctx, "frame:%-4"PRId64" pts:%-7s pts_time:%s\n",
- inlink->frame_count_out, av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base));
+ inl->frame_count_out, av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base));
s->print(ctx, "%s=%s\n", s->key, e->value);
}
return ff_filter_frame(outlink, frame);
diff --git a/libavfilter/f_segment.c b/libavfilter/f_segment.c
index 16c611b4a8..f655c1e675 100644
--- a/libavfilter/f_segment.c
+++ b/libavfilter/f_segment.c
@@ -162,6 +162,7 @@ static int current_segment_finished(AVFilterContext *ctx, AVFrame *frame)
{
SegmentContext *s = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
+ FilterLink *inl = ff_filter_link(inlink);
int ret = 0;
if (s->use_timestamps) {
@@ -169,10 +170,10 @@ static int current_segment_finished(AVFilterContext *ctx, AVFrame *frame)
} else {
switch (inlink->type) {
case AVMEDIA_TYPE_VIDEO:
- ret = inlink->frame_count_out - 1 >= s->points[s->current_point];
+ ret = inl->frame_count_out - 1 >= s->points[s->current_point];
break;
case AVMEDIA_TYPE_AUDIO:
- ret = inlink->sample_count_out - frame->nb_samples >= s->points[s->current_point];
+ ret = inl->sample_count_out - frame->nb_samples >= s->points[s->current_point];
break;
}
}
@@ -183,6 +184,7 @@ static int current_segment_finished(AVFilterContext *ctx, AVFrame *frame)
static int activate(AVFilterContext *ctx)
{
AVFilterLink *inlink = ctx->inputs[0];
+ FilterLink *inl = ff_filter_link(inlink);
SegmentContext *s = ctx->priv;
AVFrame *frame = NULL;
int ret, status;
@@ -199,14 +201,14 @@ static int activate(AVFilterContext *ctx)
ret = ff_inlink_consume_frame(inlink, &frame);
break;
case AVMEDIA_TYPE_AUDIO:
- diff = s->points[s->current_point] - inlink->sample_count_out;
+ diff = s->points[s->current_point] - inl->sample_count_out;
while (diff <= 0) {
ff_outlink_set_status(ctx->outputs[s->current_point], AVERROR_EOF, s->last_pts);
s->current_point++;
if (s->current_point >= s->nb_points)
return AVERROR(EINVAL);
- diff = s->points[s->current_point] - inlink->sample_count_out;
+ diff = s->points[s->current_point] - inl->sample_count_out;
}
if (s->use_timestamps) {
max_samples = av_rescale_q(diff, av_make_q(1, inlink->sample_rate), inlink->time_base);
diff --git a/libavfilter/f_select.c b/libavfilter/f_select.c
index 7402d3169f..d834397ea6 100644
--- a/libavfilter/f_select.c
+++ b/libavfilter/f_select.c
@@ -34,6 +34,7 @@
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "audio.h"
+#include "filters.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
@@ -342,6 +343,7 @@ static void select_frame(AVFilterContext *ctx, AVFrame *frame)
{
SelectContext *select = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
+ FilterLink *inl = ff_filter_link(inlink);
double res;
if (isnan(select->var_values[VAR_START_PTS]))
@@ -349,7 +351,7 @@ static void select_frame(AVFilterContext *ctx, AVFrame *frame)
if (isnan(select->var_values[VAR_START_T]))
select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base);
- select->var_values[VAR_N ] = inlink->frame_count_out;
+ select->var_values[VAR_N ] = inl->frame_count_out;
select->var_values[VAR_PTS] = TS2D(frame->pts);
select->var_values[VAR_T ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
#if FF_API_FRAME_PKT
diff --git a/libavfilter/f_sendcmd.c b/libavfilter/f_sendcmd.c
index 47bfaba9c9..86f9d522e9 100644
--- a/libavfilter/f_sendcmd.c
+++ b/libavfilter/f_sendcmd.c
@@ -33,6 +33,7 @@
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
+#include "filters.h"
#include "internal.h"
#include "audio.h"
#include "video.h"
@@ -488,6 +489,7 @@ static av_cold void uninit(AVFilterContext *ctx)
static int filter_frame(AVFilterLink *inlink, AVFrame *ref)
{
+ FilterLink *inl = ff_filter_link(inlink);
AVFilterContext *ctx = inlink->dst;
SendCmdContext *s = ctx->priv;
int64_t ts;
@@ -535,7 +537,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *ref)
double end = TS2T(interval->end_ts, AV_TIME_BASE_Q);
double current = TS2T(ref->pts, inlink->time_base);
- var_values[VAR_N] = inlink->frame_count_in;
+ var_values[VAR_N] = inl->frame_count_in;
#if FF_API_FRAME_PKT
FF_DISABLE_DEPRECATION_WARNINGS
var_values[VAR_POS] = ref->pkt_pos == -1 ? NAN : ref->pkt_pos;
diff --git a/libavfilter/f_streamselect.c b/libavfilter/f_streamselect.c
index 285e7d7315..07d55a7506 100644
--- a/libavfilter/f_streamselect.c
+++ b/libavfilter/f_streamselect.c
@@ -65,11 +65,12 @@ static int process_frame(FFFrameSync *fs)
for (j = 0; j < ctx->nb_inputs; j++) {
for (i = 0; i < s->nb_map; i++) {
+ FilterLink *outl = ff_filter_link(ctx->outputs[i]);
if (s->map[i] == j) {
AVFrame *out;
if (s->is_audio && s->last_pts[j] == in[j]->pts &&
- ctx->outputs[i]->frame_count_in > 0)
+ outl->frame_count_in > 0)
continue;
out = av_frame_clone(in[j]);
if (!out)
diff --git a/libavfilter/filters.h b/libavfilter/filters.h
index fc65f1df20..3f591e6f9d 100644
--- a/libavfilter/filters.h
+++ b/libavfilter/filters.h
@@ -71,6 +71,16 @@ typedef struct FilterLink {
int max_samples;
/**
+ * Number of past frames sent through the link.
+ */
+ int64_t frame_count_in, frame_count_out;
+
+ /**
+ * Number of past samples sent through the link.
+ */
+ int64_t sample_count_in, sample_count_out;
+
+ /**
* Frame rate of the stream on the link, or 1/0 if unknown or variable.
*
* May be set by the link source filter in its config_props(); if left to
diff --git a/libavfilter/qrencode.c b/libavfilter/qrencode.c
index f308de7646..d184ac994d 100644
--- a/libavfilter/qrencode.c
+++ b/libavfilter/qrencode.c
@@ -780,12 +780,13 @@ static int qrencode_query_formats(AVFilterContext *ctx)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
+ FilterLink *inl = ff_filter_link(inlink);
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
QREncodeContext *qr = ctx->priv;
int ret;
- V(n) = inlink->frame_count_out;
+ V(n) = inl->frame_count_out;
V(t) = frame->pts == AV_NOPTS_VALUE ?
NAN : frame->pts * av_q2d(inlink->time_base);
V(pict_type) = frame->pict_type;
diff --git a/libavfilter/vf_bbox.c b/libavfilter/vf_bbox.c
index 02893d500d..2f6edf9393 100644
--- a/libavfilter/vf_bbox.c
+++ b/libavfilter/vf_bbox.c
@@ -28,6 +28,7 @@
#include "libavutil/timestamp.h"
#include "avfilter.h"
#include "bbox.h"
+#include "filters.h"
#include "internal.h"
typedef struct BBoxContext {
@@ -75,6 +76,7 @@ static const enum AVPixelFormat pix_fmts[] = {
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
+ FilterLink *inl = ff_filter_link(inlink);
AVFilterContext *ctx = inlink->dst;
BBoxContext *bbox = ctx->priv;
FFBoundingBox box;
@@ -88,7 +90,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
h = box.y2 - box.y1 + 1;
av_log(ctx, AV_LOG_INFO,
- "n:%"PRId64" pts:%s pts_time:%s", inlink->frame_count_out,
+ "n:%"PRId64" pts:%s pts_time:%s", inl->frame_count_out,
av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base));
if (has_bbox) {
diff --git a/libavfilter/vf_blackdetect.c b/libavfilter/vf_blackdetect.c
index 23ff8600ca..973216e883 100644
--- a/libavfilter/vf_blackdetect.c
+++ b/libavfilter/vf_blackdetect.c
@@ -30,6 +30,7 @@
#include "libavutil/pixdesc.h"
#include "libavutil/timestamp.h"
#include "avfilter.h"
+#include "filters.h"
#include "internal.h"
#include "video.h"
@@ -173,6 +174,7 @@ static int black_counter(AVFilterContext *ctx, void *arg,
static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
{
+ FilterLink *inl = ff_filter_link(inlink);
AVFilterContext *ctx = inlink->dst;
BlackDetectContext *s = ctx->priv;
double picture_black_ratio = 0;
@@ -195,7 +197,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
av_log(ctx, AV_LOG_DEBUG,
"frame:%"PRId64" picture_black_ratio:%f pts:%s t:%s type:%c\n",
- inlink->frame_count_out, picture_black_ratio,
+ inl->frame_count_out, picture_black_ratio,
av_ts2str(picref->pts), av_ts2timestr(picref->pts, &s->time_base),
av_get_picture_type_char(picref->pict_type));
diff --git a/libavfilter/vf_blend.c b/libavfilter/vf_blend.c
index d93daa1fac..a38cae2db3 100644
--- a/libavfilter/vf_blend.c
+++ b/libavfilter/vf_blend.c
@@ -174,10 +174,11 @@ static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
const uint8_t *top = td->top->data[td->plane];
const uint8_t *bottom = td->bottom->data[td->plane];
uint8_t *dst = td->dst->data[td->plane];
+ FilterLink *inl = ff_filter_link(td->inlink);
double values[VAR_VARS_NB];
SliceParams sliceparam = {.values = &values[0], .starty = slice_start, .e = td->param->e ? td->param->e[jobnr] : NULL};
- values[VAR_N] = td->inlink->frame_count_out;
+ values[VAR_N] = inl->frame_count_out;
values[VAR_T] = td->dst->pts == AV_NOPTS_VALUE ? NAN : td->dst->pts * av_q2d(td->inlink->time_base);
values[VAR_W] = td->w;
values[VAR_H] = td->h;
diff --git a/libavfilter/vf_blockdetect.c b/libavfilter/vf_blockdetect.c
index b7f68722fe..6b1f35c974 100644
--- a/libavfilter/vf_blockdetect.c
+++ b/libavfilter/vf_blockdetect.c
@@ -32,6 +32,8 @@
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
+
+#include "filters.h"
#include "internal.h"
#include "video.h"
@@ -198,6 +200,7 @@ static void set_meta(AVDictionary **metadata, const char *key, float d)
static int blockdetect_filter_frame(AVFilterLink *inlink, AVFrame *in)
{
+ FilterLink *inl = ff_filter_link(inlink);
AVFilterContext *ctx = inlink->dst;
BLKContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
@@ -236,7 +239,7 @@ static int blockdetect_filter_frame(AVFilterLink *inlink, AVFrame *in)
set_meta(metadata, "lavfi.block", block);
- s->nb_frames = inlink->frame_count_in;
+ s->nb_frames = inl->frame_count_in;
return ff_filter_frame(outlink, in);
}
diff --git a/libavfilter/vf_blurdetect.c b/libavfilter/vf_blurdetect.c
index f1c5be6b0f..2b85a9e95a 100644
--- a/libavfilter/vf_blurdetect.c
+++ b/libavfilter/vf_blurdetect.c
@@ -34,6 +34,8 @@
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/qsort.h"
+
+#include "filters.h"
#include "internal.h"
#include "edge_common.h"
#include "video.h"
@@ -256,6 +258,7 @@ static void set_meta(AVDictionary **metadata, const char *key, float d)
static int blurdetect_filter_frame(AVFilterLink *inlink, AVFrame *in)
{
+ FilterLink *inl = ff_filter_link(inlink);
AVFilterContext *ctx = inlink->dst;
BLRContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
@@ -316,7 +319,7 @@ static int blurdetect_filter_frame(AVFilterLink *inlink, AVFrame *in)
set_meta(metadata, "lavfi.blur", blur);
- s->nb_frames = inlink->frame_count_in;
+ s->nb_frames = inl->frame_count_in;
return ff_filter_frame(outlink, in);
}
diff --git a/libavfilter/vf_crop.c b/libavfilter/vf_crop.c
index d4966323f5..a16fc22743 100644
--- a/libavfilter/vf_crop.c
+++ b/libavfilter/vf_crop.c
@@ -26,6 +26,7 @@
#include <stdio.h>
#include "avfilter.h"
+#include "filters.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
@@ -255,12 +256,13 @@ static int config_output(AVFilterLink *link)
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
+ FilterLink *l = ff_filter_link(link);
AVFilterContext *ctx = link->dst;
CropContext *s = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
int i;
- s->var_values[VAR_N] = link->frame_count_out;
+ s->var_values[VAR_N] = l->frame_count_out;
s->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?
NAN : frame->pts * av_q2d(link->time_base);
#if FF_API_FRAME_PKT
diff --git a/libavfilter/vf_datascope.c b/libavfilter/vf_datascope.c
index 52b1939cd2..b4496eca2d 100644
--- a/libavfilter/vf_datascope.c
+++ b/libavfilter/vf_datascope.c
@@ -25,6 +25,7 @@
#include "libavutil/xga_font_data.h"
#include "avfilter.h"
#include "drawutils.h"
+#include "filters.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
@@ -1038,6 +1039,7 @@ static void draw_scope(OscilloscopeContext *s, int x0, int y0, int x1, int y1,
static int oscilloscope_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
+ FilterLink *inl = ff_filter_link(inlink);
AVFilterContext *ctx = inlink->dst;
OscilloscopeContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
@@ -1047,7 +1049,7 @@ static int oscilloscope_filter_frame(AVFilterLink *inlink, AVFrame *frame)
int i, c;
s->nb_values = 0;
- draw_scope(s, s->x1, s->y1, s->x2, s->y2, frame, s->values, inlink->frame_count_in & 1);
+ draw_scope(s, s->x1, s->y1, s->x2, s->y2, frame, s->values, inl->frame_count_in & 1);
ff_blend_rectangle(&s->draw, &s->dark, frame->data, frame->linesize,
frame->width, frame->height,
s->ox, s->oy, s->width, s->height + 20 * s->statistics);
diff --git a/libavfilter/vf_delogo.c b/libavfilter/vf_delogo.c
index c049f273b0..a321b805a2 100644
--- a/libavfilter/vf_delogo.c
+++ b/libavfilter/vf_delogo.c
@@ -33,6 +33,7 @@
#include "libavutil/pixdesc.h"
#include "libavutil/eval.h"
#include "avfilter.h"
+#include "filters.h"
#include "internal.h"
#include "video.h"
static const char * const var_names[] = {
@@ -286,6 +287,7 @@ static int config_input(AVFilterLink *inlink)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
+ FilterLink *inl = ff_filter_link(inlink);
DelogoContext *s = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
@@ -297,7 +299,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
AVRational sar;
int ret;
- s->var_values[VAR_N] = inlink->frame_count_out;
+ s->var_values[VAR_N] = inl->frame_count_out;
s->var_values[VAR_T] = TS2T(in->pts, inlink->time_base);
s->x = av_expr_eval(s->x_pexpr, s->var_values, s);
s->y = av_expr_eval(s->y_pexpr, s->var_values, s);
diff --git a/libavfilter/vf_detelecine.c b/libavfilter/vf_detelecine.c
index eb81e3424e..ecf1de7da1 100644
--- a/libavfilter/vf_detelecine.c
+++ b/libavfilter/vf_detelecine.c
@@ -193,6 +193,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
+ FilterLink *outl = ff_filter_link(outlink);
DetelecineContext *s = ctx->priv;
int i, len = 0, ret = 0, out = 0;
@@ -332,7 +333,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
av_frame_copy_props(frame, inpicref);
frame->pts = ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time) +
- av_rescale(outlink->frame_count_in, s->ts_unit.num,
+ av_rescale(outl->frame_count_in, s->ts_unit.num,
s->ts_unit.den);
ret = ff_filter_frame(outlink, frame);
}
diff --git a/libavfilter/vf_drawtext.c b/libavfilter/vf_drawtext.c
index 0ac0a0721c..78bda5b122 100644
--- a/libavfilter/vf_drawtext.c
+++ b/libavfilter/vf_drawtext.c
@@ -60,6 +60,7 @@
#include "libavutil/detection_bbox.h"
#include "avfilter.h"
#include "drawutils.h"
+#include "filters.h"
#include "formats.h"
#include "internal.h"
#include "textutils.h"
@@ -1553,6 +1554,7 @@ static int draw_text(AVFilterContext *ctx, AVFrame *frame)
{
DrawTextContext *s = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
+ FilterLink *inl = ff_filter_link(inlink);
int x = 0, y = 0, ret;
int shift_x64, shift_y64;
int x64, y64;
@@ -1596,7 +1598,7 @@ static int draw_text(AVFilterContext *ctx, AVFrame *frame)
if (s->tc_opt_string) {
char tcbuf[AV_TIMECODE_STR_SIZE];
- av_timecode_make_string(&s->tc, tcbuf, inlink->frame_count_out);
+ av_timecode_make_string(&s->tc, tcbuf, inl->frame_count_out);
av_bprint_clear(bp);
av_bprintf(bp, "%s%s", s->text, tcbuf);
}
@@ -1828,6 +1830,7 @@ static int draw_text(AVFilterContext *ctx, AVFrame *frame)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
+ FilterLink *inl = ff_filter_link(inlink);
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
DrawTextContext *s = ctx->priv;
@@ -1848,7 +1851,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
}
}
- if (s->reload && !(inlink->frame_count_out % s->reload)) {
+ if (s->reload && !(inl->frame_count_out % s->reload)) {
if ((ret = ff_load_textfile(ctx, (const char *)s->textfile, &s->text, NULL)) < 0) {
av_frame_free(&frame);
return ret;
@@ -1862,7 +1865,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
#endif
}
- s->var_values[VAR_N] = inlink->frame_count_out + s->start_number;
+ s->var_values[VAR_N] = inl->frame_count_out + s->start_number;
s->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?
NAN : frame->pts * av_q2d(inlink->time_base);
diff --git a/libavfilter/vf_eq.c b/libavfilter/vf_eq.c
index 38a13b0cfb..377bc848bf 100644
--- a/libavfilter/vf_eq.c
+++ b/libavfilter/vf_eq.c
@@ -221,6 +221,7 @@ static const enum AVPixelFormat pixel_fmts_eq[] = {
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
+ FilterLink *inl = ff_filter_link(inlink);
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = inlink->dst->outputs[0];
EQContext *eq = ctx->priv;
@@ -237,7 +238,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
av_frame_copy_props(out, in);
desc = av_pix_fmt_desc_get(inlink->format);
- eq->var_values[VAR_N] = inlink->frame_count_out;
+ eq->var_values[VAR_N] = inl->frame_count_out;
#if FF_API_FRAME_PKT
FF_DISABLE_DEPRECATION_WARNINGS
{
diff --git a/libavfilter/vf_fade.c b/libavfilter/vf_fade.c
index cd64a82f25..6db60f06f5 100644
--- a/libavfilter/vf_fade.c
+++ b/libavfilter/vf_fade.c
@@ -33,6 +33,7 @@
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "drawutils.h"
+#include "filters.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
@@ -442,6 +443,7 @@ static int config_input(AVFilterLink *inlink)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
+ FilterLink *inl = ff_filter_link(inlink);
AVFilterContext *ctx = inlink->dst;
FadeContext *s = ctx->priv;
@@ -449,7 +451,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
if (s->fade_state == VF_FADE_WAITING) {
s->factor=0;
if (frame->pts >= s->start_time_pts
- && inlink->frame_count_out >= s->start_frame) {
+ && inl->frame_count_out >= s->start_frame) {
// Time to start fading
s->fade_state = VF_FADE_FADING;
@@ -460,15 +462,15 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
// Save start frame in case we are starting based on time and fading based on frames
if (s->start_time_pts != 0 && s->start_frame == 0) {
- s->start_frame = inlink->frame_count_out;
+ s->start_frame = inl->frame_count_out;
}
}
}
if (s->fade_state == VF_FADE_FADING) {
if (s->duration_pts == 0) {
// Fading based on frame count
- s->factor = (inlink->frame_count_out - s->start_frame) * s->fade_per_frame;
- if (inlink->frame_count_out > s->start_frame + s->nb_frames) {
+ s->factor = (inl->frame_count_out - s->start_frame) * s->fade_per_frame;
+ if (inl->frame_count_out > s->start_frame + s->nb_frames) {
s->fade_state = VF_FADE_DONE;
}
diff --git a/libavfilter/vf_fftfilt.c b/libavfilter/vf_fftfilt.c
index af0e1f51d2..e2a8adb7b4 100644
--- a/libavfilter/vf_fftfilt.c
+++ b/libavfilter/vf_fftfilt.c
@@ -24,6 +24,7 @@
* FFT domain filtering.
*/
+#include "filters.h"
#include "internal.h"
#include "video.h"
#include "libavutil/common.h"
@@ -284,10 +285,11 @@ static av_cold int initialize(AVFilterContext *ctx)
static void do_eval(FFTFILTContext *s, AVFilterLink *inlink, int plane)
{
+ FilterLink *l = ff_filter_link(inlink);
double values[VAR_VARS_NB];
int i, j;
- values[VAR_N] = inlink->frame_count_out;
+ values[VAR_N] = l->frame_count_out;
values[VAR_W] = s->planewidth[plane];
values[VAR_H] = s->planeheight[plane];
values[VAR_WS] = s->rdft_hlen[plane];
diff --git a/libavfilter/vf_fieldhint.c b/libavfilter/vf_fieldhint.c
index 8d0e715749..0320484d5c 100644
--- a/libavfilter/vf_fieldhint.c
+++ b/libavfilter/vf_fieldhint.c
@@ -25,6 +25,7 @@
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
+#include "filters.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
@@ -113,8 +114,10 @@ static int config_input(AVFilterLink *inlink)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
+ FilterLink *inl = ff_filter_link(inlink);
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
+ FilterLink *outl = ff_filter_link(outlink);
FieldHintContext *s = ctx->priv;
AVFrame *out, *top, *bottom;
char buf[1024] = { 0 };
@@ -152,9 +155,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
switch (s->mode) {
case ABSOLUTE_HINT:
- if (tf > outlink->frame_count_in + 1 || tf < FFMAX(0, outlink->frame_count_in - 1) ||
- bf > outlink->frame_count_in + 1 || bf < FFMAX(0, outlink->frame_count_in - 1)) {
- av_log(ctx, AV_LOG_ERROR, "Out of range frames %"PRId64" and/or %"PRId64" on line %"PRId64" for %"PRId64". input frame.\n", tf, bf, s->line, inlink->frame_count_out);
+ if (tf > outl->frame_count_in + 1 || tf < FFMAX(0, outl->frame_count_in - 1) ||
+ bf > outl->frame_count_in + 1 || bf < FFMAX(0, outl->frame_count_in - 1)) {
+ av_log(ctx, AV_LOG_ERROR, "Out of range frames %"PRId64" and/or %"PRId64" on line %"PRId64" for %"PRId64". input frame.\n", tf, bf, s->line, inl->frame_count_out);
return AVERROR_INVALIDDATA;
}
break;
@@ -162,7 +165,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
case RELATIVE_HINT:
if (tf > 1 || tf < -1 ||
bf > 1 || bf < -1) {
- av_log(ctx, AV_LOG_ERROR, "Out of range %"PRId64" and/or %"PRId64" on line %"PRId64" for %"PRId64". input frame.\n", tf, bf, s->line, inlink->frame_count_out);
+ av_log(ctx, AV_LOG_ERROR, "Out of range %"PRId64" and/or %"PRId64" on line %"PRId64" for %"PRId64". input frame.\n", tf, bf, s->line, inl->frame_count_out);
return AVERROR_INVALIDDATA;
}
break;
@@ -175,7 +178,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
fseek(s->hint, 0, SEEK_SET);
continue;
}
- av_log(ctx, AV_LOG_ERROR, "Missing entry for %"PRId64". input frame.\n", inlink->frame_count_out);
+ av_log(ctx, AV_LOG_ERROR, "Missing entry for %"PRId64". input frame.\n", inl->frame_count_out);
return AVERROR_INVALIDDATA;
}
}
@@ -187,8 +190,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
switch (s->mode) {
case ABSOLUTE_HINT:
- top = s->frame[tf - outlink->frame_count_in + 1];
- bottom = s->frame[bf - outlink->frame_count_in + 1];
+ top = s->frame[tf - outl->frame_count_in + 1];
+ bottom = s->frame[bf - outl->frame_count_in + 1];
break;
case PATTERN_HINT:
case RELATIVE_HINT:
diff --git a/libavfilter/vf_fieldmatch.c b/libavfilter/vf_fieldmatch.c
index 1625c08306..555ae81bc6 100644
--- a/libavfilter/vf_fieldmatch.c
+++ b/libavfilter/vf_fieldmatch.c
@@ -681,6 +681,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
+ FilterLink *outl = ff_filter_link(outlink);
FieldMatchContext *fm = ctx->priv;
int combs[] = { -1, -1, -1, -1, -1 };
int order, field, i, match, interlaced_frame, sc = 0, ret = 0;
@@ -753,7 +754,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
/* scene change check */
if (fm->combmatch == COMBMATCH_SC) {
- if (fm->lastn == outlink->frame_count_in - 1) {
+ if (fm->lastn == outl->frame_count_in - 1) {
if (fm->lastscdiff > fm->scthresh)
sc = 1;
} else if (luma_abs_diff(fm->prv, fm->src) > fm->scthresh) {
@@ -761,7 +762,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
if (!sc) {
- fm->lastn = outlink->frame_count_in;
+ fm->lastn = outl->frame_count_in;
fm->lastscdiff = luma_abs_diff(fm->src, fm->nxt);
sc = fm->lastscdiff > fm->scthresh;
}
@@ -831,7 +832,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
if (interlaced_frame) {
dst->flags |= AV_FRAME_FLAG_INTERLACED;
av_log(ctx, AV_LOG_WARNING, "Frame #%"PRId64" at %s is still interlaced\n",
- outlink->frame_count_in, av_ts2timestr(in->pts, &inlink->time_base));
+ outl->frame_count_in, av_ts2timestr(in->pts, &inlink->time_base));
#if FF_API_INTERLACED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
dst->top_field_first = field;
diff --git a/libavfilter/vf_find_rect.c b/libavfilter/vf_find_rect.c
index f50052ded2..bfd6cc583c 100644
--- a/libavfilter/vf_find_rect.c
+++ b/libavfilter/vf_find_rect.c
@@ -24,6 +24,8 @@
#include "libavutil/mem.h"
#include "libavutil/opt.h"
+
+#include "filters.h"
#include "internal.h"
#include "video.h"
@@ -172,6 +174,7 @@ static float search(FOCContext *foc, int pass, int maxpass, int xmin, int xmax,
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
+ FilterLink *inl = ff_filter_link(inlink);
AVFilterContext *ctx = inlink->dst;
FOCContext *foc = ctx->priv;
float best_score;
@@ -208,7 +211,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
av_log(ctx, AV_LOG_INFO, "Found at n=%"PRId64" pts_time=%f x=%d y=%d with score=%f\n",
- inlink->frame_count_out, TS2D(in->pts) * av_q2d(inlink->time_base),
+ inl->frame_count_out, TS2D(in->pts) * av_q2d(inlink->time_base),
best_x, best_y, best_score);
foc->last_x = best_x;
foc->last_y = best_y;
diff --git a/libavfilter/vf_framestep.c b/libavfilter/vf_framestep.c
index da69e2ba6c..115ac45fe4 100644
--- a/libavfilter/vf_framestep.c
+++ b/libavfilter/vf_framestep.c
@@ -64,9 +64,10 @@ static int config_output_props(AVFilterLink *outlink)
static int filter_frame(AVFilterLink *inlink, AVFrame *ref)
{
+ FilterLink *inl = ff_filter_link(inlink);
FrameStepContext *framestep = inlink->dst->priv;
- if (!(inlink->frame_count_out % framestep->frame_step)) {
+ if (!(inl->frame_count_out % framestep->frame_step)) {
return ff_filter_frame(inlink->dst->outputs[0], ref);
} else {
av_frame_free(&ref);
diff --git a/libavfilter/vf_freezeframes.c b/libavfilter/vf_freezeframes.c
index b630ad85fe..fffe86a2df 100644
--- a/libavfilter/vf_freezeframes.c
+++ b/libavfilter/vf_freezeframes.c
@@ -74,12 +74,14 @@ static int config_output(AVFilterLink *outlink)
static int activate(AVFilterContext *ctx)
{
+ FilterLink *inl0 = ff_filter_link(ctx->inputs[0]);
+ FilterLink *inl1 = ff_filter_link(ctx->inputs[1]);
AVFilterLink *outlink = ctx->outputs[0];
FreezeFramesContext *s = ctx->priv;
AVFrame *frame = NULL;
- int drop = ctx->inputs[0]->frame_count_out >= s->first &&
- ctx->inputs[0]->frame_count_out <= s->last;
- int replace = ctx->inputs[1]->frame_count_out == s->replace;
+ int drop = inl0->frame_count_out >= s->first &&
+ inl0->frame_count_out <= s->last;
+ int replace = inl1->frame_count_out == s->replace;
int ret;
FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);
diff --git a/libavfilter/vf_geq.c b/libavfilter/vf_geq.c
index 0efbce4d4f..b23c3e170c 100644
--- a/libavfilter/vf_geq.c
+++ b/libavfilter/vf_geq.c
@@ -32,6 +32,8 @@
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
+
+#include "filters.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
@@ -453,13 +455,14 @@ static int slice_geq_filter(AVFilterContext *ctx, void *arg, int jobnr, int nb_j
static int geq_filter_frame(AVFilterLink *inlink, AVFrame *in)
{
int plane;
+ FilterLink *inl = ff_filter_link(inlink);
AVFilterContext *ctx = inlink->dst;
const int nb_threads = FFMIN(MAX_NB_THREADS, ff_filter_get_nb_threads(ctx));
GEQContext *geq = ctx->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out;
- geq->values[VAR_N] = inlink->frame_count_out,
+ geq->values[VAR_N] = inl->frame_count_out,
geq->values[VAR_T] = in->pts == AV_NOPTS_VALUE ? NAN : in->pts * av_q2d(inlink->time_base),
geq->picref = in;
diff --git a/libavfilter/vf_hue.c b/libavfilter/vf_hue.c
index 049f835c61..42f774772e 100644
--- a/libavfilter/vf_hue.c
+++ b/libavfilter/vf_hue.c
@@ -360,6 +360,7 @@ static void apply_lut10(HueContext *s,
static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
{
+ FilterLink *inl = ff_filter_link(inlink);
HueContext *hue = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *outpic;
@@ -381,7 +382,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
av_frame_copy_props(outpic, inpic);
}
- hue->var_values[VAR_N] = inlink->frame_count_out;
+ hue->var_values[VAR_N] = inl->frame_count_out;
hue->var_values[VAR_T] = TS2T(inpic->pts, inlink->time_base);
hue->var_values[VAR_PTS] = TS2D(inpic->pts);
diff --git a/libavfilter/vf_libplacebo.c b/libavfilter/vf_libplacebo.c
index 64c1ad6d4d..268bba8e38 100644
--- a/libavfilter/vf_libplacebo.c
+++ b/libavfilter/vf_libplacebo.c
@@ -742,6 +742,7 @@ static const AVFrame *ref_frame(const struct pl_frame_mix *mix)
static void update_crops(AVFilterContext *ctx, LibplaceboInput *in,
struct pl_frame *target, double target_pts)
{
+ FilterLink *outl = ff_filter_link(ctx->outputs[0]);
LibplaceboContext *s = ctx->priv;
const AVFrame *ref = ref_frame(&in->mix);
@@ -761,7 +762,7 @@ static void update_crops(AVFilterContext *ctx, LibplaceboInput *in,
av_q2d(in->link->sample_aspect_ratio) : 1.0;
s->var_values[VAR_IN_T] = s->var_values[VAR_T] = image_pts;
s->var_values[VAR_OUT_T] = s->var_values[VAR_OT] = target_pts;
- s->var_values[VAR_N] = ctx->outputs[0]->frame_count_out;
+ s->var_values[VAR_N] = outl->frame_count_out;
/* Clear these explicitly to avoid leaking previous frames' state */
s->var_values[VAR_CROP_W] = s->var_values[VAR_CW] = NAN;
@@ -1000,6 +1001,7 @@ static int libplacebo_activate(AVFilterContext *ctx)
int ret, ok = 0, retry = 0;
LibplaceboContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
+ FilterLink *outl = ff_filter_link(outlink);
int64_t pts, out_pts;
FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);
@@ -1012,7 +1014,7 @@ static int libplacebo_activate(AVFilterContext *ctx)
if (ff_outlink_frame_wanted(outlink)) {
if (s->fps.num) {
- out_pts = outlink->frame_count_out;
+ out_pts = outl->frame_count_out;
} else {
/* Determine the PTS of the next frame from any active input */
out_pts = INT64_MAX;
diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c
index 349b8d588c..d8ac36495d 100644
--- a/libavfilter/vf_overlay.c
+++ b/libavfilter/vf_overlay.c
@@ -36,6 +36,7 @@
#include "libavutil/opt.h"
#include "libavutil/timestamp.h"
#include "internal.h"
+#include "filters.h"
#include "drawutils.h"
#include "framesync.h"
#include "video.h"
@@ -881,6 +882,7 @@ static int do_blend(FFFrameSync *fs)
AVFrame *mainpic, *second;
OverlayContext *s = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
+ FilterLink *inl = ff_filter_link(inlink);
int ret;
ret = ff_framesync_dualinput_get_writable(fs, &mainpic, &second);
@@ -891,7 +893,7 @@ static int do_blend(FFFrameSync *fs)
if (s->eval_mode == EVAL_MODE_FRAME) {
- s->var_values[VAR_N] = inlink->frame_count_out;
+ s->var_values[VAR_N] = inl->frame_count_out;
s->var_values[VAR_T] = mainpic->pts == AV_NOPTS_VALUE ?
NAN : mainpic->pts * av_q2d(inlink->time_base);
#if FF_API_FRAME_PKT
diff --git a/libavfilter/vf_overlay_cuda.c b/libavfilter/vf_overlay_cuda.c
index 1aee77b17c..5bc1e275fa 100644
--- a/libavfilter/vf_overlay_cuda.c
+++ b/libavfilter/vf_overlay_cuda.c
@@ -236,6 +236,7 @@ static int overlay_cuda_blend(FFFrameSync *fs)
OverlayCUDAContext *ctx = avctx->priv;
AVFilterLink *outlink = avctx->outputs[0];
AVFilterLink *inlink = avctx->inputs[0];
+ FilterLink *inl = ff_filter_link(inlink);
CudaFunctions *cu = ctx->hwctx->internal->cuda_dl;
CUcontext dummy, cuda_ctx = ctx->hwctx->cuda_ctx;
@@ -270,7 +271,7 @@ static int overlay_cuda_blend(FFFrameSync *fs)
}
if (ctx->eval_mode == EVAL_MODE_FRAME) {
- ctx->var_values[VAR_N] = inlink->frame_count_out;
+ ctx->var_values[VAR_N] = inl->frame_count_out;
ctx->var_values[VAR_T] = input_main->pts == AV_NOPTS_VALUE ?
NAN : input_main->pts * av_q2d(inlink->time_base);
diff --git a/libavfilter/vf_perspective.c b/libavfilter/vf_perspective.c
index a97b97bcb8..fedf8a03f1 100644
--- a/libavfilter/vf_perspective.c
+++ b/libavfilter/vf_perspective.c
@@ -26,6 +26,7 @@
#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
#include "avfilter.h"
+#include "filters.h"
#include "internal.h"
#include "video.h"
@@ -122,13 +123,15 @@ enum { VAR_W, VAR_H, VAR_IN, VAR_ON, VAR_VARS_
static int calc_persp_luts(AVFilterContext *ctx, AVFilterLink *inlink)
{
+ FilterLink *inl = ff_filter_link(inlink);
PerspectiveContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
+ FilterLink *outl = ff_filter_link(outlink);
double (*ref)[2] = s->ref;
double values[VAR_VARS_NB] = { [VAR_W] = inlink->w, [VAR_H] = inlink->h,
- [VAR_IN] = inlink->frame_count_out + 1,
- [VAR_ON] = outlink->frame_count_in + 1 };
+ [VAR_IN] = inl->frame_count_out + 1,
+ [VAR_ON] = outl->frame_count_in + 1 };
const int h = values[VAR_H];
const int w = values[VAR_W];
double x0, x1, x2, x3, x4, x5, x6, x7, x8, q;
diff --git a/libavfilter/vf_quirc.c b/libavfilter/vf_quirc.c
index 62eb29b7ce..760f5d97de 100644
--- a/libavfilter/vf_quirc.c
+++ b/libavfilter/vf_quirc.c
@@ -89,6 +89,7 @@ static int query_formats(AVFilterContext *ctx)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
+ FilterLink *inl = ff_filter_link(inlink);
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
QuircContext *quirc = ctx->priv;
@@ -104,7 +105,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
codes_count = quirc_count(quirc->quirc);
av_log(ctx, AV_LOG_VERBOSE,
- "Found count %d codes in image #%ld\n", codes_count, inlink->frame_count_out);
+ "Found count %d codes in image #%ld\n", codes_count, inl->frame_count_out);
if (codes_count) {
int i, j;
diff --git a/libavfilter/vf_rotate.c b/libavfilter/vf_rotate.c
index 3e65f26552..2c598921d1 100644
--- a/libavfilter/vf_rotate.c
+++ b/libavfilter/vf_rotate.c
@@ -33,6 +33,7 @@
#include "avfilter.h"
#include "drawutils.h"
+#include "filters.h"
#include "internal.h"
#include "video.h"
@@ -501,6 +502,7 @@ static int filter_slice(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
+ FilterLink *inl = ff_filter_link(inlink);
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
AVFrame *out;
@@ -515,7 +517,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
av_frame_copy_props(out, in);
- rot->var_values[VAR_N] = inlink->frame_count_out;
+ rot->var_values[VAR_N] = inl->frame_count_out;
rot->var_values[VAR_T] = TS2T(in->pts, inlink->time_base);
rot->angle = res = av_expr_eval(rot->angle_expr, rot->var_values, rot);
diff --git a/libavfilter/vf_scale.c b/libavfilter/vf_scale.c
index febb3178de..454fb812e4 100644
--- a/libavfilter/vf_scale.c
+++ b/libavfilter/vf_scale.c
@@ -879,6 +879,7 @@ static int scale_field(ScaleContext *scale, AVFrame *dst, AVFrame *src,
static int scale_frame(AVFilterLink *link, AVFrame **frame_in,
AVFrame **frame_out)
{
+ FilterLink *inl = ff_filter_link(link);
AVFilterContext *ctx = link->dst;
ScaleContext *scale = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
@@ -938,7 +939,7 @@ static int scale_frame(AVFilterLink *link, AVFrame **frame_in,
}
if (ctx->filter == &ff_vf_scale2ref) {
- scale->var_values[VAR_S2R_MAIN_N] = link->frame_count_out;
+ scale->var_values[VAR_S2R_MAIN_N] = inl->frame_count_out;
scale->var_values[VAR_S2R_MAIN_T] = TS2T(in->pts, link->time_base);
#if FF_API_FRAME_PKT
FF_DISABLE_DEPRECATION_WARNINGS
@@ -946,7 +947,7 @@ FF_DISABLE_DEPRECATION_WARNINGS
FF_ENABLE_DEPRECATION_WARNINGS
#endif
} else {
- scale->var_values[VAR_N] = link->frame_count_out;
+ scale->var_values[VAR_N] = inl->frame_count_out;
scale->var_values[VAR_T] = TS2T(in->pts, link->time_base);
#if FF_API_FRAME_PKT
FF_DISABLE_DEPRECATION_WARNINGS
@@ -1035,6 +1036,8 @@ static int do_scale(FFFrameSync *fs)
if (ref) {
AVFilterLink *reflink = ctx->inputs[1];
+ FilterLink *rl = ff_filter_link(reflink);
+
frame_changed = ref->width != reflink->w ||
ref->height != reflink->h ||
ref->format != reflink->format ||
@@ -1058,7 +1061,7 @@ static int do_scale(FFFrameSync *fs)
}
if (scale->eval_mode == EVAL_MODE_FRAME) {
- scale->var_values[VAR_REF_N] = reflink->frame_count_out;
+ scale->var_values[VAR_REF_N] = rl->frame_count_out;
scale->var_values[VAR_REF_T] = TS2T(ref->pts, reflink->time_base);
#if FF_API_FRAME_PKT
FF_DISABLE_DEPRECATION_WARNINGS
@@ -1097,6 +1100,7 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
static int filter_frame_ref(AVFilterLink *link, AVFrame *in)
{
+ FilterLink *l = ff_filter_link(link);
ScaleContext *scale = link->dst->priv;
AVFilterLink *outlink = link->dst->outputs[1];
int frame_changed;
@@ -1122,7 +1126,7 @@ static int filter_frame_ref(AVFilterLink *link, AVFrame *in)
}
if (scale->eval_mode == EVAL_MODE_FRAME) {
- scale->var_values[VAR_N] = link->frame_count_out;
+ scale->var_values[VAR_N] = l->frame_count_out;
scale->var_values[VAR_T] = TS2T(in->pts, link->time_base);
#if FF_API_FRAME_PKT
FF_DISABLE_DEPRECATION_WARNINGS
diff --git a/libavfilter/vf_scale_npp.c b/libavfilter/vf_scale_npp.c
index 7b67d33296..d229083e8a 100644
--- a/libavfilter/vf_scale_npp.c
+++ b/libavfilter/vf_scale_npp.c
@@ -791,6 +791,7 @@ static int (*const nppscale_process[])(AVFilterContext *ctx, NPPScaleStageContex
static int nppscale_scale(AVFilterLink *link, AVFrame *out, AVFrame *in)
{
+ FilterLink *inl = ff_filter_link(link);
AVFilterContext *ctx = link->dst;
NPPScaleContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
@@ -840,7 +841,7 @@ static int nppscale_scale(AVFilterLink *link, AVFrame *out, AVFrame *in)
}
if (ctx->filter == &ff_vf_scale2ref_npp) {
- s->var_values[VAR_S2R_MAIN_N] = link->frame_count_out;
+ s->var_values[VAR_S2R_MAIN_N] = inl->frame_count_out;
s->var_values[VAR_S2R_MAIN_T] = TS2T(in->pts, link->time_base);
#if FF_API_FRAME_PKT
FF_DISABLE_DEPRECATION_WARNINGS
@@ -848,7 +849,7 @@ FF_DISABLE_DEPRECATION_WARNINGS
FF_ENABLE_DEPRECATION_WARNINGS
#endif
} else {
- s->var_values[VAR_N] = link->frame_count_out;
+ s->var_values[VAR_N] = inl->frame_count_out;
s->var_values[VAR_T] = TS2T(in->pts, link->time_base);
#if FF_API_FRAME_PKT
FF_DISABLE_DEPRECATION_WARNINGS
@@ -947,6 +948,7 @@ fail:
static int nppscale_filter_frame_ref(AVFilterLink *link, AVFrame *in)
{
+ FilterLink *inl = ff_filter_link(link);
NPPScaleContext *scale = link->dst->priv;
AVFilterLink *outlink = link->dst->outputs[1];
int frame_changed;
@@ -968,7 +970,7 @@ static int nppscale_filter_frame_ref(AVFilterLink *link, AVFrame *in)
}
if (scale->eval_mode == EVAL_MODE_FRAME) {
- scale->var_values[VAR_N] = link->frame_count_out;
+ scale->var_values[VAR_N] = inl->frame_count_out;
scale->var_values[VAR_T] = TS2T(in->pts, link->time_base);
#if FF_API_FRAME_PKT
FF_DISABLE_DEPRECATION_WARNINGS
diff --git a/libavfilter/vf_showinfo.c b/libavfilter/vf_showinfo.c
index f6ffcc39bb..6118e1a19a 100644
--- a/libavfilter/vf_showinfo.c
+++ b/libavfilter/vf_showinfo.c
@@ -728,6 +728,7 @@ static void update_sample_stats(int depth, int be, const uint8_t *src, int len,
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
+ FilterLink *inl = ff_filter_link(inlink);
AVFilterContext *ctx = inlink->dst;
ShowInfoContext *s = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
@@ -761,7 +762,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
"n:%4"PRId64" pts:%7s pts_time:%-7s duration:%7"PRId64
" duration_time:%-7s "
"fmt:%s cl:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c ",
- inlink->frame_count_out,
+ inl->frame_count_out,
av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base),
frame->duration, av_ts2timestr(frame->duration, &inlink->time_base),
desc->name, av_chroma_location_name(frame->chroma_location),
diff --git a/libavfilter/vf_swaprect.c b/libavfilter/vf_swaprect.c
index 54400f0304..e94e23b02b 100644
--- a/libavfilter/vf_swaprect.c
+++ b/libavfilter/vf_swaprect.c
@@ -25,6 +25,7 @@
#include "libavutil/opt.h"
#include "avfilter.h"
+#include "filters.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
@@ -78,6 +79,7 @@ enum { VAR_W, VAR_H, VAR_A, VAR_N, VAR_T,
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
+ FilterLink *inl = ff_filter_link(inlink);
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
SwapRectContext *s = ctx->priv;
@@ -97,7 +99,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
var_values[VAR_A] = (float) inlink->w / inlink->h;
var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ? av_q2d(inlink->sample_aspect_ratio) : 1;
var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR];
- var_values[VAR_N] = inlink->frame_count_out;
+ var_values[VAR_N] = inl->frame_count_out;
var_values[VAR_T] = in->pts == AV_NOPTS_VALUE ? NAN : in->pts * av_q2d(inlink->time_base);
#if FF_API_FRAME_PKT
FF_DISABLE_DEPRECATION_WARNINGS
diff --git a/libavfilter/vf_telecine.c b/libavfilter/vf_telecine.c
index fee3e43aff..652535142e 100644
--- a/libavfilter/vf_telecine.c
+++ b/libavfilter/vf_telecine.c
@@ -167,6 +167,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
+ FilterLink *outl = ff_filter_link(outlink);
TelecineContext *s = ctx->priv;
int i, len, ret = 0, nout = 0;
@@ -281,7 +282,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
else
frame->flags &= ~AV_FRAME_FLAG_TOP_FIELD_FIRST;
frame->pts = ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time) +
- av_rescale(outlink->frame_count_in, s->ts_unit.num,
+ av_rescale(outl->frame_count_in, s->ts_unit.num,
s->ts_unit.den);
ret = ff_filter_frame(outlink, frame);
}
diff --git a/libavfilter/vf_tinterlace.c b/libavfilter/vf_tinterlace.c
index 9c976eddd9..e864a84213 100644
--- a/libavfilter/vf_tinterlace.c
+++ b/libavfilter/vf_tinterlace.c
@@ -376,6 +376,7 @@ void copy_picture_field(TInterlaceContext *tinterlace,
static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
{
+ FilterLink *inl = ff_filter_link(inlink);
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
FilterLink *l = ff_filter_link(outlink);
@@ -418,12 +419,12 @@ FF_ENABLE_DEPRECATION_WARNINGS
copy_picture_field(tinterlace, out->data, out->linesize,
(const uint8_t **)cur->data, cur->linesize,
inlink->format, inlink->w, inlink->h,
- FIELD_UPPER_AND_LOWER, 1, tinterlace->mode == MODE_MERGEX2 ? (1 + inlink->frame_count_out) & 1 ? FIELD_LOWER : FIELD_UPPER : FIELD_UPPER, tinterlace->flags);
+ FIELD_UPPER_AND_LOWER, 1, tinterlace->mode == MODE_MERGEX2 ? (1 + inl->frame_count_out) & 1 ? FIELD_LOWER : FIELD_UPPER : FIELD_UPPER, tinterlace->flags);
/* write even frame lines into the lower field of the new frame */
copy_picture_field(tinterlace, out->data, out->linesize,
(const uint8_t **)next->data, next->linesize,
inlink->format, inlink->w, inlink->h,
- FIELD_UPPER_AND_LOWER, 1, tinterlace->mode == MODE_MERGEX2 ? (1 + inlink->frame_count_out) & 1 ? FIELD_UPPER : FIELD_LOWER : FIELD_LOWER, tinterlace->flags);
+ FIELD_UPPER_AND_LOWER, 1, tinterlace->mode == MODE_MERGEX2 ? (1 + inl->frame_count_out) & 1 ? FIELD_UPPER : FIELD_LOWER : FIELD_LOWER, tinterlace->flags);
if (tinterlace->mode != MODE_MERGEX2)
av_frame_free(&tinterlace->next);
break;
@@ -445,7 +446,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
out->height = outlink->h;
out->sample_aspect_ratio = av_mul_q(cur->sample_aspect_ratio, av_make_q(2, 1));
- field = (1 + outlink->frame_count_in) & 1 ? FIELD_UPPER : FIELD_LOWER;
+ field = (1 + l->frame_count_in) & 1 ? FIELD_UPPER : FIELD_LOWER;
full = out->color_range == AVCOL_RANGE_JPEG || ff_fmt_is_in(out->format, full_scale_yuvj_pix_fmts);
/* copy upper and lower fields */
copy_picture_field(tinterlace, out->data, out->linesize,
diff --git a/libavfilter/vf_vignette.c b/libavfilter/vf_vignette.c
index 9d35ea8b13..cfce54264d 100644
--- a/libavfilter/vf_vignette.c
+++ b/libavfilter/vf_vignette.c
@@ -150,12 +150,13 @@ static double get_natural_factor(const VignetteContext *s, int x, int y)
static void update_context(VignetteContext *s, AVFilterLink *inlink, AVFrame *frame)
{
+ FilterLink *inl = ff_filter_link(inlink);
int x, y;
float *dst = s->fmap;
int dst_linesize = s->fmap_linesize;
if (frame) {
- s->var_values[VAR_N] = inlink->frame_count_out;
+ s->var_values[VAR_N] = inl->frame_count_out;
s->var_values[VAR_T] = TS2T(frame->pts, inlink->time_base);
s->var_values[VAR_PTS] = TS2D(frame->pts);
} else {
diff --git a/libavfilter/vf_weave.c b/libavfilter/vf_weave.c
index 84e8e3dab5..c545e43d6d 100644
--- a/libavfilter/vf_weave.c
+++ b/libavfilter/vf_weave.c
@@ -100,12 +100,13 @@ typedef struct ThreadData {
static int weave_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
AVFilterLink *inlink = ctx->inputs[0];
+ FilterLink *inl = ff_filter_link(inlink);
WeaveContext *s = ctx->priv;
ThreadData *td = arg;
AVFrame *in = td->in;
AVFrame *out = td->out;
- const int weave = (s->double_weave && !(inlink->frame_count_out & 1));
+ const int weave = (s->double_weave && !(inl->frame_count_out & 1));
const int field1 = weave ? s->first_field : (!s->first_field);
const int field2 = weave ? (!s->first_field) : s->first_field;
diff --git a/libavfilter/vf_zoompan.c b/libavfilter/vf_zoompan.c
index e50484cbef..e98a8ea408 100644
--- a/libavfilter/vf_zoompan.c
+++ b/libavfilter/vf_zoompan.c
@@ -156,6 +156,7 @@ static int output_single_frame(AVFilterContext *ctx, AVFrame *in, double *var_va
{
ZPContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
+ FilterLink *outl = ff_filter_link(outlink);
AVFilterLink *inlink = ctx->inputs[0];
int64_t pts = s->frame_count;
int k, x, y, w, h, ret = 0;
@@ -172,7 +173,7 @@ static int output_single_frame(AVFilterContext *ctx, AVFrame *in, double *var_va
var_values[VAR_OUT_TIME] = pts * av_q2d(outlink->time_base);
var_values[VAR_TIME] = var_values[VAR_OT] = var_values[VAR_OUT_TIME];
var_values[VAR_FRAME] = i;
- var_values[VAR_ON] = outlink->frame_count_in;
+ var_values[VAR_ON] = outl->frame_count_in;
*zoom = av_expr_eval(s->zoom_expr, var_values, NULL);
@@ -260,7 +261,9 @@ static int activate(AVFilterContext *ctx)
{
ZPContext *s = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
+ FilterLink *inl = ff_filter_link(inlink);
AVFilterLink *outlink = ctx->outputs[0];
+ FilterLink *outl = ff_filter_link(outlink);
int status, ret = 0;
int64_t pts;
@@ -283,8 +286,8 @@ static int activate(AVFilterContext *ctx)
s->var_values[VAR_IN_H] = s->var_values[VAR_IH] = s->in->height;
s->var_values[VAR_OUT_W] = s->var_values[VAR_OW] = s->w;
s->var_values[VAR_OUT_H] = s->var_values[VAR_OH] = s->h;
- s->var_values[VAR_IN] = inlink->frame_count_out - 1;
- s->var_values[VAR_ON] = outlink->frame_count_in;
+ s->var_values[VAR_IN] = inl->frame_count_out - 1;
+ s->var_values[VAR_ON] = outl->frame_count_in;
s->var_values[VAR_PX] = s->x;
s->var_values[VAR_PY] = s->y;
s->var_values[VAR_X] = 0;
diff --git a/libavfilter/vsrc_mptestsrc.c b/libavfilter/vsrc_mptestsrc.c
index 3b9be4c8ed..8383370470 100644
--- a/libavfilter/vsrc_mptestsrc.c
+++ b/libavfilter/vsrc_mptestsrc.c
@@ -292,12 +292,13 @@ static int config_props(AVFilterLink *outlink)
static int request_frame(AVFilterLink *outlink)
{
+ FilterLink *outl = ff_filter_link(outlink);
MPTestContext *test = outlink->src->priv;
AVFrame *picref;
int w = WIDTH, h = HEIGHT,
cw = AV_CEIL_RSHIFT(w, test->hsub), ch = AV_CEIL_RSHIFT(h, test->vsub);
- uint64_t frame = outlink->frame_count_in / test->max_frames;
- uint64_t mod = outlink->frame_count_in % test->max_frames;
+ uint64_t frame = outl->frame_count_in / test->max_frames;
+ uint64_t mod = outl->frame_count_in % test->max_frames;
enum test_type tt = test->test;
int i;