author     Paul B Mahol <onemda@gmail.com>    2023-11-19 13:22:31 +0100
committer  Paul B Mahol <onemda@gmail.com>    2023-11-19 13:41:13 +0100
commit     a9205620b19e0c25cf9f6165b0b3937edf9ce62e (patch)
tree       6d6b868eafc3cf55f761210c2c5734583bbeeb8f
parent     496df6881548b53a67271fde9b7ee751c07961df (diff)
avfilter/af_afir: remove IR response video rendering support
Also deprecate the related options.
The same functionality can be achieved with specialized audio visualization filters.
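For illustration only (this command is not part of the commit; the showfreqs filter exists in FFmpeg, but the exact options used here are assumptions to check against the current filter documentation), an impulse response file could be inspected with an existing audio visualization filter instead of afir's removed video output:

  # Hypothetical example: render the IR's magnitude spectrum with showfreqs
  ffmpeg -i ir.wav -filter_complex showfreqs=s=hd720 -frames:v 1 ir_response.png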
-rw-r--r--  doc/filters.texi             10
-rw-r--r--  libavfilter/af_afir.c       141
-rw-r--r--  libavfilter/af_afir.h         1
-rw-r--r--  libavfilter/afir_template.c  90
4 files changed, 9 insertions, 233 deletions
diff --git a/doc/filters.texi b/doc/filters.texi
index ac9a1c7445..c3607dd036 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -1822,18 +1822,16 @@ Set max allowed Impulse Response filter duration in seconds. Default is 30 secon
 Allowed range is 0.1 to 60 seconds.
 
 @item response
-Show IR frequency response, magnitude(magenta), phase(green) and group delay(yellow) in additional video stream.
-By default it is disabled.
+This option is deprecated, and does nothing.
 
 @item channel
-Set for which IR channel to display frequency response. By default is first channel
-displayed. This option is used only when @var{response} is enabled.
+This option is deprecated, and does nothing.
 
 @item size
-Set video stream size. This option is used only when @var{response} is enabled.
+This option is deprecated, and does nothing.
 
 @item rate
-Set video stream frame rate. This option is used only when @var{response} is enabled.
+This option is deprecated, and does nothing.
 
 @item minp
 Set minimal partition size used for convolution. Default is @var{8192}.
diff --git a/libavfilter/af_afir.c b/libavfilter/af_afir.c
index ca4b585afd..a8c08f5d8c 100644
--- a/libavfilter/af_afir.c
+++ b/libavfilter/af_afir.c
@@ -36,7 +36,6 @@
 #include "libavutil/log.h"
 #include "libavutil/opt.h"
 #include "libavutil/rational.h"
-#include "libavutil/xga_font_data.h"
 
 #include "audio.h"
 #include "avfilter.h"
@@ -47,55 +46,6 @@
 #include "af_afirdsp.h"
 #include "video.h"
 
-static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
-{
-    const uint8_t *font;
-    int font_height;
-    int i;
-
-    font = avpriv_cga_font, font_height = 8;
-
-    for (i = 0; txt[i]; i++) {
-        int char_y, mask;
-
-        uint8_t *p = pic->data[0] + y * pic->linesize[0] + (x + i * 8) * 4;
-        for (char_y = 0; char_y < font_height; char_y++) {
-            for (mask = 0x80; mask; mask >>= 1) {
-                if (font[txt[i] * font_height + char_y] & mask)
-                    AV_WL32(p, color);
-                p += 4;
-            }
-            p += pic->linesize[0] - 8 * 4;
-        }
-    }
-}
-
-static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
-{
-    int dx = FFABS(x1-x0);
-    int dy = FFABS(y1-y0), sy = y0 < y1 ? 1 : -1;
-    int err = (dx>dy ? dx : -dy) / 2, e2;
-
-    for (;;) {
-        AV_WL32(out->data[0] + y0 * out->linesize[0] + x0 * 4, color);
-
-        if (x0 == x1 && y0 == y1)
-            break;
-
-        e2 = err;
-
-        if (e2 >-dx) {
-            err -= dy;
-            x0--;
-        }
-
-        if (e2 < dy) {
-            err += dx;
-            y0 += sy;
-        }
-    }
-}
-
 #define DEPTH 32
 #include "afir_template.c"
 
@@ -367,17 +317,6 @@ skip:
         return AVERROR_BUG;
     }
 
-    if (s->response) {
-        switch (s->format) {
-        case AV_SAMPLE_FMT_FLTP:
-            draw_response_float(ctx, s->video);
-            break;
-        case AV_SAMPLE_FMT_DBLP:
-            draw_response_double(ctx, s->video);
-            break;
-        }
-    }
-
     cur_nb_taps = s->ir[selir]->nb_samples;
     nb_taps = cur_nb_taps;
 
@@ -507,8 +446,6 @@ static int activate(AVFilterContext *ctx)
     int64_t pts;
 
     FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);
-    if (s->response)
-        FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[1], ctx);
 
     for (int i = 0; i < s->nb_irs; i++) {
         const int selir = i;
@@ -524,8 +461,6 @@ static int activate(AVFilterContext *ctx)
         if (!s->eof_coeffs[selir]) {
             if (ff_outlink_frame_wanted(ctx->outputs[0]))
                 ff_inlink_request_frame(ctx->inputs[1 + selir]);
-            else if (s->response && ff_outlink_frame_wanted(ctx->outputs[1]))
-                ff_inlink_request_frame(ctx->inputs[1 + selir]);
             return 0;
         }
     }
@@ -549,20 +484,6 @@ static int activate(AVFilterContext *ctx)
     if (ret < 0)
         return ret;
 
-    if (s->response && s->have_coeffs[s->selir]) {
-        int64_t old_pts = s->video->pts;
-        int64_t new_pts = av_rescale_q(s->pts, ctx->inputs[0]->time_base, ctx->outputs[1]->time_base);
-
-        if (ff_outlink_frame_wanted(ctx->outputs[1]) && old_pts < new_pts) {
-            AVFrame *clone;
-            s->video->pts = new_pts;
-            clone = av_frame_clone(s->video);
-            if (!clone)
-                return AVERROR(ENOMEM);
-            return ff_filter_frame(ctx->outputs[1], clone);
-        }
-    }
-
     if (ff_inlink_queued_samples(ctx->inputs[0]) >= s->min_part_size) {
         ff_filter_set_ready(ctx, 10);
         return 0;
@@ -571,8 +492,6 @@ static int activate(AVFilterContext *ctx)
     if (ff_inlink_acknowledge_status(ctx->inputs[0], &status, &pts)) {
         if (status == AVERROR_EOF) {
             ff_outlink_set_status(ctx->outputs[0], status, pts);
-            if (s->response)
-                ff_outlink_set_status(ctx->outputs[1], status, pts);
             return 0;
         }
     }
@@ -582,12 +501,6 @@ static int activate(AVFilterContext *ctx)
         return 0;
     }
 
-    if (s->response &&
-        ff_outlink_frame_wanted(ctx->outputs[1])) {
-        ff_inlink_request_frame(ctx->inputs[0]);
-        return 0;
-    }
-
     return FFERROR_NOT_READY;
 }
 
@@ -599,19 +512,8 @@ static int query_formats(AVFilterContext *ctx)
         { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE },
         { AV_SAMPLE_FMT_DBLP, AV_SAMPLE_FMT_NONE },
     };
-    static const enum AVPixelFormat pix_fmts[] = {
-        AV_PIX_FMT_RGB0,
-        AV_PIX_FMT_NONE
-    };
     int ret;
 
-    if (s->response) {
-        AVFilterLink *videolink = ctx->outputs[1];
-        AVFilterFormats *formats = ff_make_format_list(pix_fmts);
-        if ((ret = ff_formats_ref(formats, &videolink->incfg.formats)) < 0)
-            return ret;
-    }
-
     if (s->ir_format) {
         ret = ff_set_common_all_channel_counts(ctx);
         if (ret < 0)
@@ -724,33 +626,12 @@ static av_cold void uninit(AVFilterContext *ctx)
 
     av_frame_free(&s->xfade[0]);
     av_frame_free(&s->xfade[1]);
-
-    av_frame_free(&s->video);
-}
-
-static int config_video(AVFilterLink *outlink)
-{
-    AVFilterContext *ctx = outlink->src;
-    AudioFIRContext *s = ctx->priv;
-
-    outlink->sample_aspect_ratio = (AVRational){1,1};
-    outlink->w = s->w;
-    outlink->h = s->h;
-    outlink->frame_rate = s->frame_rate;
-    outlink->time_base = av_inv_q(outlink->frame_rate);
-
-    av_frame_free(&s->video);
-    s->video = ff_get_video_buffer(outlink, outlink->w, outlink->h);
-    if (!s->video)
-        return AVERROR(ENOMEM);
-
-    return 0;
 }
 
 static av_cold int init(AVFilterContext *ctx)
 {
     AudioFIRContext *s = ctx->priv;
-    AVFilterPad pad, vpad;
+    AVFilterPad pad;
     int ret;
 
     s->prev_selir = FFMIN(s->nb_irs - 1, s->selir);
@@ -788,18 +669,6 @@ static av_cold int init(AVFilterContext *ctx)
     if (ret < 0)
         return ret;
 
-    if (s->response) {
-        vpad = (AVFilterPad){
-            .name = "filter_response",
-            .type = AVMEDIA_TYPE_VIDEO,
-            .config_props = config_video,
-        };
-
-        ret = ff_append_outpad(ctx, &vpad);
-        if (ret < 0)
-            return ret;
-    }
-
     s->fdsp = avpriv_float_dsp_alloc(0);
     if (!s->fdsp)
         return AVERROR(ENOMEM);
@@ -861,10 +730,10 @@ static const AVOption afir_options[] = {
     { "mono", "single channel", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "irfmt" },
     { "input", "same as input", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "irfmt" },
     { "maxir", "set max IR length", OFFSET(max_ir_len), AV_OPT_TYPE_FLOAT, {.dbl=30}, 0.1, 60, AF },
-    { "response", "show IR frequency response", OFFSET(response), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF },
-    { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF },
-    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF },
-    { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT32_MAX, VF },
+    { "response", "show IR frequency response", OFFSET(response), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF|AV_OPT_FLAG_DEPRECATED },
+    { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF|AV_OPT_FLAG_DEPRECATED },
+    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF|AV_OPT_FLAG_DEPRECATED },
+    { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT32_MAX, VF|AV_OPT_FLAG_DEPRECATED },
     { "minp", "set min partition size", OFFSET(minp), AV_OPT_TYPE_INT, {.i64=8192}, 1, 65536, AF },
     { "maxp", "set max partition size", OFFSET(maxp), AV_OPT_TYPE_INT, {.i64=8192}, 8, 65536, AF },
     { "nbirs", "set number of input IRs",OFFSET(nb_irs),AV_OPT_TYPE_INT, {.i64=1}, 1, 32, AF },
diff --git a/libavfilter/af_afir.h b/libavfilter/af_afir.h
index 5c39aa10b9..3517250299 100644
--- a/libavfilter/af_afir.h
+++ b/libavfilter/af_afir.h
@@ -98,7 +98,6 @@ typedef struct AudioFIRContext {
     AVFrame *fadein[2];
     AVFrame *ir[MAX_IR_STREAMS];
    AVFrame *norm_ir[MAX_IR_STREAMS];
-    AVFrame *video;
     int min_part_size;
     int max_part_size;
     int64_t pts;
diff --git a/libavfilter/afir_template.c b/libavfilter/afir_template.c
index 676cec6dde..fb7bfca168 100644
--- a/libavfilter/afir_template.c
+++ b/libavfilter/afir_template.c
@@ -55,96 +55,6 @@
 #define fn2(a,b)   fn3(a,b)
 #define fn(a)      fn2(a, SAMPLE_FORMAT)
 
-static void fn(draw_response)(AVFilterContext *ctx, AVFrame *out)
-{
-    AudioFIRContext *s = ctx->priv;
-    ftype *mag, *phase, *delay, min = FLT_MAX, max = FLT_MIN;
-    ftype min_delay = FLT_MAX, max_delay = FLT_MIN;
-    int prev_ymag = -1, prev_yphase = -1, prev_ydelay = -1;
-    char text[32];
-    int channel, i, x;
-
-    for (int y = 0; y < s->h; y++)
-        memset(out->data[0] + y * out->linesize[0], 0, s->w * 4);
-
-    phase = av_malloc_array(s->w, sizeof(*phase));
-    mag = av_malloc_array(s->w, sizeof(*mag));
-    delay = av_malloc_array(s->w, sizeof(*delay));
-    if (!mag || !phase || !delay)
-        goto end;
-
-    channel = av_clip(s->ir_channel, 0, s->ir[s->selir]->ch_layout.nb_channels - 1);
-    for (i = 0; i < s->w; i++) {
-        const ftype *src = (const ftype *)s->ir[s->selir]->extended_data[channel];
-        double w = i * M_PI / (s->w - 1);
-        double div, real_num = 0., imag_num = 0., real = 0., imag = 0.;
-
-        for (x = 0; x < s->nb_taps[s->selir]; x++) {
-            real += cos(-x * w) * src[x];
-            imag += sin(-x * w) * src[x];
-            real_num += cos(-x * w) * src[x] * x;
-            imag_num += sin(-x * w) * src[x] * x;
-        }
-
-        mag[i] = hypot(real, imag);
-        phase[i] = atan2(imag, real);
-        div = real * real + imag * imag;
-        delay[i] = (real_num * real + imag_num * imag) / div;
-        min = fminf(min, mag[i]);
-        max = fmaxf(max, mag[i]);
-        min_delay = fminf(min_delay, delay[i]);
-        max_delay = fmaxf(max_delay, delay[i]);
-    }
-
-    for (i = 0; i < s->w; i++) {
-        int ymag = mag[i] / max * (s->h - 1);
-        int ydelay = (delay[i] - min_delay) / (max_delay - min_delay) * (s->h - 1);
-        int yphase = (0.5 * (1. + phase[i] / M_PI)) * (s->h - 1);
-
-        ymag = s->h - 1 - av_clip(ymag, 0, s->h - 1);
-        yphase = s->h - 1 - av_clip(yphase, 0, s->h - 1);
-        ydelay = s->h - 1 - av_clip(ydelay, 0, s->h - 1);
-
-        if (prev_ymag < 0)
-            prev_ymag = ymag;
-        if (prev_yphase < 0)
-            prev_yphase = yphase;
-        if (prev_ydelay < 0)
-            prev_ydelay = ydelay;
-
-        draw_line(out, i, ymag, FFMAX(i - 1, 0), prev_ymag, 0xFFFF00FF);
-        draw_line(out, i, yphase, FFMAX(i - 1, 0), prev_yphase, 0xFF00FF00);
-        draw_line(out, i, ydelay, FFMAX(i - 1, 0), prev_ydelay, 0xFF00FFFF);
-
-        prev_ymag = ymag;
-        prev_yphase = yphase;
-        prev_ydelay = ydelay;
-    }
-
-    if (s->w > 400 && s->h > 100) {
-        drawtext(out, 2, 2, "Max Magnitude:", 0xDDDDDDDD);
-        snprintf(text, sizeof(text), "%.2f", max);
-        drawtext(out, 15 * 8 + 2, 2, text, 0xDDDDDDDD);
-
-        drawtext(out, 2, 12, "Min Magnitude:", 0xDDDDDDDD);
-        snprintf(text, sizeof(text), "%.2f", min);
-        drawtext(out, 15 * 8 + 2, 12, text, 0xDDDDDDDD);
-
-        drawtext(out, 2, 22, "Max Delay:", 0xDDDDDDDD);
-        snprintf(text, sizeof(text), "%.2f", max_delay);
-        drawtext(out, 11 * 8 + 2, 22, text, 0xDDDDDDDD);
-
-        drawtext(out, 2, 32, "Min Delay:", 0xDDDDDDDD);
-        snprintf(text, sizeof(text), "%.2f", min_delay);
-        drawtext(out, 11 * 8 + 2, 32, text, 0xDDDDDDDD);
-    }
-
-end:
-    av_free(delay);
-    av_free(phase);
-    av_free(mag);
-}
-
 static ftype fn(ir_gain)(AVFilterContext *ctx, AudioFIRContext *s,
                          int cur_nb_taps, const ftype *time)
 {
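For reference, the removed fn(draw_response) evaluated the IR's frequency response directly from the taps: magnitude and phase from the DTFT X(w), and group delay from Re{DTFT(n*x[n]) * conj(X(w))} / |X(w)|^2. The standalone C sketch below is not FFmpeg code; the function name response_at, the 5-tap moving-average test IR and the point count W are made up for illustration, but the arithmetic mirrors the removed routine.

/* Standalone sketch (not part of FFmpeg): the per-column frequency-response
 * math from the removed draw_response(), for a single-channel FIR "ir" of
 * nb_taps samples at normalized frequency w in [0, pi]. */
#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

static void response_at(const float *ir, int nb_taps, double w,
                        double *mag, double *phase, double *delay)
{
    double real = 0., imag = 0., real_num = 0., imag_num = 0.;

    for (int x = 0; x < nb_taps; x++) {
        real     += cos(-x * w) * ir[x];          /* DTFT of x[n]   */
        imag     += sin(-x * w) * ir[x];
        real_num += cos(-x * w) * ir[x] * x;      /* DTFT of n*x[n] */
        imag_num += sin(-x * w) * ir[x] * x;
    }

    *mag   = hypot(real, imag);
    *phase = atan2(imag, real);
    *delay = (real_num * real + imag_num * imag) / (real * real + imag * imag);
}

int main(void)
{
    /* Hypothetical test IR: a 5-tap moving average, whose group delay
     * should come out as (N-1)/2 = 2 samples at every frequency. */
    const float ir[] = { 0.2f, 0.2f, 0.2f, 0.2f, 0.2f };
    const int W = 8; /* number of frequency points, like the plot width */

    for (int i = 0; i < W; i++) {
        double w = i * M_PI / (W - 1), mag, phase, delay;
        response_at(ir, 5, w, &mag, &phase, &delay);
        printf("w=%5.3f  |H|=%6.3f  arg=%7.3f  delay=%6.3f\n", w, mag, phase, delay);
    }
    return 0;
}

Compiled with "cc sketch.c -lm", the output shows a constant delay of 2 samples for the symmetric 5-tap average, which matches the (N-1)/2 group delay expected of a linear-phase FIR and illustrates what the removed video pane plotted as the yellow curve.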