author | Mans Rullgard <mans@mansr.com> | 2011-03-19 15:14:17 +0000
committer | Mans Rullgard <mans@mansr.com> | 2011-03-19 19:49:18 +0000
commit | 26f548bb59177cfc8c45ff633dd37b60cfd23edf (patch)
tree | 1e2f6b7d04f8554c60eb4b562844f1cf21b64b9b
parent | ec10a9ab461b26b96eff7bbbb8623f42d8ee04ad (diff)
download | ffmpeg-26f548bb59177cfc8c45ff633dd37b60cfd23edf.tar.gz
fft: remove inline wrappers for function pointers
This removes the rather pointless wrappers (one not even inline)
for calling the fft_calc and related function pointers.
Signed-off-by: Mans Rullgard <mans@mansr.com>
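For readers skimming the diff below, here is a minimal sketch of the pattern this commit applies (the types are simplified stand-ins, not the actual libavcodec headers): instead of going through a thin wrapper such as ff_fft_calc(), each call site now invokes the function pointer stored in the context directly.

```c
/* Simplified sketch only: FFTContext/FFTComplex here are illustrative
 * stand-ins for the real libavcodec definitions. */
typedef struct FFTComplex { float re, im; } FFTComplex;

typedef struct FFTContext {
    int nbits;
    /* The init code points these at a C or SIMD implementation. */
    void (*fft_permute)(struct FFTContext *s, FFTComplex *z);
    void (*fft_calc)(struct FFTContext *s, FFTComplex *z);
} FFTContext;

/* Before: an inline wrapper that only forwards to the function pointer. */
static inline void ff_fft_calc(FFTContext *s, FFTComplex *z)
{
    s->fft_calc(s, z);
}

/* After: call sites use the pointer in the context directly. */
static void run_fft(FFTContext *s, FFTComplex *z)
{
    s->fft_permute(s, z); /* reorder input as required by fft_calc */
    s->fft_calc(s, z);    /* in-place complex FFT */
}
```

The generated code is the same in both cases; removing the wrappers simply drops an extra layer of indirection in the source.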
-rw-r--r-- | libavcodec/aacdec.c | 6
-rw-r--r-- | libavcodec/aacenc.c | 4
-rw-r--r-- | libavcodec/aacsbr.c | 8
-rw-r--r-- | libavcodec/ac3dec.c | 6
-rw-r--r-- | libavcodec/ac3enc_float.c | 2
-rw-r--r-- | libavcodec/atrac1.c | 2
-rw-r--r-- | libavcodec/atrac3.c | 4
-rw-r--r-- | libavcodec/avfft.c | 4
-rw-r--r-- | libavcodec/binkaudio.c | 4
-rw-r--r-- | libavcodec/cook.c | 2
-rw-r--r-- | libavcodec/dct.c | 13
-rw-r--r-- | libavcodec/fft-test.c | 24
-rw-r--r-- | libavcodec/fft.h | 45
-rw-r--r-- | libavcodec/imc.c | 4
-rw-r--r-- | libavcodec/mdct.c | 4
-rw-r--r-- | libavcodec/nellymoserdec.c | 2
-rw-r--r-- | libavcodec/nellymoserenc.c | 4
-rw-r--r-- | libavcodec/qdm2.c | 2
-rw-r--r-- | libavcodec/rdft.c | 8
-rw-r--r-- | libavcodec/synth_filter.c | 2
-rw-r--r-- | libavcodec/twinvq.c | 3
-rw-r--r-- | libavcodec/vorbis_dec.c | 6
-rw-r--r-- | libavcodec/vorbis_enc.c | 2
-rw-r--r-- | libavcodec/wmadec.c | 5
-rw-r--r-- | libavcodec/wmaenc.c | 3
-rw-r--r-- | libavcodec/wmaprodec.c | 6
-rw-r--r-- | libavcodec/wmavoice.c | 14
27 files changed, 80 insertions, 109 deletions
diff --git a/libavcodec/aacdec.c b/libavcodec/aacdec.c
index 0faf50fca0..a981fbeb7f 100644
--- a/libavcodec/aacdec.c
+++ b/libavcodec/aacdec.c
@@ -1750,7 +1750,7 @@ static void windowing_and_mdct_ltp(AACContext *ac, float *out,
         ac->dsp.vector_fmul_reverse(in + 1024 + 448, in + 1024 + 448, swindow, 128);
         memset(in + 1024 + 576, 0, 448 * sizeof(float));
     }
-    ff_mdct_calc(&ac->mdct_ltp, out, in);
+    ac->mdct_ltp.mdct_calc(&ac->mdct_ltp, out, in);
 }
 
 /**
@@ -1839,9 +1839,9 @@ static void imdct_and_windowing(AACContext *ac, SingleChannelElement *sce)
     // imdct
     if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
         for (i = 0; i < 1024; i += 128)
-            ff_imdct_half(&ac->mdct_small, buf + i, in + i);
+            ac->mdct_small.imdct_half(&ac->mdct_small, buf + i, in + i);
     } else
-        ff_imdct_half(&ac->mdct, buf, in);
+        ac->mdct.imdct_half(&ac->mdct, buf, in);
 
     /* window overlapping
      * NOTE: To simplify the overlapping code, all 'meaningless' short to long
diff --git a/libavcodec/aacenc.c b/libavcodec/aacenc.c
index 71aa0e37a5..f74e28526d 100644
--- a/libavcodec/aacenc.c
+++ b/libavcodec/aacenc.c
@@ -250,7 +250,7 @@ static void apply_window_and_mdct(AVCodecContext *avctx, AACEncContext *s,
             for (i = 0; i < 1024; i++)
                 sce->saved[i] = audio[i * chans];
         }
-        ff_mdct_calc(&s->mdct1024, sce->coeffs, output);
+        s->mdct1024.mdct_calc(&s->mdct1024, sce->coeffs, output);
     } else {
         for (k = 0; k < 1024; k += 128) {
             for (i = 448 + k; i < 448 + k + 256; i++)
@@ -259,7 +259,7 @@ static void apply_window_and_mdct(AVCodecContext *avctx, AACEncContext *s,
                                    : audio[(i-1024)*chans];
             s->dsp.vector_fmul (output, output, k ? swindow : pwindow, 128);
            s->dsp.vector_fmul_reverse(output+128, output+128, swindow, 128);
-            ff_mdct_calc(&s->mdct128, sce->coeffs + k, output);
+            s->mdct128.mdct_calc(&s->mdct128, sce->coeffs + k, output);
         }
         for (i = 0; i < 1024; i++)
             sce->saved[i] = audio[i * chans];
diff --git a/libavcodec/aacsbr.c b/libavcodec/aacsbr.c
index 90f360730b..0df52490a4 100644
--- a/libavcodec/aacsbr.c
+++ b/libavcodec/aacsbr.c
@@ -1155,7 +1155,7 @@ static void sbr_qmf_analysis(DSPContext *dsp, FFTContext *mdct, const float *in,
         }
         z[64+63] = z[32];
 
-        ff_imdct_half(mdct, z, z+64);
+        mdct->imdct_half(mdct, z, z+64);
         for (k = 0; k < 32; k++) {
             W[1][i][k][0] = -z[63-k];
             W[1][i][k][1] = z[k];
@@ -1190,7 +1190,7 @@ static void sbr_qmf_synthesis(DSPContext *dsp, FFTContext *mdct,
                 X[0][i][ n] = -X[0][i][n];
                 X[0][i][32+n] = X[1][i][31-n];
             }
-            ff_imdct_half(mdct, mdct_buf[0], X[0][i]);
+            mdct->imdct_half(mdct, mdct_buf[0], X[0][i]);
             for (n = 0; n < 32; n++) {
                 v[ n] = mdct_buf[0][63 - 2*n];
                 v[63 - n] = -mdct_buf[0][62 - 2*n];
@@ -1199,8 +1199,8 @@ static void sbr_qmf_synthesis(DSPContext *dsp, FFTContext *mdct,
             for (n = 1; n < 64; n+=2) {
                 X[1][i][n] = -X[1][i][n];
             }
-            ff_imdct_half(mdct, mdct_buf[0], X[0][i]);
-            ff_imdct_half(mdct, mdct_buf[1], X[1][i]);
+            mdct->imdct_half(mdct, mdct_buf[0], X[0][i]);
+            mdct->imdct_half(mdct, mdct_buf[1], X[1][i]);
             for (n = 0; n < 64; n++) {
                 v[ n] = -mdct_buf[0][63 - n] + mdct_buf[1][ n ];
                 v[127 - n] = mdct_buf[0][63 - n] + mdct_buf[1][ n ];
diff --git a/libavcodec/ac3dec.c b/libavcodec/ac3dec.c
index 3fd9fc144b..fbc8dd1c54 100644
--- a/libavcodec/ac3dec.c
+++ b/libavcodec/ac3dec.c
@@ -628,13 +628,13 @@ static inline void do_imdct(AC3DecodeContext *s, int channels)
             float *x = s->tmp_output+128;
             for(i=0; i<128; i++)
                 x[i] = s->transform_coeffs[ch][2*i];
-            ff_imdct_half(&s->imdct_256, s->tmp_output, x);
+            s->imdct_256.imdct_half(&s->imdct_256, s->tmp_output, x);
             s->dsp.vector_fmul_window(s->output[ch-1], s->delay[ch-1], s->tmp_output, s->window, 128);
             for(i=0; i<128; i++)
                 x[i] = s->transform_coeffs[ch][2*i+1];
-            ff_imdct_half(&s->imdct_256, s->delay[ch-1], x);
+            s->imdct_256.imdct_half(&s->imdct_256, s->delay[ch-1], x);
         } else {
-            ff_imdct_half(&s->imdct_512, s->tmp_output, s->transform_coeffs[ch]);
+            s->imdct_512.imdct_half(&s->imdct_512, s->tmp_output, s->transform_coeffs[ch]);
             s->dsp.vector_fmul_window(s->output[ch-1], s->delay[ch-1], s->tmp_output, s->window, 128);
             memcpy(s->delay[ch-1], s->tmp_output+128, 128*sizeof(float));
         }
diff --git a/libavcodec/ac3enc_float.c b/libavcodec/ac3enc_float.c
index 079331bc76..e46ec6a85d 100644
--- a/libavcodec/ac3enc_float.c
+++ b/libavcodec/ac3enc_float.c
@@ -74,7 +74,7 @@ static av_cold int mdct_init(AVCodecContext *avctx, AC3MDCTContext *mdct,
  */
 static void mdct512(AC3MDCTContext *mdct, float *out, float *in)
 {
-    ff_mdct_calc(&mdct->fft, out, in);
+    mdct->fft.mdct_calc(&mdct->fft, out, in);
 }
 
diff --git a/libavcodec/atrac1.c b/libavcodec/atrac1.c
index c0bd8eef49..0241238db6 100644
--- a/libavcodec/atrac1.c
+++ b/libavcodec/atrac1.c
@@ -99,7 +99,7 @@ static void at1_imdct(AT1Ctx *q, float *spec, float *out, int nbits,
         for (i = 0; i < transf_size / 2; i++)
             FFSWAP(float, spec[i], spec[transf_size - 1 - i]);
     }
-    ff_imdct_half(mdct_context, out, spec);
+    mdct_context->imdct_half(mdct_context, out, spec);
 }
 
diff --git a/libavcodec/atrac3.c b/libavcodec/atrac3.c
index 0449841268..563352094d 100644
--- a/libavcodec/atrac3.c
+++ b/libavcodec/atrac3.c
@@ -146,7 +146,7 @@ static void IMLT(ATRAC3Context *q, float *pInput, float *pOutput, int odd_band)
         /**
          * Reverse the odd bands before IMDCT, this is an effect of the QMF transform
          * or it gives better compression to do it this way.
-         * FIXME: It should be possible to handle this in ff_imdct_calc
+         * FIXME: It should be possible to handle this in imdct_calc
          * for that to happen a modification of the prerotation step of
         * all SIMD code and C code is needed.
         * Or fix the functions before so they generate a pre reversed spectrum.
@@ -156,7 +156,7 @@ static void IMLT(ATRAC3Context *q, float *pInput, float *pOutput, int odd_band)
            FFSWAP(float, pInput[i], pInput[255-i]);
     }
 
-    ff_imdct_calc(&q->mdct_ctx,pOutput,pInput);
+    q->mdct_ctx.imdct_calc(&q->mdct_ctx,pOutput,pInput);
 
     /* Perform windowing on the output. */
     dsp.vector_fmul(pOutput, pOutput, mdct_window, 512);
diff --git a/libavcodec/avfft.c b/libavcodec/avfft.c
index 7abf8fdb75..1e52fe67b1 100644
--- a/libavcodec/avfft.c
+++ b/libavcodec/avfft.c
@@ -101,7 +101,7 @@ RDFTContext *av_rdft_init(int nbits, enum RDFTransformType trans)
 
 void av_rdft_calc(RDFTContext *s, FFTSample *data)
 {
-    ff_rdft_calc(s, data);
+    s->rdft_calc(s, data);
 }
 
 void av_rdft_end(RDFTContext *s)
@@ -128,7 +128,7 @@ DCTContext *av_dct_init(int nbits, enum DCTTransformType inverse)
 
 void av_dct_calc(DCTContext *s, FFTSample *data)
 {
-    ff_dct_calc(s, data);
+    s->dct_calc(s, data);
 }
 
 void av_dct_end(DCTContext *s)
diff --git a/libavcodec/binkaudio.c b/libavcodec/binkaudio.c
index 93adf1ced3..ec1d0233c6 100644
--- a/libavcodec/binkaudio.c
+++ b/libavcodec/binkaudio.c
@@ -223,11 +223,11 @@ static void decode_block(BinkAudioContext *s, short *out, int use_dct)
 
         if (CONFIG_BINKAUDIO_DCT_DECODER && use_dct) {
             coeffs[0] /= 0.5;
-            ff_dct_calc (&s->trans.dct, coeffs);
+            s->trans.dct.dct_calc(&s->trans.dct, coeffs);
             s->dsp.vector_fmul_scalar(coeffs, coeffs, s->frame_len / 2, s->frame_len);
         }
         else if (CONFIG_BINKAUDIO_RDFT_DECODER)
-            ff_rdft_calc(&s->trans.rdft, coeffs);
+            s->trans.rdft.rdft_calc(&s->trans.rdft, coeffs);
     }
 
     s->fmt_conv.float_to_int16_interleave(out, (const float **)s->coeffs_ptr,
diff --git a/libavcodec/cook.c b/libavcodec/cook.c
index 5d650d7d10..8e50daa24f 100644
--- a/libavcodec/cook.c
+++ b/libavcodec/cook.c
@@ -753,7 +753,7 @@ static void imlt_gain(COOKContext *q, float *inbuffer,
     int i;
 
     /* Inverse modified discrete cosine transform */
-    ff_imdct_calc(&q->mdct_ctx, q->mono_mdct_output, inbuffer);
+    q->mdct_ctx.imdct_calc(&q->mdct_ctx, q->mono_mdct_output, inbuffer);
 
     q->imlt_window (q, buffer1, gains_ptr, previous_buffer);
 
diff --git a/libavcodec/dct.c b/libavcodec/dct.c
index 5f45b13fa8..6bafdc1136 100644
--- a/libavcodec/dct.c
+++ b/libavcodec/dct.c
@@ -59,7 +59,7 @@ static void ff_dst_calc_I_c(DCTContext *ctx, FFTSample *data)
     }
 
     data[n/2] *= 2;
-    ff_rdft_calc(&ctx->rdft, data);
+    ctx->rdft.rdft_calc(&ctx->rdft, data);
 
     data[0] *= 0.5f;
 
@@ -93,7 +93,7 @@ static void ff_dct_calc_I_c(DCTContext *ctx, FFTSample *data)
         data[n - i] = tmp1 + s;
     }
 
-    ff_rdft_calc(&ctx->rdft, data);
+    ctx->rdft.rdft_calc(&ctx->rdft, data);
 
     data[n] = data[1];
     data[1] = next;
@@ -121,7 +121,7 @@ static void ff_dct_calc_III_c(DCTContext *ctx, FFTSample *data)
 
     data[1] = 2 * next;
 
-    ff_rdft_calc(&ctx->rdft, data);
+    ctx->rdft.rdft_calc(&ctx->rdft, data);
 
     for (i = 0; i < n / 2; i++) {
         float tmp1 = data[i ] * inv_n;
@@ -152,7 +152,7 @@ static void ff_dct_calc_II_c(DCTContext *ctx, FFTSample *data)
         data[n-i-1] = tmp1 - s;
     }
 
-    ff_rdft_calc(&ctx->rdft, data);
+    ctx->rdft.rdft_calc(&ctx->rdft, data);
 
     next = data[1] * 0.5;
     data[1] *= -1;
@@ -176,11 +176,6 @@ static void dct32_func(DCTContext *ctx, FFTSample *data)
     ctx->dct32(data, data);
 }
 
-void ff_dct_calc(DCTContext *s, FFTSample *data)
-{
-    s->dct_calc(s, data);
-}
-
 av_cold int ff_dct_init(DCTContext *s, int nbits, enum DCTTransformType inverse)
 {
     int n = 1 << nbits;
diff --git a/libavcodec/fft-test.c b/libavcodec/fft-test.c
index 0313154ecf..bd95e2cd08 100644
--- a/libavcodec/fft-test.c
+++ b/libavcodec/fft-test.c
@@ -327,20 +327,20 @@ int main(int argc, char **argv)
         case TRANSFORM_MDCT:
             if (do_inverse) {
                 imdct_ref((float *)tab_ref, (float *)tab1, fft_nbits);
-                ff_imdct_calc(m, tab2, (float *)tab1);
+                m->imdct_calc(m, tab2, (float *)tab1);
                 err = check_diff((float *)tab_ref, tab2, fft_size, scale);
             } else {
                 mdct_ref((float *)tab_ref, (float *)tab1, fft_nbits);
-                ff_mdct_calc(m, tab2, (float *)tab1);
+                m->mdct_calc(m, tab2, (float *)tab1);
                 err = check_diff((float *)tab_ref, tab2, fft_size / 2, scale);
             }
             break;
         case TRANSFORM_FFT:
             memcpy(tab, tab1, fft_size * sizeof(FFTComplex));
-            ff_fft_permute(s, tab);
-            ff_fft_calc(s, tab);
+            s->fft_permute(s, tab);
+            s->fft_calc(s, tab);
             fft_ref(tab_ref, tab1, fft_nbits);
             err = check_diff((float *)tab_ref, (float *)tab, fft_size * 2, 1.0);
@@ -357,7 +357,7 @@ int main(int argc, char **argv)
             memcpy(tab2, tab1, fft_size * sizeof(FFTSample));
             tab2[1] = tab1[fft_size_2].re;
-            ff_rdft_calc(r, tab2);
+            r->rdft_calc(r, tab2);
             fft_ref(tab_ref, tab1, fft_nbits);
             for (i = 0; i < fft_size; i++) {
                 tab[i].re = tab2[i];
@@ -369,7 +369,7 @@ int main(int argc, char **argv)
                 tab2[i] = tab1[i].re;
                 tab1[i].im = 0;
             }
-            ff_rdft_calc(r, tab2);
+            r->rdft_calc(r, tab2);
             fft_ref(tab_ref, tab1, fft_nbits);
             tab_ref[0].im = tab_ref[fft_size_2].re;
             err = check_diff((float *)tab_ref, (float *)tab2, fft_size, 1.0);
@@ -377,7 +377,7 @@ int main(int argc, char **argv)
         break;
     case TRANSFORM_DCT:
         memcpy(tab, tab1, fft_size * sizeof(FFTComplex));
-        ff_dct_calc(d, tab);
+        d->dct_calc(d, tab);
         if (do_inverse) {
             idct_ref(tab_ref, tab1, fft_nbits);
         } else {
@@ -402,22 +402,22 @@ int main(int argc, char **argv)
             switch (transform) {
             case TRANSFORM_MDCT:
                 if (do_inverse) {
-                    ff_imdct_calc(m, (float *)tab, (float *)tab1);
+                    m->imdct_calc(m, (float *)tab, (float *)tab1);
                 } else {
-                    ff_mdct_calc(m, (float *)tab, (float *)tab1);
+                    m->mdct_calc(m, (float *)tab, (float *)tab1);
                 }
                 break;
             case TRANSFORM_FFT:
                 memcpy(tab, tab1, fft_size * sizeof(FFTComplex));
-                ff_fft_calc(s, tab);
+                s->fft_calc(s, tab);
                 break;
             case TRANSFORM_RDFT:
                 memcpy(tab2, tab1, fft_size * sizeof(FFTSample));
-                ff_rdft_calc(r, tab2);
+                r->rdft_calc(r, tab2);
                 break;
             case TRANSFORM_DCT:
                 memcpy(tab2, tab1, fft_size * sizeof(FFTSample));
-                ff_dct_calc(d, tab2);
+                d->dct_calc(d, tab2);
                 break;
             }
         }
diff --git a/libavcodec/fft.h b/libavcodec/fft.h
index 2196547131..610a9a9f44 100644
--- a/libavcodec/fft.h
+++ b/libavcodec/fft.h
@@ -39,7 +39,14 @@ struct FFTContext {
     /* pre/post rotation tables */
     FFTSample *tcos;
     FFTSample *tsin;
+    /**
+     * Do the permutation needed BEFORE calling fft_calc().
+     */
     void (*fft_permute)(struct FFTContext *s, FFTComplex *z);
+    /**
+     * Do a complex FFT with the parameters defined in ff_fft_init(). The
+     * input data must be permuted before. No 1.0/sqrt(n) normalization is done.
+     */
     void (*fft_calc)(struct FFTContext *s, FFTComplex *z);
     void (*imdct_calc)(struct FFTContext *s, FFTSample *output, const FFTSample *input);
     void (*imdct_half)(struct FFTContext *s, FFTSample *output, const FFTSample *input);
@@ -115,40 +122,8 @@ void ff_fft_init_mmx(FFTContext *s);
 void ff_fft_init_arm(FFTContext *s);
 void ff_dct_init_mmx(DCTContext *s);
 
-/**
- * Do the permutation needed BEFORE calling ff_fft_calc().
- */
-static inline void ff_fft_permute(FFTContext *s, FFTComplex *z)
-{
-    s->fft_permute(s, z);
-}
-/**
- * Do a complex FFT with the parameters defined in ff_fft_init(). The
- * input data must be permuted before. No 1.0/sqrt(n) normalization is done.
- */
-static inline void ff_fft_calc(FFTContext *s, FFTComplex *z)
-{
-    s->fft_calc(s, z);
-}
 void ff_fft_end(FFTContext *s);
 
-/* MDCT computation */
-
-static inline void ff_imdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input)
-{
-    s->imdct_calc(s, output, input);
-}
-static inline void ff_imdct_half(FFTContext *s, FFTSample *output, const FFTSample *input)
-{
-    s->imdct_half(s, output, input);
-}
-
-static inline void ff_mdct_calc(FFTContext *s, FFTSample *output,
-                                const FFTSample *input)
-{
-    s->mdct_calc(s, output, input);
-}
-
 /**
  * Maximum window size for ff_kbd_window_init.
  */
@@ -213,11 +188,6 @@ void ff_rdft_end(RDFTContext *s);
 
 void ff_rdft_init_arm(RDFTContext *s);
 
-static av_always_inline void ff_rdft_calc(RDFTContext *s, FFTSample *data)
-{
-    s->rdft_calc(s, data);
-}
-
 /* Discrete Cosine Transform */
 
 struct DCTContext {
@@ -239,7 +209,6 @@ struct DCTContext {
 * @note the first element of the input of DST-I is ignored
 */
 int ff_dct_init(DCTContext *s, int nbits, enum DCTTransformType type);
-void ff_dct_calc(DCTContext *s, FFTSample *data);
 void ff_dct_end (DCTContext *s);
 
 #endif /* AVCODEC_FFT_H */
diff --git a/libavcodec/imc.c b/libavcodec/imc.c
index b665e22ca3..ae2cc9d17a 100644
--- a/libavcodec/imc.c
+++ b/libavcodec/imc.c
@@ -564,8 +564,8 @@ static void imc_imdct256(IMCContext *q) {
     }
 
     /* FFT */
-    ff_fft_permute(&q->fft, q->samples);
-    ff_fft_calc (&q->fft, q->samples);
+    q->fft.fft_permute(&q->fft, q->samples);
+    q->fft.fft_calc (&q->fft, q->samples);
 
     /* postrotation, window and reorder */
     for(i = 0; i < COEFFS/2; i++){
diff --git a/libavcodec/mdct.c b/libavcodec/mdct.c
index bb0ca58c7c..c99a6cfee2 100644
--- a/libavcodec/mdct.c
+++ b/libavcodec/mdct.c
@@ -146,7 +146,7 @@ void ff_imdct_half_c(FFTContext *s, FFTSample *output, const FFTSample *input)
         in1 += 2;
         in2 -= 2;
     }
-    ff_fft_calc(s, z);
+    s->fft_calc(s, z);
 
     /* post rotation + reordering */
     for(k = 0; k < n8; k++) {
@@ -213,7 +213,7 @@ void ff_mdct_calc_c(FFTContext *s, FFTSample *out, const FFTSample *input)
         CMUL(x[j].re, x[j].im, re, im, -tcos[n8 + i], tsin[n8 + i]);
     }
 
-    ff_fft_calc(s, x);
+    s->fft_calc(s, x);
 
     /* post rotation */
     for(i=0;i<n8;i++) {
diff --git a/libavcodec/nellymoserdec.c b/libavcodec/nellymoserdec.c
index fd8568d5ab..32cf56c9ff 100644
--- a/libavcodec/nellymoserdec.c
+++ b/libavcodec/nellymoserdec.c
@@ -121,7 +121,7 @@ static void nelly_decode_block(NellyMoserDecodeContext *s,
         memset(&aptr[NELLY_FILL_LEN], 0, (NELLY_BUF_LEN - NELLY_FILL_LEN) * sizeof(float));
 
-        ff_imdct_calc(&s->imdct_ctx, s->imdct_out, aptr);
+        s->imdct_ctx.imdct_calc(&s->imdct_ctx, s->imdct_out, aptr);
         /* XXX: overlapping and windowing should be part of a more generic imdct function */
         overlap_and_window(s, s->state, aptr, s->imdct_out);
diff --git a/libavcodec/nellymoserenc.c b/libavcodec/nellymoserenc.c
index f9b085a644..cf73ea4a22 100644
--- a/libavcodec/nellymoserenc.c
+++ b/libavcodec/nellymoserenc.c
@@ -116,13 +116,13 @@ static void apply_mdct(NellyMoserEncodeContext *s)
     s->dsp.vector_fmul(s->in_buff, s->buf[s->bufsel], ff_sine_128, NELLY_BUF_LEN);
     s->dsp.vector_fmul_reverse(s->in_buff + NELLY_BUF_LEN, s->buf[s->bufsel] + NELLY_BUF_LEN, ff_sine_128, NELLY_BUF_LEN);
-    ff_mdct_calc(&s->mdct_ctx, s->mdct_out, s->in_buff);
+    s->mdct_ctx.mdct_calc(&s->mdct_ctx, s->mdct_out, s->in_buff);
 
     s->dsp.vector_fmul(s->buf[s->bufsel] + NELLY_BUF_LEN, s->buf[s->bufsel] + NELLY_BUF_LEN, ff_sine_128, NELLY_BUF_LEN);
     s->dsp.vector_fmul_reverse(s->buf[s->bufsel] + 2 * NELLY_BUF_LEN, s->buf[1 - s->bufsel], ff_sine_128, NELLY_BUF_LEN);
-    ff_mdct_calc(&s->mdct_ctx, s->mdct_out + NELLY_BUF_LEN, s->buf[s->bufsel] + NELLY_BUF_LEN);
+    s->mdct_ctx.mdct_calc(&s->mdct_ctx, s->mdct_out + NELLY_BUF_LEN, s->buf[s->bufsel] + NELLY_BUF_LEN);
 }
 
diff --git a/libavcodec/qdm2.c b/libavcodec/qdm2.c
index 9c79ddff1e..3ef712cc97 100644
--- a/libavcodec/qdm2.c
+++ b/libavcodec/qdm2.c
@@ -1588,7 +1588,7 @@ static void qdm2_calculate_fft (QDM2Context *q, int channel, int sub_packet)
     int i;
     q->fft.complex[channel][0].re *= 2.0f;
     q->fft.complex[channel][0].im = 0.0f;
-    ff_rdft_calc(&q->rdft_ctx, (FFTSample *)q->fft.complex[channel]);
+    q->rdft_ctx.rdft_calc(&q->rdft_ctx, (FFTSample *)q->fft.complex[channel]);
     /* add samples to output buffer */
     for (i = 0; i < ((q->fft_frame_size + 15) & ~15); i++)
         q->output_buffer[q->channels * i + channel] += ((float *) q->fft.complex[channel])[i] * gain;
diff --git a/libavcodec/rdft.c b/libavcodec/rdft.c
index 0ad1f4bf6d..23ce524dcd 100644
--- a/libavcodec/rdft.c
+++ b/libavcodec/rdft.c
@@ -65,8 +65,8 @@ static void ff_rdft_calc_c(RDFTContext* s, FFTSample* data)
     const FFTSample *tsin = s->tsin;
 
     if (!s->inverse) {
-        ff_fft_permute(&s->fft, (FFTComplex*)data);
-        ff_fft_calc(&s->fft, (FFTComplex*)data);
+        s->fft.fft_permute(&s->fft, (FFTComplex*)data);
+        s->fft.fft_calc(&s->fft, (FFTComplex*)data);
     }
     /* i=0 is a special case because of packing, the DC term is real, so we
        are going to throw the N/2 term (also real) in with it. */
@@ -91,8 +91,8 @@ static void ff_rdft_calc_c(RDFTContext* s, FFTSample* data)
     if (s->inverse) {
         data[0] *= k1;
         data[1] *= k1;
-        ff_fft_permute(&s->fft, (FFTComplex*)data);
-        ff_fft_calc(&s->fft, (FFTComplex*)data);
+        s->fft.fft_permute(&s->fft, (FFTComplex*)data);
+        s->fft.fft_calc(&s->fft, (FFTComplex*)data);
     }
 }
diff --git a/libavcodec/synth_filter.c b/libavcodec/synth_filter.c
index f8e63ca6bc..8e6f1202fe 100644
--- a/libavcodec/synth_filter.c
+++ b/libavcodec/synth_filter.c
@@ -29,7 +29,7 @@ static void synth_filter_float(FFTContext *imdct,
     float *synth_buf= synth_buf_ptr + *synth_buf_offset;
     int i, j;
 
-    ff_imdct_half(imdct, synth_buf, in);
+    imdct->imdct_half(imdct, synth_buf, in);
 
     for (i = 0; i < 16; i++){
         float a= synth_buf2[i ];
diff --git a/libavcodec/twinvq.c b/libavcodec/twinvq.c
index 66d3a9656b..275bf0aa66 100644
--- a/libavcodec/twinvq.c
+++ b/libavcodec/twinvq.c
@@ -608,6 +608,7 @@ static void dec_lpc_spectrum_inv(TwinContext *tctx, float *lsp,
 static void imdct_and_window(TwinContext *tctx, enum FrameType ftype, int wtype,
                              float *in, float *prev, int ch)
 {
+    FFTContext *mdct = &tctx->mdct_ctx[ftype];
     const ModeTab *mtab = tctx->mtab;
     int bsize = mtab->size / mtab->fmode[ftype].sub;
     int size = mtab->size;
@@ -640,7 +641,7 @@ static void imdct_and_window(TwinContext *tctx, enum FrameType ftype, int wtype,
 
         wsize = types_sizes[wtype_to_wsize[sub_wtype]];
 
-        ff_imdct_half(&tctx->mdct_ctx[ftype], buf1 + bsize*j, in + bsize*j);
+        mdct->imdct_half(mdct, buf1 + bsize*j, in + bsize*j);
         tctx->dsp.vector_fmul_window(out2, prev_buf + (bsize-wsize)/2,
diff --git a/libavcodec/vorbis_dec.c b/libavcodec/vorbis_dec.c
index b01094cf89..5fa7be1365 100644
--- a/libavcodec/vorbis_dec.c
+++ b/libavcodec/vorbis_dec.c
@@ -1448,7 +1448,7 @@ void vorbis_inverse_coupling(float *mag, float *ang, int blocksize)
 static int vorbis_parse_audio_packet(vorbis_context *vc)
 {
     GetBitContext *gb = &vc->gb;
-
+    FFTContext *mdct;
     uint_fast8_t previous_window = vc->previous_window;
     uint_fast8_t mode_number;
     uint_fast8_t blockflag;
@@ -1552,11 +1552,13 @@ static int vorbis_parse_audio_packet(vorbis_context *vc)
 
 // Dotproduct, MDCT
 
+    mdct = &vc->mdct[blockflag];
+
     for (j = vc->audio_channels-1;j >= 0; j--) {
         ch_floor_ptr = vc->channel_floors + j * blocksize / 2;
         ch_res_ptr = vc->channel_residues + res_chan[j] * blocksize / 2;
         vc->dsp.vector_fmul(ch_floor_ptr, ch_floor_ptr, ch_res_ptr, blocksize / 2);
-        ff_imdct_half(&vc->mdct[blockflag], ch_res_ptr, ch_floor_ptr);
+        mdct->imdct_half(mdct, ch_res_ptr, ch_floor_ptr);
     }
 
 // Overlap/add, save data for next overlapping FPMATH
diff --git a/libavcodec/vorbis_enc.c b/libavcodec/vorbis_enc.c
index 010483cb54..7c5d521464 100644
--- a/libavcodec/vorbis_enc.c
+++ b/libavcodec/vorbis_enc.c
@@ -935,7 +935,7 @@ static int apply_window_and_mdct(vorbis_enc_context *venc, const signed short *a
     }
 
     for (channel = 0; channel < venc->channels; channel++)
-        ff_mdct_calc(&venc->mdct[0], venc->coeffs + channel * window_len,
+        venc->mdct[0].mdct_calc(&venc->mdct[0], venc->coeffs + channel * window_len,
                      venc->samples + channel * window_len * 2);
 
     if (samples) {
diff --git a/libavcodec/wmadec.c b/libavcodec/wmadec.c
index 74fc6bab1a..f6ed26cb59 100644
--- a/libavcodec/wmadec.c
+++ b/libavcodec/wmadec.c
@@ -447,6 +447,7 @@ static int wma_decode_block(WMACodecContext *s)
     int coef_nb_bits, total_gain;
     int nb_coefs[MAX_CHANNELS];
     float mdct_norm;
+    FFTContext *mdct;
 
 #ifdef TRACE
     tprintf(s->avctx, "***decode_block: %d:%d\n", s->frame_count - 1, s->block_num);
@@ -742,12 +743,14 @@ static int wma_decode_block(WMACodecContext *s)
     }
 
 next:
+    mdct = &s->mdct_ctx[bsize];
+
     for(ch = 0; ch < s->nb_channels; ch++) {
         int n4, index;
 
         n4 = s->block_len / 2;
         if(s->channel_coded[ch]){
-            ff_imdct_calc(&s->mdct_ctx[bsize], s->output, s->coefs[ch]);
+            mdct->imdct_calc(mdct, s->output, s->coefs[ch]);
         }else if(!(s->ms_stereo && ch==1))
             memset(s->output, 0, sizeof(s->output));
 
diff --git a/libavcodec/wmaenc.c b/libavcodec/wmaenc.c
index 89370e7e7d..d2e811fd49 100644
--- a/libavcodec/wmaenc.c
+++ b/libavcodec/wmaenc.c
@@ -77,6 +77,7 @@ static int encode_init(AVCodecContext * avctx){
 static void apply_window_and_mdct(AVCodecContext * avctx, const signed short * audio, int len) {
     WMACodecContext *s = avctx->priv_data;
     int window_index= s->frame_len_bits - s->block_len_bits;
+    FFTContext *mdct = &s->mdct_ctx[window_index];
     int i, j, channel;
     const float * win = s->windows[window_index];
     int window_len = 1 << s->block_len_bits;
@@ -89,7 +90,7 @@ static void apply_window_and_mdct(AVCodecContext * avctx, const signed short * a
             s->output[i+window_len] = audio[j] / n * win[window_len - i - 1];
             s->frame_out[channel][i] = audio[j] / n * win[i];
         }
-        ff_mdct_calc(&s->mdct_ctx[window_index], s->coefs[channel], s->output);
+        mdct->mdct_calc(mdct, s->coefs[channel], s->output);
     }
 }
diff --git a/libavcodec/wmaprodec.c b/libavcodec/wmaprodec.c
index 242139d569..343ac84d9b 100644
--- a/libavcodec/wmaprodec.c
+++ b/libavcodec/wmaprodec.c
@@ -1222,6 +1222,7 @@ static int decode_subframe(WMAProDecodeCtx *s)
             get_bits_count(&s->gb) - s->subframe_offset);
 
     if (transmit_coeffs) {
+        FFTContext *mdct = &s->mdct_ctx[av_log2(subframe_len) - WMAPRO_BLOCK_MIN_BITS];
         /** reconstruct the per channel data */
         inverse_channel_transform(s);
         for (i = 0; i < s->channels_for_cur_subframe; i++) {
@@ -1246,9 +1247,8 @@ static int decode_subframe(WMAProDecodeCtx *s)
                                   quant, end - start);
             }
 
-            /** apply imdct (ff_imdct_half == DCTIV with reverse) */
-            ff_imdct_half(&s->mdct_ctx[av_log2(subframe_len) - WMAPRO_BLOCK_MIN_BITS],
-                          s->channel[c].coeffs, s->tmp);
+            /** apply imdct (imdct_half == DCTIV with reverse) */
+            mdct->imdct_half(mdct, s->channel[c].coeffs, s->tmp);
         }
     }
 
diff --git a/libavcodec/wmavoice.c b/libavcodec/wmavoice.c
index 5e7f8a6739..0b0a2885cf 100644
--- a/libavcodec/wmavoice.c
+++ b/libavcodec/wmavoice.c
@@ -558,7 +558,7 @@ static void calc_input_response(WMAVoiceContext *s, float *lpcs,
     int n, idx;
 
     /* Create frequency power spectrum of speech input (i.e. RDFT of LPCs) */
-    ff_rdft_calc(&s->rdft, lpcs);
+    s->rdft.rdft_calc(&s->rdft, lpcs);
 #define log_range(var, assign) do { \
         float tmp = log10f(assign); var = tmp; \
         max = FFMAX(max, tmp); min = FFMIN(min, tmp); \
@@ -601,8 +601,8 @@ static void calc_input_response(WMAVoiceContext *s, float *lpcs,
      * is a sinus input) by doing a phase shift (in theory, H(sin())=cos()).
     * Hilbert_Transform(RDFT(x)) = Laplace_Transform(x), which calculates the
     * "moment" of the LPCs in this filter. */
-    ff_dct_calc(&s->dct, lpcs);
-    ff_dct_calc(&s->dst, lpcs);
+    s->dct.dct_calc(&s->dct, lpcs);
+    s->dst.dct_calc(&s->dst, lpcs);
 
     /* Split out the coefficient indexes into phase/magnitude pairs */
     idx = 255 + av_clip(lpcs[64], -255, 255);
@@ -623,7 +623,7 @@ static void calc_input_response(WMAVoiceContext *s, float *lpcs,
     coeffs[1] = last_coeff;
 
     /* move into real domain */
-    ff_rdft_calc(&s->irdft, coeffs);
+    s->irdft.rdft_calc(&s->irdft, coeffs);
 
     /* tilt correction and normalize scale */
     memset(&coeffs[remainder], 0, sizeof(coeffs[0]) * (128 - remainder));
@@ -693,8 +693,8 @@ static void wiener_denoise(WMAVoiceContext *s, int fcb_type,
     /* apply coefficients (in frequency spectrum domain), i.e. complex
     * number multiplication */
    memset(&synth_pf[size], 0, sizeof(synth_pf[0]) * (128 - size));
-    ff_rdft_calc(&s->rdft, synth_pf);
-    ff_rdft_calc(&s->rdft, coeffs);
+    s->rdft.rdft_calc(&s->rdft, synth_pf);
+    s->rdft.rdft_calc(&s->rdft, coeffs);
     synth_pf[0] *= coeffs[0];
     synth_pf[1] *= coeffs[1];
     for (n = 1; n < 64; n++) {
@@ -702,7 +702,7 @@ static void wiener_denoise(WMAVoiceContext *s, int fcb_type,
         synth_pf[n * 2] = v1 * coeffs[n * 2] - v2 * coeffs[n * 2 + 1];
         synth_pf[n * 2 + 1] = v2 * coeffs[n * 2] + v1 * coeffs[n * 2 + 1];
     }
-    ff_rdft_calc(&s->irdft, synth_pf);
+    s->irdft.rdft_calc(&s->irdft, synth_pf);
 }
 
 /* merge filter output with the history of previous runs */