author      Justin Ruggles <justin.ruggles@gmail.com>    2011-01-27 15:20:43 +0000
committer   Michael Niedermayer <michaelni@gmx.at>       2011-01-28 03:15:35 +0100
commit      733dbe7d18c267728ef03762d83e9e1b086668cd (patch)
tree        41345c1989e46631e4209c3b09206b0fd45bc54a
parent      2f7d8977bcdeb2c39fd9acbd753d605298824db8 (diff)
download    ffmpeg-733dbe7d18c267728ef03762d83e9e1b086668cd.tar.gz
Remove the add bias hack for the C version of DSPContext.float_to_int16_*().
(cherry picked from commit 9d06d7bce3babb82ed650c13ed13a57f6f626a71)
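
Background on the change: the removed C conversion path required each decoder to hand it samples biased into [384.0, 386.0] (roughly [-1.0, 1.0] plus 385.0f). For floats in that range the low 16 bits of the IEEE-754 bit pattern step in units of 1/32768, so the int16 sample can be read straight out of the bits (up to a fixed 0x8000 offset) without a float-to-int rounding instruction. The SIMD versions always expected plain [-32768.0, 32767.0] input, which is why every decoder carried an add_bias value and a check against ff_float_to_int16_interleave_c. The sketch below is a standalone illustration, not FFmpeg code; the helper names are invented for the example. bias_hack_to_int16() mirrors the deleted C path, round_to_int16() mirrors the new behaviour of ff_float_to_int16_c():

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Old C path (the hack removed here): expects "sample + 385.0f", so the low
     * 16 bits of the float's bit pattern already hold the int16 value. */
    static int16_t bias_hack_to_int16(float biased)
    {
        int32_t tmp;
        memcpy(&tmp, &biased, sizeof(tmp));   /* reinterpret the IEEE-754 bits */
        if (tmp & 0xf0000)                    /* outside 0x43C0xxxx, i.e. outside [384.0,386.0): clip */
            tmp = (0x43c0ffff - tmp) >> 31;   /* -1 -> 32767, 0 -> -32768 after the subtract below */
        return (int16_t)(tmp - 0x8000);       /* truncate to 16 bits, as the original int16_t store did */
    }

    /* New path: the patched dsputil.c does av_clip_int16(lrintf(*src)); it
     * expects samples already scaled to [-32768.0, 32767.0]. */
    static int16_t round_to_int16(float sample)
    {
        long v = lrintf(sample);
        if (v >  32767) v =  32767;
        if (v < -32768) v = -32768;
        return (int16_t)v;
    }

    int main(void)
    {
        float s = 0.25f;   /* one sample, nominal range [-1.0, 1.0) */
        printf("old bias hack: %d\n", bias_hack_to_int16(385.0f + s));   /* 8192 */
        printf("new lrintf   : %d\n", round_to_int16(32768.0f * s));     /* 8192 */
        return 0;
    }

With the hack gone, the decoders fold the former 1/32768 pre-scale into their existing scale factors instead (e.g. sf_offset = 60 in the AAC decoder, since (1 << (60 / 4)) == 32768, or scale_bias = 32768.0f in the Vorbis decoder), so their output already sits in the [-32768, 32767] range the C conversion now expects.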
-rw-r--r--  libavcodec/aac.h           |  1
-rw-r--r--  libavcodec/aacdec.c        | 42
-rw-r--r--  libavcodec/aacsbr.c        | 13
-rw-r--r--  libavcodec/ac3dec.c        | 13
-rw-r--r--  libavcodec/ac3dec.h        |  1
-rw-r--r--  libavcodec/binkaudio.c     |  5
-rw-r--r--  libavcodec/dca.c           | 47
-rw-r--r--  libavcodec/dsputil.c       |  9
-rw-r--r--  libavcodec/dsputil.h       |  3
-rw-r--r--  libavcodec/nellymoserdec.c |  9
-rw-r--r--  libavcodec/vorbis_dec.c    | 38
-rw-r--r--  libavcodec/wmadec.c        | 21
12 files changed, 54 insertions, 148 deletions
diff --git a/libavcodec/aac.h b/libavcodec/aac.h
index b40d2c0003..714e314cba 100644
--- a/libavcodec/aac.h
+++ b/libavcodec/aac.h
@@ -276,7 +276,6 @@ typedef struct {
      * @{
      */
     float *output_data[MAX_CHANNELS];   ///< Points to each element's 'ret' buffer (PCM output).
-    float add_bias;                     ///< offset for dsp.float_to_int16
     float sf_scale;                     ///< Pre-scale for correct IMDCT and dsp.float_to_int16.
     int sf_offset;                      ///< offset into pow2sf_tab as appropriate for dsp.float_to_int16
     /** @} */
diff --git a/libavcodec/aacdec.c b/libavcodec/aacdec.c
index fd65b1c3ba..fddec17fcc 100644
--- a/libavcodec/aacdec.c
+++ b/libavcodec/aacdec.c
@@ -566,18 +566,10 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
     ac->random_state = 0x1f2e3d4c;
 
     // -1024 - Compensate wrong IMDCT method.
-    // 32768 - Required to scale values to the correct range for the bias method
-    //         for float to int16 conversion.
-
-    if (ac->dsp.float_to_int16_interleave == ff_float_to_int16_interleave_c) {
-        ac->add_bias = 385.0f;
-        ac->sf_scale = 1. / (-1024. * 32768.);
-        ac->sf_offset = 0;
-    } else {
-        ac->add_bias = 0.0f;
+    // 60    - Required to scale values to the correct range [-32768,32767]
+    //         for float to int16 conversion. (1 << (60 / 4)) == 32768
         ac->sf_scale = 1. / -1024.;
         ac->sf_offset = 60;
-    }
 
     ff_aac_tableinit();
 
@@ -1701,7 +1693,7 @@ static void apply_tns(float coef[1024], TemporalNoiseShaping *tns,
 /**
  * Conduct IMDCT and windowing.
  */
-static void imdct_and_windowing(AACContext *ac, SingleChannelElement *sce, float bias)
+static void imdct_and_windowing(AACContext *ac, SingleChannelElement *sce)
 {
     IndividualChannelStream *ics = &sce->ics;
     float *in = sce->coeffs;
@@ -1729,29 +1721,29 @@
      */
     if ((ics->window_sequence[1] == ONLY_LONG_SEQUENCE || ics->window_sequence[1] == LONG_STOP_SEQUENCE) &&
         (ics->window_sequence[0] == ONLY_LONG_SEQUENCE || ics->window_sequence[0] == LONG_START_SEQUENCE)) {
-        ac->dsp.vector_fmul_window( out,               saved,            buf,         lwindow_prev, bias, 512);
+        ac->dsp.vector_fmul_window( out,               saved,            buf,         lwindow_prev, 0, 512);
     } else {
         for (i = 0; i < 448; i++)
-            out[i] = saved[i] + bias;
+            out[i] = saved[i];
 
         if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
-            ac->dsp.vector_fmul_window(out + 448 + 0*128, saved + 448,      buf + 0*128, swindow_prev, bias, 64);
-            ac->dsp.vector_fmul_window(out + 448 + 1*128, buf + 0*128 + 64, buf + 1*128, swindow,      bias, 64);
-            ac->dsp.vector_fmul_window(out + 448 + 2*128, buf + 1*128 + 64, buf + 2*128, swindow,      bias, 64);
-            ac->dsp.vector_fmul_window(out + 448 + 3*128, buf + 2*128 + 64, buf + 3*128, swindow,      bias, 64);
-            ac->dsp.vector_fmul_window(temp,              buf + 3*128 + 64, buf + 4*128, swindow,      bias, 64);
+            ac->dsp.vector_fmul_window(out + 448 + 0*128, saved + 448,      buf + 0*128, swindow_prev, 0, 64);
+            ac->dsp.vector_fmul_window(out + 448 + 1*128, buf + 0*128 + 64, buf + 1*128, swindow,      0, 64);
+            ac->dsp.vector_fmul_window(out + 448 + 2*128, buf + 1*128 + 64, buf + 2*128, swindow,      0, 64);
+            ac->dsp.vector_fmul_window(out + 448 + 3*128, buf + 2*128 + 64, buf + 3*128, swindow,      0, 64);
+            ac->dsp.vector_fmul_window(temp,              buf + 3*128 + 64, buf + 4*128, swindow,      0, 64);
             memcpy(                    out + 448 + 4*128, temp, 64 * sizeof(float));
         } else {
-            ac->dsp.vector_fmul_window(out + 448,         saved + 448,      buf,         swindow_prev, bias, 64);
+            ac->dsp.vector_fmul_window(out + 448,         saved + 448,      buf,         swindow_prev, 0, 64);
             for (i = 576; i < 1024; i++)
-                out[i] = buf[i-512] + bias;
+                out[i] = buf[i-512];
         }
     }
 
     // buffer update
     if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
         for (i = 0; i < 64; i++)
-            saved[i] = temp[64 + i] - bias;
+            saved[i] = temp[64 + i];
         ac->dsp.vector_fmul_window(saved + 64,  buf + 4*128 + 64, buf + 5*128, swindow, 0, 64);
         ac->dsp.vector_fmul_window(saved + 192, buf + 5*128 + 64, buf + 6*128, swindow, 0, 64);
         ac->dsp.vector_fmul_window(saved + 320, buf + 6*128 + 64, buf + 7*128, swindow, 0, 64);
@@ -1811,13 +1803,12 @@ static void apply_independent_coupling(AACContext *ac,
 {
     int i;
     const float gain = cce->coup.gain[index][0];
-    const float bias = ac->add_bias;
     const float *src = cce->ch[0].ret;
     float *dest = target->ret;
     const int len = 1024 << (ac->m4ac.sbr == 1);
 
     for (i = 0; i < len; i++)
-        dest[i] += gain * (src[i] - bias);
+        dest[i] += gain * src[i];
 }
 
 /**
@@ -1861,7 +1852,6 @@ static void apply_channel_coupling(AACContext *ac, ChannelElement *cc,
 static void spectral_to_sample(AACContext *ac)
 {
     int i, type;
-    float imdct_bias = (ac->m4ac.sbr <= 0) ? ac->add_bias : 0.0f;
     for (type = 3; type >= 0; type--) {
         for (i = 0; i < MAX_ELEM_ID; i++) {
             ChannelElement *che = ac->che[type][i];
@@ -1875,9 +1865,9 @@ static void spectral_to_sample(AACContext *ac)
                 if (type <= TYPE_CPE)
                     apply_channel_coupling(ac, che, type, i, BETWEEN_TNS_AND_IMDCT, apply_dependent_coupling);
                 if (type != TYPE_CCE || che->coup.coupling_point == AFTER_IMDCT) {
-                    imdct_and_windowing(ac, &che->ch[0], imdct_bias);
+                    imdct_and_windowing(ac, &che->ch[0]);
                     if (type == TYPE_CPE) {
-                        imdct_and_windowing(ac, &che->ch[1], imdct_bias);
+                        imdct_and_windowing(ac, &che->ch[1]);
                     }
                     if (ac->m4ac.sbr > 0) {
                         ff_sbr_apply(ac, &che->sbr, type, che->ch[0].ret, che->ch[1].ret);
diff --git a/libavcodec/aacsbr.c b/libavcodec/aacsbr.c
index 050305a3fe..9b10bf25fe 100644
--- a/libavcodec/aacsbr.c
+++ b/libavcodec/aacsbr.c
@@ -1175,12 +1175,10 @@ static void sbr_qmf_analysis(DSPContext *dsp, FFTContext *mdct, const float *in,
 static void sbr_qmf_synthesis(DSPContext *dsp, FFTContext *mdct,
                               float *out, float X[2][38][64],
                               float mdct_buf[2][64],
-                              float *v0, int *v_off, const unsigned int div,
-                              float bias, float scale)
+                              float *v0, int *v_off, const unsigned int div)
 {
     int i, n;
     const float *sbr_qmf_window = div ? sbr_qmf_window_ds : sbr_qmf_window_us;
-    int scale_and_bias = scale != 1.0f || bias != 0.0f;
     float *v;
 
     for (i = 0; i < 32; i++) {
         if (*v_off == 0) {
@@ -1222,9 +1220,6 @@ static void sbr_qmf_synthesis(DSPContext *dsp, FFTContext *mdct,
         dsp->vector_fmul_add(out, v + ( 960 >> div), sbr_qmf_window + (448 >> div), out   , 64 >> div);
         dsp->vector_fmul_add(out, v + (1024 >> div), sbr_qmf_window + (512 >> div), out   , 64 >> div);
         dsp->vector_fmul_add(out, v + (1216 >> div), sbr_qmf_window + (576 >> div), out   , 64 >> div);
-        if (scale_and_bias)
-            for (n = 0; n < 64 >> div; n++)
-                out[n] = out[n] * scale + bias;
         out += 64 >> div;
     }
 }
@@ -1760,12 +1755,10 @@ void ff_sbr_apply(AACContext *ac, SpectralBandReplication *sbr, int id_aac,
     sbr_qmf_synthesis(&ac->dsp, &sbr->mdct, L, sbr->X[0], sbr->qmf_filter_scratch,
                       sbr->data[0].synthesis_filterbank_samples,
                       &sbr->data[0].synthesis_filterbank_samples_offset,
-                      downsampled,
-                      ac->add_bias, -1024 * ac->sf_scale);
+                      downsampled);
     if (nch == 2)
         sbr_qmf_synthesis(&ac->dsp, &sbr->mdct, R, sbr->X[1], sbr->qmf_filter_scratch,
                           sbr->data[1].synthesis_filterbank_samples,
                           &sbr->data[1].synthesis_filterbank_samples_offset,
-                          downsampled,
-                          ac->add_bias, -1024 * ac->sf_scale);
+                          downsampled);
 }
diff --git a/libavcodec/ac3dec.c b/libavcodec/ac3dec.c
index a1e731dcf9..2f9bc261bd 100644
--- a/libavcodec/ac3dec.c
+++ b/libavcodec/ac3dec.c
@@ -196,13 +196,7 @@ static av_cold int ac3_decode_init(AVCodecContext *avctx)
     av_lfg_init(&s->dith_state, 0);
 
     /* set bias values for float to int16 conversion */
-    if(s->dsp.float_to_int16_interleave == ff_float_to_int16_interleave_c) {
-        s->add_bias = 385.0f;
-        s->mul_bias = 1.0f;
-    } else {
-        s->add_bias = 0.0f;
         s->mul_bias = 32767.0f;
-    }
 
     /* allow downmixing to stereo or mono */
     if (avctx->channels > 0 && avctx->request_channels > 0 &&
@@ -626,9 +620,6 @@ static void do_rematrixing(AC3DecodeContext *s)
 static inline void do_imdct(AC3DecodeContext *s, int channels)
 {
     int ch;
-    float add_bias = s->add_bias;
-    if(s->out_channels==1 && channels>1)
-        add_bias *= LEVEL_MINUS_3DB; // compensate for the gain in downmix
 
     for (ch=1; ch<=channels; ch++) {
         if (s->block_switch[ch]) {
@@ -637,13 +628,13 @@ static inline void do_imdct(AC3DecodeContext *s, int channels)
             for(i=0; i<128; i++)
                 x[i] = s->transform_coeffs[ch][2*i];
             ff_imdct_half(&s->imdct_256, s->tmp_output, x);
-            s->dsp.vector_fmul_window(s->output[ch-1], s->delay[ch-1], s->tmp_output, s->window, add_bias, 128);
+            s->dsp.vector_fmul_window(s->output[ch-1], s->delay[ch-1], s->tmp_output, s->window, 0, 128);
             for(i=0; i<128; i++)
                 x[i] = s->transform_coeffs[ch][2*i+1];
             ff_imdct_half(&s->imdct_256, s->delay[ch-1], x);
         } else {
             ff_imdct_half(&s->imdct_512, s->tmp_output, s->transform_coeffs[ch]);
-            s->dsp.vector_fmul_window(s->output[ch-1], s->delay[ch-1], s->tmp_output, s->window, add_bias, 128);
+            s->dsp.vector_fmul_window(s->output[ch-1], s->delay[ch-1], s->tmp_output, s->window, 0, 128);
             memcpy(s->delay[ch-1], s->tmp_output+128, 128*sizeof(float));
         }
     }
diff --git a/libavcodec/ac3dec.h b/libavcodec/ac3dec.h
index 0707769478..55520cdcee 100644
--- a/libavcodec/ac3dec.h
+++ b/libavcodec/ac3dec.h
@@ -190,7 +190,6 @@ typedef struct {
 ///@defgroup opt optimization
     DSPContext dsp;                         ///< for optimization
-    float add_bias;                         ///< offset for float_to_int16 conversion
     float mul_bias;                         ///< scaling for float_to_int16 conversion
 ///@}
diff --git a/libavcodec/binkaudio.c b/libavcodec/binkaudio.c
index 7f579822db..ae2f6c88b0 100644
--- a/libavcodec/binkaudio.c
+++ b/libavcodec/binkaudio.c
@@ -222,11 +222,6 @@ static void decode_block(BinkAudioContext *s, short *out, int use_dct)
         ff_rdft_calc(&s->trans.rdft, coeffs);
     }
 
-    if (s->dsp.float_to_int16_interleave == ff_float_to_int16_interleave_c) {
-        for (i = 0; i < s->channels; i++)
-            for (j = 0; j < s->frame_len; j++)
-                s->coeffs_ptr[i][j] = 385.0 + s->coeffs_ptr[i][j]*(1.0/32767.0);
-    }
     s->dsp.float_to_int16_interleave(out, (const float **)s->coeffs_ptr, s->frame_len, s->channels);
 
     if (!s->first) {
diff --git a/libavcodec/dca.c b/libavcodec/dca.c
index c1c70cde1b..aa71411efa 100644
--- a/libavcodec/dca.c
+++ b/libavcodec/dca.c
@@ -311,7 +311,6 @@ typedef struct {
     DECLARE_ALIGNED(16, float, raXin)[32];
 
     int output;                 ///< type of output
-    float add_bias;             ///< output bias
     float scale_bias;           ///< output scale
 
     DECLARE_ALIGNED(16, float, subband_samples)[DCA_BLOCKS_MAX][DCA_PRIM_CHANNELS_MAX][DCA_SUBBANDS][8];
@@ -868,7 +867,7 @@ static int dca_subframe_header(DCAContext * s, int base_channel, int block_index
 
 static void qmf_32_subbands(DCAContext * s, int chans,
                             float samples_in[32][8], float *samples_out,
-                            float scale, float bias)
+                            float scale)
 {
     const float *prCoeff;
     int i;
@@ -897,7 +896,7 @@ static void qmf_32_subbands(DCAContext * s, int chans,
         s->synth.synth_filter_float(&s->imdct, s->subband_fir_hist[chans], &s->hist_index[chans],
                                     s->subband_fir_noidea[chans], prCoeff,
-                                    samples_out, s->raXin, scale, bias);
+                                    samples_out, s->raXin, scale, 0);
         samples_out+= 32;
 
     }
@@ -905,8 +904,7 @@ static void qmf_32_subbands(DCAContext * s, int chans,
 static void lfe_interpolation_fir(DCAContext *s, int decimation_select,
                                   int num_deci_sample, float *samples_in,
-                                  float *samples_out, float scale,
-                                  float bias)
+                                  float *samples_out, float scale)
 {
     /* samples_in: An array holding decimated samples.
      *             Samples in current subframe starts from samples_in[0],
@@ -931,7 +929,7 @@ static void lfe_interpolation_fir(DCAContext *s, int decimation_select,
     /* Interpolation */
     for (deciindex = 0; deciindex < num_deci_sample; deciindex++) {
         s->dcadsp.lfe_fir(samples_out, samples_in, prCoeff, decifactor,
-                          scale, bias);
+                          scale, 0);
         samples_in++;
         samples_out += 2 * decifactor;
     }
@@ -939,19 +937,19 @@ static void lfe_interpolation_fir(DCAContext *s, int decimation_select,
 
 /* downmixing routines */
 #define MIX_REAR1(samples, si1, rs, coef) \
-     samples[i]     += (samples[si1] - add_bias) * coef[rs][0]; \
-     samples[i+256] += (samples[si1] - add_bias) * coef[rs][1];
+     samples[i]     += samples[si1] * coef[rs][0]; \
+     samples[i+256] += samples[si1] * coef[rs][1];
 
 #define MIX_REAR2(samples, si1, si2, rs, coef) \
-     samples[i]     += (samples[si1] - add_bias) * coef[rs][0] + (samples[si2] - add_bias) * coef[rs+1][0]; \
-     samples[i+256] += (samples[si1] - add_bias) * coef[rs][1] + (samples[si2] - add_bias) * coef[rs+1][1];
+     samples[i]     += samples[si1] * coef[rs][0] + samples[si2] * coef[rs+1][0]; \
+     samples[i+256] += samples[si1] * coef[rs][1] + samples[si2] * coef[rs+1][1];
 
 #define MIX_FRONT3(samples, coef) \
-    t = samples[i+c] - add_bias; \
-    u = samples[i+l] - add_bias; \
-    v = samples[i+r] - add_bias; \
-    samples[i]     = t * coef[0][0] + u * coef[1][0] + v * coef[2][0] + add_bias; \
-    samples[i+256] = t * coef[0][1] + u * coef[1][1] + v * coef[2][1] + add_bias;
+    t = samples[i+c]; \
+    u = samples[i+l]; \
+    v = samples[i+r]; \
+    samples[i]     = t * coef[0][0] + u * coef[1][0] + v * coef[2][0]; \
+    samples[i+256] = t * coef[0][1] + u * coef[1][1] + v * coef[2][1];
 
 #define DOWNMIX_TO_STEREO(op1, op2) \
     for (i = 0; i < 256; i++){ \
@@ -961,7 +959,7 @@ static void lfe_interpolation_fir(DCAContext *s, int decimation_select,
 
 static void dca_downmix(float *samples, int srcfmt,
                         int downmix_coef[DCA_PRIM_CHANNELS_MAX][2],
-                        const int8_t *channel_mapping, float add_bias)
+                        const int8_t *channel_mapping)
 {
     int c,l,r,sl,sr,s;
     int i;
@@ -1193,13 +1191,12 @@ static int dca_filter_channels(DCAContext * s, int block_index)
 /* static float pcm_to_double[8] =
     {32768.0, 32768.0, 524288.0, 524288.0, 0, 8388608.0, 8388608.0};*/
         qmf_32_subbands(s, k, subband_samples[k], &s->samples[256 * s->channel_order_tab[k]],
-                        M_SQRT1_2*s->scale_bias /*pcm_to_double[s->source_pcm_res] */ ,
-                        s->add_bias );
+                        M_SQRT1_2*s->scale_bias /*pcm_to_double[s->source_pcm_res] */ );
     }
 
     /* Down mixing */
     if (s->avctx->request_channels == 2 && s->prim_channels > 2) {
-        dca_downmix(s->samples, s->amode, s->downmix_coef, s->channel_order_tab, s->add_bias);
+        dca_downmix(s->samples, s->amode, s->downmix_coef, s->channel_order_tab);
     }
 
     /* Generate LFE samples for this subsubframe FIXME!!! */
@@ -1207,7 +1204,7 @@ static int dca_filter_channels(DCAContext * s, int block_index)
         lfe_interpolation_fir(s, s->lfe, 2 * s->lfe,
                               s->lfe_data + 2 * s->lfe * (block_index + 4),
                               &s->samples[256 * dca_lfe_index[s->amode]],
-                              (1.0/256.0)*s->scale_bias, s->add_bias);
+                              (1.0/256.0)*s->scale_bias);
         /* Outputs 20bits pcm samples */
     }
@@ -1798,8 +1795,8 @@ static int dca_decode_frame(AVCodecContext * avctx,
             float* rt_chan = s->samples + s->channel_order_tab[s->xch_base_channel - 1] * 256;
             int j;
             for(j = 0; j < 256; ++j) {
-                lt_chan[j] -= (back_chan[j] - s->add_bias) * M_SQRT1_2;
-                rt_chan[j] -= (back_chan[j] - s->add_bias) * M_SQRT1_2;
+                lt_chan[j] -= back_chan[j] * M_SQRT1_2;
+                rt_chan[j] -= back_chan[j] * M_SQRT1_2;
             }
         }
@@ -1841,11 +1838,6 @@ static av_cold int dca_decode_init(AVCodecContext * avctx)
         s->samples_chanptr[i] = s->samples + i * 256;
     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
 
-    if (s->dsp.float_to_int16_interleave == ff_float_to_int16_interleave_c) {
-        s->add_bias = 385.0f;
-        s->scale_bias = 1.0 / 32768.0;
-    } else {
-        s->add_bias = 0.0f;
         s->scale_bias = 1.0;
 
     /* allow downmixing to stereo */
@@ -1853,7 +1845,6 @@ static av_cold int dca_decode_init(AVCodecContext * avctx)
         avctx->request_channels == 2) {
         avctx->channels = avctx->request_channels;
     }
-    }
 
     return 0;
diff --git a/libavcodec/dsputil.c b/libavcodec/dsputil.c
index 2ed0052977..03a5eeea45 100644
--- a/libavcodec/dsputil.c
+++ b/libavcodec/dsputil.c
@@ -3910,14 +3910,7 @@ static void vector_clipf_c(float *dst, const float *src, float min, float max, i
 }
 
 static av_always_inline int float_to_int16_one(const float *src){
-    int_fast32_t tmp = *(const int32_t*)src;
-    if(tmp & 0xf0000){
-        tmp = (0x43c0ffff - tmp)>>31;
-        // is this faster on some gcc/cpu combinations?
-//      if(tmp > 0x43c0ffff) tmp = 0xFFFF;
-//      else                 tmp = 0;
-    }
-    return tmp - 0x8000;
+    return av_clip_int16(lrintf(*src));
 }
 
 void ff_float_to_int16_c(int16_t *dst, const float *src, long len){
diff --git a/libavcodec/dsputil.h b/libavcodec/dsputil.h
index baa68bebe7..eabecf0d48 100644
--- a/libavcodec/dsputil.h
+++ b/libavcodec/dsputil.h
@@ -435,8 +435,7 @@ typedef struct DSPContext {
      */
     void (*butterflies_float)(float *restrict v1, float *restrict v2, int len);
 
-    /* C version: convert floats from the range [384.0,386.0] to ints in [-32768,32767]
-     * simd versions: convert floats from [-32768.0,32767.0] without rescaling and arrays are 16byte aligned */
+    /* convert floats from [-32768.0,32767.0] without rescaling and arrays are 16byte aligned */
     void (*float_to_int16)(int16_t *dst, const float *src, long len);
     void (*float_to_int16_interleave)(int16_t *dst, const float **src, long len, int channels);
diff --git a/libavcodec/nellymoserdec.c b/libavcodec/nellymoserdec.c
index 89dde91248..e70d0622da 100644
--- a/libavcodec/nellymoserdec.c
+++ b/libavcodec/nellymoserdec.c
@@ -49,7 +49,6 @@ typedef struct NellyMoserDecodeContext {
     float           state[128];
     AVLFG           random_state;
     GetBitContext   gb;
-    int             add_bias;
    float           scale_bias;
     DSPContext      dsp;
     FFTContext      imdct_ctx;
@@ -65,7 +64,7 @@ static void overlap_and_window(NellyMoserDecodeContext *s, float *state, float *
     while (bot < NELLY_BUF_LEN) {
         audio[bot] = a_in [bot]*ff_sine_128[bot]
-                     +state[bot]*ff_sine_128[top] + s->add_bias;
+                     +state[bot]*ff_sine_128[top];
 
         bot++;
         top--;
@@ -136,13 +135,7 @@ static av_cold int decode_init(AVCodecContext * avctx) {
     dsputil_init(&s->dsp, avctx);
 
-    if(s->dsp.float_to_int16 == ff_float_to_int16_c) {
-        s->add_bias = 385;
-        s->scale_bias = 1.0/(8*32768);
-    } else {
-        s->add_bias = 0;
         s->scale_bias = 1.0/(1*8);
-    }
 
     /* Generate overlap window */
     if (!ff_sine_128[127])
diff --git a/libavcodec/vorbis_dec.c b/libavcodec/vorbis_dec.c
index cdb485a9c9..69b784c668 100644
--- a/libavcodec/vorbis_dec.c
+++ b/libavcodec/vorbis_dec.c
@@ -153,8 +153,7 @@ typedef struct vorbis_context_s {
     float *channel_residues;
     float *channel_floors;
     float *saved;
-    uint_fast32_t add_bias; // for float->int conversion
-    uint_fast32_t exp_bias;
+    float scale_bias; // for float->int conversion
 } vorbis_context;
 
 /* Helper functions */
@@ -932,8 +931,8 @@ static int vorbis_parse_id_hdr(vorbis_context *vc)
     vc->saved = av_mallocz((vc->blocksize[1] / 4) * vc->audio_channels * sizeof(float));
     vc->previous_window = 0;
 
-    ff_mdct_init(&vc->mdct[0], bl0, 1, vc->exp_bias ? -(1 << 15) : -1.0);
-    ff_mdct_init(&vc->mdct[1], bl1, 1, vc->exp_bias ? -(1 << 15) : -1.0);
+    ff_mdct_init(&vc->mdct[0], bl0, 1, -vc->scale_bias);
+    ff_mdct_init(&vc->mdct[1], bl1, 1, -vc->scale_bias);
 
     AV_DEBUG(" vorbis version %d \n audio_channels %d \n audio_samplerate %d \n bitrate_max %d \n bitrate_nom %d \n bitrate_min %d \n blk_0 %d blk_1 %d \n ",
              vc->version, vc->audio_channels, vc->audio_samplerate, vc->bitrate_maximum, vc->bitrate_nominal, vc->bitrate_minimum, vc->blocksize[0], vc->blocksize[1]);
@@ -963,13 +962,7 @@ static av_cold int vorbis_decode_init(AVCodecContext *avccontext)
     vc->avccontext = avccontext;
     dsputil_init(&vc->dsp, avccontext);
 
-    if (vc->dsp.float_to_int16_interleave == ff_float_to_int16_interleave_c) {
-        vc->add_bias = 385;
-        vc->exp_bias = 0;
-    } else {
-        vc->add_bias = 0;
-        vc->exp_bias = 15 << 23;
-    }
+    vc->scale_bias = 32768.0f;
 
     if (!headers_len) {
         av_log(avccontext, AV_LOG_ERROR, "Extradata missing.\n");
@@ -1453,18 +1446,6 @@ void vorbis_inverse_coupling(float *mag, float *ang, int blocksize)
         }
     }
 
-static void copy_normalize(float *dst, float *src, int len, int exp_bias,
-                           float add_bias)
-{
-    int i;
-    if (exp_bias) {
-        memcpy(dst, src, len * sizeof(float));
-    } else {
-        for (i = 0; i < len; i++)
-            dst[i] = src[i] + add_bias;
-    }
-}
-
 // Decode the audio packet using the functions above
 
 static int vorbis_parse_audio_packet(vorbis_context *vc)
@@ -1484,7 +1465,6 @@ static int vorbis_parse_audio_packet(vorbis_context *vc)
     uint_fast8_t res_chan[255];
     uint_fast8_t res_num = 0;
     int_fast16_t retlen = 0;
-    float fadd_bias = vc->add_bias;
 
     if (get_bits1(gb)) {
         av_log(vc->avccontext, AV_LOG_ERROR, "Not a Vorbis I audio packet.\n");
@@ -1595,13 +1575,13 @@ static int vorbis_parse_audio_packet(vorbis_context *vc)
         const float *win = vc->win[blockflag & previous_window];
 
         if (blockflag == previous_window) {
-            vc->dsp.vector_fmul_window(ret, saved, buf, win, fadd_bias, blocksize / 4);
+            vc->dsp.vector_fmul_window(ret, saved, buf, win, 0, blocksize / 4);
         } else if (blockflag > previous_window) {
-            vc->dsp.vector_fmul_window(ret, saved, buf, win, fadd_bias, bs0 / 4);
-            copy_normalize(ret+bs0/2, buf+bs0/4, (bs1-bs0)/4, vc->exp_bias, fadd_bias);
+            vc->dsp.vector_fmul_window(ret, saved, buf, win, 0, bs0 / 4);
+            memcpy(ret+bs0/2, buf+bs0/4, ((bs1-bs0)/4) * sizeof(float));
         } else {
-            copy_normalize(ret, saved, (bs1 - bs0) / 4, vc->exp_bias, fadd_bias);
-            vc->dsp.vector_fmul_window(ret + (bs1 - bs0) / 4, saved + (bs1 - bs0) / 4, buf, win, fadd_bias, bs0 / 4);
+            memcpy(ret, saved, ((bs1 - bs0) / 4) * sizeof(float));
+            vc->dsp.vector_fmul_window(ret + (bs1 - bs0) / 4, saved + (bs1 - bs0) / 4, buf, win, 0, bs0 / 4);
         }
         memcpy(saved, buf + blocksize / 4, blocksize / 4 * sizeof(float));
     }
diff --git a/libavcodec/wmadec.c b/libavcodec/wmadec.c
index b720ea5098..03d7bd19fc 100644
--- a/libavcodec/wmadec.c
+++ b/libavcodec/wmadec.c
@@ -768,9 +768,8 @@ next:
 /* decode a frame of frame_len samples */
 static int wma_decode_frame(WMACodecContext *s, int16_t *samples)
 {
-    int ret, i, n, ch, incr;
-    int16_t *ptr;
-    float *iptr;
+    int ret, n, ch, incr;
+    const float *output[MAX_CHANNELS];
 
 #ifdef TRACE
     tprintf(s->avctx, "***decode_frame: %d size=%d\n", s->frame_count++, s->frame_len);
@@ -790,21 +789,6 @@ static int wma_decode_frame(WMACodecContext *s, int16_t *samples)
     /* convert frame to integer */
     n = s->frame_len;
     incr = s->nb_channels;
-    if (s->dsp.float_to_int16_interleave == ff_float_to_int16_interleave_c) {
-        for(ch = 0; ch < s->nb_channels; ch++) {
-            ptr = samples + ch;
-            iptr = s->frame_out[ch];
-
-            for(i=0;i<n;i++) {
-                *ptr = av_clip_int16(lrintf(*iptr++));
-                ptr += incr;
-            }
-            /* prepare for next block */
-            memmove(&s->frame_out[ch][0], &s->frame_out[ch][s->frame_len],
-                    s->frame_len * sizeof(float));
-        }
-    } else {
-        const float *output[MAX_CHANNELS];
         for (ch = 0; ch < MAX_CHANNELS; ch++)
             output[ch] = s->frame_out[ch];
         s->dsp.float_to_int16_interleave(samples, output, n, incr);
@@ -812,7 +796,6 @@ static int wma_decode_frame(WMACodecContext *s, int16_t *samples)
         /* prepare for next block */
         memmove(&s->frame_out[ch][0], &s->frame_out[ch][n], n * sizeof(float));
     }
-    }
 
 #ifdef TRACE
     dump_shorts(s, "samples", samples, n * s->nb_channels);