author     Michael Niedermayer <michaelni@gmx.at>    2011-10-30 01:33:41 +0200
committer  Michael Niedermayer <michaelni@gmx.at>    2011-10-30 01:33:41 +0200
commit     d17e7070a099af04a1dc7bc9ddd82f67bfcf9827 (patch)
tree       4be589d09939bead88ef3d4e1d5e90fe0348af6c /libavcodec
parent     1af3571e05522df4e71a5b33de05bdb9e953a6c4 (diff)
parent     7d1b17b83330aefe2f32a66fe84effe46ae51014 (diff)
download   ffmpeg-d17e7070a099af04a1dc7bc9ddd82f67bfcf9827.tar.gz
Merge remote-tracking branch 'qatar/master'
* qatar/master: (51 commits)
cin audio: use sign_extend() instead of casting to int16_t
cin audio: restructure decoding loop to avoid a separate counter variable
cin audio: use local variable for delta value
cin audio: remove unneeded cast from void*
cin audio: validate the channel count
cin audio: remove unneeded AVCodecContext pointer from CinAudioContext
dsicin: fix several audio-related fields in the CIN demuxer
flacdec: use av_get_bytes_per_sample() to get sample size
dca: handle errors from dca_decode_block()
dca: return error if the frame header is invalid
dca: return proper error codes instead of -1
utvideo: handle empty Huffman trees
binkaudio: change short to int16_t
binkaudio: only decode one block at a time.
binkaudio: store interleaved overlap samples in BinkAudioContext.
binkaudio: pre-calculate quantization factors
binkaudio: add some buffer overread checks.
atrac3: support float or int16 output using request_sample_fmt
atrac3: add CODEC_CAP_SUBFRAMES capability
atrac3: return appropriate error codes instead of -1
...
Conflicts:
libavcodec/atrac1.c
libavcodec/dca.c
libavformat/mov.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
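
Note: the cin audio cleanups in the list above replace an implicit cast to int16_t with an explicit sign extension. A minimal, self-contained sketch of that idea follows; the helper mirrors sign_extend() from libavcodec/mathops.h, while the sample value and main() are purely illustrative and not part of this merge.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for sign_extend() from libavcodec/mathops.h: treat the low
 * 'bits' bits of 'val' as a two's-complement signed number. */
static inline int sign_extend(int val, unsigned bits)
{
    unsigned shift = 8 * sizeof(int) - bits;
    union { unsigned u; int s; } v = { (unsigned)val << shift };
    return v.s >> shift;
}

int main(void)
{
    unsigned raw = 0xFF7F;  /* a 16-bit sample read via AV_RL16()-style byte access */
    /* Casting relies on implementation-defined narrowing of an out-of-range value... */
    printf("cast:        %d\n", (int16_t)raw);
    /* ...while sign_extend() makes the intended interpretation explicit and portable. */
    printf("sign_extend: %d\n", sign_extend(raw, 16));
    return 0;
}
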
Diffstat (limited to 'libavcodec')
-rw-r--r--   libavcodec/atrac1.c      93
-rw-r--r--   libavcodec/atrac3.c     143
-rw-r--r--   libavcodec/binkaudio.c  117
-rw-r--r--   libavcodec/cook.c       138
-rw-r--r--   libavcodec/dca.c         64
-rw-r--r--   libavcodec/dsicinav.c    44
-rw-r--r--   libavcodec/flacdec.c      3
-rw-r--r--   libavcodec/h264.c         4
-rw-r--r--   libavcodec/utvideo.c     53
-rw-r--r--   libavcodec/vp3.c         52
-rw-r--r--   libavcodec/vp8.c         48
11 files changed, 486 insertions, 273 deletions
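
Note: the atrac3 change "support float or int16 output using request_sample_fmt" (visible in atrac3_decode_init in the diff below) lets the caller state a preferred output format before the decoder is opened. A hedged caller-side sketch of that negotiation follows; the wrapper function and its logging are illustrative assumptions, not part of this commit.

#include <libavcodec/avcodec.h>
#include <libavutil/samplefmt.h>

/* Illustrative helper: ask a decoder (e.g. atrac3) for float output.
 * request_sample_fmt is only a preference; the decoder may still pick
 * AV_SAMPLE_FMT_S16, so the granted format must be re-checked after open. */
static int open_preferring_float(AVCodecContext *avctx, AVCodec *codec)
{
    int ret;

    avctx->request_sample_fmt = AV_SAMPLE_FMT_FLT;
    if ((ret = avcodec_open2(avctx, codec, NULL)) < 0)
        return ret;

    av_log(avctx, AV_LOG_INFO, "decoder output format: %s\n",
           av_get_sample_fmt_name(avctx->sample_fmt));
    return 0;
}
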
diff --git a/libavcodec/atrac1.c b/libavcodec/atrac1.c index 2ad99bf473..d4d5986821 100644 --- a/libavcodec/atrac1.c +++ b/libavcodec/atrac1.c @@ -36,6 +36,7 @@ #include "get_bits.h" #include "dsputil.h" #include "fft.h" +#include "fmtconvert.h" #include "sinewin.h" #include "atrac.h" @@ -78,10 +79,11 @@ typedef struct { DECLARE_ALIGNED(32, float, mid)[256]; DECLARE_ALIGNED(32, float, high)[512]; float* bands[3]; - DECLARE_ALIGNED(32, float, out_samples)[AT1_MAX_CHANNELS][AT1_SU_SAMPLES]; + float *out_samples[AT1_MAX_CHANNELS]; FFTContext mdct_ctx[3]; int channels; DSPContext dsp; + FmtConvertContext fmt_conv; } AT1Ctx; /** size of the transform in samples in the long mode for each QMF band */ @@ -129,7 +131,7 @@ static int at1_imdct_block(AT1SUCtx* su, AT1Ctx *q) nbits = mdct_long_nbits[band_num] - log2_block_count; if (nbits != 5 && nbits != 7 && nbits != 8) - return -1; + return AVERROR_INVALIDDATA; } else { block_size = 32; nbits = 5; @@ -173,14 +175,14 @@ static int at1_parse_bsm(GetBitContext* gb, int log2_block_cnt[AT1_QMF_BANDS]) /* low and mid band */ log2_block_count_tmp = get_bits(gb, 2); if (log2_block_count_tmp & 1) - return -1; + return AVERROR_INVALIDDATA; log2_block_cnt[i] = 2 - log2_block_count_tmp; } /* high band */ log2_block_count_tmp = get_bits(gb, 2); if (log2_block_count_tmp != 0 && log2_block_count_tmp != 3) - return -1; + return AVERROR_INVALIDDATA; log2_block_cnt[IDX_HIGH_BAND] = 3 - log2_block_count_tmp; skip_bits(gb, 2); @@ -229,7 +231,7 @@ static int at1_unpack_dequant(GetBitContext* gb, AT1SUCtx* su, /* check for bitstream overflow */ if (bits_used > AT1_SU_MAX_BITS) - return -1; + return AVERROR_INVALIDDATA; /* get the position of the 1st spec according to the block size mode */ pos = su->log2_block_count[band_num] ? bfu_start_short[bfu_num] : bfu_start_long[bfu_num]; @@ -276,14 +278,21 @@ static int atrac1_decode_frame(AVCodecContext *avctx, void *data, const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; AT1Ctx *q = avctx->priv_data; - int ch, ret, i; + int ch, ret, out_size; GetBitContext gb; float* samples = data; if (buf_size < 212 * q->channels) { - av_log(avctx, AV_LOG_ERROR,"Not enought data to decode!\n"); - return -1; + av_log(avctx,AV_LOG_ERROR,"Not enough data to decode!\n"); + return AVERROR_INVALIDDATA; + } + + out_size = q->channels * AT1_SU_SAMPLES * + av_get_bytes_per_sample(avctx->sample_fmt); + if (*data_size < out_size) { + av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); + return AVERROR(EINVAL); } for (ch = 0; ch < q->channels; ch++) { @@ -303,44 +312,72 @@ static int atrac1_decode_frame(AVCodecContext *avctx, void *data, ret = at1_imdct_block(su, q); if (ret < 0) return ret; - at1_subband_synthesis(q, su, q->out_samples[ch]); + at1_subband_synthesis(q, su, q->channels == 1 ? 
samples : q->out_samples[ch]); } - /* interleave; FIXME, should create/use a DSP function */ - if (q->channels == 1) { - /* mono */ - memcpy(samples, q->out_samples[0], AT1_SU_SAMPLES * 4); - } else { - /* stereo */ - for (i = 0; i < AT1_SU_SAMPLES; i++) { - samples[i * 2] = q->out_samples[0][i]; - samples[i * 2 + 1] = q->out_samples[1][i]; - } + /* interleave */ + if (q->channels == 2) { + q->fmt_conv.float_interleave(samples, (const float **)q->out_samples, + AT1_SU_SAMPLES, 2); } - *data_size = q->channels * AT1_SU_SAMPLES * sizeof(*samples); + *data_size = out_size; return avctx->block_align; } +static av_cold int atrac1_decode_end(AVCodecContext * avctx) +{ + AT1Ctx *q = avctx->priv_data; + + av_freep(&q->out_samples[0]); + + ff_mdct_end(&q->mdct_ctx[0]); + ff_mdct_end(&q->mdct_ctx[1]); + ff_mdct_end(&q->mdct_ctx[2]); + + return 0; +} + + static av_cold int atrac1_decode_init(AVCodecContext *avctx) { AT1Ctx *q = avctx->priv_data; + int ret; avctx->sample_fmt = AV_SAMPLE_FMT_FLT; + if (avctx->channels < 1 || avctx->channels > AT1_MAX_CHANNELS) { + av_log(avctx, AV_LOG_ERROR, "Unsupported number of channels: %d\n", + avctx->channels); + return AVERROR(EINVAL); + } q->channels = avctx->channels; + if (avctx->channels == 2) { + q->out_samples[0] = av_malloc(2 * AT1_SU_SAMPLES * sizeof(*q->out_samples[0])); + q->out_samples[1] = q->out_samples[0] + AT1_SU_SAMPLES; + if (!q->out_samples[0]) { + av_freep(&q->out_samples[0]); + return AVERROR(ENOMEM); + } + } + /* Init the mdct transforms */ - ff_mdct_init(&q->mdct_ctx[0], 6, 1, -1.0/ (1 << 15)); - ff_mdct_init(&q->mdct_ctx[1], 8, 1, -1.0/ (1 << 15)); - ff_mdct_init(&q->mdct_ctx[2], 9, 1, -1.0/ (1 << 15)); + if ((ret = ff_mdct_init(&q->mdct_ctx[0], 6, 1, -1.0/ (1 << 15))) || + (ret = ff_mdct_init(&q->mdct_ctx[1], 8, 1, -1.0/ (1 << 15))) || + (ret = ff_mdct_init(&q->mdct_ctx[2], 9, 1, -1.0/ (1 << 15)))) { + av_log(avctx, AV_LOG_ERROR, "Error initializing MDCT\n"); + atrac1_decode_end(avctx); + return ret; + } ff_init_ff_sine_windows(5); atrac_generate_tables(); dsputil_init(&q->dsp, avctx); + ff_fmt_convert_init(&q->fmt_conv, avctx); q->bands[0] = q->low; q->bands[1] = q->mid; @@ -356,16 +393,6 @@ static av_cold int atrac1_decode_init(AVCodecContext *avctx) } -static av_cold int atrac1_decode_end(AVCodecContext * avctx) { - AT1Ctx *q = avctx->priv_data; - - ff_mdct_end(&q->mdct_ctx[0]); - ff_mdct_end(&q->mdct_ctx[1]); - ff_mdct_end(&q->mdct_ctx[2]); - return 0; -} - - AVCodec ff_atrac1_decoder = { .name = "atrac1", .type = AVMEDIA_TYPE_AUDIO, diff --git a/libavcodec/atrac3.c b/libavcodec/atrac3.c index 20ab75dfd7..25beeeeb6c 100644 --- a/libavcodec/atrac3.c +++ b/libavcodec/atrac3.c @@ -41,6 +41,7 @@ #include "dsputil.h" #include "bytestream.h" #include "fft.h" +#include "fmtconvert.h" #include "atrac.h" #include "atrac3data.h" @@ -48,6 +49,8 @@ #define JOINT_STEREO 0x12 #define STEREO 0x2 +#define SAMPLES_PER_FRAME 1024 +#define MDCT_SIZE 512 /* These structures are needed to store the parsed gain control data. 
*/ typedef struct { @@ -70,12 +73,12 @@ typedef struct { int bandsCoded; int numComponents; tonal_component components[64]; - float prevFrame[1024]; + float prevFrame[SAMPLES_PER_FRAME]; int gcBlkSwitch; gain_block gainBlock[2]; - DECLARE_ALIGNED(32, float, spectrum)[1024]; - DECLARE_ALIGNED(32, float, IMDCT_buf)[1024]; + DECLARE_ALIGNED(32, float, spectrum)[SAMPLES_PER_FRAME]; + DECLARE_ALIGNED(32, float, IMDCT_buf)[SAMPLES_PER_FRAME]; float delayBuf1[46]; ///<qmf delay buffers float delayBuf2[46]; @@ -107,7 +110,7 @@ typedef struct { //@} //@{ /** data buffers */ - float outSamples[2048]; + float *outSamples[2]; uint8_t* decoded_bytes_buffer; float tempBuf[1070]; //@} @@ -120,9 +123,10 @@ typedef struct { //@} FFTContext mdct_ctx; + FmtConvertContext fmt_conv; } ATRAC3Context; -static DECLARE_ALIGNED(32, float, mdct_window)[512]; +static DECLARE_ALIGNED(32, float, mdct_window)[MDCT_SIZE]; static VLC spectral_coeff_tab[7]; static float gain_tab1[16]; static float gain_tab2[31]; @@ -159,7 +163,7 @@ static void IMLT(ATRAC3Context *q, float *pInput, float *pOutput, int odd_band) q->mdct_ctx.imdct_calc(&q->mdct_ctx,pOutput,pInput); /* Perform windowing on the output. */ - dsp.vector_fmul(pOutput, pOutput, mdct_window, 512); + dsp.vector_fmul(pOutput, pOutput, mdct_window, MDCT_SIZE); } @@ -192,7 +196,7 @@ static int decode_bytes(const uint8_t* inbuffer, uint8_t* out, int bytes){ } -static av_cold void init_atrac3_transforms(ATRAC3Context *q) { +static av_cold int init_atrac3_transforms(ATRAC3Context *q, int is_float) { float enc_window[256]; int i; @@ -208,7 +212,7 @@ static av_cold void init_atrac3_transforms(ATRAC3Context *q) { } /* Initialize the MDCT transform. */ - ff_mdct_init(&q->mdct_ctx, 9, 1, 1.0); + return ff_mdct_init(&q->mdct_ctx, 9, 1, is_float ? 1.0 / 32768 : 1.0); } /** @@ -221,6 +225,8 @@ static av_cold int atrac3_decode_close(AVCodecContext *avctx) av_free(q->pUnits); av_free(q->decoded_bytes_buffer); + av_freep(&q->outSamples[0]); + ff_mdct_end(&q->mdct_ctx); return 0; @@ -340,7 +346,7 @@ static int decodeSpectrum (GetBitContext *gb, float *pOut) /* Clear the subbands that were not coded. 
*/ first = subbandTab[cnt]; - memset(pOut+first, 0, (1024 - first) * sizeof(float)); + memset(pOut+first, 0, (SAMPLES_PER_FRAME - first) * sizeof(float)); return numSubbands; } @@ -370,7 +376,7 @@ static int decodeTonalComponents (GetBitContext *gb, tonal_component *pComponent coding_mode_selector = get_bits(gb,2); if (coding_mode_selector == 2) - return -1; + return AVERROR_INVALIDDATA; coding_mode = coding_mode_selector & 1; @@ -382,7 +388,7 @@ static int decodeTonalComponents (GetBitContext *gb, tonal_component *pComponent quant_step_index = get_bits(gb,3); if (quant_step_index <= 1) - return -1; + return AVERROR_INVALIDDATA; if (coding_mode_selector == 3) coding_mode = get_bits1(gb); @@ -396,7 +402,7 @@ static int decodeTonalComponents (GetBitContext *gb, tonal_component *pComponent for (k=0; k<coded_components; k++) { sfIndx = get_bits(gb,6); pComponent[component_count].pos = j * 64 + (get_bits(gb,6)); - max_coded_values = 1024 - pComponent[component_count].pos; + max_coded_values = SAMPLES_PER_FRAME - pComponent[component_count].pos; coded_values = coded_values_per_component + 1; coded_values = FFMIN(max_coded_values,coded_values); @@ -445,7 +451,7 @@ static int decodeGainControl (GetBitContext *gb, gain_block *pGb, int numBands) pLevel[cf]= get_bits(gb,4); pLoc [cf]= get_bits(gb,5); if(cf && pLoc[cf] <= pLoc[cf-1]) - return -1; + return AVERROR_INVALIDDATA; } } @@ -662,12 +668,12 @@ static int decodeChannelSoundUnit (ATRAC3Context *q, GetBitContext *gb, channel_ if (codingMode == JOINT_STEREO && channelNum == 1) { if (get_bits(gb,2) != 3) { av_log(NULL,AV_LOG_ERROR,"JS mono Sound Unit id != 3.\n"); - return -1; + return AVERROR_INVALIDDATA; } } else { if (get_bits(gb,6) != 0x28) { av_log(NULL,AV_LOG_ERROR,"Sound Unit id != 0x28.\n"); - return -1; + return AVERROR_INVALIDDATA; } } @@ -719,7 +725,8 @@ static int decodeChannelSoundUnit (ATRAC3Context *q, GetBitContext *gb, channel_ * @param databuf the input data */ -static int decodeFrame(ATRAC3Context *q, const uint8_t* databuf) +static int decodeFrame(ATRAC3Context *q, const uint8_t* databuf, + float **out_samples) { int result, i; float *p1, *p2, *p3, *p4; @@ -731,7 +738,7 @@ static int decodeFrame(ATRAC3Context *q, const uint8_t* databuf) /* decode Sound Unit 1 */ init_get_bits(&q->gb,databuf,q->bits_per_frame); - result = decodeChannelSoundUnit(q,&q->gb, q->pUnits, q->outSamples, 0, JOINT_STEREO); + result = decodeChannelSoundUnit(q,&q->gb, q->pUnits, out_samples[0], 0, JOINT_STEREO); if (result != 0) return (result); @@ -753,7 +760,7 @@ static int decodeFrame(ATRAC3Context *q, const uint8_t* databuf) ptr1 = q->decoded_bytes_buffer; for (i = 4; *ptr1 == 0xF8; i++, ptr1++) { if (i >= q->bytes_per_frame) - return -1; + return AVERROR_INVALIDDATA; } @@ -772,14 +779,14 @@ static int decodeFrame(ATRAC3Context *q, const uint8_t* databuf) } /* Decode Sound Unit 2. */ - result = decodeChannelSoundUnit(q,&q->gb, &q->pUnits[1], &q->outSamples[1024], 1, JOINT_STEREO); + result = decodeChannelSoundUnit(q,&q->gb, &q->pUnits[1], out_samples[1], 1, JOINT_STEREO); if (result != 0) return (result); /* Reconstruct the channel coefficients. 
*/ - reverseMatrixing(q->outSamples, &q->outSamples[1024], q->matrix_coeff_index_prev, q->matrix_coeff_index_now); + reverseMatrixing(out_samples[0], out_samples[1], q->matrix_coeff_index_prev, q->matrix_coeff_index_now); - channelWeighting(q->outSamples, &q->outSamples[1024], q->weighting_delay); + channelWeighting(out_samples[0], out_samples[1], q->weighting_delay); } else { /* normal stereo mode or mono */ @@ -789,22 +796,21 @@ static int decodeFrame(ATRAC3Context *q, const uint8_t* databuf) /* Set the bitstream reader at the start of a channel sound unit. */ init_get_bits(&q->gb, databuf+((i*q->bytes_per_frame)/q->channels), (q->bits_per_frame)/q->channels); - result = decodeChannelSoundUnit(q,&q->gb, &q->pUnits[i], &q->outSamples[i*1024], i, q->codingMode); + result = decodeChannelSoundUnit(q,&q->gb, &q->pUnits[i], out_samples[i], i, q->codingMode); if (result != 0) return (result); } } /* Apply the iQMF synthesis filter. */ - p1= q->outSamples; for (i=0 ; i<q->channels ; i++) { + p1 = out_samples[i]; p2= p1+256; p3= p2+256; p4= p3+256; atrac_iqmf (p1, p2, 256, p1, q->pUnits[i].delayBuf1, q->tempBuf); atrac_iqmf (p4, p3, 256, p3, q->pUnits[i].delayBuf2, q->tempBuf); atrac_iqmf (p1, p3, 512, p1, q->pUnits[i].delayBuf3, q->tempBuf); - p1 +=1024; } return 0; @@ -823,15 +829,22 @@ static int atrac3_decode_frame(AVCodecContext *avctx, const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; ATRAC3Context *q = avctx->priv_data; - int result = 0, i; + int result = 0, out_size; const uint8_t* databuf; - int16_t* samples = data; + float *samples_flt = data; + int16_t *samples_s16 = data; if (buf_size < avctx->block_align) { av_log(avctx, AV_LOG_ERROR, "Frame too small (%d bytes). Truncated file?\n", buf_size); - *data_size = 0; - return buf_size; + return AVERROR_INVALIDDATA; + } + + out_size = SAMPLES_PER_FRAME * q->channels * + av_get_bytes_per_sample(avctx->sample_fmt); + if (*data_size < out_size) { + av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); + return AVERROR(EINVAL); } /* Check if we need to descramble and what buffer to pass on. 
*/ @@ -842,26 +855,27 @@ static int atrac3_decode_frame(AVCodecContext *avctx, databuf = buf; } - result = decodeFrame(q, databuf); + if (q->channels == 1 && avctx->sample_fmt == AV_SAMPLE_FMT_FLT) + result = decodeFrame(q, databuf, &samples_flt); + else + result = decodeFrame(q, databuf, q->outSamples); if (result != 0) { av_log(NULL,AV_LOG_ERROR,"Frame decoding error!\n"); - return -1; + return result; } - if (q->channels == 1) { - /* mono */ - for (i = 0; i<1024; i++) - samples[i] = av_clip_int16(round(q->outSamples[i])); - *data_size = 1024 * sizeof(int16_t); - } else { - /* stereo */ - for (i = 0; i < 1024; i++) { - samples[i*2] = av_clip_int16(round(q->outSamples[i])); - samples[i*2+1] = av_clip_int16(round(q->outSamples[1024+i])); - } - *data_size = 2048 * sizeof(int16_t); + /* interleave */ + if (q->channels == 2 && avctx->sample_fmt == AV_SAMPLE_FMT_FLT) { + q->fmt_conv.float_interleave(samples_flt, + (const float **)q->outSamples, + SAMPLES_PER_FRAME, 2); + } else if (avctx->sample_fmt == AV_SAMPLE_FMT_S16) { + q->fmt_conv.float_to_int16_interleave(samples_s16, + (const float **)q->outSamples, + SAMPLES_PER_FRAME, q->channels); } + *data_size = out_size; return avctx->block_align; } @@ -875,7 +889,7 @@ static int atrac3_decode_frame(AVCodecContext *avctx, static av_cold int atrac3_decode_init(AVCodecContext *avctx) { - int i; + int i, ret; const uint8_t *edata_ptr = avctx->extradata; ATRAC3Context *q = avctx->priv_data; static VLC_TYPE atrac3_vlc_table[4096][2]; @@ -899,7 +913,7 @@ static av_cold int atrac3_decode_init(AVCodecContext *avctx) av_log(avctx,AV_LOG_DEBUG,"[12-13] %d\n",bytestream_get_le16(&edata_ptr)); //Unknown always 0 /* setup */ - q->samples_per_frame = 1024 * q->channels; + q->samples_per_frame = SAMPLES_PER_FRAME * q->channels; q->atrac3version = 4; q->delay = 0x88E; if (q->codingMode) @@ -912,7 +926,7 @@ static av_cold int atrac3_decode_init(AVCodecContext *avctx) if ((q->bytes_per_frame == 96*q->channels*q->frame_factor) || (q->bytes_per_frame == 152*q->channels*q->frame_factor) || (q->bytes_per_frame == 192*q->channels*q->frame_factor)) { } else { av_log(avctx,AV_LOG_ERROR,"Unknown frame/channel/frame_factor configuration %d/%d/%d\n", q->bytes_per_frame, q->channels, q->frame_factor); - return -1; + return AVERROR_INVALIDDATA; } } else if (avctx->extradata_size == 10) { @@ -932,17 +946,17 @@ static av_cold int atrac3_decode_init(AVCodecContext *avctx) if (q->atrac3version != 4) { av_log(avctx,AV_LOG_ERROR,"Version %d != 4.\n",q->atrac3version); - return -1; + return AVERROR_INVALIDDATA; } - if (q->samples_per_frame != 1024 && q->samples_per_frame != 2048) { + if (q->samples_per_frame != SAMPLES_PER_FRAME && q->samples_per_frame != SAMPLES_PER_FRAME*2) { av_log(avctx,AV_LOG_ERROR,"Unknown amount of samples per frame %d.\n",q->samples_per_frame); - return -1; + return AVERROR_INVALIDDATA; } if (q->delay != 0x88E) { av_log(avctx,AV_LOG_ERROR,"Unknown amount of delay %x != 0x88E.\n",q->delay); - return -1; + return AVERROR_INVALIDDATA; } if (q->codingMode == STEREO) { @@ -951,17 +965,17 @@ static av_cold int atrac3_decode_init(AVCodecContext *avctx) av_log(avctx,AV_LOG_DEBUG,"Joint stereo detected.\n"); } else { av_log(avctx,AV_LOG_ERROR,"Unknown channel coding mode %x!\n",q->codingMode); - return -1; + return AVERROR_INVALIDDATA; } if (avctx->channels <= 0 || avctx->channels > 2 /*|| ((avctx->channels * 1024) != q->samples_per_frame)*/) { av_log(avctx,AV_LOG_ERROR,"Channel configuration error!\n"); - return -1; + return AVERROR(EINVAL); } if(avctx->block_align 
>= UINT_MAX/2) - return -1; + return AVERROR(EINVAL); /* Pad the data buffer with FF_INPUT_BUFFER_PADDING_SIZE, * this is for the bitstream reader. */ @@ -981,7 +995,16 @@ static av_cold int atrac3_decode_init(AVCodecContext *avctx) vlcs_initialized = 1; } - init_atrac3_transforms(q); + if (avctx->request_sample_fmt == AV_SAMPLE_FMT_FLT) + avctx->sample_fmt = AV_SAMPLE_FMT_FLT; + else + avctx->sample_fmt = AV_SAMPLE_FMT_S16; + + if ((ret = init_atrac3_transforms(q, avctx->sample_fmt == AV_SAMPLE_FMT_FLT))) { + av_log(avctx, AV_LOG_ERROR, "Error initializing MDCT\n"); + av_freep(&q->decoded_bytes_buffer); + return ret; + } atrac_generate_tables(); @@ -1007,14 +1030,23 @@ static av_cold int atrac3_decode_init(AVCodecContext *avctx) } dsputil_init(&dsp, avctx); + ff_fmt_convert_init(&q->fmt_conv, avctx); q->pUnits = av_mallocz(sizeof(channel_unit)*q->channels); if (!q->pUnits) { - av_free(q->decoded_bytes_buffer); + atrac3_decode_close(avctx); return AVERROR(ENOMEM); } - avctx->sample_fmt = AV_SAMPLE_FMT_S16; + if (avctx->channels > 1 || avctx->sample_fmt == AV_SAMPLE_FMT_S16) { + q->outSamples[0] = av_mallocz(SAMPLES_PER_FRAME * avctx->channels * sizeof(*q->outSamples[0])); + q->outSamples[1] = q->outSamples[0] + SAMPLES_PER_FRAME; + if (!q->outSamples[0]) { + atrac3_decode_close(avctx); + return AVERROR(ENOMEM); + } + } + return 0; } @@ -1028,5 +1060,6 @@ AVCodec ff_atrac3_decoder = .init = atrac3_decode_init, .close = atrac3_decode_close, .decode = atrac3_decode_frame, + .capabilities = CODEC_CAP_SUBFRAMES, .long_name = NULL_IF_CONFIG_SMALL("Atrac 3 (Adaptive TRansform Acoustic Coding 3)"), }; diff --git a/libavcodec/binkaudio.c b/libavcodec/binkaudio.c index 2d06aaa9e9..b1e4de2711 100644 --- a/libavcodec/binkaudio.c +++ b/libavcodec/binkaudio.c @@ -39,6 +39,8 @@ extern const uint16_t ff_wma_critical_freqs[25]; +static float quant_table[95]; + #define MAX_CHANNELS 2 #define BINK_BLOCK_MAX_SIZE (MAX_CHANNELS << 11) @@ -56,8 +58,11 @@ typedef struct { unsigned int *bands; float root; DECLARE_ALIGNED(32, FFTSample, coeffs)[BINK_BLOCK_MAX_SIZE]; - DECLARE_ALIGNED(16, short, previous)[BINK_BLOCK_MAX_SIZE / 16]; ///< coeffs from previous audio block + DECLARE_ALIGNED(16, int16_t, previous)[BINK_BLOCK_MAX_SIZE / 16]; ///< coeffs from previous audio block + DECLARE_ALIGNED(16, int16_t, current)[BINK_BLOCK_MAX_SIZE / 16]; float *coeffs_ptr[MAX_CHANNELS]; ///< pointers to the coeffs arrays for float_to_int16_interleave + float *prev_ptr[MAX_CHANNELS]; ///< pointers to the overlap points in the coeffs array + uint8_t *packet_buffer; union { RDFTContext rdft; DCTContext dct; @@ -107,6 +112,10 @@ static av_cold int decode_init(AVCodecContext *avctx) s->block_size = (s->frame_len - s->overlap_len) * s->channels; sample_rate_half = (sample_rate + 1) / 2; s->root = 2.0 / sqrt(s->frame_len); + for (i = 0; i < 95; i++) { + /* constant is result of 0.066399999/log10(M_E) */ + quant_table[i] = expf(i * 0.15289164787221953823f) * s->root; + } /* calculate number of bands */ for (s->num_bands = 1; s->num_bands < 25; s->num_bands++) @@ -126,8 +135,10 @@ static av_cold int decode_init(AVCodecContext *avctx) s->first = 1; avctx->sample_fmt = AV_SAMPLE_FMT_S16; - for (i = 0; i < s->channels; i++) + for (i = 0; i < s->channels; i++) { s->coeffs_ptr[i] = s->coeffs + i * s->frame_len; + s->prev_ptr[i] = s->coeffs_ptr[i] + s->frame_len - s->overlap_len; + } if (CONFIG_BINKAUDIO_RDFT_DECODER && avctx->codec->id == CODEC_ID_BINKAUDIO_RDFT) ff_rdft_init(&s->trans.rdft, frame_len_bits, DFT_C2R); @@ -152,11 +163,18 @@ 
static const uint8_t rle_length_tab[16] = { 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 32, 64 }; +#define GET_BITS_SAFE(out, nbits) do { \ + if (get_bits_left(gb) < nbits) \ + return AVERROR_INVALIDDATA; \ + out = get_bits(gb, nbits); \ +} while (0) + /** * Decode Bink Audio block * @param[out] out Output buffer (must contain s->block_size elements) + * @return 0 on success, negative error code on failure */ -static void decode_block(BinkAudioContext *s, short *out, int use_dct) +static int decode_block(BinkAudioContext *s, int16_t *out, int use_dct) { int ch, i, j, k; float q, quant[25]; @@ -169,17 +187,22 @@ static void decode_block(BinkAudioContext *s, short *out, int use_dct) for (ch = 0; ch < s->channels; ch++) { FFTSample *coeffs = s->coeffs_ptr[ch]; if (s->version_b) { + if (get_bits_left(gb) < 64) + return AVERROR_INVALIDDATA; coeffs[0] = av_int2flt(get_bits(gb, 32)) * s->root; coeffs[1] = av_int2flt(get_bits(gb, 32)) * s->root; } else { + if (get_bits_left(gb) < 58) + return AVERROR_INVALIDDATA; coeffs[0] = get_float(gb) * s->root; coeffs[1] = get_float(gb) * s->root; } + if (get_bits_left(gb) < s->num_bands * 8) + return AVERROR_INVALIDDATA; for (i = 0; i < s->num_bands; i++) { - /* constant is result of 0.066399999/log10(M_E) */ int value = get_bits(gb, 8); - quant[i] = expf(FFMIN(value, 95) * 0.15289164787221953823f) * s->root; + quant[i] = quant_table[FFMIN(value, 95)]; } k = 0; @@ -190,15 +213,20 @@ static void decode_block(BinkAudioContext *s, short *out, int use_dct) while (i < s->frame_len) { if (s->version_b) { j = i + 16; - } else if (get_bits1(gb)) { - j = i + rle_length_tab[get_bits(gb, 4)] * 8; } else { - j = i + 8; + int v; + GET_BITS_SAFE(v, 1); + if (v) { + GET_BITS_SAFE(v, 4); + j = i + rle_length_tab[v] * 8; + } else { + j = i + 8; + } } j = FFMIN(j, s->frame_len); - width = get_bits(gb, 4); + GET_BITS_SAFE(width, 4); if (width == 0) { memset(coeffs + i, 0, (j - i) * sizeof(*coeffs)); i = j; @@ -208,9 +236,11 @@ static void decode_block(BinkAudioContext *s, short *out, int use_dct) while (i < j) { if (s->bands[k] == i) q = quant[k++]; - coeff = get_bits(gb, width); + GET_BITS_SAFE(coeff, width); if (coeff) { - if (get_bits1(gb)) + int v; + GET_BITS_SAFE(v, 1); + if (v) coeffs[i] = -q * coeff; else coeffs[i] = q * coeff; @@ -231,8 +261,12 @@ static void decode_block(BinkAudioContext *s, short *out, int use_dct) s->trans.rdft.rdft_calc(&s->trans.rdft, coeffs); } + s->fmt_conv.float_to_int16_interleave(s->current, + (const float **)s->prev_ptr, + s->overlap_len, s->channels); s->fmt_conv.float_to_int16_interleave(out, (const float **)s->coeffs_ptr, - s->frame_len, s->channels); + s->frame_len - s->overlap_len, + s->channels); if (!s->first) { int count = s->overlap_len * s->channels; @@ -242,16 +276,19 @@ static void decode_block(BinkAudioContext *s, short *out, int use_dct) } } - memcpy(s->previous, out + s->block_size, - s->overlap_len * s->channels * sizeof(*out)); + memcpy(s->previous, s->current, + s->overlap_len * s->channels * sizeof(*s->previous)); s->first = 0; + + return 0; } static av_cold int decode_end(AVCodecContext *avctx) { BinkAudioContext * s = avctx->priv_data; av_freep(&s->bands); + av_freep(&s->packet_buffer); if (CONFIG_BINKAUDIO_RDFT_DECODER && avctx->codec->id == CODEC_ID_BINKAUDIO_RDFT) ff_rdft_end(&s->trans.rdft); else if (CONFIG_BINKAUDIO_DCT_DECODER) @@ -270,25 +307,47 @@ static int decode_frame(AVCodecContext *avctx, AVPacket *avpkt) { BinkAudioContext *s = avctx->priv_data; - const uint8_t *buf = avpkt->data; - int buf_size = 
avpkt->size; - short *samples = data; - short *samples_end = (short*)((uint8_t*)data + *data_size); - int reported_size; + int16_t *samples = data; GetBitContext *gb = &s->gb; + int out_size, consumed = 0; + + if (!get_bits_left(gb)) { + uint8_t *buf; + /* handle end-of-stream */ + if (!avpkt->size) { + *data_size = 0; + return 0; + } + if (avpkt->size < 4) { + av_log(avctx, AV_LOG_ERROR, "Packet is too small\n"); + return AVERROR_INVALIDDATA; + } + buf = av_realloc(s->packet_buffer, avpkt->size + FF_INPUT_BUFFER_PADDING_SIZE); + if (!buf) + return AVERROR(ENOMEM); + s->packet_buffer = buf; + memcpy(s->packet_buffer, avpkt->data, avpkt->size); + init_get_bits(gb, s->packet_buffer, avpkt->size * 8); + consumed = avpkt->size; + + /* skip reported size */ + skip_bits_long(gb, 32); + } - init_get_bits(gb, buf, buf_size * 8); + out_size = s->block_size * av_get_bytes_per_sample(avctx->sample_fmt); + if (*data_size < out_size) { + av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); + return AVERROR(EINVAL); + } - reported_size = get_bits_long(gb, 32); - while (get_bits_count(gb) / 8 < buf_size && - samples + s->block_size <= samples_end) { - decode_block(s, samples, avctx->codec->id == CODEC_ID_BINKAUDIO_DCT); - samples += s->block_size; - get_bits_align32(gb); + if (decode_block(s, samples, avctx->codec->id == CODEC_ID_BINKAUDIO_DCT)) { + av_log(avctx, AV_LOG_ERROR, "Incomplete packet\n"); + return AVERROR_INVALIDDATA; } + get_bits_align32(gb); - *data_size = FFMIN(reported_size, (uint8_t*)samples - (uint8_t*)data); - return buf_size; + *data_size = out_size; + return consumed; } AVCodec ff_binkaudio_rdft_decoder = { @@ -299,6 +358,7 @@ AVCodec ff_binkaudio_rdft_decoder = { .init = decode_init, .close = decode_end, .decode = decode_frame, + .capabilities = CODEC_CAP_DELAY, .long_name = NULL_IF_CONFIG_SMALL("Bink Audio (RDFT)") }; @@ -310,5 +370,6 @@ AVCodec ff_binkaudio_dct_decoder = { .init = decode_init, .close = decode_end, .decode = decode_frame, + .capabilities = CODEC_CAP_DELAY, .long_name = NULL_IF_CONFIG_SMALL("Bink Audio (DCT)") }; diff --git a/libavcodec/cook.c b/libavcodec/cook.c index 0d09bb83fb..9cfd3960e0 100644 --- a/libavcodec/cook.c +++ b/libavcodec/cook.c @@ -42,12 +42,7 @@ * available. */ -#include <math.h> -#include <stddef.h> -#include <stdio.h> - #include "libavutil/lfg.h" -#include "libavutil/random_seed.h" #include "avcodec.h" #include "get_bits.h" #include "dsputil.h" @@ -124,7 +119,7 @@ typedef struct cook { void (* interpolate) (struct cook *q, float* buffer, int gain_index, int gain_index_next); - void (* saturate_output) (struct cook *q, int chan, int16_t *out); + void (* saturate_output) (struct cook *q, int chan, float *out); AVCodecContext* avctx; GetBitContext gb; @@ -217,11 +212,11 @@ static av_cold int init_cook_vlc_tables(COOKContext *q) { } static av_cold int init_cook_mlt(COOKContext *q) { - int j; + int j, ret; int mlt_size = q->samples_per_channel; - if ((q->mlt_window = av_malloc(sizeof(float)*mlt_size)) == 0) - return -1; + if ((q->mlt_window = av_malloc(mlt_size * sizeof(*q->mlt_window))) == 0) + return AVERROR(ENOMEM); /* Initialize the MLT window: simple sine window. */ ff_sine_window_init(q->mlt_window, mlt_size); @@ -229,9 +224,9 @@ static av_cold int init_cook_mlt(COOKContext *q) { q->mlt_window[j] *= sqrt(2.0 / q->samples_per_channel); /* Initialize the MDCT. 
*/ - if (ff_mdct_init(&q->mdct_ctx, av_log2(mlt_size)+1, 1, 1.0)) { - av_free(q->mlt_window); - return -1; + if ((ret = ff_mdct_init(&q->mdct_ctx, av_log2(mlt_size)+1, 1, 1.0/32768.0))) { + av_free(q->mlt_window); + return ret; } av_log(q->avctx,AV_LOG_DEBUG,"MDCT initialized, order = %d.\n", av_log2(mlt_size)+1); @@ -410,9 +405,9 @@ static void categorize(COOKContext *q, COOKSubpacket *p, int* quant_index_table, //av_log(q->avctx, AV_LOG_ERROR, "bits_left = %d\n",bits_left); } - memset(&exp_index1,0,102*sizeof(int)); - memset(&exp_index2,0,102*sizeof(int)); - memset(&tmp_categorize_array,0,128*2*sizeof(int)); + memset(&exp_index1, 0, sizeof(exp_index1)); + memset(&exp_index2, 0, sizeof(exp_index2)); + memset(&tmp_categorize_array, 0, sizeof(tmp_categorize_array)); bias=-32; @@ -633,8 +628,8 @@ static void mono_decode(COOKContext *q, COOKSubpacket *p, float* mlt_buffer) { int quant_index_table[102]; int category[128]; - memset(&category, 0, 128*sizeof(int)); - memset(&category_index, 0, 128*sizeof(int)); + memset(&category, 0, sizeof(category)); + memset(&category_index, 0, sizeof(category_index)); decode_envelope(q, p, quant_index_table); q->num_vectors = get_bits(&q->gb,p->log2_numvector_size); @@ -663,14 +658,12 @@ static void interpolate_float(COOKContext *q, float* buffer, for(i=0 ; i<q->gain_size_factor ; i++){ buffer[i]*=fc1; } - return; } else { //smooth gain fc2 = q->gain_table[11 + (gain_index_next-gain_index)]; for(i=0 ; i<q->gain_size_factor ; i++){ buffer[i]*=fc1; fc1*=fc2; } - return; } } @@ -733,7 +726,8 @@ static void imlt_gain(COOKContext *q, float *inbuffer, } /* Save away the current to be previous block. */ - memcpy(previous_buffer, buffer0, sizeof(float)*q->samples_per_channel); + memcpy(previous_buffer, buffer0, + q->samples_per_channel * sizeof(*previous_buffer)); } @@ -744,27 +738,24 @@ static void imlt_gain(COOKContext *q, float *inbuffer, * @param decouple_tab decoupling array * */ +static void decouple_info(COOKContext *q, COOKSubpacket *p, int *decouple_tab) +{ + int i; + int vlc = get_bits1(&q->gb); + int start = cplband[p->js_subband_start]; + int end = cplband[p->subbands-1]; + int length = end - start + 1; -static void decouple_info(COOKContext *q, COOKSubpacket *p, int* decouple_tab){ - int length, i; - - if(get_bits1(&q->gb)) { - if(cplband[p->js_subband_start] > cplband[p->subbands-1]) return; - - length = cplband[p->subbands-1] - cplband[p->js_subband_start] + 1; - for (i=0 ; i<length ; i++) { - decouple_tab[cplband[p->js_subband_start] + i] = get_vlc2(&q->gb, p->ccpl.table, p->ccpl.bits, 2); - } + if (start > end) return; - } - - if(cplband[p->js_subband_start] > cplband[p->subbands-1]) return; - length = cplband[p->subbands-1] - cplband[p->js_subband_start] + 1; - for (i=0 ; i<length ; i++) { - decouple_tab[cplband[p->js_subband_start] + i] = get_bits(&q->gb, p->js_vlc_bits); + if (vlc) { + for (i = 0; i < length; i++) + decouple_tab[start + i] = get_vlc2(&q->gb, p->ccpl.table, p->ccpl.bits, 2); + } else { + for (i = 0; i < length; i++) + decouple_tab[start + i] = get_bits(&q->gb, p->js_vlc_bits); } - return; } /* @@ -811,11 +802,11 @@ static void joint_decode(COOKContext *q, COOKSubpacket *p, float* mlt_buffer1, const float* cplscale; memset(decouple_tab, 0, sizeof(decouple_tab)); - memset(decode_buffer, 0, sizeof(decode_buffer)); + memset(decode_buffer, 0, sizeof(q->decode_buffer_0)); /* Make sure the buffers are zeroed out. 
*/ - memset(mlt_buffer1,0, 1024*sizeof(float)); - memset(mlt_buffer2,0, 1024*sizeof(float)); + memset(mlt_buffer1, 0, 1024 * sizeof(*mlt_buffer1)); + memset(mlt_buffer2, 0, 1024 * sizeof(*mlt_buffer2)); decouple_info(q, p, decouple_tab); mono_decode(q, p, decode_buffer); @@ -867,22 +858,18 @@ decode_bytes_and_gain(COOKContext *q, COOKSubpacket *p, const uint8_t *inbuffer, } /** - * Saturate the output signal to signed 16bit integers. + * Saturate the output signal and interleave. * * @param q pointer to the COOKContext * @param chan channel to saturate * @param out pointer to the output vector */ -static void -saturate_output_float (COOKContext *q, int chan, int16_t *out) +static void saturate_output_float(COOKContext *q, int chan, float *out) { int j; float *output = q->mono_mdct_output + q->samples_per_channel; - /* Clip and convert floats to 16 bits. - */ for (j = 0; j < q->samples_per_channel; j++) { - out[chan + q->nb_channels * j] = - av_clip_int16(lrintf(output[j])); + out[chan + q->nb_channels * j] = av_clipf(output[j], -1.0, 1.0); } } @@ -902,7 +889,7 @@ saturate_output_float (COOKContext *q, int chan, int16_t *out) static inline void mlt_compensate_output(COOKContext *q, float *decode_buffer, cook_gains *gains_ptr, float *previous_buffer, - int16_t *out, int chan) + float *out, int chan) { imlt_gain(q, decode_buffer, gains_ptr, previous_buffer); q->saturate_output (q, chan, out); @@ -917,7 +904,9 @@ mlt_compensate_output(COOKContext *q, float *decode_buffer, * @param inbuffer pointer to the inbuffer * @param outbuffer pointer to the outbuffer */ -static void decode_subpacket(COOKContext *q, COOKSubpacket* p, const uint8_t *inbuffer, int16_t *outbuffer) { +static void decode_subpacket(COOKContext *q, COOKSubpacket *p, + const uint8_t *inbuffer, float *outbuffer) +{ int sub_packet_size = p->size; /* packet dump */ // for (i=0 ; i<sub_packet_size ; i++) { @@ -966,13 +955,20 @@ static int cook_decode_frame(AVCodecContext *avctx, const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; COOKContext *q = avctx->priv_data; - int i; + int i, out_size; int offset = 0; int chidx = 0; if (buf_size < avctx->block_align) return buf_size; + out_size = q->nb_channels * q->samples_per_channel * + av_get_bytes_per_sample(avctx->sample_fmt); + if (*data_size < out_size) { + av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); + return AVERROR(EINVAL); + } + /* estimate subpacket sizes */ q->subpacket[0].size = avctx->block_align; @@ -981,22 +977,21 @@ static int cook_decode_frame(AVCodecContext *avctx, q->subpacket[0].size -= q->subpacket[i].size + 1; if (q->subpacket[0].size < 0) { av_log(avctx,AV_LOG_DEBUG,"frame subpacket size total > avctx->block_align!\n"); - return -1; + return AVERROR_INVALIDDATA; } } /* decode supbackets */ - *data_size = 0; for(i=0;i<q->num_subpackets;i++){ q->subpacket[i].bits_per_subpacket = (q->subpacket[i].size*8)>>q->subpacket[i].bits_per_subpdiv; q->subpacket[i].ch_idx = chidx; av_log(avctx,AV_LOG_DEBUG,"subpacket[%i] size %i js %i %i block_align %i\n",i,q->subpacket[i].size,q->subpacket[i].joint_stereo,offset,avctx->block_align); - decode_subpacket(q, &q->subpacket[i], buf + offset, (int16_t*)data); + decode_subpacket(q, &q->subpacket[i], buf + offset, data); offset += q->subpacket[i].size; chidx += q->subpacket[i].num_channels; av_log(avctx,AV_LOG_DEBUG,"subpacket[%i] %i %i\n",i,q->subpacket[i].size * 8,get_bits_count(&q->gb)); } - *data_size = sizeof(int16_t) * q->nb_channels * q->samples_per_channel; + *data_size = out_size; /* Discard the first two 
frames: no valid audio. */ if (avctx->frame_number < 2) *data_size = 0; @@ -1053,12 +1048,13 @@ static av_cold int cook_decode_init(AVCodecContext *avctx) int extradata_size = avctx->extradata_size; int s = 0; unsigned int channel_mask = 0; + int ret; q->avctx = avctx; /* Take care of the codec specific extradata. */ if (extradata_size <= 0) { av_log(avctx,AV_LOG_ERROR,"Necessary extradata missing!\n"); - return -1; + return AVERROR_INVALIDDATA; } av_log(avctx,AV_LOG_DEBUG,"codecdata_length=%d\n",avctx->extradata_size); @@ -1103,7 +1099,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx) case MONO: if (q->nb_channels != 1) { av_log_ask_for_sample(avctx, "Container channels != 1.\n"); - return -1; + return AVERROR_PATCHWELCOME; } av_log(avctx,AV_LOG_DEBUG,"MONO\n"); break; @@ -1117,7 +1113,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx) case JOINT_STEREO: if (q->nb_channels != 2) { av_log_ask_for_sample(avctx, "Container channels != 2.\n"); - return -1; + return AVERROR_PATCHWELCOME; } av_log(avctx,AV_LOG_DEBUG,"JOINT_STEREO\n"); if (avctx->extradata_size >= 16){ @@ -1155,12 +1151,12 @@ static av_cold int cook_decode_init(AVCodecContext *avctx) break; default: av_log_ask_for_sample(avctx, "Unknown Cook version.\n"); - return -1; + return AVERROR_PATCHWELCOME; } if(s > 1 && q->subpacket[s].samples_per_channel != q->samples_per_channel) { av_log(avctx,AV_LOG_ERROR,"different number of samples per channel!\n"); - return -1; + return AVERROR_INVALIDDATA; } else q->samples_per_channel = q->subpacket[0].samples_per_channel; @@ -1171,18 +1167,18 @@ static av_cold int cook_decode_init(AVCodecContext *avctx) /* Try to catch some obviously faulty streams, othervise it might be exploitable */ if (q->subpacket[s].total_subbands > 53) { av_log_ask_for_sample(avctx, "total_subbands > 53\n"); - return -1; + return AVERROR_PATCHWELCOME; } if ((q->subpacket[s].js_vlc_bits > 6) || (q->subpacket[s].js_vlc_bits < 2*q->subpacket[s].joint_stereo)) { av_log(avctx,AV_LOG_ERROR,"js_vlc_bits = %d, only >= %d and <= 6 allowed!\n", q->subpacket[s].js_vlc_bits, 2*q->subpacket[s].joint_stereo); - return -1; + return AVERROR_INVALIDDATA; } if (q->subpacket[s].subbands > 50) { av_log_ask_for_sample(avctx, "subbands > 50\n"); - return -1; + return AVERROR_PATCHWELCOME; } q->subpacket[s].gains1.now = q->subpacket[s].gain_1; q->subpacket[s].gains1.previous = q->subpacket[s].gain_2; @@ -1193,7 +1189,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx) s++; if (s > MAX_SUBPACKETS) { av_log_ask_for_sample(avctx, "Too many subpackets > 5\n"); - return -1; + return AVERROR_PATCHWELCOME; } } /* Generate tables */ @@ -1201,12 +1197,12 @@ static av_cold int cook_decode_init(AVCodecContext *avctx) init_gain_table(q); init_cplscales_table(q); - if (init_cook_vlc_tables(q) != 0) - return -1; + if ((ret = init_cook_vlc_tables(q))) + return ret; if(avctx->block_align >= UINT_MAX/2) - return -1; + return AVERROR(EINVAL); /* Pad the databuffer with: DECODE_BYTES_PAD1 or DECODE_BYTES_PAD2 for decode_bytes(), @@ -1216,11 +1212,11 @@ static av_cold int cook_decode_init(AVCodecContext *avctx) + DECODE_BYTES_PAD1(avctx->block_align) + FF_INPUT_BUFFER_PADDING_SIZE); if (q->decoded_bytes_buffer == NULL) - return -1; + return AVERROR(ENOMEM); /* Initialize transform. 
*/ - if ( init_cook_mlt(q) != 0 ) - return -1; + if ((ret = init_cook_mlt(q))) + return ret; /* Initialize COOK signal arithmetic handling */ if (1) { @@ -1237,10 +1233,10 @@ static av_cold int cook_decode_init(AVCodecContext *avctx) av_log_ask_for_sample(avctx, "unknown amount of samples_per_channel = %d\n", q->samples_per_channel); - return -1; + return AVERROR_PATCHWELCOME; } - avctx->sample_fmt = AV_SAMPLE_FMT_S16; + avctx->sample_fmt = AV_SAMPLE_FMT_FLT; if (channel_mask) avctx->channel_layout = channel_mask; else diff --git a/libavcodec/dca.c b/libavcodec/dca.c index 5cf5b2629a..37977e5c55 100644 --- a/libavcodec/dca.c +++ b/libavcodec/dca.c @@ -528,15 +528,15 @@ static int dca_parse_frame_header(DCAContext * s) s->sample_blocks = get_bits(&s->gb, 7) + 1; s->frame_size = get_bits(&s->gb, 14) + 1; if (s->frame_size < 95) - return -1; + return AVERROR_INVALIDDATA; s->amode = get_bits(&s->gb, 6); s->sample_rate = dca_sample_rates[get_bits(&s->gb, 4)]; if (!s->sample_rate) - return -1; + return AVERROR_INVALIDDATA; s->bit_rate_index = get_bits(&s->gb, 5); s->bit_rate = dca_bit_rates[s->bit_rate_index]; if (!s->bit_rate) - return -1; + return AVERROR_INVALIDDATA; s->downmix = get_bits(&s->gb, 1); s->dynrange = get_bits(&s->gb, 1); @@ -626,7 +626,7 @@ static int dca_subframe_header(DCAContext * s, int base_channel, int block_index int j, k; if (get_bits_left(&s->gb) < 0) - return -1; + return AVERROR_INVALIDDATA; if (!base_channel) { s->subsubframes[s->current_subframe] = get_bits(&s->gb, 2) + 1; @@ -658,7 +658,7 @@ static int dca_subframe_header(DCAContext * s, int base_channel, int block_index else if (s->bitalloc_huffman[j] == 7) { av_log(s->avctx, AV_LOG_ERROR, "Invalid bit allocation index\n"); - return -1; + return AVERROR_INVALIDDATA; } else { s->bitalloc[j][k] = get_bitalloc(&s->gb, &dca_bitalloc_index, s->bitalloc_huffman[j]); @@ -667,7 +667,7 @@ static int dca_subframe_header(DCAContext * s, int base_channel, int block_index if (s->bitalloc[j][k] > 26) { // av_log(s->avctx,AV_LOG_DEBUG,"bitalloc index [%i][%i] too big (%i)\n", // j, k, s->bitalloc[j][k]); - return -1; + return AVERROR_INVALIDDATA; } } } @@ -685,7 +685,7 @@ static int dca_subframe_header(DCAContext * s, int base_channel, int block_index } if (get_bits_left(&s->gb) < 0) - return -1; + return AVERROR_INVALIDDATA; for (j = base_channel; j < s->prim_channels; j++) { const uint32_t *scale_table; @@ -723,7 +723,7 @@ static int dca_subframe_header(DCAContext * s, int base_channel, int block_index } if (get_bits_left(&s->gb) < 0) - return -1; + return AVERROR_INVALIDDATA; /* Scale factors for joint subband coding */ for (j = base_channel; j < s->prim_channels; j++) { @@ -1055,7 +1055,7 @@ static int decode_blockcode(int code, int levels, int *values) return 0; else { av_log(NULL, AV_LOG_ERROR, "ERROR: block code look-up failed\n"); - return -1; + return AVERROR_INVALIDDATA; } } #endif @@ -1096,7 +1096,7 @@ static int dca_subsubframe(DCAContext * s, int base_channel, int block_index) for (k = base_channel; k < s->prim_channels; k++) { if (get_bits_left(&s->gb) < 0) - return -1; + return AVERROR_INVALIDDATA; for (l = 0; l < s->vq_start_subband[k]; l++) { int m; @@ -1275,12 +1275,13 @@ static int dca_subframe_footer(DCAContext * s, int base_channel) static int dca_decode_block(DCAContext * s, int base_channel, int block_index) { + int ret; /* Sanity check */ if (s->current_subframe >= s->subframes) { av_log(s->avctx, AV_LOG_DEBUG, "check failed: %i>%i", s->current_subframe, s->subframes); - return -1; + return 
AVERROR_INVALIDDATA; } if (!s->current_subsubframe) { @@ -1288,16 +1289,16 @@ static int dca_decode_block(DCAContext * s, int base_channel, int block_index) av_log(s->avctx, AV_LOG_DEBUG, "DSYNC dca_subframe_header\n"); #endif /* Read subframe header */ - if (dca_subframe_header(s, base_channel, block_index)) - return -1; + if ((ret = dca_subframe_header(s, base_channel, block_index))) + return ret; } /* Read subsubframe */ #ifdef TRACE av_log(s->avctx, AV_LOG_DEBUG, "DSYNC dca_subsubframe\n"); #endif - if (dca_subsubframe(s, base_channel, block_index)) - return -1; + if ((ret = dca_subsubframe(s, base_channel, block_index))) + return ret; /* Update state */ s->current_subsubframe++; @@ -1310,8 +1311,8 @@ static int dca_decode_block(DCAContext * s, int base_channel, int block_index) av_log(s->avctx, AV_LOG_DEBUG, "DSYNC dca_subframe_footer\n"); #endif /* Read subframe footer */ - if (dca_subframe_footer(s, base_channel)) - return -1; + if ((ret = dca_subframe_footer(s, base_channel))) + return ret; } return 0; @@ -1354,7 +1355,7 @@ static int dca_convert_bitstream(const uint8_t * src, int src_size, uint8_t * ds flush_put_bits(&pb); return (put_bits_count(&pb) + 7) >> 3; default: - return -1; + return AVERROR_INVALIDDATA; } } @@ -1637,7 +1638,7 @@ static int dca_decode_frame(AVCodecContext * avctx, int lfe_samples; int num_core_channels = 0; - int i; + int i, ret; float *samples_flt = data; int16_t *samples_s16 = data; int out_size; @@ -1650,16 +1651,15 @@ static int dca_decode_frame(AVCodecContext * avctx, s->dca_buffer_size = dca_convert_bitstream(buf, buf_size, s->dca_buffer, DCA_MAX_FRAME_SIZE + DCA_MAX_EXSS_HEADER_SIZE); - if (s->dca_buffer_size == -1) { + if (s->dca_buffer_size == AVERROR_INVALIDDATA) { av_log(avctx, AV_LOG_ERROR, "Not a valid DCA frame\n"); - return -1; + return AVERROR_INVALIDDATA; } init_get_bits(&s->gb, s->dca_buffer, s->dca_buffer_size * 8); - if (dca_parse_frame_header(s) < 0) { + if ((ret = dca_parse_frame_header(s)) < 0) { //seems like the frame is corrupt, try with the next one - *data_size=0; - return buf_size; + return ret; } //set AVCodec values with parsed data avctx->sample_rate = s->sample_rate; @@ -1669,7 +1669,10 @@ static int dca_decode_frame(AVCodecContext * avctx, s->profile = FF_PROFILE_DTS; for (i = 0; i < (s->sample_blocks / 8); i++) { - dca_decode_block(s, 0, i); + if ((ret = dca_decode_block(s, 0, i))) { + av_log(avctx, AV_LOG_ERROR, "error decoding block\n"); + return ret; + } } /* record number of core channels incase less than max channels are requested */ @@ -1725,7 +1728,10 @@ static int dca_decode_frame(AVCodecContext * avctx, dca_parse_audio_coding_header(s, s->xch_base_channel); for (i = 0; i < (s->sample_blocks / 8); i++) { - dca_decode_block(s, s->xch_base_channel, i); + if ((ret = dca_decode_block(s, s->xch_base_channel, i))) { + av_log(avctx, AV_LOG_ERROR, "error decoding XCh extension\n"); + continue; + } } s->xch_present = 1; @@ -1799,7 +1805,7 @@ static int dca_decode_frame(AVCodecContext * avctx, if (channels > !!s->lfe && s->channel_order_tab[channels - 1 - !!s->lfe] < 0) - return -1; + return AVERROR_INVALIDDATA; if (avctx->request_channels == 2 && s->prim_channels > 2) { channels = 2; @@ -1812,7 +1818,7 @@ static int dca_decode_frame(AVCodecContext * avctx, } } else { av_log(avctx, AV_LOG_ERROR, "Non standard configuration %d !\n",s->amode); - return -1; + return AVERROR_INVALIDDATA; } if (avctx->channels != channels) { @@ -1824,7 +1830,7 @@ static int dca_decode_frame(AVCodecContext * avctx, out_size = 256 / 8 * 
s->sample_blocks * channels * av_get_bytes_per_sample(avctx->sample_fmt); if (*data_size < out_size) - return -1; + return AVERROR(EINVAL); *data_size = out_size; /* filter to get final output */ diff --git a/libavcodec/dsicinav.c b/libavcodec/dsicinav.c index 05d9e4cc14..53d4f90d2e 100644 --- a/libavcodec/dsicinav.c +++ b/libavcodec/dsicinav.c @@ -26,6 +26,7 @@ #include "avcodec.h" #include "bytestream.h" +#include "mathops.h" typedef enum CinVideoBitmapIndex { @@ -43,7 +44,6 @@ typedef struct CinVideoContext { } CinVideoContext; typedef struct CinAudioContext { - AVCodecContext *avctx; int initial_decode_frame; int delta; } CinAudioContext; @@ -309,7 +309,11 @@ static av_cold int cinaudio_decode_init(AVCodecContext *avctx) { CinAudioContext *cin = avctx->priv_data; - cin->avctx = avctx; + if (avctx->channels != 1) { + av_log_ask_for_sample(avctx, "Number of channels is not supported\n"); + return AVERROR_PATCHWELCOME; + } + cin->initial_decode_frame = 1; cin->delta = 0; avctx->sample_fmt = AV_SAMPLE_FMT_S16; @@ -322,29 +326,35 @@ static int cinaudio_decode_frame(AVCodecContext *avctx, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; - int buf_size = avpkt->size; CinAudioContext *cin = avctx->priv_data; - const uint8_t *src = buf; - int16_t *samples = (int16_t *)data; - - buf_size = FFMIN(buf_size, *data_size/2); + const uint8_t *buf_end = buf + avpkt->size; + int16_t *samples = data; + int delta, out_size; + + out_size = (avpkt->size - cin->initial_decode_frame) * + av_get_bytes_per_sample(avctx->sample_fmt); + if (*data_size < out_size) { + av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); + return AVERROR(EINVAL); + } + delta = cin->delta; if (cin->initial_decode_frame) { cin->initial_decode_frame = 0; - cin->delta = (int16_t)AV_RL16(src); src += 2; - *samples++ = cin->delta; - buf_size -= 2; + delta = sign_extend(AV_RL16(buf), 16); + buf += 2; + *samples++ = delta; } - while (buf_size > 0) { - cin->delta += cinaudio_delta16_table[*src++]; - cin->delta = av_clip_int16(cin->delta); - *samples++ = cin->delta; - --buf_size; + while (buf < buf_end) { + delta += cinaudio_delta16_table[*buf++]; + delta = av_clip_int16(delta); + *samples++ = delta; } + cin->delta = delta; - *data_size = (uint8_t *)samples - (uint8_t *)data; + *data_size = out_size; - return src - buf; + return avpkt->size; } diff --git a/libavcodec/flacdec.c b/libavcodec/flacdec.c index cebe3e3e62..c140440436 100644 --- a/libavcodec/flacdec.c +++ b/libavcodec/flacdec.c @@ -587,7 +587,8 @@ static int flac_decode_frame(AVCodecContext *avctx, bytes_read = (get_bits_count(&s->gb)+7)/8; /* check if allocated data size is large enough for output */ - output_size = s->blocksize * s->channels * (s->is32 ? 
4 : 2); + output_size = s->blocksize * s->channels * + av_get_bytes_per_sample(avctx->sample_fmt); if (output_size > alloc_data_size) { av_log(s->avctx, AV_LOG_ERROR, "output data size is larger than " "allocated data size\n"); diff --git a/libavcodec/h264.c b/libavcodec/h264.c index 7792d03ce7..3302f71993 100644 --- a/libavcodec/h264.c +++ b/libavcodec/h264.c @@ -1815,7 +1815,7 @@ static av_always_inline void hl_decode_mb_predict_luma(H264Context *h, int mb_ty static const uint8_t dc_mapping[16] = { 0*16, 1*16, 4*16, 5*16, 2*16, 3*16, 6*16, 7*16, 8*16, 9*16,12*16,13*16,10*16,11*16,14*16,15*16}; for(i = 0; i < 16; i++) - dctcoef_set(h->mb+p*256, pixel_shift, dc_mapping[i], dctcoef_get(h->mb_luma_dc[p], pixel_shift, i)); + dctcoef_set(h->mb+(p*256 << pixel_shift), pixel_shift, dc_mapping[i], dctcoef_get(h->mb_luma_dc[p], pixel_shift, i)); } } }else @@ -2033,7 +2033,7 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i } if (chroma422) { for(i=j*16+4; i<j*16+8; i++){ - if(h->non_zero_count_cache[ scan8[i] ] || dctcoef_get(h->mb, pixel_shift, i*16)) + if(h->non_zero_count_cache[ scan8[i+4] ] || dctcoef_get(h->mb, pixel_shift, i*16)) idct_add (dest[j-1] + block_offset[i+4], h->mb + (i*16 << pixel_shift), uvlinesize); } } diff --git a/libavcodec/utvideo.c b/libavcodec/utvideo.c index aac3969b15..4c3b2a1621 100644 --- a/libavcodec/utvideo.c +++ b/libavcodec/utvideo.c @@ -66,7 +66,7 @@ static int huff_cmp(const void *a, const void *b) return (aa->len - bb->len)*256 + aa->sym - bb->sym; } -static int build_huff(const uint8_t *src, VLC *vlc) +static int build_huff(const uint8_t *src, VLC *vlc, int *fsym) { int i; HuffEntry he[256]; @@ -76,13 +76,18 @@ static int build_huff(const uint8_t *src, VLC *vlc) uint8_t syms[256]; uint32_t code; + *fsym = -1; for (i = 0; i < 256; i++) { he[i].sym = i; he[i].len = *src++; } qsort(he, 256, sizeof(*he), huff_cmp); - if (!he[0].len || he[0].len > 32) + if (!he[0].len) { + *fsym = he[0].sym; + return 0; + } + if (he[0].len > 32) return -1; last = 255; @@ -112,12 +117,37 @@ static int decode_plane(UtvideoContext *c, int plane_no, int sstart, send; VLC vlc; GetBitContext gb; - int prev; + int prev, fsym; + const int cmask = ~(!plane_no && c->avctx->pix_fmt == PIX_FMT_YUV420P); - if (build_huff(src, &vlc)) { + if (build_huff(src, &vlc, &fsym)) { av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n"); return AVERROR_INVALIDDATA; } + if (fsym >= 0) { // build_huff reported a symbol to fill slices with + send = 0; + for (slice = 0; slice < c->slices; slice++) { + uint8_t *dest; + + sstart = send; + send = (height * (slice + 1) / c->slices) & cmask; + dest = dst + sstart * stride; + + prev = 0x80; + for (j = sstart; j < send; j++) { + for (i = 0; i < width * step; i += step) { + pix = fsym; + if (use_pred) { + prev += pix; + pix = prev; + } + dest[i] = pix; + } + dest += stride; + } + } + return 0; + } src += 256; src_size -= 256; @@ -128,7 +158,7 @@ static int decode_plane(UtvideoContext *c, int plane_no, int slice_data_start, slice_data_end, slice_size; sstart = send; - send = height * (slice + 1) / c->slices; + send = (height * (slice + 1) / c->slices) & cmask; dest = dst + sstart * stride; // slice offset and size validation was done earlier @@ -204,16 +234,17 @@ static void restore_rgb_planes(uint8_t *src, int step, int stride, int width, in } static void restore_median(uint8_t *src, int step, int stride, - int width, int height, int slices) + int width, int height, int slices, int rmode) { int i, j, slice; int A, B, C; 
uint8_t *bsrc; int slice_start, slice_height; + const int cmask = ~rmode; for (slice = 0; slice < slices; slice++) { - slice_start = (slice * height) / slices; - slice_height = ((slice + 1) * height) / slices - slice_start; + slice_start = ((slice * height) / slices) & cmask; + slice_height = ((((slice + 1) * height) / slices) & cmask) - slice_start; bsrc = src + slice_start * stride; @@ -337,7 +368,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac if (c->frame_pred == PRED_MEDIAN) restore_median(c->pic.data[0] + rgb_order[i], c->planes, c->pic.linesize[0], avctx->width, avctx->height, - c->slices); + c->slices, 0); } restore_rgb_planes(c->pic.data[0], c->planes, c->pic.linesize[0], avctx->width, avctx->height); @@ -353,7 +384,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac if (c->frame_pred == PRED_MEDIAN) restore_median(c->pic.data[i], 1, c->pic.linesize[i], avctx->width >> !!i, avctx->height >> !!i, - c->slices); + c->slices, !i); } break; case PIX_FMT_YUV422P: @@ -366,7 +397,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac return ret; if (c->frame_pred == PRED_MEDIAN) restore_median(c->pic.data[i], 1, c->pic.linesize[i], - avctx->width >> !!i, avctx->height, c->slices); + avctx->width >> !!i, avctx->height, c->slices, 0); } break; } diff --git a/libavcodec/vp3.c b/libavcodec/vp3.c index 77a3151b45..1f8841acb7 100644 --- a/libavcodec/vp3.c +++ b/libavcodec/vp3.c @@ -45,6 +45,7 @@ #define FRAGMENT_PIXELS 8 static av_cold int vp3_decode_end(AVCodecContext *avctx); +static void vp3_decode_flush(AVCodecContext *avctx); //FIXME split things out into their own arrays typedef struct Vp3Fragment { @@ -890,7 +891,7 @@ static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb, /* decode a VLC into a token */ token = get_vlc2(gb, vlc_table, 11, 3); /* use the token to get a zero run, a coefficient, and an eob run */ - if (token <= 6) { + if ((unsigned) token <= 6U) { eob_run = eob_run_base[token]; if (eob_run_get_bits[token]) eob_run += get_bits(gb, eob_run_get_bits[token]); @@ -908,7 +909,7 @@ static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb, coeff_i += eob_run; eob_run = 0; } - } else { + } else if (token >= 0) { bits_to_get = coeff_get_bits[token]; if (bits_to_get) bits_to_get = get_bits(gb, bits_to_get); @@ -942,6 +943,10 @@ static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb, for (i = coeff_index+1; i <= coeff_index+zero_run; i++) s->num_coded_frags[plane][i]--; coeff_i++; + } else { + av_log(s->avctx, AV_LOG_ERROR, + "Invalid token %d\n", token); + return -1; } } @@ -991,6 +996,8 @@ static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb) /* unpack the Y plane DC coefficients */ residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_y_table], 0, 0, residual_eob_run); + if (residual_eob_run < 0) + return residual_eob_run; /* reverse prediction of the Y-plane DC coefficients */ reverse_dc_prediction(s, 0, s->fragment_width[0], s->fragment_height[0]); @@ -998,8 +1005,12 @@ static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb) /* unpack the C plane DC coefficients */ residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0, 1, residual_eob_run); + if (residual_eob_run < 0) + return residual_eob_run; residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0, 2, residual_eob_run); + if (residual_eob_run < 0) + return residual_eob_run; /* reverse prediction of the C-plane DC coefficients */ if (!(s->avctx->flags & 
CODEC_FLAG_GRAY)) @@ -1036,11 +1047,17 @@ static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb) for (i = 1; i <= 63; i++) { residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i, 0, residual_eob_run); + if (residual_eob_run < 0) + return residual_eob_run; residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i, 1, residual_eob_run); + if (residual_eob_run < 0) + return residual_eob_run; residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i, 2, residual_eob_run); + if (residual_eob_run < 0) + return residual_eob_run; } return 0; @@ -1777,10 +1794,15 @@ static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext * Vp3DecodeContext *s = dst->priv_data, *s1 = src->priv_data; int qps_changed = 0, i, err; +#define copy_fields(to, from, start_field, end_field) memcpy(&to->start_field, &from->start_field, (char*)&to->end_field - (char*)&to->start_field) + if (!s1->current_frame.data[0] ||s->width != s1->width - ||s->height!= s1->height) + ||s->height!= s1->height) { + if (s != s1) + copy_fields(s, s1, golden_frame, current_frame); return -1; + } if (s != s1) { // init tables if the first frame hasn't been decoded @@ -1796,8 +1818,6 @@ static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext * memcpy(s->motion_val[1], s1->motion_val[1], c_fragment_count * sizeof(*s->motion_val[1])); } -#define copy_fields(to, from, start_field, end_field) memcpy(&to->start_field, &from->start_field, (char*)&to->end_field - (char*)&to->start_field) - // copy previous frame data copy_fields(s, s1, golden_frame, dsp); @@ -1990,9 +2010,6 @@ static av_cold int vp3_decode_end(AVCodecContext *avctx) Vp3DecodeContext *s = avctx->priv_data; int i; - if (avctx->is_copy && !s->current_frame.data[0]) - return 0; - av_free(s->superblock_coding); av_free(s->all_fragments); av_free(s->coded_fragment_list[0]); @@ -2339,6 +2356,23 @@ static void vp3_decode_flush(AVCodecContext *avctx) ff_thread_release_buffer(avctx, &s->current_frame); } +static int vp3_init_thread_copy(AVCodecContext *avctx) +{ + Vp3DecodeContext *s = avctx->priv_data; + + s->superblock_coding = NULL; + s->all_fragments = NULL; + s->coded_fragment_list[0] = NULL; + s->dct_tokens_base = NULL; + s->superblock_fragments = NULL; + s->macroblock_coding = NULL; + s->motion_val[0] = NULL; + s->motion_val[1] = NULL; + s->edge_emu_buffer = NULL; + + return 0; +} + AVCodec ff_theora_decoder = { .name = "theora", .type = AVMEDIA_TYPE_VIDEO, @@ -2350,6 +2384,7 @@ AVCodec ff_theora_decoder = { .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS, .flush = vp3_decode_flush, .long_name = NULL_IF_CONFIG_SMALL("Theora"), + .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy), .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context) }; #endif @@ -2365,5 +2400,6 @@ AVCodec ff_vp3_decoder = { .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS, .flush = vp3_decode_flush, .long_name = NULL_IF_CONFIG_SMALL("On2 VP3"), + .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy), .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context) }; diff --git a/libavcodec/vp8.c b/libavcodec/vp8.c index 9b07608078..37bdcf7525 100644 --- a/libavcodec/vp8.c +++ b/libavcodec/vp8.c @@ -50,7 +50,8 @@ static int vp8_alloc_frame(VP8Context *s, AVFrame *f) int ret; if ((ret = ff_thread_get_buffer(s->avctx, f)) < 0) return ret; - if (!s->maps_are_invalid && s->num_maps_to_be_freed) { + if (s->num_maps_to_be_freed) { + 
assert(!s->maps_are_invalid); f->ref_index[0] = s->segmentation_maps[--s->num_maps_to_be_freed]; } else if (!(f->ref_index[0] = av_mallocz(s->mb_width * s->mb_height))) { ff_thread_release_buffer(s->avctx, f); @@ -59,39 +60,50 @@ static int vp8_alloc_frame(VP8Context *s, AVFrame *f) return 0; } -static void vp8_release_frame(VP8Context *s, AVFrame *f, int is_close) +static void vp8_release_frame(VP8Context *s, AVFrame *f, int prefer_delayed_free, int can_direct_free) { - if (!is_close) { - if (f->ref_index[0]) { - assert(s->num_maps_to_be_freed < FF_ARRAY_ELEMS(s->segmentation_maps)); - s->segmentation_maps[s->num_maps_to_be_freed++] = f->ref_index[0]; + if (f->ref_index[0]) { + if (prefer_delayed_free) { + /* Upon a size change, we want to free the maps but other threads may still + * be using them, so queue them. Upon a seek, all threads are inactive so + * we want to cache one to prevent re-allocation in the next decoding + * iteration, but the rest we can free directly. */ + int max_queued_maps = can_direct_free ? 1 : FF_ARRAY_ELEMS(s->segmentation_maps); + if (s->num_maps_to_be_freed < max_queued_maps) { + s->segmentation_maps[s->num_maps_to_be_freed++] = f->ref_index[0]; + } else if (can_direct_free) /* vp8_decode_flush(), but our queue is full */ { + av_free(f->ref_index[0]); + } /* else: MEMLEAK (should never happen, but better that than crash) */ f->ref_index[0] = NULL; + } else /* vp8_decode_free() */ { + av_free(f->ref_index[0]); } - } else { - av_freep(&f->ref_index[0]); } ff_thread_release_buffer(s->avctx, f); } -static void vp8_decode_flush_impl(AVCodecContext *avctx, int force, int is_close) +static void vp8_decode_flush_impl(AVCodecContext *avctx, + int prefer_delayed_free, int can_direct_free, int free_mem) { VP8Context *s = avctx->priv_data; int i; - if (!avctx->is_copy || force) { + if (!avctx->is_copy) { for (i = 0; i < 5; i++) if (s->frames[i].data[0]) - vp8_release_frame(s, &s->frames[i], is_close); + vp8_release_frame(s, &s->frames[i], prefer_delayed_free, can_direct_free); } memset(s->framep, 0, sizeof(s->framep)); - free_buffers(s); - s->maps_are_invalid = 1; + if (free_mem) { + free_buffers(s); + s->maps_are_invalid = 1; + } } static void vp8_decode_flush(AVCodecContext *avctx) { - vp8_decode_flush_impl(avctx, 0, 0); + vp8_decode_flush_impl(avctx, 1, 1, 0); } static int update_dimensions(VP8Context *s, int width, int height) @@ -101,7 +113,7 @@ static int update_dimensions(VP8Context *s, int width, int height) if (av_image_check_size(width, height, 0, s->avctx)) return AVERROR_INVALIDDATA; - vp8_decode_flush_impl(s->avctx, 1, 0); + vp8_decode_flush_impl(s->avctx, 1, 0, 1); avcodec_set_dimensions(s->avctx, width, height); } @@ -1581,7 +1593,7 @@ static int vp8_decode_frame(AVCodecContext *avctx, void *data, int *data_size, &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] && &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] && &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) - vp8_release_frame(s, &s->frames[i], 0); + vp8_release_frame(s, &s->frames[i], 1, 0); // find a free buffer for (i = 0; i < 5; i++) @@ -1597,7 +1609,7 @@ static int vp8_decode_frame(AVCodecContext *avctx, void *data, int *data_size, abort(); } if (curframe->data[0]) - ff_thread_release_buffer(avctx, curframe); + vp8_release_frame(s, curframe, 1, 0); curframe->key_frame = s->keyframe; curframe->pict_type = s->keyframe ? 
AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P; @@ -1778,7 +1790,7 @@ static av_cold int vp8_decode_init(AVCodecContext *avctx) static av_cold int vp8_decode_free(AVCodecContext *avctx) { - vp8_decode_flush_impl(avctx, 0, 1); + vp8_decode_flush_impl(avctx, 0, 1, 1); release_queued_segmaps(avctx->priv_data, 1); return 0; } |