author    | Michael Niedermayer <michaelni@gmx.at> | 2011-12-05 00:11:57 +0100
committer | Michael Niedermayer <michaelni@gmx.at> | 2011-12-05 00:11:57 +0100
commit    | 707138593af5c4783035d0b9cc2d7c8cb2137dfa (patch)
tree      | 7ead2e3c73fd33764dede26546b0238bb40d484b
parent    | 2f8b6e909dd733d9b722a5266ca516a9a5ba67e9 (diff)
parent    | dc6d0430503ecd7ed0d81276f977b26b4c4bd916 (diff)
download  | ffmpeg-707138593af5c4783035d0b9cc2d7c8cb2137dfa.tar.gz
Merge remote-tracking branch 'qatar/master'
* qatar/master:
adpcmenc: cosmetics: pretty-printing
ac3dec: cosmetics: pretty-printing
yuv4mpeg: cosmetics: pretty-printing
shorten: remove dead initialization
roqvideodec: set AVFrame reference before reget_buffer.
bmp: fix some 1bit samples.
latmdec: add fate test for audio config change
oma: PCM support
oma: better format detection with small probe buffer
oma: clarify ambiguous if condition
wavpack: Properly clip samples during lossy decode
Code clean-up for crc.c, lfg.c, log.c, random_seed.c, rational.c and tree.c.
Cleaned pixdesc.c file in libavutil
zmbv.c: coding style clean-up.
xan.c: coding style clean-up.
mpegvideo.c: code cleanup - first 500 lines.
Conflicts:
Changelog
libavcodec/adpcmenc.c
libavcodec/bmp.c
libavcodec/zmbv.c
libavutil/log.c
libavutil/pixdesc.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
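
One of the merged changes, "roqvideodec: set AVFrame reference before reget_buffer", relies on a decoder-side pattern of this FFmpeg era: a frame whose buffer is reused across decode calls must be flagged as a reference frame before avctx->reget_buffer() is invoked, otherwise the buffer may not be preserved. The sketch below illustrates that pattern only; it assumes the AVFrame.reference field and the AVCodecContext.reget_buffer callback as they existed at the time, and the helper name and include path are illustrative rather than taken from the actual patch.

```c
#include "avcodec.h"   /* assumed in-tree libavcodec include */

/* Illustrative sketch only: flag the frame as a reference, then ask the
 * codec context to re-get (reuse) its buffer for the next decode call. */
static int reuse_frame_buffer(AVCodecContext *avctx, AVFrame *frame)
{
    int ret;

    frame->reference = 3;  /* buffer contents will be referenced later */
    if ((ret = avctx->reget_buffer(avctx, frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return ret;
    }
    return 0;
}
```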
-rw-r--r-- | Changelog               |   1
-rw-r--r-- | libavcodec/ac3dec.c     | 509
-rw-r--r-- | libavcodec/adpcmenc.c   | 488
-rw-r--r-- | libavcodec/bmp.c        |   4
-rw-r--r-- | libavcodec/mpegvideo.c  | 333
-rw-r--r-- | libavcodec/shorten.c    |   1
-rw-r--r-- | libavcodec/wavpack.c    |  11
-rw-r--r-- | libavcodec/xan.c        |  53
-rw-r--r-- | libavcodec/zmbv.c       | 165
-rw-r--r-- | libavformat/oma.c       |  37
-rw-r--r-- | libavformat/yuv4mpeg.c  | 248
-rw-r--r-- | libavutil/crc.c         |  73
-rw-r--r-- | libavutil/lfg.c         |  34
-rw-r--r-- | libavutil/log.c         |  81
-rw-r--r-- | libavutil/pixdesc.c     | 967
-rw-r--r-- | libavutil/random_seed.c |  28
-rw-r--r-- | libavutil/rational.c    | 110
-rw-r--r-- | libavutil/tree.c        | 196
-rw-r--r-- | tests/fate/aac.mak      |   4
19 files changed, 1784 insertions, 1559 deletions
@@ -129,6 +129,7 @@ easier to use. The changes are: - Playstation Portable PMP format demuxer - Microsoft Windows ICO demuxer - life source +- PCM format support in OMA demuxer version 0.8: diff --git a/libavcodec/ac3dec.c b/libavcodec/ac3dec.c index c650881430..5e8b36404b 100644 --- a/libavcodec/ac3dec.c +++ b/libavcodec/ac3dec.c @@ -44,7 +44,6 @@ */ static uint8_t ungroup_3_in_7_bits_tab[128][3]; - /** tables for ungrouping mantissas */ static int b1_mantissas[32][3]; static int b2_mantissas[128][3]; @@ -124,7 +123,7 @@ static av_cold void ac3_tables_init(void) /* generate table for ungrouping 3 values in 7 bits reference: Section 7.1.3 Exponent Decoding */ - for(i=0; i<128; i++) { + for (i = 0; i < 128; i++) { ungroup_3_in_7_bits_tab[i][0] = i / 25; ungroup_3_in_7_bits_tab[i][1] = (i % 25) / 5; ungroup_3_in_7_bits_tab[i][2] = (i % 25) % 5; @@ -132,13 +131,13 @@ static av_cold void ac3_tables_init(void) /* generate grouped mantissa tables reference: Section 7.3.5 Ungrouping of Mantissas */ - for(i=0; i<32; i++) { + for (i = 0; i < 32; i++) { /* bap=1 mantissas */ b1_mantissas[i][0] = symmetric_dequant(ff_ac3_ungroup_3_in_5_bits_tab[i][0], 3); b1_mantissas[i][1] = symmetric_dequant(ff_ac3_ungroup_3_in_5_bits_tab[i][1], 3); b1_mantissas[i][2] = symmetric_dequant(ff_ac3_ungroup_3_in_5_bits_tab[i][2], 3); } - for(i=0; i<128; i++) { + for (i = 0; i < 128; i++) { /* bap=2 mantissas */ b2_mantissas[i][0] = symmetric_dequant(ungroup_3_in_7_bits_tab[i][0], 5); b2_mantissas[i][1] = symmetric_dequant(ungroup_3_in_7_bits_tab[i][1], 5); @@ -150,24 +149,23 @@ static av_cold void ac3_tables_init(void) } /* generate ungrouped mantissa tables reference: Tables 7.21 and 7.23 */ - for(i=0; i<7; i++) { + for (i = 0; i < 7; i++) { /* bap=3 mantissas */ b3_mantissas[i] = symmetric_dequant(i, 7); } - for(i=0; i<15; i++) { + for (i = 0; i < 15; i++) { /* bap=5 mantissas */ b5_mantissas[i] = symmetric_dequant(i, 15); } /* generate dynamic range table reference: Section 7.7.1 Dynamic Range Control */ - for(i=0; i<256; i++) { + for (i = 0; i < 256; i++) { int v = (i >> 5) - ((i >> 7) << 3) - 5; dynamic_range_tab[i] = powf(2.0f, v) * ((i & 0x1F) | 0x20); } } - /** * AVCodec initialization */ @@ -250,7 +248,7 @@ static int ac3_parse_header(AC3DecodeContext *s) i = get_bits(gbc, 6); do { skip_bits(gbc, 8); - } while(i--); + } while (i--); } return 0; @@ -265,7 +263,7 @@ static int parse_frame_header(AC3DecodeContext *s) int err; err = avpriv_ac3_parse_header(&s->gbc, &hdr); - if(err) + if (err) return err; /* get decoding parameters from header info */ @@ -287,9 +285,9 @@ static int parse_frame_header(AC3DecodeContext *s) s->frame_type = hdr.frame_type; s->substreamid = hdr.substreamid; - if(s->lfe_on) { - s->start_freq[s->lfe_ch] = 0; - s->end_freq[s->lfe_ch] = 7; + if (s->lfe_on) { + s->start_freq[s->lfe_ch] = 0; + s->end_freq[s->lfe_ch] = 7; s->num_exp_groups[s->lfe_ch] = 2; s->channel_in_cpl[s->lfe_ch] = 0; } @@ -326,38 +324,39 @@ static void set_downmix_coeffs(AC3DecodeContext *s) float smix = gain_levels[surround_levels[s->surround_mix_level]]; float norm0, norm1; - for(i=0; i<s->fbw_channels; i++) { + for (i = 0; i < s->fbw_channels; i++) { s->downmix_coeffs[i][0] = gain_levels[ac3_default_coeffs[s->channel_mode][i][0]]; s->downmix_coeffs[i][1] = gain_levels[ac3_default_coeffs[s->channel_mode][i][1]]; } - if(s->channel_mode > 1 && s->channel_mode & 1) { + if (s->channel_mode > 1 && s->channel_mode & 1) { s->downmix_coeffs[1][0] = s->downmix_coeffs[1][1] = cmix; } - if(s->channel_mode == AC3_CHMODE_2F1R || 
s->channel_mode == AC3_CHMODE_3F1R) { + if (s->channel_mode == AC3_CHMODE_2F1R || s->channel_mode == AC3_CHMODE_3F1R) { int nf = s->channel_mode - 2; s->downmix_coeffs[nf][0] = s->downmix_coeffs[nf][1] = smix * LEVEL_MINUS_3DB; } - if(s->channel_mode == AC3_CHMODE_2F2R || s->channel_mode == AC3_CHMODE_3F2R) { + if (s->channel_mode == AC3_CHMODE_2F2R || s->channel_mode == AC3_CHMODE_3F2R) { int nf = s->channel_mode - 4; s->downmix_coeffs[nf][0] = s->downmix_coeffs[nf+1][1] = smix; } /* renormalize */ norm0 = norm1 = 0.0; - for(i=0; i<s->fbw_channels; i++) { + for (i = 0; i < s->fbw_channels; i++) { norm0 += s->downmix_coeffs[i][0]; norm1 += s->downmix_coeffs[i][1]; } norm0 = 1.0f / norm0; norm1 = 1.0f / norm1; - for(i=0; i<s->fbw_channels; i++) { + for (i = 0; i < s->fbw_channels; i++) { s->downmix_coeffs[i][0] *= norm0; s->downmix_coeffs[i][1] *= norm1; } - if(s->output_mode == AC3_CHMODE_MONO) { - for(i=0; i<s->fbw_channels; i++) - s->downmix_coeffs[i][0] = (s->downmix_coeffs[i][0] + s->downmix_coeffs[i][1]) * LEVEL_MINUS_3DB; + if (s->output_mode == AC3_CHMODE_MONO) { + for (i = 0; i < s->fbw_channels; i++) + s->downmix_coeffs[i][0] = (s->downmix_coeffs[i][0] + + s->downmix_coeffs[i][1]) * LEVEL_MINUS_3DB; } } @@ -374,7 +373,7 @@ static int decode_exponents(GetBitContext *gbc, int exp_strategy, int ngrps, /* unpack groups */ group_size = exp_strategy + (exp_strategy == EXP_D45); - for(grp=0,i=0; grp<ngrps; grp++) { + for (grp = 0, i = 0; grp < ngrps; grp++) { expacc = get_bits(gbc, 7); dexp[i++] = ungroup_3_in_7_bits_tab[expacc][0]; dexp[i++] = ungroup_3_in_7_bits_tab[expacc][1]; @@ -383,15 +382,15 @@ static int decode_exponents(GetBitContext *gbc, int exp_strategy, int ngrps, /* convert to absolute exps and expand groups */ prevexp = absexp; - for(i=0,j=0; i<ngrps*3; i++) { + for (i = 0, j = 0; i < ngrps * 3; i++) { prevexp += dexp[i] - 2; if (prevexp > 24U) return -1; switch (group_size) { - case 4: dexps[j++] = prevexp; - dexps[j++] = prevexp; - case 2: dexps[j++] = prevexp; - case 1: dexps[j++] = prevexp; + case 4: dexps[j++] = prevexp; + dexps[j++] = prevexp; + case 2: dexps[j++] = prevexp; + case 1: dexps[j++] = prevexp; } } return 0; @@ -414,7 +413,8 @@ static void calc_transform_coeffs_cpl(AC3DecodeContext *s) if (s->channel_in_cpl[ch]) { int cpl_coord = s->cpl_coords[ch][band] << 5; for (bin = band_start; bin < band_end; bin++) { - s->fixed_coeffs[ch][bin] = MULH(s->fixed_coeffs[CPL_CH][bin] << 4, cpl_coord); + s->fixed_coeffs[ch][bin] = + MULH(s->fixed_coeffs[CPL_CH][bin] << 4, cpl_coord); } if (ch == 2 && s->phase_flags[band]) { for (bin = band_start; bin < band_end; bin++) @@ -445,73 +445,70 @@ typedef struct { static void ac3_decode_transform_coeffs_ch(AC3DecodeContext *s, int ch_index, mant_groups *m) { int start_freq = s->start_freq[ch_index]; - int end_freq = s->end_freq[ch_index]; - uint8_t *baps = s->bap[ch_index]; - int8_t *exps = s->dexps[ch_index]; - int *coeffs = s->fixed_coeffs[ch_index]; - int dither = (ch_index == CPL_CH) || s->dither_flag[ch_index]; + int end_freq = s->end_freq[ch_index]; + uint8_t *baps = s->bap[ch_index]; + int8_t *exps = s->dexps[ch_index]; + int *coeffs = s->fixed_coeffs[ch_index]; + int dither = (ch_index == CPL_CH) || s->dither_flag[ch_index]; GetBitContext *gbc = &s->gbc; int freq; - for(freq = start_freq; freq < end_freq; freq++){ + for (freq = start_freq; freq < end_freq; freq++) { int bap = baps[freq]; int mantissa; - switch(bap){ - case 0: - if (dither) - mantissa = (av_lfg_get(&s->dith_state) & 0x7FFFFF) - 0x400000; - else - mantissa 
= 0; - break; - case 1: - if(m->b1){ - m->b1--; - mantissa = m->b1_mant[m->b1]; - } - else{ - int bits = get_bits(gbc, 5); - mantissa = b1_mantissas[bits][0]; - m->b1_mant[1] = b1_mantissas[bits][1]; - m->b1_mant[0] = b1_mantissas[bits][2]; - m->b1 = 2; - } - break; - case 2: - if(m->b2){ - m->b2--; - mantissa = m->b2_mant[m->b2]; - } - else{ - int bits = get_bits(gbc, 7); - mantissa = b2_mantissas[bits][0]; - m->b2_mant[1] = b2_mantissas[bits][1]; - m->b2_mant[0] = b2_mantissas[bits][2]; - m->b2 = 2; - } - break; - case 3: - mantissa = b3_mantissas[get_bits(gbc, 3)]; - break; - case 4: - if(m->b4){ - m->b4 = 0; - mantissa = m->b4_mant; - } - else{ - int bits = get_bits(gbc, 7); - mantissa = b4_mantissas[bits][0]; - m->b4_mant = b4_mantissas[bits][1]; - m->b4 = 1; - } - break; - case 5: - mantissa = b5_mantissas[get_bits(gbc, 4)]; - break; - default: /* 6 to 15 */ - /* Shift mantissa and sign-extend it. */ - mantissa = get_sbits(gbc, quantization_tab[bap]); - mantissa <<= 24 - quantization_tab[bap]; - break; + switch (bap) { + case 0: + if (dither) + mantissa = (av_lfg_get(&s->dith_state) & 0x7FFFFF) - 0x400000; + else + mantissa = 0; + break; + case 1: + if (m->b1) { + m->b1--; + mantissa = m->b1_mant[m->b1]; + } else { + int bits = get_bits(gbc, 5); + mantissa = b1_mantissas[bits][0]; + m->b1_mant[1] = b1_mantissas[bits][1]; + m->b1_mant[0] = b1_mantissas[bits][2]; + m->b1 = 2; + } + break; + case 2: + if (m->b2) { + m->b2--; + mantissa = m->b2_mant[m->b2]; + } else { + int bits = get_bits(gbc, 7); + mantissa = b2_mantissas[bits][0]; + m->b2_mant[1] = b2_mantissas[bits][1]; + m->b2_mant[0] = b2_mantissas[bits][2]; + m->b2 = 2; + } + break; + case 3: + mantissa = b3_mantissas[get_bits(gbc, 3)]; + break; + case 4: + if (m->b4) { + m->b4 = 0; + mantissa = m->b4_mant; + } else { + int bits = get_bits(gbc, 7); + mantissa = b4_mantissas[bits][0]; + m->b4_mant = b4_mantissas[bits][1]; + m->b4 = 1; + } + break; + case 5: + mantissa = b5_mantissas[get_bits(gbc, 4)]; + break; + default: /* 6 to 15 */ + /* Shift mantissa and sign-extend it. 
*/ + mantissa = get_sbits(gbc, quantization_tab[bap]); + mantissa <<= 24 - quantization_tab[bap]; + break; } coeffs[freq] = mantissa >> exps[freq]; } @@ -525,10 +522,10 @@ static void ac3_decode_transform_coeffs_ch(AC3DecodeContext *s, int ch_index, ma static void remove_dithering(AC3DecodeContext *s) { int ch, i; - for(ch=1; ch<=s->fbw_channels; ch++) { - if(!s->dither_flag[ch] && s->channel_in_cpl[ch]) { - for(i = s->start_freq[CPL_CH]; i<s->end_freq[CPL_CH]; i++) { - if(!s->bap[CPL_CH][i]) + for (ch = 1; ch <= s->fbw_channels; ch++) { + if (!s->dither_flag[ch] && s->channel_in_cpl[ch]) { + for (i = s->start_freq[CPL_CH]; i < s->end_freq[CPL_CH]; i++) { + if (!s->bap[CPL_CH][i]) s->fixed_coeffs[ch][i] = 0; } } @@ -536,7 +533,7 @@ static void remove_dithering(AC3DecodeContext *s) { } static void decode_transform_coeffs_ch(AC3DecodeContext *s, int blk, int ch, - mant_groups *m) + mant_groups *m) { if (!s->channel_uses_aht[ch]) { ac3_decode_transform_coeffs_ch(s, ch, m); @@ -580,7 +577,7 @@ static void decode_transform_coeffs(AC3DecodeContext *s, int blk) } do s->fixed_coeffs[ch][end] = 0; - while(++end < 256); + while (++end < 256); } /* zero the dithered coefficients for appropriate channels */ @@ -598,10 +595,10 @@ static void do_rematrixing(AC3DecodeContext *s) end = FFMIN(s->end_freq[1], s->end_freq[2]); - for(bnd=0; bnd<s->num_rematrixing_bands; bnd++) { - if(s->rematrixing_flags[bnd]) { - bndend = FFMIN(end, ff_ac3_rematrix_band_tab[bnd+1]); - for(i=ff_ac3_rematrix_band_tab[bnd]; i<bndend; i++) { + for (bnd = 0; bnd < s->num_rematrixing_bands; bnd++) { + if (s->rematrixing_flags[bnd]) { + bndend = FFMIN(end, ff_ac3_rematrix_band_tab[bnd + 1]); + for (i = ff_ac3_rematrix_band_tab[bnd]; i < bndend; i++) { int tmp0 = s->fixed_coeffs[1][i]; s->fixed_coeffs[1][i] += s->fixed_coeffs[2][i]; s->fixed_coeffs[2][i] = tmp0 - s->fixed_coeffs[2][i]; @@ -619,21 +616,23 @@ static inline void do_imdct(AC3DecodeContext *s, int channels) { int ch; - for (ch=1; ch<=channels; ch++) { + for (ch = 1; ch <= channels; ch++) { if (s->block_switch[ch]) { int i; - float *x = s->tmp_output+128; - for(i=0; i<128; i++) - x[i] = s->transform_coeffs[ch][2*i]; + float *x = s->tmp_output + 128; + for (i = 0; i < 128; i++) + x[i] = s->transform_coeffs[ch][2 * i]; s->imdct_256.imdct_half(&s->imdct_256, s->tmp_output, x); - s->dsp.vector_fmul_window(s->output[ch-1], s->delay[ch-1], s->tmp_output, s->window, 128); - for(i=0; i<128; i++) - x[i] = s->transform_coeffs[ch][2*i+1]; - s->imdct_256.imdct_half(&s->imdct_256, s->delay[ch-1], x); + s->dsp.vector_fmul_window(s->output[ch - 1], s->delay[ch - 1], + s->tmp_output, s->window, 128); + for (i = 0; i < 128; i++) + x[i] = s->transform_coeffs[ch][2 * i + 1]; + s->imdct_256.imdct_half(&s->imdct_256, s->delay[ch - 1], x); } else { s->imdct_512.imdct_half(&s->imdct_512, s->tmp_output, s->transform_coeffs[ch]); - s->dsp.vector_fmul_window(s->output[ch-1], s->delay[ch-1], s->tmp_output, s->window, 128); - memcpy(s->delay[ch-1], s->tmp_output+128, 128*sizeof(float)); + s->dsp.vector_fmul_window(s->output[ch - 1], s->delay[ch - 1], + s->tmp_output, s->window, 128); + memcpy(s->delay[ch - 1], s->tmp_output + 128, 128 * sizeof(float)); } } } @@ -641,24 +640,25 @@ static inline void do_imdct(AC3DecodeContext *s, int channels) /** * Downmix the output to mono or stereo. 
*/ -void ff_ac3_downmix_c(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len) +void ff_ac3_downmix_c(float (*samples)[256], float (*matrix)[2], + int out_ch, int in_ch, int len) { int i, j; float v0, v1; - if(out_ch == 2) { - for(i=0; i<len; i++) { + if (out_ch == 2) { + for (i = 0; i < len; i++) { v0 = v1 = 0.0f; - for(j=0; j<in_ch; j++) { + for (j = 0; j < in_ch; j++) { v0 += samples[j][i] * matrix[j][0]; v1 += samples[j][i] * matrix[j][1]; } samples[0][i] = v0; samples[1][i] = v1; } - } else if(out_ch == 1) { - for(i=0; i<len; i++) { + } else if (out_ch == 1) { + for (i = 0; i < len; i++) { v0 = 0.0f; - for(j=0; j<in_ch; j++) + for (j = 0; j < in_ch; j++) v0 += samples[j][i] * matrix[j][0]; samples[0][i] = v0; } @@ -671,25 +671,25 @@ void ff_ac3_downmix_c(float (*samples)[256], float (*matrix)[2], int out_ch, int static void ac3_upmix_delay(AC3DecodeContext *s) { int channel_data_size = sizeof(s->delay[0]); - switch(s->channel_mode) { - case AC3_CHMODE_DUALMONO: - case AC3_CHMODE_STEREO: - /* upmix mono to stereo */ - memcpy(s->delay[1], s->delay[0], channel_data_size); - break; - case AC3_CHMODE_2F2R: - memset(s->delay[3], 0, channel_data_size); - case AC3_CHMODE_2F1R: - memset(s->delay[2], 0, channel_data_size); - break; - case AC3_CHMODE_3F2R: - memset(s->delay[4], 0, channel_data_size); - case AC3_CHMODE_3F1R: - memset(s->delay[3], 0, channel_data_size); - case AC3_CHMODE_3F: - memcpy(s->delay[2], s->delay[1], channel_data_size); - memset(s->delay[1], 0, channel_data_size); - break; + switch (s->channel_mode) { + case AC3_CHMODE_DUALMONO: + case AC3_CHMODE_STEREO: + /* upmix mono to stereo */ + memcpy(s->delay[1], s->delay[0], channel_data_size); + break; + case AC3_CHMODE_2F2R: + memset(s->delay[3], 0, channel_data_size); + case AC3_CHMODE_2F1R: + memset(s->delay[2], 0, channel_data_size); + break; + case AC3_CHMODE_3F2R: + memset(s->delay[4], 0, channel_data_size); + case AC3_CHMODE_3F1R: + memset(s->delay[3], 0, channel_data_size); + case AC3_CHMODE_3F: + memcpy(s->delay[2], s->delay[1], channel_data_size); + memset(s->delay[1], 0, channel_data_size); + break; } } @@ -742,7 +742,7 @@ static void decode_band_structure(GetBitContext *gbc, int blk, int eac3, bnd_sz[0] = ecpl ? 6 : 12; for (bnd = 0, subbnd = 1; subbnd < n_subbands; subbnd++) { int subbnd_size = (ecpl && subbnd < 4) ? 
6 : 12; - if (band_struct[subbnd-1]) { + if (band_struct[subbnd - 1]) { n_bands--; bnd_sz[bnd] += subbnd_size; } else { @@ -779,7 +779,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk) if (s->block_switch_syntax) { for (ch = 1; ch <= fbw_channels; ch++) { s->block_switch[ch] = get_bits1(gbc); - if(ch > 1 && s->block_switch[ch] != s->block_switch[1]) + if (ch > 1 && s->block_switch[ch] != s->block_switch[1]) different_transforms = 1; } } @@ -794,13 +794,13 @@ static int decode_audio_block(AC3DecodeContext *s, int blk) /* dynamic range */ i = !(s->channel_mode); do { - if(get_bits1(gbc)) { - s->dynamic_range[i] = ((dynamic_range_tab[get_bits(gbc, 8)]-1.0) * - s->drc_scale)+1.0; - } else if(blk == 0) { + if (get_bits1(gbc)) { + s->dynamic_range[i] = ((dynamic_range_tab[get_bits(gbc, 8)] - 1.0) * + s->drc_scale) + 1.0; + } else if (blk == 0) { s->dynamic_range[i] = 1.0f; } - } while(i--); + } while (i--); /* spectral extension strategy */ if (s->eac3 && (!blk || get_bits1(gbc))) { @@ -881,7 +881,8 @@ static int decode_audio_block(AC3DecodeContext *s, int blk) bandsize = s->spx_band_sizes[bnd]; nratio = ((float)((bin + (bandsize >> 1))) / s->spx_dst_end_freq) - spx_blend; nratio = av_clipf(nratio, 0.0f, 1.0f); - nblend = sqrtf(3.0f * nratio); // noise is scaled by sqrt(3) to give unity variance + nblend = sqrtf(3.0f * nratio); // noise is scaled by sqrt(3) + // to give unity variance sblend = sqrtf(1.0f - nratio); bin += bandsize; @@ -891,7 +892,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk) if (spx_coord_exp == 15) spx_coord_mant <<= 1; else spx_coord_mant += 4; spx_coord_mant <<= (25 - spx_coord_exp - master_spx_coord); - spx_coord = spx_coord_mant * (1.0f/(1<<23)); + spx_coord = spx_coord_mant * (1.0f / (1 << 23)); /* multiply noise and signal blending factors by spx coordinate */ s->spx_noise_blend [ch][bnd] = nblend * spx_coord; @@ -964,8 +965,9 @@ static int decode_audio_block(AC3DecodeContext *s, int blk) s->phase_flags_in_use = 0; } } else if (!s->eac3) { - if(!blk) { - av_log(s->avctx, AV_LOG_ERROR, "new coupling strategy must be present in block 0\n"); + if (!blk) { + av_log(s->avctx, AV_LOG_ERROR, "new coupling strategy must " + "be present in block 0\n"); return -1; } else { s->cpl_in_use[blk] = s->cpl_in_use[blk-1]; @@ -994,7 +996,8 @@ static int decode_audio_block(AC3DecodeContext *s, int blk) s->cpl_coords[ch][bnd] >>= (cpl_coord_exp + master_cpl_coord); } } else if (!blk) { - av_log(s->avctx, AV_LOG_ERROR, "new coupling coordinates must be present in block 0\n"); + av_log(s->avctx, AV_LOG_ERROR, "new coupling coordinates must " + "be present in block 0\n"); return -1; } } else { @@ -1019,10 +1022,11 @@ static int decode_audio_block(AC3DecodeContext *s, int blk) } else if (s->spx_in_use && s->spx_src_start_freq <= 61) { s->num_rematrixing_bands--; } - for(bnd=0; bnd<s->num_rematrixing_bands; bnd++) + for (bnd = 0; bnd < s->num_rematrixing_bands; bnd++) s->rematrixing_flags[bnd] = get_bits1(gbc); } else if (!blk) { - av_log(s->avctx, AV_LOG_WARNING, "Warning: new rematrixing strategy not present in block 0\n"); + av_log(s->avctx, AV_LOG_WARNING, "Warning: " + "new rematrixing strategy not present in block 0\n"); s->num_rematrixing_bands = 0; } } @@ -1031,7 +1035,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk) for (ch = !cpl_in_use; ch <= s->channels; ch++) { if (!s->eac3) s->exp_strategy[blk][ch] = get_bits(gbc, 2 - (ch == s->lfe_ch)); - if(s->exp_strategy[blk][ch] != EXP_REUSE) + if (s->exp_strategy[blk][ch] != EXP_REUSE) 
bit_alloc_stages[ch] = 3; } @@ -1054,8 +1058,8 @@ static int decode_audio_block(AC3DecodeContext *s, int blk) s->end_freq[ch] = bandwidth_code * 3 + 73; } group_size = 3 << (s->exp_strategy[blk][ch] - 1); - s->num_exp_groups[ch] = (s->end_freq[ch]+group_size-4) / group_size; - if(blk > 0 && s->end_freq[ch] != prev) + s->num_exp_groups[ch] = (s->end_freq[ch] + group_size-4) / group_size; + if (blk > 0 && s->end_freq[ch] != prev) memset(bit_alloc_stages, 3, AC3_MAX_CHANNELS); } } @@ -1074,7 +1078,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk) av_log(s->avctx, AV_LOG_ERROR, "exponent out-of-range\n"); return -1; } - if(ch != CPL_CH && ch != s->lfe_ch) + if (ch != CPL_CH && ch != s->lfe_ch) skip_bits(gbc, 2); /* skip gainrng */ } } @@ -1087,17 +1091,18 @@ static int decode_audio_block(AC3DecodeContext *s, int blk) s->bit_alloc_params.slow_gain = ff_ac3_slow_gain_tab[get_bits(gbc, 2)]; s->bit_alloc_params.db_per_bit = ff_ac3_db_per_bit_tab[get_bits(gbc, 2)]; s->bit_alloc_params.floor = ff_ac3_floor_tab[get_bits(gbc, 3)]; - for(ch=!cpl_in_use; ch<=s->channels; ch++) + for (ch = !cpl_in_use; ch <= s->channels; ch++) bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2); } else if (!blk) { - av_log(s->avctx, AV_LOG_ERROR, "new bit allocation info must be present in block 0\n"); + av_log(s->avctx, AV_LOG_ERROR, "new bit allocation info must " + "be present in block 0\n"); return -1; } } /* signal-to-noise ratio offsets and fast gains (signal-to-mask ratios) */ - if(!s->eac3 || !blk){ - if(s->snr_offset_strategy && get_bits1(gbc)) { + if (!s->eac3 || !blk) { + if (s->snr_offset_strategy && get_bits1(gbc)) { int snr = 0; int csnr; csnr = (get_bits(gbc, 6) - 15) << 4; @@ -1106,7 +1111,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk) if (ch == i || s->snr_offset_strategy == 2) snr = (csnr + get_bits(gbc, 4)) << 2; /* run at least last bit allocation stage if snr offset changes */ - if(blk && s->snr_offset[ch] != snr) { + if (blk && s->snr_offset[ch] != snr) { bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 1); } s->snr_offset[ch] = snr; @@ -1116,7 +1121,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk) int prev = s->fast_gain[ch]; s->fast_gain[ch] = ff_ac3_fast_gain_tab[get_bits(gbc, 3)]; /* run last 2 bit allocation stages if fast gain changes */ - if(blk && prev != s->fast_gain[ch]) + if (blk && prev != s->fast_gain[ch]) bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2); } } @@ -1132,7 +1137,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk) int prev = s->fast_gain[ch]; s->fast_gain[ch] = ff_ac3_fast_gain_tab[get_bits(gbc, 3)]; /* run last 2 bit allocation stages if fast gain changes */ - if(blk && prev != s->fast_gain[ch]) + if (blk && prev != s->fast_gain[ch]) bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2); } } else if (s->eac3 && !blk) { @@ -1152,14 +1157,15 @@ static int decode_audio_block(AC3DecodeContext *s, int blk) int sl = get_bits(gbc, 3); /* run last 2 bit allocation stages for coupling channel if coupling leak changes */ - if(blk && (fl != s->bit_alloc_params.cpl_fast_leak || - sl != s->bit_alloc_params.cpl_slow_leak)) { + if (blk && (fl != s->bit_alloc_params.cpl_fast_leak || + sl != s->bit_alloc_params.cpl_slow_leak)) { bit_alloc_stages[CPL_CH] = FFMAX(bit_alloc_stages[CPL_CH], 2); } s->bit_alloc_params.cpl_fast_leak = fl; s->bit_alloc_params.cpl_slow_leak = sl; } else if (!s->eac3 && !blk) { - av_log(s->avctx, AV_LOG_ERROR, "new coupling leak info must be present in block 0\n"); + av_log(s->avctx, 
AV_LOG_ERROR, "new coupling leak info must " + "be present in block 0\n"); return -1; } s->first_cpl_leak = 0; @@ -1183,40 +1189,40 @@ static int decode_audio_block(AC3DecodeContext *s, int blk) for (seg = 0; seg < s->dba_nsegs[ch]; seg++) { s->dba_offsets[ch][seg] = get_bits(gbc, 5); s->dba_lengths[ch][seg] = get_bits(gbc, 4); - s->dba_values[ch][seg] = get_bits(gbc, 3); + s->dba_values[ch][seg] = get_bits(gbc, 3); } /* run last 2 bit allocation stages if new dba values */ bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2); } } - } else if(blk == 0) { - for(ch=0; ch<=s->channels; ch++) { + } else if (blk == 0) { + for (ch = 0; ch <= s->channels; ch++) { s->dba_mode[ch] = DBA_NONE; } } /* Bit allocation */ - for(ch=!cpl_in_use; ch<=s->channels; ch++) { - if(bit_alloc_stages[ch] > 2) { + for (ch = !cpl_in_use; ch <= s->channels; ch++) { + if (bit_alloc_stages[ch] > 2) { /* Exponent mapping into PSD and PSD integration */ ff_ac3_bit_alloc_calc_psd(s->dexps[ch], s->start_freq[ch], s->end_freq[ch], s->psd[ch], s->band_psd[ch]); } - if(bit_alloc_stages[ch] > 1) { + if (bit_alloc_stages[ch] > 1) { /* Compute excitation function, Compute masking curve, and Apply delta bit allocation */ if (ff_ac3_bit_alloc_calc_mask(&s->bit_alloc_params, s->band_psd[ch], - s->start_freq[ch], s->end_freq[ch], - s->fast_gain[ch], (ch == s->lfe_ch), - s->dba_mode[ch], s->dba_nsegs[ch], + s->start_freq[ch], s->end_freq[ch], + s->fast_gain[ch], (ch == s->lfe_ch), + s->dba_mode[ch], s->dba_nsegs[ch], s->dba_offsets[ch], s->dba_lengths[ch], - s->dba_values[ch], s->mask[ch])) { + s->dba_values[ch], s->mask[ch])) { av_log(s->avctx, AV_LOG_ERROR, "error in bit allocation\n"); return -1; } } - if(bit_alloc_stages[ch] > 0) { + if (bit_alloc_stages[ch] > 0) { /* Compute bit allocation */ const uint8_t *bap_tab = s->channel_uses_aht[ch] ? ff_eac3_hebap_tab : ff_ac3_bap_tab; @@ -1231,7 +1237,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk) /* unused dummy data */ if (s->skip_syntax && get_bits1(gbc)) { int skipl = get_bits(gbc, 9); - while(skipl--) + while (skipl--) skip_bits(gbc, 8); } @@ -1242,18 +1248,19 @@ static int decode_audio_block(AC3DecodeContext *s, int blk) /* TODO: generate enhanced coupling coordinates and uncouple */ /* recover coefficients if rematrixing is in use */ - if(s->channel_mode == AC3_CHMODE_STEREO) + if (s->channel_mode == AC3_CHMODE_STEREO) do_rematrixing(s); /* apply scaling to coefficients (headroom, dynrng) */ - for(ch=1; ch<=s->channels; ch++) { + for (ch = 1; ch <= s->channels; ch++) { float gain = s->mul_bias / 4194304.0f; - if(s->channel_mode == AC3_CHMODE_DUALMONO) { - gain *= s->dynamic_range[2-ch]; + if (s->channel_mode == AC3_CHMODE_DUALMONO) { + gain *= s->dynamic_range[2 - ch]; } else { gain *= s->dynamic_range[0]; } - s->fmt_conv.int32_to_float_fmul_scalar(s->transform_coeffs[ch], s->fixed_coeffs[ch], gain, 256); + s->fmt_conv.int32_to_float_fmul_scalar(s->transform_coeffs[ch], + s->fixed_coeffs[ch], gain, 256); } /* apply spectral extension to high frequency bins */ @@ -1267,27 +1274,30 @@ static int decode_audio_block(AC3DecodeContext *s, int blk) downmix_output = s->channels != s->out_channels && !((s->output_mode & AC3_OUTPUT_LFEON) && s->fbw_channels == s->out_channels); - if(different_transforms) { + if (different_transforms) { /* the delay samples have already been downmixed, so we upmix the delay samples in order to reconstruct all channels before downmixing. 
*/ - if(s->downmixed) { + if (s->downmixed) { s->downmixed = 0; ac3_upmix_delay(s); } do_imdct(s, s->channels); - if(downmix_output) { - s->dsp.ac3_downmix(s->output, s->downmix_coeffs, s->out_channels, s->fbw_channels, 256); + if (downmix_output) { + s->dsp.ac3_downmix(s->output, s->downmix_coeffs, + s->out_channels, s->fbw_channels, 256); } } else { - if(downmix_output) { - s->dsp.ac3_downmix(s->transform_coeffs+1, s->downmix_coeffs, s->out_channels, s->fbw_channels, 256); + if (downmix_output) { + s->dsp.ac3_downmix(s->transform_coeffs + 1, s->downmix_coeffs, + s->out_channels, s->fbw_channels, 256); } - if(downmix_output && !s->downmixed) { + if (downmix_output && !s->downmixed) { s->downmixed = 1; - s->dsp.ac3_downmix(s->delay, s->downmix_coeffs, s->out_channels, s->fbw_channels, 128); + s->dsp.ac3_downmix(s->delay, s->downmix_coeffs, s->out_channels, + s->fbw_channels, 128); } do_imdct(s, s->out_channels); @@ -1327,33 +1337,34 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data, err = parse_frame_header(s); if (err) { - switch(err) { - case AAC_AC3_PARSE_ERROR_SYNC: - av_log(avctx, AV_LOG_ERROR, "frame sync error\n"); - return -1; - case AAC_AC3_PARSE_ERROR_BSID: - av_log(avctx, AV_LOG_ERROR, "invalid bitstream id\n"); - break; - case AAC_AC3_PARSE_ERROR_SAMPLE_RATE: - av_log(avctx, AV_LOG_ERROR, "invalid sample rate\n"); - break; - case AAC_AC3_PARSE_ERROR_FRAME_SIZE: - av_log(avctx, AV_LOG_ERROR, "invalid frame size\n"); - break; - case AAC_AC3_PARSE_ERROR_FRAME_TYPE: - /* skip frame if CRC is ok. otherwise use error concealment. */ - /* TODO: add support for substreams and dependent frames */ - if(s->frame_type == EAC3_FRAME_TYPE_DEPENDENT || s->substreamid) { - av_log(avctx, AV_LOG_ERROR, "unsupported frame type : skipping frame\n"); - *got_frame_ptr = 0; - return s->frame_size; - } else { - av_log(avctx, AV_LOG_ERROR, "invalid frame type\n"); - } - break; - default: - av_log(avctx, AV_LOG_ERROR, "invalid header\n"); - break; + switch (err) { + case AAC_AC3_PARSE_ERROR_SYNC: + av_log(avctx, AV_LOG_ERROR, "frame sync error\n"); + return -1; + case AAC_AC3_PARSE_ERROR_BSID: + av_log(avctx, AV_LOG_ERROR, "invalid bitstream id\n"); + break; + case AAC_AC3_PARSE_ERROR_SAMPLE_RATE: + av_log(avctx, AV_LOG_ERROR, "invalid sample rate\n"); + break; + case AAC_AC3_PARSE_ERROR_FRAME_SIZE: + av_log(avctx, AV_LOG_ERROR, "invalid frame size\n"); + break; + case AAC_AC3_PARSE_ERROR_FRAME_TYPE: + /* skip frame if CRC is ok. otherwise use error concealment. 
*/ + /* TODO: add support for substreams and dependent frames */ + if (s->frame_type == EAC3_FRAME_TYPE_DEPENDENT || s->substreamid) { + av_log(avctx, AV_LOG_ERROR, "unsupported frame type : " + "skipping frame\n"); + *got_frame_ptr = 0; + return s->frame_size; + } else { + av_log(avctx, AV_LOG_ERROR, "invalid frame type\n"); + } + break; + default: + av_log(avctx, AV_LOG_ERROR, "invalid header\n"); + break; } } else { /* check that reported frame size fits in input buffer */ @@ -1362,7 +1373,8 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data, err = AAC_AC3_PARSE_ERROR_FRAME_SIZE; } else if (avctx->err_recognition & AV_EF_CRCCHECK) { /* check for crc mismatch */ - if (av_crc(av_crc_get_table(AV_CRC_16_ANSI), 0, &buf[2], s->frame_size-2)) { + if (av_crc(av_crc_get_table(AV_CRC_16_ANSI), 0, &buf[2], + s->frame_size - 2)) { av_log(avctx, AV_LOG_ERROR, "frame CRC mismatch\n"); err = AAC_AC3_PARSE_ERROR_CRC; } @@ -1372,12 +1384,12 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data, /* if frame is ok, set audio parameters */ if (!err) { avctx->sample_rate = s->sample_rate; - avctx->bit_rate = s->bit_rate; + avctx->bit_rate = s->bit_rate; /* channel config */ s->out_channels = s->channels; - s->output_mode = s->channel_mode; - if(s->lfe_on) + s->output_mode = s->channel_mode; + if (s->lfe_on) s->output_mode |= AC3_OUTPUT_LFEON; if (avctx->request_channels > 0 && avctx->request_channels <= 2 && avctx->request_channels < s->channels) { @@ -1385,7 +1397,7 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data, s->output_mode = avctx->request_channels == 1 ? AC3_CHMODE_MONO : AC3_CHMODE_STEREO; s->channel_layout = ff_ac3_channel_layout_tab[s->output_mode]; } - avctx->channels = s->out_channels; + avctx->channels = s->out_channels; avctx->channel_layout = s->channel_layout; s->loro_center_mix_level = gain_levels[ center_levels[s-> center_mix_level]]; @@ -1393,13 +1405,13 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data, s->ltrt_center_mix_level = LEVEL_MINUS_3DB; s->ltrt_surround_mix_level = LEVEL_MINUS_3DB; /* set downmixing coefficients if needed */ - if(s->channels != s->out_channels && !((s->output_mode & AC3_OUTPUT_LFEON) && + if (s->channels != s->out_channels && !((s->output_mode & AC3_OUTPUT_LFEON) && s->fbw_channels == s->out_channels)) { set_downmix_coeffs(s); } } else if (!s->out_channels) { s->out_channels = avctx->channels; - if(s->out_channels < s->channels) + if (s->out_channels < s->channels) s->output_mode = s->out_channels == 1 ? 
AC3_CHMODE_MONO : AC3_CHMODE_STEREO; } /* set audio service type based on bitstream mode for AC-3 */ @@ -1476,19 +1488,19 @@ static const AVClass ac3_decoder_class = { }; AVCodec ff_ac3_decoder = { - .name = "ac3", - .type = AVMEDIA_TYPE_AUDIO, - .id = CODEC_ID_AC3, + .name = "ac3", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_AC3, .priv_data_size = sizeof (AC3DecodeContext), - .init = ac3_decode_init, - .close = ac3_decode_end, - .decode = ac3_decode_frame, - .capabilities = CODEC_CAP_DR1, - .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"), - .sample_fmts = (const enum AVSampleFormat[]) { - AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE - }, - .priv_class = &ac3_decoder_class, + .init = ac3_decode_init, + .close = ac3_decode_end, + .decode = ac3_decode_frame, + .capabilities = CODEC_CAP_DR1, + .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"), + .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT, + AV_SAMPLE_FMT_S16, + AV_SAMPLE_FMT_NONE }, + .priv_class = &ac3_decoder_class, }; #if CONFIG_EAC3_DECODER @@ -1498,19 +1510,20 @@ static const AVClass eac3_decoder_class = { .option = options, .version = LIBAVUTIL_VERSION_INT, }; + AVCodec ff_eac3_decoder = { - .name = "eac3", - .type = AVMEDIA_TYPE_AUDIO, - .id = CODEC_ID_EAC3, + .name = "eac3", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_EAC3, .priv_data_size = sizeof (AC3DecodeContext), - .init = ac3_decode_init, - .close = ac3_decode_end, - .decode = ac3_decode_frame, - .capabilities = CODEC_CAP_DR1, - .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52B (AC-3, E-AC-3)"), - .sample_fmts = (const enum AVSampleFormat[]) { - AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE - }, - .priv_class = &eac3_decoder_class, + .init = ac3_decode_init, + .close = ac3_decode_end, + .decode = ac3_decode_frame, + .capabilities = CODEC_CAP_DR1, + .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52B (AC-3, E-AC-3)"), + .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT, + AV_SAMPLE_FMT_S16, + AV_SAMPLE_FMT_NONE }, + .priv_class = &eac3_decoder_class, }; #endif diff --git a/libavcodec/adpcmenc.c b/libavcodec/adpcmenc.c index c193f5c7ef..e500a1cdbf 100644 --- a/libavcodec/adpcmenc.c +++ b/libavcodec/adpcmenc.c @@ -66,37 +66,45 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx) if (avctx->channels > 2) return -1; /* only stereo or mono =) */ - if(avctx->trellis && (unsigned)avctx->trellis > 16U){ + if (avctx->trellis && (unsigned)avctx->trellis > 16U) { av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n"); return -1; } if (avctx->trellis) { - int frontier = 1 << avctx->trellis; + int frontier = 1 << avctx->trellis; int max_paths = frontier * FREEZE_INTERVAL; - FF_ALLOC_OR_GOTO(avctx, s->paths, max_paths * sizeof(*s->paths), error); - FF_ALLOC_OR_GOTO(avctx, s->node_buf, 2 * frontier * sizeof(*s->node_buf), error); - FF_ALLOC_OR_GOTO(avctx, s->nodep_buf, 2 * frontier * sizeof(*s->nodep_buf), error); - FF_ALLOC_OR_GOTO(avctx, s->trellis_hash, 65536 * sizeof(*s->trellis_hash), error); + FF_ALLOC_OR_GOTO(avctx, s->paths, + max_paths * sizeof(*s->paths), error); + FF_ALLOC_OR_GOTO(avctx, s->node_buf, + 2 * frontier * sizeof(*s->node_buf), error); + FF_ALLOC_OR_GOTO(avctx, s->nodep_buf, + 2 * frontier * sizeof(*s->nodep_buf), error); + FF_ALLOC_OR_GOTO(avctx, s->trellis_hash, + 65536 * sizeof(*s->trellis_hash), error); } avctx->bits_per_coded_sample = av_get_bits_per_sample(avctx->codec->id); - switch(avctx->codec->id) { + switch (avctx->codec->id) { case CODEC_ID_ADPCM_IMA_WAV: - avctx->frame_size = (BLKSIZE 
- 4 * avctx->channels) * 8 / (4 * avctx->channels) + 1; /* each 16 bits sample gives one nibble */ - /* and we have 4 bytes per channel overhead */ + /* each 16 bits sample gives one nibble + and we have 4 bytes per channel overhead */ + avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 / + (4 * avctx->channels) + 1; + /* seems frame_size isn't taken into account... + have to buffer the samples :-( */ avctx->block_align = BLKSIZE; avctx->bits_per_coded_sample = 4; - /* seems frame_size isn't taken into account... have to buffer the samples :-( */ break; case CODEC_ID_ADPCM_IMA_QT: - avctx->frame_size = 64; + avctx->frame_size = 64; avctx->block_align = 34 * avctx->channels; break; case CODEC_ID_ADPCM_MS: - avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2; /* each 16 bits sample gives one nibble */ - /* and we have 7 bytes per channel overhead */ + /* each 16 bits sample gives one nibble + and we have 7 bytes per channel overhead */ + avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2; avctx->block_align = BLKSIZE; avctx->bits_per_coded_sample = 4; avctx->extradata_size = 32; @@ -111,14 +119,15 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx) } break; case CODEC_ID_ADPCM_YAMAHA: - avctx->frame_size = BLKSIZE * avctx->channels; + avctx->frame_size = BLKSIZE * avctx->channels; avctx->block_align = BLKSIZE; break; case CODEC_ID_ADPCM_SWF: if (avctx->sample_rate != 11025 && avctx->sample_rate != 22050 && avctx->sample_rate != 44100) { - av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, 22050 or 44100\n"); + av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, " + "22050 or 44100\n"); goto error; } avctx->frame_size = 512 * (avctx->sample_rate / 11025); @@ -127,7 +136,7 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx) goto error; } - avctx->coded_frame= avcodec_alloc_frame(); + avctx->coded_frame = avcodec_alloc_frame(); avctx->coded_frame->key_frame= 1; return 0; @@ -152,19 +161,23 @@ static av_cold int adpcm_encode_close(AVCodecContext *avctx) } -static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c, short sample) +static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c, + short sample) { - int delta = sample - c->prev_sample; - int nibble = FFMIN(7, abs(delta)*4/ff_adpcm_step_table[c->step_index]) + (delta<0)*8; - c->prev_sample += ((ff_adpcm_step_table[c->step_index] * ff_adpcm_yamaha_difflookup[nibble]) / 8); + int delta = sample - c->prev_sample; + int nibble = FFMIN(7, abs(delta) * 4 / + ff_adpcm_step_table[c->step_index]) + (delta < 0) * 8; + c->prev_sample += ((ff_adpcm_step_table[c->step_index] * + ff_adpcm_yamaha_difflookup[nibble]) / 8); c->prev_sample = av_clip_int16(c->prev_sample); - c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88); + c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88); return nibble; } -static inline unsigned char adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c, short sample) +static inline unsigned char adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c, + short sample) { - int delta = sample - c->prev_sample; + int delta = sample - c->prev_sample; int diff, step = ff_adpcm_step_table[c->step_index]; int nibble = 8*(delta < 0); @@ -173,17 +186,17 @@ static inline unsigned char adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c, if (delta >= step) { nibble |= 4; - delta -= step; + delta -= step; } step >>= 1; if (delta >= step) { nibble |= 2; - delta -= step; + delta -= 
step; } step >>= 1; if (delta >= step) { nibble |= 1; - delta -= step; + delta -= step; } diff -= delta; @@ -193,47 +206,53 @@ static inline unsigned char adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c, c->prev_sample += diff; c->prev_sample = av_clip_int16(c->prev_sample); - c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88); + c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88); return nibble; } -static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c, short sample) +static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c, + short sample) { int predictor, nibble, bias; - predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64; + predictor = (((c->sample1) * (c->coeff1)) + + (( c->sample2) * (c->coeff2))) / 64; - nibble= sample - predictor; - if(nibble>=0) bias= c->idelta/2; - else bias=-c->idelta/2; + nibble = sample - predictor; + if (nibble >= 0) + bias = c->idelta / 2; + else + bias = -c->idelta / 2; - nibble= (nibble + bias) / c->idelta; - nibble= av_clip(nibble, -8, 7)&0x0F; + nibble = (nibble + bias) / c->idelta; + nibble = av_clip(nibble, -8, 7) & 0x0F; - predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta; + predictor += (signed)((nibble & 0x08) ? (nibble - 0x10) : nibble) * c->idelta; c->sample2 = c->sample1; c->sample1 = av_clip_int16(predictor); c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8; - if (c->idelta < 16) c->idelta = 16; + if (c->idelta < 16) + c->idelta = 16; return nibble; } -static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c, short sample) +static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c, + short sample) { int nibble, delta; - if(!c->step) { + if (!c->step) { c->predictor = 0; - c->step = 127; + c->step = 127; } delta = sample - c->predictor; - nibble = FFMIN(7, abs(delta)*4/c->step) + (delta<0)*8; + nibble = FFMIN(7, abs(delta) * 4 / c->step) + (delta < 0) * 8; c->predictor += ((c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8); c->predictor = av_clip_int16(c->predictor); @@ -249,57 +268,61 @@ static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples, //FIXME 6% faster if frontier is a compile-time constant ADPCMEncodeContext *s = avctx->priv_data; const int frontier = 1 << avctx->trellis; - const int stride = avctx->channels; - const int version = avctx->codec->id; - TrellisPath *paths = s->paths, *p; - TrellisNode *node_buf = s->node_buf; - TrellisNode **nodep_buf = s->nodep_buf; - TrellisNode **nodes = nodep_buf; // nodes[] is always sorted by .ssd + const int stride = avctx->channels; + const int version = avctx->codec->id; + TrellisPath *paths = s->paths, *p; + TrellisNode *node_buf = s->node_buf; + TrellisNode **nodep_buf = s->nodep_buf; + TrellisNode **nodes = nodep_buf; // nodes[] is always sorted by .ssd TrellisNode **nodes_next = nodep_buf + frontier; int pathn = 0, froze = -1, i, j, k, generation = 0; uint8_t *hash = s->trellis_hash; memset(hash, 0xff, 65536 * sizeof(*hash)); memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf)); - nodes[0] = node_buf + frontier; - nodes[0]->ssd = 0; - nodes[0]->path = 0; - nodes[0]->step = c->step_index; + nodes[0] = node_buf + frontier; + nodes[0]->ssd = 0; + nodes[0]->path = 0; + nodes[0]->step = c->step_index; nodes[0]->sample1 = c->sample1; nodes[0]->sample2 = c->sample2; - if((version == CODEC_ID_ADPCM_IMA_WAV) || (version == CODEC_ID_ADPCM_IMA_QT) || (version == 
CODEC_ID_ADPCM_SWF)) + if (version == CODEC_ID_ADPCM_IMA_WAV || + version == CODEC_ID_ADPCM_IMA_QT || + version == CODEC_ID_ADPCM_SWF) nodes[0]->sample1 = c->prev_sample; - if(version == CODEC_ID_ADPCM_MS) + if (version == CODEC_ID_ADPCM_MS) nodes[0]->step = c->idelta; - if(version == CODEC_ID_ADPCM_YAMAHA) { - if(c->step == 0) { - nodes[0]->step = 127; + if (version == CODEC_ID_ADPCM_YAMAHA) { + if (c->step == 0) { + nodes[0]->step = 127; nodes[0]->sample1 = 0; } else { - nodes[0]->step = c->step; + nodes[0]->step = c->step; nodes[0]->sample1 = c->predictor; } } - for(i=0; i<n; i++) { + for (i = 0; i < n; i++) { TrellisNode *t = node_buf + frontier*(i&1); TrellisNode **u; - int sample = samples[i*stride]; + int sample = samples[i * stride]; int heap_pos = 0; - memset(nodes_next, 0, frontier*sizeof(TrellisNode*)); - for(j=0; j<frontier && nodes[j]; j++) { - // higher j have higher ssd already, so they're likely to yield a suboptimal next sample too - const int range = (j < frontier/2) ? 1 : 0; - const int step = nodes[j]->step; + memset(nodes_next, 0, frontier * sizeof(TrellisNode*)); + for (j = 0; j < frontier && nodes[j]; j++) { + // higher j have higher ssd already, so they're likely + // to yield a suboptimal next sample too + const int range = (j < frontier / 2) ? 1 : 0; + const int step = nodes[j]->step; int nidx; - if(version == CODEC_ID_ADPCM_MS) { - const int predictor = ((nodes[j]->sample1 * c->coeff1) + (nodes[j]->sample2 * c->coeff2)) / 64; - const int div = (sample - predictor) / step; + if (version == CODEC_ID_ADPCM_MS) { + const int predictor = ((nodes[j]->sample1 * c->coeff1) + + (nodes[j]->sample2 * c->coeff2)) / 64; + const int div = (sample - predictor) / step; const int nmin = av_clip(div-range, -8, 6); const int nmax = av_clip(div+range, -7, 7); - for(nidx=nmin; nidx<=nmax; nidx++) { + for (nidx = nmin; nidx <= nmax; nidx++) { const int nibble = nidx & 0xf; - int dec_sample = predictor + nidx * step; + int dec_sample = predictor + nidx * step; #define STORE_NODE(NAME, STEP_INDEX)\ int d;\ uint32_t ssd;\ @@ -334,25 +357,26 @@ static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples, } else {\ /* Try to replace one of the leaf nodes with the new \ * one, but try a different slot each time. */\ - pos = (frontier >> 1) + (heap_pos & ((frontier >> 1) - 1));\ + pos = (frontier >> 1) +\ + (heap_pos & ((frontier >> 1) - 1));\ if (ssd > nodes_next[pos]->ssd)\ goto next_##NAME;\ heap_pos++;\ }\ *h = generation;\ - u = nodes_next[pos];\ - if(!u) {\ - assert(pathn < FREEZE_INTERVAL<<avctx->trellis);\ + u = nodes_next[pos];\ + if (!u) {\ + assert(pathn < FREEZE_INTERVAL << avctx->trellis);\ u = t++;\ nodes_next[pos] = u;\ u->path = pathn++;\ }\ - u->ssd = ssd;\ + u->ssd = ssd;\ u->step = STEP_INDEX;\ u->sample2 = nodes[j]->sample1;\ u->sample1 = dec_sample;\ paths[u->path].nibble = nibble;\ - paths[u->path].prev = nodes[j]->path;\ + paths[u->path].prev = nodes[j]->path;\ /* Sift the newly inserted node up in the heap to \ * restore the heap property. 
*/\ while (pos > 0) {\ @@ -363,24 +387,34 @@ static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples, pos = parent;\ }\ next_##NAME:; - STORE_NODE(ms, FFMAX(16, (ff_adpcm_AdaptationTable[nibble] * step) >> 8)); + STORE_NODE(ms, FFMAX(16, + (ff_adpcm_AdaptationTable[nibble] * step) >> 8)); } - } else if((version == CODEC_ID_ADPCM_IMA_WAV)|| (version == CODEC_ID_ADPCM_IMA_QT)|| (version == CODEC_ID_ADPCM_SWF)) { + } else if (version == CODEC_ID_ADPCM_IMA_WAV || + version == CODEC_ID_ADPCM_IMA_QT || + version == CODEC_ID_ADPCM_SWF) { #define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\ const int predictor = nodes[j]->sample1;\ const int div = (sample - predictor) * 4 / STEP_TABLE;\ - int nmin = av_clip(div-range, -7, 6);\ - int nmax = av_clip(div+range, -6, 7);\ - if(nmin<=0) nmin--; /* distinguish -0 from +0 */\ - if(nmax<0) nmax--;\ - for(nidx=nmin; nidx<=nmax; nidx++) {\ - const int nibble = nidx<0 ? 7-nidx : nidx;\ - int dec_sample = predictor + (STEP_TABLE * ff_adpcm_yamaha_difflookup[nibble]) / 8;\ + int nmin = av_clip(div - range, -7, 6);\ + int nmax = av_clip(div + range, -6, 7);\ + if (nmin <= 0)\ + nmin--; /* distinguish -0 from +0 */\ + if (nmax < 0)\ + nmax--;\ + for (nidx = nmin; nidx <= nmax; nidx++) {\ + const int nibble = nidx < 0 ? 7 - nidx : nidx;\ + int dec_sample = predictor +\ + (STEP_TABLE *\ + ff_adpcm_yamaha_difflookup[nibble]) / 8;\ STORE_NODE(NAME, STEP_INDEX);\ } - LOOP_NODES(ima, ff_adpcm_step_table[step], av_clip(step + ff_adpcm_index_table[nibble], 0, 88)); + LOOP_NODES(ima, ff_adpcm_step_table[step], + av_clip(step + ff_adpcm_index_table[nibble], 0, 88)); } else { //CODEC_ID_ADPCM_YAMAHA - LOOP_NODES(yamaha, step, av_clip((step * ff_adpcm_yamaha_indexscale[nibble]) >> 8, 127, 24567)); + LOOP_NODES(yamaha, step, + av_clip((step * ff_adpcm_yamaha_indexscale[nibble]) >> 8, + 127, 24567)); #undef LOOP_NODES #undef STORE_NODE } @@ -397,16 +431,16 @@ static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples, } // prevent overflow - if(nodes[0]->ssd > (1<<28)) { - for(j=1; j<frontier && nodes[j]; j++) + if (nodes[0]->ssd > (1 << 28)) { + for (j = 1; j < frontier && nodes[j]; j++) nodes[j]->ssd -= nodes[0]->ssd; nodes[0]->ssd = 0; } // merge old paths to save memory - if(i == froze + FREEZE_INTERVAL) { + if (i == froze + FREEZE_INTERVAL) { p = &paths[nodes[0]->path]; - for(k=i; k>froze; k--) { + for (k = i; k > froze; k--) { dst[k] = p->nibble; p = &paths[p->prev]; } @@ -415,26 +449,26 @@ static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples, // other nodes might use paths that don't coincide with the frozen one. // checking which nodes do so is too slow, so just kill them all. // this also slightly improves quality, but I don't know why. 
- memset(nodes+1, 0, (frontier-1)*sizeof(TrellisNode*)); + memset(nodes + 1, 0, (frontier - 1) * sizeof(TrellisNode*)); } } p = &paths[nodes[0]->path]; - for(i=n-1; i>froze; i--) { + for (i = n - 1; i > froze; i--) { dst[i] = p->nibble; p = &paths[p->prev]; } - c->predictor = nodes[0]->sample1; - c->sample1 = nodes[0]->sample1; - c->sample2 = nodes[0]->sample2; + c->predictor = nodes[0]->sample1; + c->sample1 = nodes[0]->sample1; + c->sample2 = nodes[0]->sample2; c->step_index = nodes[0]->step; - c->step = nodes[0]->step; - c->idelta = nodes[0]->step; + c->step = nodes[0]->step; + c->idelta = nodes[0]->step; } static int adpcm_encode_frame(AVCodecContext *avctx, - unsigned char *frame, int buf_size, void *data) + unsigned char *frame, int buf_size, void *data) { int n, i, st; short *samples; @@ -444,98 +478,96 @@ static int adpcm_encode_frame(AVCodecContext *avctx, dst = frame; samples = (short *)data; - st= avctx->channels == 2; -/* n = (BLKSIZE - 4 * avctx->channels) / (2 * 8 * avctx->channels); */ + st = avctx->channels == 2; + /* n = (BLKSIZE - 4 * avctx->channels) / (2 * 8 * avctx->channels); */ switch(avctx->codec->id) { case CODEC_ID_ADPCM_IMA_WAV: n = avctx->frame_size / 8; - c->status[0].prev_sample = (signed short)samples[0]; /* XXX */ -/* c->status[0].step_index = 0; *//* XXX: not sure how to init the state machine */ - bytestream_put_le16(&dst, c->status[0].prev_sample); - *dst++ = (unsigned char)c->status[0].step_index; - *dst++ = 0; /* unknown */ + c->status[0].prev_sample = (signed short)samples[0]; /* XXX */ + /* c->status[0].step_index = 0; + XXX: not sure how to init the state machine */ + bytestream_put_le16(&dst, c->status[0].prev_sample); + *dst++ = (unsigned char)c->status[0].step_index; + *dst++ = 0; /* unknown */ + samples++; + if (avctx->channels == 2) { + c->status[1].prev_sample = (signed short)samples[0]; + /* c->status[1].step_index = 0; */ + bytestream_put_le16(&dst, c->status[1].prev_sample); + *dst++ = (unsigned char)c->status[1].step_index; + *dst++ = 0; samples++; - if (avctx->channels == 2) { - c->status[1].prev_sample = (signed short)samples[0]; -/* c->status[1].step_index = 0; */ - bytestream_put_le16(&dst, c->status[1].prev_sample); - *dst++ = (unsigned char)c->status[1].step_index; - *dst++ = 0; - samples++; - } + } - /* stereo: 4 bytes (8 samples) for left, 4 bytes for right, 4 bytes left, ... */ - if(avctx->trellis > 0) { - FF_ALLOC_OR_GOTO(avctx, buf, 2*n*8, error); - adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n*8); - if(avctx->channels == 2) - adpcm_compress_trellis(avctx, samples+1, buf + n*8, &c->status[1], n*8); - for(i=0; i<n; i++) { - *dst++ = buf[8*i+0] | (buf[8*i+1] << 4); - *dst++ = buf[8*i+2] | (buf[8*i+3] << 4); - *dst++ = buf[8*i+4] | (buf[8*i+5] << 4); - *dst++ = buf[8*i+6] | (buf[8*i+7] << 4); - if (avctx->channels == 2) { - uint8_t *buf1 = buf + n*8; - *dst++ = buf1[8*i+0] | (buf1[8*i+1] << 4); - *dst++ = buf1[8*i+2] | (buf1[8*i+3] << 4); - *dst++ = buf1[8*i+4] | (buf1[8*i+5] << 4); - *dst++ = buf1[8*i+6] | (buf1[8*i+7] << 4); - } + /* stereo: 4 bytes (8 samples) for left, + 4 bytes for right, 4 bytes left, ... 
*/ + if (avctx->trellis > 0) { + FF_ALLOC_OR_GOTO(avctx, buf, 2 * n * 8, error); + adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n * 8); + if (avctx->channels == 2) + adpcm_compress_trellis(avctx, samples + 1, buf + n * 8, + &c->status[1], n * 8); + for (i = 0; i < n; i++) { + *dst++ = buf[8 * i + 0] | (buf[8 * i + 1] << 4); + *dst++ = buf[8 * i + 2] | (buf[8 * i + 3] << 4); + *dst++ = buf[8 * i + 4] | (buf[8 * i + 5] << 4); + *dst++ = buf[8 * i + 6] | (buf[8 * i + 7] << 4); + if (avctx->channels == 2) { + uint8_t *buf1 = buf + n * 8; + *dst++ = buf1[8 * i + 0] | (buf1[8 * i + 1] << 4); + *dst++ = buf1[8 * i + 2] | (buf1[8 * i + 3] << 4); + *dst++ = buf1[8 * i + 4] | (buf1[8 * i + 5] << 4); + *dst++ = buf1[8 * i + 6] | (buf1[8 * i + 7] << 4); } - av_free(buf); - } else - for (; n>0; n--) { - *dst = adpcm_ima_compress_sample(&c->status[0], samples[0]); - *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels]) << 4; - dst++; - *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 2]); - *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 3]) << 4; - dst++; - *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 4]); - *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 5]) << 4; - dst++; - *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 6]); - *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 7]) << 4; - dst++; + } + av_free(buf); + } else { + for (; n > 0; n--) { + *dst = adpcm_ima_compress_sample(&c->status[0], samples[0]); + *dst++ |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels ]) << 4; + *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 2]); + *dst++ |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 3]) << 4; + *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 4]); + *dst++ |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 5]) << 4; + *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 6]); + *dst++ |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 7]) << 4; /* right channel */ if (avctx->channels == 2) { - *dst = adpcm_ima_compress_sample(&c->status[1], samples[1]); - *dst |= adpcm_ima_compress_sample(&c->status[1], samples[3]) << 4; - dst++; - *dst = adpcm_ima_compress_sample(&c->status[1], samples[5]); - *dst |= adpcm_ima_compress_sample(&c->status[1], samples[7]) << 4; - dst++; - *dst = adpcm_ima_compress_sample(&c->status[1], samples[9]); - *dst |= adpcm_ima_compress_sample(&c->status[1], samples[11]) << 4; - dst++; - *dst = adpcm_ima_compress_sample(&c->status[1], samples[13]); - *dst |= adpcm_ima_compress_sample(&c->status[1], samples[15]) << 4; - dst++; + *dst = adpcm_ima_compress_sample(&c->status[1], samples[1 ]); + *dst++ |= adpcm_ima_compress_sample(&c->status[1], samples[3 ]) << 4; + *dst = adpcm_ima_compress_sample(&c->status[1], samples[5 ]); + *dst++ |= adpcm_ima_compress_sample(&c->status[1], samples[7 ]) << 4; + *dst = adpcm_ima_compress_sample(&c->status[1], samples[9 ]); + *dst++ |= adpcm_ima_compress_sample(&c->status[1], samples[11]) << 4; + *dst = adpcm_ima_compress_sample(&c->status[1], samples[13]); + *dst++ |= adpcm_ima_compress_sample(&c->status[1], samples[15]) << 4; } samples += 8 * avctx->channels; } + } break; case CODEC_ID_ADPCM_IMA_QT: { int ch, i; PutBitContext pb; - init_put_bits(&pb, dst, buf_size*8); + init_put_bits(&pb, dst, buf_size * 
8); - for(ch=0; ch<avctx->channels; ch++){ + for (ch = 0; ch < avctx->channels; ch++) { put_bits(&pb, 9, (c->status[ch].prev_sample + 0x10000) >> 7); - put_bits(&pb, 7, c->status[ch].step_index); - if(avctx->trellis > 0) { + put_bits(&pb, 7, c->status[ch].step_index); + if (avctx->trellis > 0) { uint8_t buf[64]; adpcm_compress_trellis(avctx, samples+ch, buf, &c->status[ch], 64); - for(i=0; i<64; i++) - put_bits(&pb, 4, buf[i^1]); + for (i = 0; i < 64; i++) + put_bits(&pb, 4, buf[i ^ 1]); } else { - for (i=0; i<64; i+=2){ + for (i = 0; i < 64; i += 2) { int t1, t2; - t1 = adpcm_ima_qt_compress_sample(&c->status[ch], samples[avctx->channels*(i+0)+ch]); - t2 = adpcm_ima_qt_compress_sample(&c->status[ch], samples[avctx->channels*(i+1)+ch]); + t1 = adpcm_ima_qt_compress_sample(&c->status[ch], + samples[avctx->channels * (i + 0) + ch]); + t2 = adpcm_ima_qt_compress_sample(&c->status[ch], + samples[avctx->channels * (i + 1) + ch]); put_bits(&pb, 4, t2); put_bits(&pb, 4, t1); } @@ -543,119 +575,120 @@ static int adpcm_encode_frame(AVCodecContext *avctx, } flush_put_bits(&pb); - dst += put_bits_count(&pb)>>3; + dst += put_bits_count(&pb) >> 3; break; } case CODEC_ID_ADPCM_SWF: { int i; PutBitContext pb; - init_put_bits(&pb, dst, buf_size*8); + init_put_bits(&pb, dst, buf_size * 8); - n = avctx->frame_size-1; + n = avctx->frame_size - 1; - //Store AdpcmCodeSize - put_bits(&pb, 2, 2); //Set 4bits flash adpcm format + // store AdpcmCodeSize + put_bits(&pb, 2, 2); // set 4-bit flash adpcm format - //Init the encoder state - for(i=0; i<avctx->channels; i++){ - c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63); // clip step so it fits 6 bits + // init the encoder state + for (i = 0; i < avctx->channels; i++) { + // clip step so it fits 6 bits + c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63); put_sbits(&pb, 16, samples[i]); put_bits(&pb, 6, c->status[i].step_index); c->status[i].prev_sample = (signed short)samples[i]; } - if(avctx->trellis > 0) { - FF_ALLOC_OR_GOTO(avctx, buf, 2*n, error); - adpcm_compress_trellis(avctx, samples+2, buf, &c->status[0], n); + if (avctx->trellis > 0) { + FF_ALLOC_OR_GOTO(avctx, buf, 2 * n, error); + adpcm_compress_trellis(avctx, samples + 2, buf, &c->status[0], n); if (avctx->channels == 2) - adpcm_compress_trellis(avctx, samples+3, buf+n, &c->status[1], n); - for(i=0; i<n; i++) { + adpcm_compress_trellis(avctx, samples + 3, buf + n, + &c->status[1], n); + for (i = 0; i < n; i++) { put_bits(&pb, 4, buf[i]); if (avctx->channels == 2) - put_bits(&pb, 4, buf[n+i]); + put_bits(&pb, 4, buf[n + i]); } av_free(buf); } else { - for (i=1; i<avctx->frame_size; i++) { - put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels*i])); + for (i = 1; i < avctx->frame_size; i++) { + put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0], + samples[avctx->channels * i])); if (avctx->channels == 2) - put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1], samples[2*i+1])); + put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1], + samples[2 * i + 1])); } } flush_put_bits(&pb); - dst += put_bits_count(&pb)>>3; + dst += put_bits_count(&pb) >> 3; break; } case CODEC_ID_ADPCM_MS: - for(i=0; i<avctx->channels; i++){ - int predictor=0; - + for (i = 0; i < avctx->channels; i++) { + int predictor = 0; *dst++ = predictor; c->status[i].coeff1 = ff_adpcm_AdaptCoeff1[predictor]; c->status[i].coeff2 = ff_adpcm_AdaptCoeff2[predictor]; } - for(i=0; i<avctx->channels; i++){ + for (i = 0; i < avctx->channels; i++) { if (c->status[i].idelta < 
16) c->status[i].idelta = 16; - bytestream_put_le16(&dst, c->status[i].idelta); } - for(i=0; i<avctx->channels; i++){ + for (i = 0; i < avctx->channels; i++) c->status[i].sample2= *samples++; - } - for(i=0; i<avctx->channels; i++){ - c->status[i].sample1= *samples++; - + for (i = 0; i < avctx->channels; i++) { + c->status[i].sample1 = *samples++; bytestream_put_le16(&dst, c->status[i].sample1); } - for(i=0; i<avctx->channels; i++) + for (i = 0; i < avctx->channels; i++) bytestream_put_le16(&dst, c->status[i].sample2); - if(avctx->trellis > 0) { - int n = avctx->block_align - 7*avctx->channels; - FF_ALLOC_OR_GOTO(avctx, buf, 2*n, error); - if(avctx->channels == 1) { + if (avctx->trellis > 0) { + int n = avctx->block_align - 7 * avctx->channels; + FF_ALLOC_OR_GOTO(avctx, buf, 2 * n, error); + if (avctx->channels == 1) { adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n); - for(i=0; i<n; i+=2) - *dst++ = (buf[i] << 4) | buf[i+1]; + for (i = 0; i < n; i += 2) + *dst++ = (buf[i] << 4) | buf[i + 1]; } else { - adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n); - adpcm_compress_trellis(avctx, samples+1, buf+n, &c->status[1], n); - for(i=0; i<n; i++) - *dst++ = (buf[i] << 4) | buf[n+i]; + adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n); + adpcm_compress_trellis(avctx, samples + 1, buf + n, &c->status[1], n); + for (i = 0; i < n; i++) + *dst++ = (buf[i] << 4) | buf[n + i]; } av_free(buf); - } else - for(i=7*avctx->channels; i<avctx->block_align; i++) { - int nibble; - nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++)<<4; - nibble|= adpcm_ms_compress_sample(&c->status[st], *samples++); - *dst++ = nibble; + } else { + for (i = 7 * avctx->channels; i < avctx->block_align; i++) { + int nibble; + nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++) << 4; + nibble |= adpcm_ms_compress_sample(&c->status[st], *samples++); + *dst++ = nibble; + } } break; case CODEC_ID_ADPCM_YAMAHA: n = avctx->frame_size / 2; - if(avctx->trellis > 0) { - FF_ALLOC_OR_GOTO(avctx, buf, 2*n*2, error); + if (avctx->trellis > 0) { + FF_ALLOC_OR_GOTO(avctx, buf, 2 * n * 2, error); n *= 2; - if(avctx->channels == 1) { + if (avctx->channels == 1) { adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n); - for(i=0; i<n; i+=2) - *dst++ = buf[i] | (buf[i+1] << 4); + for (i = 0; i < n; i += 2) + *dst++ = buf[i] | (buf[i + 1] << 4); } else { - adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n); - adpcm_compress_trellis(avctx, samples+1, buf+n, &c->status[1], n); - for(i=0; i<n; i++) - *dst++ = buf[i] | (buf[n+i] << 4); + adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n); + adpcm_compress_trellis(avctx, samples + 1, buf + n, &c->status[1], n); + for (i = 0; i < n; i++) + *dst++ = buf[i] | (buf[n + i] << 4); } av_free(buf); } else - for (n *= avctx->channels; n>0; n--) { + for (n *= avctx->channels; n > 0; n--) { int nibble; nibble = adpcm_yamaha_compress_sample(&c->status[ 0], *samples++); nibble |= adpcm_yamaha_compress_sample(&c->status[st], *samples++) << 4; - *dst++ = nibble; + *dst++ = nibble; } break; default: @@ -675,12 +708,13 @@ AVCodec ff_ ## name_ ## _encoder = { \ .init = adpcm_encode_init, \ .encode = adpcm_encode_frame, \ .close = adpcm_encode_close, \ - .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, \ + .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16, \ + AV_SAMPLE_FMT_NONE}, \ .long_name = NULL_IF_CONFIG_SMALL(long_name_), \ } -ADPCM_ENCODER(CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM 
IMA QuickTime"); +ADPCM_ENCODER(CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM IMA QuickTime"); ADPCM_ENCODER(CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV"); -ADPCM_ENCODER(CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft"); -ADPCM_ENCODER(CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash"); -ADPCM_ENCODER(CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, "ADPCM Yamaha"); +ADPCM_ENCODER(CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft"); +ADPCM_ENCODER(CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash"); +ADPCM_ENCODER(CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, "ADPCM Yamaha"); diff --git a/libavcodec/bmp.c b/libavcodec/bmp.c index 81ecf6e50b..5971145401 100644 --- a/libavcodec/bmp.c +++ b/libavcodec/bmp.c @@ -263,9 +263,9 @@ static int bmp_decode_frame(AVCodecContext *avctx, }else{ switch(depth){ case 1: - for(i = 0; i < avctx->height; i++){ + for (i = 0; i < avctx->height; i++) { int j; - for(j = 0; j < n; j++){ + for (j = 0; j < n; j++) { ptr[j*8+0] = buf[j] >> 7; ptr[j*8+1] = (buf[j] >> 6) & 1; ptr[j*8+2] = (buf[j] >> 5) & 1; diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c index 1d3bb0cdf2..d72e6d23de 100644 --- a/libavcodec/mpegvideo.c +++ b/libavcodec/mpegvideo.c @@ -66,44 +66,61 @@ static void dct_unquantize_h263_inter_c(MpegEncContext *s, //#define DEBUG -static const uint8_t ff_default_chroma_qscale_table[32]={ -// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 +static const uint8_t ff_default_chroma_qscale_table[32] = { +// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 }; -const uint8_t ff_mpeg1_dc_scale_table[128]={ -// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, +const uint8_t ff_mpeg1_dc_scale_table[128] = { +// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, }; -static const uint8_t mpeg2_dc_scale_table1[128]={ -// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, +static const uint8_t mpeg2_dc_scale_table1[128] = { +// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 
4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, }; -static const uint8_t mpeg2_dc_scale_table2[128]={ -// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, +static const uint8_t mpeg2_dc_scale_table2[128] = { +// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, }; -static const uint8_t mpeg2_dc_scale_table3[128]={ -// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +static const uint8_t mpeg2_dc_scale_table3[128] = { +// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, }; -const uint8_t * const ff_mpeg2_dc_scale_table[4]={ +const uint8_t *const ff_mpeg2_dc_scale_table[4] = { ff_mpeg1_dc_scale_table, mpeg2_dc_scale_table1, mpeg2_dc_scale_table2, @@ -123,34 +140,37 @@ const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = { PIX_FMT_NONE }; -const uint8_t *avpriv_mpv_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){ +const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p, + const uint8_t *end, + uint32_t * restrict state) +{ int i; - assert(p<=end); - if(p>=end) + assert(p <= end); + if (p >= end) return end; - for(i=0; i<3; i++){ - uint32_t tmp= *state << 8; - *state= tmp + *(p++); - if(tmp == 0x100 || p==end) + for (i = 0; i < 3; i++) { + uint32_t tmp = *state << 8; + *state = tmp + *(p++); + if (tmp == 0x100 || p == end) return p; } - while(p<end){ - if (p[-1] > 1 ) p+= 3; - else if(p[-2] ) p+= 2; - else if(p[-3]|(p[-1]-1)) p++; - else{ + while (p < end) { + if (p[-1] > 1 ) p += 3; + else if (p[-2] ) p += 2; + else if (p[-3]|(p[-1]-1)) p++; + else { p++; break; } } - p= FFMIN(p, end)-4; - *state= AV_RB32(p); + p = FFMIN(p, end) - 4; + *state = AV_RB32(p); - return p+4; + return p + 4; } /* init common dct for both encoder and 
decoder */ @@ -163,11 +183,11 @@ av_cold int ff_dct_common_init(MpegEncContext *s) s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c; s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c; s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c; - if(s->flags & CODEC_FLAG_BITEXACT) + if (s->flags & CODEC_FLAG_BITEXACT) s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact; s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c; -#if HAVE_MMX +#if HAVE_MMX MPV_common_init_mmx(s); #elif ARCH_ALPHA MPV_common_init_axp(s); @@ -184,12 +204,12 @@ av_cold int ff_dct_common_init(MpegEncContext *s) #endif /* load & permutate scantables - note: only wmv uses different ones - */ - if(s->alternate_scan){ + * note: only wmv uses different ones + */ + if (s->alternate_scan) { ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan); ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan); - }else{ + } else { ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct); ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct); } @@ -199,9 +219,10 @@ av_cold int ff_dct_common_init(MpegEncContext *s) return 0; } -void ff_copy_picture(Picture *dst, Picture *src){ +void ff_copy_picture(Picture *dst, Picture *src) +{ *dst = *src; - dst->f.type= FF_BUFFER_TYPE_COPY; + dst->f.type = FF_BUFFER_TYPE_COPY; } /** @@ -210,11 +231,12 @@ void ff_copy_picture(Picture *dst, Picture *src){ static void free_frame_buffer(MpegEncContext *s, Picture *pic) { /* Windows Media Image codecs allocate internal buffers with different - dimensions; ignore user defined callbacks for these */ + * dimensions; ignore user defined callbacks for these + */ if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE) - ff_thread_release_buffer(s->avctx, (AVFrame*)pic); + ff_thread_release_buffer(s->avctx, (AVFrame *) pic); else - avcodec_default_release_buffer(s->avctx, (AVFrame*)pic); + avcodec_default_release_buffer(s->avctx, (AVFrame *) pic); av_freep(&pic->f.hwaccel_picture_private); } @@ -237,9 +259,9 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic) } if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE) - r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic); + r = ff_thread_get_buffer(s->avctx, (AVFrame *) pic); else - r = avcodec_default_get_buffer(s->avctx, (AVFrame*)pic); + r = avcodec_default_get_buffer(s->avctx, (AVFrame *) pic); if (r < 0 || !pic->f.age || !pic->f.type || !pic->f.data[0]) { av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", @@ -248,14 +270,17 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic) return -1; } - if (s->linesize && (s->linesize != pic->f.linesize[0] || s->uvlinesize != pic->f.linesize[1])) { - av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n"); + if (s->linesize && (s->linesize != pic->f.linesize[0] || + s->uvlinesize != pic->f.linesize[1])) { + av_log(s->avctx, AV_LOG_ERROR, + "get_buffer() failed (stride changed)\n"); free_frame_buffer(s, pic); return -1; } if (pic->f.linesize[1] != pic->f.linesize[2]) { - av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n"); + av_log(s->avctx, AV_LOG_ERROR, + "get_buffer() failed (uv stride mismatch)\n"); free_frame_buffer(s, pic); return -1; } @@ -265,21 +290,25 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic) /** * allocates a Picture - * The pixels are 
allocated/set by calling get_buffer() if shared=0 + * The pixels are allocated/set by calling get_buffer() if shared = 0 */ -int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){ - const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11 - const int mb_array_size= s->mb_stride*s->mb_height; - const int b8_array_size= s->b8_stride*s->mb_height*2; - const int b4_array_size= s->b4_stride*s->mb_height*4; +int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared) +{ + const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1; + + // the + 1 is needed so memset(,,stride*height) does not sig11 + + const int mb_array_size = s->mb_stride * s->mb_height; + const int b8_array_size = s->b8_stride * s->mb_height * 2; + const int b4_array_size = s->b4_stride * s->mb_height * 4; int i; - int r= -1; + int r = -1; - if(shared){ + if (shared) { assert(pic->f.data[0]); assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED); pic->f.type = FF_BUFFER_TYPE_SHARED; - }else{ + } else { assert(!pic->f.data[0]); if (alloc_frame_buffer(s, pic) < 0) @@ -291,49 +320,69 @@ int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){ if (pic->f.qscale_table == NULL) { if (s->encoding) { - FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var , mb_array_size * sizeof(int16_t) , fail) - FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t) , fail) - FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean , mb_array_size * sizeof(int8_t ) , fail) + FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var, + mb_array_size * sizeof(int16_t), fail) + FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, + mb_array_size * sizeof(int16_t), fail) + FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean, + mb_array_size * sizeof(int8_t ), fail) } - FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table, mb_array_size * sizeof(uint8_t) + 2, fail) //the +2 is for the slice end check - FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base , (big_mb_num + s->mb_stride) * sizeof(uint8_t) , fail) - FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail) - pic->f.mb_type = pic->mb_type_base + 2*s->mb_stride + 1; - pic->f.qscale_table = pic->qscale_table_base + 2*s->mb_stride + 1; - if(s->out_format == FMT_H264){ - for(i=0; i<2; i++){ - FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t), fail) + FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table, + mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check + FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base, + (big_mb_num + s->mb_stride) * sizeof(uint8_t), + fail) + FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base, + (big_mb_num + s->mb_stride) * sizeof(uint32_t), + fail) + pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1; + pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1; + if (s->out_format == FMT_H264) { + for (i = 0; i < 2; i++) { + FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], + 2 * (b4_array_size + 4) * sizeof(int16_t), + fail) pic->f.motion_val[i] = pic->motion_val_base[i] + 4; - FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail) + FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], + 4 * mb_array_size * sizeof(uint8_t), fail) } pic->f.motion_subsample_log2 = 2; - }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){ - for(i=0; i<2; i++){ - FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * 
(b8_array_size+4) * sizeof(int16_t), fail) + } else if (s->out_format == FMT_H263 || s->encoding || + (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) { + for (i = 0; i < 2; i++) { + FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], + 2 * (b8_array_size + 4) * sizeof(int16_t), + fail) pic->f.motion_val[i] = pic->motion_val_base[i] + 4; - FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail) + FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], + 4 * mb_array_size * sizeof(uint8_t), fail) } pic->f.motion_subsample_log2 = 3; } - if(s->avctx->debug&FF_DEBUG_DCT_COEFF) { - FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff, 64 * mb_array_size * sizeof(DCTELEM) * 6, fail) + if (s->avctx->debug&FF_DEBUG_DCT_COEFF) { + FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff, + 64 * mb_array_size * sizeof(DCTELEM) * 6, fail) } pic->f.qstride = s->mb_stride; - FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan , 1 * sizeof(AVPanScan), fail) + FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan, + 1 * sizeof(AVPanScan), fail) } /* It might be nicer if the application would keep track of these * but it would require an API change. */ - memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1); - s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type; - if (pic->f.age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->f.age] == AV_PICTURE_TYPE_B) - pic->f.age = INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway. + memmove(s->prev_pict_types + 1, s->prev_pict_types, + PREV_PICT_TYPES_BUFFER_SIZE-1); + s->prev_pict_types[0] = s->dropable ? AV_PICTURE_TYPE_B : s->pict_type; + if (pic->f.age < PREV_PICT_TYPES_BUFFER_SIZE && + s->prev_pict_types[pic->f.age] == AV_PICTURE_TYPE_B) + pic->f.age = INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 + // and it is a bit tricky to skip them anyway. 
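
The ff_alloc_picture() hunks above are a pure reformat of the FF_ALLOCZ_OR_GOTO() pattern: each table is allocated zeroed, and any failure jumps to one shared cleanup label so the error path frees exactly what was already obtained. A minimal self-contained sketch of that pattern, using plain calloc/free stand-ins rather than the libavutil macros (struct and names below are illustrative only, not from this commit):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Allocate-or-goto-fail pattern as used in ff_alloc_picture(): every
 * allocation either succeeds or jumps to a single "fail:" label. */
struct toy_picture {
    int16_t *mb_var;
    int8_t  *mb_mean;
};

static int toy_alloc_picture(struct toy_picture *pic, size_t mb_array_size)
{
    pic->mb_var = calloc(mb_array_size, sizeof(*pic->mb_var));
    if (!pic->mb_var)
        goto fail;
    pic->mb_mean = calloc(mb_array_size, sizeof(*pic->mb_mean));
    if (!pic->mb_mean)
        goto fail;
    return 0;
fail:
    free(pic->mb_var);   /* free(NULL) is a no-op, so a partial failure is safe */
    free(pic->mb_mean);
    memset(pic, 0, sizeof(*pic));
    return -1;
}

The same shape is what the "fail: // for the FF_ALLOCZ_OR_GOTO macro" label below exists for.
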
pic->owner2 = s; return 0; -fail: //for the FF_ALLOCZ_OR_GOTO macro - if(r>=0) +fail: // for the FF_ALLOCZ_OR_GOTO macro + if (r >= 0) free_frame_buffer(s, pic); return -1; } @@ -341,7 +390,8 @@ fail: //for the FF_ALLOCZ_OR_GOTO macro /** * deallocates a picture */ -static void free_picture(MpegEncContext *s, Picture *pic){ +static void free_picture(MpegEncContext *s, Picture *pic) +{ int i; if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) { @@ -357,13 +407,13 @@ static void free_picture(MpegEncContext *s, Picture *pic){ av_freep(&pic->f.dct_coeff); av_freep(&pic->f.pan_scan); pic->f.mb_type = NULL; - for(i=0; i<2; i++){ + for (i = 0; i < 2; i++) { av_freep(&pic->motion_val_base[i]); av_freep(&pic->f.ref_index[i]); } if (pic->f.type == FF_BUFFER_TYPE_SHARED) { - for(i=0; i<4; i++){ + for (i = 0; i < 4; i++) { pic->f.base[i] = pic->f.data[i] = NULL; } @@ -371,38 +421,47 @@ static void free_picture(MpegEncContext *s, Picture *pic){ } } -static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){ +static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base) +{ int y_size = s->b8_stride * (2 * s->mb_height + 1); int c_size = s->mb_stride * (s->mb_height + 1); int yc_size = y_size + 2 * c_size; int i; - // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264) - FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance - - //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer() - FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, (s->width+64)*4*16*2*sizeof(uint8_t), fail) - s->me.temp= s->me.scratchpad; - s->rd_scratchpad= s->me.scratchpad; - s->b_scratchpad= s->me.scratchpad; - s->obmc_scratchpad= s->me.scratchpad + 16; + // edge emu needs blocksize + filter length - 1 + // (= 17x17 for halfpel / 21x21 for h264) + FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, + (s->width + 64) * 2 * 21 * 2, fail); // (width + edge + align)*interlaced*MBsize*tolerance + + // FIXME should be linesize instead of s->width * 2 + // but that is not known before get_buffer() + FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, + (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail) + s->me.temp = s->me.scratchpad; + s->rd_scratchpad = s->me.scratchpad; + s->b_scratchpad = s->me.scratchpad; + s->obmc_scratchpad = s->me.scratchpad + 16; if (s->encoding) { - FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map , ME_MAP_SIZE*sizeof(uint32_t), fail) - FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail) - if(s->avctx->noise_reduction){ - FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail) + FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map, + ME_MAP_SIZE * sizeof(uint32_t), fail) + FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, + ME_MAP_SIZE * sizeof(uint32_t), fail) + if (s->avctx->noise_reduction) { + FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, + 2 * 64 * sizeof(int), fail) } } - FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail) - s->block= s->blocks[0]; + FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail) + s->block = s->blocks[0]; - for(i=0;i<12;i++){ + for (i = 0; i < 12; i++) { s->pblocks[i] = &s->block[i]; } if (s->out_format == FMT_H263) { /* ac values */ - FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail); + FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, + yc_size * sizeof(int16_t) * 16, fail); s->ac_val[0] = s->ac_val_base + s->b8_stride + 1; 
s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1; s->ac_val[2] = s->ac_val[1] + c_size; @@ -410,29 +469,32 @@ static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){ return 0; fail: - return -1; //free() through MPV_common_end() + return -1; // free() through MPV_common_end() } -static void free_duplicate_context(MpegEncContext *s){ - if(s==NULL) return; +static void free_duplicate_context(MpegEncContext *s) +{ + if (s == NULL) + return; av_freep(&s->edge_emu_buffer); av_freep(&s->me.scratchpad); - s->me.temp= - s->rd_scratchpad= - s->b_scratchpad= - s->obmc_scratchpad= NULL; + s->me.temp = + s->rd_scratchpad = + s->b_scratchpad = + s->obmc_scratchpad = NULL; av_freep(&s->dct_error_sum); av_freep(&s->me.map); av_freep(&s->me.score_map); av_freep(&s->blocks); av_freep(&s->ac_val_base); - s->block= NULL; + s->block = NULL; } -static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){ -#define COPY(a) bak->a= src->a +static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src) +{ +#define COPY(a) bak->a = src->a COPY(edge_emu_buffer); COPY(me.scratchpad); COPY(me.temp); @@ -457,28 +519,33 @@ static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){ #undef COPY } -void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){ +void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src) +{ MpegEncContext bak; int i; - //FIXME copy only needed parts -//START_TIMER + // FIXME copy only needed parts + // START_TIMER backup_duplicate_context(&bak, dst); memcpy(dst, src, sizeof(MpegEncContext)); backup_duplicate_context(dst, &bak); - for(i=0;i<12;i++){ + for (i = 0; i < 12; i++) { dst->pblocks[i] = &dst->block[i]; } -//STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads + // STOP_TIMER("update_duplicate_context") + // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads } -int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src) +int ff_mpeg_update_thread_context(AVCodecContext *dst, + const AVCodecContext *src) { MpegEncContext *s = dst->priv_data, *s1 = src->priv_data; - if(dst == src || !s1->context_initialized) return 0; + if (dst == src || !s1->context_initialized) + return 0; - //FIXME can parameters change on I-frames? in that case dst may need a reinit - if(!s->context_initialized){ + // FIXME can parameters change on I-frames? 
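
ff_update_duplicate_context() above backs up the per-thread pointers, bulk-copies the whole MpegEncContext from the source slice context, then restores the backed-up pointers so every thread keeps its own scratch buffers. A stripped-down sketch of that backup/copy/restore idea, with a toy struct whose fields are purely illustrative:

#include <stdint.h>

/* Shared coding state is copied wholesale; the per-thread scratch pointer
 * is saved and restored around the copy (backup_duplicate_context() /
 * memcpy() / backup_duplicate_context() in the real code). */
struct toy_ctx {
    int      width, height, qscale;  /* shared state, copied from src */
    uint8_t *scratchpad;             /* per-thread, must survive the copy */
};

static void toy_update_context(struct toy_ctx *dst, const struct toy_ctx *src)
{
    uint8_t *saved  = dst->scratchpad;
    *dst            = *src;
    dst->scratchpad = saved;
}
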
+ // in that case dst may need a reinit + if (!s->context_initialized) { memcpy(s, s1, sizeof(MpegEncContext)); s->avctx = dst; diff --git a/libavcodec/shorten.c b/libavcodec/shorten.c index 26ce6fe885..f0a173cc7e 100644 --- a/libavcodec/shorten.c +++ b/libavcodec/shorten.c @@ -331,7 +331,6 @@ static int read_header(ShortenContext *s) s->lpcqoffset = 0; s->blocksize = DEFAULT_BLOCK_SIZE; - s->channels = 1; s->nmean = -1; s->version = get_bits(&s->gb, 8); s->internal_ftype = get_uint(s, TYPESIZE); diff --git a/libavcodec/wavpack.c b/libavcodec/wavpack.c index 1ccaec665d..1a8c25943f 100644 --- a/libavcodec/wavpack.c +++ b/libavcodec/wavpack.c @@ -110,7 +110,7 @@ typedef struct WavpackFrameContext { int extra_bits; int and, or, shift; int post_shift; - int hybrid, hybrid_bitrate; + int hybrid, hybrid_bitrate, hybrid_maxclip; int float_flag; int float_shift; int float_max_exp; @@ -403,8 +403,14 @@ static inline int wv_get_value_integer(WavpackFrameContext *s, uint32_t *crc, in *crc = *crc * 9 + (S&0xffff) * 3 + ((unsigned)S>>16); } } + bit = (S & s->and) | s->or; - return (((S + bit) << s->shift) - bit) << s->post_shift; + bit = (((S + bit) << s->shift) - bit); + + if(s->hybrid) + bit = av_clip(bit, -s->hybrid_maxclip, s->hybrid_maxclip - 1); + + return bit << s->post_shift; } static float wv_get_value_float(WavpackFrameContext *s, uint32_t *crc, int S) @@ -792,6 +798,7 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no, s->joint = s->frame_flags & WV_JOINT_STEREO; s->hybrid = s->frame_flags & WV_HYBRID_MODE; s->hybrid_bitrate = s->frame_flags & WV_HYBRID_BITRATE; + s->hybrid_maxclip = 1 << ((((s->frame_flags & 0x03) + 1) << 3) - 1); s->post_shift = 8 * (bpp-1-(s->frame_flags&0x03)) + ((s->frame_flags >> 13) & 0x1f); s->CRC = AV_RL32(buf); buf += 4; if(wc->mkv_mode) diff --git a/libavcodec/xan.c b/libavcodec/xan.c index edd4fe8197..c469594e34 100644 --- a/libavcodec/xan.c +++ b/libavcodec/xan.c @@ -113,13 +113,13 @@ static int xan_huffman_decode(unsigned char *dest, int dest_len, init_get_bits(&gb, ptr, ptr_len * 8); - while ( val != 0x16 ) { + while (val != 0x16) { unsigned idx = val - 0x17 + get_bits1(&gb) * byte; if (idx >= 2 * byte) return -1; val = src[idx]; - if ( val < 0x16 ) { + if (val < 0x16) { if (dest >= dest_end) return 0; *dest++ = val; @@ -149,27 +149,23 @@ static void xan_unpack(unsigned char *dest, int dest_len, if (opcode < 0xe0) { int size2, back; - if ( (opcode & 0x80) == 0 ) { - + if ((opcode & 0x80) == 0) { size = opcode & 3; back = ((opcode & 0x60) << 3) + *src++ + 1; size2 = ((opcode & 0x1c) >> 2) + 3; - - } else if ( (opcode & 0x40) == 0 ) { - + } else if ((opcode & 0x40) == 0) { size = *src >> 6; back = (bytestream_get_be16(&src) & 0x3fff) + 1; size2 = (opcode & 0x3f) + 4; - } else { - size = opcode & 3; back = ((opcode & 0x10) << 12) + bytestream_get_be16(&src) + 1; size2 = ((opcode & 0x0c) << 6) + *src++ + 5; } + if (dest_end - dest < size + size2 || dest + size - dest_org < back || src_end - src < size) @@ -205,7 +201,7 @@ static inline void xan_wc3_output_pixel_run(XanContext *s, line_inc = stride - width; index = y * stride + x; current_x = x; - while(pixel_count && (index < s->frame_size)) { + while (pixel_count && index < s->frame_size) { int count = FFMIN(pixel_count, width - current_x); memcpy(palette_plane + index, pixel_buffer, count); pixel_count -= count; @@ -220,8 +216,9 @@ static inline void xan_wc3_output_pixel_run(XanContext *s, } } -static inline void xan_wc3_copy_pixel_run(XanContext *s, - int x, int y, int pixel_count, int 
motion_x, int motion_y) +static inline void xan_wc3_copy_pixel_run(XanContext *s, int x, int y, + int pixel_count, int motion_x, + int motion_y) { int stride; int line_inc; @@ -230,8 +227,8 @@ static inline void xan_wc3_copy_pixel_run(XanContext *s, int width = s->avctx->width; unsigned char *palette_plane, *prev_palette_plane; - if ( y + motion_y < 0 || y + motion_y >= s->avctx->height || - x + motion_x < 0 || x + motion_x >= s->avctx->width) + if (y + motion_y < 0 || y + motion_y >= s->avctx->height || + x + motion_x < 0 || x + motion_x >= s->avctx->width) return; palette_plane = s->current_frame.data[0]; @@ -244,12 +241,14 @@ static inline void xan_wc3_copy_pixel_run(XanContext *s, curframe_x = x; prevframe_index = (y + motion_y) * stride + x + motion_x; prevframe_x = x + motion_x; - while(pixel_count && - curframe_index < s->frame_size && - prevframe_index < s->frame_size) { - int count = FFMIN3(pixel_count, width - curframe_x, width - prevframe_x); - - memcpy(palette_plane + curframe_index, prev_palette_plane + prevframe_index, count); + while (pixel_count && + curframe_index < s->frame_size && + prevframe_index < s->frame_size) { + int count = FFMIN3(pixel_count, width - curframe_x, + width - prevframe_x); + + memcpy(palette_plane + curframe_index, + prev_palette_plane + prevframe_index, count); pixel_count -= count; curframe_index += count; prevframe_index += count; @@ -270,7 +269,7 @@ static inline void xan_wc3_copy_pixel_run(XanContext *s, static int xan_wc3_decode_frame(XanContext *s) { - int width = s->avctx->width; + int width = s->avctx->width; int height = s->avctx->height; int total_pixels = width * height; unsigned char opcode; @@ -289,7 +288,8 @@ static int xan_wc3_decode_frame(XanContext *s) { const unsigned char *size_segment; const unsigned char *vector_segment; const unsigned char *imagedata_segment; - int huffman_offset, size_offset, vector_offset, imagedata_offset, imagedata_size; + int huffman_offset, size_offset, vector_offset, imagedata_offset, + imagedata_size; if (s->size < 8) return AVERROR_INVALIDDATA; @@ -374,6 +374,7 @@ static int xan_wc3_decode_frame(XanContext *s) { size_segment += 3; break; } + if (size > total_pixels) break; @@ -518,7 +519,8 @@ static int xan_decode_frame(AVCodecContext *avctx, return AVERROR_INVALIDDATA; if (s->palettes_count >= PALETTES_MAX) return AVERROR_INVALIDDATA; - tmpptr = av_realloc(s->palettes, (s->palettes_count + 1) * AVPALETTE_SIZE); + tmpptr = av_realloc(s->palettes, + (s->palettes_count + 1) * AVPALETTE_SIZE); if (!tmpptr) return AVERROR(ENOMEM); s->palettes = tmpptr; @@ -569,7 +571,8 @@ static int xan_decode_frame(AVCodecContext *avctx, if (!s->frame_size) s->frame_size = s->current_frame.linesize[0] * s->avctx->height; - memcpy(s->current_frame.data[1], s->palettes + s->cur_palette * AVPALETTE_COUNT, AVPALETTE_SIZE); + memcpy(s->current_frame.data[1], + s->palettes + s->cur_palette * AVPALETTE_COUNT, AVPALETTE_SIZE); s->buf = buf; s->size = buf_size; @@ -617,5 +620,5 @@ AVCodec ff_xan_wc3_decoder = { .close = xan_decode_end, .decode = xan_decode_frame, .capabilities = CODEC_CAP_DR1, - .long_name = NULL_IF_CONFIG_SMALL("Wing Commander III / Xan"), + .long_name = NULL_IF_CONFIG_SMALL("Wing Commander III / Xan"), }; diff --git a/libavcodec/zmbv.c b/libavcodec/zmbv.c index 2eb12e8031..6f89c7e3ad 100644 --- a/libavcodec/zmbv.c +++ b/libavcodec/zmbv.c @@ -88,8 +88,8 @@ static int zmbv_decode_xor_8(ZmbvContext *c) output = c->cur; prev = c->prev; - if(c->flags & ZMBV_DELTAPAL){ - for(i = 0; i < 768; i++) + if (c->flags & 
ZMBV_DELTAPAL) { + for (i = 0; i < 768; i++) c->pal[i] ^= *src++; } @@ -97,9 +97,9 @@ static int zmbv_decode_xor_8(ZmbvContext *c) src += ((c->bx * c->by * 2 + 3) & ~3); block = 0; - for(y = 0; y < c->height; y += c->bh) { + for (y = 0; y < c->height; y += c->bh) { bh2 = ((c->height - y) > c->bh) ? c->bh : (c->height - y); - for(x = 0; x < c->width; x += c->bw) { + for (x = 0; x < c->width; x += c->bw) { uint8_t *out, *tprev; d = mvec[block] & 1; @@ -114,12 +114,12 @@ static int zmbv_decode_xor_8(ZmbvContext *c) tprev = prev + x + dx + dy * c->width; mx = x + dx; my = y + dy; - for(j = 0; j < bh2; j++){ - if((my + j < 0) || (my + j >= c->height)) { + for (j = 0; j < bh2; j++) { + if (my + j < 0 || my + j >= c->height) { memset(out, 0, bw2); } else { - for(i = 0; i < bw2; i++){ - if((mx + i < 0) || (mx + i >= c->width)) + for (i = 0; i < bw2; i++) { + if (mx + i < 0 || mx + i >= c->width) out[i] = 0; else out[i] = tprev[i]; @@ -129,10 +129,10 @@ static int zmbv_decode_xor_8(ZmbvContext *c) tprev += c->width; } - if(d) { /* apply XOR'ed difference */ + if (d) { /* apply XOR'ed difference */ out = output + x; - for(j = 0; j < bh2; j++){ - for(i = 0; i < bw2; i++) + for (j = 0; j < bh2; j++) { + for (i = 0; i < bw2; i++) out[i] ^= *src++; out += c->width; } @@ -141,8 +141,9 @@ static int zmbv_decode_xor_8(ZmbvContext *c) output += c->width * c->bh; prev += c->width * c->bh; } - if(src - c->decomp_buf != c->decomp_len) - av_log(c->avctx, AV_LOG_ERROR, "Used %ti of %i bytes\n", src-c->decomp_buf, c->decomp_len); + if (src - c->decomp_buf != c->decomp_len) + av_log(c->avctx, AV_LOG_ERROR, "Used %ti of %i bytes\n", + src-c->decomp_buf, c->decomp_len); return 0; } @@ -168,9 +169,9 @@ static int zmbv_decode_xor_16(ZmbvContext *c) src += ((c->bx * c->by * 2 + 3) & ~3); block = 0; - for(y = 0; y < c->height; y += c->bh) { + for (y = 0; y < c->height; y += c->bh) { bh2 = ((c->height - y) > c->bh) ? c->bh : (c->height - y); - for(x = 0; x < c->width; x += c->bw) { + for (x = 0; x < c->width; x += c->bw) { uint16_t *out, *tprev; d = mvec[block] & 1; @@ -185,12 +186,12 @@ static int zmbv_decode_xor_16(ZmbvContext *c) tprev = prev + x + dx + dy * c->width; mx = x + dx; my = y + dy; - for(j = 0; j < bh2; j++){ - if((my + j < 0) || (my + j >= c->height)) { + for (j = 0; j < bh2; j++) { + if (my + j < 0 || my + j >= c->height) { memset(out, 0, bw2 * 2); } else { - for(i = 0; i < bw2; i++){ - if((mx + i < 0) || (mx + i >= c->width)) + for (i = 0; i < bw2; i++) { + if (mx + i < 0 || mx + i >= c->width) out[i] = 0; else out[i] = tprev[i]; @@ -200,10 +201,10 @@ static int zmbv_decode_xor_16(ZmbvContext *c) tprev += c->width; } - if(d) { /* apply XOR'ed difference */ + if (d) { /* apply XOR'ed difference */ out = output + x; - for(j = 0; j < bh2; j++){ - for(i = 0; i < bw2; i++) { + for (j = 0; j < bh2; j++){ + for (i = 0; i < bw2; i++) { out[i] ^= *((uint16_t*)src); src += 2; } @@ -214,8 +215,9 @@ static int zmbv_decode_xor_16(ZmbvContext *c) output += c->width * c->bh; prev += c->width * c->bh; } - if(src - c->decomp_buf != c->decomp_len) - av_log(c->avctx, AV_LOG_ERROR, "Used %ti of %i bytes\n", src-c->decomp_buf, c->decomp_len); + if (src - c->decomp_buf != c->decomp_len) + av_log(c->avctx, AV_LOG_ERROR, "Used %ti of %i bytes\n", + src-c->decomp_buf, c->decomp_len); return 0; } @@ -244,9 +246,9 @@ static int zmbv_decode_xor_24(ZmbvContext *c) src += ((c->bx * c->by * 2 + 3) & ~3); block = 0; - for(y = 0; y < c->height; y += c->bh) { + for (y = 0; y < c->height; y += c->bh) { bh2 = ((c->height - y) > c->bh) ? 
c->bh : (c->height - y); - for(x = 0; x < c->width; x += c->bw) { + for (x = 0; x < c->width; x += c->bw) { uint8_t *out, *tprev; d = mvec[block] & 1; @@ -261,12 +263,12 @@ static int zmbv_decode_xor_24(ZmbvContext *c) tprev = prev + (x + dx) * 3 + dy * stride; mx = x + dx; my = y + dy; - for(j = 0; j < bh2; j++){ - if((my + j < 0) || (my + j >= c->height)) { + for (j = 0; j < bh2; j++) { + if (my + j < 0 || my + j >= c->height) { memset(out, 0, bw2 * 3); } else { - for(i = 0; i < bw2; i++){ - if((mx + i < 0) || (mx + i >= c->width)) { + for (i = 0; i < bw2; i++){ + if (mx + i < 0 || mx + i >= c->width) { out[i * 3 + 0] = 0; out[i * 3 + 1] = 0; out[i * 3 + 2] = 0; @@ -281,10 +283,10 @@ static int zmbv_decode_xor_24(ZmbvContext *c) tprev += stride; } - if(d) { /* apply XOR'ed difference */ + if (d) { /* apply XOR'ed difference */ out = output + x * 3; - for(j = 0; j < bh2; j++){ - for(i = 0; i < bw2; i++) { + for (j = 0; j < bh2; j++) { + for (i = 0; i < bw2; i++) { out[i * 3 + 0] ^= *src++; out[i * 3 + 1] ^= *src++; out[i * 3 + 2] ^= *src++; @@ -296,8 +298,9 @@ static int zmbv_decode_xor_24(ZmbvContext *c) output += stride * c->bh; prev += stride * c->bh; } - if(src - c->decomp_buf != c->decomp_len) - av_log(c->avctx, AV_LOG_ERROR, "Used %i of %i bytes\n", src-c->decomp_buf, c->decomp_len); + if (src - c->decomp_buf != c->decomp_len) + av_log(c->avctx, AV_LOG_ERROR, "Used %i of %i bytes\n", + src-c->decomp_buf, c->decomp_len); return 0; } #endif //ZMBV_ENABLE_24BPP @@ -324,9 +327,9 @@ static int zmbv_decode_xor_32(ZmbvContext *c) src += ((c->bx * c->by * 2 + 3) & ~3); block = 0; - for(y = 0; y < c->height; y += c->bh) { + for (y = 0; y < c->height; y += c->bh) { bh2 = ((c->height - y) > c->bh) ? c->bh : (c->height - y); - for(x = 0; x < c->width; x += c->bw) { + for (x = 0; x < c->width; x += c->bw) { uint32_t *out, *tprev; d = mvec[block] & 1; @@ -341,12 +344,12 @@ static int zmbv_decode_xor_32(ZmbvContext *c) tprev = prev + x + dx + dy * c->width; mx = x + dx; my = y + dy; - for(j = 0; j < bh2; j++){ - if((my + j < 0) || (my + j >= c->height)) { + for (j = 0; j < bh2; j++) { + if (my + j < 0 || my + j >= c->height) { memset(out, 0, bw2 * 4); } else { - for(i = 0; i < bw2; i++){ - if((mx + i < 0) || (mx + i >= c->width)) + for (i = 0; i < bw2; i++){ + if (mx + i < 0 || mx + i >= c->width) out[i] = 0; else out[i] = tprev[i]; @@ -356,11 +359,11 @@ static int zmbv_decode_xor_32(ZmbvContext *c) tprev += c->width; } - if(d) { /* apply XOR'ed difference */ + if (d) { /* apply XOR'ed difference */ out = output + x; - for(j = 0; j < bh2; j++){ - for(i = 0; i < bw2; i++) { - out[i] ^= *((uint32_t*)src); + for (j = 0; j < bh2; j++){ + for (i = 0; i < bw2; i++) { + out[i] ^= *((uint32_t *) src); src += 4; } out += c->width; @@ -368,10 +371,11 @@ static int zmbv_decode_xor_32(ZmbvContext *c) } } output += c->width * c->bh; - prev += c->width * c->bh; + prev += c->width * c->bh; } - if(src - c->decomp_buf != c->decomp_len) - av_log(c->avctx, AV_LOG_ERROR, "Used %ti of %i bytes\n", src-c->decomp_buf, c->decomp_len); + if (src - c->decomp_buf != c->decomp_len) + av_log(c->avctx, AV_LOG_ERROR, "Used %ti of %i bytes\n", + src-c->decomp_buf, c->decomp_len); return 0; } @@ -401,12 +405,12 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac int len = buf_size; int hi_ver, lo_ver; - if(c->pic.data[0]) + if (c->pic.data[0]) avctx->release_buffer(avctx, &c->pic); c->pic.reference = 3; c->pic.buffer_hints = FF_BUFFER_HINTS_VALID; - if(avctx->get_buffer(avctx, &c->pic) < 0){ + if 
(avctx->get_buffer(avctx, &c->pic) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } @@ -414,7 +418,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac /* parse header */ c->flags = buf[0]; buf++; len--; - if(c->flags & ZMBV_KEYFRAME) { + if (c->flags & ZMBV_KEYFRAME) { void *decode_intra = NULL; c->decode_intra= NULL; hi_ver = buf[0]; @@ -426,21 +430,26 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac buf += 6; len -= 6; - av_log(avctx, AV_LOG_DEBUG, "Flags=%X ver=%i.%i comp=%i fmt=%i blk=%ix%i\n",c->flags,hi_ver,lo_ver,c->comp,c->fmt,c->bw,c->bh); - if(hi_ver != 0 || lo_ver != 1) { - av_log(avctx, AV_LOG_ERROR, "Unsupported version %i.%i\n", hi_ver, lo_ver); + av_log(avctx, AV_LOG_DEBUG, + "Flags=%X ver=%i.%i comp=%i fmt=%i blk=%ix%i\n", + c->flags,hi_ver,lo_ver,c->comp,c->fmt,c->bw,c->bh); + if (hi_ver != 0 || lo_ver != 1) { + av_log(avctx, AV_LOG_ERROR, "Unsupported version %i.%i\n", + hi_ver, lo_ver); return -1; } - if(c->bw == 0 || c->bh == 0) { - av_log(avctx, AV_LOG_ERROR, "Unsupported block size %ix%i\n", c->bw, c->bh); + if (c->bw == 0 || c->bh == 0) { + av_log(avctx, AV_LOG_ERROR, "Unsupported block size %ix%i\n", + c->bw, c->bh); return -1; } - if(c->comp != 0 && c->comp != 1) { - av_log(avctx, AV_LOG_ERROR, "Unsupported compression type %i\n", c->comp); + if (c->comp != 0 && c->comp != 1) { + av_log(avctx, AV_LOG_ERROR, "Unsupported compression type %i\n", + c->comp); return -1; } - switch(c->fmt) { + switch (c->fmt) { case ZMBV_FMT_8BPP: c->bpp = 8; decode_intra = zmbv_decode_intra; @@ -466,7 +475,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac break; default: c->decode_xor = NULL; - av_log(avctx, AV_LOG_ERROR, "Unsupported (for now) format %i\n", c->fmt); + av_log(avctx, AV_LOG_ERROR, + "Unsupported (for now) format %i\n", c->fmt); return -1; } @@ -476,21 +486,21 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac return -1; } - c->cur = av_realloc_f(c->cur, avctx->width * avctx->height, (c->bpp / 8)); + c->cur = av_realloc_f(c->cur, avctx->width * avctx->height, (c->bpp / 8)); c->prev = av_realloc_f(c->prev, avctx->width * avctx->height, (c->bpp / 8)); c->bx = (c->width + c->bw - 1) / c->bw; c->by = (c->height+ c->bh - 1) / c->bh; - if(!c->cur || !c->prev) + if (!c->cur || !c->prev) return -1; c->decode_intra= decode_intra; } - if(c->decode_intra == NULL) { + if (c->decode_intra == NULL) { av_log(avctx, AV_LOG_ERROR, "Error! 
Got no format or no keyframe!\n"); return -1; } - if(c->comp == 0) { //Uncompressed data + if (c->comp == 0) { //Uncompressed data memcpy(c->decomp_buf, buf, len); c->decomp_size = 1; } else { // ZLIB-compressed data @@ -502,14 +512,14 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac inflate(&c->zstream, Z_FINISH); c->decomp_len = c->zstream.total_out; } - if(c->flags & ZMBV_KEYFRAME) { + if (c->flags & ZMBV_KEYFRAME) { c->pic.key_frame = 1; c->pic.pict_type = AV_PICTURE_TYPE_I; c->decode_intra(c); } else { c->pic.key_frame = 0; c->pic.pict_type = AV_PICTURE_TYPE_P; - if(c->decomp_len) + if (c->decomp_len) c->decode_xor(c); } @@ -520,10 +530,10 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac out = c->pic.data[0]; src = c->cur; - switch(c->fmt) { + switch (c->fmt) { case ZMBV_FMT_8BPP: - for(j = 0; j < c->height; j++) { - for(i = 0; i < c->width; i++) { + for (j = 0; j < c->height; j++) { + for (i = 0; i < c->width; i++) { out[i * 3 + 0] = c->pal[(*src) * 3 + 0]; out[i * 3 + 1] = c->pal[(*src) * 3 + 1]; out[i * 3 + 2] = c->pal[(*src) * 3 + 2]; @@ -533,8 +543,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac } break; case ZMBV_FMT_15BPP: - for(j = 0; j < c->height; j++) { - for(i = 0; i < c->width; i++) { + for (j = 0; j < c->height; j++) { + for (i = 0; i < c->width; i++) { uint16_t tmp = AV_RL16(src); src += 2; out[i * 3 + 0] = (tmp & 0x7C00) >> 7; @@ -545,8 +555,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac } break; case ZMBV_FMT_16BPP: - for(j = 0; j < c->height; j++) { - for(i = 0; i < c->width; i++) { + for (j = 0; j < c->height; j++) { + for (i = 0; i < c->width; i++) { uint16_t tmp = AV_RL16(src); src += 2; out[i * 3 + 0] = (tmp & 0xF800) >> 8; @@ -558,7 +568,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac break; #ifdef ZMBV_ENABLE_24BPP case ZMBV_FMT_24BPP: - for(j = 0; j < c->height; j++) { + for (j = 0; j < c->height; j++) { memcpy(out, src, c->width * 3); src += c->width * 3; out += c->pic.linesize[0]; @@ -566,8 +576,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac break; #endif //ZMBV_ENABLE_24BPP case ZMBV_FMT_32BPP: - for(j = 0; j < c->height; j++) { - for(i = 0; i < c->width; i++) { + for (j = 0; j < c->height; j++) { + for (i = 0; i < c->width; i++) { uint32_t tmp = AV_RL32(src); src += 4; AV_WB24(out+(i*3), tmp); @@ -616,7 +626,8 @@ static av_cold int decode_init(AVCodecContext *avctx) /* Allocate decompression buffer */ if (c->decomp_size) { if ((c->decomp_buf = av_malloc(c->decomp_size)) == NULL) { - av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n"); + av_log(avctx, AV_LOG_ERROR, + "Can't allocate decompression buffer.\n"); return 1; } } diff --git a/libavformat/oma.c b/libavformat/oma.c index 2fc6cfc554..0363f7fc50 100644 --- a/libavformat/oma.c +++ b/libavformat/oma.c @@ -61,9 +61,10 @@ enum { }; static const AVCodecTag codec_oma_tags[] = { - { CODEC_ID_ATRAC3, OMA_CODECID_ATRAC3 }, - { CODEC_ID_ATRAC3P, OMA_CODECID_ATRAC3P }, - { CODEC_ID_MP3, OMA_CODECID_MP3 }, + { CODEC_ID_ATRAC3, OMA_CODECID_ATRAC3 }, + { CODEC_ID_ATRAC3P, OMA_CODECID_ATRAC3P }, + { CODEC_ID_MP3, OMA_CODECID_MP3 }, + { CODEC_ID_PCM_S16BE, OMA_CODECID_LPCM }, }; static const uint64_t leaf_table[] = { @@ -205,8 +206,8 @@ static int decrypt_init(AVFormatContext *s, ID3v2ExtraMeta *em, uint8_t *header) while (em) { if (!strcmp(em->tag, "GEOB") && (geob = em->data) 
&& - !strcmp(geob->description, "OMG_LSI") || - !strcmp(geob->description, "OMG_BKLSI")) { + (!strcmp(geob->description, "OMG_LSI") || + !strcmp(geob->description, "OMG_BKLSI"))) { break; } em = em->next; @@ -361,6 +362,16 @@ static int oma_read_header(AVFormatContext *s, st->need_parsing = AVSTREAM_PARSE_FULL; framesize = 1024; break; + case OMA_CODECID_LPCM: + /* PCM 44.1 kHz 16 bit stereo big-endian */ + st->codec->channels = 2; + st->codec->sample_rate = 44100; + framesize = 1024; + /* bit rate = sample rate x PCM block align (= 4) x 8 */ + st->codec->bit_rate = st->codec->sample_rate * 32; + st->codec->bits_per_coded_sample = av_get_bits_per_sample(st->codec->codec_id); + avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate); + break; default: av_log(s, AV_LOG_ERROR, "Unsupported codec %d!\n",buf[32]); return -1; @@ -397,14 +408,20 @@ static int oma_read_probe(AVProbeData *p) unsigned tag_len = 0; buf = p->buf; - /* version must be 3 and flags byte zero */ - if (ff_id3v2_match(buf, ID3v2_EA3_MAGIC) && buf[3] == 3 && !buf[4]) - tag_len = ff_id3v2_tag_len(buf); - // This check cannot overflow as tag_len has at most 28 bits - if (p->buf_size < tag_len + 5) + if (p->buf_size < ID3v2_HEADER_SIZE || + !ff_id3v2_match(buf, ID3v2_EA3_MAGIC) || + buf[3] != 3 || // version must be 3 + buf[4]) // flags byte zero return 0; + tag_len = ff_id3v2_tag_len(buf); + + /* This check cannot overflow as tag_len has at most 28 bits */ + if (p->buf_size < tag_len + 5) + /* EA3 header comes late, might be outside of the probe buffer */ + return AVPROBE_SCORE_MAX / 2; + buf += tag_len; if (!memcmp(buf, "EA3", 3) && !buf[4] && buf[5] == EA3_HEADER_SIZE) diff --git a/libavformat/yuv4mpeg.c b/libavformat/yuv4mpeg.c index 6db51968e7..dad29d8d55 100644 --- a/libavformat/yuv4mpeg.c +++ b/libavformat/yuv4mpeg.c @@ -39,23 +39,24 @@ static int yuv4_generate_header(AVFormatContext *s, char* buf) char inter; const char *colorspace = ""; - st = s->streams[0]; - width = st->codec->width; + st = s->streams[0]; + width = st->codec->width; height = st->codec->height; - av_reduce(&raten, &rated, st->codec->time_base.den, st->codec->time_base.num, (1UL<<31)-1); + av_reduce(&raten, &rated, st->codec->time_base.den, + st->codec->time_base.num, (1UL << 31) - 1); aspectn = st->sample_aspect_ratio.num; aspectd = st->sample_aspect_ratio.den; - if ( aspectn == 0 && aspectd == 1 ) aspectd = 0; // 0:0 means unknown + if (aspectn == 0 && aspectd == 1) + aspectd = 0; // 0:0 means unknown inter = 'p'; /* progressive is the default */ - if (st->codec->coded_frame && st->codec->coded_frame->interlaced_frame) { + if (st->codec->coded_frame && st->codec->coded_frame->interlaced_frame) inter = st->codec->coded_frame->top_field_first ? 't' : 'b'; - } - switch(st->codec->pix_fmt) { + switch (st->codec->pix_fmt) { case PIX_FMT_GRAY8: colorspace = " Cmono"; break; @@ -63,9 +64,11 @@ static int yuv4_generate_header(AVFormatContext *s, char* buf) colorspace = " C411 XYSCSS=411"; break; case PIX_FMT_YUV420P: - colorspace = (st->codec->chroma_sample_location == AVCHROMA_LOC_TOPLEFT)?" C420paldv XYSCSS=420PALDV": - (st->codec->chroma_sample_location == AVCHROMA_LOC_LEFT) ?" 
C420mpeg2 XYSCSS=420MPEG2": - " C420jpeg XYSCSS=420JPEG"; + switch (st->codec->chroma_sample_location) { + case AVCHROMA_LOC_TOPLEFT: colorspace = " C420paldv XYSCSS=420PALDV"; break; + case AVCHROMA_LOC_LEFT: colorspace = " C420mpeg2 XYSCSS=420MPEG2"; break; + default: colorspace = " C420jpeg XYSCSS=420JPEG"; break; + } break; case PIX_FMT_YUV422P: colorspace = " C422 XYSCSS=422"; @@ -77,13 +80,8 @@ static int yuv4_generate_header(AVFormatContext *s, char* buf) /* construct stream header, if this is the first frame */ n = snprintf(buf, Y4M_LINE_MAX, "%s W%d H%d F%d:%d I%c A%d:%d%s\n", - Y4M_MAGIC, - width, - height, - raten, rated, - inter, - aspectn, aspectd, - colorspace); + Y4M_MAGIC, width, height, raten, rated, inter, + aspectn, aspectd, colorspace); return n; } @@ -96,7 +94,7 @@ static int yuv4_write_packet(AVFormatContext *s, AVPacket *pkt) int* first_pkt = s->priv_data; int width, height, h_chroma_shift, v_chroma_shift; int i; - char buf2[Y4M_LINE_MAX+1]; + char buf2[Y4M_LINE_MAX + 1]; char buf1[20]; uint8_t *ptr, *ptr1, *ptr2; @@ -106,7 +104,8 @@ static int yuv4_write_packet(AVFormatContext *s, AVPacket *pkt) if (*first_pkt) { *first_pkt = 0; if (yuv4_generate_header(s, buf2) < 0) { - av_log(s, AV_LOG_ERROR, "Error. YUV4MPEG stream header write failed.\n"); + av_log(s, AV_LOG_ERROR, + "Error. YUV4MPEG stream header write failed.\n"); return AVERROR(EIO); } else { avio_write(pb, buf2, strlen(buf2)); @@ -118,31 +117,32 @@ static int yuv4_write_packet(AVFormatContext *s, AVPacket *pkt) snprintf(buf1, sizeof(buf1), "%s\n", Y4M_FRAME_MAGIC); avio_write(pb, buf1, strlen(buf1)); - width = st->codec->width; + width = st->codec->width; height = st->codec->height; ptr = picture->data[0]; - for(i=0;i<height;i++) { + for (i = 0; i < height; i++) { avio_write(pb, ptr, width); ptr += picture->linesize[0]; } - if (st->codec->pix_fmt != PIX_FMT_GRAY8){ - // Adjust for smaller Cb and Cr planes - avcodec_get_chroma_sub_sample(st->codec->pix_fmt, &h_chroma_shift, &v_chroma_shift); - width >>= h_chroma_shift; - height >>= v_chroma_shift; - - ptr1 = picture->data[1]; - ptr2 = picture->data[2]; - for(i=0;i<height;i++) { /* Cb */ - avio_write(pb, ptr1, width); - ptr1 += picture->linesize[1]; - } - for(i=0;i<height;i++) { /* Cr */ - avio_write(pb, ptr2, width); + if (st->codec->pix_fmt != PIX_FMT_GRAY8) { + // Adjust for smaller Cb and Cr planes + avcodec_get_chroma_sub_sample(st->codec->pix_fmt, &h_chroma_shift, + &v_chroma_shift); + width >>= h_chroma_shift; + height >>= v_chroma_shift; + + ptr1 = picture->data[1]; + ptr2 = picture->data[2]; + for (i = 0; i < height; i++) { /* Cb */ + avio_write(pb, ptr1, width); + ptr1 += picture->linesize[1]; + } + for (i = 0; i < height; i++) { /* Cr */ + avio_write(pb, ptr2, width); ptr2 += picture->linesize[2]; - } + } } avio_flush(pb); return 0; @@ -150,7 +150,7 @@ static int yuv4_write_packet(AVFormatContext *s, AVPacket *pkt) static int yuv4_write_header(AVFormatContext *s) { - int* first_pkt = s->priv_data; + int *first_pkt = s->priv_data; if (s->nb_streams != 1) return AVERROR(EIO); @@ -162,13 +162,15 @@ static int yuv4_write_header(AVFormatContext *s) } if (s->streams[0]->codec->pix_fmt == PIX_FMT_YUV411P) { - av_log(s, AV_LOG_ERROR, "Warning: generating rarely used 4:1:1 YUV stream, some mjpegtools might not work.\n"); - } - else if ((s->streams[0]->codec->pix_fmt != PIX_FMT_YUV420P) && - (s->streams[0]->codec->pix_fmt != PIX_FMT_YUV422P) && - (s->streams[0]->codec->pix_fmt != PIX_FMT_GRAY8) && - (s->streams[0]->codec->pix_fmt != PIX_FMT_YUV444P)) { - 
av_log(s, AV_LOG_ERROR, "ERROR: yuv4mpeg only handles yuv444p, yuv422p, yuv420p, yuv411p and gray pixel formats. Use -pix_fmt to select one.\n"); + av_log(s, AV_LOG_ERROR, "Warning: generating rarely used 4:1:1 YUV " + "stream, some mjpegtools might not work.\n"); + } else if ((s->streams[0]->codec->pix_fmt != PIX_FMT_YUV420P) && + (s->streams[0]->codec->pix_fmt != PIX_FMT_YUV422P) && + (s->streams[0]->codec->pix_fmt != PIX_FMT_GRAY8) && + (s->streams[0]->codec->pix_fmt != PIX_FMT_YUV444P)) { + av_log(s, AV_LOG_ERROR, "ERROR: yuv4mpeg only handles yuv444p, " + "yuv422p, yuv420p, yuv411p and gray pixel formats. " + "Use -pix_fmt to select one.\n"); return AVERROR(EIO); } @@ -186,7 +188,7 @@ AVOutputFormat ff_yuv4mpegpipe_muxer = { .video_codec = CODEC_ID_RAWVIDEO, .write_header = yuv4_write_header, .write_packet = yuv4_write_packet, - .flags = AVFMT_RAWPICTURE, + .flags = AVFMT_RAWPICTURE, }; #endif @@ -196,85 +198,96 @@ AVOutputFormat ff_yuv4mpegpipe_muxer = { static int yuv4_read_header(AVFormatContext *s, AVFormatParameters *ap) { - char header[MAX_YUV4_HEADER+10]; // Include headroom for the longest option - char *tokstart,*tokend,*header_end; + char header[MAX_YUV4_HEADER + 10]; // Include headroom for + // the longest option + char *tokstart, *tokend, *header_end; int i; AVIOContext *pb = s->pb; - int width=-1, height=-1, raten=0, rated=0, aspectn=0, aspectd=0; - enum PixelFormat pix_fmt=PIX_FMT_NONE,alt_pix_fmt=PIX_FMT_NONE; + int width = -1, height = -1, raten = 0, + rated = 0, aspectn = 0, aspectd = 0; + enum PixelFormat pix_fmt = PIX_FMT_NONE, alt_pix_fmt = PIX_FMT_NONE; enum AVChromaLocation chroma_sample_location = AVCHROMA_LOC_UNSPECIFIED; AVStream *st; struct frame_attributes *s1 = s->priv_data; - for (i=0; i<MAX_YUV4_HEADER; i++) { + for (i = 0; i < MAX_YUV4_HEADER; i++) { header[i] = avio_r8(pb); if (header[i] == '\n') { - header[i+1] = 0x20; // Add a space after last option. Makes parsing "444" vs "444alpha" easier. - header[i+2] = 0; + header[i + 1] = 0x20; // Add a space after last option. + // Makes parsing "444" vs "444alpha" easier. + header[i + 2] = 0; break; } } - if (i == MAX_YUV4_HEADER) return -1; - if (strncmp(header, Y4M_MAGIC, strlen(Y4M_MAGIC))) return -1; + if (i == MAX_YUV4_HEADER) + return -1; + if (strncmp(header, Y4M_MAGIC, strlen(Y4M_MAGIC))) + return -1; s1->interlaced_frame = 0; s1->top_field_first = 0; - header_end = &header[i+1]; // Include space - for(tokstart = &header[strlen(Y4M_MAGIC) + 1]; tokstart < header_end; tokstart++) { - if (*tokstart==0x20) continue; + header_end = &header[i + 1]; // Include space + for (tokstart = &header[strlen(Y4M_MAGIC) + 1]; + tokstart < header_end; tokstart++) { + if (*tokstart == 0x20) + continue; switch (*tokstart++) { case 'W': // Width. Required. - width = strtol(tokstart, &tokend, 10); - tokstart=tokend; + width = strtol(tokstart, &tokend, 10); + tokstart = tokend; break; case 'H': // Height. Required. 
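
For reference, the stream header that yuv4_generate_header() emits and that yuv4_read_header() parses token by token is a single text line. A hedged, standalone example of producing one with the same format string as the muxer hunk above; the concrete values (720x576, 25/1 fps, progressive, 16:15 pixel aspect, MPEG-2 chroma siting) are made-up sample inputs, not taken from the commit:

#include <stdio.h>

int main(void)
{
    char buf[256];
    /* Same layout as yuv4_generate_header(): magic, W, H, F rate, I, A aspect,
     * optional colorspace token. */
    snprintf(buf, sizeof(buf), "%s W%d H%d F%d:%d I%c A%d:%d%s\n",
             "YUV4MPEG2", 720, 576, 25, 1, 'p', 16, 15,
             " C420mpeg2 XYSCSS=420MPEG2");
    fputs(buf, stdout);  /* YUV4MPEG2 W720 H576 F25:1 Ip A16:15 C420mpeg2 XYSCSS=420MPEG2 */
    return 0;
}

The demuxer's parsing loop below walks exactly this line, dispatching on the leading letter of each space-separated token.
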
- height = strtol(tokstart, &tokend, 10); - tokstart=tokend; + height = strtol(tokstart, &tokend, 10); + tokstart = tokend; break; case 'C': // Color space - if (strncmp("420jpeg",tokstart,7)==0) { + if (strncmp("420jpeg", tokstart, 7) == 0) { pix_fmt = PIX_FMT_YUV420P; chroma_sample_location = AVCHROMA_LOC_CENTER; - } else if (strncmp("420mpeg2",tokstart,8)==0) { + } else if (strncmp("420mpeg2", tokstart, 8) == 0) { pix_fmt = PIX_FMT_YUV420P; chroma_sample_location = AVCHROMA_LOC_LEFT; - } else if (strncmp("420paldv", tokstart, 8)==0) { + } else if (strncmp("420paldv", tokstart, 8) == 0) { pix_fmt = PIX_FMT_YUV420P; chroma_sample_location = AVCHROMA_LOC_TOPLEFT; - } else if (strncmp("411", tokstart, 3)==0) + } else if (strncmp("411", tokstart, 3) == 0) pix_fmt = PIX_FMT_YUV411P; - else if (strncmp("422", tokstart, 3)==0) + else if (strncmp("422", tokstart, 3) == 0) pix_fmt = PIX_FMT_YUV422P; - else if (strncmp("444alpha", tokstart, 8)==0) { - av_log(s, AV_LOG_ERROR, "Cannot handle 4:4:4:4 YUV4MPEG stream.\n"); + else if (strncmp("444alpha", tokstart, 8) == 0 ) { + av_log(s, AV_LOG_ERROR, "Cannot handle 4:4:4:4 " + "YUV4MPEG stream.\n"); return -1; - } else if (strncmp("444", tokstart, 3)==0) + } else if (strncmp("444", tokstart, 3) == 0) pix_fmt = PIX_FMT_YUV444P; - else if (strncmp("mono",tokstart, 4)==0) { + else if (strncmp("mono", tokstart, 4) == 0) { pix_fmt = PIX_FMT_GRAY8; } else { - av_log(s, AV_LOG_ERROR, "YUV4MPEG stream contains an unknown pixel format.\n"); + av_log(s, AV_LOG_ERROR, "YUV4MPEG stream contains an unknown " + "pixel format.\n"); return -1; } - while(tokstart<header_end&&*tokstart!=0x20) tokstart++; + while (tokstart < header_end && *tokstart != 0x20) + tokstart++; break; case 'I': // Interlace type switch (*tokstart++){ case '?': break; case 'p': - s1->interlaced_frame=0; + s1->interlaced_frame = 0; break; case 't': - s1->interlaced_frame=1; - s1->top_field_first=1; + s1->interlaced_frame = 1; + s1->top_field_first = 1; break; case 'b': - s1->interlaced_frame=1; - s1->top_field_first=0; + s1->interlaced_frame = 1; + s1->top_field_first = 0; break; case 'm': - av_log(s, AV_LOG_ERROR, "YUV4MPEG stream contains mixed interlaced and non-interlaced frames.\n"); + av_log(s, AV_LOG_ERROR, "YUV4MPEG stream contains mixed " + "interlaced and non-interlaced frames.\n"); return -1; default: av_log(s, AV_LOG_ERROR, "YUV4MPEG has invalid header.\n"); @@ -282,36 +295,39 @@ static int yuv4_read_header(AVFormatContext *s, AVFormatParameters *ap) } break; case 'F': // Frame rate - sscanf(tokstart,"%d:%d",&raten,&rated); // 0:0 if unknown - while(tokstart<header_end&&*tokstart!=0x20) tokstart++; + sscanf(tokstart, "%d:%d", &raten, &rated); // 0:0 if unknown + while (tokstart < header_end && *tokstart != 0x20) + tokstart++; break; case 'A': // Pixel aspect - sscanf(tokstart,"%d:%d",&aspectn,&aspectd); // 0:0 if unknown - while(tokstart<header_end&&*tokstart!=0x20) tokstart++; + sscanf(tokstart, "%d:%d", &aspectn, &aspectd); // 0:0 if unknown + while (tokstart < header_end && *tokstart != 0x20) + tokstart++; break; case 'X': // Vendor extensions - if (strncmp("YSCSS=",tokstart,6)==0) { + if (strncmp("YSCSS=", tokstart, 6) == 0) { // Older nonstandard pixel format representation - tokstart+=6; - if (strncmp("420JPEG",tokstart,7)==0) - alt_pix_fmt=PIX_FMT_YUV420P; - else if (strncmp("420MPEG2",tokstart,8)==0) - alt_pix_fmt=PIX_FMT_YUV420P; - else if (strncmp("420PALDV",tokstart,8)==0) - alt_pix_fmt=PIX_FMT_YUV420P; - else if (strncmp("411",tokstart,3)==0) - 
alt_pix_fmt=PIX_FMT_YUV411P; - else if (strncmp("422",tokstart,3)==0) - alt_pix_fmt=PIX_FMT_YUV422P; - else if (strncmp("444",tokstart,3)==0) - alt_pix_fmt=PIX_FMT_YUV444P; + tokstart += 6; + if (strncmp("420JPEG", tokstart, 7) == 0) + alt_pix_fmt = PIX_FMT_YUV420P; + else if (strncmp("420MPEG2", tokstart, 8) == 0) + alt_pix_fmt = PIX_FMT_YUV420P; + else if (strncmp("420PALDV", tokstart, 8) == 0) + alt_pix_fmt = PIX_FMT_YUV420P; + else if (strncmp("411", tokstart, 3) == 0) + alt_pix_fmt = PIX_FMT_YUV411P; + else if (strncmp("422", tokstart, 3) == 0) + alt_pix_fmt = PIX_FMT_YUV422P; + else if (strncmp("444", tokstart, 3) == 0) + alt_pix_fmt = PIX_FMT_YUV444P; } - while(tokstart<header_end&&*tokstart!=0x20) tokstart++; + while (tokstart < header_end && *tokstart != 0x20) + tokstart++; break; } } - if ((width == -1) || (height == -1)) { + if (width == -1 || height == -1) { av_log(s, AV_LOG_ERROR, "YUV4MPEG has invalid header.\n"); return -1; } @@ -335,16 +351,16 @@ static int yuv4_read_header(AVFormatContext *s, AVFormatParameters *ap) } st = avformat_new_stream(s, NULL); - if(!st) + if (!st) return AVERROR(ENOMEM); - st->codec->width = width; + st->codec->width = width; st->codec->height = height; - av_reduce(&raten, &rated, raten, rated, (1UL<<31)-1); + av_reduce(&raten, &rated, raten, rated, (1UL << 31) - 1); avpriv_set_pts_info(st, 64, rated, raten); - st->codec->pix_fmt = pix_fmt; - st->codec->codec_type = AVMEDIA_TYPE_VIDEO; - st->codec->codec_id = CODEC_ID_RAWVIDEO; - st->sample_aspect_ratio= (AVRational){aspectn, aspectd}; + st->codec->pix_fmt = pix_fmt; + st->codec->codec_type = AVMEDIA_TYPE_VIDEO; + st->codec->codec_id = CODEC_ID_RAWVIDEO; + st->sample_aspect_ratio = (AVRational){ aspectn, aspectd }; st->codec->chroma_sample_location = chroma_sample_location; return 0; @@ -358,17 +374,19 @@ static int yuv4_read_packet(AVFormatContext *s, AVPacket *pkt) AVStream *st = s->streams[0]; struct frame_attributes *s1 = s->priv_data; - for (i=0; i<MAX_FRAME_HEADER; i++) { + for (i = 0; i < MAX_FRAME_HEADER; i++) { header[i] = avio_r8(s->pb); if (header[i] == '\n') { - header[i+1] = 0; + header[i + 1] = 0; break; } } - if (i == MAX_FRAME_HEADER) return -1; - if (strncmp(header, Y4M_FRAME_MAGIC, strlen(Y4M_FRAME_MAGIC))) return -1; + if (i == MAX_FRAME_HEADER) + return -1; + if (strncmp(header, Y4M_FRAME_MAGIC, strlen(Y4M_FRAME_MAGIC))) + return -1; - width = st->codec->width; + width = st->codec->width; height = st->codec->height; packet_size = avpicture_get_size(st->codec->pix_fmt, width, height); @@ -378,9 +396,9 @@ static int yuv4_read_packet(AVFormatContext *s, AVPacket *pkt) if (av_get_packet(s->pb, pkt, packet_size) != packet_size) return AVERROR(EIO); - if (s->streams[0]->codec->coded_frame) { - s->streams[0]->codec->coded_frame->interlaced_frame = s1->interlaced_frame; - s->streams[0]->codec->coded_frame->top_field_first = s1->top_field_first; + if (st->codec->coded_frame) { + st->codec->coded_frame->interlaced_frame = s1->interlaced_frame; + st->codec->coded_frame->top_field_first = s1->top_field_first; } pkt->stream_index = 0; @@ -390,7 +408,7 @@ static int yuv4_read_packet(AVFormatContext *s, AVPacket *pkt) static int yuv4_probe(AVProbeData *pd) { /* check file header */ - if (strncmp(pd->buf, Y4M_MAGIC, sizeof(Y4M_MAGIC)-1)==0) + if (strncmp(pd->buf, Y4M_MAGIC, sizeof(Y4M_MAGIC) - 1) == 0) return AVPROBE_SCORE_MAX; else return 0; @@ -404,6 +422,6 @@ AVInputFormat ff_yuv4mpegpipe_demuxer = { .read_probe = yuv4_probe, .read_header = yuv4_read_header, .read_packet = yuv4_read_packet, 
- .extensions = "y4m" + .extensions = "y4m" }; #endif diff --git a/libavutil/crc.c b/libavutil/crc.c index d0e736ed4d..d640184876 100644 --- a/libavutil/crc.c +++ b/libavutil/crc.c @@ -56,32 +56,34 @@ static AVCRC av_crc_table[AV_CRC_MAX][257]; * @param ctx_size size of ctx in bytes * @return <0 on failure */ -int av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size){ +int av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size) +{ unsigned i, j; uint32_t c; - if (bits < 8 || bits > 32 || poly >= (1LL<<bits)) + if (bits < 8 || bits > 32 || poly >= (1LL << bits)) return -1; - if (ctx_size != sizeof(AVCRC)*257 && ctx_size != sizeof(AVCRC)*1024) + if (ctx_size != sizeof(AVCRC) * 257 && ctx_size != sizeof(AVCRC) * 1024) return -1; for (i = 0; i < 256; i++) { if (le) { for (c = i, j = 0; j < 8; j++) - c = (c>>1)^(poly & (-(c&1))); + c = (c >> 1) ^ (poly & (-(c & 1))); ctx[i] = c; } else { for (c = i << 24, j = 0; j < 8; j++) - c = (c<<1) ^ ((poly<<(32-bits)) & (((int32_t)c)>>31) ); + c = (c << 1) ^ ((poly << (32 - bits)) & (((int32_t) c) >> 31)); ctx[i] = av_bswap32(c); } } - ctx[256]=1; + ctx[256] = 1; #if !CONFIG_SMALL - if(ctx_size >= sizeof(AVCRC)*1024) + if (ctx_size >= sizeof(AVCRC) * 1024) for (i = 0; i < 256; i++) - for(j=0; j<3; j++) - ctx[256*(j+1) + i]= (ctx[256*j + i]>>8) ^ ctx[ ctx[256*j + i]&0xFF ]; + for (j = 0; j < 3; j++) + ctx[256 *(j + 1) + i] = + (ctx[256 * j + i] >> 8) ^ ctx[ctx[256 * j + i] & 0xFF]; #endif return 0; @@ -92,9 +94,10 @@ int av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size){ * @param crc_id ID of a standard CRC * @return a pointer to the CRC table or NULL on failure */ -const AVCRC *av_crc_get_table(AVCRCId crc_id){ +const AVCRC *av_crc_get_table(AVCRCId crc_id) +{ #if !CONFIG_HARDCODED_TABLES - if (!av_crc_table[crc_id][FF_ARRAY_ELEMS(av_crc_table[crc_id])-1]) + if (!av_crc_table[crc_id][FF_ARRAY_ELEMS(av_crc_table[crc_id]) - 1]) if (av_crc_init(av_crc_table[crc_id], av_crc_table_params[crc_id].le, av_crc_table_params[crc_id].bits, @@ -112,46 +115,50 @@ const AVCRC *av_crc_get_table(AVCRCId crc_id){ * * @see av_crc_init() "le" parameter */ -uint32_t av_crc(const AVCRC *ctx, uint32_t crc, const uint8_t *buffer, size_t length){ - const uint8_t *end= buffer+length; +uint32_t av_crc(const AVCRC *ctx, uint32_t crc, + const uint8_t *buffer, size_t length) +{ + const uint8_t *end = buffer + length; #if !CONFIG_SMALL - if(!ctx[256]) { - while(((intptr_t) buffer & 3) && buffer < end) - crc = ctx[((uint8_t)crc) ^ *buffer++] ^ (crc >> 8); + if (!ctx[256]) { + while (((intptr_t) buffer & 3) && buffer < end) + crc = ctx[((uint8_t) crc) ^ *buffer++] ^ (crc >> 8); - while(buffer<end-3){ - crc ^= av_le2ne32(*(const uint32_t*)buffer); buffer+=4; - crc = ctx[3*256 + ( crc &0xFF)] - ^ctx[2*256 + ((crc>>8 )&0xFF)] - ^ctx[1*256 + ((crc>>16)&0xFF)] - ^ctx[0*256 + ((crc>>24) )]; + while (buffer < end - 3) { + crc ^= av_le2ne32(*(const uint32_t *) buffer); buffer += 4; + crc = ctx[3 * 256 + ( crc & 0xFF)] ^ + ctx[2 * 256 + ((crc >> 8 ) & 0xFF)] ^ + ctx[1 * 256 + ((crc >> 16) & 0xFF)] ^ + ctx[0 * 256 + ((crc >> 24) )]; } } #endif - while(buffer<end) - crc = ctx[((uint8_t)crc) ^ *buffer++] ^ (crc >> 8); + while (buffer < end) + crc = ctx[((uint8_t) crc) ^ *buffer++] ^ (crc >> 8); return crc; } #ifdef TEST #undef printf -int main(void){ +int main(void) +{ uint8_t buf[1999]; int i; - int p[4][3]={{AV_CRC_32_IEEE_LE, 0xEDB88320, 0x3D5CDD04}, - {AV_CRC_32_IEEE , 0x04C11DB7, 0xC0F5BAE0}, - {AV_CRC_16_ANSI , 0x8005, 0x1FBB }, - 
{AV_CRC_8_ATM , 0x07, 0xE3 },}; + int p[4][3] = { { AV_CRC_32_IEEE_LE, 0xEDB88320, 0x3D5CDD04 }, + { AV_CRC_32_IEEE , 0x04C11DB7, 0xC0F5BAE0 }, + { AV_CRC_16_ANSI , 0x8005 , 0x1FBB }, + { AV_CRC_8_ATM , 0x07 , 0xE3 } + }; const AVCRC *ctx; - for(i=0; i<sizeof(buf); i++) - buf[i]= i+i*i; + for (i = 0; i < sizeof(buf); i++) + buf[i] = i + i * i; - for(i=0; i<4; i++){ + for (i = 0; i < 4; i++) { ctx = av_crc_get_table(p[i][0]); - printf("crc %08X =%X\n", p[i][1], av_crc(ctx, 0, buf, sizeof(buf))); + printf("crc %08X = %X\n", p[i][1], av_crc(ctx, 0, buf, sizeof(buf))); } return 0; } diff --git a/libavutil/lfg.c b/libavutil/lfg.c index b5db5a4b17..fb0b258ad4 100644 --- a/libavutil/lfg.c +++ b/libavutil/lfg.c @@ -27,19 +27,21 @@ #include "intreadwrite.h" #include "attributes.h" -void av_cold av_lfg_init(AVLFG *c, unsigned int seed){ - uint8_t tmp[16]={0}; +void av_cold av_lfg_init(AVLFG *c, unsigned int seed) +{ + uint8_t tmp[16] = { 0 }; int i; - for(i=8; i<64; i+=4){ - AV_WL32(tmp, seed); tmp[4]=i; - av_md5_sum(tmp, tmp, 16); - c->state[i ]= AV_RL32(tmp); - c->state[i+1]= AV_RL32(tmp+4); - c->state[i+2]= AV_RL32(tmp+8); - c->state[i+3]= AV_RL32(tmp+12); + for (i = 8; i < 64; i += 4) { + AV_WL32(tmp, seed); + tmp[4] = i; + av_md5_sum(tmp, tmp, 16); + c->state[i ] = AV_RL32(tmp); + c->state[i + 1] = AV_RL32(tmp + 4); + c->state[i + 2] = AV_RL32(tmp + 8); + c->state[i + 3] = AV_RL32(tmp + 12); } - c->index=0; + c->index = 0; } void av_bmg_get(AVLFG *lfg, double out[2]) @@ -47,9 +49,9 @@ void av_bmg_get(AVLFG *lfg, double out[2]) double x1, x2, w; do { - x1 = 2.0/UINT_MAX*av_lfg_get(lfg) - 1.0; - x2 = 2.0/UINT_MAX*av_lfg_get(lfg) - 1.0; - w = x1*x1 + x2*x2; + x1 = 2.0 / UINT_MAX * av_lfg_get(lfg) - 1.0; + x2 = 2.0 / UINT_MAX * av_lfg_get(lfg) - 1.0; + w = x1 * x1 + x2 * x2; } while (w >= 1.0); w = sqrt((-2.0 * log(w)) / w); @@ -63,7 +65,7 @@ void av_bmg_get(AVLFG *lfg, double out[2]) int main(void) { - int x=0; + int x = 0; int i, j; AVLFG state; @@ -71,8 +73,8 @@ int main(void) for (j = 0; j < 10000; j++) { START_TIMER for (i = 0; i < 624; i++) { -// av_log(NULL,AV_LOG_ERROR, "%X\n", av_lfg_get(&state)); - x+=av_lfg_get(&state); + //av_log(NULL, AV_LOG_ERROR, "%X\n", av_lfg_get(&state)); + x += av_lfg_get(&state); } STOP_TIMER("624 calls of av_lfg_get"); } diff --git a/libavutil/log.c b/libavutil/log.c index d38186f57a..3182885ace 100644 --- a/libavutil/log.c +++ b/libavutil/log.c @@ -34,49 +34,54 @@ static int flags; #if defined(_WIN32) && !defined(__MINGW32CE__) #include <windows.h> -static const uint8_t color[] = {12,12,12,14,7,7,7}; +static const uint8_t color[] = { 12, 12, 12, 14, 7, 7, 7 }; static int16_t background, attr_orig; static HANDLE con; #define set_color(x) SetConsoleTextAttribute(con, background | color[x]) #define reset_color() SetConsoleTextAttribute(con, attr_orig) #else -static const uint8_t color[]={0x41,0x41,0x11,0x03,9,9,9}; -#define set_color(x) fprintf(stderr, "\033[%d;3%dm", color[x]>>4, color[x]&15) +static const uint8_t color[] = { 0x41, 0x41, 0x11, 0x03, 9, 9, 9 }; +#define set_color(x) fprintf(stderr, "\033[%d;3%dm", color[x] >> 4, color[x]&15) #define reset_color() fprintf(stderr, "\033[0m") #endif -static int use_color=-1; +static int use_color = -1; #undef fprintf -static void colored_fputs(int level, const char *str){ - if(use_color<0){ +static void colored_fputs(int level, const char *str) +{ + if (use_color < 0) { #if defined(_WIN32) && !defined(__MINGW32CE__) CONSOLE_SCREEN_BUFFER_INFO con_info; con = GetStdHandle(STD_ERROR_HANDLE); - use_color = (con != 
INVALID_HANDLE_VALUE) && !getenv("NO_COLOR") && !getenv("AV_LOG_FORCE_NOCOLOR"); + use_color = (con != INVALID_HANDLE_VALUE) && !getenv("NO_COLOR") && + !getenv("AV_LOG_FORCE_NOCOLOR"); if (use_color) { GetConsoleScreenBufferInfo(con, &con_info); attr_orig = con_info.wAttributes; background = attr_orig & 0xF0; } #elif HAVE_ISATTY - use_color= !getenv("NO_COLOR") && !getenv("AV_LOG_FORCE_NOCOLOR") && - (getenv("TERM") && isatty(2) || getenv("AV_LOG_FORCE_COLOR")); + use_color = !getenv("NO_COLOR") && !getenv("AV_LOG_FORCE_NOCOLOR") && + (getenv("TERM") && isatty(2) || + getenv("AV_LOG_FORCE_COLOR")); #else - use_color= getenv("AV_LOG_FORCE_COLOR") && !getenv("NO_COLOR") && !getenv("AV_LOG_FORCE_NOCOLOR"); + use_color = getenv("AV_LOG_FORCE_COLOR") && !getenv("NO_COLOR") && + !getenv("AV_LOG_FORCE_NOCOLOR"); #endif } - if(use_color){ + if (use_color) { set_color(level); } fputs(str, stderr); - if(use_color){ + if (use_color) { reset_color(); } } -const char* av_default_item_name(void* ptr){ - return (*(AVClass**)ptr)->class_name; +const char *av_default_item_name(void *ptr) +{ + return (*(AVClass **) ptr)->class_name; } static void sanitize(uint8_t *line){ @@ -89,58 +94,64 @@ static void sanitize(uint8_t *line){ void av_log_default_callback(void* ptr, int level, const char* fmt, va_list vl) { - static int print_prefix=1; + static int print_prefix = 1; static int count; static char prev[1024]; char line[1024]; static int is_atty; - AVClass* avc= ptr ? *(AVClass**)ptr : NULL; - if(level>av_log_level) + AVClass* avc = ptr ? *(AVClass **) ptr : NULL; + if (level > av_log_level) return; - line[0]=0; + line[0] = 0; #undef fprintf - if(print_prefix && avc) { + if (print_prefix && avc) { if (avc->parent_log_context_offset) { - AVClass** parent= *(AVClass***)(((uint8_t*)ptr) + avc->parent_log_context_offset); - if(parent && *parent){ - snprintf(line, sizeof(line), "[%s @ %p] ", (*parent)->item_name(parent), parent); + AVClass** parent = *(AVClass ***) (((uint8_t *) ptr) + + avc->parent_log_context_offset); + if (parent && *parent) { + snprintf(line, sizeof(line), "[%s @ %p] ", + (*parent)->item_name(parent), parent); } } - snprintf(line + strlen(line), sizeof(line) - strlen(line), "[%s @ %p] ", avc->item_name(ptr), ptr); + snprintf(line + strlen(line), sizeof(line) - strlen(line), "[%s @ %p] ", + avc->item_name(ptr), ptr); } vsnprintf(line + strlen(line), sizeof(line) - strlen(line), fmt, vl); - print_prefix = strlen(line) && line[strlen(line)-1] == '\n'; + print_prefix = strlen(line) && line[strlen(line) - 1] == '\n'; #if HAVE_ISATTY - if(!is_atty) is_atty= isatty(2) ? 1 : -1; + if (!is_atty) + is_atty = isatty(2) ? 1 : -1; #endif - if(print_prefix && (flags & AV_LOG_SKIP_REPEATED) && !strcmp(line, prev)){ + if (print_prefix && (flags & AV_LOG_SKIP_REPEATED) && !strcmp(line, prev)){ count++; - if(is_atty==1) + if (is_atty == 1) fprintf(stderr, " Last message repeated %d times\r", count); return; } - if(count>0){ + if (count > 0) { fprintf(stderr, " Last message repeated %d times\n", count); - count=0; + count = 0; } strcpy(prev, line); sanitize(line); - colored_fputs(av_clip(level>>3, 0, 6), line); + colored_fputs(av_clip(level >> 3, 0, 6), line); } -static void (*av_log_callback)(void*, int, const char*, va_list) = av_log_default_callback; +static void (*av_log_callback)(void*, int, const char*, va_list) = + av_log_default_callback; void av_log(void* avcl, int level, const char *fmt, ...) { - AVClass* avc= avcl ? *(AVClass**)avcl : NULL; + AVClass* avc = avcl ? 
*(AVClass **) avcl : NULL; va_list vl; va_start(vl, fmt); - if(avc && avc->version >= (50<<16 | 15<<8 | 2) && avc->log_level_offset_offset && level>=AV_LOG_FATAL) - level += *(int*)(((uint8_t*)avcl) + avc->log_level_offset_offset); + if (avc && avc->version >= (50 << 16 | 15 << 8 | 2) && + avc->log_level_offset_offset && level >= AV_LOG_FATAL) + level += *(int *) (((uint8_t *) avcl) + avc->log_level_offset_offset); av_vlog(avcl, level, fmt, vl); va_end(vl); } @@ -162,7 +173,7 @@ void av_log_set_level(int level) void av_log_set_flags(int arg) { - flags= arg; + flags = arg; } void av_log_set_callback(void (*callback)(void*, int, const char*, va_list)) diff --git a/libavutil/pixdesc.c b/libavutil/pixdesc.c index b89b11e0a2..c6616884a5 100644 --- a/libavutil/pixdesc.c +++ b/libavutil/pixdesc.c @@ -27,45 +27,46 @@ #include "intreadwrite.h" void av_read_image_line(uint16_t *dst, const uint8_t *data[4], const int linesize[4], - const AVPixFmtDescriptor *desc, int x, int y, int c, int w, int read_pal_component) + const AVPixFmtDescriptor *desc, int x, int y, int c, int w, + int read_pal_component) { - AVComponentDescriptor comp= desc->comp[c]; - int plane= comp.plane; - int depth= comp.depth_minus1+1; - int mask = (1<<depth)-1; - int shift= comp.shift; - int step = comp.step_minus1+1; - int flags= desc->flags; + AVComponentDescriptor comp = desc->comp[c]; + int plane = comp.plane; + int depth = comp.depth_minus1 + 1; + int mask = (1 << depth) - 1; + int shift = comp.shift; + int step = comp.step_minus1 + 1; + int flags = desc->flags; - if (flags & PIX_FMT_BITSTREAM){ - int skip = x*step + comp.offset_plus1-1; - const uint8_t *p = data[plane] + y*linesize[plane] + (skip>>3); - int shift = 8 - depth - (skip&7); + if (flags & PIX_FMT_BITSTREAM) { + int skip = x * step + comp.offset_plus1 - 1; + const uint8_t *p = data[plane] + y * linesize[plane] + (skip >> 3); + int shift = 8 - depth - (skip & 7); - while(w--){ + while (w--) { int val = (*p >> shift) & mask; - if(read_pal_component) - val= data[1][4*val + c]; + if (read_pal_component) + val = data[1][4*val + c]; shift -= step; - p -= shift>>3; + p -= shift >> 3; shift &= 7; - *dst++= val; + *dst++ = val; } } else { - const uint8_t *p = data[plane]+ y*linesize[plane] + x*step + comp.offset_plus1-1; + const uint8_t *p = data[plane] + y * linesize[plane] + x * step + comp.offset_plus1 - 1; int is_8bit = shift + depth <= 8; if (is_8bit) p += !!(flags & PIX_FMT_BE); - while(w--){ + while (w--) { int val = is_8bit ? *p : flags & PIX_FMT_BE ? 
AV_RB16(p) : AV_RL16(p); - val = (val>>shift) & mask; - if(read_pal_component) - val= data[1][4*val + c]; - p+= step; - *dst++= val; + val = (val >> shift) & mask; + if (read_pal_component) + val = data[1][4 * val + c]; + p += step; + *dst++ = val; } } } @@ -75,41 +76,41 @@ void av_write_image_line(const uint16_t *src, uint8_t *data[4], const int linesi { AVComponentDescriptor comp = desc->comp[c]; int plane = comp.plane; - int depth = comp.depth_minus1+1; - int step = comp.step_minus1+1; + int depth = comp.depth_minus1 + 1; + int step = comp.step_minus1 + 1; int flags = desc->flags; if (flags & PIX_FMT_BITSTREAM) { - int skip = x*step + comp.offset_plus1-1; - uint8_t *p = data[plane] + y*linesize[plane] + (skip>>3); - int shift = 8 - depth - (skip&7); + int skip = x * step + comp.offset_plus1 - 1; + uint8_t *p = data[plane] + y * linesize[plane] + (skip >> 3); + int shift = 8 - depth - (skip & 7); while (w--) { *p |= *src++ << shift; shift -= step; - p -= shift>>3; + p -= shift >> 3; shift &= 7; } } else { int shift = comp.shift; - uint8_t *p = data[plane]+ y*linesize[plane] + x*step + comp.offset_plus1-1; + uint8_t *p = data[plane] + y * linesize[plane] + x * step + comp.offset_plus1 - 1; if (shift + depth <= 8) { p += !!(flags & PIX_FMT_BE); while (w--) { - *p |= (*src++<<shift); + *p |= (*src++ << shift); p += step; } } else { while (w--) { if (flags & PIX_FMT_BE) { - uint16_t val = AV_RB16(p) | (*src++<<shift); + uint16_t val = AV_RB16(p) | (*src++ << shift); AV_WB16(p, val); } else { - uint16_t val = AV_RL16(p) | (*src++<<shift); + uint16_t val = AV_RL16(p) | (*src++ << shift); AV_WL16(p, val); } - p+= step; + p += step; } } } @@ -118,171 +119,171 @@ void av_write_image_line(const uint16_t *src, uint8_t *data[4], const int linesi const AVPixFmtDescriptor av_pix_fmt_descriptors[PIX_FMT_NB] = { [PIX_FMT_YUV420P] = { .name = "yuv420p", - .nb_components= 3, - .log2_chroma_w= 1, - .log2_chroma_h= 1, + .nb_components = 3, + .log2_chroma_w = 1, + .log2_chroma_h = 1, .comp = { - {0,0,1,0,7}, /* Y */ - {1,0,1,0,7}, /* U */ - {2,0,1,0,7}, /* V */ + { 0, 0, 1, 0, 7 }, /* Y */ + { 1, 0, 1, 0, 7 }, /* U */ + { 2, 0, 1, 0, 7 }, /* V */ }, .flags = PIX_FMT_PLANAR, }, [PIX_FMT_YUYV422] = { .name = "yuyv422", - .nb_components= 3, - .log2_chroma_w= 1, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 1, + .log2_chroma_h = 0, .comp = { - {0,1,1,0,7}, /* Y */ - {0,3,2,0,7}, /* U */ - {0,3,4,0,7}, /* V */ + { 0, 1, 1, 0, 7 }, /* Y */ + { 0, 3, 2, 0, 7 }, /* U */ + { 0, 3, 4, 0, 7 }, /* V */ }, }, [PIX_FMT_RGB24] = { .name = "rgb24", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,2,1,0,7}, /* R */ - {0,2,2,0,7}, /* G */ - {0,2,3,0,7}, /* B */ + { 0, 2, 1, 0, 7 }, /* R */ + { 0, 2, 2, 0, 7 }, /* G */ + { 0, 2, 3, 0, 7 }, /* B */ }, .flags = PIX_FMT_RGB, }, [PIX_FMT_BGR24] = { .name = "bgr24", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,2,1,0,7}, /* B */ - {0,2,2,0,7}, /* G */ - {0,2,3,0,7}, /* R */ + { 0, 2, 1, 0, 7 }, /* B */ + { 0, 2, 2, 0, 7 }, /* G */ + { 0, 2, 3, 0, 7 }, /* R */ }, .flags = PIX_FMT_RGB, }, [PIX_FMT_YUV422P] = { .name = "yuv422p", - .nb_components= 3, - .log2_chroma_w= 1, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 1, + .log2_chroma_h = 0, .comp = { - {0,0,1,0,7}, /* Y */ - {1,0,1,0,7}, /* U */ - {2,0,1,0,7}, /* V */ + { 0, 0, 1, 0, 7 }, /* Y */ + { 1, 0, 1, 0, 7 
}, /* U */ + { 2, 0, 1, 0, 7 }, /* V */ }, .flags = PIX_FMT_PLANAR, }, [PIX_FMT_YUV444P] = { .name = "yuv444p", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,0,1,0,7}, /* Y */ - {1,0,1,0,7}, /* U */ - {2,0,1,0,7}, /* V */ + { 0, 0, 1, 0, 7 }, /* Y */ + { 1, 0, 1, 0, 7 }, /* U */ + { 2, 0, 1, 0, 7 }, /* V */ }, .flags = PIX_FMT_PLANAR, }, [PIX_FMT_YUV410P] = { .name = "yuv410p", - .nb_components= 3, - .log2_chroma_w= 2, - .log2_chroma_h= 2, + .nb_components = 3, + .log2_chroma_w = 2, + .log2_chroma_h = 2, .comp = { - {0,0,1,0,7}, /* Y */ - {1,0,1,0,7}, /* U */ - {2,0,1,0,7}, /* V */ + { 0, 0, 1, 0, 7 }, /* Y */ + { 1, 0, 1, 0, 7 }, /* U */ + { 2, 0, 1, 0, 7 }, /* V */ }, .flags = PIX_FMT_PLANAR, }, [PIX_FMT_YUV411P] = { .name = "yuv411p", - .nb_components= 3, - .log2_chroma_w= 2, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 2, + .log2_chroma_h = 0, .comp = { - {0,0,1,0,7}, /* Y */ - {1,0,1,0,7}, /* U */ - {2,0,1,0,7}, /* V */ + { 0, 0, 1, 0, 7 }, /* Y */ + { 1, 0, 1, 0, 7 }, /* U */ + { 2, 0, 1, 0, 7 }, /* V */ }, .flags = PIX_FMT_PLANAR, }, [PIX_FMT_GRAY8] = { .name = "gray", - .nb_components= 1, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 1, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,0,1,0,7}, /* Y */ + { 0, 0, 1, 0, 7 }, /* Y */ }, }, [PIX_FMT_MONOWHITE] = { .name = "monow", - .nb_components= 1, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 1, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,0,1,0,0}, /* Y */ + { 0, 0, 1, 0, 0 }, /* Y */ }, .flags = PIX_FMT_BITSTREAM, }, [PIX_FMT_MONOBLACK] = { .name = "monob", - .nb_components= 1, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 1, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,0,1,7,0}, /* Y */ + { 0, 0, 1, 7, 0 }, /* Y */ }, .flags = PIX_FMT_BITSTREAM, }, [PIX_FMT_PAL8] = { .name = "pal8", - .nb_components= 1, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 1, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,0,1,0,7}, + { 0, 0, 1, 0, 7 }, }, .flags = PIX_FMT_PAL, }, [PIX_FMT_YUVJ420P] = { .name = "yuvj420p", - .nb_components= 3, - .log2_chroma_w= 1, - .log2_chroma_h= 1, + .nb_components = 3, + .log2_chroma_w = 1, + .log2_chroma_h = 1, .comp = { - {0,0,1,0,7}, /* Y */ - {1,0,1,0,7}, /* U */ - {2,0,1,0,7}, /* V */ + { 0, 0, 1, 0, 7 }, /* Y */ + { 1, 0, 1, 0, 7 }, /* U */ + { 2, 0, 1, 0, 7 }, /* V */ }, .flags = PIX_FMT_PLANAR, }, [PIX_FMT_YUVJ422P] = { .name = "yuvj422p", - .nb_components= 3, - .log2_chroma_w= 1, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 1, + .log2_chroma_h = 0, .comp = { - {0,0,1,0,7}, /* Y */ - {1,0,1,0,7}, /* U */ - {2,0,1,0,7}, /* V */ + { 0, 0, 1, 0, 7 }, /* Y */ + { 1, 0, 1, 0, 7 }, /* U */ + { 2, 0, 1, 0, 7 }, /* V */ }, .flags = PIX_FMT_PLANAR, }, [PIX_FMT_YUVJ444P] = { .name = "yuvj444p", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,0,1,0,7}, /* Y */ - {1,0,1,0,7}, /* U */ - {2,0,1,0,7}, /* V */ + {0, 0, 1, 0, 7}, /* Y */ + {1, 0, 1, 0, 7}, /* U */ + {2, 0, 1, 0, 7}, /* V */ }, .flags = PIX_FMT_PLANAR, }, @@ -296,171 +297,171 @@ const AVPixFmtDescriptor av_pix_fmt_descriptors[PIX_FMT_NB] = { }, [PIX_FMT_UYVY422] = { .name = "uyvy422", - .nb_components= 3, - .log2_chroma_w= 1, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 1, + .log2_chroma_h = 0, .comp = { - 
{0,1,2,0,7}, /* Y */ - {0,3,1,0,7}, /* U */ - {0,3,3,0,7}, /* V */ + { 0, 1, 2, 0, 7 }, /* Y */ + { 0, 3, 1, 0, 7 }, /* U */ + { 0, 3, 3, 0, 7 }, /* V */ }, }, [PIX_FMT_UYYVYY411] = { .name = "uyyvyy411", - .nb_components= 3, - .log2_chroma_w= 2, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 2, + .log2_chroma_h = 0, .comp = { - {0,3,2,0,7}, /* Y */ - {0,5,1,0,7}, /* U */ - {0,5,4,0,7}, /* V */ + { 0, 3, 2, 0, 7 }, /* Y */ + { 0, 5, 1, 0, 7 }, /* U */ + { 0, 5, 4, 0, 7 }, /* V */ }, }, [PIX_FMT_BGR8] = { .name = "bgr8", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,0,1,6,1}, /* B */ - {0,0,1,3,2}, /* G */ - {0,0,1,0,2}, /* R */ + { 0, 0, 1, 6, 1 }, /* B */ + { 0, 0, 1, 3, 2 }, /* G */ + { 0, 0, 1, 0, 2 }, /* R */ }, .flags = PIX_FMT_PAL | PIX_FMT_RGB, }, [PIX_FMT_BGR4] = { .name = "bgr4", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,3,1,0,0}, /* B */ - {0,3,2,0,1}, /* G */ - {0,3,4,0,0}, /* R */ + { 0, 3, 1, 0, 0 }, /* B */ + { 0, 3, 2, 0, 1 }, /* G */ + { 0, 3, 4, 0, 0 }, /* R */ }, .flags = PIX_FMT_BITSTREAM | PIX_FMT_RGB, }, [PIX_FMT_BGR4_BYTE] = { .name = "bgr4_byte", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,0,1,3,0}, /* B */ - {0,0,1,1,1}, /* G */ - {0,0,1,0,0}, /* R */ + { 0, 0, 1, 3, 0 }, /* B */ + { 0, 0, 1, 1, 1 }, /* G */ + { 0, 0, 1, 0, 0 }, /* R */ }, .flags = PIX_FMT_PAL | PIX_FMT_RGB, }, [PIX_FMT_RGB8] = { .name = "rgb8", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,0,1,6,1}, /* R */ - {0,0,1,3,2}, /* G */ - {0,0,1,0,2}, /* B */ + { 0, 0, 1, 6, 1 }, /* R */ + { 0, 0, 1, 3, 2 }, /* G */ + { 0, 0, 1, 0, 2 }, /* B */ }, .flags = PIX_FMT_PAL | PIX_FMT_RGB, }, [PIX_FMT_RGB4] = { .name = "rgb4", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,3,1,0,0}, /* R */ - {0,3,2,0,1}, /* G */ - {0,3,4,0,0}, /* B */ + { 0, 3, 1, 0, 0 }, /* R */ + { 0, 3, 2, 0, 1 }, /* G */ + { 0, 3, 4, 0, 0 }, /* B */ }, .flags = PIX_FMT_BITSTREAM | PIX_FMT_RGB, }, [PIX_FMT_RGB4_BYTE] = { .name = "rgb4_byte", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,0,1,3,0}, /* R */ - {0,0,1,1,1}, /* G */ - {0,0,1,0,0}, /* B */ + { 0, 0, 1, 3, 0 }, /* R */ + { 0, 0, 1, 1, 1 }, /* G */ + { 0, 0, 1, 0, 0 }, /* B */ }, .flags = PIX_FMT_PAL | PIX_FMT_RGB, }, [PIX_FMT_NV12] = { .name = "nv12", - .nb_components= 3, - .log2_chroma_w= 1, - .log2_chroma_h= 1, + .nb_components = 3, + .log2_chroma_w = 1, + .log2_chroma_h = 1, .comp = { - {0,0,1,0,7}, /* Y */ - {1,1,1,0,7}, /* U */ - {1,1,2,0,7}, /* V */ + { 0,0,1,0,7 }, /* Y */ + { 1,1,1,0,7 }, /* U */ + { 1,1,2,0,7 }, /* V */ }, .flags = PIX_FMT_PLANAR, }, [PIX_FMT_NV21] = { .name = "nv21", - .nb_components= 3, - .log2_chroma_w= 1, - .log2_chroma_h= 1, + .nb_components = 3, + .log2_chroma_w = 1, + .log2_chroma_h = 1, .comp = { - {0,0,1,0,7}, /* Y */ - {1,1,1,0,7}, /* V */ - {1,1,2,0,7}, /* U */ + { 0, 0, 1, 0, 7 }, /* Y */ + { 1, 1, 1, 0, 7 }, /* V */ + { 1, 1, 2, 0, 7 }, /* U */ }, .flags = PIX_FMT_PLANAR, }, [PIX_FMT_ARGB] = { .name = "argb", - .nb_components= 4, - .log2_chroma_w= 
0, - .log2_chroma_h= 0, + .nb_components = 4, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,3,1,0,7}, /* A */ - {0,3,2,0,7}, /* R */ - {0,3,3,0,7}, /* G */ - {0,3,4,0,7}, /* B */ + { 0, 3, 1, 0, 7 }, /* A */ + { 0, 3, 2, 0, 7 }, /* R */ + { 0, 3, 3, 0, 7 }, /* G */ + { 0, 3, 4, 0, 7 }, /* B */ }, .flags = PIX_FMT_RGB, }, [PIX_FMT_RGBA] = { .name = "rgba", - .nb_components= 4, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 4, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,3,1,0,7}, /* R */ - {0,3,2,0,7}, /* G */ - {0,3,3,0,7}, /* B */ - {0,3,4,0,7}, /* A */ + { 0, 3, 1, 0, 7 }, /* R */ + { 0, 3, 2, 0, 7 }, /* G */ + { 0, 3, 3, 0, 7 }, /* B */ + { 0, 3, 4, 0, 7 }, /* A */ }, .flags = PIX_FMT_RGB, }, [PIX_FMT_ABGR] = { .name = "abgr", - .nb_components= 4, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 4, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,3,1,0,7}, /* A */ - {0,3,2,0,7}, /* B */ - {0,3,3,0,7}, /* G */ - {0,3,4,0,7}, /* R */ + { 0, 3, 1, 0, 7 }, /* A */ + { 0, 3, 2, 0, 7 }, /* B */ + { 0, 3, 3, 0, 7 }, /* G */ + { 0, 3, 4, 0, 7 }, /* R */ }, .flags = PIX_FMT_RGB, }, [PIX_FMT_BGRA] = { .name = "bgra", - .nb_components= 4, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 4, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,3,1,0,7}, /* B */ - {0,3,2,0,7}, /* G */ - {0,3,3,0,7}, /* R */ - {0,3,4,0,7}, /* A */ + { 0, 3, 1, 0, 7 }, /* B */ + { 0, 3, 2, 0, 7 }, /* G */ + { 0, 3, 3, 0, 7 }, /* R */ + { 0, 3, 4, 0, 7 }, /* A */ }, .flags = PIX_FMT_RGB, }, @@ -516,57 +517,57 @@ const AVPixFmtDescriptor av_pix_fmt_descriptors[PIX_FMT_NB] = { }, [PIX_FMT_GRAY16BE] = { .name = "gray16be", - .nb_components= 1, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 1, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,1,0,15}, /* Y */ + { 0, 1, 1, 0, 15 }, /* Y */ }, .flags = PIX_FMT_BE, }, [PIX_FMT_GRAY16LE] = { .name = "gray16le", - .nb_components= 1, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 1, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,1,0,15}, /* Y */ + { 0, 1, 1, 0, 15 }, /* Y */ }, }, [PIX_FMT_YUV440P] = { .name = "yuv440p", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 1, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 1, .comp = { - {0,0,1,0,7}, /* Y */ - {1,0,1,0,7}, /* U */ - {2,0,1,0,7}, /* V */ + { 0, 0, 1, 0, 7 }, /* Y */ + { 1, 0, 1, 0, 7 }, /* U */ + { 2, 0, 1, 0, 7 }, /* V */ }, .flags = PIX_FMT_PLANAR, }, [PIX_FMT_YUVJ440P] = { .name = "yuvj440p", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 1, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 1, .comp = { - {0,0,1,0,7}, /* Y */ - {1,0,1,0,7}, /* U */ - {2,0,1,0,7}, /* V */ + { 0, 0, 1, 0, 7 }, /* Y */ + { 1, 0, 1, 0, 7 }, /* U */ + { 2, 0, 1, 0, 7 }, /* V */ }, .flags = PIX_FMT_PLANAR, }, [PIX_FMT_YUVA420P] = { .name = "yuva420p", - .nb_components= 4, - .log2_chroma_w= 1, - .log2_chroma_h= 1, + .nb_components = 4, + .log2_chroma_w = 1, + .log2_chroma_h = 1, .comp = { - {0,0,1,0,7}, /* Y */ - {1,0,1,0,7}, /* U */ - {2,0,1,0,7}, /* V */ - {3,0,1,0,7}, /* A */ + { 0, 0, 1, 0, 7 }, /* Y */ + { 1, 0, 1, 0, 7 }, /* U */ + { 2, 0, 1, 0, 7 }, /* V */ + { 3, 0, 1, 0, 7 }, /* A */ }, .flags = PIX_FMT_PLANAR, }, @@ -608,25 +609,25 @@ const AVPixFmtDescriptor av_pix_fmt_descriptors[PIX_FMT_NB] = { }, [PIX_FMT_RGB48BE] = { .name = "rgb48be", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, 
+ .log2_chroma_h = 0, .comp = { - {0,5,1,0,15}, /* R */ - {0,5,3,0,15}, /* G */ - {0,5,5,0,15}, /* B */ + { 0, 5, 1, 0, 15 }, /* R */ + { 0, 5, 3, 0, 15 }, /* G */ + { 0, 5, 5, 0, 15 }, /* B */ }, .flags = PIX_FMT_RGB | PIX_FMT_BE, }, [PIX_FMT_RGB48LE] = { .name = "rgb48le", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,5,1,0,15}, /* R */ - {0,5,3,0,15}, /* G */ - {0,5,5,0,15}, /* B */ + { 0, 5, 1, 0, 15 }, /* R */ + { 0, 5, 3, 0, 15 }, /* G */ + { 0, 5, 5, 0, 15 }, /* B */ }, .flags = PIX_FMT_RGB, }, @@ -658,97 +659,97 @@ const AVPixFmtDescriptor av_pix_fmt_descriptors[PIX_FMT_NB] = { }, [PIX_FMT_RGB565BE] = { .name = "rgb565be", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,0,3,4}, /* R */ - {0,1,1,5,5}, /* G */ - {0,1,1,0,4}, /* B */ + { 0, 1, 0, 3, 4 }, /* R */ + { 0, 1, 1, 5, 5 }, /* G */ + { 0, 1, 1, 0, 4 }, /* B */ }, .flags = PIX_FMT_BE | PIX_FMT_RGB, }, [PIX_FMT_RGB565LE] = { .name = "rgb565le", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,2,3,4}, /* R */ - {0,1,1,5,5}, /* G */ - {0,1,1,0,4}, /* B */ + { 0, 1, 2, 3, 4 }, /* R */ + { 0, 1, 1, 5, 5 }, /* G */ + { 0, 1, 1, 0, 4 }, /* B */ }, .flags = PIX_FMT_RGB, }, [PIX_FMT_RGB555BE] = { .name = "rgb555be", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,0,2,4}, /* R */ - {0,1,1,5,4}, /* G */ - {0,1,1,0,4}, /* B */ + { 0, 1, 0, 2, 4 }, /* R */ + { 0, 1, 1, 5, 4 }, /* G */ + { 0, 1, 1, 0, 4 }, /* B */ }, .flags = PIX_FMT_BE | PIX_FMT_RGB, }, [PIX_FMT_RGB555LE] = { .name = "rgb555le", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,2,2,4}, /* R */ - {0,1,1,5,4}, /* G */ - {0,1,1,0,4}, /* B */ + { 0, 1, 2, 2, 4 }, /* R */ + { 0, 1, 1, 5, 4 }, /* G */ + { 0, 1, 1, 0, 4 }, /* B */ }, .flags = PIX_FMT_RGB, }, [PIX_FMT_RGB444BE] = { .name = "rgb444be", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,0,0,3}, /* R */ - {0,1,1,4,3}, /* G */ - {0,1,1,0,3}, /* B */ + { 0, 1, 0, 0, 3 }, /* R */ + { 0, 1, 1, 4, 3 }, /* G */ + { 0, 1, 1, 0, 3 }, /* B */ }, .flags = PIX_FMT_BE | PIX_FMT_RGB, }, [PIX_FMT_RGB444LE] = { .name = "rgb444le", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,2,0,3}, /* R */ - {0,1,1,4,3}, /* G */ - {0,1,1,0,3}, /* B */ + { 0, 1, 2, 0, 3 }, /* R */ + { 0, 1, 1, 4, 3 }, /* G */ + { 0, 1, 1, 0, 3 }, /* B */ }, .flags = PIX_FMT_RGB, }, [PIX_FMT_BGR48BE] = { .name = "bgr48be", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,5,1,0,15}, /* B */ - {0,5,3,0,15}, /* G */ - {0,5,5,0,15}, /* R */ + { 0, 5, 1, 0, 15 }, /* B */ + { 0, 5, 3, 0, 15 }, /* G */ + { 0, 5, 5, 0, 15 }, /* R */ }, .flags = PIX_FMT_BE | PIX_FMT_RGB, }, [PIX_FMT_BGR48LE] = { .name = "bgr48le", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,5,1,0,15}, /* B */ - {0,5,3,0,15}, /* G */ - 
{0,5,5,0,15}, /* R */ + { 0, 5, 1, 0, 15 }, /* B */ + { 0, 5, 3, 0, 15 }, /* G */ + { 0, 5, 5, 0, 15 }, /* R */ }, .flags = PIX_FMT_RGB, }, @@ -779,73 +780,73 @@ const AVPixFmtDescriptor av_pix_fmt_descriptors[PIX_FMT_NB] = { }, [PIX_FMT_BGR565BE] = { .name = "bgr565be", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,0,3,4}, /* B */ - {0,1,1,5,5}, /* G */ - {0,1,1,0,4}, /* R */ + { 0, 1, 0, 3, 4 }, /* B */ + { 0, 1, 1, 5, 5 }, /* G */ + { 0, 1, 1, 0, 4 }, /* R */ }, .flags = PIX_FMT_BE | PIX_FMT_RGB, }, [PIX_FMT_BGR565LE] = { .name = "bgr565le", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,2,3,4}, /* B */ - {0,1,1,5,5}, /* G */ - {0,1,1,0,4}, /* R */ + { 0, 1, 2, 3, 4 }, /* B */ + { 0, 1, 1, 5, 5 }, /* G */ + { 0, 1, 1, 0, 4 }, /* R */ }, .flags = PIX_FMT_RGB, }, [PIX_FMT_BGR555BE] = { .name = "bgr555be", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,0,2,4}, /* B */ - {0,1,1,5,4}, /* G */ - {0,1,1,0,4}, /* R */ + { 0, 1, 0, 2, 4 }, /* B */ + { 0, 1, 1, 5, 4 }, /* G */ + { 0, 1, 1, 0, 4 }, /* R */ }, .flags = PIX_FMT_BE | PIX_FMT_RGB, }, [PIX_FMT_BGR555LE] = { .name = "bgr555le", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,2,2,4}, /* B */ - {0,1,1,5,4}, /* G */ - {0,1,1,0,4}, /* R */ + { 0, 1, 2, 2, 4 }, /* B */ + { 0, 1, 1, 5, 4 }, /* G */ + { 0, 1, 1, 0, 4 }, /* R */ }, .flags = PIX_FMT_RGB, }, [PIX_FMT_BGR444BE] = { .name = "bgr444be", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,0,0,3}, /* B */ - {0,1,1,4,3}, /* G */ - {0,1,1,0,3}, /* R */ + { 0, 1, 0, 0, 3 }, /* B */ + { 0, 1, 1, 4, 3 }, /* G */ + { 0, 1, 1, 0, 3 }, /* R */ }, .flags = PIX_FMT_BE | PIX_FMT_RGB, }, [PIX_FMT_BGR444LE] = { .name = "bgr444le", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,2,0,3}, /* B */ - {0,1,1,4,3}, /* G */ - {0,1,1,0,3}, /* R */ + { 0, 1, 2, 0, 3 }, /* B */ + { 0, 1, 1, 4, 3 }, /* G */ + { 0, 1, 1, 0, 3 }, /* R */ }, .flags = PIX_FMT_RGB, }, @@ -875,93 +876,93 @@ const AVPixFmtDescriptor av_pix_fmt_descriptors[PIX_FMT_NB] = { }, [PIX_FMT_YUV420P9LE] = { .name = "yuv420p9le", - .nb_components= 3, - .log2_chroma_w= 1, - .log2_chroma_h= 1, + .nb_components = 3, + .log2_chroma_w = 1, + .log2_chroma_h = 1, .comp = { - {0,1,1,0,8}, /* Y */ - {1,1,1,0,8}, /* U */ - {2,1,1,0,8}, /* V */ + { 0, 1, 1, 0, 8 }, /* Y */ + { 1, 1, 1, 0, 8 }, /* U */ + { 2, 1, 1, 0, 8 }, /* V */ }, .flags = PIX_FMT_PLANAR, }, [PIX_FMT_YUV420P9BE] = { .name = "yuv420p9be", - .nb_components= 3, - .log2_chroma_w= 1, - .log2_chroma_h= 1, + .nb_components = 3, + .log2_chroma_w = 1, + .log2_chroma_h = 1, .comp = { - {0,1,1,0,8}, /* Y */ - {1,1,1,0,8}, /* U */ - {2,1,1,0,8}, /* V */ + { 0, 1, 1, 0, 8 }, /* Y */ + { 1, 1, 1, 0, 8 }, /* U */ + { 2, 1, 1, 0, 8 }, /* V */ }, .flags = PIX_FMT_BE | PIX_FMT_PLANAR, }, [PIX_FMT_YUV420P10LE] = { .name = "yuv420p10le", - .nb_components= 3, - .log2_chroma_w= 1, - .log2_chroma_h= 1, + .nb_components = 3, + .log2_chroma_w = 1, + .log2_chroma_h = 1, .comp = { - {0,1,1,0,9}, /* Y */ - {1,1,1,0,9}, /* U */ - 
{2,1,1,0,9}, /* V */ + { 0, 1, 1, 0, 9 }, /* Y */ + { 1, 1, 1, 0, 9 }, /* U */ + { 2, 1, 1, 0, 9 }, /* V */ }, .flags = PIX_FMT_PLANAR, }, [PIX_FMT_YUV420P10BE] = { .name = "yuv420p10be", - .nb_components= 3, - .log2_chroma_w= 1, - .log2_chroma_h= 1, + .nb_components = 3, + .log2_chroma_w = 1, + .log2_chroma_h = 1, .comp = { - {0,1,1,0,9}, /* Y */ - {1,1,1,0,9}, /* U */ - {2,1,1,0,9}, /* V */ + { 0, 1, 1, 0, 9 }, /* Y */ + { 1, 1, 1, 0, 9 }, /* U */ + { 2, 1, 1, 0, 9 }, /* V */ }, .flags = PIX_FMT_BE | PIX_FMT_PLANAR, }, [PIX_FMT_YUV420P16LE] = { .name = "yuv420p16le", - .nb_components= 3, - .log2_chroma_w= 1, - .log2_chroma_h= 1, + .nb_components = 3, + .log2_chroma_w = 1, + .log2_chroma_h = 1, .comp = { - {0,1,1,0,15}, /* Y */ - {1,1,1,0,15}, /* U */ - {2,1,1,0,15}, /* V */ + { 0, 1, 1, 0, 15 }, /* Y */ + { 1, 1, 1, 0, 15 }, /* U */ + { 2, 1, 1, 0, 15 }, /* V */ }, .flags = PIX_FMT_PLANAR, }, [PIX_FMT_YUV420P16BE] = { .name = "yuv420p16be", - .nb_components= 3, - .log2_chroma_w= 1, - .log2_chroma_h= 1, + .nb_components = 3, + .log2_chroma_w = 1, + .log2_chroma_h = 1, .comp = { - {0,1,1,0,15}, /* Y */ - {1,1,1,0,15}, /* U */ - {2,1,1,0,15}, /* V */ + { 0, 1, 1, 0, 15 }, /* Y */ + { 1, 1, 1, 0, 15 }, /* U */ + { 2, 1, 1, 0, 15 }, /* V */ }, .flags = PIX_FMT_BE | PIX_FMT_PLANAR, }, [PIX_FMT_YUV422P9LE] = { .name = "yuv422p9le", - .nb_components= 3, - .log2_chroma_w= 1, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 1, + .log2_chroma_h = 0, .comp = { - {0,1,1,0,8}, /* Y */ - {1,1,1,0,8}, /* U */ - {2,1,1,0,8}, /* V */ + { 0, 1, 1, 0, 8 }, /* Y */ + { 1, 1, 1, 0, 8 }, /* U */ + { 2, 1, 1, 0, 8 }, /* V */ }, .flags = PIX_FMT_PLANAR, }, [PIX_FMT_YUV422P9BE] = { .name = "yuv422p9be", - .nb_components= 3, - .log2_chroma_w= 1, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 1, + .log2_chroma_h = 0, .comp = { {0,1,1,0,8}, /* Y */ {1,1,1,0,8}, /* U */ @@ -971,121 +972,121 @@ const AVPixFmtDescriptor av_pix_fmt_descriptors[PIX_FMT_NB] = { }, [PIX_FMT_YUV422P10LE] = { .name = "yuv422p10le", - .nb_components= 3, - .log2_chroma_w= 1, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 1, + .log2_chroma_h = 0, .comp = { - {0,1,1,0,9}, /* Y */ - {1,1,1,0,9}, /* U */ - {2,1,1,0,9}, /* V */ + { 0, 1, 1, 0, 9 }, /* Y */ + { 1, 1, 1, 0, 9 }, /* U */ + { 2, 1, 1, 0, 9 }, /* V */ }, .flags = PIX_FMT_PLANAR, }, [PIX_FMT_YUV422P10BE] = { .name = "yuv422p10be", - .nb_components= 3, - .log2_chroma_w= 1, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 1, + .log2_chroma_h = 0, .comp = { - {0,1,1,0,9}, /* Y */ - {1,1,1,0,9}, /* U */ - {2,1,1,0,9}, /* V */ + { 0, 1, 1, 0, 9 }, /* Y */ + { 1, 1, 1, 0, 9 }, /* U */ + { 2, 1, 1, 0, 9 }, /* V */ }, .flags = PIX_FMT_BE | PIX_FMT_PLANAR, }, [PIX_FMT_YUV422P16LE] = { .name = "yuv422p16le", - .nb_components= 3, - .log2_chroma_w= 1, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 1, + .log2_chroma_h = 0, .comp = { - {0,1,1,0,15}, /* Y */ - {1,1,1,0,15}, /* U */ - {2,1,1,0,15}, /* V */ + { 0, 1, 1, 0, 15 }, /* Y */ + { 1, 1, 1, 0, 15 }, /* U */ + { 2, 1, 1, 0, 15 }, /* V */ }, .flags = PIX_FMT_PLANAR, }, [PIX_FMT_YUV422P16BE] = { .name = "yuv422p16be", - .nb_components= 3, - .log2_chroma_w= 1, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 1, + .log2_chroma_h = 0, .comp = { - {0,1,1,0,15}, /* Y */ - {1,1,1,0,15}, /* U */ - {2,1,1,0,15}, /* V */ + { 0, 1, 1, 0, 15 }, /* Y */ + { 1, 1, 1, 0, 15 }, /* U */ + { 2, 1, 1, 0, 15 }, /* V */ }, .flags = PIX_FMT_BE | PIX_FMT_PLANAR, }, [PIX_FMT_YUV444P16LE] 
= { .name = "yuv444p16le", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,1,0,15}, /* Y */ - {1,1,1,0,15}, /* U */ - {2,1,1,0,15}, /* V */ + { 0, 1, 1, 0, 15 }, /* Y */ + { 1, 1, 1, 0, 15 }, /* U */ + { 2, 1, 1, 0, 15 }, /* V */ }, .flags = PIX_FMT_PLANAR, }, [PIX_FMT_YUV444P16BE] = { .name = "yuv444p16be", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,1,0,15}, /* Y */ - {1,1,1,0,15}, /* U */ - {2,1,1,0,15}, /* V */ + { 0, 1, 1, 0, 15 }, /* Y */ + { 1, 1, 1, 0, 15 }, /* U */ + { 2, 1, 1, 0, 15 }, /* V */ }, .flags = PIX_FMT_BE | PIX_FMT_PLANAR, }, [PIX_FMT_YUV444P10LE] = { .name = "yuv444p10le", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,1,0,9}, /* Y */ - {1,1,1,0,9}, /* U */ - {2,1,1,0,9}, /* V */ + { 0, 1, 1, 0, 9 }, /* Y */ + { 1, 1, 1, 0, 9 }, /* U */ + { 2, 1, 1, 0, 9 }, /* V */ }, .flags = PIX_FMT_PLANAR, }, [PIX_FMT_YUV444P10BE] = { .name = "yuv444p10be", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,1,0,9}, /* Y */ - {1,1,1,0,9}, /* U */ - {2,1,1,0,9}, /* V */ + { 0, 1, 1, 0, 9 }, /* Y */ + { 1, 1, 1, 0, 9 }, /* U */ + { 2, 1, 1, 0, 9 }, /* V */ }, .flags = PIX_FMT_BE | PIX_FMT_PLANAR, }, [PIX_FMT_YUV444P9LE] = { .name = "yuv444p9le", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,1,0,8}, /* Y */ - {1,1,1,0,8}, /* U */ - {2,1,1,0,8}, /* V */ + { 0, 1, 1, 0, 8 }, /* Y */ + { 1, 1, 1, 0, 8 }, /* U */ + { 2, 1, 1, 0, 8 }, /* V */ }, .flags = PIX_FMT_PLANAR, }, [PIX_FMT_YUV444P9BE] = { .name = "yuv444p9be", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,1,0,8}, /* Y */ - {1,1,1,0,8}, /* U */ - {2,1,1,0,8}, /* V */ + { 0, 1, 1, 0, 8 }, /* Y */ + { 1, 1, 1, 0, 8 }, /* U */ + { 2, 1, 1, 0, 8 }, /* V */ }, .flags = PIX_FMT_BE | PIX_FMT_PLANAR, }, @@ -1103,10 +1104,10 @@ const AVPixFmtDescriptor av_pix_fmt_descriptors[PIX_FMT_NB] = { }, [PIX_FMT_GRAY8A] = { .name = "gray8a", - .nb_components= 2, + .nb_components = 2, .comp = { - {0,1,1,0,7}, /* Y */ - {0,1,2,0,7}, /* A */ + { 0, 1, 1, 0, 7 }, /* Y */ + { 0, 1, 2, 0, 7 }, /* A */ }, }, [PIX_FMT_GBR24P] = { @@ -1121,85 +1122,85 @@ const AVPixFmtDescriptor av_pix_fmt_descriptors[PIX_FMT_NB] = { }, [PIX_FMT_GBRP] = { .name = "gbrp", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,0,1,0,7}, /* G */ - {1,0,1,0,7}, /* B */ - {2,0,1,0,7}, /* R */ + { 0, 0, 1, 0, 7 }, /* G */ + { 1, 0, 1, 0, 7 }, /* B */ + { 2, 0, 1, 0, 7 }, /* R */ }, .flags = PIX_FMT_PLANAR | PIX_FMT_RGB, }, [PIX_FMT_GBRP9LE] = { .name = "gbrp9le", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,1,0,8}, /* G */ - {1,1,1,0,8}, /* B */ - {2,1,1,0,8}, /* R */ + { 0, 1, 1, 0, 8 }, /* G */ + { 1, 1, 1, 0, 8 }, /* B */ + { 2, 1, 1, 0, 8 }, /* R */ }, .flags = PIX_FMT_PLANAR | PIX_FMT_RGB, }, [PIX_FMT_GBRP9BE] = { .name = "gbrp9be", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + 
.nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,1,0,8}, /* G */ - {1,1,1,0,8}, /* B */ - {2,1,1,0,8}, /* R */ + { 0, 1, 1, 0, 8 }, /* G */ + { 1, 1, 1, 0, 8 }, /* B */ + { 2, 1, 1, 0, 8 }, /* R */ }, .flags = PIX_FMT_BE | PIX_FMT_PLANAR | PIX_FMT_RGB, }, [PIX_FMT_GBRP10LE] = { .name = "gbrp10le", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,1,0,9}, /* G */ - {1,1,1,0,9}, /* B */ - {2,1,1,0,9}, /* R */ + { 0, 1, 1, 0, 9 }, /* G */ + { 1, 1, 1, 0, 9 }, /* B */ + { 2, 1, 1, 0, 9 }, /* R */ }, .flags = PIX_FMT_PLANAR | PIX_FMT_RGB, }, [PIX_FMT_GBRP10BE] = { .name = "gbrp10be", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,1,0,9}, /* G */ - {1,1,1,0,9}, /* B */ - {2,1,1,0,9}, /* R */ + { 0, 1, 1, 0, 9 }, /* G */ + { 1, 1, 1, 0, 9 }, /* B */ + { 2, 1, 1, 0, 9 }, /* R */ }, .flags = PIX_FMT_BE | PIX_FMT_PLANAR | PIX_FMT_RGB, }, [PIX_FMT_GBRP16LE] = { .name = "gbrp16le", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,1,0,15}, /* G */ - {1,1,1,0,15}, /* B */ - {2,1,1,0,15}, /* R */ + { 0, 1, 1, 0, 15 }, /* G */ + { 1, 1, 1, 0, 15 }, /* B */ + { 2, 1, 1, 0, 15 }, /* R */ }, .flags = PIX_FMT_PLANAR | PIX_FMT_RGB, }, [PIX_FMT_GBRP16BE] = { .name = "gbrp16be", - .nb_components= 3, - .log2_chroma_w= 0, - .log2_chroma_h= 0, + .nb_components = 3, + .log2_chroma_w = 0, + .log2_chroma_h = 0, .comp = { - {0,1,1,0,15}, /* G */ - {1,1,1,0,15}, /* B */ - {2,1,1,0,15}, /* R */ + { 0, 1, 1, 0, 15 }, /* G */ + { 1, 1, 1, 0, 15 }, /* B */ + { 2, 1, 1, 0, 15 }, /* R */ }, .flags = PIX_FMT_BE | PIX_FMT_PLANAR | PIX_FMT_RGB, }, @@ -1254,8 +1255,8 @@ int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc) int log2_pixels = pixdesc->log2_chroma_w + pixdesc->log2_chroma_h; for (c = 0; c < pixdesc->nb_components; c++) { - int s = c==1 || c==2 ? 0 : log2_pixels; - bits += (pixdesc->comp[c].depth_minus1+1) << s; + int s = c == 1 || c == 2 ? 
0 : log2_pixels; + bits += (pixdesc->comp[c].depth_minus1 + 1) << s; } return bits >> log2_pixels; @@ -1265,11 +1266,11 @@ char *av_get_pix_fmt_string (char *buf, int buf_size, enum PixelFormat pix_fmt) { /* print header */ if (pix_fmt < 0) { - snprintf (buf, buf_size, "name " " nb_components" " nb_bits"); + snprintf (buf, buf_size, "name" " nb_components" " nb_bits"); } else { const AVPixFmtDescriptor *pixdesc = &av_pix_fmt_descriptors[pix_fmt]; - snprintf(buf, buf_size, "%-11s %7d %10d", - pixdesc->name, pixdesc->nb_components, av_get_bits_per_pixel(pixdesc)); + snprintf(buf, buf_size, "%-11s %7d %10d", pixdesc->name, + pixdesc->nb_components, av_get_bits_per_pixel(pixdesc)); } return buf; diff --git a/libavutil/random_seed.c b/libavutil/random_seed.c index 81805e5db0..235028b5c5 100644 --- a/libavutil/random_seed.c +++ b/libavutil/random_seed.c @@ -40,24 +40,24 @@ static int read_random(uint32_t *dst, const char *file) static uint32_t get_generic_seed(void) { - clock_t last_t=0; - int bits=0; - uint64_t random=0; + clock_t last_t = 0; + int bits = 0; + uint64_t random = 0; unsigned i; - float s=0.000000000001; + float s = 0.000000000001; - for(i=0;bits<64;i++){ - clock_t t= clock(); - if(last_t && fabs(t-last_t)>s || t==(clock_t)-1){ - if(i<10000 && s<(1<<24)){ - s+=s; - i=t=0; - }else{ - random= 2*random + (i&1); + for (i = 0; bits < 64; i++) { + clock_t t = clock(); + if (last_t && fabs(t - last_t) > s || t == (clock_t) -1) { + if (i < 10000 && s < (1 << 24)) { + s += s; + i = t = 0; + } else { + random = 2 * random + (i & 1); bits++; } } - last_t= t; + last_t = t; } #ifdef AV_READ_TIME random ^= AV_READ_TIME(); @@ -65,7 +65,7 @@ static uint32_t get_generic_seed(void) random ^= clock(); #endif - random += random>>32; + random += random >> 32; return random; } diff --git a/libavutil/rational.c b/libavutil/rational.c index b1bd655158..1a833ebec1 100644 --- a/libavutil/rational.c +++ b/libavutil/rational.c @@ -33,75 +33,86 @@ #include "mathematics.h" #include "rational.h" -int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max){ - AVRational a0={0,1}, a1={1,0}; - int sign= (num<0) ^ (den<0); - int64_t gcd= av_gcd(FFABS(num), FFABS(den)); - - if(gcd){ - num = FFABS(num)/gcd; - den = FFABS(den)/gcd; +int av_reduce(int *dst_num, int *dst_den, + int64_t num, int64_t den, int64_t max) +{ + AVRational a0 = { 0, 1 }, a1 = { 1, 0 }; + int sign = (num < 0) ^ (den < 0); + int64_t gcd = av_gcd(FFABS(num), FFABS(den)); + + if (gcd) { + num = FFABS(num) / gcd; + den = FFABS(den) / gcd; } - if(num<=max && den<=max){ - a1= (AVRational){num, den}; - den=0; + if (num <= max && den <= max) { + a1 = (AVRational) { num, den }; + den = 0; } - while(den){ - uint64_t x = num / den; - int64_t next_den= num - den*x; - int64_t a2n= x*a1.num + a0.num; - int64_t a2d= x*a1.den + a0.den; + while (den) { + uint64_t x = num / den; + int64_t next_den = num - den * x; + int64_t a2n = x * a1.num + a0.num; + int64_t a2d = x * a1.den + a0.den; - if(a2n > max || a2d > max){ - if(a1.num) x= (max - a0.num) / a1.num; - if(a1.den) x= FFMIN(x, (max - a0.den) / a1.den); + if (a2n > max || a2d > max) { + if (a1.num) x = (max - a0.num) / a1.num; + if (a1.den) x = FFMIN(x, (max - a0.den) / a1.den); - if (den*(2*x*a1.den + a0.den) > num*a1.den) - a1 = (AVRational){x*a1.num + a0.num, x*a1.den + a0.den}; + if (den * (2 * x * a1.den + a0.den) > num * a1.den) + a1 = (AVRational) { x * a1.num + a0.num, x * a1.den + a0.den }; break; } - a0= a1; - a1= (AVRational){a2n, a2d}; - num= den; - den= next_den; + a0 = a1; + 
a1 = (AVRational) { a2n, a2d }; + num = den; + den = next_den; } av_assert2(av_gcd(a1.num, a1.den) <= 1U); *dst_num = sign ? -a1.num : a1.num; *dst_den = a1.den; - return den==0; + return den == 0; } -AVRational av_mul_q(AVRational b, AVRational c){ - av_reduce(&b.num, &b.den, b.num * (int64_t)c.num, b.den * (int64_t)c.den, INT_MAX); +AVRational av_mul_q(AVRational b, AVRational c) +{ + av_reduce(&b.num, &b.den, + b.num * (int64_t) c.num, + b.den * (int64_t) c.den, INT_MAX); return b; } -AVRational av_div_q(AVRational b, AVRational c){ - return av_mul_q(b, (AVRational){c.den, c.num}); +AVRational av_div_q(AVRational b, AVRational c) +{ + return av_mul_q(b, (AVRational) { c.den, c.num }); } -AVRational av_add_q(AVRational b, AVRational c){ - av_reduce(&b.num, &b.den, b.num * (int64_t)c.den + c.num * (int64_t)b.den, b.den * (int64_t)c.den, INT_MAX); +AVRational av_add_q(AVRational b, AVRational c) { + av_reduce(&b.num, &b.den, + b.num * (int64_t) c.den + + c.num * (int64_t) b.den, + b.den * (int64_t) c.den, INT_MAX); return b; } -AVRational av_sub_q(AVRational b, AVRational c){ - return av_add_q(b, (AVRational){-c.num, c.den}); +AVRational av_sub_q(AVRational b, AVRational c) +{ + return av_add_q(b, (AVRational) { -c.num, c.den }); } -AVRational av_d2q(double d, int max){ +AVRational av_d2q(double d, int max) +{ AVRational a; #define LOG2 0.69314718055994530941723212145817656807550013436025 int exponent; int64_t den; if (isnan(d)) - return (AVRational){0,0}; + return (AVRational) { 0,0 }; if (isinf(d)) - return (AVRational){ d<0 ? -1:1, 0 }; + return (AVRational) { d < 0 ? -1 : 1, 0 }; exponent = FFMAX( (int)(log(fabs(d) + 1e-20)/LOG2), 0); den = 1LL << (61 - exponent); av_reduce(&a.num, &a.den, (int64_t)(d * den + 0.5), den, max); @@ -127,7 +138,7 @@ int av_nearer_q(AVRational q, AVRational q1, AVRational q2) int av_find_nearest_q_idx(AVRational q, const AVRational* q_list) { int i, nearest_q_idx = 0; - for(i=0; q_list[i].den; i++) + for (i = 0; q_list[i].den; i++) if (av_nearer_q(q, q_list[i], q_list[nearest_q_idx]) > 0) nearest_q_idx = i; @@ -138,16 +149,19 @@ int av_find_nearest_q_idx(AVRational q, const AVRational* q_list) int main(void) { AVRational a,b; - for(a.num=-2; a.num<=2; a.num++){ - for(a.den=-2; a.den<=2; a.den++){ - for(b.num=-2; b.num<=2; b.num++){ - for(b.den=-2; b.den<=2; b.den++){ - int c= av_cmp_q(a,b); - double d= av_q2d(a) == av_q2d(b) ? 0 : (av_q2d(a) - av_q2d(b)); - if(d>0) d=1; - else if(d<0) d=-1; - else if(d != d) d= INT_MIN; - if(c!=d) av_log(0, AV_LOG_ERROR, "%d/%d %d/%d, %d %f\n", a.num, a.den, b.num, b.den, c,d); + for (a.num = -2; a.num <= 2; a.num++) { + for (a.den = -2; a.den <= 2; a.den++) { + for (b.num = -2; b.num <= 2; b.num++) { + for (b.den = -2; b.den <= 2; b.den++) { + int c = av_cmp_q(a,b); + double d = av_q2d(a) == av_q2d(b) ? 
+ 0 : (av_q2d(a) - av_q2d(b)); + if (d > 0) d = 1; + else if (d < 0) d = -1; + else if (d != d) d = INT_MIN; + if (c != d) + av_log(0, AV_LOG_ERROR, "%d/%d %d/%d, %d %f\n", a.num, + a.den, b.num, b.den, c,d); } } } diff --git a/libavutil/tree.c b/libavutil/tree.c index 8769c76b0f..58cd33d770 100644 --- a/libavutil/tree.c +++ b/libavutil/tree.c @@ -21,22 +21,24 @@ #include "log.h" #include "tree.h" -typedef struct AVTreeNode{ +typedef struct AVTreeNode { struct AVTreeNode *child[2]; void *elem; int state; -}AVTreeNode; +} AVTreeNode; const int av_tree_node_size = sizeof(AVTreeNode); -void *av_tree_find(const AVTreeNode *t, void *key, int (*cmp)(void *key, const void *b), void *next[2]){ - if(t){ - unsigned int v= cmp(key, t->elem); - if(v){ - if(next) next[v>>31]= t->elem; - return av_tree_find(t->child[(v>>31)^1], key, cmp, next); - }else{ - if(next){ +void *av_tree_find(const AVTreeNode *t, void *key, + int (*cmp)(void *key, const void *b), void *next[2]) +{ + if (t) { + unsigned int v = cmp(key, t->elem); + if (v) { + if (next) next[v >> 31] = t->elem; + return av_tree_find(t->child[(v >> 31) ^ 1], key, cmp, next); + } else { + if (next) { av_tree_find(t->child[0], key, cmp, next); av_tree_find(t->child[1], key, cmp, next); } @@ -46,41 +48,43 @@ void *av_tree_find(const AVTreeNode *t, void *key, int (*cmp)(void *key, const v return NULL; } -void *av_tree_insert(AVTreeNode **tp, void *key, int (*cmp)(void *key, const void *b), AVTreeNode **next){ - AVTreeNode *t= *tp; - if(t){ - unsigned int v= cmp(t->elem, key); +void *av_tree_insert(AVTreeNode **tp, void *key, + int (*cmp)(void *key, const void *b), AVTreeNode **next) +{ + AVTreeNode *t = *tp; + if (t) { + unsigned int v = cmp(t->elem, key); void *ret; - if(!v){ - if(*next) + if (!v) { + if (*next) return t->elem; - else if(t->child[0]||t->child[1]){ - int i= !t->child[0]; + else if (t->child[0] || t->child[1]) { + int i = !t->child[0]; void *next_elem[2]; av_tree_find(t->child[i], key, cmp, next_elem); - key= t->elem= next_elem[i]; - v= -i; - }else{ - *next= t; - *tp=NULL; + key = t->elem = next_elem[i]; + v = -i; + } else { + *next = t; + *tp = NULL; return NULL; } } - ret= av_tree_insert(&t->child[v>>31], key, cmp, next); - if(!ret){ - int i= (v>>31) ^ !!*next; - AVTreeNode **child= &t->child[i]; - t->state += 2*i - 1; - - if(!(t->state&1)){ - if(t->state){ + ret = av_tree_insert(&t->child[v >> 31], key, cmp, next); + if (!ret) { + int i = (v >> 31) ^ !!*next; + AVTreeNode **child = &t->child[i]; + t->state += 2 * i - 1; + + if (!(t->state & 1)) { + if (t->state) { /* The following code is equivalent to if((*child)->state*2 == -t->state) rotate(child, i^1); rotate(tp, i); with rotate(): - static void rotate(AVTreeNode **tp, int i){ + static void rotate(AVTreeNode **tp, int i) { AVTreeNode *t= *tp; *tp= t->child[i]; @@ -92,54 +96,62 @@ void *av_tree_insert(AVTreeNode **tp, void *key, int (*cmp)(void *key, const voi } but such a rotate function is both bigger and slower */ - if((*child)->state*2 == -t->state){ - *tp= (*child)->child[i^1]; - (*child)->child[i^1]= (*tp)->child[i]; - (*tp)->child[i]= *child; - *child= (*tp)->child[i^1]; - (*tp)->child[i^1]= t; - - (*tp)->child[0]->state= -((*tp)->state>0); - (*tp)->child[1]->state= (*tp)->state<0 ; - (*tp)->state=0; - }else{ - *tp= *child; - *child= (*child)->child[i^1]; - (*tp)->child[i^1]= t; - if((*tp)->state) t->state = 0; - else t->state>>= 1; - (*tp)->state= -t->state; + if (( *child )->state * 2 == -t->state) { + *tp = (*child)->child[i ^ 1]; + (*child)->child[i ^ 1] = 
(*tp)->child[i]; + (*tp)->child[i] = *child; + *child = ( *tp )->child[i ^ 1]; + (*tp)->child[i ^ 1] = t; + + (*tp)->child[0]->state = -((*tp)->state > 0); + (*tp)->child[1]->state = (*tp)->state < 0; + (*tp)->state = 0; + } else { + *tp = *child; + *child = (*child)->child[i ^ 1]; + (*tp)->child[i ^ 1] = t; + if ((*tp)->state) t->state = 0; + else t->state >>= 1; + (*tp)->state = -t->state; } } } - if(!(*tp)->state ^ !!*next) + if (!(*tp)->state ^ !!*next) return key; } return ret; - }else{ - *tp= *next; *next= NULL; - if(*tp){ - (*tp)->elem= key; + } else { + *tp = *next; + *next = NULL; + if (*tp) { + (*tp)->elem = key; return NULL; - }else + } else return key; } } -void av_tree_destroy(AVTreeNode *t){ - if(t){ +void av_tree_destroy(AVTreeNode *t) +{ + if (t) { av_tree_destroy(t->child[0]); av_tree_destroy(t->child[1]); av_free(t); } } -void av_tree_enumerate(AVTreeNode *t, void *opaque, int (*cmp)(void *opaque, void *elem), int (*enu)(void *opaque, void *elem)){ - if(t){ - int v= cmp ? cmp(opaque, t->elem) : 0; - if(v>=0) av_tree_enumerate(t->child[0], opaque, cmp, enu); - if(v==0) enu(opaque, t->elem); - if(v<=0) av_tree_enumerate(t->child[1], opaque, cmp, enu); +void av_tree_enumerate(AVTreeNode *t, void *opaque, + int (*cmp)(void *opaque, void *elem), + int (*enu)(void *opaque, void *elem)) +{ + if (t) { + int v = cmp ? cmp(opaque, t->elem) : 0; + if (v >= 0) + av_tree_enumerate(t->child[0], opaque, cmp, enu); + if (v == 0) + enu(opaque, t->elem); + if (v <= 0) + av_tree_enumerate(t->child[1], opaque, cmp, enu); } } @@ -147,64 +159,68 @@ void av_tree_enumerate(AVTreeNode *t, void *opaque, int (*cmp)(void *opaque, voi #include "lfg.h" -static int check(AVTreeNode *t){ - if(t){ - int left= check(t->child[0]); - int right= check(t->child[1]); +static int check(AVTreeNode *t) +{ + if (t) { + int left = check(t->child[0]); + int right = check(t->child[1]); - if(left>999 || right>999) + if (left>999 || right>999) return 1000; - if(right - left != t->state) + if (right - left != t->state) return 1000; - if(t->state>1 || t->state<-1) + if (t->state>1 || t->state<-1) return 1000; - return FFMAX(left, right)+1; + return FFMAX(left, right) + 1; } return 0; } -static void print(AVTreeNode *t, int depth){ +static void print(AVTreeNode *t, int depth) +{ int i; - for(i=0; i<depth*4; i++) av_log(NULL, AV_LOG_ERROR, " "); - if(t){ + for (i = 0; i < depth * 4; i++) av_log(NULL, AV_LOG_ERROR, " "); + if (t) { av_log(NULL, AV_LOG_ERROR, "Node %p %2d %p\n", t, t->state, t->elem); - print(t->child[0], depth+1); - print(t->child[1], depth+1); - }else + print(t->child[0], depth + 1); + print(t->child[1], depth + 1); + } else av_log(NULL, AV_LOG_ERROR, "NULL\n"); } -static int cmp(void *a, const void *b){ - return (uint8_t*)a-(const uint8_t*)b; +static int cmp(void *a, const void *b) +{ + return (uint8_t *) a - (const uint8_t *) b; } -int main(void){ +int main (void) +{ int i; void *k; - AVTreeNode *root= NULL, *node=NULL; + AVTreeNode *root = NULL, *node = NULL; AVLFG prng; av_lfg_init(&prng, 1); - for(i=0; i<10000; i++){ + for (i = 0; i < 10000; i++) { int j = av_lfg_get(&prng) % 86294; - if(check(root) > 999){ + if (check(root) > 999) { av_log(NULL, AV_LOG_ERROR, "FATAL error %d\n", i); print(root, 0); return -1; } av_log(NULL, AV_LOG_ERROR, "inserting %4d\n", j); - if(!node) - node= av_mallocz(av_tree_node_size); - av_tree_insert(&root, (void*)(j+1), cmp, &node); + if (!node) + node = av_mallocz(av_tree_node_size); + av_tree_insert(&root, (void *) (j + 1), cmp, &node); j = av_lfg_get(&prng) % 86294; { - 
AVTreeNode *node2=NULL; + AVTreeNode *node2 = NULL; av_log(NULL, AV_LOG_ERROR, "removing %4d\n", j); - av_tree_insert(&root, (void*)(j+1), cmp, &node2); - k= av_tree_find(root, (void*)(j+1), cmp, NULL); - if(k) + av_tree_insert(&root, (void *) (j + 1), cmp, &node2); + k = av_tree_find(root, (void *) (j + 1), cmp, NULL); + if (k) av_log(NULL, AV_LOG_ERROR, "removal failure %d\n", i); } } diff --git a/tests/fate/aac.mak b/tests/fate/aac.mak index 9abae7d866..f17c914d1a 100644 --- a/tests/fate/aac.mak +++ b/tests/fate/aac.mak @@ -38,6 +38,10 @@ FATE_AAC += fate-aac-ap05_48 fate-aac-ap05_48: CMD = pcm -i $(SAMPLES)/aac/ap05_48.mp4 fate-aac-ap05_48: REF = $(SAMPLES)/aac/ap05_48.s16 +FATE_AAC += fate-aac-latm_stereo_to_51 +fate-aac-latm_stereo_to_51: CMD = pcm -i $(SAMPLES)/aac/latm_stereo_to_51.ts -ac 6 +fate-aac-latm_stereo_to_51: REF = $(SAMPLES)/aac/latm_stereo_to_51.s16 + fate-aac-ct%: CMD = pcm -i $(SAMPLES)/aac/CT_DecoderCheck/$(@:fate-aac-ct-%=%) fate-aac-ct%: REF = $(SAMPLES)/aac/CT_DecoderCheck/aacPlusv2.wav |
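
For readers skimming the rational.c hunks above, here is a minimal standalone sketch of how the reformatted AVRational helpers (av_mul_q, av_add_q, av_d2q, av_q2d) are typically used. It is not part of the commit; the numeric values are arbitrary illustrations.

/* Usage sketch for libavutil's rational helpers; values are illustrative only. */
#include <stdio.h>
#include "libavutil/rational.h"

int main(void)
{
    AVRational fps    = { 30000, 1001 };                      /* NTSC frame rate */
    AVRational half   = av_mul_q(fps, (AVRational) { 1, 2 }); /* 15000/1001, reduced by av_reduce() */
    AVRational sum    = av_add_q(fps, (AVRational) { 1, 1001 }); /* 30001/1001 */
    AVRational approx = av_d2q(29.97, 100000);                /* nearest rational with num/den <= 100000 */

    printf("fps=%d/%d half=%d/%d sum=%d/%d approx=%d/%d (%f)\n",
           fps.num, fps.den, half.num, half.den,
           sum.num, sum.den, approx.num, approx.den, av_q2d(approx));
    return 0;
}

All of these helpers reduce their result through av_reduce(), the continued-fraction routine cleaned up at the top of this section, so the returned numerator and denominator are already in lowest terms.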