author    | Michael Niedermayer <michaelni@gmx.at> | 2012-01-24 02:41:53 +0100
committer | Michael Niedermayer <michaelni@gmx.at> | 2012-01-24 02:41:53 +0100
commit    | 0bb57f8bf029427059be21a562527dcfa0e264c9 (patch)
tree      | 8e6743c4fc1f16f36899bdea87e485735c0d8d59 /libavcodec
parent    | b955d4072e3e563b230c9ab4d6575577a3dc7314 (diff)
parent    | 0fec2cb15cc6ff1fcc724c774ec36abadcb7b6ad (diff)
download  | ffmpeg-0bb57f8bf029427059be21a562527dcfa0e264c9.tar.gz
Merge remote-tracking branch 'qatar/master'
* qatar/master:
Remove ffmpeg.
aacenc: Simplify windowing
aacenc: Move saved overlap samples to the beginning of the same buffer as incoming samples.
aacenc: Deinterleave input samples before processing.
aacenc: Store channel count in AACEncContext.
aacenc: Move Q^3/4 calculation to its own table
aacenc: Request normalized float samples instead of converting s16 samples to float.
aacpsy: Replace an if with FFMAX in LAME windowing.
aacenc: cosmetics, replace 'rd' with 'bits' in codebook_trellis_rate to make it more clear what is being calculated.
aacpsy: cosmetics, change a FIXME to a NOTE about subshort comparisons
aacenc: cosmetics: move init() and end() to the bottom of the file.
aacenc: aac_encode_init() cleanup
XWD encoder and decoder
vc1: don't read the interpfrm and bfraction elements for interlaced frames
mxfdec: fix memleak on mxf_read_close()
westwood: split the AUD and VQA demuxers into separate files.
Conflicts:
.gitignore
Changelog
Makefile
configure
doc/ffmpeg.texi
ffmpeg.c
libavcodec/Makefile
libavcodec/aacenc.c
libavcodec/allcodecs.c
libavcodec/avcodec.h
libavcodec/version.h
libavformat/Makefile
libavformat/img2.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
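
Editor's note: several of the merged aacenc commits above change the encoder input path from interleaved int16 to planar float with channels remapped to AAC order ("Deinterleave input samples before processing", "Request normalized float samples"). The following is a minimal sketch of that idea, not the committed code; the function name deinterleave(), the FRAME_LEN constant, and the assumption that each plane holds at least 2*1024 floats are illustrative, while the 5.1 channel map mirrors aac_chan_maps from the patch below.

    #include <string.h>

    #define FRAME_LEN 1024

    /* AAC order for 5.1 (front-center first), mirroring aac_chan_maps in the patch */
    static const unsigned char chan_map_5_1[6] = { 2, 0, 1, 4, 5, 3 };

    static void deinterleave(float *planar[], const float *in,
                             int channels, const unsigned char *map)
    {
        for (int ch = 0; ch < channels; ch++) {
            const float *src = in + map[ch];
            float *dst = planar[ch];

            /* keep the previous frame's samples as the MDCT overlap half */
            memcpy(dst, dst + FRAME_LEN, FRAME_LEN * sizeof(*dst));

            /* de-interleave the new frame into the second half of the plane */
            for (int i = 0; i < FRAME_LEN; i++)
                dst[FRAME_LEN + i] = src[(size_t)i * channels];
        }
    }

A hypothetical call for 5.1 input would be deinterleave(planes, pcm, 6, chan_map_5_1); the committed deinterleave_input_samples() in aacenc.c does the same remap and overlap copy on the encoder's internal buffers.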
Diffstat (limited to 'libavcodec')
-rw-r--r-- | libavcodec/Makefile    |   2
-rw-r--r-- | libavcodec/aaccoder.c  |  35
-rw-r--r-- | libavcodec/aacenc.c    | 354
-rw-r--r-- | libavcodec/aacenc.h    |   9
-rw-r--r-- | libavcodec/aacpsy.c    |  34
-rw-r--r-- | libavcodec/allcodecs.c |   1
-rw-r--r-- | libavcodec/avcodec.h   |   1
-rw-r--r-- | libavcodec/psymodel.c  |  17
-rw-r--r-- | libavcodec/psymodel.h  |  12
-rw-r--r-- | libavcodec/vc1.c       |  16
-rw-r--r-- | libavcodec/version.h   |   4
-rw-r--r-- | libavcodec/xwd.h       |  41
-rw-r--r-- | libavcodec/xwddec.c    | 267
-rw-r--r-- | libavcodec/xwdenc.c    | 246
14 files changed, 845 insertions, 194 deletions
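
Editor's note on the "Move Q^3/4 calculation to its own table" change visible in the aaccoder.c/aacenc.c hunks below: the quantizer used to recompute sqrtf(Q * sqrtf(Q)) for every band, and the patch precomputes that value per scalefactor step at init time. A minimal sketch of the equivalence follows; the names init_pow34_table(), pow2sf_tab and TAB_SIZE are illustrative stand-ins for ff_aac_pow2sf_tab / ff_aac_pow34sf_tab[428], and pow2sf_tab is assumed to be filled elsewhere.

    #include <math.h>

    #define TAB_SIZE 428    /* matches ff_aac_pow34sf_tab[428] in the patch */

    static float pow2sf_tab[TAB_SIZE];   /* assumed already initialized elsewhere */
    static float pow34sf_tab[TAB_SIZE];

    static void init_pow34_table(void)
    {
        for (int i = 0; i < TAB_SIZE; i++)
            /* x^(3/4) == sqrt(x * sqrt(x)), so one table lookup replaces
             * the per-band sqrtf(Q * sqrtf(Q)) in quantize_and_encode_band_cost() */
            pow34sf_tab[i] = sqrtf(pow2sf_tab[i] * sqrtf(pow2sf_tab[i]));
    }
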
diff --git a/libavcodec/Makefile b/libavcodec/Makefile index f094d16a88..5942f67a8b 100644 --- a/libavcodec/Makefile +++ b/libavcodec/Makefile @@ -470,6 +470,8 @@ OBJS-$(CONFIG_XBIN_DECODER) += bintext.o cga_data.o OBJS-$(CONFIG_XL_DECODER) += xl.o OBJS-$(CONFIG_XSUB_DECODER) += xsubdec.o OBJS-$(CONFIG_XSUB_ENCODER) += xsubenc.o +OBJS-$(CONFIG_XWD_DECODER) += xwddec.o +OBJS-$(CONFIG_XWD_ENCODER) += xwdenc.o OBJS-$(CONFIG_Y41P_DECODER) += y41pdec.o OBJS-$(CONFIG_Y41P_ENCODER) += y41penc.o OBJS-$(CONFIG_YOP_DECODER) += yop.o diff --git a/libavcodec/aaccoder.c b/libavcodec/aaccoder.c index d6750a2454..8738460b81 100644 --- a/libavcodec/aaccoder.c +++ b/libavcodec/aaccoder.c @@ -110,14 +110,15 @@ static av_always_inline float quantize_and_encode_band_cost_template( int *bits, int BT_ZERO, int BT_UNSIGNED, int BT_PAIR, int BT_ESC) { - const float IQ = ff_aac_pow2sf_tab[POW_SF2_ZERO + scale_idx - SCALE_ONE_POS + SCALE_DIV_512]; - const float Q = ff_aac_pow2sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512]; + const int q_idx = POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512; + const float Q = ff_aac_pow2sf_tab [q_idx]; + const float Q34 = ff_aac_pow34sf_tab[q_idx]; + const float IQ = ff_aac_pow2sf_tab [POW_SF2_ZERO + scale_idx - SCALE_ONE_POS + SCALE_DIV_512]; const float CLIPPED_ESCAPE = 165140.0f*IQ; int i, j; float cost = 0; const int dim = BT_PAIR ? 2 : 4; int resbits = 0; - const float Q34 = sqrtf(Q * sqrtf(Q)); const int range = aac_cb_range[cb]; const int maxval = aac_cb_maxval[cb]; int off; @@ -420,7 +421,7 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce, const int run_esc = (1 << run_bits) - 1; int idx, ppos, count; int stackrun[120], stackcb[120], stack_len; - float next_minrd = INFINITY; + float next_minbits = INFINITY; int next_mincb = 0; abs_pow34_v(s->scoefs, sce->coeffs, 1024); @@ -434,7 +435,7 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce, size = sce->ics.swb_sizes[swb]; if (sce->zeroes[win*16 + swb]) { float cost_stay_here = path[swb][0].cost; - float cost_get_here = next_minrd + run_bits + 4; + float cost_get_here = next_minbits + run_bits + 4; if ( run_value_bits[sce->ics.num_windows == 8][path[swb][0].run] != run_value_bits[sce->ics.num_windows == 8][path[swb][0].run+1]) cost_stay_here += run_bits; @@ -447,7 +448,7 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce, path[swb+1][0].cost = cost_stay_here; path[swb+1][0].run = path[swb][0].run + 1; } - next_minrd = path[swb+1][0].cost; + next_minbits = path[swb+1][0].cost; next_mincb = 0; for (cb = 1; cb < 12; cb++) { path[swb+1][cb].cost = 61450; @@ -455,10 +456,10 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce, path[swb+1][cb].run = 0; } } else { - float minrd = next_minrd; + float minbits = next_minbits; int mincb = next_mincb; int startcb = sce->band_type[win*16+swb]; - next_minrd = INFINITY; + next_minbits = INFINITY; next_mincb = 0; for (cb = 0; cb < startcb; cb++) { path[swb+1][cb].cost = 61450; @@ -467,15 +468,15 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce, } for (cb = startcb; cb < 12; cb++) { float cost_stay_here, cost_get_here; - float rd = 0.0f; + float bits = 0.0f; for (w = 0; w < group_len; w++) { - rd += quantize_band_cost(s, sce->coeffs + start + w*128, - s->scoefs + start + w*128, size, - sce->sf_idx[(win+w)*16+swb], cb, - 0, INFINITY, NULL); + bits += quantize_band_cost(s, sce->coeffs + start + w*128, + s->scoefs + start + 
w*128, size, + sce->sf_idx[(win+w)*16+swb], cb, + 0, INFINITY, NULL); } - cost_stay_here = path[swb][cb].cost + rd; - cost_get_here = minrd + rd + run_bits + 4; + cost_stay_here = path[swb][cb].cost + bits; + cost_get_here = minbits + bits + run_bits + 4; if ( run_value_bits[sce->ics.num_windows == 8][path[swb][cb].run] != run_value_bits[sce->ics.num_windows == 8][path[swb][cb].run+1]) cost_stay_here += run_bits; @@ -488,8 +489,8 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce, path[swb+1][cb].cost = cost_stay_here; path[swb+1][cb].run = path[swb][cb].run + 1; } - if (path[swb+1][cb].cost < next_minrd) { - next_minrd = path[swb+1][cb].cost; + if (path[swb+1][cb].cost < next_minbits) { + next_minbits = path[swb+1][cb].cost; next_mincb = cb; } } diff --git a/libavcodec/aacenc.c b/libavcodec/aacenc.c index 5ab0f1ff6e..a88d75a610 100644 --- a/libavcodec/aacenc.c +++ b/libavcodec/aacenc.c @@ -46,6 +46,14 @@ #define AAC_MAX_CHANNELS 6 +#define ERROR_IF(cond, ...) \ + if (cond) { \ + av_log(avctx, AV_LOG_ERROR, __VA_ARGS__); \ + return AVERROR(EINVAL); \ + } + +float ff_aac_pow34sf_tab[428]; + static const uint8_t swb_size_1024_96[] = { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 12, 12, 12, 12, 12, 16, 16, 24, 28, 36, 44, @@ -136,6 +144,18 @@ static const uint8_t aac_chan_configs[6][5] = { }; /** + * Table to remap channels from Libav's default order to AAC order. + */ +static const uint8_t aac_chan_maps[AAC_MAX_CHANNELS][AAC_MAX_CHANNELS] = { + { 0 }, + { 0, 1 }, + { 2, 0, 1 }, + { 2, 0, 1, 3 }, + { 2, 0, 1, 3, 4 }, + { 2, 0, 1, 4, 5, 3 }, +}; + +/** * Make AAC audio config object. * @see 1.6.2.1 "Syntax - AudioSpecificConfig" */ @@ -147,7 +167,7 @@ static void put_audio_specific_config(AVCodecContext *avctx) init_put_bits(&pb, avctx->extradata, avctx->extradata_size*8); put_bits(&pb, 5, 2); //object type - AAC-LC put_bits(&pb, 4, s->samplerate_index); //sample rate index - put_bits(&pb, 4, avctx->channels); + put_bits(&pb, 4, s->channels); //GASpecificConfig put_bits(&pb, 1, 0); //frame length - 1024 samples put_bits(&pb, 1, 0); //does not depend on core coder @@ -160,117 +180,80 @@ static void put_audio_specific_config(AVCodecContext *avctx) flush_put_bits(&pb); } -static av_cold int aac_encode_init(AVCodecContext *avctx) -{ - AACEncContext *s = avctx->priv_data; - int i; - const uint8_t *sizes[2]; - uint8_t grouping[AAC_MAX_CHANNELS]; - int lengths[2]; - - avctx->frame_size = 1024; +#define WINDOW_FUNC(type) \ +static void apply_ ##type ##_window(DSPContext *dsp, SingleChannelElement *sce, const float *audio) - for (i = 0; i < 16; i++) - if (avctx->sample_rate == avpriv_mpeg4audio_sample_rates[i]) - break; - if (i == 16) { - av_log(avctx, AV_LOG_ERROR, "Unsupported sample rate %d\n", avctx->sample_rate); - return -1; - } - if (avctx->channels > AAC_MAX_CHANNELS) { - av_log(avctx, AV_LOG_ERROR, "Unsupported number of channels: %d\n", avctx->channels); - return -1; - } - if (avctx->profile != FF_PROFILE_UNKNOWN && avctx->profile != FF_PROFILE_AAC_LOW) { - av_log(avctx, AV_LOG_ERROR, "Unsupported profile %d\n", avctx->profile); - return -1; - } - if (1024.0 * avctx->bit_rate / avctx->sample_rate > 6144 * avctx->channels) { - av_log(avctx, AV_LOG_ERROR, "Too many bits per frame requested\n"); - return -1; - } - s->samplerate_index = i; - - dsputil_init(&s->dsp, avctx); - ff_mdct_init(&s->mdct1024, 11, 0, 1.0); - ff_mdct_init(&s->mdct128, 8, 0, 1.0); - // window init - ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024); - 
ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128); - ff_init_ff_sine_windows(10); - ff_init_ff_sine_windows(7); - - s->chan_map = aac_chan_configs[avctx->channels-1]; - s->samples = av_malloc(2 * 1024 * avctx->channels * sizeof(s->samples[0])); - s->cpe = av_mallocz(sizeof(ChannelElement) * s->chan_map[0]); - avctx->extradata = av_mallocz(5 + FF_INPUT_BUFFER_PADDING_SIZE); - avctx->extradata_size = 5; - put_audio_specific_config(avctx); +WINDOW_FUNC(only_long) +{ + const float *lwindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024; + const float *pwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024; + float *out = sce->ret; - sizes[0] = swb_size_1024[i]; - sizes[1] = swb_size_128[i]; - lengths[0] = ff_aac_num_swb_1024[i]; - lengths[1] = ff_aac_num_swb_128[i]; - for (i = 0; i < s->chan_map[0]; i++) - grouping[i] = s->chan_map[i + 1] == TYPE_CPE; - ff_psy_init(&s->psy, avctx, 2, sizes, lengths, s->chan_map[0], grouping); - s->psypp = ff_psy_preprocess_init(avctx); - s->coder = &ff_aac_coders[s->options.aac_coder]; + dsp->vector_fmul (out, audio, lwindow, 1024); + dsp->vector_fmul_reverse(out + 1024, audio + 1024, pwindow, 1024); +} - s->lambda = avctx->global_quality ? avctx->global_quality : 120; +WINDOW_FUNC(long_start) +{ + const float *lwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024; + const float *swindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128; + float *out = sce->ret; + + dsp->vector_fmul(out, audio, lwindow, 1024); + memcpy(out + 1024, audio, sizeof(out[0]) * 448); + dsp->vector_fmul_reverse(out + 1024 + 448, audio, swindow, 128); + memset(out + 1024 + 576, 0, sizeof(out[0]) * 448); +} - ff_aac_tableinit(); +WINDOW_FUNC(long_stop) +{ + const float *lwindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024; + const float *swindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128; + float *out = sce->ret; + + memset(out, 0, sizeof(out[0]) * 448); + dsp->vector_fmul(out + 448, audio + 448, swindow, 128); + memcpy(out + 576, audio + 576, sizeof(out[0]) * 448); + dsp->vector_fmul_reverse(out + 1024, audio + 1024, lwindow, 1024); +} - return 0; +WINDOW_FUNC(eight_short) +{ + const float *swindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128; + const float *pwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128; + const float *in = audio + 448; + float *out = sce->ret; + + for (int w = 0; w < 8; w++) { + dsp->vector_fmul (out, in, w ? pwindow : swindow, 128); + out += 128; + in += 128; + dsp->vector_fmul_reverse(out, in, swindow, 128); + out += 128; + } } -static void apply_window_and_mdct(AVCodecContext *avctx, AACEncContext *s, - SingleChannelElement *sce, short *audio) +static void (*const apply_window[4])(DSPContext *dsp, SingleChannelElement *sce, const float *audio) = { + [ONLY_LONG_SEQUENCE] = apply_only_long_window, + [LONG_START_SEQUENCE] = apply_long_start_window, + [EIGHT_SHORT_SEQUENCE] = apply_eight_short_window, + [LONG_STOP_SEQUENCE] = apply_long_stop_window +}; + +static void apply_window_and_mdct(AACEncContext *s, SingleChannelElement *sce, + float *audio) { - int i, k; - const int chans = avctx->channels; - const float * lwindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024; - const float * swindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128; - const float * pwindow = sce->ics.use_kb_window[1] ? 
ff_aac_kbd_short_128 : ff_sine_128; + int i; float *output = sce->ret; - if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) { - memcpy(output, sce->saved, sizeof(float)*1024); - if (sce->ics.window_sequence[0] == LONG_STOP_SEQUENCE) { - memset(output, 0, sizeof(output[0]) * 448); - for (i = 448; i < 576; i++) - output[i] = sce->saved[i] * pwindow[i - 448]; - for (i = 576; i < 704; i++) - output[i] = sce->saved[i]; - } - if (sce->ics.window_sequence[0] != LONG_START_SEQUENCE) { - for (i = 0; i < 1024; i++) { - output[i+1024] = audio[i * chans] * lwindow[1024 - i - 1]; - sce->saved[i] = audio[i * chans] * lwindow[i]; - } - } else { - for (i = 0; i < 448; i++) - output[i+1024] = audio[i * chans]; - for (; i < 576; i++) - output[i+1024] = audio[i * chans] * swindow[576 - i - 1]; - memset(output+1024+576, 0, sizeof(output[0]) * 448); - for (i = 0; i < 1024; i++) - sce->saved[i] = audio[i * chans]; - } + apply_window[sce->ics.window_sequence[0]](&s->dsp, sce, audio); + + if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) s->mdct1024.mdct_calc(&s->mdct1024, sce->coeffs, output); - } else { - for (k = 0; k < 1024; k += 128) { - for (i = 448 + k; i < 448 + k + 256; i++) - output[i - 448 - k] = (i < 1024) - ? sce->saved[i] - : audio[(i-1024)*chans]; - s->dsp.vector_fmul (output, output, k ? swindow : pwindow, 128); - s->dsp.vector_fmul_reverse(output+128, output+128, swindow, 128); - s->mdct128.mdct_calc(&s->mdct128, sce->coeffs + k, output); - } - for (i = 0; i < 1024; i++) - sce->saved[i] = audio[i * chans]; - } + else + for (i = 0; i < 1024; i += 128) + s->mdct128.mdct_calc(&s->mdct128, sce->coeffs + i, output + i*2); + memcpy(audio, audio + 1024, sizeof(audio[0]) * 1024); } /** @@ -488,11 +471,37 @@ static void put_bitstream_info(AVCodecContext *avctx, AACEncContext *s, put_bits(&s->pb, 12 - padbits, 0); } +/* + * Deinterleave input samples. + * Channels are reordered from Libav's default order to AAC order. + */ +static void deinterleave_input_samples(AACEncContext *s, + const float *samples) +{ + int ch, i; + const int sinc = s->channels; + const uint8_t *channel_map = aac_chan_maps[sinc - 1]; + + /* deinterleave and remap input samples */ + for (ch = 0; ch < sinc; ch++) { + const float *sptr = samples + channel_map[ch]; + + /* copy last 1024 samples of previous frame to the start of the current frame */ + memcpy(&s->planar_samples[ch][0], &s->planar_samples[ch][1024], 1024 * sizeof(s->planar_samples[0][0])); + + /* deinterleave */ + for (i = 1024; i < 1024 * 2; i++) { + s->planar_samples[ch][i] = *sptr; + sptr += sinc; + } + } +} + static int aac_encode_frame(AVCodecContext *avctx, uint8_t *frame, int buf_size, void *data) { AACEncContext *s = avctx->priv_data; - int16_t *samples = s->samples, *samples2, *la; + float **samples = s->planar_samples, *samples2, *la, *overlap; ChannelElement *cpe; int i, ch, w, g, chans, tag, start_ch; int chan_el_counter[4]; @@ -500,27 +509,15 @@ static int aac_encode_frame(AVCodecContext *avctx, if (s->last_frame) return 0; + if (data) { - if (!s->psypp) { - memcpy(s->samples + 1024 * avctx->channels, data, - 1024 * avctx->channels * sizeof(s->samples[0])); - } else { - start_ch = 0; - samples2 = s->samples + 1024 * avctx->channels; - for (i = 0; i < s->chan_map[0]; i++) { - tag = s->chan_map[i+1]; - chans = tag == TYPE_CPE ? 
2 : 1; - ff_psy_preprocess(s->psypp, (uint16_t*)data + start_ch, - samples2 + start_ch, start_ch, chans); - start_ch += chans; - } - } + deinterleave_input_samples(s, data); + if (s->psypp) + ff_psy_preprocess(s->psypp, s->planar_samples, s->channels); } - if (!avctx->frame_number) { - memcpy(s->samples, s->samples + 1024 * avctx->channels, - 1024 * avctx->channels * sizeof(s->samples[0])); + + if (!avctx->frame_number) return 0; - } start_ch = 0; for (i = 0; i < s->chan_map[0]; i++) { @@ -531,8 +528,9 @@ static int aac_encode_frame(AVCodecContext *avctx, for (ch = 0; ch < chans; ch++) { IndividualChannelStream *ics = &cpe->ch[ch].ics; int cur_channel = start_ch + ch; - samples2 = samples + cur_channel; - la = samples2 + (448+64) * avctx->channels; + overlap = &samples[cur_channel][0]; + samples2 = overlap + 1024; + la = samples2 + (448+64); if (!data) la = NULL; if (tag == TYPE_LFE) { @@ -560,7 +558,7 @@ static int aac_encode_frame(AVCodecContext *avctx, for (w = 0; w < ics->num_windows; w++) ics->group_len[w] = wi[ch].grouping[w]; - apply_window_and_mdct(avctx, s, &cpe->ch[ch], samples2); + apply_window_and_mdct(s, &cpe->ch[ch], overlap); } start_ch += chans; } @@ -626,8 +624,8 @@ static int aac_encode_frame(AVCodecContext *avctx, } frame_bits = put_bits_count(&s->pb); - if (frame_bits <= 6144 * avctx->channels - 3) { - s->psy.bitres.bits = frame_bits / avctx->channels; + if (frame_bits <= 6144 * s->channels - 3) { + s->psy.bitres.bits = frame_bits / s->channels; break; } @@ -648,8 +646,7 @@ static int aac_encode_frame(AVCodecContext *avctx, if (!data) s->last_frame = 1; - memcpy(s->samples, s->samples + 1024 * avctx->channels, - 1024 * avctx->channels * sizeof(s->samples[0])); + return put_bits_count(&s->pb)>>3; } @@ -660,12 +657,109 @@ static av_cold int aac_encode_end(AVCodecContext *avctx) ff_mdct_end(&s->mdct1024); ff_mdct_end(&s->mdct128); ff_psy_end(&s->psy); - ff_psy_preprocess_end(s->psypp); - av_freep(&s->samples); + if (s->psypp) + ff_psy_preprocess_end(s->psypp); + av_freep(&s->buffer.samples); av_freep(&s->cpe); return 0; } +static av_cold int dsp_init(AVCodecContext *avctx, AACEncContext *s) +{ + int ret = 0; + + dsputil_init(&s->dsp, avctx); + + // window init + ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024); + ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128); + ff_init_ff_sine_windows(10); + ff_init_ff_sine_windows(7); + + if (ret = ff_mdct_init(&s->mdct1024, 11, 0, 32768.0)) + return ret; + if (ret = ff_mdct_init(&s->mdct128, 8, 0, 32768.0)) + return ret; + + return 0; +} + +static av_cold int alloc_buffers(AVCodecContext *avctx, AACEncContext *s) +{ + FF_ALLOCZ_OR_GOTO(avctx, s->buffer.samples, 3 * 1024 * s->channels * sizeof(s->buffer.samples[0]), alloc_fail); + FF_ALLOCZ_OR_GOTO(avctx, s->cpe, sizeof(ChannelElement) * s->chan_map[0], alloc_fail); + FF_ALLOCZ_OR_GOTO(avctx, avctx->extradata, 5 + FF_INPUT_BUFFER_PADDING_SIZE, alloc_fail); + + for(int ch = 0; ch < s->channels; ch++) + s->planar_samples[ch] = s->buffer.samples + 3 * 1024 * ch; + + return 0; +alloc_fail: + return AVERROR(ENOMEM); +} + +static av_cold int aac_encode_init(AVCodecContext *avctx) +{ + AACEncContext *s = avctx->priv_data; + int i, ret = 0; + const uint8_t *sizes[2]; + uint8_t grouping[AAC_MAX_CHANNELS]; + int lengths[2]; + + avctx->frame_size = 1024; + + for (i = 0; i < 16; i++) + if (avctx->sample_rate == avpriv_mpeg4audio_sample_rates[i]) + break; + + s->channels = avctx->channels; + + ERROR_IF(i == 16, + "Unsupported sample rate %d\n", avctx->sample_rate); + ERROR_IF(s->channels > 
AAC_MAX_CHANNELS, + "Unsupported number of channels: %d\n", s->channels); + ERROR_IF(avctx->profile != FF_PROFILE_UNKNOWN && avctx->profile != FF_PROFILE_AAC_LOW, + "Unsupported profile %d\n", avctx->profile); + ERROR_IF(1024.0 * avctx->bit_rate / avctx->sample_rate > 6144 * s->channels, + "Too many bits per frame requested\n"); + + s->samplerate_index = i; + + s->chan_map = aac_chan_configs[s->channels-1]; + + if (ret = dsp_init(avctx, s)) + goto fail; + + if (ret = alloc_buffers(avctx, s)) + goto fail; + + avctx->extradata_size = 5; + put_audio_specific_config(avctx); + + sizes[0] = swb_size_1024[i]; + sizes[1] = swb_size_128[i]; + lengths[0] = ff_aac_num_swb_1024[i]; + lengths[1] = ff_aac_num_swb_128[i]; + for (i = 0; i < s->chan_map[0]; i++) + grouping[i] = s->chan_map[i + 1] == TYPE_CPE; + if (ret = ff_psy_init(&s->psy, avctx, 2, sizes, lengths, s->chan_map[0], grouping)) + goto fail; + s->psypp = ff_psy_preprocess_init(avctx); + s->coder = &ff_aac_coders[s->options.aac_coder]; + + s->lambda = avctx->global_quality ? avctx->global_quality : 120; + + ff_aac_tableinit(); + + for (i = 0; i < 428; i++) + ff_aac_pow34sf_tab[i] = sqrt(ff_aac_pow2sf_tab[i] * sqrt(ff_aac_pow2sf_tab[i])); + + return 0; +fail: + aac_encode_end(avctx); + return ret; +} + #define AACENC_FLAGS AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM static const AVOption aacenc_options[] = { {"stereo_mode", "Stereo coding method", offsetof(AACEncContext, options.stereo_mode), AV_OPT_TYPE_INT, {.dbl = 0}, -1, 1, AACENC_FLAGS, "stereo_mode"}, @@ -692,7 +786,7 @@ AVCodec ff_aac_encoder = { .encode = aac_encode_frame, .close = aac_encode_end, .capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY | CODEC_CAP_EXPERIMENTAL, - .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, + .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("Advanced Audio Coding"), .priv_class = &aacenc_class, }; diff --git a/libavcodec/aacenc.h b/libavcodec/aacenc.h index e48fc18060..d87cc0479b 100644 --- a/libavcodec/aacenc.h +++ b/libavcodec/aacenc.h @@ -61,9 +61,10 @@ typedef struct AACEncContext { FFTContext mdct1024; ///< long (1024 samples) frame transform context FFTContext mdct128; ///< short (128 samples) frame transform context DSPContext dsp; - int16_t *samples; ///< saved preprocessed input + float *planar_samples[6]; ///< saved preprocessed input int samplerate_index; ///< MPEG-4 samplerate index + int channels; ///< channel count const uint8_t *chan_map; ///< channel configuration map ChannelElement *cpe; ///< channel elements @@ -75,6 +76,12 @@ typedef struct AACEncContext { float lambda; DECLARE_ALIGNED(16, int, qcoefs)[96]; ///< quantized coefficients DECLARE_ALIGNED(32, float, scoefs)[1024]; ///< scaled coefficients + + struct { + float *samples; + } buffer; } AACEncContext; +extern float ff_aac_pow34sf_tab[428]; + #endif /* AVCODEC_AACENC_H */ diff --git a/libavcodec/aacpsy.c b/libavcodec/aacpsy.c index f81ed487ba..396a2d28b2 100644 --- a/libavcodec/aacpsy.c +++ b/libavcodec/aacpsy.c @@ -400,7 +400,7 @@ static av_unused FFPsyWindowInfo psy_3gpp_window(FFPsyContext *ctx, int stay_short = 0; for (i = 0; i < 8; i++) { for (j = 0; j < 128; j++) { - v = iir_filter(la[(i*128+j)*ctx->avctx->channels], pch->iir_state); + v = iir_filter(la[i*128+j], pch->iir_state); sum += v*v; } s[i] = sum; @@ -776,9 +776,8 @@ static void lame_apply_block_type(AacPsyChannel *ctx, FFPsyWindowInfo *wi, int u ctx->next_window_seq = blocktype; } -static 
FFPsyWindowInfo psy_lame_window(FFPsyContext *ctx, - const int16_t *audio, const int16_t *la, - int channel, int prev_type) +static FFPsyWindowInfo psy_lame_window(FFPsyContext *ctx, const float *audio, + const float *la, int channel, int prev_type) { AacPsyContext *pctx = (AacPsyContext*) ctx->model_priv_data; AacPsyChannel *pch = &pctx->ch[channel]; @@ -795,20 +794,20 @@ static FFPsyWindowInfo psy_lame_window(FFPsyContext *ctx, float attack_intensity[(AAC_NUM_BLOCKS_SHORT + 1) * PSY_LAME_NUM_SUBBLOCKS]; float energy_subshort[(AAC_NUM_BLOCKS_SHORT + 1) * PSY_LAME_NUM_SUBBLOCKS]; float energy_short[AAC_NUM_BLOCKS_SHORT + 1] = { 0 }; - int chans = ctx->avctx->channels; - const int16_t *firbuf = la + (AAC_BLOCK_SIZE_SHORT/4 - PSY_LAME_FIR_LEN) * chans; + const float *firbuf = la + (AAC_BLOCK_SIZE_SHORT/4 - PSY_LAME_FIR_LEN); int j, att_sum = 0; /* LAME comment: apply high pass filter of fs/4 */ for (i = 0; i < AAC_BLOCK_SIZE_LONG; i++) { float sum1, sum2; - sum1 = firbuf[(i + ((PSY_LAME_FIR_LEN - 1) / 2)) * chans]; + sum1 = firbuf[i + (PSY_LAME_FIR_LEN - 1) / 2]; sum2 = 0.0; for (j = 0; j < ((PSY_LAME_FIR_LEN - 1) / 2) - 1; j += 2) { - sum1 += psy_fir_coeffs[j] * (firbuf[(i + j) * chans] + firbuf[(i + PSY_LAME_FIR_LEN - j) * chans]); - sum2 += psy_fir_coeffs[j + 1] * (firbuf[(i + j + 1) * chans] + firbuf[(i + PSY_LAME_FIR_LEN - j - 1) * chans]); + sum1 += psy_fir_coeffs[j] * (firbuf[i + j] + firbuf[i + PSY_LAME_FIR_LEN - j]); + sum2 += psy_fir_coeffs[j + 1] * (firbuf[i + j + 1] + firbuf[i + PSY_LAME_FIR_LEN - j - 1]); } - hpfsmpl[i] = sum1 + sum2; + /* NOTE: The LAME psymodel expects it's input in the range -32768 to 32768. Tuning this for normalized floats would be difficult. */ + hpfsmpl[i] = (sum1 + sum2) * 32768.0f; } /* Calculate the energies of each sub-shortblock */ @@ -823,16 +822,15 @@ static FFPsyWindowInfo psy_lame_window(FFPsyContext *ctx, float const *const pfe = pf + AAC_BLOCK_SIZE_LONG / (AAC_NUM_BLOCKS_SHORT * PSY_LAME_NUM_SUBBLOCKS); float p = 1.0f; for (; pf < pfe; pf++) - if (p < fabsf(*pf)) - p = fabsf(*pf); + p = FFMAX(p, fabsf(*pf)); pch->prev_energy_subshort[i] = energy_subshort[i + PSY_LAME_NUM_SUBBLOCKS] = p; energy_short[1 + i / PSY_LAME_NUM_SUBBLOCKS] += p; - /* FIXME: The indexes below are [i + 3 - 2] in the LAME source. - * Obviously the 3 and 2 have some significance, or this would be just [i + 1] - * (which is what we use here). What the 3 stands for is ambigious, as it is both - * number of short blocks, and the number of sub-short blocks. - * It seems that LAME is comparing each sub-block to sub-block + 1 in the - * previous block. + /* NOTE: The indexes below are [i + 3 - 2] in the LAME source. + * Obviously the 3 and 2 have some significance, or this would be just [i + 1] + * (which is what we use here). What the 3 stands for is ambiguous, as it is both + * number of short blocks, and the number of sub-short blocks. + * It seems that LAME is comparing each sub-block to sub-block + 1 in the + * previous block. 
*/ if (p > energy_subshort[i + 1]) p = p / energy_subshort[i + 1]; diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c index 1e36235f3a..9ce50d2634 100644 --- a/libavcodec/allcodecs.c +++ b/libavcodec/allcodecs.c @@ -245,6 +245,7 @@ void avcodec_register_all(void) REGISTER_DECODER (XAN_WC3, xan_wc3); REGISTER_DECODER (XAN_WC4, xan_wc4); REGISTER_DECODER (XL, xl); + REGISTER_ENCDEC (XWD, xwd); REGISTER_ENCDEC (Y41P, y41p); REGISTER_DECODER (YOP, yop); REGISTER_ENCDEC (YUV4, yuv4); diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h index 215bfc9bb4..cbcf4e3c42 100644 --- a/libavcodec/avcodec.h +++ b/libavcodec/avcodec.h @@ -255,6 +255,7 @@ enum CodecID { CODEC_ID_VBLE, CODEC_ID_DXTORY, CODEC_ID_V410, + CODEC_ID_XWD, CODEC_ID_Y41P = MKBETAG('Y','4','1','P'), CODEC_ID_UTVIDEO = 0x800, CODEC_ID_ESCAPE130 = MKBETAG('E','1','3','0'), diff --git a/libavcodec/psymodel.c b/libavcodec/psymodel.c index faadb1b870..b3cfb0e75f 100644 --- a/libavcodec/psymodel.c +++ b/libavcodec/psymodel.c @@ -112,20 +112,15 @@ av_cold struct FFPsyPreprocessContext* ff_psy_preprocess_init(AVCodecContext *av return ctx; } -void ff_psy_preprocess(struct FFPsyPreprocessContext *ctx, - const int16_t *audio, int16_t *dest, - int tag, int channels) +void ff_psy_preprocess(struct FFPsyPreprocessContext *ctx, float **audio, int channels) { - int ch, i; + int ch; + int frame_size = ctx->avctx->frame_size; + if (ctx->fstate) { for (ch = 0; ch < channels; ch++) - ff_iir_filter(ctx->fcoeffs, ctx->fstate[tag+ch], ctx->avctx->frame_size, - audio + ch, ctx->avctx->channels, - dest + ch, ctx->avctx->channels); - } else { - for (ch = 0; ch < channels; ch++) - for (i = 0; i < ctx->avctx->frame_size; i++) - dest[i*ctx->avctx->channels + ch] = audio[i*ctx->avctx->channels + ch]; + ff_iir_filter_flt(ctx->fcoeffs, ctx->fstate[ch], frame_size, + &audio[ch][frame_size], 1, &audio[ch][frame_size], 1); } } diff --git a/libavcodec/psymodel.h b/libavcodec/psymodel.h index a7b7948cd2..317974bca4 100644 --- a/libavcodec/psymodel.h +++ b/libavcodec/psymodel.h @@ -109,7 +109,7 @@ typedef struct FFPsyModel { * * @return suggested window information in a structure */ - FFPsyWindowInfo (*window)(FFPsyContext *ctx, const int16_t *audio, const int16_t *la, int channel, int prev_type); + FFPsyWindowInfo (*window)(FFPsyContext *ctx, const float *audio, const float *la, int channel, int prev_type); /** * Perform psychoacoustic analysis and set band info (threshold, energy) for a group of channels. @@ -174,14 +174,10 @@ av_cold struct FFPsyPreprocessContext* ff_psy_preprocess_init(AVCodecContext *av * Preprocess several channel in audio frame in order to compress it better. * * @param ctx preprocessing context - * @param audio samples to preprocess - * @param dest place to put filtered samples - * @param tag channel number - * @param channels number of channel to preprocess (some additional work may be done on stereo pair) + * @param audio samples to be filtered (in place) + * @param channels number of channel to preprocess */ -void ff_psy_preprocess(struct FFPsyPreprocessContext *ctx, - const int16_t *audio, int16_t *dest, - int tag, int channels); +void ff_psy_preprocess(struct FFPsyPreprocessContext *ctx, float **audio, int channels); /** * Cleanup audio preprocessing module. 
diff --git a/libavcodec/vc1.c b/libavcodec/vc1.c index 775caa69f8..64884fcc50 100644 --- a/libavcodec/vc1.c +++ b/libavcodec/vc1.c @@ -918,13 +918,15 @@ int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb) } goto parse_common_info; } - if (v->finterpflag) - v->interpfrm = get_bits1(gb); - if (v->s.pict_type == AV_PICTURE_TYPE_B) { - v->bfraction_lut_index = get_vlc2(gb, ff_vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1); - v->bfraction = ff_vc1_bfraction_lut[v->bfraction_lut_index]; - if (v->bfraction == 0) { - v->s.pict_type = AV_PICTURE_TYPE_BI; /* XXX: should not happen here */ + if (v->fcm == PROGRESSIVE) { + if (v->finterpflag) + v->interpfrm = get_bits1(gb); + if (v->s.pict_type == AV_PICTURE_TYPE_B) { + v->bfraction_lut_index = get_vlc2(gb, ff_vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1); + v->bfraction = ff_vc1_bfraction_lut[v->bfraction_lut_index]; + if (v->bfraction == 0) { + v->s.pict_type = AV_PICTURE_TYPE_BI; /* XXX: should not happen here */ + } } } diff --git a/libavcodec/version.h b/libavcodec/version.h index 5125916fe1..4473c9404d 100644 --- a/libavcodec/version.h +++ b/libavcodec/version.h @@ -21,8 +21,8 @@ #define AVCODEC_VERSION_H #define LIBAVCODEC_VERSION_MAJOR 53 -#define LIBAVCODEC_VERSION_MINOR 57 -#define LIBAVCODEC_VERSION_MICRO 105 +#define LIBAVCODEC_VERSION_MINOR 58 +#define LIBAVCODEC_VERSION_MICRO 100 #define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ LIBAVCODEC_VERSION_MINOR, \ diff --git a/libavcodec/xwd.h b/libavcodec/xwd.h new file mode 100644 index 0000000000..f41e2cd651 --- /dev/null +++ b/libavcodec/xwd.h @@ -0,0 +1,41 @@ +/* + * XWD image format + * + * Copyright (c) 2012 Paul B Mahol + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_XWD_H +#define AVCODEC_XWD_H + +#define XWD_VERSION 7 +#define XWD_HEADER_SIZE 100 +#define XWD_CMAP_SIZE 12 + +#define XWD_XY_BITMAP 0 +#define XWD_XY_PIXMAP 1 +#define XWD_Z_PIXMAP 2 + +#define XWD_STATIC_GRAY 0 +#define XWD_GRAY_SCALE 1 +#define XWD_STATIC_COLOR 2 +#define XWD_PSEUDO_COLOR 3 +#define XWD_TRUE_COLOR 4 +#define XWD_DIRECT_COLOR 5 + +#endif /* AVCODEC_XWD_H */ diff --git a/libavcodec/xwddec.c b/libavcodec/xwddec.c new file mode 100644 index 0000000000..97f3a6a979 --- /dev/null +++ b/libavcodec/xwddec.c @@ -0,0 +1,267 @@ +/* + * XWD image format + * + * Copyright (c) 2012 Paul B Mahol + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "libavutil/imgutils.h" +#include "avcodec.h" +#include "bytestream.h" +#include "xwd.h" + +static av_cold int xwd_decode_init(AVCodecContext *avctx) +{ + avctx->coded_frame = avcodec_alloc_frame(); + if (!avctx->coded_frame) + return AVERROR(ENOMEM); + + return 0; +} + +static int xwd_decode_frame(AVCodecContext *avctx, void *data, + int *data_size, AVPacket *avpkt) +{ + AVFrame *p = avctx->coded_frame; + const uint8_t *buf = avpkt->data; + int i, ret, buf_size = avpkt->size; + uint32_t version, header_size, vclass, ncolors; + uint32_t xoffset, be, bpp, lsize, rsize; + uint32_t pixformat, pixdepth, bunit, bitorder, bpad; + uint32_t rgb[3]; + uint8_t *ptr; + + if (buf_size < XWD_HEADER_SIZE) + return AVERROR_INVALIDDATA; + + header_size = bytestream_get_be32(&buf); + if (buf_size < header_size) + return AVERROR_INVALIDDATA; + + version = bytestream_get_be32(&buf); + if (version != XWD_VERSION) { + av_log(avctx, AV_LOG_ERROR, "unsupported version\n"); + return AVERROR_INVALIDDATA; + } + + if (header_size < XWD_HEADER_SIZE) { + av_log(avctx, AV_LOG_ERROR, "invalid header size\n"); + return AVERROR_INVALIDDATA; + } + + pixformat = bytestream_get_be32(&buf); + pixdepth = bytestream_get_be32(&buf); + avctx->width = bytestream_get_be32(&buf); + avctx->height = bytestream_get_be32(&buf); + xoffset = bytestream_get_be32(&buf); + be = bytestream_get_be32(&buf); + bunit = bytestream_get_be32(&buf); + bitorder = bytestream_get_be32(&buf); + bpad = bytestream_get_be32(&buf); + bpp = bytestream_get_be32(&buf); + lsize = bytestream_get_be32(&buf); + vclass = bytestream_get_be32(&buf); + rgb[0] = bytestream_get_be32(&buf); + rgb[1] = bytestream_get_be32(&buf); + rgb[2] = bytestream_get_be32(&buf); + buf += 8; + ncolors = bytestream_get_be32(&buf); + buf += header_size - (XWD_HEADER_SIZE - 20); + + av_log(avctx, AV_LOG_DEBUG, "pixformat %d, pixdepth %d, bunit %d, bitorder %d, bpad %d\n", + pixformat, pixdepth, bunit, bitorder, bpad); + av_log(avctx, AV_LOG_DEBUG, "vclass %d, ncolors %d, bpp %d, be %d, lsize %d, xoffset %d\n", + vclass, ncolors, bpp, be, lsize, xoffset); + av_log(avctx, AV_LOG_DEBUG, "red %0x, green %0x, blue %0x\n", rgb[0], rgb[1], rgb[2]); + + if (pixformat > XWD_Z_PIXMAP) { + av_log(avctx, AV_LOG_ERROR, "invalid pixmap format\n"); + return AVERROR_INVALIDDATA; + } + + if (pixdepth == 0 || pixdepth > 32) { + av_log(avctx, AV_LOG_ERROR, "invalid pixmap depth\n"); + return AVERROR_INVALIDDATA; + } + + if (xoffset) { + av_log_ask_for_sample(avctx, "unsupported xoffset %d\n", xoffset); + return AVERROR_PATCHWELCOME; + } + + if (be > 1) { + av_log(avctx, AV_LOG_ERROR, "invalid byte order\n"); + return AVERROR_INVALIDDATA; + } + + if (bitorder > 1) { + av_log(avctx, AV_LOG_ERROR, "invalid bitmap bit order\n"); + return AVERROR_INVALIDDATA; + } + + if (bunit != 8 && bunit != 16 && bunit != 32) { + av_log(avctx, AV_LOG_ERROR, "invalid bitmap unit\n"); + return AVERROR_INVALIDDATA; + } + + if (bpad != 8 && bpad != 16 && bpad != 32) { + av_log(avctx, AV_LOG_ERROR, "invalid bitmap scan-line pad\n"); + return 
AVERROR_INVALIDDATA; + } + + if (bpp == 0 || bpp > 32) { + av_log(avctx, AV_LOG_ERROR, "invalid bits per pixel\n"); + return AVERROR_INVALIDDATA; + } + + if (ncolors > 256) { + av_log(avctx, AV_LOG_ERROR, "invalid number of entries in colormap\n"); + return AVERROR_INVALIDDATA; + } + + if ((ret = av_image_check_size(avctx->width, avctx->height, 0, NULL)) < 0) + return ret; + + rsize = FFALIGN(avctx->width * bpp, bpad) / 8; + if (lsize < rsize) { + av_log(avctx, AV_LOG_ERROR, "invalid bytes per scan-line\n"); + return AVERROR_INVALIDDATA; + } + + if (buf_size < header_size + ncolors * XWD_CMAP_SIZE + avctx->height * lsize) { + av_log(avctx, AV_LOG_ERROR, "input buffer too small\n"); + return AVERROR_INVALIDDATA; + } + + if (pixformat != XWD_Z_PIXMAP) { + av_log(avctx, AV_LOG_ERROR, "pixmap format %d unsupported\n", pixformat); + return AVERROR_PATCHWELCOME; + } + + avctx->pix_fmt = PIX_FMT_NONE; + switch (vclass) { + case XWD_STATIC_GRAY: + case XWD_GRAY_SCALE: + if (bpp != 1) + return AVERROR_INVALIDDATA; + if (pixdepth == 1) + avctx->pix_fmt = PIX_FMT_MONOWHITE; + break; + case XWD_STATIC_COLOR: + case XWD_PSEUDO_COLOR: + if (bpp == 8) + avctx->pix_fmt = PIX_FMT_PAL8; + break; + case XWD_TRUE_COLOR: + case XWD_DIRECT_COLOR: + if (bpp != 16 && bpp != 24 && bpp != 32) + return AVERROR_INVALIDDATA; + if (bpp == 16 && pixdepth == 15) { + if (rgb[0] == 0x7C00 && rgb[1] == 0x3E0 && rgb[2] == 0x1F) + avctx->pix_fmt = be ? PIX_FMT_RGB555BE : PIX_FMT_RGB555LE; + else if (rgb[0] == 0x1F && rgb[1] == 0x3E0 && rgb[2] == 0x7C00) + avctx->pix_fmt = be ? PIX_FMT_BGR555BE : PIX_FMT_BGR555LE; + } else if (bpp == 16 && pixdepth == 16) { + if (rgb[0] == 0xF800 && rgb[1] == 0x7E0 && rgb[2] == 0x1F) + avctx->pix_fmt = be ? PIX_FMT_RGB565BE : PIX_FMT_RGB565LE; + else if (rgb[0] == 0x1F && rgb[1] == 0x7E0 && rgb[2] == 0xF800) + avctx->pix_fmt = be ? PIX_FMT_BGR565BE : PIX_FMT_BGR565LE; + } else if (bpp == 24) { + if (rgb[0] == 0xFF0000 && rgb[1] == 0xFF00 && rgb[2] == 0xFF) + avctx->pix_fmt = be ? PIX_FMT_RGB24 : PIX_FMT_BGR24; + else if (rgb[0] == 0xFF && rgb[1] == 0xFF00 && rgb[2] == 0xFF0000) + avctx->pix_fmt = be ? PIX_FMT_BGR24 : PIX_FMT_RGB24; + } else if (bpp == 32) { + if (rgb[0] == 0xFF0000 && rgb[1] == 0xFF00 && rgb[2] == 0xFF) + avctx->pix_fmt = be ? PIX_FMT_ARGB : PIX_FMT_BGRA; + else if (rgb[0] == 0xFF && rgb[1] == 0xFF00 && rgb[2] == 0xFF0000) + avctx->pix_fmt = be ? 
PIX_FMT_ABGR : PIX_FMT_RGBA; + } + buf += ncolors * XWD_CMAP_SIZE; + break; + default: + av_log(avctx, AV_LOG_ERROR, "invalid visual class\n"); + return AVERROR_INVALIDDATA; + } + + if (avctx->pix_fmt == PIX_FMT_NONE) { + av_log_ask_for_sample(avctx, "unknown file: bpp %d, pixdepth %d, vclass %d\n", bpp, pixdepth, vclass); + return AVERROR_PATCHWELCOME; + } + + if (p->data[0]) + avctx->release_buffer(avctx, p); + + p->reference = 0; + if ((ret = avctx->get_buffer(avctx, p)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; + } + + p->key_frame = 1; + p->pict_type = AV_PICTURE_TYPE_I; + + if (avctx->pix_fmt == PIX_FMT_PAL8) { + uint32_t *dst = (uint32_t *)p->data[1]; + uint8_t red, green, blue; + + for (i = 0; i < ncolors; i++) { + + buf += 4; // skip colormap entry number + red = *buf; buf += 2; + green = *buf; buf += 2; + blue = *buf; buf += 2; + buf += 2; // skip bitmask flag and padding + + dst[i] = red << 16 | green << 8 | blue; + } + } + + ptr = p->data[0]; + for (i = 0; i < avctx->height; i++) { + bytestream_get_buffer(&buf, ptr, rsize); + buf += lsize - rsize; + ptr += p->linesize[0]; + } + + *data_size = sizeof(AVFrame); + *(AVFrame *)data = *p; + + return buf_size; +} + +static av_cold int xwd_decode_close(AVCodecContext *avctx) +{ + if (avctx->coded_frame->data[0]) + avctx->release_buffer(avctx, avctx->coded_frame); + + av_freep(&avctx->coded_frame); + + return 0; +} + +AVCodec ff_xwd_decoder = { + .name = "xwd", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_XWD, + .init = xwd_decode_init, + .close = xwd_decode_close, + .decode = xwd_decode_frame, + .capabilities = CODEC_CAP_DR1, + .long_name = NULL_IF_CONFIG_SMALL("XWD (X Window Dump) image"), +}; diff --git a/libavcodec/xwdenc.c b/libavcodec/xwdenc.c new file mode 100644 index 0000000000..5bfdaf780a --- /dev/null +++ b/libavcodec/xwdenc.c @@ -0,0 +1,246 @@ +/* + * XWD image format + * + * Copyright (c) 2012 Paul B Mahol + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "libavutil/intreadwrite.h" +#include "libavutil/pixdesc.h" +#include "avcodec.h" +#include "bytestream.h" +#include "xwd.h" + +#define WINDOW_NAME "lavcxwdenc" +#define WINDOW_NAME_SIZE 11 + +static av_cold int xwd_encode_init(AVCodecContext *avctx) +{ + avctx->coded_frame = avcodec_alloc_frame(); + if (!avctx->coded_frame) + return AVERROR(ENOMEM); + + return 0; +} + +static int xwd_encode_frame(AVCodecContext *avctx, uint8_t *buf, + int buf_size, void *data) +{ + AVFrame *p = data; + enum PixelFormat pix_fmt = avctx->pix_fmt; + uint32_t pixdepth, bpp, bpad, ncolors = 0, lsize, vclass, be = 0; + uint32_t rgb[3] = { 0 }; + uint32_t header_size; + int i, out_size; + uint8_t *ptr; + + pixdepth = av_get_bits_per_pixel(&av_pix_fmt_descriptors[pix_fmt]); + if (av_pix_fmt_descriptors[pix_fmt].flags & PIX_FMT_BE) + be = 1; + switch (pix_fmt) { + case PIX_FMT_ARGB: + case PIX_FMT_BGRA: + case PIX_FMT_RGBA: + case PIX_FMT_ABGR: + if (pix_fmt == PIX_FMT_ARGB || + pix_fmt == PIX_FMT_ABGR) + be = 1; + if (pix_fmt == PIX_FMT_ABGR || + pix_fmt == PIX_FMT_RGBA) { + rgb[0] = 0xFF; + rgb[1] = 0xFF00; + rgb[2] = 0xFF0000; + } else { + rgb[0] = 0xFF0000; + rgb[1] = 0xFF00; + rgb[2] = 0xFF; + } + bpp = 32; + pixdepth = 24; + vclass = XWD_TRUE_COLOR; + bpad = 32; + break; + case PIX_FMT_BGR24: + case PIX_FMT_RGB24: + if (pix_fmt == PIX_FMT_RGB24) + be = 1; + bpp = 24; + vclass = XWD_TRUE_COLOR; + bpad = 32; + rgb[0] = 0xFF0000; + rgb[1] = 0xFF00; + rgb[2] = 0xFF; + break; + case PIX_FMT_RGB565LE: + case PIX_FMT_RGB565BE: + case PIX_FMT_BGR565LE: + case PIX_FMT_BGR565BE: + if (pix_fmt == PIX_FMT_BGR565LE || + pix_fmt == PIX_FMT_BGR565BE) { + rgb[0] = 0x1F; + rgb[1] = 0x7E0; + rgb[2] = 0xF800; + } else { + rgb[0] = 0xF800; + rgb[1] = 0x7E0; + rgb[2] = 0x1F; + } + bpp = 16; + vclass = XWD_TRUE_COLOR; + bpad = 16; + break; + case PIX_FMT_RGB555LE: + case PIX_FMT_RGB555BE: + case PIX_FMT_BGR555LE: + case PIX_FMT_BGR555BE: + if (pix_fmt == PIX_FMT_BGR555LE || + pix_fmt == PIX_FMT_BGR555BE) { + rgb[0] = 0x1F; + rgb[1] = 0x3E0; + rgb[2] = 0x7C00; + } else { + rgb[0] = 0x7C00; + rgb[1] = 0x3E0; + rgb[2] = 0x1F; + } + bpp = 16; + vclass = XWD_TRUE_COLOR; + bpad = 16; + break; + case PIX_FMT_RGB8: + case PIX_FMT_BGR8: + case PIX_FMT_RGB4_BYTE: + case PIX_FMT_BGR4_BYTE: + case PIX_FMT_PAL8: + bpp = 8; + vclass = XWD_PSEUDO_COLOR; + bpad = 8; + ncolors = 256; + break; + case PIX_FMT_MONOWHITE: + bpp = 1; + bpad = 8; + vclass = XWD_STATIC_GRAY; + break; + default: + av_log(avctx, AV_LOG_INFO, "unsupported pixel format\n"); + return AVERROR(EINVAL); + } + + lsize = FFALIGN(bpp * avctx->width, bpad) / 8; + header_size = XWD_HEADER_SIZE + WINDOW_NAME_SIZE; + out_size = header_size + ncolors * XWD_CMAP_SIZE + avctx->height * lsize; + + if (buf_size < out_size) { + av_log(avctx, AV_LOG_ERROR, "output buffer too small\n"); + return AVERROR(ENOMEM); + } + + avctx->coded_frame->key_frame = 1; + avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I; + + bytestream_put_be32(&buf, header_size); + bytestream_put_be32(&buf, XWD_VERSION); // file version + bytestream_put_be32(&buf, XWD_Z_PIXMAP); // pixmap format + bytestream_put_be32(&buf, pixdepth); // pixmap depth in pixels + bytestream_put_be32(&buf, avctx->width); // pixmap width in pixels + bytestream_put_be32(&buf, avctx->height); // pixmap height in 
pixels + bytestream_put_be32(&buf, 0); // bitmap x offset + bytestream_put_be32(&buf, be); // byte order + bytestream_put_be32(&buf, 32); // bitmap unit + bytestream_put_be32(&buf, be); // bit-order of image data + bytestream_put_be32(&buf, bpad); // bitmap scan-line pad in bits + bytestream_put_be32(&buf, bpp); // bits per pixel + bytestream_put_be32(&buf, lsize); // bytes per scan-line + bytestream_put_be32(&buf, vclass); // visual class + bytestream_put_be32(&buf, rgb[0]); // red mask + bytestream_put_be32(&buf, rgb[1]); // green mask + bytestream_put_be32(&buf, rgb[2]); // blue mask + bytestream_put_be32(&buf, 8); // size of each bitmask in bits + bytestream_put_be32(&buf, ncolors); // number of colors + bytestream_put_be32(&buf, ncolors); // number of entries in color map + bytestream_put_be32(&buf, avctx->width); // window width + bytestream_put_be32(&buf, avctx->height); // window height + bytestream_put_be32(&buf, 0); // window upper left X coordinate + bytestream_put_be32(&buf, 0); // window upper left Y coordinate + bytestream_put_be32(&buf, 0); // window border width + bytestream_put_buffer(&buf, WINDOW_NAME, WINDOW_NAME_SIZE); + + for (i = 0; i < ncolors; i++) { + uint32_t val; + uint8_t red, green, blue; + + val = AV_RN32A(p->data[1] + i * 4); + red = (val >> 16) & 0xFF; + green = (val >> 8) & 0xFF; + blue = val & 0xFF; + + bytestream_put_be32(&buf, i); // colormap entry number + bytestream_put_be16(&buf, red << 8); + bytestream_put_be16(&buf, green << 8); + bytestream_put_be16(&buf, blue << 8); + bytestream_put_byte(&buf, 0x7); // bitmask flag + bytestream_put_byte(&buf, 0); // padding + } + + ptr = p->data[0]; + for (i = 0; i < avctx->height; i++) { + bytestream_put_buffer(&buf, ptr, lsize); + ptr += p->linesize[0]; + } + + return out_size; +} + +static av_cold int xwd_encode_close(AVCodecContext *avctx) +{ + av_freep(&avctx->coded_frame); + + return 0; +} + +AVCodec ff_xwd_encoder = { + .name = "xwd", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_XWD, + .init = xwd_encode_init, + .encode = xwd_encode_frame, + .close = xwd_encode_close, + .pix_fmts = (const enum PixelFormat[]) { PIX_FMT_BGRA, + PIX_FMT_RGBA, + PIX_FMT_ARGB, + PIX_FMT_ABGR, + PIX_FMT_RGB24, + PIX_FMT_BGR24, + PIX_FMT_RGB565BE, + PIX_FMT_RGB565LE, + PIX_FMT_BGR565BE, + PIX_FMT_BGR565LE, + PIX_FMT_RGB555BE, + PIX_FMT_RGB555LE, + PIX_FMT_BGR555BE, + PIX_FMT_BGR555LE, + PIX_FMT_RGB8, + PIX_FMT_BGR8, + PIX_FMT_RGB4_BYTE, + PIX_FMT_BGR4_BYTE, + PIX_FMT_PAL8, + PIX_FMT_MONOWHITE, + PIX_FMT_NONE }, + .long_name = NULL_IF_CONFIG_SMALL("XWD (X Window Dump) image"), +}; |
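
Editor's note on the new XWD encoder above: each output frame is a fixed 100-byte big-endian header, an 11-byte window name ("lavcxwdenc" plus NUL), one 12-byte colormap entry per palette color, and height scan-lines each padded to bpad bits. The helper below is a sketch of that size bookkeeping, not part of the patch (the encoder computes it inline); xwd_frame_size() is a hypothetical name, while the constants mirror xwd.h and xwdenc.c above.

    #define XWD_HEADER_SIZE  100
    #define XWD_CMAP_SIZE     12
    #define WINDOW_NAME_SIZE  11   /* strlen("lavcxwdenc") + 1, as in the encoder */

    static int xwd_frame_size(int width, int height, int bpp, int bpad, int ncolors)
    {
        /* bytes per scan-line, padded to 'bpad' bits: FFALIGN(bpp * width, bpad) / 8 */
        int line_size = ((bpp * width + bpad - 1) / bpad * bpad) / 8;

        return XWD_HEADER_SIZE + WINDOW_NAME_SIZE
             + ncolors * XWD_CMAP_SIZE
             + height * line_size;
    }

For PAL8 input, for example, bpp = 8, bpad = 8 and ncolors = 256, which reproduces the out_size check in xwd_encode_frame().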