author     Michael Niedermayer <michaelni@gmx.at>    2012-02-26 04:47:56 +0100
committer  Michael Niedermayer <michaelni@gmx.at>    2012-02-26 05:11:21 +0100
commit     305e4b35ea7b643ab3ce8a37fa8cb464e8adeb3f
tree       9e3773a72155b936e1ca66d4f089fa1b7eeeb791 /libavcodec
parent     8e039121335f482234214bc4e0322ac72c1089ef
parent     0a9efe4c6eb48bf863e2e630b3ad907a198961c5
Merge remote-tracking branch 'qatar/master'
* qatar/master: (34 commits)
mlp_parser: fix the channel mask value used for the top surround channel
vorbisenc: check all allocations for failure
roqaudioenc: return AVERROR codes instead of -1
roqaudioenc: set correct bit rate
roqaudioenc: use AVCodecContext.frame_size correctly.
roqaudioenc: remove unneeded sample_fmt check
ra144enc: use int16_t* for input samples rather than void*
ra144enc: set AVCodecContext.coded_frame
ra144enc: remove unneeded sample_fmt check
nellymoserenc: set AVCodecContext.coded_frame
nellymoserenc: improve error checking in encode_init()
nellymoserenc: return AVERROR codes instead of -1
libvorbis: improve error checking in oggvorbis_encode_init()
mpegaudioenc: return AVERROR codes instead of -1
libfaac: improve error checking and handling in Faac_encode_init()
avutil: add AVERROR_UNKNOWN
check for coded_frame allocation failure in several audio encoders
audio encoders: do not set coded_frame->key_frame.
g722enc: check for trellis data allocation error
libspeexenc: export encoder delay through AVCodecContext.delay
...
Conflicts:
doc/APIchanges
libavcodec/avcodec.h
libavcodec/fraps.c
libavcodec/kgv1dec.c
libavcodec/libfaac.c
libavcodec/libgsm.c
libavcodec/libvorbis.c
libavcodec/mlp_parser.c
libavcodec/roqaudioenc.c
libavcodec/vorbisenc.c
libavutil/avutil.h
libavutil/error.c
libavutil/error.h
Merged-by: Michael Niedermayer <michaelni@gmx.at>
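
Most of the encoder commits listed above apply one and the same refactor: encode_init() returns named AVERROR codes instead of a bare -1, checks every allocation (including coded_frame), and funnels all failures through a single cleanup path that reuses the close callback. A minimal self-contained sketch of that shape, using hypothetical toy types in place of the real AVCodecContext/avcodec_alloc_frame() API:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Toy stand-ins for the libavcodec pieces touched by these commits;
     * the real code uses AVCodecContext, avcodec_alloc_frame() and AVERROR(). */
    typedef struct ToyEncContext {
        void *coded_frame;
        void *trellis_buf;
    } ToyEncContext;

    #define TOY_ERROR(e) (-(e))   /* AVERROR() likewise maps errno values to negatives */

    static int toy_encode_close(ToyEncContext *c)
    {
        /* Safe on a partially initialised context: free(NULL) is a no-op. */
        free(c->coded_frame); c->coded_frame = NULL;
        free(c->trellis_buf); c->trellis_buf = NULL;
        return 0;
    }

    static int toy_encode_init(ToyEncContext *c, int channels)
    {
        int ret;

        /* Invalid parameters get a named error code instead of a bare -1. */
        if (channels != 1) {
            ret = TOY_ERROR(EINVAL);
            goto error;
        }

        /* Every allocation is checked, and all failures share one exit path. */
        c->coded_frame = calloc(1, 64);
        if (!c->coded_frame) { ret = TOY_ERROR(ENOMEM); goto error; }

        c->trellis_buf = calloc(1, 4096);
        if (!c->trellis_buf) { ret = TOY_ERROR(ENOMEM); goto error; }

        return 0;

    error:
        toy_encode_close(c);   /* defined above init so it can be reused here */
        return ret;
    }

    int main(void)
    {
        ToyEncContext c = { 0 };
        printf("init(mono):   %d\n", toy_encode_init(&c, 1));
        toy_encode_close(&c);
        printf("init(stereo): %d\n", toy_encode_init(&c, 2));
        return 0;
    }

Defining the close function above init, as the g722enc, libfaac and nellymoserenc hunks below do, lets init reuse it for cleanup without a forward declaration.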
Diffstat (limited to 'libavcodec')
-rw-r--r--   libavcodec/avcodec.h        |  13
-rw-r--r--   libavcodec/cdxl.c           |   4
-rw-r--r--   libavcodec/fraps.c          |  11
-rw-r--r--   libavcodec/g722enc.c        |  32
-rw-r--r--   libavcodec/kgv1dec.c        |  70
-rw-r--r--   libavcodec/libfaac.c        |  55
-rw-r--r--   libavcodec/libspeexenc.c    |   7
-rw-r--r--   libavcodec/libvo-aacenc.c   |   2
-rw-r--r--   libavcodec/libvo-amrwbenc.c |   2
-rw-r--r--   libavcodec/libvorbis.c      | 131
-rw-r--r--   libavcodec/mpegaudioenc.c   |   9
-rw-r--r--   libavcodec/nellymoserenc.c  |  47
-rw-r--r--   libavcodec/ra144enc.c       |  32
-rw-r--r--   libavcodec/roqaudioenc.c    | 101
-rw-r--r--   libavcodec/utils.c          |  12
-rw-r--r--   libavcodec/vorbisdec.c      |  12
-rw-r--r--   libavcodec/vorbisenc.c      | 128
-rw-r--r--   libavcodec/xwdenc.c         |   6
18 files changed, 444 insertions, 230 deletions
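
Two of the changes in the diff below are worth reading together: the avcodec.h hunk documents AVCodecContext.delay for audio as the number of "priming" samples the encoder prepends (purely informational, with pts still expected to reflect actual presentation time), and the libspeexenc hunk stores the lookahead straight into avctx->delay and timestamps packets as next_pts - avctx->delay. A small stand-alone sketch of that pts arithmetic, assuming the usual 1/sample_rate time base and made-up frame/delay values:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* With an audio-encoder time base of 1/sample_rate, a sample count converts
     * to a pts value one-to-one; this mimics ff_samples_to_time_base() for that
     * case only (an assumption, not the general rescaling code). */
    static int64_t samples_to_pts(int64_t samples) { return samples; }

    int main(void)
    {
        const int delay      = 320;  /* hypothetical lookahead reported by the codec */
        const int frame_size = 320;  /* hypothetical samples per packet */

        for (int i = 0; i < 4; i++) {
            int64_t next_pts = (int64_t)i * frame_size;   /* samples fed in so far */
            /* Output pts is shifted back by the priming delay, as in libspeexenc. */
            printf("packet %d: pts = %" PRId64 "\n", i,
                   samples_to_pts(next_pts - delay));
        }
        return 0;
    }

With next_pts starting at 0, the first packet comes out with pts = -delay, which is exactly the priming offset a muxer or player can use to trim the decoded output.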
diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h index 8f7566548f..b250e95861 100644 --- a/libavcodec/avcodec.h +++ b/libavcodec/avcodec.h @@ -1372,6 +1372,19 @@ typedef struct AVCodecContext { * the decoder output. (we assume the decoder matches the spec) * Decoding: Number of frames delay in addition to what a standard decoder * as specified in the spec would produce. + * + * Video: + * Number of frames the decoded output will be delayed relative to the + * encoded input. + * + * Audio: + * Number of "priming" samples added to the beginning of the stream + * during encoding. The decoded output will be delayed by this many + * samples relative to the input to the encoder. Note that this field is + * purely informational and does not directly affect the pts output by + * the encoder, which should always be based on the actual presentation + * time, including any delay. + * * - encoding: Set by libavcodec. * - decoding: Set by libavcodec. */ diff --git a/libavcodec/cdxl.c b/libavcodec/cdxl.c index a8546348dc..6bf30cf747 100644 --- a/libavcodec/cdxl.c +++ b/libavcodec/cdxl.c @@ -122,7 +122,7 @@ static void cdxl_decode_ham6(CDXLVideoContext *c) g = index * 0x11 << 8; break; } - AV_WN32(out + x * 3, r | g | b); + AV_WL24(out + x * 3, r | g | b); } out += c->frame.linesize[0]; } @@ -165,7 +165,7 @@ static void cdxl_decode_ham8(CDXLVideoContext *c) g = (index << 10) | (g & (3 << 8)); break; } - AV_WN32(out + x * 3, r | g | b); + AV_WL24(out + x * 3, r | g | b); } out += c->frame.linesize[0]; } diff --git a/libavcodec/fraps.c b/libavcodec/fraps.c index da976c857d..45d95c9c24 100644 --- a/libavcodec/fraps.c +++ b/libavcodec/fraps.c @@ -142,7 +142,7 @@ static int decode_frame(AVCodecContext *avctx, int i, j, is_chroma; const int planes = 3; uint8_t *out; - + enum PixelFormat pix_fmt; header = AV_RL32(buf); version = header & 0xff; @@ -157,8 +157,6 @@ static int decode_frame(AVCodecContext *avctx, buf += header_size; - avctx->pix_fmt = version & 1 ? PIX_FMT_BGR24 : PIX_FMT_YUVJ420P; - if (version < 2) { unsigned needed_size = avctx->width*avctx->height*3; if (version == 0) needed_size /= 2; @@ -205,6 +203,13 @@ static int decode_frame(AVCodecContext *avctx, f->key_frame = 1; f->reference = 0; f->buffer_hints = FF_BUFFER_HINTS_VALID; + + pix_fmt = version & 1 ? 
PIX_FMT_BGR24 : PIX_FMT_YUVJ420P; + if (avctx->pix_fmt != pix_fmt && f->data[0]) { + avctx->release_buffer(avctx, f); + } + avctx->pix_fmt = pix_fmt; + if (ff_thread_get_buffer(avctx, f)) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; diff --git a/libavcodec/g722enc.c b/libavcodec/g722enc.c index 1cb0070649..a5ae0a5153 100644 --- a/libavcodec/g722enc.c +++ b/libavcodec/g722enc.c @@ -41,9 +41,22 @@ #define MIN_TRELLIS 0 #define MAX_TRELLIS 16 +static av_cold int g722_encode_close(AVCodecContext *avctx) +{ + G722Context *c = avctx->priv_data; + int i; + for (i = 0; i < 2; i++) { + av_freep(&c->paths[i]); + av_freep(&c->node_buf[i]); + av_freep(&c->nodep_buf[i]); + } + return 0; +} + static av_cold int g722_encode_init(AVCodecContext * avctx) { G722Context *c = avctx->priv_data; + int ret; if (avctx->channels != 1) { av_log(avctx, AV_LOG_ERROR, "Only mono tracks are allowed.\n"); @@ -62,6 +75,10 @@ static av_cold int g722_encode_init(AVCodecContext * avctx) c->paths[i] = av_mallocz(max_paths * sizeof(**c->paths)); c->node_buf[i] = av_mallocz(2 * frontier * sizeof(**c->node_buf)); c->nodep_buf[i] = av_mallocz(2 * frontier * sizeof(**c->nodep_buf)); + if (!c->paths[i] || !c->node_buf[i] || !c->nodep_buf[i]) { + ret = AVERROR(ENOMEM); + goto error; + } } } @@ -100,18 +117,9 @@ static av_cold int g722_encode_init(AVCodecContext * avctx) } return 0; -} - -static av_cold int g722_encode_close(AVCodecContext *avctx) -{ - G722Context *c = avctx->priv_data; - int i; - for (i = 0; i < 2; i++) { - av_freep(&c->paths[i]); - av_freep(&c->node_buf[i]); - av_freep(&c->nodep_buf[i]); - } - return 0; +error: + g722_encode_close(avctx); + return ret; } static const int16_t low_quant[33] = { diff --git a/libavcodec/kgv1dec.c b/libavcodec/kgv1dec.c index 4566e35b74..264efa2a29 100644 --- a/libavcodec/kgv1dec.c +++ b/libavcodec/kgv1dec.c @@ -30,10 +30,17 @@ typedef struct { AVCodecContext *avctx; - AVFrame pic; - uint16_t *prev, *cur; + AVFrame prev, cur; } KgvContext; +static void decode_flush(AVCodecContext *avctx) +{ + KgvContext * const c = avctx->priv_data; + + if (c->prev.data[0]) + avctx->release_buffer(avctx, &c->prev); +} + static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; @@ -42,7 +49,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac int offsets[8]; uint16_t *out, *prev; int outcnt = 0, maxcnt; - int w, h, i; + int w, h, i, res; if (avpkt->size < 2) return -1; @@ -54,20 +61,23 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac if (av_image_check_size(w, h, 0, avctx)) return -1; - if (w != avctx->width || h != avctx->height) + if (w != avctx->width || h != avctx->height) { + if (c->prev.data[0]) + avctx->release_buffer(avctx, &c->prev); avcodec_set_dimensions(avctx, w, h); + } maxcnt = w * h; - out = av_realloc(c->cur, w * h * 2); - if (!out) - return -1; - c->cur = out; - - prev = av_realloc(c->prev, w * h * 2); - if (!prev) - return -1; - c->prev = prev; + c->cur.reference = 3; + if ((res = avctx->get_buffer(avctx, &c->cur)) < 0) + return res; + out = (uint16_t *) c->cur.data[0]; + if (c->prev.data[0]) { + prev = (uint16_t *) c->prev.data[0]; + } else { + prev = NULL; + } for (i = 0; i < 8; i++) offsets[i] = -1; @@ -80,6 +90,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac out[outcnt++] = code; // rgb555 pixel coded directly } else { int count; + int inp_off; uint16_t *inp; if ((code & 0x6000) == 
0x6000) { @@ -101,7 +112,14 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac if (maxcnt - start < count) break; - inp = prev + start; + if (!prev) { + av_log(avctx, AV_LOG_ERROR, + "Frame reference does not exist\n"); + break; + } + + inp = prev; + inp_off = start; } else { // copy from earlier in this frame int offset = (code & 0x1FFF) + 1; @@ -119,27 +137,28 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac if (outcnt < offset) break; - inp = out + outcnt - offset; + inp = out; + inp_off = outcnt - offset; } if (maxcnt - outcnt < count) break; - for (i = 0; i < count; i++) + for (i = inp_off; i < count + inp_off; i++) { out[outcnt++] = inp[i]; + } } } if (outcnt - maxcnt) av_log(avctx, AV_LOG_DEBUG, "frame finished with %d diff\n", outcnt - maxcnt); - c->pic.data[0] = (uint8_t *)c->cur; - c->pic.linesize[0] = w * 2; - *data_size = sizeof(AVFrame); - *(AVFrame*)data = c->pic; + *(AVFrame*)data = c->cur; - FFSWAP(uint16_t *, c->cur, c->prev); + if (c->prev.data[0]) + avctx->release_buffer(avctx, &c->prev); + FFSWAP(AVFrame, c->cur, c->prev); return avpkt->size; } @@ -150,18 +169,14 @@ static av_cold int decode_init(AVCodecContext *avctx) c->avctx = avctx; avctx->pix_fmt = PIX_FMT_RGB555; - avcodec_get_frame_defaults(&c->pic); + avctx->flags |= CODEC_FLAG_EMU_EDGE; return 0; } static av_cold int decode_end(AVCodecContext *avctx) { - KgvContext * const c = avctx->priv_data; - - av_freep(&c->cur); - av_freep(&c->prev); - + decode_flush(avctx); return 0; } @@ -173,5 +188,6 @@ AVCodec ff_kgv1_decoder = { .init = decode_init, .close = decode_end, .decode = decode_frame, + .flush = decode_flush, .long_name = NULL_IF_CONFIG_SMALL("Kega Game Video"), }; diff --git a/libavcodec/libfaac.c b/libavcodec/libfaac.c index 31dc1a41ed..4fa570e155 100644 --- a/libavcodec/libfaac.c +++ b/libavcodec/libfaac.c @@ -38,28 +38,47 @@ static const int channel_maps[][6] = { { 2, 0, 1, 4, 5, 3 }, //< C L R Ls Rs LFE }; +static av_cold int Faac_encode_close(AVCodecContext *avctx) +{ + FaacAudioContext *s = avctx->priv_data; + + av_freep(&avctx->coded_frame); + av_freep(&avctx->extradata); + + if (s->faac_handle) + faacEncClose(s->faac_handle); + return 0; +} + static av_cold int Faac_encode_init(AVCodecContext *avctx) { FaacAudioContext *s = avctx->priv_data; faacEncConfigurationPtr faac_cfg; unsigned long samples_input, max_bytes_output; + int ret; /* number of channels */ if (avctx->channels < 1 || avctx->channels > 6) { av_log(avctx, AV_LOG_ERROR, "encoding %d channel(s) is not allowed\n", avctx->channels); - return -1; + ret = AVERROR(EINVAL); + goto error; } s->faac_handle = faacEncOpen(avctx->sample_rate, avctx->channels, &samples_input, &max_bytes_output); + if (!s->faac_handle) { + av_log(avctx, AV_LOG_ERROR, "error in faacEncOpen()\n"); + ret = AVERROR_UNKNOWN; + goto error; + } /* check faac version */ faac_cfg = faacEncGetCurrentConfiguration(s->faac_handle); if (faac_cfg->version != FAAC_CFG_VERSION) { av_log(avctx, AV_LOG_ERROR, "wrong libfaac version (compiled for: %d, using %d)\n", FAAC_CFG_VERSION, faac_cfg->version); - faacEncClose(s->faac_handle); - return -1; + ret = AVERROR(EINVAL); + goto error; } /* put the options in the configuration struct */ @@ -79,8 +98,8 @@ static av_cold int Faac_encode_init(AVCodecContext *avctx) break; default: av_log(avctx, AV_LOG_ERROR, "invalid AAC profile\n"); - faacEncClose(s->faac_handle); - return -1; + ret = AVERROR(EINVAL); + goto error; } faac_cfg->mpegVersion = MPEG4; faac_cfg->useTns = 0; @@ 
-100,7 +119,10 @@ static av_cold int Faac_encode_init(AVCodecContext *avctx) avctx->frame_size = samples_input / avctx->channels; avctx->coded_frame= avcodec_alloc_frame(); - avctx->coded_frame->key_frame= 1; + if (!avctx->coded_frame) { + ret = AVERROR(ENOMEM); + goto error; + } /* Set decoder specific info */ avctx->extradata_size = 0; @@ -112,6 +134,10 @@ static av_cold int Faac_encode_init(AVCodecContext *avctx) if (!faacEncGetDecoderSpecificInfo(s->faac_handle, &buffer, &decoder_specific_info_size)) { avctx->extradata = av_malloc(decoder_specific_info_size + FF_INPUT_BUFFER_PADDING_SIZE); + if (!avctx->extradata) { + ret = AVERROR(ENOMEM); + goto error; + } avctx->extradata_size = decoder_specific_info_size; memcpy(avctx->extradata, buffer, avctx->extradata_size); faac_cfg->outputFormat = 0; @@ -123,10 +149,14 @@ static av_cold int Faac_encode_init(AVCodecContext *avctx) if (!faacEncSetConfiguration(s->faac_handle, faac_cfg)) { av_log(avctx, AV_LOG_ERROR, "libfaac doesn't support this output format!\n"); - return -1; + ret = AVERROR(EINVAL); + goto error; } return 0; +error: + Faac_encode_close(avctx); + return ret; } static int Faac_encode_frame(AVCodecContext *avctx, @@ -145,17 +175,6 @@ static int Faac_encode_frame(AVCodecContext *avctx, return bytes_written; } -static av_cold int Faac_encode_close(AVCodecContext *avctx) -{ - FaacAudioContext *s = avctx->priv_data; - - av_freep(&avctx->coded_frame); - av_freep(&avctx->extradata); - - faacEncClose(s->faac_handle); - return 0; -} - static const AVProfile profiles[] = { { FF_PROFILE_AAC_MAIN, "Main" }, { FF_PROFILE_AAC_LOW, "LC" }, diff --git a/libavcodec/libspeexenc.c b/libavcodec/libspeexenc.c index 7deb98b6c3..0fb9b8f8a0 100644 --- a/libavcodec/libspeexenc.c +++ b/libavcodec/libspeexenc.c @@ -81,7 +81,6 @@ typedef struct { int cbr_quality; ///< CBR quality 0 to 10 int abr; ///< flag to enable ABR int pkt_frame_count; ///< frame count for the current packet - int lookahead; ///< encoder delay int64_t next_pts; ///< next pts, in sample_rate time base int pkt_sample_count; ///< sample count in the current packet } LibSpeexEncContext; @@ -200,8 +199,7 @@ static av_cold int encode_init(AVCodecContext *avctx) s->header.frames_per_packet = s->frames_per_packet; /* set encoding delay */ - speex_encoder_ctl(s->enc_state, SPEEX_GET_LOOKAHEAD, &s->lookahead); - s->next_pts = -s->lookahead; + speex_encoder_ctl(s->enc_state, SPEEX_GET_LOOKAHEAD, &avctx->delay); /* create header packet bytes from header struct */ /* note: libspeex allocates the memory for header_data, which is freed @@ -257,7 +255,8 @@ static int encode_frame(AVCodecContext *avctx, uint8_t *frame, int buf_size, /* write output if all frames for the packet have been encoded */ if (s->pkt_frame_count == s->frames_per_packet) { s->pkt_frame_count = 0; - avctx->coded_frame->pts = ff_samples_to_time_base(avctx, s->next_pts); + avctx->coded_frame->pts = ff_samples_to_time_base(avctx, s->next_pts - + avctx->delay); s->next_pts += s->pkt_sample_count; s->pkt_sample_count = 0; if (buf_size > speex_bits_nbytes(&s->bits)) { diff --git a/libavcodec/libvo-aacenc.c b/libavcodec/libvo-aacenc.c index 25e8a317f7..bb6d50f2f5 100644 --- a/libavcodec/libvo-aacenc.c +++ b/libavcodec/libvo-aacenc.c @@ -39,6 +39,8 @@ static av_cold int aac_encode_init(AVCodecContext *avctx) int index; avctx->coded_frame = avcodec_alloc_frame(); + if (!avctx->coded_frame) + return AVERROR(ENOMEM); avctx->frame_size = 1024; voGetAACEncAPI(&s->codec_api); diff --git a/libavcodec/libvo-amrwbenc.c 
b/libavcodec/libvo-amrwbenc.c index 22c71b75f1..2621d97954 100644 --- a/libavcodec/libvo-amrwbenc.c +++ b/libavcodec/libvo-amrwbenc.c @@ -87,6 +87,8 @@ static av_cold int amr_wb_encode_init(AVCodecContext *avctx) avctx->frame_size = 320; avctx->coded_frame = avcodec_alloc_frame(); + if (!avctx->coded_frame) + return AVERROR(ENOMEM); s->state = E_IF_init(); diff --git a/libavcodec/libvorbis.c b/libavcodec/libvorbis.c index ca9376baf1..19dfa34fcc 100644 --- a/libavcodec/libvorbis.c +++ b/libavcodec/libvorbis.c @@ -61,13 +61,13 @@ static const AVOption options[] = { }; static const AVClass class = { "libvorbis", av_default_item_name, options, LIBAVUTIL_VERSION_INT }; -static const char * error(int oggerr, int *averr) +static int vorbis_error_to_averror(int ov_err) { - switch (oggerr) { - case OV_EFAULT: *averr = AVERROR(EFAULT); return "internal error"; - case OV_EIMPL: *averr = AVERROR(EINVAL); return "not supported"; - case OV_EINVAL: *averr = AVERROR(EINVAL); return "invalid request"; - default: *averr = AVERROR(EINVAL); return "unknown error"; + switch (ov_err) { + case OV_EFAULT: return AVERROR_BUG; + case OV_EINVAL: return AVERROR(EINVAL); + case OV_EIMPL: return AVERROR(EINVAL); + default: return AVERROR_UNKNOWN; } } @@ -75,49 +75,41 @@ static av_cold int oggvorbis_init_encoder(vorbis_info *vi, AVCodecContext *avcco { OggVorbisContext *context = avccontext->priv_data; double cfreq; - int r; + int ret; if (avccontext->flags & CODEC_FLAG_QSCALE) { /* variable bitrate */ - float quality = avccontext->global_quality / (float)FF_QP2LAMBDA; - r = vorbis_encode_setup_vbr(vi, avccontext->channels, - avccontext->sample_rate, - quality / 10.0); - if (r) { - av_log(avccontext, AV_LOG_ERROR, - "Unable to set quality to %g: %s\n", quality, error(r, &r)); - return r; - } + float q = avccontext->global_quality / (float)FF_QP2LAMBDA; + if ((ret = vorbis_encode_setup_vbr(vi, avccontext->channels, + avccontext->sample_rate, + q / 10.0))) + goto error; } else { int minrate = avccontext->rc_min_rate > 0 ? avccontext->rc_min_rate : -1; int maxrate = avccontext->rc_min_rate > 0 ? 
avccontext->rc_max_rate : -1; /* constant bitrate */ - r = vorbis_encode_setup_managed(vi, avccontext->channels, - avccontext->sample_rate, minrate, - avccontext->bit_rate, maxrate); - if (r) { - av_log(avccontext, AV_LOG_ERROR, - "Unable to set CBR to %d: %s\n", avccontext->bit_rate, - error(r, &r)); - return r; - } + if ((ret = vorbis_encode_setup_managed(vi, avccontext->channels, + avccontext->sample_rate, minrate, + avccontext->bit_rate, maxrate))) + goto error; /* variable bitrate by estimate, disable slow rate management */ if (minrate == -1 && maxrate == -1) - if (vorbis_encode_ctl(vi, OV_ECTL_RATEMANAGE2_SET, NULL)) - return AVERROR(EINVAL); /* should not happen */ + if ((ret = vorbis_encode_ctl(vi, OV_ECTL_RATEMANAGE2_SET, NULL))) + goto error; /* should not happen */ } /* cutoff frequency */ if (avccontext->cutoff > 0) { cfreq = avccontext->cutoff / 1000.0; - if (vorbis_encode_ctl(vi, OV_ECTL_LOWPASS_SET, &cfreq)) - return AVERROR(EINVAL); /* should not happen */ + if ((ret = vorbis_encode_ctl(vi, OV_ECTL_LOWPASS_SET, &cfreq))) + goto error; /* should not happen */ } if (context->iblock) { - vorbis_encode_ctl(vi, OV_ECTL_IBLOCK_SET, &context->iblock); + if ((ret = vorbis_encode_ctl(vi, OV_ECTL_IBLOCK_SET, &context->iblock))) + goto error; } if (avccontext->channels == 3 && @@ -149,7 +141,12 @@ static av_cold int oggvorbis_init_encoder(vorbis_info *vi, AVCodecContext *avcco } } - return vorbis_encode_setup_init(vi); + if ((ret = vorbis_encode_setup_init(vi))) + goto error; + + return 0; +error: + return vorbis_error_to_averror(ret); } /* How many bytes are needed for a buffer of length 'l' */ @@ -158,34 +155,63 @@ static int xiph_len(int l) return 1 + l / 255 + l; } +static av_cold int oggvorbis_encode_close(AVCodecContext *avccontext) +{ + OggVorbisContext *context = avccontext->priv_data; +/* ogg_packet op ; */ + + vorbis_analysis_wrote(&context->vd, 0); /* notify vorbisenc this is EOF */ + + vorbis_block_clear(&context->vb); + vorbis_dsp_clear(&context->vd); + vorbis_info_clear(&context->vi); + + av_freep(&avccontext->coded_frame); + av_freep(&avccontext->extradata); + + return 0; +} + static av_cold int oggvorbis_encode_init(AVCodecContext *avccontext) { OggVorbisContext *context = avccontext->priv_data; ogg_packet header, header_comm, header_code; uint8_t *p; unsigned int offset; - int r; + int ret; vorbis_info_init(&context->vi); - r = oggvorbis_init_encoder(&context->vi, avccontext); - if (r < 0) { - av_log(avccontext, AV_LOG_ERROR, "oggvorbis_encode_init failed\n"); - return r; + if ((ret = oggvorbis_init_encoder(&context->vi, avccontext))) { + av_log(avccontext, AV_LOG_ERROR, "oggvorbis_encode_init: init_encoder failed\n"); + goto error; + } + if ((ret = vorbis_analysis_init(&context->vd, &context->vi))) { + ret = vorbis_error_to_averror(ret); + goto error; + } + if ((ret = vorbis_block_init(&context->vd, &context->vb))) { + ret = vorbis_error_to_averror(ret); + goto error; } - vorbis_analysis_init(&context->vd, &context->vi); - vorbis_block_init(&context->vd, &context->vb); vorbis_comment_init(&context->vc); vorbis_comment_add_tag(&context->vc, "encoder", LIBAVCODEC_IDENT); - vorbis_analysis_headerout(&context->vd, &context->vc, &header, - &header_comm, &header_code); + if ((ret = vorbis_analysis_headerout(&context->vd, &context->vc, &header, + &header_comm, &header_code))) { + ret = vorbis_error_to_averror(ret); + goto error; + } avccontext->extradata_size = 1 + xiph_len(header.bytes) + xiph_len(header_comm.bytes) + header_code.bytes; p = avccontext->extradata = 
av_malloc(avccontext->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); + if (!p) { + ret = AVERROR(ENOMEM); + goto error; + } p[0] = 2; offset = 1; offset += av_xiphlacing(&p[offset], header.bytes); @@ -208,9 +234,15 @@ static av_cold int oggvorbis_encode_init(AVCodecContext *avccontext) avccontext->frame_size = OGGVORBIS_FRAME_SIZE; avccontext->coded_frame = avcodec_alloc_frame(); - avccontext->coded_frame->key_frame = 1; + if (!avccontext->coded_frame) { + ret = AVERROR(ENOMEM); + goto error; + } return 0; +error: + oggvorbis_encode_close(avccontext); + return ret; } static int oggvorbis_encode_frame(AVCodecContext *avccontext, @@ -286,23 +318,6 @@ static int oggvorbis_encode_frame(AVCodecContext *avccontext, return l; } -static av_cold int oggvorbis_encode_close(AVCodecContext *avccontext) -{ - OggVorbisContext *context = avccontext->priv_data; -/* ogg_packet op ; */ - - vorbis_analysis_wrote(&context->vd, 0); /* notify vorbisenc this is EOF */ - - vorbis_block_clear(&context->vb); - vorbis_dsp_clear(&context->vd); - vorbis_info_clear(&context->vi); - - av_freep(&avccontext->coded_frame); - av_freep(&avccontext->extradata); - - return 0; -} - AVCodec ff_libvorbis_encoder = { .name = "libvorbis", .type = AVMEDIA_TYPE_AUDIO, diff --git a/libavcodec/mpegaudioenc.c b/libavcodec/mpegaudioenc.c index 6b71d01ae9..b3cb0bba59 100644 --- a/libavcodec/mpegaudioenc.c +++ b/libavcodec/mpegaudioenc.c @@ -75,7 +75,7 @@ static av_cold int MPA_encode_init(AVCodecContext *avctx) if (channels <= 0 || channels > 2){ av_log(avctx, AV_LOG_ERROR, "encoding %d channel(s) is not allowed in mp2\n", channels); - return -1; + return AVERROR(EINVAL); } bitrate = bitrate / 1000; s->nb_channels = channels; @@ -93,7 +93,7 @@ static av_cold int MPA_encode_init(AVCodecContext *avctx) } if (i == 3){ av_log(avctx, AV_LOG_ERROR, "Sampling rate %d is not allowed in mp2\n", freq); - return -1; + return AVERROR(EINVAL); } s->freq_index = i; @@ -104,7 +104,7 @@ static av_cold int MPA_encode_init(AVCodecContext *avctx) } if (i == 15){ av_log(avctx, AV_LOG_ERROR, "bitrate %d is not allowed in mp2\n", bitrate); - return -1; + return AVERROR(EINVAL); } s->bitrate_index = i; @@ -181,7 +181,8 @@ static av_cold int MPA_encode_init(AVCodecContext *avctx) } avctx->coded_frame= avcodec_alloc_frame(); - avctx->coded_frame->key_frame= 1; + if (!avctx->coded_frame) + return AVERROR(ENOMEM); return 0; } diff --git a/libavcodec/nellymoserenc.c b/libavcodec/nellymoserenc.c index 5ee1d72bcc..8e018c1b7f 100644 --- a/libavcodec/nellymoserenc.c +++ b/libavcodec/nellymoserenc.c @@ -127,14 +127,29 @@ static void apply_mdct(NellyMoserEncodeContext *s) s->mdct_ctx.mdct_calc(&s->mdct_ctx, s->mdct_out + NELLY_BUF_LEN, s->buf[s->bufsel] + NELLY_BUF_LEN); } +static av_cold int encode_end(AVCodecContext *avctx) +{ + NellyMoserEncodeContext *s = avctx->priv_data; + + ff_mdct_end(&s->mdct_ctx); + + if (s->avctx->trellis) { + av_free(s->opt); + av_free(s->path); + } + av_freep(&avctx->coded_frame); + + return 0; +} + static av_cold int encode_init(AVCodecContext *avctx) { NellyMoserEncodeContext *s = avctx->priv_data; - int i; + int i, ret; if (avctx->channels != 1) { av_log(avctx, AV_LOG_ERROR, "Nellymoser supports only 1 channel\n"); - return -1; + return AVERROR(EINVAL); } if (avctx->sample_rate != 8000 && avctx->sample_rate != 16000 && @@ -142,12 +157,13 @@ static av_cold int encode_init(AVCodecContext *avctx) avctx->sample_rate != 22050 && avctx->sample_rate != 44100 && avctx->strict_std_compliance >= FF_COMPLIANCE_NORMAL) { av_log(avctx, AV_LOG_ERROR, 
"Nellymoser works only with 8000, 16000, 11025, 22050 and 44100 sample rate\n"); - return -1; + return AVERROR(EINVAL); } avctx->frame_size = NELLY_SAMPLES; s->avctx = avctx; - ff_mdct_init(&s->mdct_ctx, 8, 0, 32768.0); + if ((ret = ff_mdct_init(&s->mdct_ctx, 8, 0, 32768.0)) < 0) + goto error; ff_dsputil_init(&s->dsp, avctx); /* Generate overlap window */ @@ -158,23 +174,22 @@ static av_cold int encode_init(AVCodecContext *avctx) if (s->avctx->trellis) { s->opt = av_malloc(NELLY_BANDS * OPT_SIZE * sizeof(float )); s->path = av_malloc(NELLY_BANDS * OPT_SIZE * sizeof(uint8_t)); + if (!s->opt || !s->path) { + ret = AVERROR(ENOMEM); + goto error; + } } - return 0; -} - -static av_cold int encode_end(AVCodecContext *avctx) -{ - NellyMoserEncodeContext *s = avctx->priv_data; - - ff_mdct_end(&s->mdct_ctx); - - if (s->avctx->trellis) { - av_free(s->opt); - av_free(s->path); + avctx->coded_frame = avcodec_alloc_frame(); + if (!avctx->coded_frame) { + ret = AVERROR(ENOMEM); + goto error; } return 0; +error: + encode_end(avctx); + return ret; } #define find_best(val, table, LUT, LUT_add, LUT_size) \ diff --git a/libavcodec/ra144enc.c b/libavcodec/ra144enc.c index 91bf7e174f..b3710e871b 100644 --- a/libavcodec/ra144enc.c +++ b/libavcodec/ra144enc.c @@ -33,15 +33,20 @@ #include "ra144.h" +static av_cold int ra144_encode_close(AVCodecContext *avctx) +{ + RA144Context *ractx = avctx->priv_data; + ff_lpc_end(&ractx->lpc_ctx); + av_freep(&avctx->coded_frame); + return 0; +} + + static av_cold int ra144_encode_init(AVCodecContext * avctx) { RA144Context *ractx; int ret; - if (avctx->sample_fmt != AV_SAMPLE_FMT_S16) { - av_log(avctx, AV_LOG_ERROR, "invalid sample format\n"); - return -1; - } if (avctx->channels != 1) { av_log(avctx, AV_LOG_ERROR, "invalid number of channels: %d\n", avctx->channels); @@ -55,15 +60,19 @@ static av_cold int ra144_encode_init(AVCodecContext * avctx) ractx->avctx = avctx; ret = ff_lpc_init(&ractx->lpc_ctx, avctx->frame_size, LPC_ORDER, FF_LPC_TYPE_LEVINSON); - return ret; -} + if (ret < 0) + goto error; + avctx->coded_frame = avcodec_alloc_frame(); + if (!avctx->coded_frame) { + ret = AVERROR(ENOMEM); + goto error; + } -static av_cold int ra144_encode_close(AVCodecContext *avctx) -{ - RA144Context *ractx = avctx->priv_data; - ff_lpc_end(&ractx->lpc_ctx); return 0; +error: + ra144_encode_close(avctx); + return ret; } @@ -432,6 +441,7 @@ static int ra144_encode_frame(AVCodecContext *avctx, uint8_t *frame, int16_t block_coefs[NBLOCKS][LPC_ORDER]; int lpc_refl[LPC_ORDER]; /**< reflection coefficients of the frame */ unsigned int refl_rms[NBLOCKS]; /**< RMS of the reflection coefficients */ + const int16_t *samples = data; int energy = 0; int i, idx; @@ -506,7 +516,7 @@ static int ra144_encode_frame(AVCodecContext *avctx, uint8_t *frame, ractx->lpc_refl_rms[1] = ractx->lpc_refl_rms[0]; FFSWAP(unsigned int *, ractx->lpc_coef[0], ractx->lpc_coef[1]); for (i = 0; i < NBLOCKS * BLOCKSIZE; i++) - ractx->curr_block[i] = *((int16_t *)data + i) >> 2; + ractx->curr_block[i] = samples[i] >> 2; return FRAMESIZE; } diff --git a/libavcodec/roqaudioenc.c b/libavcodec/roqaudioenc.c index da833411b5..1cff219bff 100644 --- a/libavcodec/roqaudioenc.c +++ b/libavcodec/roqaudioenc.c @@ -25,9 +25,8 @@ #include "avcodec.h" #include "bytestream.h" -#define ROQ_FIRST_FRAME_SIZE (735*8) #define ROQ_FRAME_SIZE 735 - +#define ROQ_HEADER_SIZE 8 #define MAX_DPCM (127*127) @@ -35,34 +34,59 @@ typedef struct { short lastSample[2]; + int input_frames; + int buffered_samples; + int16_t *frame_buffer; } 
ROQDPCMContext; + +static av_cold int roq_dpcm_encode_close(AVCodecContext *avctx) +{ + ROQDPCMContext *context = avctx->priv_data; + + av_freep(&avctx->coded_frame); + av_freep(&context->frame_buffer); + + return 0; +} + static av_cold int roq_dpcm_encode_init(AVCodecContext *avctx) { ROQDPCMContext *context = avctx->priv_data; + int ret; if (avctx->channels > 2) { av_log(avctx, AV_LOG_ERROR, "Audio must be mono or stereo\n"); - return -1; + return AVERROR(EINVAL); } if (avctx->sample_rate != 22050) { av_log(avctx, AV_LOG_ERROR, "Audio must be 22050 Hz\n"); - return -1; - } - if (avctx->sample_fmt != AV_SAMPLE_FMT_S16) { - av_log(avctx, AV_LOG_ERROR, "Audio must be signed 16-bit\n"); - return -1; + return AVERROR(EINVAL); } - avctx->frame_size = ROQ_FIRST_FRAME_SIZE; + avctx->frame_size = ROQ_FRAME_SIZE; + avctx->bit_rate = (ROQ_HEADER_SIZE + ROQ_FRAME_SIZE * avctx->channels) * + (22050 / ROQ_FRAME_SIZE) * 8; + + context->frame_buffer = av_malloc(8 * ROQ_FRAME_SIZE * avctx->channels * + sizeof(*context->frame_buffer)); + if (!context->frame_buffer) { + ret = AVERROR(ENOMEM); + goto error; + } context->lastSample[0] = context->lastSample[1] = 0; avctx->coded_frame= avcodec_alloc_frame(); - if (!avctx->coded_frame) - return AVERROR(ENOMEM); + if (!avctx->coded_frame) { + ret = AVERROR(ENOMEM); + goto error; + } return 0; +error: + roq_dpcm_encode_close(avctx); + return ret; } static unsigned char dpcm_predict(short *previous, short current) @@ -108,25 +132,45 @@ static unsigned char dpcm_predict(short *previous, short current) static int roq_dpcm_encode_frame(AVCodecContext *avctx, unsigned char *frame, int buf_size, void *data) { - int i, samples, stereo, ch; - const short *in; - unsigned char *out; - + int i, stereo, data_size; + const int16_t *in = data; + uint8_t *out = frame; ROQDPCMContext *context = avctx->priv_data; stereo = (avctx->channels == 2); + if (!data && context->input_frames >= 8) + return 0; + + if (data && context->input_frames < 8) { + memcpy(&context->frame_buffer[context->buffered_samples * avctx->channels], + in, avctx->frame_size * avctx->channels * sizeof(*in)); + context->buffered_samples += avctx->frame_size; + if (context->input_frames < 7) { + context->input_frames++; + return 0; + } + in = context->frame_buffer; + } + if (stereo) { context->lastSample[0] &= 0xFF00; context->lastSample[1] &= 0xFF00; } - out = frame; - in = data; + if (context->input_frames == 7 || !data) + data_size = avctx->channels * context->buffered_samples; + else + data_size = avctx->channels * avctx->frame_size; + + if (buf_size < ROQ_HEADER_SIZE + data_size) { + av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n"); + return AVERROR(EINVAL); + } bytestream_put_byte(&out, stereo ? 
0x21 : 0x20); bytestream_put_byte(&out, 0x10); - bytestream_put_le32(&out, avctx->frame_size*avctx->channels); + bytestream_put_le32(&out, data_size); if (stereo) { bytestream_put_byte(&out, (context->lastSample[1])>>8); @@ -135,23 +179,15 @@ static int roq_dpcm_encode_frame(AVCodecContext *avctx, bytestream_put_le16(&out, context->lastSample[0]); /* Write the actual samples */ - samples = avctx->frame_size; - for (i=0; i<samples; i++) - for (ch=0; ch<avctx->channels; ch++) - *out++ = dpcm_predict(&context->lastSample[ch], *in++); + for (i = 0; i < data_size; i++) + *out++ = dpcm_predict(&context->lastSample[i & 1], *in++); - /* Use smaller frames from now on */ - avctx->frame_size = ROQ_FRAME_SIZE; + context->input_frames++; + if (!data) + context->input_frames = FFMAX(context->input_frames, 8); /* Return the result size */ - return out - frame; -} - -static av_cold int roq_dpcm_encode_close(AVCodecContext *avctx) -{ - av_freep(&avctx->coded_frame); - - return 0; + return ROQ_HEADER_SIZE + data_size; } AVCodec ff_roq_dpcm_encoder = { @@ -162,6 +198,7 @@ AVCodec ff_roq_dpcm_encoder = { .init = roq_dpcm_encode_init, .encode = roq_dpcm_encode_frame, .close = roq_dpcm_encode_close, + .capabilities = CODEC_CAP_DELAY, .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("id RoQ DPCM"), }; diff --git a/libavcodec/utils.c b/libavcodec/utils.c index 7bd1059164..e17224a0fb 100644 --- a/libavcodec/utils.c +++ b/libavcodec/utils.c @@ -603,6 +603,8 @@ int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic){ return s->get_buffer(s, pic); } + assert(s->pix_fmt == pic->pix_fmt); + /* If internal buffer type return the same buffer */ if(pic->type == FF_BUFFER_TYPE_INTERNAL) { return 0; @@ -967,6 +969,8 @@ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx, int user_packet = !!avpkt->data; int nb_samples; + *got_packet_ptr = 0; + if (!(avctx->codec->capabilities & CODEC_CAP_DELAY) && !frame) { av_init_packet(avpkt); avpkt->size = 0; @@ -988,7 +992,6 @@ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx, } if (avctx->codec->encode2) { - *got_packet_ptr = 0; ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr); if (!ret && *got_packet_ptr) { if (!(avctx->codec->capabilities & CODEC_CAP_DELAY)) { @@ -1196,10 +1199,11 @@ int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx, int ret; int user_packet = !!avpkt->data; + *got_packet_ptr = 0; + if (!(avctx->codec->capabilities & CODEC_CAP_DELAY) && !frame) { av_init_packet(avpkt); avpkt->size = 0; - *got_packet_ptr = 0; return 0; } @@ -1208,17 +1212,15 @@ int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx, av_assert0(avctx->codec->encode2); - *got_packet_ptr = 0; ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr); if (!ret) { if (!*got_packet_ptr) avpkt->size = 0; else if (!(avctx->codec->capabilities & CODEC_CAP_DELAY)) avpkt->pts = avpkt->dts = frame->pts; - } - if (!ret) avctx->frame_number++; + } emms_c(); return ret; diff --git a/libavcodec/vorbisdec.c b/libavcodec/vorbisdec.c index 678aec70b5..54651f42a3 100644 --- a/libavcodec/vorbisdec.c +++ b/libavcodec/vorbisdec.c @@ -1710,6 +1710,17 @@ static av_cold int vorbis_decode_close(AVCodecContext *avccontext) return 0; } +static av_cold void vorbis_decode_flush(AVCodecContext *avccontext) +{ + vorbis_context *vc = avccontext->priv_data; + + if (vc->saved) { + memset(vc->saved, 0, (vc->blocksize[1] / 4) * vc->audio_channels * + sizeof(*vc->saved)); 
+ } + vc->previous_window = 0; +} + AVCodec ff_vorbis_decoder = { .name = "vorbis", .type = AVMEDIA_TYPE_AUDIO, @@ -1718,6 +1729,7 @@ AVCodec ff_vorbis_decoder = { .init = vorbis_decode_init, .close = vorbis_decode_close, .decode = vorbis_decode_frame, + .flush = vorbis_decode_flush, .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Vorbis"), .channel_layouts = ff_vorbis_channel_layouts, diff --git a/libavcodec/vorbisenc.c b/libavcodec/vorbisenc.c index 80d722db4c..e0c8d0a193 100644 --- a/libavcodec/vorbisenc.c +++ b/libavcodec/vorbisenc.c @@ -155,7 +155,7 @@ static int cb_lookup_vals(int lookup, int dimentions, int entries) return 0; } -static void ready_codebook(vorbis_enc_codebook *cb) +static int ready_codebook(vorbis_enc_codebook *cb) { int i; @@ -167,6 +167,8 @@ static void ready_codebook(vorbis_enc_codebook *cb) int vals = cb_lookup_vals(cb->lookup, cb->ndimentions, cb->nentries); cb->dimentions = av_malloc(sizeof(float) * cb->nentries * cb->ndimentions); cb->pow2 = av_mallocz(sizeof(float) * cb->nentries); + if (!cb->dimentions || !cb->pow2) + return AVERROR(ENOMEM); for (i = 0; i < cb->nentries; i++) { float last = 0; int j; @@ -187,13 +189,16 @@ static void ready_codebook(vorbis_enc_codebook *cb) cb->pow2[i] /= 2.; } } + return 0; } -static void ready_residue(vorbis_enc_residue *rc, vorbis_enc_context *venc) +static int ready_residue(vorbis_enc_residue *rc, vorbis_enc_context *venc) { int i; assert(rc->type == 2); rc->maxes = av_mallocz(sizeof(float[2]) * rc->classifications); + if (!rc->maxes) + return AVERROR(ENOMEM); for (i = 0; i < rc->classifications; i++) { int j; vorbis_enc_codebook * cb; @@ -223,15 +228,16 @@ static void ready_residue(vorbis_enc_residue *rc, vorbis_enc_context *venc) rc->maxes[i][0] += 0.8; rc->maxes[i][1] += 0.8; } + return 0; } -static void create_vorbis_context(vorbis_enc_context *venc, - AVCodecContext *avccontext) +static int create_vorbis_context(vorbis_enc_context *venc, + AVCodecContext *avccontext) { vorbis_enc_floor *fc; vorbis_enc_residue *rc; vorbis_enc_mapping *mc; - int i, book; + int i, book, ret; venc->channels = avccontext->channels; venc->sample_rate = avccontext->sample_rate; @@ -239,6 +245,8 @@ static void create_vorbis_context(vorbis_enc_context *venc, venc->ncodebooks = FF_ARRAY_ELEMS(cvectors); venc->codebooks = av_malloc(sizeof(vorbis_enc_codebook) * venc->ncodebooks); + if (!venc->codebooks) + return AVERROR(ENOMEM); // codebook 0..14 - floor1 book, values 0..255 // codebook 15 residue masterbook @@ -255,27 +263,36 @@ static void create_vorbis_context(vorbis_enc_context *venc, cb->lens = av_malloc(sizeof(uint8_t) * cb->nentries); cb->codewords = av_malloc(sizeof(uint32_t) * cb->nentries); + if (!cb->lens || !cb->codewords) + return AVERROR(ENOMEM); memcpy(cb->lens, cvectors[book].clens, cvectors[book].len); memset(cb->lens + cvectors[book].len, 0, cb->nentries - cvectors[book].len); if (cb->lookup) { vals = cb_lookup_vals(cb->lookup, cb->ndimentions, cb->nentries); cb->quantlist = av_malloc(sizeof(int) * vals); + if (!cb->quantlist) + return AVERROR(ENOMEM); for (i = 0; i < vals; i++) cb->quantlist[i] = cvectors[book].quant[i]; } else { cb->quantlist = NULL; } - ready_codebook(cb); + if ((ret = ready_codebook(cb)) < 0) + return ret; } venc->nfloors = 1; venc->floors = av_malloc(sizeof(vorbis_enc_floor) * venc->nfloors); + if (!venc->floors) + return AVERROR(ENOMEM); // just 1 floor fc = &venc->floors[0]; fc->partitions = NUM_FLOOR_PARTITIONS; fc->partition_to_class = av_malloc(sizeof(int) * fc->partitions); + if 
(!fc->partition_to_class) + return AVERROR(ENOMEM); fc->nclasses = 0; for (i = 0; i < fc->partitions; i++) { static const int a[] = {0, 1, 2, 2, 3, 3, 4, 4}; @@ -284,6 +301,8 @@ static void create_vorbis_context(vorbis_enc_context *venc, } fc->nclasses++; fc->classes = av_malloc(sizeof(vorbis_enc_floor_class) * fc->nclasses); + if (!fc->classes) + return AVERROR(ENOMEM); for (i = 0; i < fc->nclasses; i++) { vorbis_enc_floor_class * c = &fc->classes[i]; int j, books; @@ -292,6 +311,8 @@ static void create_vorbis_context(vorbis_enc_context *venc, c->masterbook = floor_classes[i].masterbook; books = (1 << c->subclass); c->books = av_malloc(sizeof(int) * books); + if (!c->books) + return AVERROR(ENOMEM); for (j = 0; j < books; j++) c->books[j] = floor_classes[i].nbooks[j]; } @@ -303,6 +324,8 @@ static void create_vorbis_context(vorbis_enc_context *venc, fc->values += fc->classes[fc->partition_to_class[i]].dim; fc->list = av_malloc(sizeof(vorbis_floor1_entry) * fc->values); + if (!fc->list) + return AVERROR(ENOMEM); fc->list[0].x = 0; fc->list[1].x = 1 << fc->rangebits; for (i = 2; i < fc->values; i++) { @@ -317,6 +340,8 @@ static void create_vorbis_context(vorbis_enc_context *venc, venc->nresidues = 1; venc->residues = av_malloc(sizeof(vorbis_enc_residue) * venc->nresidues); + if (!venc->residues) + return AVERROR(ENOMEM); // single residue rc = &venc->residues[0]; @@ -327,6 +352,8 @@ static void create_vorbis_context(vorbis_enc_context *venc, rc->classifications = 10; rc->classbook = 15; rc->books = av_malloc(sizeof(*rc->books) * rc->classifications); + if (!rc->books) + return AVERROR(ENOMEM); { static const int8_t a[10][8] = { { -1, -1, -1, -1, -1, -1, -1, -1, }, @@ -342,19 +369,26 @@ static void create_vorbis_context(vorbis_enc_context *venc, }; memcpy(rc->books, a, sizeof a); } - ready_residue(rc, venc); + if ((ret = ready_residue(rc, venc)) < 0) + return ret; venc->nmappings = 1; venc->mappings = av_malloc(sizeof(vorbis_enc_mapping) * venc->nmappings); + if (!venc->mappings) + return AVERROR(ENOMEM); // single mapping mc = &venc->mappings[0]; mc->submaps = 1; mc->mux = av_malloc(sizeof(int) * venc->channels); + if (!mc->mux) + return AVERROR(ENOMEM); for (i = 0; i < venc->channels; i++) mc->mux[i] = 0; mc->floor = av_malloc(sizeof(int) * mc->submaps); mc->residue = av_malloc(sizeof(int) * mc->submaps); + if (!mc->floor || !mc->residue) + return AVERROR(ENOMEM); for (i = 0; i < mc->submaps; i++) { mc->floor[i] = 0; mc->residue[i] = 0; @@ -362,6 +396,8 @@ static void create_vorbis_context(vorbis_enc_context *venc, mc->coupling_steps = venc->channels == 2 ? 
1 : 0; mc->magnitude = av_malloc(sizeof(int) * mc->coupling_steps); mc->angle = av_malloc(sizeof(int) * mc->coupling_steps); + if (!mc->magnitude || !mc->angle) + return AVERROR(ENOMEM); if (mc->coupling_steps) { mc->magnitude[0] = 0; mc->angle[0] = 1; @@ -369,6 +405,8 @@ static void create_vorbis_context(vorbis_enc_context *venc, venc->nmodes = 1; venc->modes = av_malloc(sizeof(vorbis_enc_mode) * venc->nmodes); + if (!venc->modes) + return AVERROR(ENOMEM); // single mode venc->modes[0].blockflag = 0; @@ -379,12 +417,18 @@ static void create_vorbis_context(vorbis_enc_context *venc, venc->samples = av_malloc(sizeof(float) * venc->channels * (1 << venc->log2_blocksize[1])); venc->floor = av_malloc(sizeof(float) * venc->channels * (1 << venc->log2_blocksize[1]) / 2); venc->coeffs = av_malloc(sizeof(float) * venc->channels * (1 << venc->log2_blocksize[1]) / 2); + if (!venc->saved || !venc->samples || !venc->floor || !venc->coeffs) + return AVERROR(ENOMEM); venc->win[0] = ff_vorbis_vwin[venc->log2_blocksize[0] - 6]; venc->win[1] = ff_vorbis_vwin[venc->log2_blocksize[1] - 6]; - ff_mdct_init(&venc->mdct[0], venc->log2_blocksize[0], 0, 1.0); - ff_mdct_init(&venc->mdct[1], venc->log2_blocksize[1], 0, 1.0); + if ((ret = ff_mdct_init(&venc->mdct[0], venc->log2_blocksize[0], 0, 1.0)) < 0) + return ret; + if ((ret = ff_mdct_init(&venc->mdct[1], venc->log2_blocksize[1], 0, 1.0)) < 0) + return ret; + + return 0; } static void put_float(PutBitContext *pb, float f) @@ -647,6 +691,8 @@ static int put_main_header(vorbis_enc_context *venc, uint8_t **out) len = hlens[0] + hlens[1] + hlens[2]; p = *out = av_mallocz(64 + len + len/255); + if (!p) + return AVERROR(ENOMEM); *p++ = 2; p += av_xiphlacing(p, hlens[0]); @@ -952,33 +998,6 @@ static int apply_window_and_mdct(vorbis_enc_context *venc, const signed short *a return 1; } -static av_cold int vorbis_encode_init(AVCodecContext *avccontext) -{ - vorbis_enc_context *venc = avccontext->priv_data; - - if (avccontext->channels != 2) { - av_log(avccontext, AV_LOG_ERROR, "Current FFmpeg Vorbis encoder only supports 2 channels.\n"); - return -1; - } - - create_vorbis_context(venc, avccontext); - - if (avccontext->flags & CODEC_FLAG_QSCALE) - venc->quality = avccontext->global_quality / (float)FF_QP2LAMBDA / 10.; - else - venc->quality = 0.03; - venc->quality *= venc->quality; - - avccontext->extradata_size = put_main_header(venc, (uint8_t**)&avccontext->extradata); - - avccontext->frame_size = 1 << (venc->log2_blocksize[0] - 1); - - avccontext->coded_frame = avcodec_alloc_frame(); - avccontext->coded_frame->key_frame = 1; - - return 0; -} - static int vorbis_encode_frame(AVCodecContext *avccontext, unsigned char *packets, int buf_size, void *data) @@ -1102,6 +1121,43 @@ static av_cold int vorbis_encode_close(AVCodecContext *avccontext) return 0 ; } +static av_cold int vorbis_encode_init(AVCodecContext *avccontext) +{ + vorbis_enc_context *venc = avccontext->priv_data; + int ret; + + if (avccontext->channels != 2) { + av_log(avccontext, AV_LOG_ERROR, "Current FFmpeg Vorbis encoder only supports 2 channels.\n"); + return -1; + } + + if ((ret = create_vorbis_context(venc, avccontext)) < 0) + goto error; + + if (avccontext->flags & CODEC_FLAG_QSCALE) + venc->quality = avccontext->global_quality / (float)FF_QP2LAMBDA / 10.; + else + venc->quality = 0.03; + venc->quality *= venc->quality; + + if ((ret = put_main_header(venc, (uint8_t**)&avccontext->extradata)) < 0) + goto error; + avccontext->extradata_size = ret; + + avccontext->frame_size = 1 << (venc->log2_blocksize[0] - 
1); + + avccontext->coded_frame = avcodec_alloc_frame(); + if (!avccontext->coded_frame) { + ret = AVERROR(ENOMEM); + goto error; + } + + return 0; +error: + vorbis_encode_close(avccontext); + return ret; +} + AVCodec ff_vorbis_encoder = { .name = "vorbis", .type = AVMEDIA_TYPE_AUDIO, diff --git a/libavcodec/xwdenc.c b/libavcodec/xwdenc.c index 67fac81619..8c98ef9f5d 100644 --- a/libavcodec/xwdenc.c +++ b/libavcodec/xwdenc.c @@ -44,7 +44,7 @@ static int xwd_encode_frame(AVCodecContext *avctx, AVPacket *pkt, { enum PixelFormat pix_fmt = avctx->pix_fmt; uint32_t pixdepth, bpp, bpad, ncolors = 0, lsize, vclass, be = 0; - uint32_t rgb[3] = { 0 }; + uint32_t rgb[3] = { 0 }, bitorder = 0; uint32_t header_size; int i, out_size, ret; uint8_t *ptr, *buf; @@ -133,6 +133,8 @@ static int xwd_encode_frame(AVCodecContext *avctx, AVPacket *pkt, ncolors = 256; break; case PIX_FMT_MONOWHITE: + be = 1; + bitorder = 1; bpp = 1; bpad = 8; vclass = XWD_STATIC_GRAY; @@ -164,7 +166,7 @@ static int xwd_encode_frame(AVCodecContext *avctx, AVPacket *pkt, bytestream_put_be32(&buf, 0); // bitmap x offset bytestream_put_be32(&buf, be); // byte order bytestream_put_be32(&buf, 32); // bitmap unit - bytestream_put_be32(&buf, be); // bit-order of image data + bytestream_put_be32(&buf, bitorder); // bit-order of image data bytestream_put_be32(&buf, bpad); // bitmap scan-line pad in bits bytestream_put_be32(&buf, bpp); // bits per pixel bytestream_put_be32(&buf, lsize); // bytes per scan-line |
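
A final pattern from the roqaudioenc hunk above: the encoder now advertises CODEC_CAP_DELAY, holds back the first eight input frames so they can be written as one large initial block, and drains whatever is still buffered when the encode callback is invoked with NULL data. A toy model of that prime/drain flow, with hypothetical sizes in place of the RoQ constants:

    #include <stdio.h>

    #define BUFFERED_FRAMES 8   /* hypothetical stand-in for the 8-frame RoQ prime */

    typedef struct ToyDelayEnc {
        int input_frames;       /* frames received so far */
        int buffered_samples;   /* samples held back, not yet emitted */
    } ToyDelayEnc;

    /* Returns the number of samples emitted for this call; has_data == 0 means
     * "flush", mirroring how a CODEC_CAP_DELAY encoder is drained. */
    static int toy_encode(ToyDelayEnc *e, int has_data, int frame_samples)
    {
        if (!has_data) {                 /* flush: emit anything still held back */
            int out = e->buffered_samples;
            e->buffered_samples = 0;
            return out;
        }

        if (e->input_frames < BUFFERED_FRAMES) {
            /* Priming phase: collect frames until the big first packet is ready. */
            e->buffered_samples += frame_samples;
            e->input_frames++;
            if (e->input_frames < BUFFERED_FRAMES)
                return 0;                /* not enough yet, emit nothing */
            int out = e->buffered_samples;   /* 8th frame: emit the whole buffer */
            e->buffered_samples = 0;
            return out;
        }

        e->input_frames++;
        return frame_samples;            /* steady state: one frame in, one packet out */
    }

    int main(void)
    {
        ToyDelayEnc e = { 0 };
        for (int i = 0; i < 10; i++)
            printf("frame %d -> %d samples out\n", i, toy_encode(&e, 1, 735));
        printf("flush   -> %d samples out\n", toy_encode(&e, 0, 0));
        return 0;
    }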