author     Michael Niedermayer <michaelni@gmx.at>    2011-10-29 02:08:54 +0200
committer  Michael Niedermayer <michaelni@gmx.at>    2011-10-29 02:08:54 +0200
commit     6faf0a21e18f314c48a886864145abe715be6572 (patch)
tree       f67c3e543a8b2c3283875881536d0a69da515e5e
parent     ed1aa8921749a1c70d4453326da7f7b5a6f6f6e7 (diff)
parent     61856d06eb30955290911140e6745bad93a25323 (diff)
Merge remote-tracking branch 'qatar/master'
* qatar/master: (53 commits)
  probe: Restore identification of files with very large id3 tags and no extension.
  probe: Remove id3 tag presence as a criteria to do file extension checking.
  mpegts: MP4 SL support
  mpegts: MP4 OD support
  mpegts: Add support for Sections in PMT
  mpegts: Replace the MP4 descriptor parser with a recursive parser.
  mpegts: Add support for multiple mp4 descriptors
  mpegts: Parse mpeg2 SL descriptors.
  isom: Add MPEG4SYSTEMS dummy object type indication.
  aacdec: allow output reconfiguration on channel changes
  nellymoserenc: take float input samples instead of int16
  nellymoserdec: use dsp functions for overlap and windowing
  nellymoserdec: do not fail if there is extra data in the packet
  nellymoserdec: fail if output buffer is too small
  nellymoserdec: remove pointless buffer size check.
  lavf: add init_put_byte() to the list of visible symbols.
  seek-test: free options dictionary after use
  snow: do not draw_edge if emu_edge is set
  tools/pktdumper: update to recent avformat api
  seek-test: update to recent avformat api
  ...

Conflicts:
    doc/APIchanges
    libavcodec/mpegaudiodec.c
    libavcodec/nellymoserdec.c
    libavcodec/snow.c
    libavcodec/version.h
    libavcodec/wmadec.c
    libavformat/avformat.h
    libavformat/mpegts.c
    libavformat/mxfdec.c
    libavformat/utils.c
    libavformat/wtv.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
-rw-r--r--   doc/APIchanges                   |   4
-rw-r--r--   libavcodec/apedec.c              | 209
-rw-r--r--   libavcodec/avcodec.h             |  20
-rw-r--r--   libavcodec/mpegaudiodec.c        | 925
-rw-r--r--   libavcodec/mpegaudiodec_float.c  |   8
-rw-r--r--   libavcodec/nellymoserdec.c       |  42
-rw-r--r--   libavcodec/nellymoserenc.c       |  12
-rw-r--r--   libavcodec/snow.c                |   2
-rw-r--r--   libavcodec/version.h             |   5
-rw-r--r--   libavcodec/wmadec.c              |  25
-rw-r--r--   libavcodec/wmaprodec.c           |  35
-rw-r--r--   libavcodec/wmavoice.c            |  33
-rw-r--r--   libavformat/asfdec.c             |   2
-rw-r--r--   libavformat/avformat.h           | 136
-rw-r--r--   libavformat/internal.h           |  33
-rw-r--r--   libavformat/isom.c               |   2
-rw-r--r--   libavformat/isom.h               |   2
-rw-r--r--   libavformat/matroskadec.c        |   2
-rw-r--r--   libavformat/mpegts.c             | 451
-rw-r--r--   libavformat/mpegts.h             |  29
-rw-r--r--   libavformat/mxfdec.c             |   3
-rw-r--r--   libavformat/nutdec.c             |   8
-rw-r--r--   libavformat/oggdec.c             |   3
-rw-r--r--   libavformat/seek-test.c          |  11
-rw-r--r--   libavformat/utils.c              |  46
-rw-r--r--   libavformat/version.h            |   6
-rw-r--r--   libavformat/wtvdec.c             |   2
-rw-r--r--   tools/pktdumper.c                |   6
28 files changed, 1342 insertions(+), 720 deletions(-)
diff --git a/doc/APIchanges b/doc/APIchanges
index c2415f0be8..ede1c60699 100644
--- a/doc/APIchanges
+++ b/doc/APIchanges
@@ -16,6 +16,10 @@ API changes, most recent first:
2011-10-20 - b35e9e1 - lavu 51.22.0
Add av_strtok() to avstring.h.
+2011-xx-xx - xxxxxxx - lavc 53.15.0
+ Remove avcodec_parse_frame.
+ Deprecate AVCodecContext.parse_only and CODEC_CAP_PARSE_ONLY.
+
2011-10-xx - xxxxxxx - lavf 53.10.0
Add avformat_new_stream(). Deprecate av_new_stream().
diff --git a/libavcodec/apedec.c b/libavcodec/apedec.c
index 300a0097d8..9d2ce1dfaa 100644
--- a/libavcodec/apedec.c
+++ b/libavcodec/apedec.c
@@ -26,6 +26,7 @@
#include "get_bits.h"
#include "bytestream.h"
#include "libavutil/audioconvert.h"
+#include "libavutil/avassert.h"
/**
* @file
@@ -163,22 +164,34 @@ typedef struct APEContext {
// TODO: dsputilize
-static av_cold int ape_decode_init(AVCodecContext * avctx)
+static av_cold int ape_decode_close(AVCodecContext *avctx)
+{
+ APEContext *s = avctx->priv_data;
+ int i;
+
+ for (i = 0; i < APE_FILTER_LEVELS; i++)
+ av_freep(&s->filterbuf[i]);
+
+ av_freep(&s->data);
+ return 0;
+}
+
+static av_cold int ape_decode_init(AVCodecContext *avctx)
{
APEContext *s = avctx->priv_data;
int i;
if (avctx->extradata_size != 6) {
av_log(avctx, AV_LOG_ERROR, "Incorrect extradata\n");
- return -1;
+ return AVERROR(EINVAL);
}
if (avctx->bits_per_coded_sample != 16) {
av_log(avctx, AV_LOG_ERROR, "Only 16-bit samples are supported\n");
- return -1;
+ return AVERROR(EINVAL);
}
if (avctx->channels > 2) {
av_log(avctx, AV_LOG_ERROR, "Only mono and stereo is supported\n");
- return -1;
+ return AVERROR(EINVAL);
}
s->avctx = avctx;
s->channels = avctx->channels;
@@ -186,34 +199,29 @@ static av_cold int ape_decode_init(AVCodecContext * avctx)
s->compression_level = AV_RL16(avctx->extradata + 2);
s->flags = AV_RL16(avctx->extradata + 4);
- av_log(avctx, AV_LOG_DEBUG, "Compression Level: %d - Flags: %d\n", s->compression_level, s->flags);
+ av_log(avctx, AV_LOG_DEBUG, "Compression Level: %d - Flags: %d\n",
+ s->compression_level, s->flags);
if (s->compression_level % 1000 || s->compression_level > COMPRESSION_LEVEL_INSANE) {
- av_log(avctx, AV_LOG_ERROR, "Incorrect compression level %d\n", s->compression_level);
- return -1;
+ av_log(avctx, AV_LOG_ERROR, "Incorrect compression level %d\n",
+ s->compression_level);
+ return AVERROR_INVALIDDATA;
}
s->fset = s->compression_level / 1000 - 1;
for (i = 0; i < APE_FILTER_LEVELS; i++) {
if (!ape_filter_orders[s->fset][i])
break;
- s->filterbuf[i] = av_malloc((ape_filter_orders[s->fset][i] * 3 + HISTORY_SIZE) * 4);
+ FF_ALLOC_OR_GOTO(avctx, s->filterbuf[i],
+ (ape_filter_orders[s->fset][i] * 3 + HISTORY_SIZE) * 4,
+ filter_alloc_fail);
}
dsputil_init(&s->dsp, avctx);
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
return 0;
-}
-
-static av_cold int ape_decode_close(AVCodecContext * avctx)
-{
- APEContext *s = avctx->priv_data;
- int i;
-
- for (i = 0; i < APE_FILTER_LEVELS; i++)
- av_freep(&s->filterbuf[i]);
-
- av_freep(&s->data);
- return 0;
+filter_alloc_fail:
+ ape_decode_close(avctx);
+ return AVERROR(ENOMEM);
}
/**
@@ -228,7 +236,7 @@ static av_cold int ape_decode_close(AVCodecContext * avctx)
#define BOTTOM_VALUE (TOP_VALUE >> 8)
/** Start the decoder */
-static inline void range_start_decoding(APEContext * ctx)
+static inline void range_start_decoding(APEContext *ctx)
{
ctx->rc.buffer = bytestream_get_byte(&ctx->ptr);
ctx->rc.low = ctx->rc.buffer >> (8 - EXTRA_BITS);
@@ -236,13 +244,16 @@ static inline void range_start_decoding(APEContext * ctx)
}
/** Perform normalization */
-static inline void range_dec_normalize(APEContext * ctx)
+static inline void range_dec_normalize(APEContext *ctx)
{
while (ctx->rc.range <= BOTTOM_VALUE) {
ctx->rc.buffer <<= 8;
- if(ctx->ptr < ctx->data_end)
+ if(ctx->ptr < ctx->data_end) {
ctx->rc.buffer += *ctx->ptr;
- ctx->ptr++;
+ ctx->ptr++;
+ } else {
+ ctx->error = 1;
+ }
ctx->rc.low = (ctx->rc.low << 8) | ((ctx->rc.buffer >> 1) & 0xFF);
ctx->rc.range <<= 8;
}
@@ -254,7 +265,7 @@ static inline void range_dec_normalize(APEContext * ctx)
* @param tot_f is the total frequency or (code_value)1<<shift
* @return the culmulative frequency
*/
-static inline int range_decode_culfreq(APEContext * ctx, int tot_f)
+static inline int range_decode_culfreq(APEContext *ctx, int tot_f)
{
range_dec_normalize(ctx);
ctx->rc.help = ctx->rc.range / tot_f;
@@ -266,7 +277,7 @@ static inline int range_decode_culfreq(APEContext * ctx, int tot_f)
* @param ctx decoder context
* @param shift number of bits to decode
*/
-static inline int range_decode_culshift(APEContext * ctx, int shift)
+static inline int range_decode_culshift(APEContext *ctx, int shift)
{
range_dec_normalize(ctx);
ctx->rc.help = ctx->rc.range >> shift;
@@ -280,14 +291,14 @@ static inline int range_decode_culshift(APEContext * ctx, int shift)
* @param sy_f the interval length (frequency of the symbol)
* @param lt_f the lower end (frequency sum of < symbols)
*/
-static inline void range_decode_update(APEContext * ctx, int sy_f, int lt_f)
+static inline void range_decode_update(APEContext *ctx, int sy_f, int lt_f)
{
ctx->rc.low -= ctx->rc.help * lt_f;
ctx->rc.range = ctx->rc.help * sy_f;
}
/** Decode n bits (n <= 16) without modelling */
-static inline int range_decode_bits(APEContext * ctx, int n)
+static inline int range_decode_bits(APEContext *ctx, int n)
{
int sym = range_decode_culshift(ctx, n);
range_decode_update(ctx, 1, sym);
@@ -339,7 +350,7 @@ static const uint16_t counts_diff_3980[21] = {
* @param counts probability range start position
* @param counts_diff probability range widths
*/
-static inline int range_get_symbol(APEContext * ctx,
+static inline int range_get_symbol(APEContext *ctx,
const uint16_t counts[],
const uint16_t counts_diff[])
{
@@ -374,7 +385,7 @@ static inline void update_rice(APERice *rice, int x)
rice->k++;
}
-static inline int ape_decode_value(APEContext * ctx, APERice *rice)
+static inline int ape_decode_value(APEContext *ctx, APERice *rice)
{
int x, overflow;
@@ -441,7 +452,7 @@ static inline int ape_decode_value(APEContext * ctx, APERice *rice)
return -(x >> 1);
}
-static void entropy_decode(APEContext * ctx, int blockstodecode, int stereo)
+static void entropy_decode(APEContext *ctx, int blockstodecode, int stereo)
{
int32_t *decoded0 = ctx->decoded0;
int32_t *decoded1 = ctx->decoded1;
@@ -464,9 +475,11 @@ static void entropy_decode(APEContext * ctx, int blockstodecode, int stereo)
range_dec_normalize(ctx); /* normalize to use up all bytes */
}
-static void init_entropy_decoder(APEContext * ctx)
+static int init_entropy_decoder(APEContext *ctx)
{
/* Read the CRC */
+ if (ctx->data_end - ctx->ptr < 6)
+ return AVERROR_INVALIDDATA;
ctx->CRC = bytestream_get_be32(&ctx->ptr);
/* Read the frame flags if they exist */
@@ -474,6 +487,8 @@ static void init_entropy_decoder(APEContext * ctx)
if ((ctx->fileversion > 3820) && (ctx->CRC & 0x80000000)) {
ctx->CRC &= ~0x80000000;
+ if (ctx->data_end - ctx->ptr < 6)
+ return AVERROR_INVALIDDATA;
ctx->frameflags = bytestream_get_be32(&ctx->ptr);
}
@@ -490,13 +505,15 @@ static void init_entropy_decoder(APEContext * ctx)
ctx->ptr++;
range_start_decoding(ctx);
+
+ return 0;
}
static const int32_t initial_coeffs[4] = {
360, 317, -109, 98
};
-static void init_predictor_decoder(APEContext * ctx)
+static void init_predictor_decoder(APEContext *ctx)
{
APEPredictor *p = &ctx->predictor;
@@ -519,7 +536,10 @@ static inline int APESIGN(int32_t x) {
return (x < 0) - (x > 0);
}
-static av_always_inline int predictor_update_filter(APEPredictor *p, const int decoded, const int filter, const int delayA, const int delayB, const int adaptA, const int adaptB)
+static av_always_inline int predictor_update_filter(APEPredictor *p,
+ const int decoded, const int filter,
+ const int delayA, const int delayB,
+ const int adaptA, const int adaptB)
{
int32_t predictionA, predictionB, sign;
@@ -563,7 +583,7 @@ static av_always_inline int predictor_update_filter(APEPredictor *p, const int d
return p->filterA[filter];
}
-static void predictor_decode_stereo(APEContext * ctx, int count)
+static void predictor_decode_stereo(APEContext *ctx, int count)
{
APEPredictor *p = &ctx->predictor;
int32_t *decoded0 = ctx->decoded0;
@@ -571,9 +591,11 @@ static void predictor_decode_stereo(APEContext * ctx, int count)
while (count--) {
/* Predictor Y */
- *decoded0 = predictor_update_filter(p, *decoded0, 0, YDELAYA, YDELAYB, YADAPTCOEFFSA, YADAPTCOEFFSB);
+ *decoded0 = predictor_update_filter(p, *decoded0, 0, YDELAYA, YDELAYB,
+ YADAPTCOEFFSA, YADAPTCOEFFSB);
decoded0++;
- *decoded1 = predictor_update_filter(p, *decoded1, 1, XDELAYA, XDELAYB, XADAPTCOEFFSA, XADAPTCOEFFSB);
+ *decoded1 = predictor_update_filter(p, *decoded1, 1, XDELAYA, XDELAYB,
+ XADAPTCOEFFSA, XADAPTCOEFFSB);
decoded1++;
/* Combined */
@@ -587,7 +609,7 @@ static void predictor_decode_stereo(APEContext * ctx, int count)
}
}
-static void predictor_decode_mono(APEContext * ctx, int count)
+static void predictor_decode_mono(APEContext *ctx, int count)
{
APEPredictor *p = &ctx->predictor;
int32_t *decoded0 = ctx->decoded0;
@@ -632,7 +654,7 @@ static void predictor_decode_mono(APEContext * ctx, int count)
p->lastA[0] = currentA;
}
-static void do_init_filter(APEFilter *f, int16_t * buf, int order)
+static void do_init_filter(APEFilter *f, int16_t *buf, int order)
{
f->coeffs = buf;
f->historybuffer = buf + order;
@@ -644,20 +666,23 @@ static void do_init_filter(APEFilter *f, int16_t * buf, int order)
f->avg = 0;
}
-static void init_filter(APEContext * ctx, APEFilter *f, int16_t * buf, int order)
+static void init_filter(APEContext *ctx, APEFilter *f, int16_t *buf, int order)
{
do_init_filter(&f[0], buf, order);
do_init_filter(&f[1], buf + order * 3 + HISTORY_SIZE, order);
}
-static void do_apply_filter(APEContext * ctx, int version, APEFilter *f, int32_t *data, int count, int order, int fracbits)
+static void do_apply_filter(APEContext *ctx, int version, APEFilter *f,
+ int32_t *data, int count, int order, int fracbits)
{
int res;
int absres;
while (count--) {
/* round fixedpoint scalar product */
- res = ctx->dsp.scalarproduct_and_madd_int16(f->coeffs, f->delay - order, f->adaptcoeffs - order, order, APESIGN(*data));
+ res = ctx->dsp.scalarproduct_and_madd_int16(f->coeffs, f->delay - order,
+ f->adaptcoeffs - order,
+ order, APESIGN(*data));
res = (res + (1 << (fracbits - 1))) >> fracbits;
res += *data;
*data++ = res;
@@ -676,7 +701,8 @@ static void do_apply_filter(APEContext * ctx, int version, APEFilter *f, int32_t
/* Update the adaption coefficients */
absres = FFABS(res);
if (absres)
- *f->adaptcoeffs = ((res & (1<<31)) - (1<<30)) >> (25 + (absres <= f->avg*3) + (absres <= f->avg*4/3));
+ *f->adaptcoeffs = ((res & (1<<31)) - (1<<30)) >>
+ (25 + (absres <= f->avg*3) + (absres <= f->avg*4/3));
else
*f->adaptcoeffs = 0;
@@ -699,8 +725,8 @@ static void do_apply_filter(APEContext * ctx, int version, APEFilter *f, int32_t
}
}
-static void apply_filter(APEContext * ctx, APEFilter *f,
- int32_t * data0, int32_t * data1,
+static void apply_filter(APEContext *ctx, APEFilter *f,
+ int32_t *data0, int32_t *data1,
int count, int order, int fracbits)
{
do_apply_filter(ctx, ctx->fileversion, &f[0], data0, count, order, fracbits);
@@ -708,34 +734,38 @@ static void apply_filter(APEContext * ctx, APEFilter *f,
do_apply_filter(ctx, ctx->fileversion, &f[1], data1, count, order, fracbits);
}
-static void ape_apply_filters(APEContext * ctx, int32_t * decoded0,
- int32_t * decoded1, int count)
+static void ape_apply_filters(APEContext *ctx, int32_t *decoded0,
+ int32_t *decoded1, int count)
{
int i;
for (i = 0; i < APE_FILTER_LEVELS; i++) {
if (!ape_filter_orders[ctx->fset][i])
break;
- apply_filter(ctx, ctx->filters[i], decoded0, decoded1, count, ape_filter_orders[ctx->fset][i], ape_filter_fracbits[ctx->fset][i]);
+ apply_filter(ctx, ctx->filters[i], decoded0, decoded1, count,
+ ape_filter_orders[ctx->fset][i],
+ ape_filter_fracbits[ctx->fset][i]);
}
}
-static void init_frame_decoder(APEContext * ctx)
+static int init_frame_decoder(APEContext *ctx)
{
- int i;
- init_entropy_decoder(ctx);
+ int i, ret;
+ if ((ret = init_entropy_decoder(ctx)) < 0)
+ return ret;
init_predictor_decoder(ctx);
for (i = 0; i < APE_FILTER_LEVELS; i++) {
if (!ape_filter_orders[ctx->fset][i])
break;
- init_filter(ctx, ctx->filters[i], ctx->filterbuf[i], ape_filter_orders[ctx->fset][i]);
+ init_filter(ctx, ctx->filters[i], ctx->filterbuf[i],
+ ape_filter_orders[ctx->fset][i]);
}
+ return 0;
}
-static void ape_unpack_mono(APEContext * ctx, int count)
+static void ape_unpack_mono(APEContext *ctx, int count)
{
- int32_t left;
int32_t *decoded0 = ctx->decoded0;
int32_t *decoded1 = ctx->decoded1;
@@ -754,14 +784,11 @@ static void ape_unpack_mono(APEContext * ctx, int count)
/* Pseudo-stereo - just copy left channel to right channel */
if (ctx->channels == 2) {
- while (count--) {
- left = *decoded0;
- *(decoded1++) = *(decoded0++) = left;
- }
+ memcpy(decoded1, decoded0, count * sizeof(*decoded1));
}
}
-static void ape_unpack_stereo(APEContext * ctx, int count)
+static void ape_unpack_stereo(APEContext *ctx, int count)
{
int32_t left, right;
int32_t *decoded0 = ctx->decoded0;
@@ -789,7 +816,7 @@ static void ape_unpack_stereo(APEContext * ctx, int count)
}
}
-static int ape_decode_frame(AVCodecContext * avctx,
+static int ape_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
AVPacket *avpkt)
{
@@ -797,49 +824,65 @@ static int ape_decode_frame(AVCodecContext * avctx,
int buf_size = avpkt->size;
APEContext *s = avctx->priv_data;
int16_t *samples = data;
- int nblocks;
- int i, n;
+ uint32_t nblocks;
+ int i;
int blockstodecode;
int bytes_used;
- if (buf_size == 0 && !s->samples) {
- *data_size = 0;
- return 0;
- }
-
/* should not happen but who knows */
if (BLOCKS_PER_LOOP * 2 * avctx->channels > *data_size) {
- av_log (avctx, AV_LOG_ERROR, "Packet size is too big to be handled in lavc! (max is %d where you have %d)\n", *data_size, s->samples * 2 * avctx->channels);
- return -1;
+ av_log (avctx, AV_LOG_ERROR, "Output buffer is too small.\n");
+ return AVERROR(EINVAL);
}
+ /* this should never be negative, but bad things will happen if it is, so
+ check it just to make sure. */
+ av_assert0(s->samples >= 0);
+
if(!s->samples){
- s->data = av_realloc(s->data, (buf_size + 3) & ~3);
+ uint32_t offset;
+ void *tmp_data;
+
+ if (buf_size < 8) {
+ av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ tmp_data = av_realloc(s->data, FFALIGN(buf_size, 4));
+ if (!tmp_data)
+ return AVERROR(ENOMEM);
+ s->data = tmp_data;
s->dsp.bswap_buf((uint32_t*)s->data, (const uint32_t*)buf, buf_size >> 2);
s->ptr = s->last_ptr = s->data;
s->data_end = s->data + buf_size;
- nblocks = s->samples = bytestream_get_be32(&s->ptr);
- n = bytestream_get_be32(&s->ptr);
- if(n < 0 || n > 3){
+ nblocks = bytestream_get_be32(&s->ptr);
+ offset = bytestream_get_be32(&s->ptr);
+ if (offset > 3) {
av_log(avctx, AV_LOG_ERROR, "Incorrect offset passed\n");
s->data = NULL;
- return -1;
+ return AVERROR_INVALIDDATA;
}
- s->ptr += n;
+ if (s->data_end - s->ptr < offset) {
+ av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
+ return AVERROR_INVALIDDATA;
+ }
+ s->ptr += offset;
- s->currentframeblocks = nblocks;
- buf += 4;
- if (s->samples <= 0) {
- *data_size = 0;
- return buf_size;
+ if (!nblocks || nblocks > INT_MAX) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid sample count: %u.\n", nblocks);
+ return AVERROR_INVALIDDATA;
}
+ s->currentframeblocks = s->samples = nblocks;
memset(s->decoded0, 0, sizeof(s->decoded0));
memset(s->decoded1, 0, sizeof(s->decoded1));
/* Initialize the frame decoder */
- init_frame_decoder(s);
+ if (init_frame_decoder(s) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Error reading frame header\n");
+ return AVERROR_INVALIDDATA;
+ }
}
if (!s->data) {
@@ -858,10 +901,10 @@ static int ape_decode_frame(AVCodecContext * avctx,
ape_unpack_stereo(s, blockstodecode);
emms_c();
- if(s->error || s->ptr > s->data_end){
+ if (s->error) {
s->samples=0;
av_log(avctx, AV_LOG_ERROR, "Error decoding frame\n");
- return -1;
+ return AVERROR_INVALIDDATA;
}
for (i = 0; i < blockstodecode; i++) {
diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
index ad3dd01198..f5b06d3936 100644
--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@ -383,6 +383,8 @@ enum CodecID {
CODEC_ID_MPEG2TS= 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS
* stream (only used by libavformat) */
+ CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems
+ * stream (only used by libavformat) */
CODEC_ID_FFMETADATA=0x21000, ///< Dummy codec for streams containing only metadata information.
};
@@ -682,8 +684,10 @@ typedef struct RcOverride{
* assume the buffer was allocated by avcodec_default_get_buffer.
*/
#define CODEC_CAP_DR1 0x0002
+#if FF_API_PARSE_FRAME
/* If 'parse_only' field is true, then avcodec_parse_frame() can be used. */
#define CODEC_CAP_PARSE_ONLY 0x0004
+#endif
#define CODEC_CAP_TRUNCATED 0x0008
/* Codec can export data for HW decoding (XvMC). */
#define CODEC_CAP_HWACCEL 0x0010
@@ -1590,9 +1594,15 @@ typedef struct AVCodecContext {
*/
int block_align;
- int parse_only; /* - decoding only: If true, only parsing is done
- (function avcodec_parse_frame()). The frame
- data is returned. Only MPEG codecs support this now. */
+#if FF_API_PARSE_FRAME
+ /**
+ * If true, only parsing is done. The frame data is returned.
+ * Only MPEG audio decoders support this now.
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ attribute_deprecated int parse_only;
+#endif
/**
* 0-> h263 quant 1-> mpeg quant
@@ -4047,10 +4057,6 @@ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
*/
void avsubtitle_free(AVSubtitle *sub);
-int avcodec_parse_frame(AVCodecContext *avctx, uint8_t **pdata,
- int *data_size_ptr,
- uint8_t *buf, int buf_size);
-
/**
* Encode an audio frame from samples into buf.
*
diff --git a/libavcodec/mpegaudiodec.c b/libavcodec/mpegaudiodec.c
index 286e08f27a..11d7f1fb93 100644
--- a/libavcodec/mpegaudiodec.c
+++ b/libavcodec/mpegaudiodec.c
@@ -21,7 +21,7 @@
/**
* @file
- * MPEG Audio decoder.
+ * MPEG Audio decoder
*/
#include "libavutil/audioconvert.h"
@@ -63,7 +63,7 @@ typedef struct GranuleDef {
typedef struct MPADecodeContext {
MPA_DECODE_HEADER
- uint8_t last_buf[2*BACKSTEP_SIZE + EXTRABYTES];
+ uint8_t last_buf[2 * BACKSTEP_SIZE + EXTRABYTES];
int last_buf_size;
/* next header (used in free format parsing) */
uint32_t free_format_next_header;
@@ -74,9 +74,6 @@ typedef struct MPADecodeContext {
DECLARE_ALIGNED(32, INTFLOAT, sb_samples)[MPA_MAX_CHANNELS][36][SBLIMIT];
INTFLOAT mdct_buf[MPA_MAX_CHANNELS][SBLIMIT * 18]; /* previous samples, for layer 3 MDCT */
GranuleDef granules[2][2]; /* Used in Layer 3 */
-#ifdef DEBUG
- int frame_count;
-#endif
int adu_mode; ///< 0 for standard mp3, 1 for adu formatted mp3
int dither_state;
int err_recognition;
@@ -95,7 +92,7 @@ typedef struct MPADecodeContext {
# define OUT_FMT AV_SAMPLE_FMT_FLT
#else
# define SHR(a,b) ((a)>>(b))
-/* WARNING: only correct for posititive numbers */
+/* WARNING: only correct for positive numbers */
# define FIXR_OLD(a) ((int)((a) * FRAC_ONE + 0.5))
# define FIXR(a) ((int)((a) * FRAC_ONE + 0.5))
# define FIXHR(a) ((int)((a) * (1LL<<32) + 0.5))
@@ -115,18 +112,16 @@ typedef struct MPADecodeContext {
/* vlc structure for decoding layer 3 huffman tables */
static VLC huff_vlc[16];
static VLC_TYPE huff_vlc_tables[
- 0+128+128+128+130+128+154+166+
- 142+204+190+170+542+460+662+414
+ 0 + 128 + 128 + 128 + 130 + 128 + 154 + 166 +
+ 142 + 204 + 190 + 170 + 542 + 460 + 662 + 414
][2];
static const int huff_vlc_tables_sizes[16] = {
- 0, 128, 128, 128, 130, 128, 154, 166,
- 142, 204, 190, 170, 542, 460, 662, 414
+ 0, 128, 128, 128, 130, 128, 154, 166,
+ 142, 204, 190, 170, 542, 460, 662, 414
};
static VLC huff_quad_vlc[2];
-static VLC_TYPE huff_quad_vlc_tables[128+16][2];
-static const int huff_quad_vlc_tables_sizes[2] = {
- 128, 16
-};
+static VLC_TYPE huff_quad_vlc_tables[128+16][2];
+static const int huff_quad_vlc_tables_sizes[2] = { 128, 16 };
/* computed from band_size_long */
static uint16_t band_index_long[9][23];
#include "mpegaudio_tablegen.h"
@@ -163,17 +158,19 @@ static const int32_t scale_factor_mult2[3][3] = {
* Convert region offsets to region sizes and truncate
* size to big_values.
*/
-static void ff_region_offset2size(GranuleDef *g){
- int i, k, j=0;
- g->region_size[2] = (576 / 2);
- for(i=0;i<3;i++) {
+static void ff_region_offset2size(GranuleDef *g)
+{
+ int i, k, j = 0;
+ g->region_size[2] = 576 / 2;
+ for (i = 0; i < 3; i++) {
k = FFMIN(g->region_size[i], g->big_values);
g->region_size[i] = k - j;
j = k;
}
}
-static void ff_init_short_region(MPADecodeContext *s, GranuleDef *g){
+static void ff_init_short_region(MPADecodeContext *s, GranuleDef *g)
+{
if (g->block_type == 2)
g->region_size[0] = (36 / 2);
else {
@@ -187,17 +184,17 @@ static void ff_init_short_region(MPADecodeContext *s, GranuleDef *g){
g->region_size[1] = (576 / 2);
}
-static void ff_init_long_region(MPADecodeContext *s, GranuleDef *g, int ra1, int ra2){
+static void ff_init_long_region(MPADecodeContext *s, GranuleDef *g, int ra1, int ra2)
+{
int l;
- g->region_size[0] =
- band_index_long[s->sample_rate_index][ra1 + 1] >> 1;
+ g->region_size[0] = band_index_long[s->sample_rate_index][ra1 + 1] >> 1;
/* should not overflow */
l = FFMIN(ra1 + ra2 + 2, 22);
- g->region_size[1] =
- band_index_long[s->sample_rate_index][l] >> 1;
+ g->region_size[1] = band_index_long[s->sample_rate_index][ l] >> 1;
}
-static void ff_compute_band_indexes(MPADecodeContext *s, GranuleDef *g){
+static void ff_compute_band_indexes(MPADecodeContext *s, GranuleDef *g)
+{
if (g->block_type == 2) {
if (g->switch_point) {
/* if switched mode, we handle the 36 first samples as
@@ -212,12 +209,12 @@ static void ff_compute_band_indexes(MPADecodeContext *s, GranuleDef *g){
g->short_start = 2 + (s->sample_rate_index != 8);
} else {
- g->long_end = 0;
+ g->long_end = 0;
g->short_start = 0;
}
} else {
g->short_start = 13;
- g->long_end = 22;
+ g->long_end = 22;
}
}
@@ -228,11 +225,11 @@ static inline int l1_unscale(int n, int mant, int scale_factor)
int shift, mod;
int64_t val;
- shift = scale_factor_modshift[scale_factor];
- mod = shift & 3;
+ shift = scale_factor_modshift[scale_factor];
+ mod = shift & 3;
shift >>= 2;
- val = MUL64(mant + (-1 << n) + 1, scale_factor_mult[n-1][mod]);
- shift += n;
+ val = MUL64(mant + (-1 << n) + 1, scale_factor_mult[n-1][mod]);
+ shift += n;
/* NOTE: at this point, 1 <= shift >= 21 + 15 */
return (int)((val + (1LL << (shift - 1))) >> shift);
}
@@ -241,8 +238,8 @@ static inline int l2_unscale_group(int steps, int mant, int scale_factor)
{
int shift, mod, val;
- shift = scale_factor_modshift[scale_factor];
- mod = shift & 3;
+ shift = scale_factor_modshift[scale_factor];
+ mod = shift & 3;
shift >>= 2;
val = (mant - (steps >> 1)) * scale_factor_mult2[steps >> 2][mod];
@@ -258,13 +255,13 @@ static inline int l3_unscale(int value, int exponent)
unsigned int m;
int e;
- e = table_4_3_exp [4*value + (exponent&3)];
- m = table_4_3_value[4*value + (exponent&3)];
- e -= (exponent >> 2);
- assert(e>=1);
+ e = table_4_3_exp [4 * value + (exponent & 3)];
+ m = table_4_3_value[4 * value + (exponent & 3)];
+ e -= exponent >> 2;
+ assert(e >= 1);
if (e > 31)
return 0;
- m = (m + (1 << (e-1))) >> e;
+ m = (m + (1 << (e - 1))) >> e;
return m;
}
@@ -272,7 +269,7 @@ static inline int l3_unscale(int value, int exponent)
static av_cold int decode_init(AVCodecContext * avctx)
{
MPADecodeContext *s = avctx->priv_data;
- static int init=0;
+ static int init = 0;
int i, j, k;
s->avctx = avctx;
@@ -282,28 +279,31 @@ static av_cold int decode_init(AVCodecContext * avctx)
avctx->sample_fmt= OUT_FMT;
s->err_recognition = avctx->err_recognition;
+#if FF_API_PARSE_FRAME
if (!init && !avctx->parse_only) {
+#else
+ if (!init) {
+#endif
int offset;
/* scale factors table for layer 1/2 */
- for(i=0;i<64;i++) {
+ for (i = 0; i < 64; i++) {
int shift, mod;
/* 1.0 (i = 3) is normalized to 2 ^ FRAC_BITS */
- shift = (i / 3);
- mod = i % 3;
+ shift = i / 3;
+ mod = i % 3;
scale_factor_modshift[i] = mod | (shift << 2);
}
/* scale factor multiply for layer 1 */
- for(i=0;i<15;i++) {
+ for (i = 0; i < 15; i++) {
int n, norm;
n = i + 2;
norm = ((INT64_C(1) << n) * FRAC_ONE) / ((1 << n) - 1);
scale_factor_mult[i][0] = MULLx(norm, FIXR(1.0 * 2.0), FRAC_BITS);
scale_factor_mult[i][1] = MULLx(norm, FIXR(0.7937005259 * 2.0), FRAC_BITS);
scale_factor_mult[i][2] = MULLx(norm, FIXR(0.6299605249 * 2.0), FRAC_BITS);
- av_dlog(avctx, "%d: norm=%x s=%x %x %x\n",
- i, norm,
+ av_dlog(avctx, "%d: norm=%x s=%x %x %x\n", i, norm,
scale_factor_mult[i][0],
scale_factor_mult[i][1],
scale_factor_mult[i][2]);
@@ -313,7 +313,7 @@ static av_cold int decode_init(AVCodecContext * avctx)
/* huffman decode tables */
offset = 0;
- for(i=1;i<16;i++) {
+ for (i = 1; i < 16; i++) {
const HuffTable *h = &mpa_huff_tables[i];
int xsize, x, y;
uint8_t tmp_bits [512];
@@ -325,8 +325,8 @@ static av_cold int decode_init(AVCodecContext * avctx)
xsize = h->xsize;
j = 0;
- for(x=0;x<xsize;x++) {
- for(y=0;y<xsize;y++){
+ for (x = 0; x < xsize; x++) {
+ for (y = 0; y < xsize; y++) {
tmp_bits [(x << 5) | y | ((x&&y)<<4)]= h->bits [j ];
tmp_codes[(x << 5) | y | ((x&&y)<<4)]= h->codes[j++];
}
@@ -343,7 +343,7 @@ static av_cold int decode_init(AVCodecContext * avctx)
assert(offset == FF_ARRAY_ELEMS(huff_vlc_tables));
offset = 0;
- for(i=0;i<2;i++) {
+ for (i = 0; i < 2; i++) {
huff_quad_vlc[i].table = huff_quad_vlc_tables+offset;
huff_quad_vlc[i].table_allocated = huff_quad_vlc_tables_sizes[i];
init_vlc(&huff_quad_vlc[i], i == 0 ? 7 : 4, 16,
@@ -353,9 +353,9 @@ static av_cold int decode_init(AVCodecContext * avctx)
}
assert(offset == FF_ARRAY_ELEMS(huff_quad_vlc_tables));
- for(i=0;i<9;i++) {
+ for (i = 0; i < 9; i++) {
k = 0;
- for(j=0;j<22;j++) {
+ for (j = 0; j < 22; j++) {
band_index_long[i][j] = k;
k += band_size_long[i][j];
}
@@ -366,21 +366,23 @@ static av_cold int decode_init(AVCodecContext * avctx)
mpegaudio_tableinit();
- for (i = 0; i < 4; i++)
- if (ff_mpa_quant_bits[i] < 0)
- for (j = 0; j < (1<<(-ff_mpa_quant_bits[i]+1)); j++) {
+ for (i = 0; i < 4; i++) {
+ if (ff_mpa_quant_bits[i] < 0) {
+ for (j = 0; j < (1 << (-ff_mpa_quant_bits[i]+1)); j++) {
int val1, val2, val3, steps;
int val = j;
- steps = ff_mpa_quant_steps[i];
- val1 = val % steps;
- val /= steps;
- val2 = val % steps;
- val3 = val / steps;
+ steps = ff_mpa_quant_steps[i];
+ val1 = val % steps;
+ val /= steps;
+ val2 = val % steps;
+ val3 = val / steps;
division_tabs[i][j] = val1 + (val2 << 4) + (val3 << 8);
}
+ }
+ }
- for(i=0;i<7;i++) {
+ for (i = 0; i < 7; i++) {
float f;
INTFLOAT v;
if (i != 6) {
@@ -389,30 +391,30 @@ static av_cold int decode_init(AVCodecContext * avctx)
} else {
v = FIXR(1.0);
}
- is_table[0][i] = v;
+ is_table[0][ i] = v;
is_table[1][6 - i] = v;
}
/* invalid values */
- for(i=7;i<16;i++)
+ for (i = 7; i < 16; i++)
is_table[0][i] = is_table[1][i] = 0.0;
- for(i=0;i<16;i++) {
+ for (i = 0; i < 16; i++) {
double f;
int e, k;
- for(j=0;j<2;j++) {
+ for (j = 0; j < 2; j++) {
e = -(j + 1) * ((i + 1) >> 1);
f = pow(2.0, e / 4.0);
k = i & 1;
is_table_lsf[j][k ^ 1][i] = FIXR(f);
- is_table_lsf[j][k][i] = FIXR(1.0);
+ is_table_lsf[j][k ][i] = FIXR(1.0);
av_dlog(avctx, "is_table_lsf %d %d: %f %f\n",
i, j, (float) is_table_lsf[j][0][i],
(float) is_table_lsf[j][1][i]);
}
}
- for(i=0;i<8;i++) {
+ for (i = 0; i < 8; i++) {
float ci, cs, ca;
ci = ci_table[i];
cs = 1.0 / sqrt(1.0 + ci * ci);
@@ -431,27 +433,27 @@ static av_cold int decode_init(AVCodecContext * avctx)
}
/* compute mdct windows */
- for(i=0;i<36;i++) {
- for(j=0; j<4; j++){
+ for (i = 0; i < 36; i++) {
+ for (j = 0; j < 4; j++) {
double d;
- if(j==2 && i%3 != 1)
+ if (j == 2 && i % 3 != 1)
continue;
- d= sin(M_PI * (i + 0.5) / 36.0);
- if(j==1){
- if (i>=30) d= 0;
- else if(i>=24) d= sin(M_PI * (i - 18 + 0.5) / 12.0);
- else if(i>=18) d= 1;
- }else if(j==3){
- if (i< 6) d= 0;
- else if(i< 12) d= sin(M_PI * (i - 6 + 0.5) / 12.0);
- else if(i< 18) d= 1;
+ d = sin(M_PI * (i + 0.5) / 36.0);
+ if (j == 1) {
+ if (i >= 30) d = 0;
+ else if (i >= 24) d = sin(M_PI * (i - 18 + 0.5) / 12.0);
+ else if (i >= 18) d = 1;
+ } else if (j == 3) {
+ if (i < 6) d = 0;
+ else if (i < 12) d = sin(M_PI * (i - 6 + 0.5) / 12.0);
+ else if (i < 18) d = 1;
}
//merge last stage of imdct into the window coefficients
- d*= 0.5 / cos(M_PI*(2*i + 19)/72);
+ d *= 0.5 / cos(M_PI * (2 * i + 19) / 72);
- if(j==2)
+ if (j == 2)
mdct_win[j][i/3] = FIXHR((d / (1<<5)));
else
mdct_win[j][i ] = FIXHR((d / (1<<5)));
@@ -460,9 +462,9 @@ static av_cold int decode_init(AVCodecContext * avctx)
/* NOTE: we do frequency inversion adter the MDCT by changing
the sign of the right window coefs */
- for(j=0;j<4;j++) {
- for(i=0;i<36;i+=2) {
- mdct_win[j + 4][i] = mdct_win[j][i];
+ for (j = 0; j < 4; j++) {
+ for (i = 0; i < 36; i += 2) {
+ mdct_win[j + 4][i ] = mdct_win[j][i ];
mdct_win[j + 4][i + 1] = -mdct_win[j][i + 1];
}
}
@@ -509,41 +511,41 @@ static void imdct12(INTFLOAT *out, INTFLOAT *in)
{
INTFLOAT in0, in1, in2, in3, in4, in5, t1, t2;
- in0= in[0*3];
- in1= in[1*3] + in[0*3];
- in2= in[2*3] + in[1*3];
- in3= in[3*3] + in[2*3];
- in4= in[4*3] + in[3*3];
- in5= in[5*3] + in[4*3];
+ in0 = in[0*3];
+ in1 = in[1*3] + in[0*3];
+ in2 = in[2*3] + in[1*3];
+ in3 = in[3*3] + in[2*3];
+ in4 = in[4*3] + in[3*3];
+ in5 = in[5*3] + in[4*3];
in5 += in3;
in3 += in1;
- in2= MULH3(in2, C3, 2);
- in3= MULH3(in3, C3, 4);
-
- t1 = in0 - in4;
- t2 = MULH3(in1 - in5, icos36h[4], 2);
-
- out[ 7]=
- out[10]= t1 + t2;
- out[ 1]=
- out[ 4]= t1 - t2;
-
- in0 += SHR(in4, 1);
- in4 = in0 + in2;
- in5 += 2*in1;
- in1 = MULH3(in5 + in3, icos36h[1], 1);
- out[ 8]=
- out[ 9]= in4 + in1;
- out[ 2]=
- out[ 3]= in4 - in1;
-
- in0 -= in2;
- in5 = MULH3(in5 - in3, icos36h[7], 2);
- out[ 0]=
- out[ 5]= in0 - in5;
- out[ 6]=
- out[11]= in0 + in5;
+ in2 = MULH3(in2, C3, 2);
+ in3 = MULH3(in3, C3, 4);
+
+ t1 = in0 - in4;
+ t2 = MULH3(in1 - in5, icos36h[4], 2);
+
+ out[ 7] =
+ out[10] = t1 + t2;
+ out[ 1] =
+ out[ 4] = t1 - t2;
+
+ in0 += SHR(in4, 1);
+ in4 = in0 + in2;
+ in5 += 2*in1;
+ in1 = MULH3(in5 + in3, icos36h[1], 1);
+ out[ 8] =
+ out[ 9] = in4 + in1;
+ out[ 2] =
+ out[ 3] = in4 - in1;
+
+ in0 -= in2;
+ in5 = MULH3(in5 - in3, icos36h[7], 2);
+ out[ 0] =
+ out[ 5] = in0 - in5;
+ out[ 6] =
+ out[11] = in0 + in5;
}
/* cos(pi*i/18) */
@@ -564,12 +566,12 @@ static void imdct36(INTFLOAT *out, INTFLOAT *buf, INTFLOAT *in, INTFLOAT *win)
INTFLOAT t0, t1, t2, t3, s0, s1, s2, s3;
INTFLOAT tmp[18], *tmp1, *in1;
- for(i=17;i>=1;i--)
+ for (i = 17; i >= 1; i--)
in[i] += in[i-1];
- for(i=17;i>=3;i-=2)
+ for (i = 17; i >= 3; i -= 2)
in[i] += in[i-2];
- for(j=0;j<2;j++) {
+ for (j = 0; j < 2; j++) {
tmp1 = tmp + j;
in1 = in + j;
@@ -601,7 +603,7 @@ static void imdct36(INTFLOAT *out, INTFLOAT *buf, INTFLOAT *in, INTFLOAT *win)
}
i = 0;
- for(j=0;j<4;j++) {
+ for (j = 0; j < 4; j++) {
t0 = tmp[i];
t1 = tmp[i + 2];
s0 = t1 + t0;
@@ -609,22 +611,22 @@ static void imdct36(INTFLOAT *out, INTFLOAT *buf, INTFLOAT *in, INTFLOAT *win)
t2 = tmp[i + 1];
t3 = tmp[i + 3];
- s1 = MULH3(t3 + t2, icos36h[j], 2);
- s3 = MULLx(t3 - t2, icos36[8 - j], FRAC_BITS);
+ s1 = MULH3(t3 + t2, icos36h[ j], 2);
+ s3 = MULLx(t3 - t2, icos36 [8 - j], FRAC_BITS);
t0 = s0 + s1;
t1 = s0 - s1;
- out[(9 + j)*SBLIMIT] = MULH3(t1, win[9 + j], 1) + buf[9 + j];
- out[(8 - j)*SBLIMIT] = MULH3(t1, win[8 - j], 1) + buf[8 - j];
- buf[9 + j] = MULH3(t0, win[18 + 9 + j], 1);
- buf[8 - j] = MULH3(t0, win[18 + 8 - j], 1);
+ out[(9 + j) * SBLIMIT] = MULH3(t1, win[ 9 + j], 1) + buf[9 + j];
+ out[(8 - j) * SBLIMIT] = MULH3(t1, win[ 8 - j], 1) + buf[8 - j];
+ buf[ 9 + j ] = MULH3(t0, win[18 + 9 + j], 1);
+ buf[ 8 - j ] = MULH3(t0, win[18 + 8 - j], 1);
t0 = s2 + s3;
t1 = s2 - s3;
- out[(9 + 8 - j)*SBLIMIT] = MULH3(t1, win[9 + 8 - j], 1) + buf[9 + 8 - j];
- out[( j)*SBLIMIT] = MULH3(t1, win[ j], 1) + buf[ j];
- buf[9 + 8 - j] = MULH3(t0, win[18 + 9 + 8 - j], 1);
- buf[ + j] = MULH3(t0, win[18 + j], 1);
+ out[(9 + 8 - j) * SBLIMIT] = MULH3(t1, win[ 9 + 8 - j], 1) + buf[9 + 8 - j];
+ out[ j * SBLIMIT] = MULH3(t1, win[ j], 1) + buf[ j];
+ buf[ 9 + 8 - j ] = MULH3(t0, win[18 + 9 + 8 - j], 1);
+ buf[ j ] = MULH3(t0, win[18 + j], 1);
i += 4;
}
@@ -632,10 +634,10 @@ static void imdct36(INTFLOAT *out, INTFLOAT *buf, INTFLOAT *in, INTFLOAT *win)
s1 = MULH3(tmp[17], icos36h[4], 2);
t0 = s0 + s1;
t1 = s0 - s1;
- out[(9 + 4)*SBLIMIT] = MULH3(t1, win[9 + 4], 1) + buf[9 + 4];
- out[(8 - 4)*SBLIMIT] = MULH3(t1, win[8 - 4], 1) + buf[8 - 4];
- buf[9 + 4] = MULH3(t0, win[18 + 9 + 4], 1);
- buf[8 - 4] = MULH3(t0, win[18 + 8 - 4], 1);
+ out[(9 + 4) * SBLIMIT] = MULH3(t1, win[ 9 + 4], 1) + buf[9 + 4];
+ out[(8 - 4) * SBLIMIT] = MULH3(t1, win[ 8 - 4], 1) + buf[8 - 4];
+ buf[ 9 + 4 ] = MULH3(t0, win[18 + 9 + 4], 1);
+ buf[ 8 - 4 ] = MULH3(t0, win[18 + 8 - 4], 1);
}
/* return the number of decoded frames */
@@ -651,23 +653,22 @@ static int mp_decode_layer1(MPADecodeContext *s)
bound = SBLIMIT;
/* allocation bits */
- for(i=0;i<bound;i++) {
- for(ch=0;ch<s->nb_channels;ch++) {
+ for (i = 0; i < bound; i++) {
+ for (ch = 0; ch < s->nb_channels; ch++) {
allocation[ch][i] = get_bits(&s->gb, 4);
}
}
- for(i=bound;i<SBLIMIT;i++) {
+ for (i = bound; i < SBLIMIT; i++)
allocation[0][i] = get_bits(&s->gb, 4);
- }
/* scale factors */
- for(i=0;i<bound;i++) {
- for(ch=0;ch<s->nb_channels;ch++) {
+ for (i = 0; i < bound; i++) {
+ for (ch = 0; ch < s->nb_channels; ch++) {
if (allocation[ch][i])
scale_factors[ch][i] = get_bits(&s->gb, 6);
}
}
- for(i=bound;i<SBLIMIT;i++) {
+ for (i = bound; i < SBLIMIT; i++) {
if (allocation[0][i]) {
scale_factors[0][i] = get_bits(&s->gb, 6);
scale_factors[1][i] = get_bits(&s->gb, 6);
@@ -675,9 +676,9 @@ static int mp_decode_layer1(MPADecodeContext *s)
}
/* compute samples */
- for(j=0;j<12;j++) {
- for(i=0;i<bound;i++) {
- for(ch=0;ch<s->nb_channels;ch++) {
+ for (j = 0; j < 12; j++) {
+ for (i = 0; i < bound; i++) {
+ for (ch = 0; ch < s->nb_channels; ch++) {
n = allocation[ch][i];
if (n) {
mant = get_bits(&s->gb, n + 1);
@@ -688,7 +689,7 @@ static int mp_decode_layer1(MPADecodeContext *s)
s->sb_samples[ch][j][i] = v;
}
}
- for(i=bound;i<SBLIMIT;i++) {
+ for (i = bound; i < SBLIMIT; i++) {
n = allocation[0][i];
if (n) {
mant = get_bits(&s->gb, n + 1);
@@ -717,8 +718,8 @@ static int mp_decode_layer2(MPADecodeContext *s)
/* select decoding table */
table = ff_mpa_l2_select_table(s->bit_rate / 1000, s->nb_channels,
- s->sample_rate, s->lsf);
- sblimit = ff_mpa_sblimit_table[table];
+ s->sample_rate, s->lsf);
+ sblimit = ff_mpa_sblimit_table[table];
alloc_table = ff_mpa_alloc_tables[table];
if (s->mode == MPA_JSTEREO)
@@ -729,18 +730,18 @@ static int mp_decode_layer2(MPADecodeContext *s)
av_dlog(s->avctx, "bound=%d sblimit=%d\n", bound, sblimit);
/* sanity check */
- if( bound > sblimit ) bound = sblimit;
+ if (bound > sblimit)
+ bound = sblimit;
/* parse bit allocation */
j = 0;
- for(i=0;i<bound;i++) {
+ for (i = 0; i < bound; i++) {
bit_alloc_bits = alloc_table[j];
- for(ch=0;ch<s->nb_channels;ch++) {
+ for (ch = 0; ch < s->nb_channels; ch++)
bit_alloc[ch][i] = get_bits(&s->gb, bit_alloc_bits);
- }
j += 1 << bit_alloc_bits;
}
- for(i=bound;i<sblimit;i++) {
+ for (i = bound; i < sblimit; i++) {
bit_alloc_bits = alloc_table[j];
v = get_bits(&s->gb, bit_alloc_bits);
bit_alloc[0][i] = v;
@@ -749,19 +750,19 @@ static int mp_decode_layer2(MPADecodeContext *s)
}
/* scale codes */
- for(i=0;i<sblimit;i++) {
- for(ch=0;ch<s->nb_channels;ch++) {
+ for (i = 0; i < sblimit; i++) {
+ for (ch = 0; ch < s->nb_channels; ch++) {
if (bit_alloc[ch][i])
scale_code[ch][i] = get_bits(&s->gb, 2);
}
}
/* scale factors */
- for(i=0;i<sblimit;i++) {
- for(ch=0;ch<s->nb_channels;ch++) {
+ for (i = 0; i < sblimit; i++) {
+ for (ch = 0; ch < s->nb_channels; ch++) {
if (bit_alloc[ch][i]) {
sf = scale_factors[ch][i];
- switch(scale_code[ch][i]) {
+ switch (scale_code[ch][i]) {
default:
case 0:
sf[0] = get_bits(&s->gb, 6);
@@ -789,12 +790,12 @@ static int mp_decode_layer2(MPADecodeContext *s)
}
/* samples */
- for(k=0;k<3;k++) {
- for(l=0;l<12;l+=3) {
+ for (k = 0; k < 3; k++) {
+ for (l = 0; l < 12; l += 3) {
j = 0;
- for(i=0;i<bound;i++) {
+ for (i = 0; i < bound; i++) {
bit_alloc_bits = alloc_table[j];
- for(ch=0;ch<s->nb_channels;ch++) {
+ for (ch = 0; ch < s->nb_channels; ch++) {
b = bit_alloc[ch][i];
if (b) {
scale = scale_factors[ch][i][k];
@@ -808,13 +809,13 @@ static int mp_decode_layer2(MPADecodeContext *s)
steps = ff_mpa_quant_steps[qindex];
s->sb_samples[ch][k * 12 + l + 0][i] =
- l2_unscale_group(steps, v2 & 15, scale);
+ l2_unscale_group(steps, v2 & 15, scale);
s->sb_samples[ch][k * 12 + l + 1][i] =
l2_unscale_group(steps, (v2 >> 4) & 15, scale);
s->sb_samples[ch][k * 12 + l + 2][i] =
l2_unscale_group(steps, v2 >> 8 , scale);
} else {
- for(m=0;m<3;m++) {
+ for (m = 0; m < 3; m++) {
v = get_bits(&s->gb, bits);
v = l1_unscale(bits - 1, v, scale);
s->sb_samples[ch][k * 12 + l + m][i] = v;
@@ -830,7 +831,7 @@ static int mp_decode_layer2(MPADecodeContext *s)
j += 1 << bit_alloc_bits;
}
/* XXX: find a way to avoid this duplication of code */
- for(i=bound;i<sblimit;i++) {
+ for (i = bound; i < sblimit; i++) {
bit_alloc_bits = alloc_table[j];
b = bit_alloc[0][i];
if (b) {
@@ -860,7 +861,7 @@ static int mp_decode_layer2(MPADecodeContext *s)
s->sb_samples[1][k * 12 + l + 2][i] =
l2_unscale_group(steps, v, scale1);
} else {
- for(m=0;m<3;m++) {
+ for (m = 0; m < 3; m++) {
mant = get_bits(&s->gb, bits);
s->sb_samples[0][k * 12 + l + m][i] =
l1_unscale(bits - 1, mant, scale0);
@@ -880,8 +881,8 @@ static int mp_decode_layer2(MPADecodeContext *s)
j += 1 << bit_alloc_bits;
}
/* fill remaining samples to zero */
- for(i=sblimit;i<SBLIMIT;i++) {
- for(ch=0;ch<s->nb_channels;ch++) {
+ for (i = sblimit; i < SBLIMIT; i++) {
+ for (ch = 0; ch < s->nb_channels; ch++) {
s->sb_samples[ch][k * 12 + l + 0][i] = 0;
s->sb_samples[ch][k * 12 + l + 1][i] = 0;
s->sb_samples[ch][k * 12 + l + 2][i] = 0;
@@ -892,28 +893,28 @@ static int mp_decode_layer2(MPADecodeContext *s)
return 3 * 12;
}
-#define SPLIT(dst,sf,n)\
- if(n==3){\
- int m= (sf*171)>>9;\
- dst= sf - 3*m;\
- sf=m;\
- }else if(n==4){\
- dst= sf&3;\
- sf>>=2;\
- }else if(n==5){\
- int m= (sf*205)>>10;\
- dst= sf - 5*m;\
- sf=m;\
- }else if(n==6){\
- int m= (sf*171)>>10;\
- dst= sf - 6*m;\
- sf=m;\
- }else{\
- dst=0;\
+#define SPLIT(dst,sf,n) \
+ if (n == 3) { \
+ int m = (sf * 171) >> 9; \
+ dst = sf - 3 * m; \
+ sf = m; \
+ } else if (n == 4) { \
+ dst = sf & 3; \
+ sf >>= 2; \
+ } else if (n == 5) { \
+ int m = (sf * 205) >> 10; \
+ dst = sf - 5 * m; \
+ sf = m; \
+ } else if (n == 6) { \
+ int m = (sf * 171) >> 10; \
+ dst = sf - 6 * m; \
+ sf = m; \
+ } else { \
+ dst = 0; \
}
-static av_always_inline void lsf_sf_expand(int *slen,
- int sf, int n1, int n2, int n3)
+static av_always_inline void lsf_sf_expand(int *slen, int sf, int n1, int n2,
+ int n3)
{
SPLIT(slen[3], sf, n3)
SPLIT(slen[2], sf, n2)
@@ -921,8 +922,7 @@ static av_always_inline void lsf_sf_expand(int *slen,
slen[0] = sf;
}
-static void exponents_from_scale_factors(MPADecodeContext *s,
- GranuleDef *g,
+static void exponents_from_scale_factors(MPADecodeContext *s, GranuleDef *g,
int16_t *exponents)
{
const uint8_t *bstab, *pretab;
@@ -930,30 +930,30 @@ static void exponents_from_scale_factors(MPADecodeContext *s,
int16_t *exp_ptr;
exp_ptr = exponents;
- gain = g->global_gain - 210;
- shift = g->scalefac_scale + 1;
+ gain = g->global_gain - 210;
+ shift = g->scalefac_scale + 1;
- bstab = band_size_long[s->sample_rate_index];
+ bstab = band_size_long[s->sample_rate_index];
pretab = mpa_pretab[g->preflag];
- for(i=0;i<g->long_end;i++) {
+ for (i = 0; i < g->long_end; i++) {
v0 = gain - ((g->scale_factors[i] + pretab[i]) << shift) + 400;
len = bstab[i];
- for(j=len;j>0;j--)
+ for (j = len; j > 0; j--)
*exp_ptr++ = v0;
}
if (g->short_start < 13) {
- bstab = band_size_short[s->sample_rate_index];
+ bstab = band_size_short[s->sample_rate_index];
gains[0] = gain - (g->subblock_gain[0] << 3);
gains[1] = gain - (g->subblock_gain[1] << 3);
gains[2] = gain - (g->subblock_gain[2] << 3);
- k = g->long_end;
- for(i=g->short_start;i<13;i++) {
+ k = g->long_end;
+ for (i = g->short_start; i < 13; i++) {
len = bstab[i];
- for(l=0;l<3;l++) {
+ for (l = 0; l < 3; l++) {
v0 = gains[l] - (g->scale_factors[k++] << shift) + 400;
- for(j=len;j>0;j--)
- *exp_ptr++ = v0;
+ for (j = len; j > 0; j--)
+ *exp_ptr++ = v0;
}
}
}
@@ -962,22 +962,21 @@ static void exponents_from_scale_factors(MPADecodeContext *s,
/* handle n = 0 too */
static inline int get_bitsz(GetBitContext *s, int n)
{
- if (n == 0)
- return 0;
- else
- return get_bits(s, n);
+ return n ? get_bits(s, n) : 0;
}
-static void switch_buffer(MPADecodeContext *s, int *pos, int *end_pos, int *end_pos2){
- if(s->in_gb.buffer && *pos >= s->gb.size_in_bits){
- s->gb= s->in_gb;
- s->in_gb.buffer=NULL;
+static void switch_buffer(MPADecodeContext *s, int *pos, int *end_pos,
+ int *end_pos2)
+{
+ if (s->in_gb.buffer && *pos >= s->gb.size_in_bits) {
+ s->gb = s->in_gb;
+ s->in_gb.buffer = NULL;
assert((get_bits_count(&s->gb) & 7) == 0);
skip_bits_long(&s->gb, *pos - *end_pos);
- *end_pos2=
- *end_pos= *end_pos2 + get_bits_count(&s->gb) - *pos;
- *pos= get_bits_count(&s->gb);
+ *end_pos2 =
+ *end_pos = *end_pos2 + get_bits_count(&s->gb) - *pos;
+ *pos = get_bits_count(&s->gb);
}
}
@@ -988,13 +987,13 @@ static void switch_buffer(MPADecodeContext *s, int *pos, int *end_pos, int *end_
*dst = v;
*/
#if CONFIG_FLOAT
-#define READ_FLIP_SIGN(dst,src)\
- v = AV_RN32A(src) ^ (get_bits1(&s->gb)<<31);\
- AV_WN32A(dst, v);
+#define READ_FLIP_SIGN(dst,src) \
+ v = AV_RN32A(src) ^ (get_bits1(&s->gb) << 31); \
+ AV_WN32A(dst, v);
#else
-#define READ_FLIP_SIGN(dst,src)\
- v= -get_bits1(&s->gb);\
- *(dst) = (*(src) ^ v) - v;
+#define READ_FLIP_SIGN(dst,src) \
+ v = -get_bits1(&s->gb); \
+ *(dst) = (*(src) ^ v) - v;
#endif
static int huffman_decode(MPADecodeContext *s, GranuleDef *g,
@@ -1004,43 +1003,43 @@ static int huffman_decode(MPADecodeContext *s, GranuleDef *g,
int i;
int last_pos, bits_left;
VLC *vlc;
- int end_pos= FFMIN(end_pos2, s->gb.size_in_bits);
+ int end_pos = FFMIN(end_pos2, s->gb.size_in_bits);
/* low frequencies (called big values) */
s_index = 0;
- for(i=0;i<3;i++) {
+ for (i = 0; i < 3; i++) {
int j, k, l, linbits;
j = g->region_size[i];
if (j == 0)
continue;
/* select vlc table */
- k = g->table_select[i];
- l = mpa_huff_data[k][0];
+ k = g->table_select[i];
+ l = mpa_huff_data[k][0];
linbits = mpa_huff_data[k][1];
- vlc = &huff_vlc[l];
+ vlc = &huff_vlc[l];
- if(!l){
- memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid)*2*j);
- s_index += 2*j;
+ if (!l) {
+ memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid) * 2 * j);
+ s_index += 2 * j;
continue;
}
/* read huffcode and compute each couple */
- for(;j>0;j--) {
+ for (; j > 0; j--) {
int exponent, x, y;
int v;
- int pos= get_bits_count(&s->gb);
+ int pos = get_bits_count(&s->gb);
if (pos >= end_pos){
// av_log(NULL, AV_LOG_ERROR, "pos: %d %d %d %d\n", pos, end_pos, end_pos2, s_index);
switch_buffer(s, &pos, &end_pos, &end_pos2);
// av_log(NULL, AV_LOG_ERROR, "new pos: %d %d\n", pos, end_pos);
- if(pos >= end_pos)
+ if (pos >= end_pos)
break;
}
y = get_vlc2(&s->gb, vlc->table, 7, 3);
- if(!y){
+ if (!y) {
g->sb_hybrid[s_index ] =
g->sb_hybrid[s_index+1] = 0;
s_index += 2;
@@ -1051,54 +1050,54 @@ static int huffman_decode(MPADecodeContext *s, GranuleDef *g,
av_dlog(s->avctx, "region=%d n=%d x=%d y=%d exp=%d\n",
i, g->region_size[i] - j, x, y, exponent);
- if(y&16){
+ if (y & 16) {
x = y >> 5;
y = y & 0x0f;
- if (x < 15){
- READ_FLIP_SIGN(g->sb_hybrid+s_index, RENAME(expval_table)[ exponent ]+x)
- }else{
+ if (x < 15) {
+ READ_FLIP_SIGN(g->sb_hybrid + s_index, RENAME(expval_table)[exponent] + x)
+ } else {
x += get_bitsz(&s->gb, linbits);
- v = l3_unscale(x, exponent);
+ v = l3_unscale(x, exponent);
if (get_bits1(&s->gb))
v = -v;
g->sb_hybrid[s_index] = v;
}
- if (y < 15){
- READ_FLIP_SIGN(g->sb_hybrid+s_index+1, RENAME(expval_table)[ exponent ]+y)
- }else{
+ if (y < 15) {
+ READ_FLIP_SIGN(g->sb_hybrid + s_index + 1, RENAME(expval_table)[exponent] + y)
+ } else {
y += get_bitsz(&s->gb, linbits);
- v = l3_unscale(y, exponent);
+ v = l3_unscale(y, exponent);
if (get_bits1(&s->gb))
v = -v;
g->sb_hybrid[s_index+1] = v;
}
- }else{
+ } else {
x = y >> 5;
y = y & 0x0f;
x += y;
- if (x < 15){
- READ_FLIP_SIGN(g->sb_hybrid+s_index+!!y, RENAME(expval_table)[ exponent ]+x)
- }else{
+ if (x < 15) {
+ READ_FLIP_SIGN(g->sb_hybrid + s_index + !!y, RENAME(expval_table)[exponent] + x)
+ } else {
x += get_bitsz(&s->gb, linbits);
- v = l3_unscale(x, exponent);
+ v = l3_unscale(x, exponent);
if (get_bits1(&s->gb))
v = -v;
g->sb_hybrid[s_index+!!y] = v;
}
- g->sb_hybrid[s_index+ !y] = 0;
+ g->sb_hybrid[s_index + !y] = 0;
}
- s_index+=2;
+ s_index += 2;
}
}
/* high frequencies */
vlc = &huff_quad_vlc[g->count1table_select];
- last_pos=0;
+ last_pos = 0;
while (s_index <= 572) {
int pos, code;
pos = get_bits_count(&s->gb);
if (pos >= end_pos) {
- if (pos > end_pos2 && last_pos){
+ if (pos > end_pos2 && last_pos) {
/* some encoders generate an incorrect size for this
part. We must go back into the data */
s_index -= 4;
@@ -1111,25 +1110,25 @@ static int huffman_decode(MPADecodeContext *s, GranuleDef *g,
// av_log(NULL, AV_LOG_ERROR, "pos2: %d %d %d %d\n", pos, end_pos, end_pos2, s_index);
switch_buffer(s, &pos, &end_pos, &end_pos2);
// av_log(NULL, AV_LOG_ERROR, "new pos2: %d %d %d\n", pos, end_pos, s_index);
- if(pos >= end_pos)
+ if (pos >= end_pos)
break;
}
- last_pos= pos;
+ last_pos = pos;
code = get_vlc2(&s->gb, vlc->table, vlc->bits, 1);
av_dlog(s->avctx, "t=%d code=%d\n", g->count1table_select, code);
- g->sb_hybrid[s_index+0]=
- g->sb_hybrid[s_index+1]=
- g->sb_hybrid[s_index+2]=
- g->sb_hybrid[s_index+3]= 0;
- while(code){
- static const int idxtab[16]={3,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0};
+ g->sb_hybrid[s_index+0] =
+ g->sb_hybrid[s_index+1] =
+ g->sb_hybrid[s_index+2] =
+ g->sb_hybrid[s_index+3] = 0;
+ while (code) {
+ static const int idxtab[16] = { 3,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0 };
int v;
- int pos= s_index+idxtab[code];
- code ^= 8>>idxtab[code];
- READ_FLIP_SIGN(g->sb_hybrid+pos, RENAME(exp_table)+exponents[pos])
+ int pos = s_index + idxtab[code];
+ code ^= 8 >> idxtab[code];
+ READ_FLIP_SIGN(g->sb_hybrid + pos, RENAME(exp_table)+exponents[pos])
}
- s_index+=4;
+ s_index += 4;
}
/* skip extension bits */
bits_left = end_pos2 - get_bits_count(&s->gb);
@@ -1137,14 +1136,14 @@ static int huffman_decode(MPADecodeContext *s, GranuleDef *g,
if (bits_left < 0 && (s->err_recognition & AV_EF_BITSTREAM)) {
av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
s_index=0;
- }else if(bits_left > 0 && (s->err_recognition & AV_EF_BUFFER)){
+ } else if (bits_left > 0 && (s->err_recognition & AV_EF_BUFFER)) {
av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
- s_index=0;
+ s_index = 0;
}
- memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid)*(576 - s_index));
+ memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid) * (576 - s_index));
skip_bits_long(&s->gb, bits_left);
- i= get_bits_count(&s->gb);
+ i = get_bits_count(&s->gb);
switch_buffer(s, &i, &end_pos, &end_pos2);
return 0;
@@ -1163,34 +1162,32 @@ static void reorder_block(MPADecodeContext *s, GranuleDef *g)
return;
if (g->switch_point) {
- if (s->sample_rate_index != 8) {
+ if (s->sample_rate_index != 8)
ptr = g->sb_hybrid + 36;
- } else {
+ else
ptr = g->sb_hybrid + 48;
- }
} else {
ptr = g->sb_hybrid;
}
- for(i=g->short_start;i<13;i++) {
- len = band_size_short[s->sample_rate_index][i];
+ for (i = g->short_start; i < 13; i++) {
+ len = band_size_short[s->sample_rate_index][i];
ptr1 = ptr;
- dst = tmp;
- for(j=len;j>0;j--) {
+ dst = tmp;
+ for (j = len; j > 0; j--) {
*dst++ = ptr[0*len];
*dst++ = ptr[1*len];
*dst++ = ptr[2*len];
ptr++;
}
- ptr+=2*len;
+ ptr += 2 * len;
memcpy(ptr1, tmp, len * 3 * sizeof(*ptr1));
}
}
#define ISQRT2 FIXR(0.70710678118654752440)
-static void compute_stereo(MPADecodeContext *s,
- GranuleDef *g0, GranuleDef *g1)
+static void compute_stereo(MPADecodeContext *s, GranuleDef *g0, GranuleDef *g1)
{
int i, j, k, l;
int sf_max, sf, len, non_zero_found;
@@ -1214,17 +1211,17 @@ static void compute_stereo(MPADecodeContext *s,
non_zero_found_short[1] = 0;
non_zero_found_short[2] = 0;
k = (13 - g1->short_start) * 3 + g1->long_end - 3;
- for(i = 12;i >= g1->short_start;i--) {
+ for (i = 12; i >= g1->short_start; i--) {
/* for last band, use previous scale factor */
if (i != 11)
k -= 3;
len = band_size_short[s->sample_rate_index][i];
- for(l=2;l>=0;l--) {
+ for (l = 2; l >= 0; l--) {
tab0 -= len;
tab1 -= len;
if (!non_zero_found_short[l]) {
/* test if non zero band. if so, stop doing i-stereo */
- for(j=0;j<len;j++) {
+ for (j = 0; j < len; j++) {
if (tab1[j] != 0) {
non_zero_found_short[l] = 1;
goto found1;
@@ -1236,19 +1233,19 @@ static void compute_stereo(MPADecodeContext *s,
v1 = is_tab[0][sf];
v2 = is_tab[1][sf];
- for(j=0;j<len;j++) {
- tmp0 = tab0[j];
+ for (j = 0; j < len; j++) {
+ tmp0 = tab0[j];
tab0[j] = MULLx(tmp0, v1, FRAC_BITS);
tab1[j] = MULLx(tmp0, v2, FRAC_BITS);
}
} else {
- found1:
+found1:
if (s->mode_ext & MODE_EXT_MS_STEREO) {
/* lower part of the spectrum : do ms stereo
if enabled */
- for(j=0;j<len;j++) {
- tmp0 = tab0[j];
- tmp1 = tab1[j];
+ for (j = 0; j < len; j++) {
+ tmp0 = tab0[j];
+ tmp1 = tab1[j];
tab0[j] = MULLx(tmp0 + tmp1, ISQRT2, FRAC_BITS);
tab1[j] = MULLx(tmp0 - tmp1, ISQRT2, FRAC_BITS);
}
@@ -1258,41 +1255,41 @@ static void compute_stereo(MPADecodeContext *s,
}
non_zero_found = non_zero_found_short[0] |
- non_zero_found_short[1] |
- non_zero_found_short[2];
+ non_zero_found_short[1] |
+ non_zero_found_short[2];
- for(i = g1->long_end - 1;i >= 0;i--) {
- len = band_size_long[s->sample_rate_index][i];
+ for (i = g1->long_end - 1;i >= 0;i--) {
+ len = band_size_long[s->sample_rate_index][i];
tab0 -= len;
tab1 -= len;
/* test if non zero band. if so, stop doing i-stereo */
if (!non_zero_found) {
- for(j=0;j<len;j++) {
+ for (j = 0; j < len; j++) {
if (tab1[j] != 0) {
non_zero_found = 1;
goto found2;
}
}
/* for last band, use previous scale factor */
- k = (i == 21) ? 20 : i;
+ k = (i == 21) ? 20 : i;
sf = g1->scale_factors[k];
if (sf >= sf_max)
goto found2;
v1 = is_tab[0][sf];
v2 = is_tab[1][sf];
- for(j=0;j<len;j++) {
- tmp0 = tab0[j];
+ for (j = 0; j < len; j++) {
+ tmp0 = tab0[j];
tab0[j] = MULLx(tmp0, v1, FRAC_BITS);
tab1[j] = MULLx(tmp0, v2, FRAC_BITS);
}
} else {
- found2:
+found2:
if (s->mode_ext & MODE_EXT_MS_STEREO) {
/* lower part of the spectrum : do ms stereo
if enabled */
- for(j=0;j<len;j++) {
- tmp0 = tab0[j];
- tmp1 = tab1[j];
+ for (j = 0; j < len; j++) {
+ tmp0 = tab0[j];
+ tmp1 = tab1[j];
tab0[j] = MULLx(tmp0 + tmp1, ISQRT2, FRAC_BITS);
tab1[j] = MULLx(tmp0 - tmp1, ISQRT2, FRAC_BITS);
}
@@ -1305,9 +1302,9 @@ static void compute_stereo(MPADecodeContext *s,
global gain */
tab0 = g0->sb_hybrid;
tab1 = g1->sb_hybrid;
- for(i=0;i<576;i++) {
- tmp0 = tab0[i];
- tmp1 = tab1[i];
+ for (i = 0; i < 576; i++) {
+ tmp0 = tab0[i];
+ tmp1 = tab1[i];
tab0[i] = tmp0 + tmp1;
tab1[i] = tmp0 - tmp1;
}
@@ -1326,8 +1323,8 @@ static void compute_stereo(MPADecodeContext *s,
int tmp0 = ptr[-1-j]; \
int tmp1 = ptr[ j]; \
int tmp2 = MULH(tmp0 + tmp1, csa_table[j][0]); \
- ptr[-1-j] = 4*(tmp2 - MULH(tmp1, csa_table[j][2])); \
- ptr[ j] = 4*(tmp2 + MULH(tmp0, csa_table[j][3])); \
+ ptr[-1-j] = 4 * (tmp2 - MULH(tmp1, csa_table[j][2])); \
+ ptr[ j] = 4 * (tmp2 + MULH(tmp0, csa_table[j][3])); \
} while (0)
#endif
@@ -1347,7 +1344,7 @@ static void compute_antialias(MPADecodeContext *s, GranuleDef *g)
}
ptr = g->sb_hybrid + 18;
- for(i = n;i > 0;i--) {
+ for (i = n; i > 0; i--) {
AA(0);
AA(1);
AA(2);
@@ -1361,23 +1358,21 @@ static void compute_antialias(MPADecodeContext *s, GranuleDef *g)
}
}
-static void compute_imdct(MPADecodeContext *s,
- GranuleDef *g,
- INTFLOAT *sb_samples,
- INTFLOAT *mdct_buf)
+static void compute_imdct(MPADecodeContext *s, GranuleDef *g,
+ INTFLOAT *sb_samples, INTFLOAT *mdct_buf)
{
INTFLOAT *win, *win1, *out_ptr, *ptr, *buf, *ptr1;
INTFLOAT out2[12];
int i, j, mdct_long_end, sblimit;
/* find last non zero block */
- ptr = g->sb_hybrid + 576;
+ ptr = g->sb_hybrid + 576;
ptr1 = g->sb_hybrid + 2 * 18;
while (ptr >= ptr1) {
int32_t *p;
ptr -= 6;
- p= (int32_t*)ptr;
- if(p[0] | p[1] | p[2] | p[3] | p[4] | p[5])
+ p = (int32_t*)ptr;
+ if (p[0] | p[1] | p[2] | p[3] | p[4] | p[5])
break;
}
sblimit = ((ptr - g->sb_hybrid) / 18) + 1;
@@ -1394,7 +1389,7 @@ static void compute_imdct(MPADecodeContext *s,
buf = mdct_buf;
ptr = g->sb_hybrid;
- for(j=0;j<mdct_long_end;j++) {
+ for (j = 0; j < mdct_long_end; j++) {
/* apply window & overlap with previous buffer */
out_ptr = sb_samples + j;
/* select window */
@@ -1405,33 +1400,33 @@ static void compute_imdct(MPADecodeContext *s,
/* select frequency inversion */
win = win1 + ((4 * 36) & -(j & 1));
imdct36(out_ptr, buf, ptr, win);
- out_ptr += 18*SBLIMIT;
- ptr += 18;
- buf += 18;
+ out_ptr += 18 * SBLIMIT;
+ ptr += 18;
+ buf += 18;
}
- for(j=mdct_long_end;j<sblimit;j++) {
+ for (j = mdct_long_end; j < sblimit; j++) {
/* select frequency inversion */
- win = mdct_win[2] + ((4 * 36) & -(j & 1));
+ win = mdct_win[2] + ((4 * 36) & -(j & 1));
out_ptr = sb_samples + j;
- for(i=0; i<6; i++){
+ for (i = 0; i < 6; i++) {
*out_ptr = buf[i];
out_ptr += SBLIMIT;
}
imdct12(out2, ptr + 0);
- for(i=0;i<6;i++) {
+ for (i = 0; i < 6; i++) {
*out_ptr = MULH3(out2[i ], win[i ], 1) + buf[i + 6*1];
buf[i + 6*2] = MULH3(out2[i + 6], win[i + 6], 1);
out_ptr += SBLIMIT;
}
imdct12(out2, ptr + 1);
- for(i=0;i<6;i++) {
+ for (i = 0; i < 6; i++) {
*out_ptr = MULH3(out2[i ], win[i ], 1) + buf[i + 6*2];
buf[i + 6*0] = MULH3(out2[i + 6], win[i + 6], 1);
out_ptr += SBLIMIT;
}
imdct12(out2, ptr + 2);
- for(i=0;i<6;i++) {
+ for (i = 0; i < 6; i++) {
buf[i + 6*0] = MULH3(out2[i ], win[i ], 1) + buf[i + 6*0];
buf[i + 6*1] = MULH3(out2[i + 6], win[i + 6], 1);
buf[i + 6*2] = 0;
@@ -1440,12 +1435,12 @@ static void compute_imdct(MPADecodeContext *s,
buf += 18;
}
/* zero bands */
- for(j=sblimit;j<SBLIMIT;j++) {
+ for (j = sblimit; j < SBLIMIT; j++) {
/* overlap */
out_ptr = sb_samples + j;
- for(i=0;i<18;i++) {
+ for (i = 0; i < 18; i++) {
*out_ptr = buf[i];
- buf[i] = 0;
+ buf[i] = 0;
out_ptr += SBLIMIT;
}
buf += 18;
@@ -1472,21 +1467,21 @@ static int mp_decode_layer3(MPADecodeContext *s)
else
skip_bits(&s->gb, 5);
nb_granules = 2;
- for(ch=0;ch<s->nb_channels;ch++) {
+ for (ch = 0; ch < s->nb_channels; ch++) {
s->granules[ch][0].scfsi = 0;/* all scale factors are transmitted */
s->granules[ch][1].scfsi = get_bits(&s->gb, 4);
}
}
- for(gr=0;gr<nb_granules;gr++) {
- for(ch=0;ch<s->nb_channels;ch++) {
+ for (gr = 0; gr < nb_granules; gr++) {
+ for (ch = 0; ch < s->nb_channels; ch++) {
av_dlog(s->avctx, "gr=%d ch=%d: side_info\n", gr, ch);
g = &s->granules[ch][gr];
g->part2_3_length = get_bits(&s->gb, 12);
- g->big_values = get_bits(&s->gb, 9);
- if(g->big_values > 288){
+ g->big_values = get_bits(&s->gb, 9);
+ if (g->big_values > 288) {
av_log(s->avctx, AV_LOG_ERROR, "big_values too big\n");
- return -1;
+ return AVERROR_INVALIDDATA;
}
g->global_gain = get_bits(&s->gb, 8);
@@ -1502,21 +1497,21 @@ static int mp_decode_layer3(MPADecodeContext *s)
blocksplit_flag = get_bits1(&s->gb);
if (blocksplit_flag) {
g->block_type = get_bits(&s->gb, 2);
- if (g->block_type == 0){
+ if (g->block_type == 0) {
av_log(s->avctx, AV_LOG_ERROR, "invalid block type\n");
- return -1;
+ return AVERROR_INVALIDDATA;
}
g->switch_point = get_bits1(&s->gb);
- for(i=0;i<2;i++)
+ for (i = 0; i < 2; i++)
g->table_select[i] = get_bits(&s->gb, 5);
- for(i=0;i<3;i++)
+ for (i = 0; i < 3; i++)
g->subblock_gain[i] = get_bits(&s->gb, 3);
ff_init_short_region(s, g);
} else {
int region_address1, region_address2;
g->block_type = 0;
g->switch_point = 0;
- for(i=0;i<3;i++)
+ for (i = 0; i < 3; i++)
g->table_select[i] = get_bits(&s->gb, 5);
/* compute huffman coded region sizes */
region_address1 = get_bits(&s->gb, 4);
@@ -1531,38 +1526,38 @@ static int mp_decode_layer3(MPADecodeContext *s)
g->preflag = 0;
if (!s->lsf)
g->preflag = get_bits1(&s->gb);
- g->scalefac_scale = get_bits1(&s->gb);
+ g->scalefac_scale = get_bits1(&s->gb);
g->count1table_select = get_bits1(&s->gb);
av_dlog(s->avctx, "block_type=%d switch_point=%d\n",
g->block_type, g->switch_point);
}
}
- if (!s->adu_mode) {
- const uint8_t *ptr = s->gb.buffer + (get_bits_count(&s->gb)>>3);
- assert((get_bits_count(&s->gb) & 7) == 0);
- /* now we get bits from the main_data_begin offset */
- av_dlog(s->avctx, "seekback: %d\n", main_data_begin);
-//av_log(NULL, AV_LOG_ERROR, "backstep:%d, lastbuf:%d\n", main_data_begin, s->last_buf_size);
+ if (!s->adu_mode) {
+ const uint8_t *ptr = s->gb.buffer + (get_bits_count(&s->gb)>>3);
+ assert((get_bits_count(&s->gb) & 7) == 0);
+ /* now we get bits from the main_data_begin offset */
+ av_dlog(s->avctx, "seekback: %d\n", main_data_begin);
+ //av_log(NULL, AV_LOG_ERROR, "backstep:%d, lastbuf:%d\n", main_data_begin, s->last_buf_size);
- memcpy(s->last_buf + s->last_buf_size, ptr, EXTRABYTES);
- s->in_gb= s->gb;
+ memcpy(s->last_buf + s->last_buf_size, ptr, EXTRABYTES);
+ s->in_gb = s->gb;
init_get_bits(&s->gb, s->last_buf, s->last_buf_size*8);
skip_bits_long(&s->gb, 8*(s->last_buf_size - main_data_begin));
- }
+ }
- for(gr=0;gr<nb_granules;gr++) {
- for(ch=0;ch<s->nb_channels;ch++) {
+ for (gr = 0; gr < nb_granules; gr++) {
+ for (ch = 0; ch < s->nb_channels; ch++) {
g = &s->granules[ch][gr];
- if(get_bits_count(&s->gb)<0){
+ if (get_bits_count(&s->gb) < 0) {
av_log(s->avctx, AV_LOG_DEBUG, "mdb:%d, lastbuf:%d skipping granule %d\n",
- main_data_begin, s->last_buf_size, gr);
+ main_data_begin, s->last_buf_size, gr);
skip_bits_long(&s->gb, g->part2_3_length);
memset(g->sb_hybrid, 0, sizeof(g->sb_hybrid));
- if(get_bits_count(&s->gb) >= s->gb.size_in_bits && s->in_gb.buffer){
+ if (get_bits_count(&s->gb) >= s->gb.size_in_bits && s->in_gb.buffer) {
skip_bits_long(&s->in_gb, get_bits_count(&s->gb) - s->gb.size_in_bits);
- s->gb= s->in_gb;
- s->in_gb.buffer=NULL;
+ s->gb = s->in_gb;
+ s->in_gb.buffer = NULL;
}
continue;
}
@@ -1580,39 +1575,39 @@ static int mp_decode_layer3(MPADecodeContext *s)
if (g->block_type == 2) {
n = g->switch_point ? 17 : 18;
j = 0;
- if(slen1){
- for(i=0;i<n;i++)
+ if (slen1) {
+ for (i = 0; i < n; i++)
g->scale_factors[j++] = get_bits(&s->gb, slen1);
- }else{
- for(i=0;i<n;i++)
+ } else {
+ for (i = 0; i < n; i++)
g->scale_factors[j++] = 0;
}
- if(slen2){
- for(i=0;i<18;i++)
+ if (slen2) {
+ for (i = 0; i < 18; i++)
g->scale_factors[j++] = get_bits(&s->gb, slen2);
- for(i=0;i<3;i++)
+ for (i = 0; i < 3; i++)
g->scale_factors[j++] = 0;
- }else{
- for(i=0;i<21;i++)
+ } else {
+ for (i = 0; i < 21; i++)
g->scale_factors[j++] = 0;
}
} else {
sc = s->granules[ch][0].scale_factors;
j = 0;
- for(k=0;k<4;k++) {
- n = (k == 0 ? 6 : 5);
+ for (k = 0; k < 4; k++) {
+ n = k == 0 ? 6 : 5;
if ((g->scfsi & (0x8 >> k)) == 0) {
slen = (k < 2) ? slen1 : slen2;
- if(slen){
- for(i=0;i<n;i++)
+ if (slen) {
+ for (i = 0; i < n; i++)
g->scale_factors[j++] = get_bits(&s->gb, slen);
- }else{
- for(i=0;i<n;i++)
+ } else {
+ for (i = 0; i < n; i++)
g->scale_factors[j++] = 0;
}
} else {
/* simply copy from last granule */
- for(i=0;i<n;i++) {
+ for (i = 0; i < n; i++) {
g->scale_factors[j] = sc[j];
j++;
}
@@ -1624,11 +1619,11 @@ static int mp_decode_layer3(MPADecodeContext *s)
int tindex, tindex2, slen[4], sl, sf;
/* LSF scale factors */
- if (g->block_type == 2) {
+ if (g->block_type == 2)
tindex = g->switch_point ? 2 : 1;
- } else {
+ else
tindex = 0;
- }
+
sf = g->scalefac_compress;
if ((s->mode_ext & MODE_EXT_I_STEREO) && ch == 1) {
/* intensity stereo case */
@@ -1659,19 +1654,19 @@ static int mp_decode_layer3(MPADecodeContext *s)
}
j = 0;
- for(k=0;k<4;k++) {
- n = lsf_nsf_table[tindex2][tindex][k];
+ for (k = 0; k < 4; k++) {
+ n = lsf_nsf_table[tindex2][tindex][k];
sl = slen[k];
- if(sl){
- for(i=0;i<n;i++)
+ if (sl) {
+ for (i = 0; i < n; i++)
g->scale_factors[j++] = get_bits(&s->gb, sl);
- }else{
- for(i=0;i<n;i++)
+ } else {
+ for (i = 0; i < n; i++)
g->scale_factors[j++] = 0;
}
}
/* XXX: should compute exact size */
- for(;j<40;j++)
+ for (; j < 40; j++)
g->scale_factors[j] = 0;
}
@@ -1684,7 +1679,7 @@ static int mp_decode_layer3(MPADecodeContext *s)
if (s->nb_channels == 2)
compute_stereo(s, &s->granules[0][gr], &s->granules[1][gr]);
- for(ch=0;ch<s->nb_channels;ch++) {
+ for (ch = 0; ch < s->nb_channels; ch++) {
g = &s->granules[ch][gr];
reorder_block(s, g);
@@ -1692,18 +1687,18 @@ static int mp_decode_layer3(MPADecodeContext *s)
compute_imdct(s, g, &s->sb_samples[ch][18 * gr][0], s->mdct_buf[ch]);
}
} /* gr */
- if(get_bits_count(&s->gb)<0)
+ if (get_bits_count(&s->gb) < 0)
skip_bits_long(&s->gb, -get_bits_count(&s->gb));
return nb_granules * 18;
}
-static int mp_decode_frame(MPADecodeContext *s,
- OUT_INT *samples, const uint8_t *buf, int buf_size)
+static int mp_decode_frame(MPADecodeContext *s, OUT_INT *samples,
+ const uint8_t *buf, int buf_size)
{
int i, nb_frames, ch;
OUT_INT *samples_ptr;
- init_get_bits(&s->gb, buf + HEADER_SIZE, (buf_size - HEADER_SIZE)*8);
+ init_get_bits(&s->gb, buf + HEADER_SIZE, (buf_size - HEADER_SIZE) * 8);
/* skip error protection field */
if (s->error_protection)
@@ -1724,28 +1719,28 @@ static int mp_decode_frame(MPADecodeContext *s,
nb_frames = mp_decode_layer3(s);
s->last_buf_size=0;
- if(s->in_gb.buffer){
+ if (s->in_gb.buffer) {
align_get_bits(&s->gb);
- i= get_bits_left(&s->gb)>>3;
- if(i >= 0 && i <= BACKSTEP_SIZE){
+ i = get_bits_left(&s->gb)>>3;
+ if (i >= 0 && i <= BACKSTEP_SIZE) {
memmove(s->last_buf, s->gb.buffer + (get_bits_count(&s->gb)>>3), i);
s->last_buf_size=i;
- }else
+ } else
av_log(s->avctx, AV_LOG_ERROR, "invalid old backstep %d\n", i);
- s->gb= s->in_gb;
- s->in_gb.buffer= NULL;
+ s->gb = s->in_gb;
+ s->in_gb.buffer = NULL;
}
align_get_bits(&s->gb);
assert((get_bits_count(&s->gb) & 7) == 0);
- i= get_bits_left(&s->gb)>>3;
+ i = get_bits_left(&s->gb) >> 3;
- if(i<0 || i > BACKSTEP_SIZE || nb_frames<0){
- if(i<0)
+ if (i < 0 || i > BACKSTEP_SIZE || nb_frames < 0) {
+ if (i < 0)
av_log(s->avctx, AV_LOG_ERROR, "invalid new backstep %d\n", i);
- i= FFMIN(BACKSTEP_SIZE, buf_size - HEADER_SIZE);
+ i = FFMIN(BACKSTEP_SIZE, buf_size - HEADER_SIZE);
}
- assert(i <= buf_size - HEADER_SIZE && i>= 0);
+ assert(i <= buf_size - HEADER_SIZE && i >= 0);
memcpy(s->last_buf + s->last_buf_size, s->gb.buffer + buf_size - HEADER_SIZE - i, i);
s->last_buf_size += i;
@@ -1753,9 +1748,9 @@ static int mp_decode_frame(MPADecodeContext *s,
}
/* apply the synthesis filter */
- for(ch=0;ch<s->nb_channels;ch++) {
+ for (ch = 0; ch < s->nb_channels; ch++) {
samples_ptr = samples + ch;
- for(i=0;i<nb_frames;i++) {
+ for (i = 0; i < nb_frames; i++) {
RENAME(ff_mpa_synth_filter)(
&s->mpadsp,
s->synth_buf[ch], &(s->synth_buf_offset[ch]),
@@ -1769,74 +1764,80 @@ static int mp_decode_frame(MPADecodeContext *s,
return nb_frames * 32 * sizeof(OUT_INT) * s->nb_channels;
}
-static int decode_frame(AVCodecContext * avctx,
- void *data, int *data_size,
+static int decode_frame(AVCodecContext * avctx, void *data, int *data_size,
AVPacket *avpkt)
{
- const uint8_t *buf = avpkt->data;
- int buf_size = avpkt->size;
+ const uint8_t *buf = avpkt->data;
+ int buf_size = avpkt->size;
MPADecodeContext *s = avctx->priv_data;
uint32_t header;
int out_size;
OUT_INT *out_samples = data;
- if(buf_size < HEADER_SIZE)
- return -1;
+ if (buf_size < HEADER_SIZE)
+ return AVERROR_INVALIDDATA;
header = AV_RB32(buf);
- if(ff_mpa_check_header(header) < 0){
+ if (ff_mpa_check_header(header) < 0) {
av_log(avctx, AV_LOG_ERROR, "Header missing\n");
- return -1;
+ return AVERROR_INVALIDDATA;
}
if (avpriv_mpegaudio_decode_header((MPADecodeHeader *)s, header) == 1) {
/* free format: prepare to compute frame size */
s->frame_size = -1;
- return -1;
+ return AVERROR_INVALIDDATA;
}
/* update codec info */
- avctx->channels = s->nb_channels;
+ avctx->channels = s->nb_channels;
avctx->channel_layout = s->nb_channels == 1 ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;
if (!avctx->bit_rate)
avctx->bit_rate = s->bit_rate;
avctx->sub_id = s->layer;
- if(*data_size < 1152*avctx->channels*sizeof(OUT_INT))
- return -1;
+ if (*data_size < avctx->frame_size * avctx->channels * sizeof(OUT_INT))
+ return AVERROR(EINVAL);
*data_size = 0;
- if(s->frame_size<=0 || s->frame_size > buf_size){
+ if (s->frame_size <= 0 || s->frame_size > buf_size) {
av_log(avctx, AV_LOG_ERROR, "incomplete frame\n");
- return -1;
+ return AVERROR_INVALIDDATA;
}else if(s->frame_size < buf_size){
av_log(avctx, AV_LOG_DEBUG, "incorrect frame size - multiple frames in buffer?\n");
buf_size= s->frame_size;
}
out_size = mp_decode_frame(s, out_samples, buf, buf_size);
- if(out_size>=0){
- *data_size = out_size;
+ if (out_size >= 0) {
+ *data_size = out_size;
avctx->sample_rate = s->sample_rate;
//FIXME maybe move the other codec info stuff from above here too
- }else
- av_log(avctx, AV_LOG_DEBUG, "Error while decoding MPEG audio frame.\n"); //FIXME return -1 / but also return the number of bytes consumed
+ } else {
+ av_log(avctx, AV_LOG_ERROR, "Error while decoding MPEG audio frame.\n");
+ /* Only return an error if the bad frame makes up the whole packet.
+ If there is more data in the packet, just consume the bad frame
+ instead of returning an error, which would discard the whole
+ packet. */
+ if (buf_size == avpkt->size)
+ return out_size;
+ }
s->frame_size = 0;
return buf_size;
}
-static void flush(AVCodecContext *avctx){
+static void flush(AVCodecContext *avctx)
+{
MPADecodeContext *s = avctx->priv_data;
memset(s->synth_buf, 0, sizeof(s->synth_buf));
- s->last_buf_size= 0;
+ s->last_buf_size = 0;
}
#if CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER
-static int decode_frame_adu(AVCodecContext * avctx,
- void *data, int *data_size,
- AVPacket *avpkt)
+static int decode_frame_adu(AVCodecContext *avctx, void *data, int *data_size,
+ AVPacket *avpkt)
{
- const uint8_t *buf = avpkt->data;
- int buf_size = avpkt->size;
+ const uint8_t *buf = avpkt->data;
+ int buf_size = avpkt->size;
MPADecodeContext *s = avctx->priv_data;
uint32_t header;
int len, out_size;
@@ -1846,8 +1847,8 @@ static int decode_frame_adu(AVCodecContext * avctx,
// Discard too short frames
if (buf_size < HEADER_SIZE) {
- *data_size = 0;
- return buf_size;
+ av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
+ return AVERROR_INVALIDDATA;
}
@@ -1858,25 +1859,29 @@ static int decode_frame_adu(AVCodecContext * avctx,
header = AV_RB32(buf) | 0xffe00000;
if (ff_mpa_check_header(header) < 0) { // Bad header, discard frame
- *data_size = 0;
- return buf_size;
+ av_log(avctx, AV_LOG_ERROR, "Invalid frame header\n");
+ return AVERROR_INVALIDDATA;
}
avpriv_mpegaudio_decode_header((MPADecodeHeader *)s, header);
/* update codec info */
avctx->sample_rate = s->sample_rate;
- avctx->channels = s->nb_channels;
+ avctx->channels = s->nb_channels;
if (!avctx->bit_rate)
avctx->bit_rate = s->bit_rate;
avctx->sub_id = s->layer;
+ if (*data_size < avctx->frame_size * avctx->channels * sizeof(OUT_INT))
+ return AVERROR(EINVAL);
+
s->frame_size = len;
- if (avctx->parse_only) {
+#if FF_API_PARSE_FRAME
+ if (avctx->parse_only)
out_size = buf_size;
- } else {
- out_size = mp_decode_frame(s, out_samples, buf, buf_size);
- }
+ else
+#endif
+ out_size = mp_decode_frame(s, out_samples, buf, buf_size);
*data_size = out_size;
return buf_size;
@@ -1889,9 +1894,9 @@ static int decode_frame_adu(AVCodecContext * avctx,
* Context for MP3On4 decoder
*/
typedef struct MP3On4DecodeContext {
- int frames; ///< number of mp3 frames per block (number of mp3 decoder instances)
- int syncword; ///< syncword patch
- const uint8_t *coff; ///< channels offsets in output buffer
+ int frames; ///< number of mp3 frames per block (number of mp3 decoder instances)
+ int syncword; ///< syncword patch
+ const uint8_t *coff; ///< channel offsets in output buffer
MPADecodeContext *mp3decctx[5]; ///< MPADecodeContext for every decoder instance
OUT_INT *decoded_buf; ///< output buffer for decoded samples
} MP3On4DecodeContext;
@@ -1899,17 +1904,20 @@ typedef struct MP3On4DecodeContext {
#include "mpeg4audio.h"
/* Next 3 arrays are indexed by channel config number (passed via codecdata) */
-static const uint8_t mp3Frames[8] = {0,1,1,2,3,3,4,5}; /* number of mp3 decoder instances */
+
+/* number of mp3 decoder instances */
+static const uint8_t mp3Frames[8] = { 0, 1, 1, 2, 3, 3, 4, 5 };
+
/* offsets into output buffer, assume output order is FL FR C LFE BL BR SL SR */
static const uint8_t chan_offset[8][5] = {
- {0},
- {0}, // C
- {0}, // FLR
- {2,0}, // C FLR
- {2,0,3}, // C FLR BS
- {2,0,3}, // C FLR BLRS
- {2,0,4,3}, // C FLR BLRS LFE
- {2,0,6,4,3}, // C FLR BLRS BLR LFE
+ { 0 },
+ { 0 }, // C
+ { 0 }, // FLR
+ { 2, 0 }, // C FLR
+ { 2, 0, 3 }, // C FLR BS
+ { 2, 0, 3 }, // C FLR BLRS
+ { 2, 0, 4, 3 }, // C FLR BLRS LFE
+ { 2, 0, 6, 4, 3 }, // C FLR BLRS BLR LFE
};
/* mp3on4 channel layouts */
@@ -1946,17 +1954,17 @@ static int decode_init_mp3on4(AVCodecContext * avctx)
if ((avctx->extradata_size < 2) || (avctx->extradata == NULL)) {
av_log(avctx, AV_LOG_ERROR, "Codec extradata missing or too short.\n");
- return -1;
+ return AVERROR_INVALIDDATA;
}
avpriv_mpeg4audio_get_config(&cfg, avctx->extradata, avctx->extradata_size);
if (!cfg.chan_config || cfg.chan_config > 7) {
av_log(avctx, AV_LOG_ERROR, "Invalid channel config number.\n");
- return -1;
+ return AVERROR_INVALIDDATA;
}
- s->frames = mp3Frames[cfg.chan_config];
- s->coff = chan_offset[cfg.chan_config];
- avctx->channels = ff_mpeg4audio_channels[cfg.chan_config];
+ s->frames = mp3Frames[cfg.chan_config];
+ s->coff = chan_offset[cfg.chan_config];
+ avctx->channels = ff_mpeg4audio_channels[cfg.chan_config];
avctx->channel_layout = chan_layout[cfg.chan_config];
if (cfg.sample_rate < 16000)
@@ -2024,8 +2032,8 @@ static int decode_frame_mp3on4(AVCodecContext * avctx,
void *data, int *data_size,
AVPacket *avpkt)
{
- const uint8_t *buf = avpkt->data;
- int buf_size = avpkt->size;
+ const uint8_t *buf = avpkt->data;
+ int buf_size = avpkt->size;
MP3On4DecodeContext *s = avctx->priv_data;
MPADecodeContext *m;
int fsize, len = buf_size, out_size = 0;
@@ -2039,10 +2047,9 @@ static int decode_frame_mp3on4(AVCodecContext * avctx,
return AVERROR(EINVAL);
}
- *data_size = 0;
// Discard too short frames
if (buf_size < HEADER_SIZE)
- return -1;
+ return AVERROR_INVALIDDATA;
// If only one decoder interleave is not needed
outptr = s->frames == 1 ? out_samples : s->decoded_buf;
@@ -2053,8 +2060,8 @@ static int decode_frame_mp3on4(AVCodecContext * avctx,
for (fr = 0; fr < s->frames; fr++) {
fsize = AV_RB16(buf) >> 4;
fsize = FFMIN3(fsize, len, MPA_MAX_CODED_FRAME_SIZE);
- m = s->mp3decctx[fr];
- assert (m != NULL);
+ m = s->mp3decctx[fr];
+ assert(m != NULL);
header = (AV_RB32(buf) & 0x000fffff) | s->syncword; // patch header
@@ -2071,23 +2078,23 @@ static int decode_frame_mp3on4(AVCodecContext * avctx,
ch += m->nb_channels;
out_size += mp_decode_frame(m, outptr, buf, fsize);
- buf += fsize;
- len -= fsize;
+ buf += fsize;
+ len -= fsize;
- if(s->frames > 1) {
+ if (s->frames > 1) {
n = m->avctx->frame_size*m->nb_channels;
/* interleave output data */
bp = out_samples + s->coff[fr];
- if(m->nb_channels == 1) {
- for(j = 0; j < n; j++) {
+ if (m->nb_channels == 1) {
+ for (j = 0; j < n; j++) {
*bp = s->decoded_buf[j];
bp += avctx->channels;
}
} else {
- for(j = 0; j < n; j++) {
+ for (j = 0; j < n; j++) {
bp[0] = s->decoded_buf[j++];
bp[1] = s->decoded_buf[j];
- bp += avctx->channels;
+ bp += avctx->channels;
}
}
}
@@ -2111,7 +2118,9 @@ AVCodec ff_mp1_decoder = {
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
.decode = decode_frame,
+#if FF_API_PARSE_FRAME
.capabilities = CODEC_CAP_PARSE_ONLY,
+#endif
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MP1 (MPEG audio layer 1)"),
};
@@ -2124,7 +2133,9 @@ AVCodec ff_mp2_decoder = {
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
.decode = decode_frame,
+#if FF_API_PARSE_FRAME
.capabilities = CODEC_CAP_PARSE_ONLY,
+#endif
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"),
};
@@ -2137,7 +2148,9 @@ AVCodec ff_mp3_decoder = {
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
.decode = decode_frame,
+#if FF_API_PARSE_FRAME
.capabilities = CODEC_CAP_PARSE_ONLY,
+#endif
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MP3 (MPEG audio layer 3)"),
};
@@ -2150,7 +2163,9 @@ AVCodec ff_mp3adu_decoder = {
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
.decode = decode_frame_adu,
+#if FF_API_PARSE_FRAME
.capabilities = CODEC_CAP_PARSE_ONLY,
+#endif
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("ADU (Application Data Unit) MP3 (MPEG audio layer 3)"),
};
diff --git a/libavcodec/mpegaudiodec_float.c b/libavcodec/mpegaudiodec_float.c
index 312b84278f..4482168a3e 100644
--- a/libavcodec/mpegaudiodec_float.c
+++ b/libavcodec/mpegaudiodec_float.c
@@ -30,7 +30,9 @@ AVCodec ff_mp1float_decoder = {
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
.decode = decode_frame,
+#if FF_API_PARSE_FRAME
.capabilities = CODEC_CAP_PARSE_ONLY,
+#endif
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MP1 (MPEG audio layer 1)"),
};
@@ -43,7 +45,9 @@ AVCodec ff_mp2float_decoder = {
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
.decode = decode_frame,
+#if FF_API_PARSE_FRAME
.capabilities = CODEC_CAP_PARSE_ONLY,
+#endif
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"),
};
@@ -56,7 +60,9 @@ AVCodec ff_mp3float_decoder = {
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
.decode = decode_frame,
+#if FF_API_PARSE_FRAME
.capabilities = CODEC_CAP_PARSE_ONLY,
+#endif
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MP3 (MPEG audio layer 3)"),
};
@@ -69,7 +75,9 @@ AVCodec ff_mp3adufloat_decoder = {
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
.decode = decode_frame_adu,
+#if FF_API_PARSE_FRAME
.capabilities = CODEC_CAP_PARSE_ONLY,
+#endif
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("ADU (Application Data Unit) MP3 (MPEG audio layer 3)"),
};
diff --git a/libavcodec/nellymoserdec.c b/libavcodec/nellymoserdec.c
index 04d966173a..cd054826f1 100644
--- a/libavcodec/nellymoserdec.c
+++ b/libavcodec/nellymoserdec.c
@@ -48,7 +48,7 @@
typedef struct NellyMoserDecodeContext {
AVCodecContext* avctx;
float *float_buf;
- float state[NELLY_BUF_LEN];
+ DECLARE_ALIGNED(16, float, state)[NELLY_BUF_LEN];
AVLFG random_state;
GetBitContext gb;
float scale_bias;
@@ -58,23 +58,6 @@ typedef struct NellyMoserDecodeContext {
DECLARE_ALIGNED(32, float, imdct_out)[NELLY_BUF_LEN * 2];
} NellyMoserDecodeContext;
-static void overlap_and_window(NellyMoserDecodeContext *s, float *state, float *audio, float *a_in)
-{
- int bot, top;
-
- bot = 0;
- top = NELLY_BUF_LEN-1;
-
- while (bot < NELLY_BUF_LEN) {
- audio[bot] = a_in [bot]*ff_sine_128[bot]
- +state[bot]*ff_sine_128[top];
-
- bot++;
- top--;
- }
- memcpy(state, a_in + NELLY_BUF_LEN, sizeof(float)*NELLY_BUF_LEN);
-}
-
static void nelly_decode_block(NellyMoserDecodeContext *s,
const unsigned char block[NELLY_BLOCK_LEN],
float audio[NELLY_SAMPLES])
@@ -125,7 +108,9 @@ static void nelly_decode_block(NellyMoserDecodeContext *s,
s->imdct_ctx.imdct_calc(&s->imdct_ctx, s->imdct_out, aptr);
/* XXX: overlapping and windowing should be part of a more
generic imdct function */
- overlap_and_window(s, s->state, aptr, s->imdct_out);
+ s->dsp.vector_fmul_reverse(s->state, s->state, ff_sine_128, NELLY_BUF_LEN);
+ s->dsp.vector_fmul_add(aptr, s->imdct_out, ff_sine_128, s->state, NELLY_BUF_LEN);
+ memcpy(s->state, s->imdct_out + NELLY_BUF_LEN, sizeof(float)*NELLY_BUF_LEN);
}
}
@@ -172,20 +157,21 @@ static int decode_tag(AVCodecContext * avctx,
float *samples_flt = data;
*data_size = 0;
- if (buf_size < avctx->block_align) {
- return buf_size;
- }
-
- if (buf_size % NELLY_BLOCK_LEN) {
- av_log(avctx, AV_LOG_ERROR, "Tag size %d.\n", buf_size);
- return buf_size;
- }
block_size = NELLY_SAMPLES * av_get_bytes_per_sample(avctx->sample_fmt);
- blocks = FFMIN(buf_size / NELLY_BLOCK_LEN, data_max / block_size);
+ blocks = buf_size / NELLY_BLOCK_LEN;
+
if (blocks <= 0) {
+ av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
+ return AVERROR_INVALIDDATA;
+ }
+ if (data_max < blocks * block_size) {
av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
return AVERROR(EINVAL);
}
+ if (buf_size % NELLY_BLOCK_LEN) {
+ av_log(avctx, AV_LOG_WARNING, "Leftover bytes: %d.\n",
+ buf_size % NELLY_BLOCK_LEN);
+ }
/* Normal numbers of blocks for sample rates:
* 8000 Hz - 1
* 11025 Hz - 2
diff --git a/libavcodec/nellymoserenc.c b/libavcodec/nellymoserenc.c
index 1d35cda9a1..5af1c5c6ca 100644
--- a/libavcodec/nellymoserenc.c
+++ b/libavcodec/nellymoserenc.c
@@ -146,7 +146,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
avctx->frame_size = NELLY_SAMPLES;
s->avctx = avctx;
- ff_mdct_init(&s->mdct_ctx, 8, 0, 1.0);
+ ff_mdct_init(&s->mdct_ctx, 8, 0, 32768.0);
dsputil_init(&s->dsp, avctx);
/* Generate overlap window */
@@ -352,17 +352,15 @@ static void encode_block(NellyMoserEncodeContext *s, unsigned char *output, int
static int encode_frame(AVCodecContext *avctx, uint8_t *frame, int buf_size, void *data)
{
NellyMoserEncodeContext *s = avctx->priv_data;
- const int16_t *samples = data;
+ const float *samples = data;
int i;
if (s->last_frame)
return 0;
if (data) {
- for (i = 0; i < avctx->frame_size; i++) {
- s->buf[s->bufsel][i] = samples[i];
- }
- for (; i < NELLY_SAMPLES; i++) {
+ memcpy(s->buf[s->bufsel], samples, avctx->frame_size * sizeof(*samples));
+ for (i = avctx->frame_size; i < NELLY_SAMPLES; i++) {
s->buf[s->bufsel][i] = 0;
}
s->bufsel = 1 - s->bufsel;
@@ -393,5 +391,5 @@ AVCodec ff_nellymoser_encoder = {
.close = encode_end,
.capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY,
.long_name = NULL_IF_CONFIG_SMALL("Nellymoser Asao"),
- .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
+ .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE},
};
diff --git a/libavcodec/snow.c b/libavcodec/snow.c
index a4ebf803e0..ca01f9c684 100644
--- a/libavcodec/snow.c
+++ b/libavcodec/snow.c
@@ -1665,7 +1665,7 @@ static int frame_start(SnowContext *s){
int w= s->avctx->width; //FIXME round up to x16 ?
int h= s->avctx->height;
- if(s->current_picture.data[0] && !(s->avctx->flags&CODEC_FLAG_EMU_EDGE)){
+ if (s->current_picture.data[0] && !(s->avctx->flags&CODEC_FLAG_EMU_EDGE)) {
s->dsp.draw_edges(s->current_picture.data[0],
s->current_picture.linesize[0], w , h ,
EDGE_WIDTH , EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM);
diff --git a/libavcodec/version.h b/libavcodec/version.h
index 0fd5e72874..f8b9920b19 100644
--- a/libavcodec/version.h
+++ b/libavcodec/version.h
@@ -21,7 +21,7 @@
#define AVCODEC_VERSION_H
#define LIBAVCODEC_VERSION_MAJOR 53
-#define LIBAVCODEC_VERSION_MINOR 23
+#define LIBAVCODEC_VERSION_MINOR 24
#define LIBAVCODEC_VERSION_MICRO 0
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
@@ -101,5 +101,8 @@
#ifndef FF_API_GET_ALPHA_INFO
#define FF_API_GET_ALPHA_INFO (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
+#ifndef FF_API_PARSE_FRAME
+#define FF_API_PARSE_FRAME (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
#endif /* AVCODEC_VERSION_H */
diff --git a/libavcodec/wmadec.c b/libavcodec/wmadec.c
index b4a4c800af..bf6e7ee71e 100644
--- a/libavcodec/wmadec.c
+++ b/libavcodec/wmadec.c
@@ -816,7 +816,7 @@ static int wma_decode_superframe(AVCodecContext *avctx,
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
WMACodecContext *s = avctx->priv_data;
- int nb_frames, bit_offset, i, pos, len;
+ int nb_frames, bit_offset, i, pos, len, out_size;
uint8_t *q;
int16_t *samples;
@@ -838,13 +838,19 @@ static int wma_decode_superframe(AVCodecContext *avctx,
if (s->use_bit_reservoir) {
/* read super frame header */
skip_bits(&s->gb, 4); /* super frame index */
- nb_frames = get_bits(&s->gb, 4) - 1;
+ nb_frames = get_bits(&s->gb, 4) - (s->last_superframe_len <= 0);
+ } else {
+ nb_frames = 1;
+ }
- if((nb_frames+1) * s->nb_channels * s->frame_len * sizeof(int16_t) > *data_size){
- av_log(s->avctx, AV_LOG_ERROR, "Insufficient output space\n");
- goto fail;
- }
+ out_size = nb_frames * s->frame_len * s->nb_channels *
+ av_get_bytes_per_sample(avctx->sample_fmt);
+ if (*data_size < out_size) {
+ av_log(s->avctx, AV_LOG_ERROR, "Insufficient output space\n");
+ goto fail;
+ }
+ if (s->use_bit_reservoir) {
bit_offset = get_bits(&s->gb, s->byte_offset_bits + 3);
if (s->last_superframe_len > 0) {
@@ -873,6 +879,7 @@ static int wma_decode_superframe(AVCodecContext *avctx,
if (wma_decode_frame(s, samples) < 0)
goto fail;
samples += s->nb_channels * s->frame_len;
+ nb_frames--;
}
/* read each frame starting from bit_offset */
@@ -901,10 +908,6 @@ static int wma_decode_superframe(AVCodecContext *avctx,
s->last_superframe_len = len;
memcpy(s->last_superframe, buf + pos, len);
} else {
- if(s->nb_channels * s->frame_len * sizeof(int16_t) > *data_size){
- av_log(s->avctx, AV_LOG_ERROR, "Insufficient output space\n");
- goto fail;
- }
/* single frame decode */
if (wma_decode_frame(s, samples) < 0)
goto fail;
@@ -912,7 +915,7 @@ static int wma_decode_superframe(AVCodecContext *avctx,
}
//av_log(NULL, AV_LOG_ERROR, "%d %d %d %d outbytes:%d eaten:%d\n", s->frame_len_bits, s->block_len_bits, s->frame_len, s->block_len, (int8_t *)samples - (int8_t *)data, s->block_align);
- *data_size = (int8_t *)samples - (int8_t *)data;
+ *data_size = out_size;
return buf_size;
fail:
/* when error, we reset the bit reservoir */
diff --git a/libavcodec/wmaprodec.c b/libavcodec/wmaprodec.c
index 119027b7b7..868a28393d 100644
--- a/libavcodec/wmaprodec.c
+++ b/libavcodec/wmaprodec.c
@@ -86,12 +86,14 @@
* subframe in order to reconstruct the output samples.
*/
+#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "internal.h"
#include "get_bits.h"
#include "put_bits.h"
#include "wmaprodata.h"
#include "dsputil.h"
+#include "fmtconvert.h"
#include "sinewin.h"
#include "wma.h"
@@ -166,6 +168,7 @@ typedef struct WMAProDecodeCtx {
/* generic decoder variables */
AVCodecContext* avctx; ///< codec context for av_log
DSPContext dsp; ///< accelerated DSP functions
+ FmtConvertContext fmt_conv;
uint8_t frame_data[MAX_FRAMESIZE +
FF_INPUT_BUFFER_PADDING_SIZE];///< compressed frame data
PutBitContext pb; ///< context for filling the frame_data buffer
@@ -279,6 +282,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
s->avctx = avctx;
dsputil_init(&s->dsp, avctx);
+ ff_fmt_convert_init(&s->fmt_conv, avctx);
init_put_bits(&s->pb, s->frame_data, MAX_FRAMESIZE);
avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
@@ -767,7 +771,7 @@ static int decode_coeffs(WMAProDecodeCtx *s, int c)
/* Integers 0..15 as single-precision floats. The table saves a
costly int to float conversion, and storing the values as
integers allows fast sign-flipping. */
- static const int fval_tab[16] = {
+ static const uint32_t fval_tab[16] = {
0x00000000, 0x3f800000, 0x40000000, 0x40400000,
0x40800000, 0x40a00000, 0x40c00000, 0x40e00000,
0x41000000, 0x41100000, 0x41200000, 0x41300000,
@@ -799,7 +803,7 @@ static int decode_coeffs(WMAProDecodeCtx *s, int c)
4 vector coded large values) */
while ((s->transmit_num_vec_coeffs || !rl_mode) &&
(cur_coeff + 3 < ci->num_vec_coeffs)) {
- int vals[4];
+ uint32_t vals[4];
int i;
unsigned int idx;
@@ -809,15 +813,15 @@ static int decode_coeffs(WMAProDecodeCtx *s, int c)
for (i = 0; i < 4; i += 2) {
idx = get_vlc2(&s->gb, vec2_vlc.table, VLCBITS, VEC2MAXDEPTH);
if (idx == HUFF_VEC2_SIZE - 1) {
- int v0, v1;
+ uint32_t v0, v1;
v0 = get_vlc2(&s->gb, vec1_vlc.table, VLCBITS, VEC1MAXDEPTH);
if (v0 == HUFF_VEC1_SIZE - 1)
v0 += ff_wma_get_large_val(&s->gb);
v1 = get_vlc2(&s->gb, vec1_vlc.table, VLCBITS, VEC1MAXDEPTH);
if (v1 == HUFF_VEC1_SIZE - 1)
v1 += ff_wma_get_large_val(&s->gb);
- ((float*)vals)[i ] = v0;
- ((float*)vals)[i+1] = v1;
+ vals[i ] = ((av_alias32){ .f32 = v0 }).u32;
+ vals[i+1] = ((av_alias32){ .f32 = v1 }).u32;
} else {
vals[i] = fval_tab[symbol_to_vec2[idx] >> 4 ];
vals[i+1] = fval_tab[symbol_to_vec2[idx] & 0xF];
@@ -833,8 +837,8 @@ static int decode_coeffs(WMAProDecodeCtx *s, int c)
/** decode sign */
for (i = 0; i < 4; i++) {
if (vals[i]) {
- int sign = get_bits1(&s->gb) - 1;
- *(uint32_t*)&ci->coeffs[cur_coeff] = vals[i] ^ sign<<31;
+ uint32_t sign = get_bits1(&s->gb) - 1;
+ AV_WN32A(&ci->coeffs[cur_coeff], vals[i] ^ sign << 31);
num_zeros = 0;
} else {
ci->coeffs[cur_coeff] = 0;
@@ -1281,6 +1285,7 @@ static int decode_frame(WMAProDecodeCtx *s)
int more_frames = 0;
int len = 0;
int i;
+ const float *out_ptr[WMAPRO_MAX_CHANNELS];
/** check for potential output buffer overflow */
if (s->num_channels * s->samples_per_frame > s->samples_end - s->samples) {
@@ -1356,18 +1361,12 @@ static int decode_frame(WMAProDecodeCtx *s)
}
/** interleave samples and write them to the output buffer */
- for (i = 0; i < s->num_channels; i++) {
- float* ptr = s->samples + i;
- int incr = s->num_channels;
- float* iptr = s->channel[i].out;
- float* iend = iptr + s->samples_per_frame;
-
- // FIXME should create/use a DSP function here
- while (iptr < iend) {
- *ptr = *iptr++;
- ptr += incr;
- }
+ for (i = 0; i < s->num_channels; i++)
+ out_ptr[i] = s->channel[i].out;
+ s->fmt_conv.float_interleave(s->samples, out_ptr, s->samples_per_frame,
+ s->num_channels);
+ for (i = 0; i < s->num_channels; i++) {
/** reuse second half of the IMDCT output for the next frame */
memcpy(&s->channel[i].out[0],
&s->channel[i].out[s->samples_per_frame],
diff --git a/libavcodec/wmavoice.c b/libavcodec/wmavoice.c
index b7a6f88a5b..ca7b368f63 100644
--- a/libavcodec/wmavoice.c
+++ b/libavcodec/wmavoice.c
@@ -1730,7 +1730,7 @@ static int synth_superframe(AVCodecContext *ctx,
{
WMAVoiceContext *s = ctx->priv_data;
GetBitContext *gb = &s->gb, s_gb;
- int n, res, n_samples = 480;
+ int n, res, out_size, n_samples = 480;
double lsps[MAX_FRAMES][MAX_LSPS];
const double *mean_lsf = s->lsps == 16 ?
wmavoice_mean_lsf16[s->lsp_def_mode] : wmavoice_mean_lsf10[s->lsp_def_mode];
@@ -1748,7 +1748,10 @@ static int synth_superframe(AVCodecContext *ctx,
s->sframe_cache_size = 0;
}
- if ((res = check_bits_for_superframe(gb, s)) == 1) return 1;
+ if ((res = check_bits_for_superframe(gb, s)) == 1) {
+ *data_size = 0;
+ return 1;
+ }
/* First bit is speech/music bit, it differentiates between WMAVoice
* speech samples (the actual codec) and WMAVoice music samples, which
@@ -1789,6 +1792,14 @@ static int synth_superframe(AVCodecContext *ctx,
stabilize_lsps(lsps[n], s->lsps);
}
+ out_size = n_samples * av_get_bytes_per_sample(ctx->sample_fmt);
+ if (*data_size < out_size) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Output buffer too small (%d given - %zu needed)\n",
+ *data_size, out_size);
+ return -1;
+ }
+
/* Parse frames, optionally preceeded by per-frame (independent) LSPs. */
for (n = 0; n < 3; n++) {
if (!s->has_residual_lsps) {
@@ -1808,8 +1819,10 @@ static int synth_superframe(AVCodecContext *ctx,
&samples[n * MAX_FRAMESIZE],
lsps[n], n == 0 ? s->prev_lsps : lsps[n - 1],
&excitation[s->history_nsamples + n * MAX_FRAMESIZE],
- &synth[s->lsps + n * MAX_FRAMESIZE])))
+ &synth[s->lsps + n * MAX_FRAMESIZE]))) {
+ *data_size = 0;
return res;
+ }
}
/* Statistics? FIXME - we don't check for length, a slight overrun
@@ -1821,7 +1834,7 @@ static int synth_superframe(AVCodecContext *ctx,
}
/* Specify nr. of output samples */
- *data_size = n_samples * sizeof(float);
+ *data_size = out_size;
/* Update history */
memcpy(s->prev_lsps, lsps[2],
@@ -1915,22 +1928,16 @@ static int wmavoice_decode_packet(AVCodecContext *ctx, void *data,
GetBitContext *gb = &s->gb;
int size, res, pos;
- if (*data_size < 480 * sizeof(float)) {
- av_log(ctx, AV_LOG_ERROR,
- "Output buffer too small (%d given - %zu needed)\n",
- *data_size, 480 * sizeof(float));
- return -1;
- }
- *data_size = 0;
-
/* Packets are sometimes a multiple of ctx->block_align, with a packet
* header at each ctx->block_align bytes. However, FFmpeg's ASF demuxer
* feeds us ASF packets, which may concatenate multiple "codec" packets
* in a single "muxer" packet, so we artificially emulate that by
* capping the packet size at ctx->block_align. */
for (size = avpkt->size; size > ctx->block_align; size -= ctx->block_align);
- if (!size)
+ if (!size) {
+ *data_size = 0;
return 0;
+ }
init_get_bits(&s->gb, avpkt->data, size << 3);
/* size == ctx->block_align is used to indicate whether we are dealing with
diff --git a/libavformat/asfdec.c b/libavformat/asfdec.c
index 6af4439e87..3dc0b1716a 100644
--- a/libavformat/asfdec.c
+++ b/libavformat/asfdec.c
@@ -1297,7 +1297,7 @@ static int asf_read_seek(AVFormatContext *s, int stream_index, int64_t pts, int
}
}
/* no index or seeking by index failed */
- if(av_seek_frame_binary(s, stream_index, pts, flags)<0)
+ if (ff_seek_frame_binary(s, stream_index, pts, flags) < 0)
return -1;
asf_reset_header(s);
return 0;
diff --git a/libavformat/avformat.h b/libavformat/avformat.h
index 425198fcda..60ff6dcf03 100644
--- a/libavformat/avformat.h
+++ b/libavformat/avformat.h
@@ -523,8 +523,10 @@ typedef struct AVStream {
AVRational r_frame_rate;
void *priv_data;
+#if FF_API_REORDER_PRIVATE
/* internal data used in av_find_stream_info() */
int64_t first_dts;
+#endif
/**
* encoding: pts generation when outputting stream
@@ -539,7 +541,9 @@ typedef struct AVStream {
* encoding: set by libavformat in av_write_header
*/
AVRational time_base;
+#if FF_API_REORDER_PRIVATE
int pts_wrap_bits; /**< number of bits in pts (used for wrapping control) */
+#endif
#if FF_API_STREAM_COPY
/* ffmpeg.c private use */
attribute_deprecated int stream_copy; /**< If set, just copy stream. */
@@ -572,6 +576,7 @@ typedef struct AVStream {
*/
int64_t duration;
+#if FF_API_REORDER_PRIVATE
/* av_read_frame() support */
enum AVStreamParseType need_parsing;
struct AVCodecParserContext *parser;
@@ -584,14 +589,17 @@ typedef struct AVStream {
support seeking natively. */
int nb_index_entries;
unsigned int index_entries_allocated_size;
+#endif
int64_t nb_frames; ///< number of frames in this stream if known or 0
int disposition; /**< AV_DISPOSITION_* bit field */
+#if FF_API_REORDER_PRIVATE
AVProbeData probe_data;
#define MAX_REORDER_DELAY 16
int64_t pts_buffer[MAX_REORDER_DELAY+1];
+#endif
/**
* sample aspect ratio (0 if unknown)
@@ -602,6 +610,7 @@ typedef struct AVStream {
AVDictionary *metadata;
+#if FF_API_REORDER_PRIVATE
/* Intended mostly for av_read_frame() support. Not supposed to be used by */
/* external applications; try to use something else if at all possible. */
const uint8_t *cur_ptr;
@@ -630,12 +639,21 @@ typedef struct AVStream {
* used internally, NOT PART OF PUBLIC API, dont read or write from outside of libav*
*/
struct AVPacketList *last_in_packet_buffer;
+#endif
/**
* Average framerate
*/
AVRational avg_frame_rate;
+ /*****************************************************************
+ * All fields below this line are not part of the public API. They
+ * may not be used outside of libavformat and can be changed and
+ * removed at will.
+ * New public fields should be added right above.
+ *****************************************************************
+ */
+
/**
* Number of frames that have been demuxed during av_find_stream_info()
*/
@@ -665,6 +683,49 @@ typedef struct AVStream {
* NOT PART OF PUBLIC API
*/
int request_probe;
+#if !FF_API_REORDER_PRIVATE
+ const uint8_t *cur_ptr;
+ int cur_len;
+ AVPacket cur_pkt;
+
+ // Timestamp generation support:
+ /**
+ * Timestamp corresponding to the last dts sync point.
+ *
+ * Initialized when AVCodecParserContext.dts_sync_point >= 0 and
+ * a DTS is received from the underlying container. Otherwise set to
+ * AV_NOPTS_VALUE by default.
+ */
+ int64_t reference_dts;
+ int64_t first_dts;
+ int64_t cur_dts;
+ int last_IP_duration;
+ int64_t last_IP_pts;
+
+ /**
+ * Number of packets to buffer for codec probing
+ */
+#define MAX_PROBE_PACKETS 2500
+ int probe_packets;
+
+ /**
+ * last packet in packet_buffer for this stream when muxing.
+ */
+ struct AVPacketList *last_in_packet_buffer;
+ AVProbeData probe_data;
+#define MAX_REORDER_DELAY 16
+ int64_t pts_buffer[MAX_REORDER_DELAY+1];
+ /* av_read_frame() support */
+ enum AVStreamParseType need_parsing;
+ struct AVCodecParserContext *parser;
+
+ AVIndexEntry *index_entries; /**< Only used if the format does not
+ support seeking natively. */
+ int nb_index_entries;
+ unsigned int index_entries_allocated_size;
+
+ int pts_wrap_bits; /**< number of bits in pts (used for wrapping control) */
+#endif
} AVStream;
#define AV_PROGRAM_RUNNING 1
@@ -724,6 +785,7 @@ typedef struct AVFormatContext {
#endif
int ctx_flags; /**< Format-specific flags, see AVFMTCTX_xx */
+#if FF_API_REORDER_PRIVATE
/* private data for pts handling (do not modify directly). */
/**
* This buffer is only needed when packets were already buffered but
@@ -731,6 +793,7 @@ typedef struct AVFormatContext {
* streams.
*/
struct AVPacketList *packet_buffer;
+#endif
/**
* Decoding: position of the first frame of the component, in
@@ -761,11 +824,13 @@ typedef struct AVFormatContext {
*/
int bit_rate;
+#if FF_API_REORDER_PRIVATE
/* av_read_frame() support */
AVStream *cur_st;
/* av_seek_frame() support */
int64_t data_offset; /**< offset of the first packet */
+#endif
#if FF_API_MUXRATE
/**
@@ -876,6 +941,7 @@ typedef struct AVFormatContext {
int debug;
#define FF_FDEBUG_TS 0x0001
+#if FF_API_REORDER_PRIVATE
/**
* Raw packets from the demuxer, prior to parsing and decoding.
* This buffer is used for buffering packets until the codec can
@@ -886,15 +952,18 @@ typedef struct AVFormatContext {
struct AVPacketList *raw_packet_buffer_end;
struct AVPacketList *packet_buffer_end;
+#endif
AVDictionary *metadata;
+#if FF_API_REORDER_PRIVATE
/**
* Remaining size available for raw_packet_buffer, in bytes.
* NOT PART OF PUBLIC API
*/
#define RAW_PACKET_BUFFER_SIZE 2500000
int raw_packet_buffer_remaining_size;
+#endif
/**
* Start time of the stream in real world time, in microseconds
@@ -923,6 +992,43 @@ typedef struct AVFormatContext {
* This will be moved into demuxer private options. Thus no API/ABI compatibility
*/
int ts_id;
+
+ /*****************************************************************
+ * All fields below this line are not part of the public API. They
+ * may not be used outside of libavformat and can be changed and
+ * removed at will.
+ * New public fields should be added right above.
+ *****************************************************************
+ */
+#if !FF_API_REORDER_PRIVATE
+ /**
+ * Raw packets from the demuxer, prior to parsing and decoding.
+ * This buffer is used for buffering packets until the codec can
+ * be identified, as parsing cannot be done without knowing the
+ * codec.
+ */
+ struct AVPacketList *raw_packet_buffer;
+ struct AVPacketList *raw_packet_buffer_end;
+ /**
+ * Remaining size available for raw_packet_buffer, in bytes.
+ */
+#define RAW_PACKET_BUFFER_SIZE 2500000
+ int raw_packet_buffer_remaining_size;
+
+ /**
+ * This buffer is only needed when packets were already buffered but
+ * not decoded, for example to get the codec parameters in MPEG
+ * streams.
+ */
+ struct AVPacketList *packet_buffer;
+ struct AVPacketList *packet_buffer_end;
+
+ /* av_read_frame() support */
+ AVStream *cur_st;
+
+ /* av_seek_frame() support */
+ int64_t data_offset; /**< offset of the first packet */
+#endif
} AVFormatContext;
typedef struct AVPacketList {
@@ -1479,40 +1585,20 @@ int av_index_search_timestamp(AVStream *st, int64_t timestamp, int flags);
int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
int size, int distance, int flags);
-/**
- * Perform a binary search using av_index_search_timestamp() and
- * AVInputFormat.read_timestamp().
- * This is not supposed to be called directly by a user application,
- * but by demuxers.
- * @param target_ts target timestamp in the time base of the given stream
- * @param stream_index stream number
- */
+#if FF_API_SEEK_PUBLIC
+attribute_deprecated
int av_seek_frame_binary(AVFormatContext *s, int stream_index,
int64_t target_ts, int flags);
-
-/**
- * Update cur_dts of all streams based on the given timestamp and AVStream.
- *
- * Stream ref_st unchanged, others set cur_dts in their native time base.
- * Only needed for timestamp wrapping or if (dts not set and pts!=dts).
- * @param timestamp new dts expressed in time_base of param ref_st
- * @param ref_st reference stream giving time_base of param timestamp
- */
+attribute_deprecated
void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp);
-
-/**
- * Perform a binary search using read_timestamp().
- * This is not supposed to be called directly by a user application,
- * but by demuxers.
- * @param target_ts target timestamp in the time base of the given stream
- * @param stream_index stream number
- */
+attribute_deprecated
int64_t av_gen_search(AVFormatContext *s, int stream_index,
int64_t target_ts, int64_t pos_min,
int64_t pos_max, int64_t pos_limit,
int64_t ts_min, int64_t ts_max,
int flags, int64_t *ts_ret,
int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ));
+#endif
/**
* media file output
diff --git a/libavformat/internal.h b/libavformat/internal.h
index a2e80f2de3..582a2c8fe1 100644
--- a/libavformat/internal.h
+++ b/libavformat/internal.h
@@ -251,4 +251,37 @@ enum CodecID ff_guess_image2_codec(const char *filename);
*/
int64_t ff_iso8601_to_unix_time(const char *datestr);
+/**
+ * Perform a binary search using av_index_search_timestamp() and
+ * AVInputFormat.read_timestamp().
+ *
+ * @param target_ts target timestamp in the time base of the given stream
+ * @param stream_index stream number
+ */
+int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
+ int64_t target_ts, int flags);
+
+/**
+ * Update cur_dts of all streams based on the given timestamp and AVStream.
+ *
+ * Stream ref_st unchanged, others set cur_dts in their native time base.
+ * Only needed for timestamp wrapping or if (dts not set and pts!=dts).
+ * @param timestamp new dts expressed in time_base of param ref_st
+ * @param ref_st reference stream giving time_base of param timestamp
+ */
+void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp);
+
+/**
+ * Perform a binary search using read_timestamp().
+ *
+ * @param target_ts target timestamp in the time base of the given stream
+ * @param stream_index stream number
+ */
+int64_t ff_gen_search(AVFormatContext *s, int stream_index,
+ int64_t target_ts, int64_t pos_min,
+ int64_t pos_max, int64_t pos_limit,
+ int64_t ts_min, int64_t ts_max,
+ int flags, int64_t *ts_ret,
+ int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ));
+
#endif /* AVFORMAT_INTERNAL_H */
diff --git a/libavformat/isom.c b/libavformat/isom.c
index 81a89fa405..c7272ddc9c 100644
--- a/libavformat/isom.c
+++ b/libavformat/isom.c
@@ -61,6 +61,8 @@ const AVCodecTag ff_mp4_obj_type[] = {
{ CODEC_ID_VORBIS , 0xDD }, /* non standard, gpac uses it */
{ CODEC_ID_DVD_SUBTITLE, 0xE0 }, /* non standard, see unsupported-embedded-subs-2.mp4 */
{ CODEC_ID_QCELP , 0xE1 },
+ { CODEC_ID_MPEG4SYSTEMS, 0x01 },
+ { CODEC_ID_MPEG4SYSTEMS, 0x02 },
{ CODEC_ID_NONE , 0 },
};
diff --git a/libavformat/isom.h b/libavformat/isom.h
index 9269799e5e..8f92caed0c 100644
--- a/libavformat/isom.h
+++ b/libavformat/isom.h
@@ -150,10 +150,12 @@ int ff_mp4_read_descr(AVFormatContext *fc, AVIOContext *pb, int *tag);
int ff_mp4_read_dec_config_descr(AVFormatContext *fc, AVStream *st, AVIOContext *pb);
void ff_mp4_parse_es_descr(AVIOContext *pb, int *es_id);
+#define MP4ODescrTag 0x01
#define MP4IODescrTag 0x02
#define MP4ESDescrTag 0x03
#define MP4DecConfigDescrTag 0x04
#define MP4DecSpecificDescrTag 0x05
+#define MP4SLDescrTag 0x06
int ff_mov_read_esds(AVFormatContext *fc, AVIOContext *pb, MOVAtom atom);
enum CodecID ff_mov_get_lpcm_codec_id(int bps, int flags);
diff --git a/libavformat/matroskadec.c b/libavformat/matroskadec.c
index 78e337bfab..f31c731c70 100644
--- a/libavformat/matroskadec.c
+++ b/libavformat/matroskadec.c
@@ -2071,7 +2071,7 @@ static int matroska_read_seek(AVFormatContext *s, int stream_index,
matroska->skip_to_keyframe = !(flags & AVSEEK_FLAG_ANY);
matroska->skip_to_timecode = st->index_entries[index].timestamp;
matroska->done = 0;
- av_update_cur_dts(s, st, st->index_entries[index].timestamp);
+ ff_update_cur_dts(s, st, st->index_entries[index].timestamp);
return 0;
}
diff --git a/libavformat/mpegts.c b/libavformat/mpegts.c
index f193b0f3a7..1bedbf2948 100644
--- a/libavformat/mpegts.c
+++ b/libavformat/mpegts.c
@@ -29,6 +29,7 @@
#include "libavutil/opt.h"
#include "libavutil/avassert.h"
#include "libavcodec/bytestream.h"
+#include "libavcodec/get_bits.h"
#include "avformat.h"
#include "mpegts.h"
#include "internal.h"
@@ -43,6 +44,8 @@
#define MAX_PES_PAYLOAD 200*1024
+#define MAX_MP4_DESCR_COUNT 16
+
enum MpegTSFilterType {
MPEGTS_PES,
MPEGTS_SECTION,
@@ -73,6 +76,7 @@ typedef struct MpegTSSectionFilter {
struct MpegTSFilter {
int pid;
+ int es_id;
int last_cc; /* last cc code (-1 if first packet) */
enum MpegTSFilterType type;
union {
@@ -173,6 +177,7 @@ typedef struct PESContext {
int64_t ts_packet_pos; /**< position of first TS packet of this PES packet */
uint8_t header[MAX_PES_HEADER_SIZE];
uint8_t *buffer;
+ SLConfigDescr sl;
} PESContext;
extern AVInputFormat ff_mpegts_demuxer;
@@ -327,6 +332,7 @@ static MpegTSFilter *mpegts_open_section_filter(MpegTSContext *ts, unsigned int
ts->pids[pid] = filter;
filter->type = MPEGTS_SECTION;
filter->pid = pid;
+ filter->es_id = -1;
filter->last_cc = -1;
sec = &filter->u.section_filter;
sec->section_cb = section_cb;
@@ -355,6 +361,7 @@ static MpegTSFilter *mpegts_open_pes_filter(MpegTSContext *ts, unsigned int pid,
ts->pids[pid] = filter;
filter->type = MPEGTS_PES;
filter->pid = pid;
+ filter->es_id = -1;
filter->last_cc = -1;
pes = &filter->u.pes_filter;
pes->pes_cb = pes_cb;
@@ -682,6 +689,83 @@ static void new_pes_packet(PESContext *pes, AVPacket *pkt)
pes->flags = 0;
}
+static uint64_t get_bits64(GetBitContext *gb, int bits)
+{
+ uint64_t ret = 0;
+ while (bits > 17) {
+ ret <<= 17;
+ ret |= get_bits(gb, 17);
+ bits -= 17;
+ }
+ ret <<= bits;
+ ret |= get_bits(gb, bits);
+ return ret;
+}
+
+static int read_sl_header(PESContext *pes, SLConfigDescr *sl, const uint8_t *buf, int buf_size)
+{
+ GetBitContext gb;
+ int au_start_flag = 0, au_end_flag = 0, ocr_flag = 0, idle_flag = 0;
+ int padding_flag = 0, padding_bits = 0, inst_bitrate_flag = 0;
+ int dts_flag = -1, cts_flag = -1;
+ int64_t dts = AV_NOPTS_VALUE, cts = AV_NOPTS_VALUE;
+ init_get_bits(&gb, buf, buf_size*8);
+
+ if (sl->use_au_start)
+ au_start_flag = get_bits1(&gb);
+ if (sl->use_au_end)
+ au_end_flag = get_bits1(&gb);
+ if (!sl->use_au_start && !sl->use_au_end)
+ au_start_flag = au_end_flag = 1;
+ if (sl->ocr_len > 0)
+ ocr_flag = get_bits1(&gb);
+ if (sl->use_idle)
+ idle_flag = get_bits1(&gb);
+ if (sl->use_padding)
+ padding_flag = get_bits1(&gb);
+ if (padding_flag)
+ padding_bits = get_bits(&gb, 3);
+
+ if (!idle_flag && (!padding_flag || padding_bits != 0)) {
+ if (sl->packet_seq_num_len)
+ skip_bits_long(&gb, sl->packet_seq_num_len);
+ if (sl->degr_prior_len)
+ if (get_bits1(&gb))
+ skip_bits(&gb, sl->degr_prior_len);
+ if (ocr_flag)
+ skip_bits_long(&gb, sl->ocr_len);
+ if (au_start_flag) {
+ if (sl->use_rand_acc_pt)
+ get_bits1(&gb);
+ if (sl->au_seq_num_len > 0)
+ skip_bits_long(&gb, sl->au_seq_num_len);
+ if (sl->use_timestamps) {
+ dts_flag = get_bits1(&gb);
+ cts_flag = get_bits1(&gb);
+ }
+ }
+ if (sl->inst_bitrate_len)
+ inst_bitrate_flag = get_bits1(&gb);
+ if (dts_flag == 1)
+ dts = get_bits64(&gb, sl->timestamp_len);
+ if (cts_flag == 1)
+ cts = get_bits64(&gb, sl->timestamp_len);
+ if (sl->au_len > 0)
+ skip_bits_long(&gb, sl->au_len);
+ if (inst_bitrate_flag)
+ skip_bits_long(&gb, sl->inst_bitrate_len);
+ }
+
+ if (dts != AV_NOPTS_VALUE)
+ pes->dts = dts;
+ if (cts != AV_NOPTS_VALUE)
+ pes->pts = cts;
+
+ av_set_pts_info(pes->st, sl->timestamp_len, 1, sl->timestamp_res);
+
+ return (get_bits_count(&gb) + 7) >> 3;
+}
+
/* return non zero if a packet could be constructed */
static int mpegts_push_data(MpegTSFilter *filter,
const uint8_t *buf, int buf_size, int is_start,
@@ -833,6 +917,12 @@ static int mpegts_push_data(MpegTSFilter *filter,
/* we got the full header. We parse it and get the payload */
pes->state = MPEGTS_PAYLOAD;
pes->data_index = 0;
+ if (pes->stream_type == 0x12) {
+ int sl_header_bytes = read_sl_header(pes, &pes->sl, p, buf_size);
+ pes->pes_header_size += sl_header_bytes;
+ p += sl_header_bytes;
+ buf_size -= sl_header_bytes;
+ }
}
break;
case MPEGTS_PAYLOAD:
@@ -897,48 +987,289 @@ static PESContext *add_pes_stream(MpegTSContext *ts, int pid, int pcr_pid)
return pes;
}
+#define MAX_LEVEL 4
+typedef struct {
+ AVFormatContext *s;
+ AVIOContext pb;
+ Mp4Descr *descr;
+ Mp4Descr *active_descr;
+ int descr_count;
+ int max_descr_count;
+ int level;
+} MP4DescrParseContext;
+
+static int init_MP4DescrParseContext(
+ MP4DescrParseContext *d, AVFormatContext *s, const uint8_t *buf,
+ unsigned size, Mp4Descr *descr, int max_descr_count)
+{
+ int ret;
+ if (size > (1<<30))
+ return AVERROR_INVALIDDATA;
+
+ if ((ret = ffio_init_context(&d->pb, (unsigned char*)buf, size, 0,
+ NULL, NULL, NULL, NULL)) < 0)
+ return ret;
+
+ d->s = s;
+ d->level = 0;
+ d->descr_count = 0;
+ d->descr = descr;
+ d->active_descr = NULL;
+ d->max_descr_count = max_descr_count;
+
+ return 0;
+}
+
+static void update_offsets(AVIOContext *pb, int64_t *off, int *len) {
+ int64_t new_off = avio_tell(pb);
+ (*len) -= new_off - *off;
+ *off = new_off;
+}
+
+static int parse_mp4_descr(MP4DescrParseContext *d, int64_t off, int len,
+ int target_tag);
+
+static int parse_mp4_descr_arr(MP4DescrParseContext *d, int64_t off, int len)
+{
+ while (len > 0) {
+ if (parse_mp4_descr(d, off, len, 0) < 0)
+ return -1;
+ update_offsets(&d->pb, &off, &len);
+ }
+ return 0;
+}
+
+static int parse_MP4IODescrTag(MP4DescrParseContext *d, int64_t off, int len)
+{
+ avio_rb16(&d->pb); // ID
+ avio_r8(&d->pb);
+ avio_r8(&d->pb);
+ avio_r8(&d->pb);
+ avio_r8(&d->pb);
+ avio_r8(&d->pb);
+ update_offsets(&d->pb, &off, &len);
+ return parse_mp4_descr_arr(d, off, len);
+}
+
+static int parse_MP4ODescrTag(MP4DescrParseContext *d, int64_t off, int len)
+{
+ int id_flags;
+ if (len < 2)
+ return 0;
+ id_flags = avio_rb16(&d->pb);
+ if (!(id_flags & 0x0020)) { //URL_Flag
+ update_offsets(&d->pb, &off, &len);
+ return parse_mp4_descr_arr(d, off, len); //ES_Descriptor[]
+ } else {
+ return 0;
+ }
+}
+
+static int parse_MP4ESDescrTag(MP4DescrParseContext *d, int64_t off, int len)
+{
+ int es_id = 0;
+ if (d->descr_count >= d->max_descr_count)
+ return -1;
+ ff_mp4_parse_es_descr(&d->pb, &es_id);
+ d->active_descr = d->descr + (d->descr_count++);
+
+ d->active_descr->es_id = es_id;
+ update_offsets(&d->pb, &off, &len);
+ parse_mp4_descr(d, off, len, MP4DecConfigDescrTag);
+ update_offsets(&d->pb, &off, &len);
+ if (len > 0)
+ parse_mp4_descr(d, off, len, MP4SLDescrTag);
+ d->active_descr = NULL;
+ return 0;
+}
+
+static int parse_MP4DecConfigDescrTag(MP4DescrParseContext *d, int64_t off, int len)
+{
+ Mp4Descr *descr = d->active_descr;
+ if (!descr)
+ return -1;
+ d->active_descr->dec_config_descr = av_malloc(len);
+ if (!descr->dec_config_descr)
+ return AVERROR(ENOMEM);
+ descr->dec_config_descr_len = len;
+ avio_read(&d->pb, descr->dec_config_descr, len);
+ return 0;
+}
+
+static int parse_MP4SLDescrTag(MP4DescrParseContext *d, int64_t off, int len)
+{
+ Mp4Descr *descr = d->active_descr;
+ int predefined;
+ if (!descr)
+ return -1;
+
+ predefined = avio_r8(&d->pb);
+ if (!predefined) {
+ int lengths;
+ int flags = avio_r8(&d->pb);
+ descr->sl.use_au_start = !!(flags & 0x80);
+ descr->sl.use_au_end = !!(flags & 0x40);
+ descr->sl.use_rand_acc_pt = !!(flags & 0x20);
+ descr->sl.use_padding = !!(flags & 0x08);
+ descr->sl.use_timestamps = !!(flags & 0x04);
+ descr->sl.use_idle = !!(flags & 0x02);
+ descr->sl.timestamp_res = avio_rb32(&d->pb);
+ avio_rb32(&d->pb);
+ descr->sl.timestamp_len = avio_r8(&d->pb);
+ descr->sl.ocr_len = avio_r8(&d->pb);
+ descr->sl.au_len = avio_r8(&d->pb);
+ descr->sl.inst_bitrate_len = avio_r8(&d->pb);
+ lengths = avio_rb16(&d->pb);
+ descr->sl.degr_prior_len = lengths >> 12;
+ descr->sl.au_seq_num_len = (lengths >> 7) & 0x1f;
+ descr->sl.packet_seq_num_len = (lengths >> 2) & 0x1f;
+ } else {
+ av_log_missing_feature(d->s, "Predefined SLConfigDescriptor\n", 0);
+ }
+ return 0;
+}
+
+static int parse_mp4_descr(MP4DescrParseContext *d, int64_t off, int len,
+ int target_tag) {
+ int tag;
+ int len1 = ff_mp4_read_descr(d->s, &d->pb, &tag);
+ update_offsets(&d->pb, &off, &len);
+ if (len < 0 || len1 > len || len1 <= 0) {
+ av_log(d->s, AV_LOG_ERROR, "Tag %x length violation new length %d bytes remaining %d\n", tag, len1, len);
+ return -1;
+ }
+
+ if (d->level++ >= MAX_LEVEL) {
+ av_log(d->s, AV_LOG_ERROR, "Maximum MP4 descriptor level exceeded\n");
+ goto done;
+ }
+
+ if (target_tag && tag != target_tag) {
+ av_log(d->s, AV_LOG_ERROR, "Found tag %x expected %x\n", tag, target_tag);
+ goto done;
+ }
+
+ switch (tag) {
+ case MP4IODescrTag:
+ parse_MP4IODescrTag(d, off, len1);
+ break;
+ case MP4ODescrTag:
+ parse_MP4ODescrTag(d, off, len1);
+ break;
+ case MP4ESDescrTag:
+ parse_MP4ESDescrTag(d, off, len1);
+ break;
+ case MP4DecConfigDescrTag:
+ parse_MP4DecConfigDescrTag(d, off, len1);
+ break;
+ case MP4SLDescrTag:
+ parse_MP4SLDescrTag(d, off, len1);
+ break;
+ }
+
+done:
+ d->level--;
+ avio_seek(&d->pb, off + len1, SEEK_SET);
+ return 0;
+}
+
static int mp4_read_iods(AVFormatContext *s, const uint8_t *buf, unsigned size,
- int *es_id, uint8_t **dec_config_descr,
- int *dec_config_descr_size)
+ Mp4Descr *descr, int *descr_count, int max_descr_count)
+{
+ MP4DescrParseContext d;
+ if (init_MP4DescrParseContext(&d, s, buf, size, descr, max_descr_count) < 0)
+ return -1;
+
+ parse_mp4_descr(&d, avio_tell(&d.pb), size, MP4IODescrTag);
+
+ *descr_count = d.descr_count;
+ return 0;
+}
+
+static int mp4_read_od(AVFormatContext *s, const uint8_t *buf, unsigned size,
+ Mp4Descr *descr, int *descr_count, int max_descr_count)
{
+ MP4DescrParseContext d;
+ if (init_MP4DescrParseContext(&d, s, buf, size, descr, max_descr_count) < 0)
+ return -1;
+
+ parse_mp4_descr_arr(&d, avio_tell(&d.pb), size);
+
+ *descr_count = d.descr_count;
+ return 0;
+}
+
+static void m4sl_cb(MpegTSFilter *filter, const uint8_t *section, int section_len)
+{
+ MpegTSContext *ts = filter->u.section_filter.opaque;
+ SectionHeader h;
+ const uint8_t *p, *p_end;
AVIOContext pb;
- int tag;
- unsigned len;
-
- ffio_init_context(&pb, buf, size, 0, NULL, NULL, NULL, NULL);
-
- len = ff_mp4_read_descr(s, &pb, &tag);
- if (tag == MP4IODescrTag) {
- avio_rb16(&pb); // ID
- avio_r8(&pb);
- avio_r8(&pb);
- avio_r8(&pb);
- avio_r8(&pb);
- avio_r8(&pb);
- len = ff_mp4_read_descr(s, &pb, &tag);
- if (tag == MP4ESDescrTag) {
- ff_mp4_parse_es_descr(&pb, es_id);
- av_dlog(s, "ES_ID %#x\n", *es_id);
- len = ff_mp4_read_descr(s, &pb, &tag);
- if (tag == MP4DecConfigDescrTag) {
- *dec_config_descr = av_malloc(len);
- if (!*dec_config_descr)
- return AVERROR(ENOMEM);
- *dec_config_descr_size = len;
- avio_read(&pb, *dec_config_descr, len);
+ Mp4Descr mp4_descr[MAX_MP4_DESCR_COUNT] = {{ 0 }};
+ int mp4_descr_count = 0;
+ int i, pid;
+ AVFormatContext *s = ts->stream;
+
+ p_end = section + section_len - 4;
+ p = section;
+ if (parse_section_header(&h, &p, p_end) < 0)
+ return;
+ if (h.tid != M4OD_TID)
+ return;
+
+ mp4_read_od(s, p, (unsigned)(p_end - p), mp4_descr, &mp4_descr_count, MAX_MP4_DESCR_COUNT);
+
+ for (pid = 0; pid < NB_PID_MAX; pid++) {
+ if (!ts->pids[pid])
+ continue;
+ for (i = 0; i < mp4_descr_count; i++) {
+ PESContext *pes;
+ AVStream *st;
+ if (ts->pids[pid]->es_id != mp4_descr[i].es_id)
+ continue;
+ if (!(ts->pids[pid] && ts->pids[pid]->type == MPEGTS_PES)) {
+ av_log(s, AV_LOG_ERROR, "pid %x is not PES\n", pid);
+ continue;
+ }
+ pes = ts->pids[pid]->u.pes_filter.opaque;
+ st = pes->st;
+ if (!st) {
+ continue;
+ }
+
+ pes->sl = mp4_descr[i].sl;
+
+ ffio_init_context(&pb, mp4_descr[i].dec_config_descr,
+ mp4_descr[i].dec_config_descr_len, 0, NULL, NULL, NULL, NULL);
+ ff_mp4_read_dec_config_descr(s, st, &pb);
+ if (st->codec->codec_id == CODEC_ID_AAC &&
+ st->codec->extradata_size > 0)
+ st->need_parsing = 0;
+ if (st->codec->codec_id == CODEC_ID_H264 &&
+ st->codec->extradata_size > 0)
+ st->need_parsing = 0;
+
+ if (st->codec->codec_id <= CODEC_ID_NONE) {
+ } else if (st->codec->codec_id < CODEC_ID_FIRST_AUDIO) {
+ st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
+ } else if (st->codec->codec_id < CODEC_ID_FIRST_SUBTITLE) {
+ st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
+ } else if (st->codec->codec_id < CODEC_ID_FIRST_UNKNOWN) {
+ st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
}
}
}
- return 0;
+ for (i = 0; i < mp4_descr_count; i++)
+ av_free(mp4_descr[i].dec_config_descr);
}
int ff_parse_mpeg2_descriptor(AVFormatContext *fc, AVStream *st, int stream_type,
const uint8_t **pp, const uint8_t *desc_list_end,
- int mp4_dec_config_descr_len, int mp4_es_id, int pid,
- uint8_t *mp4_dec_config_descr)
+ Mp4Descr *mp4_descr, int mp4_descr_count, int pid,
+ MpegTSContext *ts)
{
const uint8_t *desc_end;
- int desc_len, desc_tag;
+ int desc_len, desc_tag, desc_es_id;
char language[252];
int i;
@@ -959,13 +1290,31 @@ int ff_parse_mpeg2_descriptor(AVFormatContext *fc, AVStream *st, int stream_type
mpegts_find_stream_type(st, desc_tag, DESC_types);
switch(desc_tag) {
+ case 0x1E: /* SL descriptor */
+ desc_es_id = get16(pp, desc_end);
+ if (ts && ts->pids[pid])
+ ts->pids[pid]->es_id = desc_es_id;
+ for (i = 0; i < mp4_descr_count; i++)
+ if (mp4_descr[i].dec_config_descr_len &&
+ mp4_descr[i].es_id == desc_es_id) {
+ AVIOContext pb;
+ ffio_init_context(&pb, mp4_descr[i].dec_config_descr,
+ mp4_descr[i].dec_config_descr_len, 0, NULL, NULL, NULL, NULL);
+ ff_mp4_read_dec_config_descr(fc, st, &pb);
+ if (st->codec->codec_id == CODEC_ID_AAC &&
+ st->codec->extradata_size > 0)
+ st->need_parsing = 0;
+ if (st->codec->codec_id == CODEC_ID_MPEG4SYSTEMS)
+ mpegts_open_section_filter(ts, pid, m4sl_cb, ts, 1);
+ }
+ break;
case 0x1F: /* FMC descriptor */
get16(pp, desc_end);
- if ((st->codec->codec_id == CODEC_ID_AAC_LATM || st->request_probe>0) &&
- mp4_dec_config_descr_len && mp4_es_id == pid) {
+ if (mp4_descr_count > 0 && (st->codec->codec_id == CODEC_ID_AAC_LATM || st->request_probe>0) &&
+ mp4_descr->dec_config_descr_len && mp4_descr->es_id == pid) {
AVIOContext pb;
- ffio_init_context(&pb, mp4_dec_config_descr,
- mp4_dec_config_descr_len, 0, NULL, NULL, NULL, NULL);
+ ffio_init_context(&pb, mp4_descr->dec_config_descr,
+ mp4_descr->dec_config_descr_len, 0, NULL, NULL, NULL, NULL);
ff_mp4_read_dec_config_descr(fc, st, &pb);
if (st->codec->codec_id == CODEC_ID_AAC &&
st->codec->extradata_size > 0){
@@ -1054,9 +1403,10 @@ static void pmt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
int program_info_length, pcr_pid, pid, stream_type;
int desc_list_len;
uint32_t prog_reg_desc = 0; /* registration descriptor */
- uint8_t *mp4_dec_config_descr = NULL;
- int mp4_dec_config_descr_len = 0;
- int mp4_es_id = 0;
+
+ Mp4Descr mp4_descr[MAX_MP4_DESCR_COUNT] = {{ 0 }};
+ int mp4_descr_count = 0;
+ int i;
av_dlog(ts->stream, "PMT: len %i\n", section_len);
hex_dump_debug(ts->stream, (uint8_t *)section, section_len);
@@ -1099,8 +1449,8 @@ static void pmt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
get8(&p, p_end); // scope
get8(&p, p_end); // label
len -= 2;
- mp4_read_iods(ts->stream, p, len, &mp4_es_id,
- &mp4_dec_config_descr, &mp4_dec_config_descr_len);
+ mp4_read_iods(ts->stream, p, len, mp4_descr + mp4_descr_count,
+ &mp4_descr_count, MAX_MP4_DESCR_COUNT);
} else if (tag == 0x05 && len >= 4) { // registration descriptor
prog_reg_desc = bytestream_get_le32(&p);
len -= 4;
@@ -1117,6 +1467,7 @@ static void pmt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
for(;;) {
st = 0;
+ pes = NULL;
stream_type = get8(&p, p_end);
if (stream_type < 0)
break;
@@ -1132,19 +1483,28 @@ static void pmt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
pes->st->id = pes->pid;
}
st = pes->st;
- } else {
+ } else if (stream_type != 0x13) {
if (ts->pids[pid]) mpegts_close_filter(ts, ts->pids[pid]); //wrongly added sdt filter probably
pes = add_pes_stream(ts, pid, pcr_pid);
if (pes) {
st = avformat_new_stream(pes->stream, NULL);
st->id = pes->pid;
}
+ } else {
+ int idx = ff_find_stream_index(ts->stream, pid);
+ if (idx >= 0) {
+ st = ts->stream->streams[idx];
+ } else {
+ st = avformat_new_stream(pes->stream, NULL);
+ st->id = pid;
+ st->codec->codec_type = AVMEDIA_TYPE_DATA;
+ }
}
if (!st)
goto out;
- if (!pes->stream_type)
+ if (pes && !pes->stream_type)
mpegts_set_stream_info(st, pes, stream_type, prog_reg_desc);
add_pid_to_pmt(ts, h->id, pid);
@@ -1159,10 +1519,10 @@ static void pmt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
break;
for(;;) {
if (ff_parse_mpeg2_descriptor(ts->stream, st, stream_type, &p, desc_list_end,
- mp4_dec_config_descr_len, mp4_es_id, pid, mp4_dec_config_descr) < 0)
+ mp4_descr, mp4_descr_count, pid, ts) < 0)
break;
- if (prog_reg_desc == AV_RL32("HDMV") && stream_type == 0x83 && pes->sub_st) {
+ if (pes && prog_reg_desc == AV_RL32("HDMV") && stream_type == 0x83 && pes->sub_st) {
ff_program_add_stream_index(ts->stream, h->id, pes->sub_st->index);
pes->sub_st->codec->codec_tag = st->codec->codec_tag;
}
@@ -1171,7 +1531,8 @@ static void pmt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
}
out:
- av_free(mp4_dec_config_descr);
+ for (i = 0; i < mp4_descr_count; i++)
+ av_free(mp4_descr[i].dec_config_descr);
}
static void pat_cb(MpegTSFilter *filter, const uint8_t *section, int section_len)
@@ -1839,7 +2200,7 @@ static int read_seek2(AVFormatContext *s,
ts_adj = target_ts;
stream_index_gen_search = stream_index;
}
- pos = av_gen_search(s, stream_index_gen_search, ts_adj,
+ pos = ff_gen_search(s, stream_index_gen_search, ts_adj,
0, INT64_MAX, -1,
AV_NOPTS_VALUE,
AV_NOPTS_VALUE,
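
The parse_MP4SLDescrTag() hunk above decodes the non-predefined SLConfigDescriptor layout: one flag byte carrying six boolean options, a 32-bit timestamp resolution (plus a skipped 32-bit OCR resolution), four 8-bit length fields, and a 16-bit word packing three further bit-lengths. The following standalone sketch mirrors that unpacking with plain pointer arithmetic instead of an AVIOContext; it is an illustration of the byte layout, not the FFmpeg function, and struct sl_config / unpack_sl_config are made-up names that merely mirror the SLConfigDescr fields added in mpegts.h below.

    #include <stdint.h>

    struct sl_config {
        int use_au_start, use_au_end, use_rand_acc_pt;
        int use_padding, use_timestamps, use_idle;
        uint32_t timestamp_res;
        int timestamp_len, ocr_len, au_len, inst_bitrate_len;
        int degr_prior_len, au_seq_num_len, packet_seq_num_len;
    };

    /* Unpack a non-predefined SLConfigDescriptor body (the bytes following the
     * 'predefined' byte).  Returns 0 on success, -1 if the buffer is too short. */
    static int unpack_sl_config(const uint8_t *p, int len, struct sl_config *sl)
    {
        int flags, lengths;

        if (len < 15)   /* 1 flag + 4 ts res + 4 OCR res + 4 length bytes + 2 packed lengths */
            return -1;
        flags                   = p[0];
        sl->use_au_start        = !!(flags & 0x80);
        sl->use_au_end          = !!(flags & 0x40);
        sl->use_rand_acc_pt     = !!(flags & 0x20);
        sl->use_padding         = !!(flags & 0x08);
        sl->use_timestamps      = !!(flags & 0x04);
        sl->use_idle            = !!(flags & 0x02);
        sl->timestamp_res       = ((uint32_t)p[1] << 24) | (p[2] << 16) | (p[3] << 8) | p[4];
        /* p[5..8] is the OCR resolution, skipped like the bare avio_rb32() above */
        sl->timestamp_len       = p[9];
        sl->ocr_len             = p[10];
        sl->au_len              = p[11];
        sl->inst_bitrate_len    = p[12];
        lengths                 = (p[13] << 8) | p[14];
        sl->degr_prior_len      =  lengths >> 12;
        sl->au_seq_num_len      = (lengths >> 7) & 0x1f;
        sl->packet_seq_num_len  = (lengths >> 2) & 0x1f;
        return 0;
    }
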
diff --git a/libavformat/mpegts.h b/libavformat/mpegts.h
index e60329711b..edec79ce2f 100644
--- a/libavformat/mpegts.h
+++ b/libavformat/mpegts.h
@@ -39,6 +39,7 @@
/* table ids */
#define PAT_TID 0x00
#define PMT_TID 0x02
+#define M4OD_TID 0x05
#define SDT_TID 0x42
#define STREAM_TYPE_VIDEO_MPEG1 0x01
@@ -64,6 +65,30 @@ int ff_mpegts_parse_packet(MpegTSContext *ts, AVPacket *pkt,
const uint8_t *buf, int len);
void ff_mpegts_parse_close(MpegTSContext *ts);
+typedef struct {
+ int use_au_start;
+ int use_au_end;
+ int use_rand_acc_pt;
+ int use_padding;
+ int use_timestamps;
+ int use_idle;
+ int timestamp_res;
+ int timestamp_len;
+ int ocr_len;
+ int au_len;
+ int inst_bitrate_len;
+ int degr_prior_len;
+ int au_seq_num_len;
+ int packet_seq_num_len;
+} SLConfigDescr;
+
+typedef struct {
+ int es_id;
+ int dec_config_descr_len;
+ uint8_t *dec_config_descr;
+ SLConfigDescr sl;
+} Mp4Descr;
+
/**
* Parse an MPEG-2 descriptor
* @param[in] fc Format context (used for logging only)
@@ -79,7 +104,7 @@ void ff_mpegts_parse_close(MpegTSContext *ts);
*/
int ff_parse_mpeg2_descriptor(AVFormatContext *fc, AVStream *st, int stream_type,
const uint8_t **pp, const uint8_t *desc_list_end,
- int mp4_dec_config_descr_len, int mp4_es_id, int pid,
- uint8_t *mp4_dec_config_descr);
+ Mp4Descr *mp4_descr, int mp4_descr_count, int pid,
+ MpegTSContext *ts);
#endif /* AVFORMAT_MPEGTS_H */
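
ff_parse_mpeg2_descriptor() now receives the whole Mp4Descr array, its count and the MpegTSContext instead of a single pre-extracted decoder config; callers without MPEG-4 descriptor state simply pass NULL/0/NULL, as the wtvdec.c hunk further down shows. Both this function and the new recursive MP4 parser walk the same tag/length-prefixed wire format; as a reminder of that layout, here is a small self-contained walker over a raw MPEG-2 descriptor loop (an illustration of the format, not the library routine; walk_descriptors is a made-up name):

    #include <stdint.h>
    #include <stdio.h>

    /* Walk an MPEG-2 descriptor loop: each descriptor is a 1-byte tag,
     * a 1-byte length, then 'length' bytes of payload. */
    static void walk_descriptors(const uint8_t *p, const uint8_t *end)
    {
        while (end - p >= 2) {
            int tag = p[0];
            int len = p[1];
            p += 2;
            if (len > end - p)          /* truncated descriptor: stop */
                break;
            /* tags 0x1E (SL descriptor) and 0x1F (FMC descriptor) are the
             * ones the new mpegts.c code inspects for MPEG-4 information */
            printf("descriptor tag 0x%02x, %d payload byte(s)\n", tag, len);
            p += len;
        }
    }
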
diff --git a/libavformat/mxfdec.c b/libavformat/mxfdec.c
index e1fec3dbbb..efc4bd1aef 100644
--- a/libavformat/mxfdec.c
+++ b/libavformat/mxfdec.c
@@ -49,6 +49,7 @@
#include "libavutil/mathematics.h"
#include "libavcodec/bytestream.h"
#include "avformat.h"
+#include "internal.h"
#include "mxf.h"
typedef enum {
@@ -1141,7 +1142,7 @@ static int mxf_read_seek(AVFormatContext *s, int stream_index, int64_t sample_ti
seconds = av_rescale(sample_time, st->time_base.num, st->time_base.den);
if (avio_seek(s->pb, (s->bit_rate * seconds) >> 3, SEEK_SET) < 0)
return -1;
- av_update_cur_dts(s, st, sample_time);
+ ff_update_cur_dts(s, st, sample_time);
return 0;
}
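
For context, the unchanged lines around the mxfdec hunk seek by approximating a byte position from the bit rate: the requested sample_time is rescaled to seconds through the stream time base, multiplied by bit_rate and divided by 8, and ff_update_cur_dts() then records the new timestamp. A toy calculation of that arithmetic, with all figures invented for illustration:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* invented figures: 48 kHz time base, 1.152 Mbit/s constant bit rate */
        int64_t sample_time = 480000;      /* requested position, in time_base units */
        int     tb_num = 1, tb_den = 48000;
        int64_t bit_rate = 1152000;

        int64_t seconds = sample_time * tb_num / tb_den;   /* integer approximation of the av_rescale() call */
        int64_t pos     = (bit_rate * seconds) >> 3;       /* bits per second -> bytes */

        printf("seek to byte %"PRId64" for t = %"PRId64" s\n", pos, seconds);
        return 0;
    }
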
diff --git a/libavformat/nutdec.c b/libavformat/nutdec.c
index 6015bf6900..f0b54f6982 100644
--- a/libavformat/nutdec.c
+++ b/libavformat/nutdec.c
@@ -874,16 +874,16 @@ static int read_seek(AVFormatContext *s, int stream_index, int64_t pts, int flag
(void **) next_node);
av_log(s, AV_LOG_DEBUG, "%"PRIu64"-%"PRIu64" %"PRId64"-%"PRId64"\n", next_node[0]->pos, next_node[1]->pos,
next_node[0]->ts , next_node[1]->ts);
- pos= av_gen_search(s, -1, dummy.ts, next_node[0]->pos, next_node[1]->pos, next_node[1]->pos,
- next_node[0]->ts , next_node[1]->ts, AVSEEK_FLAG_BACKWARD, &ts, nut_read_timestamp);
+ pos = ff_gen_search(s, -1, dummy.ts, next_node[0]->pos, next_node[1]->pos, next_node[1]->pos,
+ next_node[0]->ts , next_node[1]->ts, AVSEEK_FLAG_BACKWARD, &ts, nut_read_timestamp);
if(!(flags & AVSEEK_FLAG_BACKWARD)){
dummy.pos= pos+16;
next_node[1]= &nopts_sp;
av_tree_find(nut->syncpoints, &dummy, (void *) ff_nut_sp_pos_cmp,
(void **) next_node);
- pos2= av_gen_search(s, -2, dummy.pos, next_node[0]->pos , next_node[1]->pos, next_node[1]->pos,
- next_node[0]->back_ptr, next_node[1]->back_ptr, flags, &ts, nut_read_timestamp);
+ pos2 = ff_gen_search(s, -2, dummy.pos, next_node[0]->pos , next_node[1]->pos, next_node[1]->pos,
+ next_node[0]->back_ptr, next_node[1]->back_ptr, flags, &ts, nut_read_timestamp);
if(pos2>=0)
pos= pos2;
//FIXME dir but I think it does not matter
diff --git a/libavformat/oggdec.c b/libavformat/oggdec.c
index 8e7653c6b8..d453ae6775 100644
--- a/libavformat/oggdec.c
+++ b/libavformat/oggdec.c
@@ -32,6 +32,7 @@
#include <stdio.h>
#include "oggdec.h"
#include "avformat.h"
+#include "internal.h"
#include "vorbiscomment.h"
#define MAX_PAGE_SIZE 65307
@@ -661,7 +662,7 @@ static int ogg_read_seek(AVFormatContext *s, int stream_index,
&& !(flags & AVSEEK_FLAG_ANY))
os->keyframe_seek = 1;
- ret = av_seek_frame_binary(s, stream_index, timestamp, flags);
+ ret = ff_seek_frame_binary(s, stream_index, timestamp, flags);
os = ogg->streams + stream_index;
if (ret < 0)
os->keyframe_seek = 0;
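
ogg_read_seek() (like nutdec above) now calls the ff_-prefixed seek helpers. ff_seek_frame_binary()/ff_gen_search() bisect a byte range of the file, probing timestamps through the demuxer's read_timestamp callback until the target is bracketed. A heavily simplified, self-contained sketch of that idea follows; the names are hypothetical, and the real function additionally interpolates probe positions, honours pos_limit and handles AVSEEK_FLAG_BACKWARD.

    #include <stdint.h>

    typedef int64_t (*read_ts_fn)(void *opaque, int64_t pos);

    /* Bisect [pos_min, pos_max] until the probed timestamp brackets target_ts.
     * 'read_ts' stands in for AVInputFormat.read_timestamp. */
    static int64_t bisect_for_ts(void *opaque, read_ts_fn read_ts, int64_t target_ts,
                                 int64_t pos_min, int64_t pos_max)
    {
        while (pos_max - pos_min > 1) {
            int64_t pos = pos_min + (pos_max - pos_min) / 2;
            if (read_ts(opaque, pos) <= target_ts)
                pos_min = pos;          /* target lies at or after this position */
            else
                pos_max = pos;
        }
        return pos_min;
    }
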
diff --git a/libavformat/seek-test.c b/libavformat/seek-test.c
index cea36cabb2..3a448cfe3b 100644
--- a/libavformat/seek-test.c
+++ b/libavformat/seek-test.c
@@ -64,10 +64,10 @@ int main(int argc, char **argv)
AVFormatContext *ic = NULL;
int i, ret, stream_id;
int64_t timestamp;
- AVFormatParameters params, *ap= &params;
- memset(ap, 0, sizeof(params));
- ap->channels=1;
- ap->sample_rate= 22050;
+ AVDictionary *format_opts = NULL;
+
+ av_dict_set(&format_opts, "channels", "1", 0);
+ av_dict_set(&format_opts, "sample_rate", "22050", 0);
/* initialize libavcodec, and register all codecs and formats */
av_register_all();
@@ -80,7 +80,8 @@ int main(int argc, char **argv)
filename = argv[1];
- ret = av_open_input_file(&ic, filename, NULL, 0, ap);
+ ret = avformat_open_input(&ic, filename, NULL, &format_opts);
+ av_dict_free(&format_opts);
if (ret < 0) {
fprintf(stderr, "cannot open %s\n", filename);
exit(1);
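
seek-test.c is ported from the removed AVFormatParameters argument to the AVDictionary options of avformat_open_input(); the dictionary must be freed by the caller whether or not the demuxer consumed every entry. A minimal sketch of that pattern against the lavf API of this period (the helper name is a placeholder):

    #include <libavformat/avformat.h>
    #include <libavutil/dict.h>

    static int open_raw_audio(const char *filename, AVFormatContext **ic)
    {
        AVDictionary *opts = NULL;
        int ret;

        /* options that previously travelled in AVFormatParameters */
        av_dict_set(&opts, "channels",    "1",     0);
        av_dict_set(&opts, "sample_rate", "22050", 0);

        *ic = NULL;                       /* let libavformat allocate the context */
        ret = avformat_open_input(ic, filename, NULL, &opts);

        av_dict_free(&opts);              /* caller owns the dictionary either way */
        return ret;
    }
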
diff --git a/libavformat/utils.c b/libavformat/utils.c
index 1c6a4684a1..8a78308447 100644
--- a/libavformat/utils.c
+++ b/libavformat/utils.c
@@ -340,6 +340,7 @@ AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score
fmt = NULL;
}
*score_ret= score_max;
+
return fmt;
}
@@ -1427,7 +1428,15 @@ void ff_read_frame_flush(AVFormatContext *s)
}
}
-void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
+#if FF_API_SEEK_PUBLIC
+void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
+{
+ return ff_update_cur_dts(s, ref_st, timestamp);
+}
+#endif
+
+void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
+{
int i;
for(i = 0; i < s->nb_streams; i++) {
@@ -1547,7 +1556,14 @@ int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
wanted_timestamp, flags);
}
+#if FF_API_SEEK_PUBLIC
int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
+ return ff_seek_frame_binary(s, stream_index, target_ts, flags);
+}
+#endif
+
+int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
+{
AVInputFormat *avif= s->iformat;
int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
int64_t ts_min, ts_max, ts;
@@ -1594,7 +1610,7 @@ int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts
}
}
- pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
+ pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
if(pos<0)
return -1;
@@ -1603,12 +1619,28 @@ int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts
return ret;
ff_read_frame_flush(s);
- av_update_cur_dts(s, st, ts);
+ ff_update_cur_dts(s, st, ts);
return 0;
}
-int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
+#if FF_API_SEEK_PUBLIC
+int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
+ int64_t pos_min, int64_t pos_max, int64_t pos_limit,
+ int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
+ int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
+{
+ return ff_gen_search(s, stream_index, target_ts, pos_min, pos_max,
+ pos_limit, ts_min, ts_max, flags, ts_ret,
+ read_timestamp);
+}
+#endif
+
+int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
+ int64_t pos_min, int64_t pos_max, int64_t pos_limit,
+ int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
+ int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
+{
int64_t pos, ts;
int64_t start_pos, filesize;
int no_change;
@@ -1775,7 +1807,7 @@ static int seek_frame_generic(AVFormatContext *s,
ie= &st->index_entries[st->nb_index_entries-1];
if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
return ret;
- av_update_cur_dts(s, st, ie->timestamp);
+ ff_update_cur_dts(s, st, ie->timestamp);
}else{
if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
return ret;
@@ -1812,7 +1844,7 @@ static int seek_frame_generic(AVFormatContext *s,
ie = &st->index_entries[index];
if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
return ret;
- av_update_cur_dts(s, st, ie->timestamp);
+ ff_update_cur_dts(s, st, ie->timestamp);
return 0;
}
@@ -1853,7 +1885,7 @@ int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int f
if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
ff_read_frame_flush(s);
- return av_seek_frame_binary(s, stream_index, timestamp, flags);
+ return ff_seek_frame_binary(s, stream_index, timestamp, flags);
} else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
ff_read_frame_flush(s);
return seek_frame_generic(s, stream_index, timestamp, flags);
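
The pattern used throughout this utils.c hunk is the usual libavformat deprecation dance: the implementation moves to an internal ff_-prefixed function, and the old public av_* symbol survives only as a thin wrapper compiled while FF_API_SEEK_PUBLIC (defined in version.h below) is non-zero, so it disappears automatically at the next major bump. In generic form, with made-up names:

    #define LIBX_VERSION_MAJOR 53

    /* version.h: the guard evaluates to 0 once the major version is bumped */
    #ifndef FF_API_OLD_ENTRYPOINT
    #define FF_API_OLD_ENTRYPOINT (LIBX_VERSION_MAJOR < 54)
    #endif

    /* the library-private implementation that new code calls directly */
    static int ff_do_work(int x)
    {
        return 2 * x;
    }

    #if FF_API_OLD_ENTRYPOINT
    /* deprecated public wrapper, kept only for source/ABI compatibility */
    int av_do_work(int x)
    {
        return ff_do_work(x);
    }
    #endif
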
diff --git a/libavformat/version.h b/libavformat/version.h
index 58c3a74bff..6494212647 100644
--- a/libavformat/version.h
+++ b/libavformat/version.h
@@ -107,5 +107,11 @@
#ifndef FF_API_STREAM_COPY
#define FF_API_STREAM_COPY (LIBAVFORMAT_VERSION_MAJOR < 54)
#endif
+#ifndef FF_API_SEEK_PUBLIC
+#define FF_API_SEEK_PUBLIC (LIBAVFORMAT_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_REORDER_PRIVATE
+#define FF_API_REORDER_PRIVATE (LIBAVFORMAT_VERSION_MAJOR < 54)
+#endif
#endif /* AVFORMAT_VERSION_H */
diff --git a/libavformat/wtvdec.c b/libavformat/wtvdec.c
index 88587c4b52..543fcb68d2 100644
--- a/libavformat/wtvdec.c
+++ b/libavformat/wtvdec.c
@@ -773,7 +773,7 @@ static int parse_chunks(AVFormatContext *s, int mode, int64_t seekts, int *len_p
buf_size = FFMIN(len - consumed, sizeof(buf));
avio_read(pb, buf, buf_size);
consumed += buf_size;
- ff_parse_mpeg2_descriptor(s, st, 0, &pbuf, buf + buf_size, 0, 0, 0, 0);
+ ff_parse_mpeg2_descriptor(s, st, 0, &pbuf, buf + buf_size, NULL, 0, 0, NULL);
}
} else if (!ff_guidcmp(g, EVENTID_AudioTypeSpanningEvent)) {
int stream_index = ff_find_stream_index(s, sid);
diff --git a/tools/pktdumper.c b/tools/pktdumper.c
index 82f417f9f1..831462b07f 100644
--- a/tools/pktdumper.c
+++ b/tools/pktdumper.c
@@ -44,7 +44,7 @@ int main(int argc, char **argv)
{
char fntemplate[PATH_MAX];
char pktfilename[PATH_MAX];
- AVFormatContext *fctx;
+ AVFormatContext *fctx = NULL;
AVPacket pkt;
int64_t pktnum = 0;
int64_t maxpkts = 0;
@@ -83,9 +83,9 @@ int main(int argc, char **argv)
// register all file formats
av_register_all();
- err = av_open_input_file(&fctx, argv[1], NULL, 0, NULL);
+ err = avformat_open_input(&fctx, argv[1], NULL, NULL);
if (err < 0) {
- fprintf(stderr, "av_open_input_file: error %d\n", err);
+ fprintf(stderr, "cannot open input: error %d\n", err);
return 1;
}
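
pktdumper now zero-initializes fctx before the call, since avformat_open_input() expects either NULL (the context is then allocated internally) or a context obtained from avformat_alloc_context(). A minimal open/read/close loop against the API of this period (error handling trimmed; the helper name is a placeholder, and av_close_input_file() is still the matching close call at this point in the tree):

    #include <stdio.h>
    #include <libavformat/avformat.h>

    static int dump_packet_sizes(const char *filename)
    {
        AVFormatContext *fctx = NULL;     /* must be NULL or freshly allocated */
        AVPacket pkt;
        int err;

        av_register_all();

        err = avformat_open_input(&fctx, filename, NULL, NULL);
        if (err < 0) {
            fprintf(stderr, "cannot open input: error %d\n", err);
            return err;
        }

        while (av_read_frame(fctx, &pkt) >= 0) {
            printf("stream %d: %d byte(s)\n", pkt.stream_index, pkt.size);
            av_free_packet(&pkt);
        }

        av_close_input_file(fctx);
        return 0;
    }
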