author      Michael Niedermayer <michaelni@gmx.at>    2012-01-03 02:25:56 +0100
committer   Michael Niedermayer <michaelni@gmx.at>    2012-01-03 03:06:45 +0100
commit      7d8f1158436c261d2d1657c33e731f9bec650c51 (patch)
tree        cf3c0261ba5202ad949af637a026b11dc4631a00
parent      45552371e3434fb7aa4d0bc566fd4ef954f9af14 (diff)
parent      881a5e047dc78ec9ab771817497dffec503d77ee (diff)
download    ffmpeg-7d8f1158436c261d2d1657c33e731f9bec650c51.tar.gz
Merge remote-tracking branch 'qatar/master'
* qatar/master:
  mpegenc: use avctx->slices as number of slices
  v410enc: fix undefined signed left shift caused by integer promotion
  Release notes: mention cleaned up header includes
  fix Changelog file
  Fix a bunch of typos.
  Drop some pointless void* return value casts from av_malloc() invocations.
  wavpack: fix typos in previous cosmetic clean-up commit
  wavpack: cosmetics: K&R pretty-printing
  avconv: remove the 'codec framerate is different from stream' warning
  wavpack: determine sample_fmt before requesting a buffer
  bmv audio: implement new audio decoding API
  mpegaudiodec: skip all channels when skipping granules
  mpegenc: simplify muxrate calculation

Conflicts:
	Changelog
	avconv.c
	doc/RELEASE_NOTES
	libavcodec/h264.c
	libavcodec/mpeg12.c
	libavcodec/mpegaudiodec.c
	libavcodec/mpegvideo.c
	libavformat/mpegenc.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
-rw-r--r--  Changelog                              4
-rw-r--r--  avconv.c                              11
-rw-r--r--  ffmpeg.c                              11
-rw-r--r--  libavcodec/bmv.c                      34
-rw-r--r--  libavcodec/error_resilience.c          8
-rw-r--r--  libavcodec/h264.c                     12
-rw-r--r--  libavcodec/imgconvert.c                2
-rw-r--r--  libavcodec/mpeg12.c                    4
-rw-r--r--  libavcodec/mpegaudiodec.c              1
-rw-r--r--  libavcodec/mpegvideo.c                53
-rw-r--r--  libavcodec/mpegvideo.h                 1
-rw-r--r--  libavcodec/mpegvideo_enc.c             5
-rw-r--r--  libavcodec/options.c                   2
-rw-r--r--  libavcodec/snow.c                      6
-rw-r--r--  libavcodec/v410enc.c                   2
-rw-r--r--  libavcodec/wavpack.c                 690
-rw-r--r--  libavformat/mmst.c                     2
-rw-r--r--  libavformat/mpegenc.c                  2
-rw-r--r--  libavformat/rmenc.c                    2
-rw-r--r--  libpostproc/postprocess_template.c     4
-rw-r--r--  libswscale/colorspace-test.c           4
-rwxr-xr-x  tests/codec-regression.sh              6
-rwxr-xr-x  tools/patcheck                         2
23 files changed, 454 insertions, 414 deletions
diff --git a/Changelog b/Changelog
index 7b5d27778e..09e44d4935 100644
--- a/Changelog
+++ b/Changelog
@@ -13,8 +13,6 @@ version next:
- tinterlace video filter
- astreamsync audio filter
- amerge audio filter
-- Indeo 4 decoder
-- SMJPEG demuxer
- Automatic thread count based on detection number of (available) CPU cores
- y41p Brooktree Uncompressed 4:1:1 12-bit encoder and decoder
@@ -153,6 +151,8 @@ easier to use. The changes are:
- Dxtory capture format decoder
- cellauto source
- Simple segmenting muxer
+- Indeo 4 decoder
+- SMJPEG demuxer
version 0.8:
diff --git a/avconv.c b/avconv.c
index d4ffcb0765..a8f3d70339 100644
--- a/avconv.c
+++ b/avconv.c
@@ -3196,7 +3196,7 @@ static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *
*/
static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
{
- int i, rfps, rfps_base;
+ int i;
for (i = 0; i < ic->nb_streams; i++) {
AVStream *st = ic->streams[i];
@@ -3225,19 +3225,10 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
case AVMEDIA_TYPE_VIDEO:
if(!ist->dec)
ist->dec = avcodec_find_decoder(dec->codec_id);
- rfps = ic->streams[i]->r_frame_rate.num;
- rfps_base = ic->streams[i]->r_frame_rate.den;
if (dec->lowres) {
dec->flags |= CODEC_FLAG_EMU_EDGE;
}
- if (dec->time_base.den != rfps * dec->ticks_per_frame || dec->time_base.num != rfps_base) {
-
- av_log(NULL, AV_LOG_INFO,"\nSeems stream %d codec frame rate differs from container frame rate: %2.2f (%d/%d) -> %2.2f (%d/%d)\n",
- i, (float)dec->time_base.den / dec->time_base.num, dec->time_base.den, dec->time_base.num,
- (float)rfps / rfps_base, rfps, rfps_base);
- }
-
if (o->video_disable)
st->discard = AVDISCARD_ALL;
else if (video_discard)
diff --git a/ffmpeg.c b/ffmpeg.c
index fe15eea3f7..2323552fdf 100644
--- a/ffmpeg.c
+++ b/ffmpeg.c
@@ -3463,7 +3463,7 @@ static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *
*/
static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
{
- int i, rfps, rfps_base;
+ int i;
char *next, *codec_tag = NULL;
for (i = 0; i < ic->nb_streams; i++) {
@@ -3501,19 +3501,10 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
case AVMEDIA_TYPE_VIDEO:
if(!ist->dec)
ist->dec = avcodec_find_decoder(dec->codec_id);
- rfps = ic->streams[i]->r_frame_rate.num;
- rfps_base = ic->streams[i]->r_frame_rate.den;
if (dec->lowres) {
dec->flags |= CODEC_FLAG_EMU_EDGE;
}
- if (dec->time_base.den != rfps * dec->ticks_per_frame || dec->time_base.num != rfps_base) {
-
- av_log(NULL, AV_LOG_INFO,"\nSeems stream %d codec frame rate differs from container frame rate: %2.2f (%d/%d) -> %2.2f (%d/%d)\n",
- i, (float)dec->time_base.den / dec->time_base.num, dec->time_base.den, dec->time_base.num,
- (float)rfps / rfps_base, rfps, rfps_base);
- }
-
if (o->video_disable)
st->discard = AVDISCARD_ALL;
else if (video_discard)
diff --git a/libavcodec/bmv.c b/libavcodec/bmv.c
index e98d5998ca..37c844858f 100644
--- a/libavcodec/bmv.c
+++ b/libavcodec/bmv.c
@@ -285,12 +285,17 @@ static av_cold int decode_end(AVCodecContext *avctx)
return 0;
}
+typedef struct BMVAudioDecContext {
+ AVFrame frame;
+} BMVAudioDecContext;
+
static const int bmv_aud_mults[16] = {
16512, 8256, 4128, 2064, 1032, 516, 258, 192, 129, 88, 64, 56, 48, 40, 36, 32
};
static av_cold int bmv_aud_decode_init(AVCodecContext *avctx)
{
+ BMVAudioDecContext *c = avctx->priv_data;
if (avctx->channels != 2) {
av_log(avctx, AV_LOG_INFO, "invalid number of channels\n");
@@ -299,17 +304,21 @@ static av_cold int bmv_aud_decode_init(AVCodecContext *avctx)
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+ avcodec_get_frame_defaults(&c->frame);
+ avctx->coded_frame = &c->frame;
+
return 0;
}
-static int bmv_aud_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
- AVPacket *avpkt)
+static int bmv_aud_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
+ BMVAudioDecContext *c = avctx->priv_data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
int blocks = 0, total_blocks, i;
- int out_size;
- int16_t *output_samples = data;
+ int ret;
+ int16_t *output_samples;
int scale[2];
total_blocks = *buf++;
@@ -318,11 +327,14 @@ static int bmv_aud_decode_frame(AVCodecContext *avctx, void *data, int *data_siz
total_blocks * 65 + 1, buf_size);
return AVERROR_INVALIDDATA;
}
- out_size = total_blocks * 64 * sizeof(*output_samples);
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
- return AVERROR(EINVAL);
+
+ /* get output buffer */
+ c->frame.nb_samples = total_blocks * 32;
+ if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
+ output_samples = (int16_t *)c->frame.data[0];
for (blocks = 0; blocks < total_blocks; blocks++) {
uint8_t code = *buf++;
@@ -335,7 +347,9 @@ static int bmv_aud_decode_frame(AVCodecContext *avctx, void *data, int *data_siz
}
}
- *data_size = out_size;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = c->frame;
+
return buf_size;
}
@@ -354,7 +368,9 @@ AVCodec ff_bmv_audio_decoder = {
.name = "bmv_audio",
.type = AVMEDIA_TYPE_AUDIO,
.id = CODEC_ID_BMV_AUDIO,
+ .priv_data_size = sizeof(BMVAudioDecContext),
.init = bmv_aud_decode_init,
.decode = bmv_aud_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Discworld II BMV audio"),
};
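
The bmv_audio hunks above implement the "bmv audio: implement new audio decoding API" item from the merge list: instead of writing into a caller-supplied int16_t buffer and reporting *data_size, the decoder now requests an AVFrame via avctx->get_buffer() and signals it through got_frame_ptr. A minimal caller-side sketch for that era's API (illustrative only, not part of this commit; error handling trimmed):

#include <libavcodec/avcodec.h>

static int decode_audio_packet(AVCodecContext *avctx, AVPacket *pkt)
{
    AVFrame *frame = avcodec_alloc_frame();   /* pre-av_frame_alloc() era */
    int got_frame  = 0;
    /* returns the number of bytes consumed from pkt, or a negative error */
    int ret = avcodec_decode_audio4(avctx, frame, &got_frame, pkt);

    if (ret >= 0 && got_frame) {
        /* frame->nb_samples samples in avctx->sample_fmt now sit in
         * frame->data[0]; for bmv_audio that is interleaved S16 stereo */
    }
    av_free(frame);
    return ret;
}
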
diff --git a/libavcodec/error_resilience.c b/libavcodec/error_resilience.c
index ee0e6fcfe2..638215f7d8 100644
--- a/libavcodec/error_resilience.c
+++ b/libavcodec/error_resilience.c
@@ -50,7 +50,11 @@ static void decode_mb(MpegEncContext *s, int ref){
h->mb_xy= s->mb_x + s->mb_y*s->mb_stride;
memset(h->non_zero_count_cache, 0, sizeof(h->non_zero_count_cache));
assert(ref>=0);
- if(ref >= h->ref_count[0]) //FIXME it is posible albeit uncommon that slice references differ between slices, we take the easy approuch and ignore it for now. If this turns out to have any relevance in practice then correct remapping should be added
+ /* FIXME: It is posible albeit uncommon that slice references
+ * differ between slices. We take the easy approach and ignore
+ * it for now. If this turns out to have any relevance in
+ * practice then correct remapping should be added. */
+ if (ref >= h->ref_count[0])
ref=0;
fill_rectangle(&s->current_picture.f.ref_index[0][4*h->mb_xy], 2, 2, 2, ref, 1);
fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
@@ -684,7 +688,7 @@ static int is_intra_more_likely(MpegEncContext *s){
if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration && s->pict_type == AV_PICTURE_TYPE_I)
return 1;
- skip_amount= FFMAX(undamaged_count/50, 1); //check only upto 50 MBs
+ skip_amount = FFMAX(undamaged_count / 50, 1); // check only up to 50 MBs
is_intra_likely=0;
j=0;
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index a9f15f44d9..8a79311f7c 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -1315,7 +1315,6 @@ int ff_h264_frame_start(H264Context *h){
MpegEncContext * const s = &h->s;
int i;
const int pixel_shift = h->pixel_shift;
- int thread_count = (s->avctx->active_thread_type & FF_THREAD_SLICE) ? s->avctx->thread_count : 1;
if(MPV_frame_start(s, s->avctx) < 0)
return -1;
@@ -1345,7 +1344,7 @@ int ff_h264_frame_start(H264Context *h){
/* can't be in alloc_tables because linesize isn't known there.
* FIXME: redo bipred weight to not require extra buffer? */
- for(i = 0; i < thread_count; i++)
+ for(i = 0; i < s->slice_context_count; i++)
if(h->thread_context[i] && !h->thread_context[i]->s.obmc_scratchpad)
h->thread_context[i]->s.obmc_scratchpad = av_malloc(16*6*s->linesize);
@@ -2852,7 +2851,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
return -1;
}
} else {
- for(i = 1; i < s->avctx->thread_count; i++) {
+ for(i = 1; i < s->slice_context_count; i++) {
H264Context *c;
c = h->thread_context[i] = av_malloc(sizeof(H264Context));
memcpy(c, h->s.thread_context[i], sizeof(MpegEncContext));
@@ -2866,7 +2865,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
clone_tables(c, h, i);
}
- for(i = 0; i < s->avctx->thread_count; i++)
+ for(i = 0; i < s->slice_context_count; i++)
if (context_init(h->thread_context[i]) < 0) {
av_log(h->s.avctx, AV_LOG_ERROR, "context_init() failed.\n");
return -1;
@@ -3782,7 +3781,10 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
h->nal_unit_type= 0;
- h->max_contexts = (HAVE_THREADS && (s->avctx->active_thread_type&FF_THREAD_SLICE)) ? avctx->thread_count : 1;
+ if(!s->slice_context_count)
+ s->slice_context_count= 1;
+ h->max_contexts = s->slice_context_count;
+
if(!(s->flags2 & CODEC_FLAG2_CHUNKS)){
h->current_slice = 0;
if (!s->first_field)
diff --git a/libavcodec/imgconvert.c b/libavcodec/imgconvert.c
index 2279875db2..8248793a0a 100644
--- a/libavcodec/imgconvert.c
+++ b/libavcodec/imgconvert.c
@@ -885,7 +885,7 @@ static void deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap,
uint8_t *src_m1, *src_0, *src_p1, *src_p2;
int y;
uint8_t *buf;
- buf = (uint8_t*)av_malloc(width);
+ buf = av_malloc(width);
src_m1 = src1;
memcpy(buf,src_m1,width);
diff --git a/libavcodec/mpeg12.c b/libavcodec/mpeg12.c
index 990b6d67cd..fcbea5429d 100644
--- a/libavcodec/mpeg12.c
+++ b/libavcodec/mpeg12.c
@@ -2485,7 +2485,9 @@ static int decode_chunks(AVCodecContext *avctx,
}
if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE)) {
- int threshold= (s2->mb_height*s->slice_count + avctx->thread_count/2) / avctx->thread_count;
+ int threshold = (s2->mb_height * s->slice_count +
+ s2->slice_context_count / 2) /
+ s2->slice_context_count;
av_assert0(avctx->thread_count > 1);
if (threshold <= mb_y) {
MpegEncContext *thread_context = s2->thread_context[s->slice_count];
diff --git a/libavcodec/mpegaudiodec.c b/libavcodec/mpegaudiodec.c
index 77a7cd95e7..eae030ce44 100644
--- a/libavcodec/mpegaudiodec.c
+++ b/libavcodec/mpegaudiodec.c
@@ -1427,6 +1427,7 @@ static int mp_decode_layer3(MPADecodeContext *s)
}
if (!s->adu_mode) {
+ int skip;
const uint8_t *ptr = s->gb.buffer + (get_bits_count(&s->gb)>>3);
assert((get_bits_count(&s->gb) & 7) == 0);
/* now we get bits from the main_data_begin offset */
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index 5c04e9d078..90eb737ec7 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -638,6 +638,8 @@ void MPV_common_defaults(MpegEncContext *s)
s->picture_range_start = 0;
s->picture_range_end = MAX_PICTURE_COUNT;
+
+ s->slice_context_count = 1;
}
/**
@@ -656,11 +658,13 @@ void MPV_decode_defaults(MpegEncContext *s)
*/
av_cold int MPV_common_init(MpegEncContext *s)
{
- int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y,
- threads = (s->encoding ||
- (HAVE_THREADS &&
- s->avctx->active_thread_type & FF_THREAD_SLICE)) ?
- s->avctx->thread_count : 1;
+ int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
+ int nb_slices = (HAVE_THREADS &&
+ s->avctx->active_thread_type & FF_THREAD_SLICE) ?
+ s->avctx->thread_count : 1;
+
+ if (s->encoding && s->avctx->slices)
+ nb_slices = s->avctx->slices;
if (s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
s->mb_height = (s->height + 31) / 32 * 2;
@@ -673,14 +677,15 @@ av_cold int MPV_common_init(MpegEncContext *s)
return -1;
}
- if ((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
- (s->avctx->thread_count > MAX_THREADS ||
- (s->avctx->thread_count > s->mb_height && s->mb_height))) {
- int max_threads = FFMIN(MAX_THREADS, s->mb_height);
- av_log(s->avctx, AV_LOG_WARNING,
- "too many threads (%d), reducing to %d\n",
- s->avctx->thread_count, max_threads);
- threads = max_threads;
+ if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
+ int max_slices;
+ if (s->mb_height)
+ max_slices = FFMIN(MAX_THREADS, s->mb_height);
+ else
+ max_slices = MAX_THREADS;
+ av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
+ " reducing to %d\n", nb_slices, max_slices);
+ nb_slices = max_slices;
}
if ((s->width || s->height) &&
@@ -831,17 +836,20 @@ av_cold int MPV_common_init(MpegEncContext *s)
s->context_initialized = 1;
s->thread_context[0] = s;
- if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
- for (i = 1; i < threads; i++) {
+// if (s->width && s->height) {
+ if (nb_slices > 1) {
+ for (i = 1; i < nb_slices; i++) {
s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
}
- for (i = 0; i < threads; i++) {
+ for (i = 0; i < nb_slices; i++) {
if (init_duplicate_context(s->thread_context[i], s) < 0)
goto fail;
- s->thread_context[i]->start_mb_y = (s->mb_height*(i ) + s->avctx->thread_count / 2) / s->avctx->thread_count;
- s->thread_context[i]->end_mb_y = (s->mb_height*(i+1) + s->avctx->thread_count / 2) / s->avctx->thread_count;
+ s->thread_context[i]->start_mb_y =
+ (s->mb_height * (i) + nb_slices / 2) / nb_slices;
+ s->thread_context[i]->end_mb_y =
+ (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
}
} else {
if (init_duplicate_context(s, s) < 0)
@@ -849,6 +857,8 @@ av_cold int MPV_common_init(MpegEncContext *s)
s->start_mb_y = 0;
s->end_mb_y = s->mb_height;
}
+ s->slice_context_count = nb_slices;
+// }
return 0;
fail:
@@ -861,13 +871,14 @@ void MPV_common_end(MpegEncContext *s)
{
int i, j, k;
- if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type & FF_THREAD_SLICE)) {
- for (i = 0; i < s->avctx->thread_count; i++) {
+ if (s->slice_context_count > 1) {
+ for (i = 0; i < s->slice_context_count; i++) {
free_duplicate_context(s->thread_context[i]);
}
- for (i = 1; i < s->avctx->thread_count; i++) {
+ for (i = 1; i < s->slice_context_count; i++) {
av_freep(&s->thread_context[i]);
}
+ s->slice_context_count = 1;
} else free_duplicate_context(s);
av_freep(&s->parse_context.buffer);
diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h
index b0b21c7779..f7c8fb784e 100644
--- a/libavcodec/mpegvideo.h
+++ b/libavcodec/mpegvideo.h
@@ -270,6 +270,7 @@ typedef struct MpegEncContext {
int start_mb_y; ///< start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
int end_mb_y; ///< end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
struct MpegEncContext *thread_context[MAX_THREADS];
+ int slice_context_count; ///< number of used thread_contexts
/**
* copy of the previous picture structure.
diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c
index 77e251e00c..a02dd7bdb4 100644
--- a/libavcodec/mpegvideo_enc.c
+++ b/libavcodec/mpegvideo_enc.c
@@ -1435,7 +1435,8 @@ int MPV_encode_picture(AVCodecContext *avctx,
{
MpegEncContext *s = avctx->priv_data;
AVFrame *pic_arg = data;
- int i, stuffing_count, context_count = avctx->thread_count;
+ int i, stuffing_count;
+ int context_count = s->slice_context_count;
for (i = 0; i < context_count; i++) {
int start_y = s->thread_context[i]->start_mb_y;
@@ -3072,7 +3073,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
{
int i;
int bits;
- int context_count = s->avctx->thread_count;
+ int context_count = s->slice_context_count;
s->picture_number = picture_number;
diff --git a/libavcodec/options.c b/libavcodec/options.c
index cc67291cb0..27c8e502b8 100644
--- a/libavcodec/options.c
+++ b/libavcodec/options.c
@@ -507,7 +507,7 @@ static const AVOption options[]={
{"cholesky", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = AV_LPC_TYPE_CHOLESKY }, INT_MIN, INT_MAX, A|E, "lpc_type"},
{"lpc_passes", "deprecated, use flac-specific options", OFFSET(lpc_passes), AV_OPT_TYPE_INT, {.dbl = -1 }, INT_MIN, INT_MAX, A|E},
#endif
-{"slices", "number of slices, used in parallelized decoding", OFFSET(slices), AV_OPT_TYPE_INT, {.dbl = 0 }, 0, INT_MAX, V|E},
+{"slices", "number of slices, used in parallelized encoding", OFFSET(slices), AV_OPT_TYPE_INT, {.dbl = 0 }, 0, INT_MAX, V|E},
{"thread_type", "select multithreading type", OFFSET(thread_type), AV_OPT_TYPE_FLAGS, {.dbl = FF_THREAD_SLICE|FF_THREAD_FRAME }, 0, INT_MAX, V|E|D, "thread_type"},
{"slice", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = FF_THREAD_SLICE }, INT_MIN, INT_MAX, V|E|D, "thread_type"},
{"frame", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = FF_THREAD_FRAME }, INT_MIN, INT_MAX, V|E|D, "thread_type"},
diff --git a/libavcodec/snow.c b/libavcodec/snow.c
index 0ce9b28479..660162a1b3 100644
--- a/libavcodec/snow.c
+++ b/libavcodec/snow.c
@@ -516,9 +516,9 @@ static void halfpel_interpol(SnowContext *s, uint8_t *halfpel[4][4], AVFrame *fr
int ls= frame->linesize[p];
uint8_t *src= frame->data[p];
- halfpel[1][p]= (uint8_t*)av_malloc(ls * (h+2*EDGE_WIDTH)) + EDGE_WIDTH*(1+ls);
- halfpel[2][p]= (uint8_t*)av_malloc(ls * (h+2*EDGE_WIDTH)) + EDGE_WIDTH*(1+ls);
- halfpel[3][p]= (uint8_t*)av_malloc(ls * (h+2*EDGE_WIDTH)) + EDGE_WIDTH*(1+ls);
+ halfpel[1][p] = av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls);
+ halfpel[2][p] = av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls);
+ halfpel[3][p] = av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls);
halfpel[0][p]= src;
for(y=0; y<h; y++){
diff --git a/libavcodec/v410enc.c b/libavcodec/v410enc.c
index e6c837eef6..e7d9c4e384 100644
--- a/libavcodec/v410enc.c
+++ b/libavcodec/v410enc.c
@@ -67,7 +67,7 @@ static int v410_encode_frame(AVCodecContext *avctx, uint8_t *buf,
for (j = 0; j < avctx->width; j++) {
val = u[j] << 2;
val |= y[j] << 12;
- val |= v[j] << 22;
+ val |= (uint32_t) v[j] << 22;
AV_WL32(dst, val);
dst += 4;
output_size += 4;
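
The single-character v410enc change addresses C integer promotion: the 10-bit samples are stored as uint16_t and promoted to (signed) int before the shift, so v[j] << 22 can move a set bit into bit 31 of that int, which is undefined behaviour; u[j] << 2 and y[j] << 12 cannot reach bit 31 with 10-bit values. A stand-alone illustration (not the actual encoder code):

#include <stdint.h>

/* Pack one 10-bit U/Y/V triple into a v410 word. */
static uint32_t pack_v410_sample(uint16_t u, uint16_t y, uint16_t v)
{
    uint32_t val = (uint32_t)u << 2;   /* bits  2..11 */
    val |= (uint32_t)y << 12;          /* bits 12..21 */
    val |= (uint32_t)v << 22;          /* bits 22..31: without the cast, the
                                        * promoted int shift into bit 31 is
                                        * undefined behaviour */
    return val;
}
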
diff --git a/libavcodec/wavpack.c b/libavcodec/wavpack.c
index 1ac0ecd3c7..7d358238b2 100644
--- a/libavcodec/wavpack.c
+++ b/libavcodec/wavpack.c
@@ -18,20 +18,22 @@
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+
#define BITSTREAM_READER_LE
+
+#include "libavutil/audioconvert.h"
#include "avcodec.h"
#include "get_bits.h"
#include "unary.h"
-#include "libavutil/audioconvert.h"
/**
* @file
* WavPack lossless audio decoder
*/
-#define WV_MONO 0x00000004
-#define WV_JOINT_STEREO 0x00000010
-#define WV_FALSE_STEREO 0x40000000
+#define WV_MONO 0x00000004
+#define WV_JOINT_STEREO 0x00000010
+#define WV_FALSE_STEREO 0x40000000
#define WV_HYBRID_MODE 0x00000008
#define WV_HYBRID_SHAPE 0x00000008
@@ -44,14 +46,14 @@
#define WV_FLT_ZERO_SENT 0x08
#define WV_FLT_ZERO_SIGN 0x10
-enum WP_ID_Flags{
+enum WP_ID_Flags {
WP_IDF_MASK = 0x1F,
WP_IDF_IGNORE = 0x20,
WP_IDF_ODD = 0x40,
WP_IDF_LONG = 0x80
};
-enum WP_ID{
+enum WP_ID {
WP_ID_DUMMY = 0,
WP_ID_ENCINFO,
WP_ID_DECTERMS,
@@ -178,7 +180,7 @@ static av_always_inline int wp_exp2(int16_t val)
{
int res, neg = 0;
- if(val < 0){
+ if (val < 0) {
val = -val;
neg = 1;
}
@@ -193,13 +195,13 @@ static av_always_inline int wp_log2(int32_t val)
{
int bits;
- if(!val)
+ if (!val)
return 0;
- if(val == 1)
+ if (val == 1)
return 256;
val += val >> 9;
bits = av_log2(val) + 1;
- if(bits < 9)
+ if (bits < 9)
return (bits << 8) + wp_log2_table[(val << (9 - bits)) & 0xFF];
else
return (bits << 8) + wp_log2_table[(val >> (bits - 9)) & 0xFF];
@@ -209,33 +211,35 @@ static av_always_inline int wp_log2(int32_t val)
// macros for manipulating median values
#define GET_MED(n) ((c->median[n] >> 4) + 1)
-#define DEC_MED(n) c->median[n] -= ((c->median[n] + (128>>n) - 2) / (128>>n)) * 2
-#define INC_MED(n) c->median[n] += ((c->median[n] + (128>>n)) / (128>>n)) * 5
+#define DEC_MED(n) c->median[n] -= ((c->median[n] + (128 >> n) - 2) / (128 >> n)) * 2
+#define INC_MED(n) c->median[n] += ((c->median[n] + (128 >> n) ) / (128 >> n)) * 5
// macros for applying weight
#define UPDATE_WEIGHT_CLIP(weight, delta, samples, in) \
- if(samples && in){ \
- if((samples ^ in) < 0){ \
- weight -= delta; \
- if(weight < -1024) weight = -1024; \
- }else{ \
- weight += delta; \
- if(weight > 1024) weight = 1024; \
- } \
- }
+ if (samples && in) { \
+ if ((samples ^ in) < 0) { \
+ weight -= delta; \
+ if (weight < -1024) \
+ weight = -1024; \
+ } else { \
+ weight += delta; \
+ if (weight > 1024) \
+ weight = 1024; \
+ } \
+ }
static av_always_inline int get_tail(GetBitContext *gb, int k)
{
int p, e, res;
- if(k<1)return 0;
+ if (k < 1)
+ return 0;
p = av_log2(k);
e = (1 << (p + 1)) - k - 1;
res = p ? get_bits(gb, p) : 0;
- if(res >= e){
- res = (res<<1) - e + get_bits1(gb);
- }
+ if (res >= e)
+ res = (res << 1) - e + get_bits1(gb);
return res;
}
@@ -243,37 +247,38 @@ static void update_error_limit(WavpackFrameContext *ctx)
{
int i, br[2], sl[2];
- for(i = 0; i <= ctx->stereo_in; i++){
+ for (i = 0; i <= ctx->stereo_in; i++) {
ctx->ch[i].bitrate_acc += ctx->ch[i].bitrate_delta;
br[i] = ctx->ch[i].bitrate_acc >> 16;
sl[i] = LEVEL_DECAY(ctx->ch[i].slow_level);
}
- if(ctx->stereo_in && ctx->hybrid_bitrate){
+ if (ctx->stereo_in && ctx->hybrid_bitrate) {
int balance = (sl[1] - sl[0] + br[1] + 1) >> 1;
- if(balance > br[0]){
+ if (balance > br[0]) {
br[1] = br[0] << 1;
br[0] = 0;
- }else if(-balance > br[0]){
+ } else if (-balance > br[0]) {
br[0] <<= 1;
br[1] = 0;
- }else{
+ } else {
br[1] = br[0] + balance;
br[0] = br[0] - balance;
}
}
- for(i = 0; i <= ctx->stereo_in; i++){
- if(ctx->hybrid_bitrate){
- if(sl[i] - br[i] > -0x100)
+ for (i = 0; i <= ctx->stereo_in; i++) {
+ if (ctx->hybrid_bitrate) {
+ if (sl[i] - br[i] > -0x100)
ctx->ch[i].error_limit = wp_exp2(sl[i] - br[i] + 0x100);
else
ctx->ch[i].error_limit = 0;
- }else{
+ } else {
ctx->ch[i].error_limit = wp_exp2(br[i]);
}
}
}
-static int wv_get_value(WavpackFrameContext *ctx, GetBitContext *gb, int channel, int *last)
+static int wv_get_value(WavpackFrameContext *ctx, GetBitContext *gb,
+ int channel, int *last)
{
int t, t2;
int sign, base, add, ret;
@@ -281,25 +286,26 @@ static int wv_get_value(WavpackFrameContext *ctx, GetBitContext *gb, int channel
*last = 0;
- if((ctx->ch[0].median[0] < 2U) && (ctx->ch[1].median[0] < 2U) && !ctx->zero && !ctx->one){
- if(ctx->zeroes){
+ if ((ctx->ch[0].median[0] < 2U) && (ctx->ch[1].median[0] < 2U) &&
+ !ctx->zero && !ctx->one) {
+ if (ctx->zeroes) {
ctx->zeroes--;
- if(ctx->zeroes){
+ if (ctx->zeroes) {
c->slow_level -= LEVEL_DECAY(c->slow_level);
return 0;
}
- }else{
+ } else {
t = get_unary_0_33(gb);
- if(t >= 2){
- if(get_bits_left(gb) < t-1)
+ if (t >= 2) {
+ if (get_bits_left(gb) < t - 1)
goto error;
t = get_bits(gb, t - 1) | (1 << (t-1));
- }else{
- if(get_bits_left(gb) < 0)
+ } else {
+ if (get_bits_left(gb) < 0)
goto error;
}
ctx->zeroes = t;
- if(ctx->zeroes){
+ if (ctx->zeroes) {
memset(ctx->ch[0].median, 0, sizeof(ctx->ch[0].median));
memset(ctx->ch[1].median, 0, sizeof(ctx->ch[1].median));
c->slow_level -= LEVEL_DECAY(c->slow_level);
@@ -308,81 +314,81 @@ static int wv_get_value(WavpackFrameContext *ctx, GetBitContext *gb, int channel
}
}
- if(ctx->zero){
+ if (ctx->zero) {
t = 0;
ctx->zero = 0;
- }else{
+ } else {
t = get_unary_0_33(gb);
- if(get_bits_left(gb) < 0)
+ if (get_bits_left(gb) < 0)
goto error;
- if(t == 16) {
+ if (t == 16) {
t2 = get_unary_0_33(gb);
- if(t2 < 2){
- if(get_bits_left(gb) < 0)
+ if (t2 < 2) {
+ if (get_bits_left(gb) < 0)
goto error;
t += t2;
- }else{
- if(get_bits_left(gb) < t2 - 1)
+ } else {
+ if (get_bits_left(gb) < t2 - 1)
goto error;
t += get_bits(gb, t2 - 1) | (1 << (t2 - 1));
}
}
- if(ctx->one){
- ctx->one = t&1;
- t = (t>>1) + 1;
- }else{
- ctx->one = t&1;
+ if (ctx->one) {
+ ctx->one = t & 1;
+ t = (t >> 1) + 1;
+ } else {
+ ctx->one = t & 1;
t >>= 1;
}
ctx->zero = !ctx->one;
}
- if(ctx->hybrid && !channel)
+ if (ctx->hybrid && !channel)
update_error_limit(ctx);
- if(!t){
+ if (!t) {
base = 0;
- add = GET_MED(0) - 1;
+ add = GET_MED(0) - 1;
DEC_MED(0);
- }else if(t == 1){
+ } else if (t == 1) {
base = GET_MED(0);
- add = GET_MED(1) - 1;
+ add = GET_MED(1) - 1;
INC_MED(0);
DEC_MED(1);
- }else if(t == 2){
+ } else if (t == 2) {
base = GET_MED(0) + GET_MED(1);
- add = GET_MED(2) - 1;
+ add = GET_MED(2) - 1;
INC_MED(0);
INC_MED(1);
DEC_MED(2);
- }else{
+ } else {
base = GET_MED(0) + GET_MED(1) + GET_MED(2) * (t - 2);
- add = GET_MED(2) - 1;
+ add = GET_MED(2) - 1;
INC_MED(0);
INC_MED(1);
INC_MED(2);
}
- if(!c->error_limit){
+ if (!c->error_limit) {
ret = base + get_tail(gb, add);
if (get_bits_left(gb) <= 0)
goto error;
- }else{
- int mid = (base*2 + add + 1) >> 1;
- while(add > c->error_limit){
- if(get_bits_left(gb) <= 0)
+ } else {
+ int mid = (base * 2 + add + 1) >> 1;
+ while (add > c->error_limit) {
+ if (get_bits_left(gb) <= 0)
goto error;
- if(get_bits1(gb)){
+ if (get_bits1(gb)) {
add -= (mid - base);
base = mid;
- }else
+ } else
add = mid - base - 1;
- mid = (base*2 + add + 1) >> 1;
+ mid = (base * 2 + add + 1) >> 1;
}
ret = mid;
}
sign = get_bits1(gb);
- if(ctx->hybrid_bitrate)
+ if (ctx->hybrid_bitrate)
c->slow_level += wp_log2(ret) - LEVEL_DECAY(c->slow_level);
return sign ? ~ret : ret;
@@ -391,23 +397,24 @@ error:
return 0;
}
-static inline int wv_get_value_integer(WavpackFrameContext *s, uint32_t *crc, int S)
+static inline int wv_get_value_integer(WavpackFrameContext *s, uint32_t *crc,
+ int S)
{
int bit;
- if(s->extra_bits){
+ if (s->extra_bits){
S <<= s->extra_bits;
- if(s->got_extra_bits && get_bits_left(&s->gb_extra_bits) >= s->extra_bits){
+ if (s->got_extra_bits && get_bits_left(&s->gb_extra_bits) >= s->extra_bits) {
S |= get_bits(&s->gb_extra_bits, s->extra_bits);
- *crc = *crc * 9 + (S&0xffff) * 3 + ((unsigned)S>>16);
+ *crc = *crc * 9 + (S & 0xffff) * 3 + ((unsigned)S >> 16);
}
}
bit = (S & s->and) | s->or;
bit = (((S + bit) << s->shift) - bit) << s->post_shift;
- if(s->hybrid)
+ if (s->hybrid)
bit = av_clip(bit, -s->hybrid_maxclip - 1, s->hybrid_maxclip);
return bit;
@@ -423,58 +430,58 @@ static float wv_get_value_float(WavpackFrameContext *s, uint32_t *crc, int S)
int sign;
int exp = s->float_max_exp;
- if(s->got_extra_bits){
- const int max_bits = 1 + 23 + 8 + 1;
+ if (s->got_extra_bits) {
+ const int max_bits = 1 + 23 + 8 + 1;
const int left_bits = get_bits_left(&s->gb_extra_bits);
- if(left_bits + 8 * FF_INPUT_BUFFER_PADDING_SIZE < max_bits)
+ if (left_bits + 8 * FF_INPUT_BUFFER_PADDING_SIZE < max_bits)
return 0.0;
}
- if(S){
+ if (S) {
S <<= s->float_shift;
sign = S < 0;
- if(sign)
+ if (sign)
S = -S;
- if(S >= 0x1000000){
- if(s->got_extra_bits && get_bits1(&s->gb_extra_bits)){
+ if (S >= 0x1000000) {
+ if (s->got_extra_bits && get_bits1(&s->gb_extra_bits))
S = get_bits(&s->gb_extra_bits, 23);
- }else{
+ else
S = 0;
- }
exp = 255;
- }else if(exp){
+ } else if (exp) {
int shift = 23 - av_log2(S);
exp = s->float_max_exp;
- if(exp <= shift){
+ if (exp <= shift)
shift = --exp;
- }
exp -= shift;
- if(shift){
+ if (shift) {
S <<= shift;
- if((s->float_flag & WV_FLT_SHIFT_ONES) ||
- (s->got_extra_bits && (s->float_flag & WV_FLT_SHIFT_SAME) && get_bits1(&s->gb_extra_bits)) ){
+ if ((s->float_flag & WV_FLT_SHIFT_ONES) ||
+ (s->got_extra_bits && (s->float_flag & WV_FLT_SHIFT_SAME) &&
+ get_bits1(&s->gb_extra_bits))) {
S |= (1 << shift) - 1;
- } else if(s->got_extra_bits && (s->float_flag & WV_FLT_SHIFT_SENT)){
+ } else if (s->got_extra_bits &&
+ (s->float_flag & WV_FLT_SHIFT_SENT)) {
S |= get_bits(&s->gb_extra_bits, shift);
}
}
- }else{
+ } else {
exp = s->float_max_exp;
}
S &= 0x7fffff;
- }else{
+ } else {
sign = 0;
exp = 0;
- if(s->got_extra_bits && (s->float_flag & WV_FLT_ZERO_SENT)){
- if(get_bits1(&s->gb_extra_bits)){
+ if (s->got_extra_bits && (s->float_flag & WV_FLT_ZERO_SENT)) {
+ if (get_bits1(&s->gb_extra_bits)) {
S = get_bits(&s->gb_extra_bits, 23);
- if(s->float_max_exp >= 25)
+ if (s->float_max_exp >= 25)
exp = get_bits(&s->gb_extra_bits, 8);
sign = get_bits1(&s->gb_extra_bits);
- }else{
- if(s->float_flag & WV_FLT_ZERO_SIGN)
+ } else {
+ if (s->float_flag & WV_FLT_ZERO_SIGN)
sign = get_bits1(&s->gb_extra_bits);
}
}
@@ -492,7 +499,8 @@ static void wv_reset_saved_context(WavpackFrameContext *s)
s->sc.crc = s->extra_sc.crc = 0xFFFFFFFF;
}
-static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb, void *dst, const int type)
+static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb,
+ void *dst, const int type)
{
int i, j, count = 0;
int last, t;
@@ -506,69 +514,71 @@ static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb, vo
const int channel_pad = s->avctx->channels - 2;
s->one = s->zero = s->zeroes = 0;
- do{
+ do {
L = wv_get_value(s, gb, 0, &last);
- if(last) break;
+ if (last)
+ break;
R = wv_get_value(s, gb, 1, &last);
- if(last) break;
- for(i = 0; i < s->terms; i++){
+ if (last)
+ break;
+ for (i = 0; i < s->terms; i++) {
t = s->decorr[i].value;
- if(t > 0){
- if(t > 8){
- if(t & 1){
+ if (t > 0) {
+ if (t > 8) {
+ if (t & 1) {
A = 2 * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1];
B = 2 * s->decorr[i].samplesB[0] - s->decorr[i].samplesB[1];
- }else{
+ } else {
A = (3 * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1]) >> 1;
B = (3 * s->decorr[i].samplesB[0] - s->decorr[i].samplesB[1]) >> 1;
}
s->decorr[i].samplesA[1] = s->decorr[i].samplesA[0];
s->decorr[i].samplesB[1] = s->decorr[i].samplesB[0];
j = 0;
- }else{
+ } else {
A = s->decorr[i].samplesA[pos];
B = s->decorr[i].samplesB[pos];
j = (pos + t) & 7;
}
- if(type != AV_SAMPLE_FMT_S16){
+ if (type != AV_SAMPLE_FMT_S16) {
L2 = L + ((s->decorr[i].weightA * (int64_t)A + 512) >> 10);
R2 = R + ((s->decorr[i].weightB * (int64_t)B + 512) >> 10);
- }else{
+ } else {
L2 = L + ((s->decorr[i].weightA * A + 512) >> 10);
R2 = R + ((s->decorr[i].weightB * B + 512) >> 10);
}
- if(A && L) s->decorr[i].weightA -= ((((L ^ A) >> 30) & 2) - 1) * s->decorr[i].delta;
- if(B && R) s->decorr[i].weightB -= ((((R ^ B) >> 30) & 2) - 1) * s->decorr[i].delta;
+ if (A && L) s->decorr[i].weightA -= ((((L ^ A) >> 30) & 2) - 1) * s->decorr[i].delta;
+ if (B && R) s->decorr[i].weightB -= ((((R ^ B) >> 30) & 2) - 1) * s->decorr[i].delta;
s->decorr[i].samplesA[j] = L = L2;
s->decorr[i].samplesB[j] = R = R2;
- }else if(t == -1){
- if(type != AV_SAMPLE_FMT_S16)
+ } else if (t == -1) {
+ if (type != AV_SAMPLE_FMT_S16)
L2 = L + ((s->decorr[i].weightA * (int64_t)s->decorr[i].samplesA[0] + 512) >> 10);
else
L2 = L + ((s->decorr[i].weightA * s->decorr[i].samplesA[0] + 512) >> 10);
UPDATE_WEIGHT_CLIP(s->decorr[i].weightA, s->decorr[i].delta, s->decorr[i].samplesA[0], L);
L = L2;
- if(type != AV_SAMPLE_FMT_S16)
+ if (type != AV_SAMPLE_FMT_S16)
R2 = R + ((s->decorr[i].weightB * (int64_t)L2 + 512) >> 10);
else
R2 = R + ((s->decorr[i].weightB * L2 + 512) >> 10);
UPDATE_WEIGHT_CLIP(s->decorr[i].weightB, s->decorr[i].delta, L2, R);
R = R2;
s->decorr[i].samplesA[0] = R;
- }else{
- if(type != AV_SAMPLE_FMT_S16)
+ } else {
+ if (type != AV_SAMPLE_FMT_S16)
R2 = R + ((s->decorr[i].weightB * (int64_t)s->decorr[i].samplesB[0] + 512) >> 10);
else
R2 = R + ((s->decorr[i].weightB * s->decorr[i].samplesB[0] + 512) >> 10);
UPDATE_WEIGHT_CLIP(s->decorr[i].weightB, s->decorr[i].delta, s->decorr[i].samplesB[0], R);
R = R2;
- if(t == -3){
+ if (t == -3) {
R2 = s->decorr[i].samplesA[0];
s->decorr[i].samplesA[0] = R;
}
- if(type != AV_SAMPLE_FMT_S16)
+ if (type != AV_SAMPLE_FMT_S16)
L2 = L + ((s->decorr[i].weightA * (int64_t)R2 + 512) >> 10);
else
L2 = L + ((s->decorr[i].weightA * R2 + 512) >> 10);
@@ -578,15 +588,15 @@ static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb, vo
}
}
pos = (pos + 1) & 7;
- if(s->joint)
+ if (s->joint)
L += (R -= (L >> 1));
crc = (crc * 3 + L) * 3 + R;
- if(type == AV_SAMPLE_FMT_FLT){
+ if (type == AV_SAMPLE_FMT_FLT) {
*dstfl++ = wv_get_value_float(s, &crc_extra_bits, L);
*dstfl++ = wv_get_value_float(s, &crc_extra_bits, R);
dstfl += channel_pad;
- } else if(type == AV_SAMPLE_FMT_S32){
+ } else if (type == AV_SAMPLE_FMT_S32) {
*dst32++ = wv_get_value_integer(s, &crc_extra_bits, L);
*dst32++ = wv_get_value_integer(s, &crc_extra_bits, R);
dst32 += channel_pad;
@@ -598,20 +608,21 @@ static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb, vo
count++;
} while (!last && count < s->samples);
- wv_reset_saved_context(s);
- if(crc != s->CRC){
- av_log(s->avctx, AV_LOG_ERROR, "CRC error\n");
- return -1;
- }
- if(s->got_extra_bits && crc_extra_bits != s->crc_extra_bits){
- av_log(s->avctx, AV_LOG_ERROR, "Extra bits CRC error\n");
- return -1;
- }
+ wv_reset_saved_context(s);
+ if (crc != s->CRC) {
+ av_log(s->avctx, AV_LOG_ERROR, "CRC error\n");
+ return -1;
+ }
+ if (s->got_extra_bits && crc_extra_bits != s->crc_extra_bits) {
+ av_log(s->avctx, AV_LOG_ERROR, "Extra bits CRC error\n");
+ return -1;
+ }
return count * 2;
}
-static inline int wv_unpack_mono(WavpackFrameContext *s, GetBitContext *gb, void *dst, const int type)
+static inline int wv_unpack_mono(WavpackFrameContext *s, GetBitContext *gb,
+ void *dst, const int type)
{
int i, j, count = 0;
int last, t;
@@ -625,55 +636,57 @@ static inline int wv_unpack_mono(WavpackFrameContext *s, GetBitContext *gb, void
const int channel_stride = s->avctx->channels;
s->one = s->zero = s->zeroes = 0;
- do{
+ do {
T = wv_get_value(s, gb, 0, &last);
S = 0;
- if(last) break;
- for(i = 0; i < s->terms; i++){
+ if (last)
+ break;
+ for (i = 0; i < s->terms; i++) {
t = s->decorr[i].value;
- if(t > 8){
- if(t & 1)
- A = 2 * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1];
+ if (t > 8) {
+ if (t & 1)
+ A = 2 * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1];
else
A = (3 * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1]) >> 1;
s->decorr[i].samplesA[1] = s->decorr[i].samplesA[0];
j = 0;
- }else{
+ } else {
A = s->decorr[i].samplesA[pos];
j = (pos + t) & 7;
}
- if(type != AV_SAMPLE_FMT_S16)
+ if (type != AV_SAMPLE_FMT_S16)
S = T + ((s->decorr[i].weightA * (int64_t)A + 512) >> 10);
else
S = T + ((s->decorr[i].weightA * A + 512) >> 10);
- if(A && T) s->decorr[i].weightA -= ((((T ^ A) >> 30) & 2) - 1) * s->decorr[i].delta;
+ if (A && T)
+ s->decorr[i].weightA -= ((((T ^ A) >> 30) & 2) - 1) * s->decorr[i].delta;
s->decorr[i].samplesA[j] = T = S;
}
pos = (pos + 1) & 7;
crc = crc * 3 + S;
- if(type == AV_SAMPLE_FMT_FLT){
+ if (type == AV_SAMPLE_FMT_FLT) {
*dstfl = wv_get_value_float(s, &crc_extra_bits, S);
dstfl += channel_stride;
- }else if(type == AV_SAMPLE_FMT_S32){
+ } else if (type == AV_SAMPLE_FMT_S32) {
*dst32 = wv_get_value_integer(s, &crc_extra_bits, S);
dst32 += channel_stride;
- }else{
+ } else {
*dst16 = wv_get_value_integer(s, &crc_extra_bits, S);
dst16 += channel_stride;
}
count++;
} while (!last && count < s->samples);
- wv_reset_saved_context(s);
- if(crc != s->CRC){
- av_log(s->avctx, AV_LOG_ERROR, "CRC error\n");
- return -1;
- }
- if(s->got_extra_bits && crc_extra_bits != s->crc_extra_bits){
- av_log(s->avctx, AV_LOG_ERROR, "Extra bits CRC error\n");
- return -1;
- }
+ wv_reset_saved_context(s);
+ if (crc != s->CRC) {
+ av_log(s->avctx, AV_LOG_ERROR, "CRC error\n");
+ return -1;
+ }
+ if (s->got_extra_bits && crc_extra_bits != s->crc_extra_bits) {
+ av_log(s->avctx, AV_LOG_ERROR, "Extra bits CRC error\n");
+ return -1;
+ }
return count;
}
@@ -681,11 +694,11 @@ static inline int wv_unpack_mono(WavpackFrameContext *s, GetBitContext *gb, void
static av_cold int wv_alloc_frame_context(WavpackContext *c)
{
- if(c->fdec_num == WV_MAX_FRAME_DECODERS)
+ if (c->fdec_num == WV_MAX_FRAME_DECODERS)
return -1;
c->fdec[c->fdec_num] = av_mallocz(sizeof(**c->fdec));
- if(!c->fdec[c->fdec_num])
+ if (!c->fdec[c->fdec_num])
return -1;
c->fdec_num++;
c->fdec[c->fdec_num - 1]->avctx = c->avctx;
@@ -699,20 +712,21 @@ static av_cold int wavpack_decode_init(AVCodecContext *avctx)
WavpackContext *s = avctx->priv_data;
s->avctx = avctx;
- if(avctx->bits_per_coded_sample <= 16)
+ if (avctx->bits_per_coded_sample <= 16)
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
else
avctx->sample_fmt = AV_SAMPLE_FMT_S32;
- if(avctx->channels <= 2 && !avctx->channel_layout)
- avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
+ if (avctx->channels <= 2 && !avctx->channel_layout)
+ avctx->channel_layout = (avctx->channels == 2) ? AV_CH_LAYOUT_STEREO :
+ AV_CH_LAYOUT_MONO;
s->multichannel = avctx->channels > 2;
/* lavf demuxer does not provide extradata, Matroska stores 0x403
there, use this to detect decoding mode for multichannel */
s->mkv_mode = 0;
- if(s->multichannel && avctx->extradata && avctx->extradata_size == 2){
+ if (s->multichannel && avctx->extradata && avctx->extradata_size == 2) {
int ver = AV_RL16(avctx->extradata);
- if(ver >= 0x402 && ver <= 0x410)
+ if (ver >= 0x402 && ver <= 0x410)
s->mkv_mode = 1;
}
@@ -729,7 +743,7 @@ static av_cold int wavpack_decode_end(AVCodecContext *avctx)
WavpackContext *s = avctx->priv_data;
int i;
- for(i = 0; i < s->fdec_num; i++)
+ for (i = 0; i < s->fdec_num; i++)
av_freep(&s->fdec[i]);
s->fdec_num = 0;
@@ -744,101 +758,96 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
WavpackFrameContext *s;
void *samples = data;
int samplecount;
- int got_terms = 0, got_weights = 0, got_samples = 0, got_entropy = 0, got_bs = 0, got_float = 0;
- int got_hybrid = 0;
- const uint8_t* orig_buf = buf;
- const uint8_t* buf_end = buf + buf_size;
+ int got_terms = 0, got_weights = 0, got_samples = 0,
+ got_entropy = 0, got_bs = 0, got_float = 0, got_hybrid = 0;
+ const uint8_t *orig_buf = buf;
+ const uint8_t *buf_end = buf + buf_size;
int i, j, id, size, ssize, weights, t;
int bpp, chan, chmask;
- if (buf_size == 0){
+ if (buf_size == 0) {
*got_frame_ptr = 0;
return 0;
}
- if(block_no >= wc->fdec_num && wv_alloc_frame_context(wc) < 0){
+ if (block_no >= wc->fdec_num && wv_alloc_frame_context(wc) < 0) {
av_log(avctx, AV_LOG_ERROR, "Error creating frame decode context\n");
return -1;
}
s = wc->fdec[block_no];
- if(!s){
+ if (!s) {
av_log(avctx, AV_LOG_ERROR, "Context for block %d is not present\n", block_no);
return -1;
}
- memset(s->decorr, 0, MAX_TERMS * sizeof(Decorr));
- memset(s->ch, 0, sizeof(s->ch));
- s->extra_bits = 0;
- s->and = s->or = s->shift = 0;
- s->got_extra_bits = 0;
+ memset(s->decorr, 0, MAX_TERMS * sizeof(Decorr));
+ memset(s->ch, 0, sizeof(s->ch));
+ s->extra_bits = 0;
+ s->and = s->or = s->shift = 0;
+ s->got_extra_bits = 0;
- if(!wc->mkv_mode){
+ if (!wc->mkv_mode) {
s->samples = AV_RL32(buf); buf += 4;
- if(!s->samples){
+ if (!s->samples) {
*got_frame_ptr = 0;
return 0;
}
- }else{
+ } else {
s->samples = wc->samples;
}
s->frame_flags = AV_RL32(buf); buf += 4;
- if(s->frame_flags&0x80){
- avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
- } else if((s->frame_flags&0x03) <= 1){
- avctx->sample_fmt = AV_SAMPLE_FMT_S16;
- } else {
- avctx->sample_fmt = AV_SAMPLE_FMT_S32;
- }
bpp = av_get_bytes_per_sample(avctx->sample_fmt);
samples = (uint8_t*)samples + bpp * wc->ch_offset;
- s->stereo = !(s->frame_flags & WV_MONO);
- s->stereo_in = (s->frame_flags & WV_FALSE_STEREO) ? 0 : s->stereo;
- s->joint = s->frame_flags & WV_JOINT_STEREO;
- s->hybrid = s->frame_flags & WV_HYBRID_MODE;
- s->hybrid_bitrate = s->frame_flags & WV_HYBRID_BITRATE;
+ s->stereo = !(s->frame_flags & WV_MONO);
+ s->stereo_in = (s->frame_flags & WV_FALSE_STEREO) ? 0 : s->stereo;
+ s->joint = s->frame_flags & WV_JOINT_STEREO;
+ s->hybrid = s->frame_flags & WV_HYBRID_MODE;
+ s->hybrid_bitrate = s->frame_flags & WV_HYBRID_BITRATE;
s->hybrid_maxclip = (1LL << ((((s->frame_flags & 0x03) + 1) << 3) - 1)) - 1;
- s->post_shift = 8 * (bpp-1-(s->frame_flags&0x03)) + ((s->frame_flags >> 13) & 0x1f);
- s->CRC = AV_RL32(buf); buf += 4;
- if(wc->mkv_mode)
+ s->post_shift = 8 * (bpp - 1 - (s->frame_flags & 0x03)) +
+ ((s->frame_flags >> 13) & 0x1f);
+ s->CRC = AV_RL32(buf); buf += 4;
+ if (wc->mkv_mode)
buf += 4; //skip block size;
wc->ch_offset += 1 + s->stereo;
// parse metadata blocks
- while(buf < buf_end){
- id = *buf++;
+ while (buf < buf_end) {
+ id = *buf++;
size = *buf++;
- if(id & WP_IDF_LONG) {
+ if (id & WP_IDF_LONG) {
size |= (*buf++) << 8;
size |= (*buf++) << 16;
}
size <<= 1; // size is specified in words
ssize = size;
- if(id & WP_IDF_ODD) size--;
- if(size < 0){
+ if (id & WP_IDF_ODD)
+ size--;
+ if (size < 0) {
av_log(avctx, AV_LOG_ERROR, "Got incorrect block %02X with size %i\n", id, size);
break;
}
- if(buf + ssize > buf_end){
+ if (buf + ssize > buf_end) {
av_log(avctx, AV_LOG_ERROR, "Block size %i is out of bounds\n", size);
break;
}
- if(id & WP_IDF_IGNORE){
+ if (id & WP_IDF_IGNORE) {
buf += ssize;
continue;
}
- switch(id & WP_IDF_MASK){
+ switch (id & WP_IDF_MASK) {
case WP_ID_DECTERMS:
- if(size > MAX_TERMS){
+ if (size > MAX_TERMS) {
av_log(avctx, AV_LOG_ERROR, "Too many decorrelation terms\n");
s->terms = 0;
buf += ssize;
continue;
}
s->terms = size;
- for(i = 0; i < s->terms; i++) {
+ for (i = 0; i < s->terms; i++) {
s->decorr[s->terms - i - 1].value = (*buf & 0x1F) - 5;
s->decorr[s->terms - i - 1].delta = *buf >> 5;
buf++;
@@ -846,56 +855,57 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
got_terms = 1;
break;
case WP_ID_DECWEIGHTS:
- if(!got_terms){
+ if (!got_terms) {
av_log(avctx, AV_LOG_ERROR, "No decorrelation terms met\n");
continue;
}
weights = size >> s->stereo_in;
- if(weights > MAX_TERMS || weights > s->terms){
+ if (weights > MAX_TERMS || weights > s->terms) {
av_log(avctx, AV_LOG_ERROR, "Too many decorrelation weights\n");
buf += ssize;
continue;
}
- for(i = 0; i < weights; i++) {
+ for (i = 0; i < weights; i++) {
t = (int8_t)(*buf++);
s->decorr[s->terms - i - 1].weightA = t << 3;
- if(s->decorr[s->terms - i - 1].weightA > 0)
- s->decorr[s->terms - i - 1].weightA += (s->decorr[s->terms - i - 1].weightA + 64) >> 7;
- if(s->stereo_in){
+ if (s->decorr[s->terms - i - 1].weightA > 0)
+ s->decorr[s->terms - i - 1].weightA +=
+ (s->decorr[s->terms - i - 1].weightA + 64) >> 7;
+ if (s->stereo_in) {
t = (int8_t)(*buf++);
s->decorr[s->terms - i - 1].weightB = t << 3;
- if(s->decorr[s->terms - i - 1].weightB > 0)
- s->decorr[s->terms - i - 1].weightB += (s->decorr[s->terms - i - 1].weightB + 64) >> 7;
+ if (s->decorr[s->terms - i - 1].weightB > 0)
+ s->decorr[s->terms - i - 1].weightB +=
+ (s->decorr[s->terms - i - 1].weightB + 64) >> 7;
}
}
got_weights = 1;
break;
case WP_ID_DECSAMPLES:
- if(!got_terms){
+ if (!got_terms) {
av_log(avctx, AV_LOG_ERROR, "No decorrelation terms met\n");
continue;
}
t = 0;
- for(i = s->terms - 1; (i >= 0) && (t < size); i--) {
- if(s->decorr[i].value > 8){
+ for (i = s->terms - 1; (i >= 0) && (t < size); i--) {
+ if (s->decorr[i].value > 8) {
s->decorr[i].samplesA[0] = wp_exp2(AV_RL16(buf)); buf += 2;
s->decorr[i].samplesA[1] = wp_exp2(AV_RL16(buf)); buf += 2;
- if(s->stereo_in){
+ if (s->stereo_in) {
s->decorr[i].samplesB[0] = wp_exp2(AV_RL16(buf)); buf += 2;
s->decorr[i].samplesB[1] = wp_exp2(AV_RL16(buf)); buf += 2;
t += 4;
}
t += 4;
- }else if(s->decorr[i].value < 0){
+ } else if (s->decorr[i].value < 0) {
s->decorr[i].samplesA[0] = wp_exp2(AV_RL16(buf)); buf += 2;
s->decorr[i].samplesB[0] = wp_exp2(AV_RL16(buf)); buf += 2;
t += 4;
- }else{
- for(j = 0; j < s->decorr[i].value; j++){
+ } else {
+ for (j = 0; j < s->decorr[i].value; j++) {
s->decorr[i].samplesA[j] = wp_exp2(AV_RL16(buf)); buf += 2;
- if(s->stereo_in){
+ if (s->stereo_in)
s->decorr[i].samplesB[j] = wp_exp2(AV_RL16(buf)); buf += 2;
- }
}
t += s->decorr[i].value * 2 * (s->stereo_in + 1);
}
@@ -903,13 +913,14 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
got_samples = 1;
break;
case WP_ID_ENTROPY:
- if(size != 6 * (s->stereo_in + 1)){
- av_log(avctx, AV_LOG_ERROR, "Entropy vars size should be %i, got %i", 6 * (s->stereo_in + 1), size);
+ if (size != 6 * (s->stereo_in + 1)) {
+ av_log(avctx, AV_LOG_ERROR, "Entropy vars size should be %i, "
+ "got %i", 6 * (s->stereo_in + 1), size);
buf += ssize;
continue;
}
- for(j = 0; j <= s->stereo_in; j++){
- for(i = 0; i < 3; i++){
+ for (j = 0; j <= s->stereo_in; j++) {
+ for (i = 0; i < 3; i++) {
s->ch[j].median[i] = wp_exp2(AV_RL16(buf));
buf += 2;
}
@@ -917,56 +928,56 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
got_entropy = 1;
break;
case WP_ID_HYBRID:
- if(s->hybrid_bitrate){
- for(i = 0; i <= s->stereo_in; i++){
+ if (s->hybrid_bitrate) {
+ for (i = 0; i <= s->stereo_in; i++) {
s->ch[i].slow_level = wp_exp2(AV_RL16(buf));
buf += 2;
size -= 2;
}
}
- for(i = 0; i < (s->stereo_in + 1); i++){
+ for (i = 0; i < (s->stereo_in + 1); i++) {
s->ch[i].bitrate_acc = AV_RL16(buf) << 16;
buf += 2;
size -= 2;
}
- if(size > 0){
- for(i = 0; i < (s->stereo_in + 1); i++){
+ if (size > 0) {
+ for (i = 0; i < (s->stereo_in + 1); i++) {
s->ch[i].bitrate_delta = wp_exp2((int16_t)AV_RL16(buf));
buf += 2;
}
- }else{
- for(i = 0; i < (s->stereo_in + 1); i++)
+ } else {
+ for (i = 0; i < (s->stereo_in + 1); i++)
s->ch[i].bitrate_delta = 0;
}
got_hybrid = 1;
break;
case WP_ID_INT32INFO:
- if(size != 4){
+ if (size != 4) {
av_log(avctx, AV_LOG_ERROR, "Invalid INT32INFO, size = %i, sent_bits = %i\n", size, *buf);
buf += ssize;
continue;
}
- if(buf[0])
+ if (buf[0])
s->extra_bits = buf[0];
- else if(buf[1])
+ else if (buf[1])
s->shift = buf[1];
- else if(buf[2]){
+ else if (buf[2]){
s->and = s->or = 1;
s->shift = buf[2];
- }else if(buf[3]){
- s->and = 1;
+ } else if(buf[3]) {
+ s->and = 1;
s->shift = buf[3];
}
buf += 4;
break;
case WP_ID_FLOATINFO:
- if(size != 4){
+ if (size != 4) {
av_log(avctx, AV_LOG_ERROR, "Invalid FLOATINFO, size = %i\n", size);
buf += ssize;
continue;
}
- s->float_flag = buf[0];
- s->float_shift = buf[1];
+ s->float_flag = buf[0];
+ s->float_shift = buf[1];
s->float_max_exp = buf[2];
buf += 4;
got_float = 1;
@@ -980,8 +991,9 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
got_bs = 1;
break;
case WP_ID_EXTRABITS:
- if(size <= 4){
- av_log(avctx, AV_LOG_ERROR, "Invalid EXTRABITS, size = %i\n", size);
+ if (size <= 4) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid EXTRABITS, size = %i\n",
+ size);
buf += size;
continue;
}
@@ -993,89 +1005,84 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
s->got_extra_bits = 1;
break;
case WP_ID_CHANINFO:
- if(size <= 1){
+ if (size <= 1) {
av_log(avctx, AV_LOG_ERROR, "Insufficient channel information\n");
return -1;
}
chan = *buf++;
- switch(size - 2){
- case 0:
- chmask = *buf;
- break;
- case 1:
- chmask = AV_RL16(buf);
- break;
- case 2:
- chmask = AV_RL24(buf);
- break;
- case 3:
- chmask = AV_RL32(buf);
- break;
+ switch (size - 2) {
+ case 0: chmask = *buf; break;
+ case 1: chmask = AV_RL16(buf); break;
+ case 2: chmask = AV_RL24(buf); break;
+ case 3: chmask = AV_RL32(buf); break;
case 5:
chan |= (buf[1] & 0xF) << 8;
chmask = AV_RL24(buf + 2);
break;
default:
- av_log(avctx, AV_LOG_ERROR, "Invalid channel info size %d\n", size);
- chan = avctx->channels;
+ av_log(avctx, AV_LOG_ERROR, "Invalid channel info size %d\n",
+ size);
+ chan = avctx->channels;
chmask = avctx->channel_layout;
}
- if(chan != avctx->channels){
- av_log(avctx, AV_LOG_ERROR, "Block reports total %d channels, decoder believes it's %d channels\n",
- chan, avctx->channels);
+ if (chan != avctx->channels) {
+ av_log(avctx, AV_LOG_ERROR, "Block reports total %d channels, "
+ "decoder believes it's %d channels\n", chan,
+ avctx->channels);
return -1;
}
- if(!avctx->channel_layout)
+ if (!avctx->channel_layout)
avctx->channel_layout = chmask;
buf += size - 1;
break;
default:
buf += size;
}
- if(id & WP_IDF_ODD) buf++;
+ if (id & WP_IDF_ODD)
+ buf++;
}
- if(!got_terms){
- av_log(avctx, AV_LOG_ERROR, "No block with decorrelation terms\n");
- return -1;
- }
- if(!got_weights){
- av_log(avctx, AV_LOG_ERROR, "No block with decorrelation weights\n");
- return -1;
- }
- if(!got_samples){
- av_log(avctx, AV_LOG_ERROR, "No block with decorrelation samples\n");
- return -1;
- }
- if(!got_entropy){
- av_log(avctx, AV_LOG_ERROR, "No block with entropy info\n");
- return -1;
- }
- if(s->hybrid && !got_hybrid){
- av_log(avctx, AV_LOG_ERROR, "Hybrid config not found\n");
- return -1;
- }
- if(!got_bs){
- av_log(avctx, AV_LOG_ERROR, "Packed samples not found\n");
- return -1;
- }
- if(!got_float && avctx->sample_fmt == AV_SAMPLE_FMT_FLT){
- av_log(avctx, AV_LOG_ERROR, "Float information not found\n");
- return -1;
- }
- if(s->got_extra_bits && avctx->sample_fmt != AV_SAMPLE_FMT_FLT){
- const int size = get_bits_left(&s->gb_extra_bits);
- const int wanted = s->samples * s->extra_bits << s->stereo_in;
- if(size < wanted){
- av_log(avctx, AV_LOG_ERROR, "Too small EXTRABITS\n");
- s->got_extra_bits = 0;
- }
+ if (!got_terms) {
+ av_log(avctx, AV_LOG_ERROR, "No block with decorrelation terms\n");
+ return -1;
+ }
+ if (!got_weights) {
+ av_log(avctx, AV_LOG_ERROR, "No block with decorrelation weights\n");
+ return -1;
+ }
+ if (!got_samples) {
+ av_log(avctx, AV_LOG_ERROR, "No block with decorrelation samples\n");
+ return -1;
+ }
+ if (!got_entropy) {
+ av_log(avctx, AV_LOG_ERROR, "No block with entropy info\n");
+ return -1;
+ }
+ if (s->hybrid && !got_hybrid) {
+ av_log(avctx, AV_LOG_ERROR, "Hybrid config not found\n");
+ return -1;
+ }
+ if (!got_bs) {
+ av_log(avctx, AV_LOG_ERROR, "Packed samples not found\n");
+ return -1;
+ }
+ if (!got_float && avctx->sample_fmt == AV_SAMPLE_FMT_FLT) {
+ av_log(avctx, AV_LOG_ERROR, "Float information not found\n");
+ return -1;
+ }
+ if (s->got_extra_bits && avctx->sample_fmt != AV_SAMPLE_FMT_FLT) {
+ const int size = get_bits_left(&s->gb_extra_bits);
+ const int wanted = s->samples * s->extra_bits << s->stereo_in;
+ if (size < wanted) {
+ av_log(avctx, AV_LOG_ERROR, "Too small EXTRABITS\n");
+ s->got_extra_bits = 0;
}
+ }
- if(s->stereo_in){
- if(avctx->sample_fmt == AV_SAMPLE_FMT_S16)
+ if (s->stereo_in) {
+ if (avctx->sample_fmt == AV_SAMPLE_FMT_S16)
samplecount = wv_unpack_stereo(s, &s->gb, samples, AV_SAMPLE_FMT_S16);
- else if(avctx->sample_fmt == AV_SAMPLE_FMT_S32)
+ else if (avctx->sample_fmt == AV_SAMPLE_FMT_S32)
samplecount = wv_unpack_stereo(s, &s->gb, samples, AV_SAMPLE_FMT_S32);
else
samplecount = wv_unpack_stereo(s, &s->gb, samples, AV_SAMPLE_FMT_FLT);
@@ -1084,12 +1091,12 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
return -1;
samplecount >>= 1;
- }else{
+ } else {
const int channel_stride = avctx->channels;
- if(avctx->sample_fmt == AV_SAMPLE_FMT_S16)
+ if (avctx->sample_fmt == AV_SAMPLE_FMT_S16)
samplecount = wv_unpack_mono(s, &s->gb, samples, AV_SAMPLE_FMT_S16);
- else if(avctx->sample_fmt == AV_SAMPLE_FMT_S32)
+ else if (avctx->sample_fmt == AV_SAMPLE_FMT_S32)
samplecount = wv_unpack_mono(s, &s->gb, samples, AV_SAMPLE_FMT_S32);
else
samplecount = wv_unpack_mono(s, &s->gb, samples, AV_SAMPLE_FMT_FLT);
@@ -1097,29 +1104,29 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
if (samplecount < 0)
return -1;
- if(s->stereo && avctx->sample_fmt == AV_SAMPLE_FMT_S16){
+ if (s->stereo && avctx->sample_fmt == AV_SAMPLE_FMT_S16) {
int16_t *dst = (int16_t*)samples + 1;
int16_t *src = (int16_t*)samples;
int cnt = samplecount;
- while(cnt--){
+ while (cnt--) {
*dst = *src;
src += channel_stride;
dst += channel_stride;
}
- }else if(s->stereo && avctx->sample_fmt == AV_SAMPLE_FMT_S32){
+ } else if (s->stereo && avctx->sample_fmt == AV_SAMPLE_FMT_S32) {
int32_t *dst = (int32_t*)samples + 1;
int32_t *src = (int32_t*)samples;
int cnt = samplecount;
- while(cnt--){
+ while (cnt--) {
*dst = *src;
src += channel_stride;
dst += channel_stride;
}
- }else if(s->stereo){
+ } else if (s->stereo) {
float *dst = (float*)samples + 1;
float *src = (float*)samples;
int cnt = samplecount;
- while(cnt--){
+ while (cnt--) {
*dst = *src;
src += channel_stride;
dst += channel_stride;
@@ -1144,23 +1151,27 @@ static void wavpack_decode_flush(AVCodecContext *avctx)
static int wavpack_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame_ptr, AVPacket *avpkt)
{
- WavpackContext *s = avctx->priv_data;
+ WavpackContext *s = avctx->priv_data;
const uint8_t *buf = avpkt->data;
- int buf_size = avpkt->size;
- int frame_size, ret;
+ int buf_size = avpkt->size;
+ int frame_size, ret, frame_flags;
int samplecount = 0;
- s->block = 0;
+ s->block = 0;
s->ch_offset = 0;
/* determine number of samples */
- if(s->mkv_mode){
- s->samples = AV_RL32(buf); buf += 4;
+ if (s->mkv_mode) {
+ s->samples = AV_RL32(buf); buf += 4;
+ frame_flags = AV_RL32(buf);
} else {
- if (s->multichannel)
- s->samples = AV_RL32(buf + 4);
- else
- s->samples = AV_RL32(buf);
+ if (s->multichannel) {
+ s->samples = AV_RL32(buf + 4);
+ frame_flags = AV_RL32(buf + 8);
+ } else {
+ s->samples = AV_RL32(buf);
+ frame_flags = AV_RL32(buf + 4);
+ }
}
if (s->samples <= 0) {
av_log(avctx, AV_LOG_ERROR, "Invalid number of samples: %d\n",
@@ -1168,6 +1179,14 @@ static int wavpack_decode_frame(AVCodecContext *avctx, void *data,
return AVERROR(EINVAL);
}
+ if (frame_flags & 0x80) {
+ avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
+ } else if ((frame_flags & 0x03) <= 1) {
+ avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+ } else {
+ avctx->sample_fmt = AV_SAMPLE_FMT_S32;
+ }
+
/* get output buffer */
s->frame.nb_samples = s->samples;
if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
@@ -1175,26 +1194,27 @@ static int wavpack_decode_frame(AVCodecContext *avctx, void *data,
return ret;
}
- while(buf_size > 0){
- if(!s->multichannel){
+ while (buf_size > 0) {
+ if (!s->multichannel) {
frame_size = buf_size;
- }else{
- if(!s->mkv_mode){
+ } else {
+ if (!s->mkv_mode) {
frame_size = AV_RL32(buf) - 12; buf += 4; buf_size -= 4;
- }else{
- if(buf_size < 12) //MKV files can have zero flags after last block
+ } else {
+ if (buf_size < 12) //MKV files can have zero flags after last block
break;
frame_size = AV_RL32(buf + 8) + 12;
}
}
- if(frame_size < 0 || frame_size > buf_size){
- av_log(avctx, AV_LOG_ERROR, "Block %d has invalid size (size %d vs. %d bytes left)\n",
- s->block, frame_size, buf_size);
+ if (frame_size < 0 || frame_size > buf_size) {
+ av_log(avctx, AV_LOG_ERROR, "Block %d has invalid size (size %d "
+ "vs. %d bytes left)\n", s->block, frame_size, buf_size);
wavpack_decode_flush(avctx);
return -1;
}
- if((samplecount = wavpack_decode_block(avctx, s->block, s->frame.data[0],
- got_frame_ptr, buf, frame_size)) < 0) {
+ if ((samplecount = wavpack_decode_block(avctx, s->block,
+ s->frame.data[0], got_frame_ptr,
+ buf, frame_size)) < 0) {
wavpack_decode_flush(avctx);
return -1;
}
@@ -1218,5 +1238,5 @@ AVCodec ff_wavpack_decoder = {
.decode = wavpack_decode_frame,
.flush = wavpack_decode_flush,
.capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
- .long_name = NULL_IF_CONFIG_SMALL("WavPack"),
+ .long_name = NULL_IF_CONFIG_SMALL("WavPack"),
};
diff --git a/libavformat/mmst.c b/libavformat/mmst.c
index a6fe696f5f..8d6f8a3a48 100644
--- a/libavformat/mmst.c
+++ b/libavformat/mmst.c
@@ -606,7 +606,7 @@ static int mms_read(URLContext *h, uint8_t *buf, int size)
// copy the data to the packet buffer.
result = ff_mms_read_data(mms, buf, size);
if (result == 0) {
- av_dlog(NULL, "read asf media paket size is zero!\n");
+ av_dlog(NULL, "Read ASF media packet size is zero!\n");
break;
}
}
diff --git a/libavformat/mpegenc.c b/libavformat/mpegenc.c
index 455be5706d..dce1786b5e 100644
--- a/libavformat/mpegenc.c
+++ b/libavformat/mpegenc.c
@@ -429,7 +429,7 @@ static int mpeg_mux_init(AVFormatContext *ctx)
if (!s->mux_rate) {
/* we increase slightly the bitrate to take into account the
headers. XXX: compute it exactly */
- bitrate += bitrate*5LL/100;
+ bitrate += bitrate / 20;
bitrate += 10000;
s->mux_rate = (bitrate + (8 * 50) - 1) / (8 * 50);
}
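
Note on the muxrate hunk above: for non-negative integer bitrate the two expressions are equal, since floor(bitrate * 5 / 100) == floor(bitrate / 20) (e.g. bitrate = 1000003 gives 50000 either way), so the rewrite only drops the 64-bit intermediate of the 5LL multiply without changing the computed mux_rate.
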
diff --git a/libavformat/rmenc.c b/libavformat/rmenc.c
index a601331e2e..b3d0cf6a66 100644
--- a/libavformat/rmenc.c
+++ b/libavformat/rmenc.c
@@ -355,7 +355,7 @@ static int rm_write_audio(AVFormatContext *s, const uint8_t *buf, int size, int
int i;
/* XXX: suppress this malloc */
- buf1= (uint8_t*) av_malloc( size * sizeof(uint8_t) );
+ buf1 = av_malloc(size * sizeof(uint8_t));
write_packet_header(s, stream, size, !!(flags & AV_PKT_FLAG_KEY));
diff --git a/libpostproc/postprocess_template.c b/libpostproc/postprocess_template.c
index 04dd36c314..924a39b9f1 100644
--- a/libpostproc/postprocess_template.c
+++ b/libpostproc/postprocess_template.c
@@ -3369,14 +3369,14 @@ static void RENAME(postProcess)(const uint8_t src[], int srcStride, uint8_t dst[
linecpy(tempSrc + srcStride*copyAhead, srcBlock + srcStride*copyAhead,
FFMAX(height-y-copyAhead, 0), srcStride);
- /* duplicate last line of src to fill the void upto line (copyAhead+7) */
+ /* duplicate last line of src to fill the void up to line (copyAhead+7) */
for(i=FFMAX(height-y, 8); i<copyAhead+8; i++)
memcpy(tempSrc + srcStride*i, src + srcStride*(height-1), FFABS(srcStride));
/* copy up to (copyAhead+1) lines of dst (line -1 to (copyAhead-1))*/
linecpy(tempDst, dstBlock - dstStride, FFMIN(height-y+1, copyAhead+1), dstStride);
- /* duplicate last line of dst to fill the void upto line (copyAhead) */
+ /* duplicate last line of dst to fill the void up to line (copyAhead) */
for(i=height-y+1; i<=copyAhead; i++)
memcpy(tempDst + dstStride*i, dst + dstStride*(height-1), FFABS(dstStride));
diff --git a/libswscale/colorspace-test.c b/libswscale/colorspace-test.c
index 34095d8532..a5709e482e 100644
--- a/libswscale/colorspace-test.c
+++ b/libswscale/colorspace-test.c
@@ -36,8 +36,8 @@
int main(int argc, char **argv)
{
int i, funcNum;
- uint8_t *srcBuffer= (uint8_t*)av_malloc(SIZE);
- uint8_t *dstBuffer= (uint8_t*)av_malloc(SIZE);
+ uint8_t *srcBuffer = av_malloc(SIZE);
+ uint8_t *dstBuffer = av_malloc(SIZE);
int failedNum=0;
int passedNum=0;
diff --git a/tests/codec-regression.sh b/tests/codec-regression.sh
index 491e032bf7..f3ead20298 100755
--- a/tests/codec-regression.sh
+++ b/tests/codec-regression.sh
@@ -62,13 +62,13 @@ fi
if [ -n "$do_mpeg2thread" ] ; then
# mpeg2 encoding interlaced
-do_video_encoding mpeg2thread.mpg "-qscale 10 -vcodec mpeg2video -f mpeg1video -bf 2 -flags +ildct+ilme -threads 2"
+do_video_encoding mpeg2thread.mpg "-qscale 10 -vcodec mpeg2video -f mpeg1video -bf 2 -flags +ildct+ilme -threads 2 -slices 2"
do_video_decoding
fi
if [ -n "$do_mpeg2thread_ilace" ]; then
# mpeg2 encoding interlaced using intra vlc
-do_video_encoding mpeg2threadivlc.mpg "-qscale 10 -vcodec mpeg2video -f mpeg1video -bf 2 -flags +ildct+ilme -flags2 +ivlc -threads 2"
+do_video_encoding mpeg2threadivlc.mpg "-qscale 10 -vcodec mpeg2video -f mpeg1video -bf 2 -flags +ildct+ilme -flags2 +ivlc -threads 2 -slices 2"
do_video_decoding
# mpeg2 encoding interlaced
@@ -158,7 +158,7 @@ do_video_decoding
fi
if [ -n "$do_mpeg4thread" ] ; then
-do_video_encoding mpeg4-thread.avi "-b 500k -flags +mv4+part+aic -trellis 1 -mbd bits -ps 200 -bf 2 -an -vcodec mpeg4 -threads 2"
+do_video_encoding mpeg4-thread.avi "-b 500k -flags +mv4+part+aic -trellis 1 -mbd bits -ps 200 -bf 2 -an -vcodec mpeg4 -threads 2 -slices 2"
do_video_decoding
fi
diff --git a/tools/patcheck b/tools/patcheck
index cd6beee9bf..39f5715e2a 100755
--- a/tools/patcheck
+++ b/tools/patcheck
@@ -67,7 +67,7 @@ $EGREP $OPT '^\+ *(const *|)static' $*| $EGREP --color=always '[^=]= *(0|NULL)[^
cat $TMP
hiegrep '# *ifdef * (HAVE|CONFIG)_' 'ifdefs that should be #if' $*
-hiegrep '\b(awnser|cant|dont|wont|usefull|successfull|occured|teh|alot|wether|skiped|heigth|informations|colums|loosy|loosing|seperate|preceed)\b' 'common typos' $*
+hiegrep '\b(awnser|cant|dont|wont|usefull|successfull|occured|teh|alot|wether|skiped|heigth|informations|colums|loosy|loosing|seperate|preceed|upto|paket)\b' 'common typos' $*
hiegrep 'av_log\( *NULL' 'Missing context in av_log' $*
hiegrep '[^sn]printf' 'Please use av_log' $*