author     Michael Niedermayer <michaelni@gmx.at>  2011-04-25 02:47:47 +0200
committer  Michael Niedermayer <michaelni@gmx.at>  2011-04-25 03:49:47 +0200
commit     2ebd47841f16d1d521d7dd9b5ae0b8015443b690 (patch)
tree       5e32bef0eda02346d15fa212326806ab58e9103b /libavcodec
parent     9d7244c4c60d9f85f58b3770065a394c71fdce3f (diff)
parent     989fb05fe344d9666db858e0577c44969625184e (diff)
download   ffmpeg-2ebd47841f16d1d521d7dd9b5ae0b8015443b690.tar.gz
Merge branch 'master' into oldabi

* master: (172 commits)
  Check mmap() return against correct value
  Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
  vorbisdec: Employ proper printf format specifiers for uint_fast32_t.
  Support fourcc MMJP.
  Support fourcc XVIX.
  Support fourcc M263.
  Support fourcc auv2.
  Fix indentation.
  Support PARSER_FLAG_COMPLETE_FRAMES for h261 and h263 parsers.
  ffplay: avoid SIGFPE exception in SDL_DisplayYUVOverlay
  avi: try to synchronize the points in time of the starts of streams after seeking.
  Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
  Add flag to force demuxers to sort more strictly by dts.
  This enables non interleaved AVI mode for example. Players that are picky on
  strict interleaving can set this. Patches to only switch to non interleaved
  AVI mode when the index is not strictly correctly interleaved are welcome.
  Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
  applehttp: Don't export variant_bitrate if it isn't known
  crypto: Use av_freep instead of av_free
  CrystalHD: Add AVOption to configure hardware downscaling.
  Check for malloc failures in fraps decoder.
  Use av_fast_malloc instead of av_realloc in fraps decoder.
  general.texi: document libcelt decoder.
  Fix some passing argument from incompatible pointer type warnings.
  Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
  configure: Add missing libm library dependencies to .pc files.
  oggdec: reindent after 8f3eebd6
  ...

Conflicts:
    libavcodec/version.h

Merged-by: Michael Niedermayer <michaelni@gmx.at>
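As a side note on the first item in that list: mmap() reports failure with MAP_FAILED rather than NULL, so a NULL comparison silently misses errors. A minimal illustrative helper (not code from this merge):

#include <stddef.h>
#include <sys/mman.h>

/* Illustrative helper, not part of this merge: map a file read-only.
 * mmap() returns MAP_FAILED ((void *)-1) on error, so comparing the
 * result against NULL would silently accept a failed mapping. */
static void *map_readonly(int fd, size_t len)
{
    void *buf = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
    if (buf == MAP_FAILED)
        return NULL;   /* caller treats NULL as "no mapping" */
    return buf;
}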
Diffstat (limited to 'libavcodec')
-rw-r--r--  libavcodec/Makefile | 7
-rw-r--r--  libavcodec/aaccoder.c | 1
-rw-r--r--  libavcodec/aacdec.c | 28
-rw-r--r--  libavcodec/aacenc.c | 4
-rw-r--r--  libavcodec/aacpsy.c | 328
-rw-r--r--  libavcodec/aacsbr.c | 1
-rw-r--r--  libavcodec/adpcm.c | 1
-rw-r--r--  libavcodec/allcodecs.c | 1
-rw-r--r--  libavcodec/arm/fft_fixed_neon.S | 2
-rw-r--r--  libavcodec/atrac3.c | 2
-rw-r--r--  libavcodec/avcodec.h | 9
-rw-r--r--  libavcodec/cook.c | 16
-rw-r--r--  libavcodec/crystalhd.c | 67
-rw-r--r--  libavcodec/dnxhdenc.c | 1
-rw-r--r--  libavcodec/dv.c | 3
-rw-r--r--  libavcodec/ffv1.c | 3
-rw-r--r--  libavcodec/flac.c | 3
-rw-r--r--  libavcodec/flicvideo.c | 9
-rw-r--r--  libavcodec/fraps.c | 9
-rw-r--r--  libavcodec/h261_parser.c | 14
-rw-r--r--  libavcodec/h263_parser.c | 14
-rw-r--r--  libavcodec/h264.c | 4
-rw-r--r--  libavcodec/iff.c | 276
-rw-r--r--  libavcodec/libcelt_dec.c | 136
-rw-r--r--  libavcodec/libmp3lame.c | 72
-rw-r--r--  libavcodec/libvo-aacenc.c | 8
-rw-r--r--  libavcodec/libvo-amrwbenc.c | 2
-rw-r--r--  libavcodec/libx264.c | 68
-rw-r--r--  libavcodec/loco.c | 2
-rw-r--r--  libavcodec/mjpegdec.c | 4
-rw-r--r--  libavcodec/mlp_parser.c | 14
-rw-r--r--  libavcodec/mlp_parser.h | 3
-rw-r--r--  libavcodec/mlpdec.c | 17
-rw-r--r--  libavcodec/mpeg12.c | 4
-rw-r--r--  libavcodec/mpeg12enc.c | 4
-rw-r--r--  libavcodec/mpeg4videoenc.c | 2
-rw-r--r--  libavcodec/mpegvideo_enc.c | 1
-rw-r--r--  libavcodec/opt.c | 89
-rw-r--r--  libavcodec/pcm.c | 2
-rw-r--r--  libavcodec/pthread.c | 3
-rw-r--r--  libavcodec/raw.c | 1
-rw-r--r--  libavcodec/rv10.c | 2
-rw-r--r--  libavcodec/sp5xdec.c | 3
-rw-r--r--  libavcodec/truemotion1.c | 4
-rw-r--r--  libavcodec/tta.c | 5
-rw-r--r--  libavcodec/twinvq.c | 2
-rw-r--r--  libavcodec/utils.c | 23
-rw-r--r--  libavcodec/v210x.c | 2
-rw-r--r--  libavcodec/version.h | 10
-rw-r--r--  libavcodec/vorbisdec.c (renamed from libavcodec/vorbis_dec.c) | 2
-rw-r--r--  libavcodec/vorbisenc.c (renamed from libavcodec/vorbis_enc.c) | 0
-rw-r--r--  libavcodec/vqavideo.c | 2
-rw-r--r--  libavcodec/wnv1.c | 6
53 files changed, 1074 insertions, 222 deletions
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index e1432b3a96..df4c248644 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -141,7 +141,7 @@ OBJS-$(CONFIG_FFV1_DECODER) += ffv1.o rangecoder.o
OBJS-$(CONFIG_FFV1_ENCODER) += ffv1.o rangecoder.o
OBJS-$(CONFIG_FFVHUFF_DECODER) += huffyuv.o
OBJS-$(CONFIG_FFVHUFF_ENCODER) += huffyuv.o
-OBJS-$(CONFIG_FLAC_DECODER) += flacdec.o flacdata.o flac.o
+OBJS-$(CONFIG_FLAC_DECODER) += flacdec.o flacdata.o flac.o vorbis_data.o
OBJS-$(CONFIG_FLAC_ENCODER) += flacenc.o flacdata.o flac.o
OBJS-$(CONFIG_FLASHSV_DECODER) += flashsv.o
OBJS-$(CONFIG_FLASHSV_ENCODER) += flashsvenc.o
@@ -393,9 +393,9 @@ OBJS-$(CONFIG_VCR1_ENCODER) += vcr1.o
OBJS-$(CONFIG_VMDAUDIO_DECODER) += vmdav.o
OBJS-$(CONFIG_VMDVIDEO_DECODER) += vmdav.o
OBJS-$(CONFIG_VMNC_DECODER) += vmnc.o
-OBJS-$(CONFIG_VORBIS_DECODER) += vorbis_dec.o vorbis.o \
+OBJS-$(CONFIG_VORBIS_DECODER) += vorbisdec.o vorbis.o \
vorbis_data.o xiph.o
-OBJS-$(CONFIG_VORBIS_ENCODER) += vorbis_enc.o vorbis.o \
+OBJS-$(CONFIG_VORBIS_ENCODER) += vorbisenc.o vorbis.o \
vorbis_data.o
OBJS-$(CONFIG_VP3_DECODER) += vp3.o vp3dsp.o
OBJS-$(CONFIG_VP5_DECODER) += vp5.o vp56.o vp56data.o vp56dsp.o \
@@ -554,6 +554,7 @@ OBJS-$(CONFIG_WEBM_MUXER) += xiph.o mpeg4audio.o \
mpegaudiodata.o
# external codec libraries
+OBJS-$(CONFIG_LIBCELT_DECODER) += libcelt_dec.o
OBJS-$(CONFIG_LIBDIRAC_DECODER) += libdiracdec.o
OBJS-$(CONFIG_LIBDIRAC_ENCODER) += libdiracenc.o libdirac_libschro.o
OBJS-$(CONFIG_LIBFAAC_ENCODER) += libfaac.o
diff --git a/libavcodec/aaccoder.c b/libavcodec/aaccoder.c
index 60be5ef47d..9748fe1e1a 100644
--- a/libavcodec/aaccoder.c
+++ b/libavcodec/aaccoder.c
@@ -31,6 +31,7 @@
***********************************/
#include <float.h>
+#include <math.h>
#include "avcodec.h"
#include "put_bits.h"
#include "aac.h"
diff --git a/libavcodec/aacdec.c b/libavcodec/aacdec.c
index a0b67f4af7..7b1e501f83 100644
--- a/libavcodec/aacdec.c
+++ b/libavcodec/aacdec.c
@@ -180,9 +180,8 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
* @return Returns error status. 0 - OK, !0 - error
*/
static av_cold int che_configure(AACContext *ac,
- enum ChannelPosition che_pos[4][MAX_ELEM_ID],
- int type, int id,
- int *channels)
+ enum ChannelPosition che_pos[4][MAX_ELEM_ID],
+ int type, int id, int *channels)
{
if (che_pos[type][id]) {
if (!ac->che[type][id] && !(ac->che[type][id] = av_mallocz(sizeof(ChannelElement))))
@@ -212,9 +211,9 @@ static av_cold int che_configure(AACContext *ac,
* @return Returns error status. 0 - OK, !0 - error
*/
static av_cold int output_configure(AACContext *ac,
- enum ChannelPosition che_pos[4][MAX_ELEM_ID],
- enum ChannelPosition new_che_pos[4][MAX_ELEM_ID],
- int channel_config, enum OCStatus oc_type)
+ enum ChannelPosition che_pos[4][MAX_ELEM_ID],
+ enum ChannelPosition new_che_pos[4][MAX_ELEM_ID],
+ int channel_config, enum OCStatus oc_type)
{
AVCodecContext *avctx = ac->avctx;
int i, type, channels = 0, ret;
@@ -231,7 +230,7 @@ static av_cold int output_configure(AACContext *ac,
return ret;
}
- memset(ac->tag_che_map, 0, 4 * MAX_ELEM_ID * sizeof(ac->che[0][0]));
+ memset(ac->tag_che_map, 0, 4 * MAX_ELEM_ID * sizeof(ac->che[0][0]));
avctx->channel_layout = aac_channel_layout[channel_config - 1];
} else {
@@ -346,8 +345,8 @@ static int decode_pce(AVCodecContext *avctx, MPEG4AudioConfig *m4ac,
* @return Returns error status. 0 - OK, !0 - error
*/
static av_cold int set_default_channel_config(AVCodecContext *avctx,
- enum ChannelPosition new_che_pos[4][MAX_ELEM_ID],
- int channel_config)
+ enum ChannelPosition new_che_pos[4][MAX_ELEM_ID],
+ int channel_config)
{
if (channel_config < 1 || channel_config > 7) {
av_log(avctx, AV_LOG_ERROR, "invalid default channel configuration (%d)\n",
@@ -464,6 +463,11 @@ static int decode_audio_specific_config(AACContext *ac,
GetBitContext gb;
int i;
+ av_dlog(avctx, "extradata size %d\n", avctx->extradata_size);
+ for (i = 0; i < avctx->extradata_size; i++)
+ av_dlog(avctx, "%02x ", avctx->extradata[i]);
+ av_dlog(avctx, "\n");
+
init_get_bits(&gb, data, data_size * 8);
if ((i = ff_mpeg4audio_get_config(m4ac, data, data_size)) < 0)
@@ -490,6 +494,10 @@ static int decode_audio_specific_config(AACContext *ac,
return -1;
}
+ av_dlog(avctx, "AOT %d chan config %d sampling index %d (%d) SBR %d PS %d\n",
+ m4ac->object_type, m4ac->chan_config, m4ac->sampling_index,
+ m4ac->sample_rate, m4ac->sbr, m4ac->ps);
+
return get_bits_count(&gb);
}
@@ -1240,7 +1248,7 @@ static av_always_inline float flt16_trunc(float pf)
static av_always_inline void predict(PredictorState *ps, float *coef,
float sf_scale, float inv_sf_scale,
- int output_enable)
+ int output_enable)
{
const float a = 0.953125; // 61.0 / 64
const float alpha = 0.90625; // 29.0 / 32
diff --git a/libavcodec/aacenc.c b/libavcodec/aacenc.c
index 8fc8f70726..8843cbdb59 100644
--- a/libavcodec/aacenc.c
+++ b/libavcodec/aacenc.c
@@ -606,8 +606,10 @@ static int aac_encode_frame(AVCodecContext *avctx,
}
frame_bits = put_bits_count(&s->pb);
- if (frame_bits <= 6144 * avctx->channels - 3)
+ if (frame_bits <= 6144 * avctx->channels - 3) {
+ s->psy.bitres.bits = frame_bits / avctx->channels;
break;
+ }
s->lambda *= avctx->bit_rate * 1024.0f / avctx->sample_rate / frame_bits;
diff --git a/libavcodec/aacpsy.c b/libavcodec/aacpsy.c
index 9938e3710a..baf9388398 100644
--- a/libavcodec/aacpsy.c
+++ b/libavcodec/aacpsy.c
@@ -30,7 +30,6 @@
/***********************************
* TODOs:
- * thresholds linearization after their modifications for attaining given bitrate
* try other bitrate controlling mechanism (maybe use ratecontrol.c?)
* control quality for quality-based output
**********************************/
@@ -41,10 +40,51 @@
*/
#define PSY_3GPP_THR_SPREAD_HI 1.5f // spreading factor for low-to-hi threshold spreading (15 dB/Bark)
#define PSY_3GPP_THR_SPREAD_LOW 3.0f // spreading factor for hi-to-low threshold spreading (30 dB/Bark)
+/* spreading factor for low-to-hi energy spreading, long block, > 22kbps/channel (20dB/Bark) */
+#define PSY_3GPP_EN_SPREAD_HI_L1 2.0f
+/* spreading factor for low-to-hi energy spreading, long block, <= 22kbps/channel (15dB/Bark) */
+#define PSY_3GPP_EN_SPREAD_HI_L2 1.5f
+/* spreading factor for low-to-hi energy spreading, short block (15 dB/Bark) */
+#define PSY_3GPP_EN_SPREAD_HI_S 1.5f
+/* spreading factor for hi-to-low energy spreading, long block (30dB/Bark) */
+#define PSY_3GPP_EN_SPREAD_LOW_L 3.0f
+/* spreading factor for hi-to-low energy spreading, short block (20dB/Bark) */
+#define PSY_3GPP_EN_SPREAD_LOW_S 2.0f
#define PSY_3GPP_RPEMIN 0.01f
#define PSY_3GPP_RPELEV 2.0f
+#define PSY_3GPP_C1 3.0f /* log2(8) */
+#define PSY_3GPP_C2 1.3219281f /* log2(2.5) */
+#define PSY_3GPP_C3 0.55935729f /* 1 - C2 / C1 */
+
+#define PSY_SNR_1DB 7.9432821e-1f /* -1dB */
+#define PSY_SNR_25DB 3.1622776e-3f /* -25dB */
+
+#define PSY_3GPP_SAVE_SLOPE_L -0.46666667f
+#define PSY_3GPP_SAVE_SLOPE_S -0.36363637f
+#define PSY_3GPP_SAVE_ADD_L -0.84285712f
+#define PSY_3GPP_SAVE_ADD_S -0.75f
+#define PSY_3GPP_SPEND_SLOPE_L 0.66666669f
+#define PSY_3GPP_SPEND_SLOPE_S 0.81818181f
+#define PSY_3GPP_SPEND_ADD_L -0.35f
+#define PSY_3GPP_SPEND_ADD_S -0.26111111f
+#define PSY_3GPP_CLIP_LO_L 0.2f
+#define PSY_3GPP_CLIP_LO_S 0.2f
+#define PSY_3GPP_CLIP_HI_L 0.95f
+#define PSY_3GPP_CLIP_HI_S 0.75f
+
+#define PSY_3GPP_AH_THR_LONG 0.5f
+#define PSY_3GPP_AH_THR_SHORT 0.63f
+
+enum {
+ PSY_3GPP_AH_NONE,
+ PSY_3GPP_AH_INACTIVE,
+ PSY_3GPP_AH_ACTIVE
+};
+
+#define PSY_3GPP_BITS_TO_PE(bits) ((bits) * 1.18f)
+
/* LAME psy model constants */
#define PSY_LAME_FIR_LEN 21 ///< LAME psy model FIR order
#define AAC_BLOCK_SIZE_LONG 1024 ///< long block size
@@ -60,9 +100,15 @@
* information for single band used by 3GPP TS26.403-inspired psychoacoustic model
*/
typedef struct AacPsyBand{
- float energy; ///< band energy
- float thr; ///< energy threshold
- float thr_quiet; ///< threshold in quiet
+ float energy; ///< band energy
+ float thr; ///< energy threshold
+ float thr_quiet; ///< threshold in quiet
+ float nz_lines; ///< number of non-zero spectral lines
+ float active_lines; ///< number of active spectral lines
+ float pe; ///< perceptual entropy
+ float pe_const; ///< constant part of the PE calculation
+ float norm_fac; ///< normalization factor for linearization
+ int avoid_holes; ///< hole avoidance flag
}AacPsyBand;
/**
@@ -97,6 +143,15 @@ typedef struct AacPsyCoeffs{
* 3GPP TS26.403-inspired psychoacoustic model specific data
*/
typedef struct AacPsyContext{
+ int chan_bitrate; ///< bitrate per channel
+ int frame_bits; ///< average bits per frame
+ int fill_level; ///< bit reservoir fill level
+ struct {
+ float min; ///< minimum allowed PE for bit factor calculation
+ float max; ///< maximum allowed PE for bit factor calculation
+ float previous; ///< allowed PE of the previous frame
+ float correction; ///< PE correction factor
+ } pe;
AacPsyCoeffs psy_coef[2][64];
AacPsyChannel *ch;
}AacPsyContext;
@@ -235,16 +290,33 @@ static av_cold int psy_3gpp_init(FFPsyContext *ctx) {
AacPsyContext *pctx;
float bark;
int i, j, g, start;
- float prev, minscale, minath;
+ float prev, minscale, minath, minsnr, pe_min;
+ const int chan_bitrate = ctx->avctx->bit_rate / ctx->avctx->channels;
+ const int bandwidth = ctx->avctx->cutoff ? ctx->avctx->cutoff : ctx->avctx->sample_rate / 2;
+ const float num_bark = calc_bark((float)bandwidth);
ctx->model_priv_data = av_mallocz(sizeof(AacPsyContext));
pctx = (AacPsyContext*) ctx->model_priv_data;
+ pctx->chan_bitrate = chan_bitrate;
+ pctx->frame_bits = chan_bitrate * AAC_BLOCK_SIZE_LONG / ctx->avctx->sample_rate;
+ pctx->pe.min = 8.0f * AAC_BLOCK_SIZE_LONG * bandwidth / (ctx->avctx->sample_rate * 2.0f);
+ pctx->pe.max = 12.0f * AAC_BLOCK_SIZE_LONG * bandwidth / (ctx->avctx->sample_rate * 2.0f);
+ ctx->bitres.size = 6144 - pctx->frame_bits;
+ ctx->bitres.size -= ctx->bitres.size % 8;
+ pctx->fill_level = ctx->bitres.size;
minath = ath(3410, ATH_ADD);
for (j = 0; j < 2; j++) {
AacPsyCoeffs *coeffs = pctx->psy_coef[j];
const uint8_t *band_sizes = ctx->bands[j];
float line_to_frequency = ctx->avctx->sample_rate / (j ? 256.f : 2048.0f);
+ float avg_chan_bits = chan_bitrate / ctx->avctx->sample_rate * (j ? 128.0f : 1024.0f);
+ /* reference encoder uses 2.4% here instead of 60% like the spec says */
+ float bark_pe = 0.024f * PSY_3GPP_BITS_TO_PE(avg_chan_bits) / num_bark;
+ float en_spread_low = j ? PSY_3GPP_EN_SPREAD_LOW_S : PSY_3GPP_EN_SPREAD_LOW_L;
+ /* High energy spreading for long blocks <= 22kbps/channel and short blocks are the same. */
+ float en_spread_hi = (j || (chan_bitrate <= 22.0f)) ? PSY_3GPP_EN_SPREAD_HI_S : PSY_3GPP_EN_SPREAD_HI_L1;
+
i = 0;
prev = 0.0;
for (g = 0; g < ctx->num_bands[j]; g++) {
@@ -258,6 +330,11 @@ static av_cold int psy_3gpp_init(FFPsyContext *ctx) {
float bark_width = coeffs[g+1].barks - coeffs->barks;
coeff->spread_low[0] = pow(10.0, -bark_width * PSY_3GPP_THR_SPREAD_LOW);
coeff->spread_hi [0] = pow(10.0, -bark_width * PSY_3GPP_THR_SPREAD_HI);
+ coeff->spread_low[1] = pow(10.0, -bark_width * en_spread_low);
+ coeff->spread_hi [1] = pow(10.0, -bark_width * en_spread_hi);
+ pe_min = bark_pe * bark_width;
+ minsnr = pow(2.0f, pe_min / band_sizes[g]) - 1.5f;
+ coeff->min_snr = av_clipf(1.0f / minsnr, PSY_SNR_25DB, PSY_SNR_1DB);
}
start = 0;
for (g = 0; g < ctx->num_bands[j]; g++) {
@@ -385,6 +462,97 @@ static FFPsyWindowInfo psy_3gpp_window(FFPsyContext *ctx,
return wi;
}
+/* 5.6.1.2 "Calculation of Bit Demand" */
+static int calc_bit_demand(AacPsyContext *ctx, float pe, int bits, int size,
+ int short_window)
+{
+ const float bitsave_slope = short_window ? PSY_3GPP_SAVE_SLOPE_S : PSY_3GPP_SAVE_SLOPE_L;
+ const float bitsave_add = short_window ? PSY_3GPP_SAVE_ADD_S : PSY_3GPP_SAVE_ADD_L;
+ const float bitspend_slope = short_window ? PSY_3GPP_SPEND_SLOPE_S : PSY_3GPP_SPEND_SLOPE_L;
+ const float bitspend_add = short_window ? PSY_3GPP_SPEND_ADD_S : PSY_3GPP_SPEND_ADD_L;
+ const float clip_low = short_window ? PSY_3GPP_CLIP_LO_S : PSY_3GPP_CLIP_LO_L;
+ const float clip_high = short_window ? PSY_3GPP_CLIP_HI_S : PSY_3GPP_CLIP_HI_L;
+ float clipped_pe, bit_save, bit_spend, bit_factor, fill_level;
+
+ ctx->fill_level += ctx->frame_bits - bits;
+ ctx->fill_level = av_clip(ctx->fill_level, 0, size);
+ fill_level = av_clipf((float)ctx->fill_level / size, clip_low, clip_high);
+ clipped_pe = av_clipf(pe, ctx->pe.min, ctx->pe.max);
+ bit_save = (fill_level + bitsave_add) * bitsave_slope;
+ assert(bit_save <= 0.3f && bit_save >= -0.05000001f);
+ bit_spend = (fill_level + bitspend_add) * bitspend_slope;
+ assert(bit_spend <= 0.5f && bit_spend >= -0.1f);
+ /* The bit factor graph in the spec is obviously incorrect.
+ * bit_spend + ((bit_spend - bit_spend))...
+ * The reference encoder subtracts everything from 1, but also seems incorrect.
+ * 1 - bit_save + ((bit_spend + bit_save))...
+ * Hopefully below is correct.
+ */
+ bit_factor = 1.0f - bit_save + ((bit_spend - bit_save) / (ctx->pe.max - ctx->pe.min)) * (clipped_pe - ctx->pe.min);
+ /* NOTE: The reference encoder attempts to center pe max/min around the current pe. */
+ ctx->pe.max = FFMAX(pe, ctx->pe.max);
+ ctx->pe.min = FFMIN(pe, ctx->pe.min);
+
+ return FFMIN(ctx->frame_bits * bit_factor, ctx->frame_bits + size - bits);
+}
+
+static float calc_pe_3gpp(AacPsyBand *band)
+{
+ float pe, a;
+
+ band->pe = 0.0f;
+ band->pe_const = 0.0f;
+ band->active_lines = 0.0f;
+ if (band->energy > band->thr) {
+ a = log2f(band->energy);
+ pe = a - log2f(band->thr);
+ band->active_lines = band->nz_lines;
+ if (pe < PSY_3GPP_C1) {
+ pe = pe * PSY_3GPP_C3 + PSY_3GPP_C2;
+ a = a * PSY_3GPP_C3 + PSY_3GPP_C2;
+ band->active_lines *= PSY_3GPP_C3;
+ }
+ band->pe = pe * band->nz_lines;
+ band->pe_const = a * band->nz_lines;
+ }
+
+ return band->pe;
+}
+
+static float calc_reduction_3gpp(float a, float desired_pe, float pe,
+ float active_lines)
+{
+ float thr_avg, reduction;
+
+ thr_avg = powf(2.0f, (a - pe) / (4.0f * active_lines));
+ reduction = powf(2.0f, (a - desired_pe) / (4.0f * active_lines)) - thr_avg;
+
+ return FFMAX(reduction, 0.0f);
+}
+
+static float calc_reduced_thr_3gpp(AacPsyBand *band, float min_snr,
+ float reduction)
+{
+ float thr = band->thr;
+
+ if (band->energy > thr) {
+ thr = powf(thr, 0.25f) + reduction;
+ thr = powf(thr, 4.0f);
+
+ /* This deviates from the 3GPP spec to match the reference encoder.
+ * It performs min(thr_reduced, max(thr, energy/min_snr)) only for bands
+ * that have hole avoidance on (active or inactive). It always reduces the
+ * threshold of bands with hole avoidance off.
+ */
+ if (thr > band->energy * min_snr && band->avoid_holes != PSY_3GPP_AH_NONE) {
+ thr = FFMAX(band->thr, band->energy * min_snr);
+ band->avoid_holes = PSY_3GPP_AH_ACTIVE;
+ }
+ }
+
+ return thr;
+}
+
/**
* Calculate band thresholds as suggested in 3GPP TS26.403
*/
@@ -395,37 +563,167 @@ static void psy_3gpp_analyze(FFPsyContext *ctx, int channel,
AacPsyChannel *pch = &pctx->ch[channel];
int start = 0;
int i, w, g;
- const int num_bands = ctx->num_bands[wi->num_windows == 8];
- const uint8_t *band_sizes = ctx->bands[wi->num_windows == 8];
- AacPsyCoeffs *coeffs = pctx->psy_coef[wi->num_windows == 8];
+ float desired_bits, desired_pe, delta_pe, reduction, spread_en[128] = {0};
+ float a = 0.0f, active_lines = 0.0f, norm_fac = 0.0f;
+ float pe = pctx->chan_bitrate > 32000 ? 0.0f : FFMAX(50.0f, 100.0f - pctx->chan_bitrate * 100.0f / 32000.0f);
+ const int num_bands = ctx->num_bands[wi->num_windows == 8];
+ const uint8_t *band_sizes = ctx->bands[wi->num_windows == 8];
+ AacPsyCoeffs *coeffs = pctx->psy_coef[wi->num_windows == 8];
+ const float avoid_hole_thr = wi->num_windows == 8 ? PSY_3GPP_AH_THR_SHORT : PSY_3GPP_AH_THR_LONG;
//calculate energies, initial thresholds and related values - 5.4.2 "Threshold Calculation"
for (w = 0; w < wi->num_windows*16; w += 16) {
for (g = 0; g < num_bands; g++) {
AacPsyBand *band = &pch->band[w+g];
+
+ float form_factor = 0.0f;
band->energy = 0.0f;
- for (i = 0; i < band_sizes[g]; i++)
+ for (i = 0; i < band_sizes[g]; i++) {
band->energy += coefs[start+i] * coefs[start+i];
- band->thr = band->energy * 0.001258925f;
- start += band_sizes[g];
+ form_factor += sqrtf(fabs(coefs[start+i]));
+ }
+ band->thr = band->energy * 0.001258925f;
+ band->nz_lines = form_factor / powf(band->energy / band_sizes[g], 0.25f);
+
+ start += band_sizes[g];
}
}
//modify thresholds and energies - spread, threshold in quiet, pre-echo control
for (w = 0; w < wi->num_windows*16; w += 16) {
AacPsyBand *bands = &pch->band[w];
+
//5.4.2.3 "Spreading" & 5.4.3 "Spreaded Energy Calculation"
- for (g = 1; g < num_bands; g++)
- bands[g].thr = FFMAX(bands[g].thr, bands[g-1].thr * coeffs[g].spread_hi[0]);
- for (g = num_bands - 2; g >= 0; g--)
- bands[g].thr = FFMAX(bands[g].thr, bands[g+1].thr * coeffs[g].spread_low[0]);
+ spread_en[0] = bands[0].energy;
+ for (g = 1; g < num_bands; g++) {
+ bands[g].thr = FFMAX(bands[g].thr, bands[g-1].thr * coeffs[g].spread_hi[0]);
+ spread_en[w+g] = FFMAX(bands[g].energy, spread_en[w+g-1] * coeffs[g].spread_hi[1]);
+ }
+ for (g = num_bands - 2; g >= 0; g--) {
+ bands[g].thr = FFMAX(bands[g].thr, bands[g+1].thr * coeffs[g].spread_low[0]);
+ spread_en[w+g] = FFMAX(spread_en[w+g], spread_en[w+g+1] * coeffs[g].spread_low[1]);
+ }
//5.4.2.4 "Threshold in quiet"
for (g = 0; g < num_bands; g++) {
AacPsyBand *band = &bands[g];
+
band->thr_quiet = band->thr = FFMAX(band->thr, coeffs[g].ath);
//5.4.2.5 "Pre-echo control"
if (!(wi->window_type[0] == LONG_STOP_SEQUENCE || (wi->window_type[1] == LONG_START_SEQUENCE && !w)))
band->thr = FFMAX(PSY_3GPP_RPEMIN*band->thr, FFMIN(band->thr,
PSY_3GPP_RPELEV*pch->prev_band[w+g].thr_quiet));
+
+ /* 5.6.1.3.1 "Prepatory steps of the perceptual entropy calculation" */
+ pe += calc_pe_3gpp(band);
+ a += band->pe_const;
+ active_lines += band->active_lines;
+
+ /* 5.6.1.3.3 "Selection of the bands for avoidance of holes" */
+ if (spread_en[w+g] * avoid_hole_thr > band->energy || coeffs[g].min_snr > 1.0f)
+ band->avoid_holes = PSY_3GPP_AH_NONE;
+ else
+ band->avoid_holes = PSY_3GPP_AH_INACTIVE;
+ }
+ }
+
+ /* 5.6.1.3.2 "Calculation of the desired perceptual entropy" */
+ ctx->pe[channel] = pe;
+ desired_bits = calc_bit_demand(pctx, pe, ctx->bitres.bits, ctx->bitres.size, wi->num_windows == 8);
+ desired_pe = PSY_3GPP_BITS_TO_PE(desired_bits);
+ /* NOTE: PE correction is kept simple. During initial testing it had very
+ * little effect on the final bitrate. Probably a good idea to come
+ * back and do more testing later.
+ */
+ if (ctx->bitres.bits > 0)
+ desired_pe *= av_clipf(pctx->pe.previous / PSY_3GPP_BITS_TO_PE(ctx->bitres.bits),
+ 0.85f, 1.15f);
+ pctx->pe.previous = PSY_3GPP_BITS_TO_PE(desired_bits);
+
+ if (desired_pe < pe) {
+ /* 5.6.1.3.4 "First Estimation of the reduction value" */
+ for (w = 0; w < wi->num_windows*16; w += 16) {
+ reduction = calc_reduction_3gpp(a, desired_pe, pe, active_lines);
+ pe = 0.0f;
+ a = 0.0f;
+ active_lines = 0.0f;
+ for (g = 0; g < num_bands; g++) {
+ AacPsyBand *band = &pch->band[w+g];
+
+ band->thr = calc_reduced_thr_3gpp(band, coeffs[g].min_snr, reduction);
+ /* recalculate PE */
+ pe += calc_pe_3gpp(band);
+ a += band->pe_const;
+ active_lines += band->active_lines;
+ }
+ }
+
+ /* 5.6.1.3.5 "Second Estimation of the reduction value" */
+ for (i = 0; i < 2; i++) {
+ float pe_no_ah = 0.0f, desired_pe_no_ah;
+ active_lines = a = 0.0f;
+ for (w = 0; w < wi->num_windows*16; w += 16) {
+ for (g = 0; g < num_bands; g++) {
+ AacPsyBand *band = &pch->band[w+g];
+
+ if (band->avoid_holes != PSY_3GPP_AH_ACTIVE) {
+ pe_no_ah += band->pe;
+ a += band->pe_const;
+ active_lines += band->active_lines;
+ }
+ }
+ }
+ desired_pe_no_ah = FFMAX(desired_pe - (pe - pe_no_ah), 0.0f);
+ if (active_lines > 0.0f)
+ reduction += calc_reduction_3gpp(a, desired_pe_no_ah, pe_no_ah, active_lines);
+
+ pe = 0.0f;
+ for (w = 0; w < wi->num_windows*16; w += 16) {
+ for (g = 0; g < num_bands; g++) {
+ AacPsyBand *band = &pch->band[w+g];
+
+ if (active_lines > 0.0f)
+ band->thr = calc_reduced_thr_3gpp(band, coeffs[g].min_snr, reduction);
+ pe += calc_pe_3gpp(band);
+ band->norm_fac = band->active_lines / band->thr;
+ norm_fac += band->norm_fac;
+ }
+ }
+ delta_pe = desired_pe - pe;
+ if (fabs(delta_pe) > 0.05f * desired_pe)
+ break;
+ }
+
+ if (pe < 1.15f * desired_pe) {
+ /* 6.6.1.3.6 "Final threshold modification by linearization" */
+ norm_fac = 1.0f / norm_fac;
+ for (w = 0; w < wi->num_windows*16; w += 16) {
+ for (g = 0; g < num_bands; g++) {
+ AacPsyBand *band = &pch->band[w+g];
+
+ if (band->active_lines > 0.5f) {
+ float delta_sfb_pe = band->norm_fac * norm_fac * delta_pe;
+ float thr = band->thr;
+
+ thr *= powf(2.0f, delta_sfb_pe / band->active_lines);
+ if (thr > coeffs[g].min_snr * band->energy && band->avoid_holes == PSY_3GPP_AH_INACTIVE)
+ thr = FFMAX(band->thr, coeffs[g].min_snr * band->energy);
+ band->thr = thr;
+ }
+ }
+ }
+ } else {
+ /* 5.6.1.3.7 "Further perceptual entropy reduction" */
+ g = num_bands;
+ while (pe > desired_pe && g--) {
+ for (w = 0; w < wi->num_windows*16; w+= 16) {
+ AacPsyBand *band = &pch->band[w+g];
+ if (band->avoid_holes != PSY_3GPP_AH_NONE && coeffs[g].min_snr < PSY_SNR_1DB) {
+ coeffs[g].min_snr = PSY_SNR_1DB;
+ band->thr = band->energy * PSY_SNR_1DB;
+ pe += band->active_lines * 1.5f - band->pe;
+ }
+ }
+ }
+ /* TODO: allow more holes (unused without mid/side) */
}
}
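To make the new bit-reservoir bookkeeping concrete, the following standalone sketch plugs illustrative numbers (128 kbps stereo at 44.1 kHz, no cutoff; values chosen for illustration, not taken from the patch) into the formulas psy_3gpp_init() now uses, plus the PSY_3GPP_BITS_TO_PE() conversion:

#include <stdio.h>

/* Standalone illustration (not decoder code): evaluate the reservoir and PE
 * limits that psy_3gpp_init() derives, for a made-up 128 kbps stereo stream
 * at 44.1 kHz with no cutoff set. */
int main(void)
{
    const int bit_rate = 128000, channels = 2, sample_rate = 44100;
    const int bandwidth    = sample_rate / 2;                   /* no cutoff      */
    const int chan_bitrate = bit_rate / channels;               /* 64000 bit/s    */
    const int frame_bits   = chan_bitrate * 1024 / sample_rate; /* 1486 per frame */
    int bitres_size        = 6144 - frame_bits;                 /* 4658           */
    bitres_size           -= bitres_size % 8;                   /* 4656           */
    const float pe_min = 8.0f  * 1024 * bandwidth / (sample_rate * 2.0f); /* 2048 */
    const float pe_max = 12.0f * 1024 * bandwidth / (sample_rate * 2.0f); /* 3072 */

    /* PSY_3GPP_BITS_TO_PE(frame_bits) gives the perceptual entropy a balanced
     * reservoir aims for: 1486 * 1.18 ~ 1753. */
    printf("frame_bits=%d reservoir=%d pe_min=%.0f pe_max=%.0f desired_pe~%.0f\n",
           frame_bits, bitres_size, pe_min, pe_max, frame_bits * 1.18f);
    return 0;
}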
diff --git a/libavcodec/aacsbr.c b/libavcodec/aacsbr.c
index 117aa98875..6ac2cbc06a 100644
--- a/libavcodec/aacsbr.c
+++ b/libavcodec/aacsbr.c
@@ -35,6 +35,7 @@
#include <stdint.h>
#include <float.h>
+#include <math.h>
#define ENVELOPE_ADJUSTMENT_OFFSET 2
#define NOISE_FLOOR_OFFSET 6.0f
diff --git a/libavcodec/adpcm.c b/libavcodec/adpcm.c
index 6252dbcb6a..c1ceca918a 100644
--- a/libavcodec/adpcm.c
+++ b/libavcodec/adpcm.c
@@ -750,6 +750,7 @@ static av_cold int adpcm_decode_init(AVCodecContext * avctx)
case CODEC_ID_ADPCM_EA_R1:
case CODEC_ID_ADPCM_EA_R2:
case CODEC_ID_ADPCM_EA_R3:
+ case CODEC_ID_ADPCM_EA_XAS:
max_channels = 6;
break;
}
diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
index 56fa6280a2..eade67fb7a 100644
--- a/libavcodec/allcodecs.c
+++ b/libavcodec/allcodecs.c
@@ -365,6 +365,7 @@ void avcodec_register_all(void)
REGISTER_ENCDEC (XSUB, xsub);
/* external libraries */
+ REGISTER_DECODER (LIBCELT, libcelt);
REGISTER_ENCDEC (LIBDIRAC, libdirac);
REGISTER_ENCODER (LIBFAAC, libfaac);
REGISTER_ENCDEC (LIBGSM, libgsm);
diff --git a/libavcodec/arm/fft_fixed_neon.S b/libavcodec/arm/fft_fixed_neon.S
index 14884d3736..bd6c853ec8 100644
--- a/libavcodec/arm/fft_fixed_neon.S
+++ b/libavcodec/arm/fft_fixed_neon.S
@@ -122,7 +122,7 @@ endfunc
function fft_pass_neon
push {r4,lr}
- movrel lr, coefs + 24
+ movrel lr, coefs+24
vld1.16 {d30}, [lr,:64]
lsl r12, r2, #3
vmov d31, d30
diff --git a/libavcodec/atrac3.c b/libavcodec/atrac3.c
index 3bf514c8b3..bd49169465 100644
--- a/libavcodec/atrac3.c
+++ b/libavcodec/atrac3.c
@@ -186,7 +186,7 @@ static int decode_bytes(const uint8_t* inbuffer, uint8_t* out, int bytes){
obuf[i] = c ^ buf[i];
if (off)
- av_log(NULL,AV_LOG_DEBUG,"Offset of %d not handled, post sample on ffmpeg-dev.\n",off);
+ av_log_ask_for_sample(NULL, "Offset of %d not handled.\n", off);
return off;
}
diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
index 0bb11c1bf3..a30edbe77c 100644
--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@ -342,6 +342,7 @@ enum CodecID {
CODEC_ID_BINKAUDIO_DCT,
CODEC_ID_AAC_LATM,
CODEC_ID_QDMC,
+ CODEC_ID_CELT,
/* subtitle codecs */
CODEC_ID_DVD_SUBTITLE= 0x17000,
@@ -700,6 +701,10 @@ typedef struct RcOverride{
* Codec supports frame-level multithreading.
*/
#define CODEC_CAP_FRAME_THREADS 0x1000
+/**
+ * Codec supports slice-based (or partition-based) multithreading.
+ */
+#define CODEC_CAP_SLICE_THREADS 0x2000
//The following defines may change, don't expect compatibility if you use them.
#define MB_TYPE_INTRA4x4 0x0001
@@ -3702,7 +3707,7 @@ int avcodec_check_dimensions(void *av_log_ctx, unsigned int w, unsigned int h);
enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat * fmt);
-#if LIBAVCODEC_VERSION_MAJOR < 53
+#if FF_API_THREAD_INIT
/**
* @deprecated Set s->thread_count before calling avcodec_open() instead of calling this.
*/
@@ -4319,7 +4324,7 @@ void av_log_missing_feature(void *avc, const char *feature, int want_sample);
* a pointer to an AVClass struct
* @param[in] msg string containing an optional message, or NULL if no message
*/
-void av_log_ask_for_sample(void *avc, const char *msg);
+void av_log_ask_for_sample(void *avc, const char *msg, ...);
/**
* Register the hardware accelerator hwaccel.
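For context on the new CODEC_CAP_SLICE_THREADS bit added above, a caller-side sketch (assumed usage, not part of this patch) of how the capability could steer thread_type selection:

#include "libavcodec/avcodec.h"

/* Caller-side sketch, not from this patch: prefer slice threading when the
 * decoder advertises it, else fall back to frame threading if available. */
static void pick_thread_type(AVCodecContext *avctx, const AVCodec *codec)
{
    if (codec->capabilities & CODEC_CAP_SLICE_THREADS)
        avctx->thread_type = FF_THREAD_SLICE;
    else if (codec->capabilities & CODEC_CAP_FRAME_THREADS)
        avctx->thread_type = FF_THREAD_FRAME;
}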
diff --git a/libavcodec/cook.c b/libavcodec/cook.c
index 3f7776bed0..286ecd42e5 100644
--- a/libavcodec/cook.c
+++ b/libavcodec/cook.c
@@ -1136,7 +1136,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
switch (q->subpacket[s].cookversion) {
case MONO:
if (q->nb_channels != 1) {
- av_log(avctx,AV_LOG_ERROR,"Container channels != 1, report sample!\n");
+ av_log_ask_for_sample(avctx, "Container channels != 1.\n");
return -1;
}
av_log(avctx,AV_LOG_DEBUG,"MONO\n");
@@ -1150,7 +1150,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
break;
case JOINT_STEREO:
if (q->nb_channels != 2) {
- av_log(avctx,AV_LOG_ERROR,"Container channels != 2, report sample!\n");
+ av_log_ask_for_sample(avctx, "Container channels != 2.\n");
return -1;
}
av_log(avctx,AV_LOG_DEBUG,"JOINT_STEREO\n");
@@ -1188,7 +1188,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
break;
default:
- av_log(avctx,AV_LOG_ERROR,"Unknown Cook version, report sample!\n");
+ av_log_ask_for_sample(avctx, "Unknown Cook version.\n");
return -1;
break;
}
@@ -1205,7 +1205,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
/* Try to catch some obviously faulty streams, othervise it might be exploitable */
if (q->subpacket[s].total_subbands > 53) {
- av_log(avctx,AV_LOG_ERROR,"total_subbands > 53, report sample!\n");
+ av_log_ask_for_sample(avctx, "total_subbands > 53\n");
return -1;
}
@@ -1215,7 +1215,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
}
if (q->subpacket[s].subbands > 50) {
- av_log(avctx,AV_LOG_ERROR,"subbands > 50, report sample!\n");
+ av_log_ask_for_sample(avctx, "subbands > 50\n");
return -1;
}
q->subpacket[s].gains1.now = q->subpacket[s].gain_1;
@@ -1226,7 +1226,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
q->num_subpackets++;
s++;
if (s > MAX_SUBPACKETS) {
- av_log(avctx,AV_LOG_ERROR,"Too many subpackets > 5, report file!\n");
+ av_log_ask_for_sample(avctx, "Too many subpackets > 5\n");
return -1;
}
}
@@ -1268,7 +1268,9 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
/* Try to catch some obviously faulty streams, othervise it might be exploitable */
if ((q->samples_per_channel == 256) || (q->samples_per_channel == 512) || (q->samples_per_channel == 1024)) {
} else {
- av_log(avctx,AV_LOG_ERROR,"unknown amount of samples_per_channel = %d, report sample!\n",q->samples_per_channel);
+ av_log_ask_for_sample(avctx,
+ "unknown amount of samples_per_channel = %d\n",
+ q->samples_per_channel);
return -1;
}
diff --git a/libavcodec/crystalhd.c b/libavcodec/crystalhd.c
index 9bbb6e8bba..1a2d60c672 100644
--- a/libavcodec/crystalhd.c
+++ b/libavcodec/crystalhd.c
@@ -87,6 +87,7 @@
#include "h264.h"
#include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h"
+#include "libavutil/opt.h"
/** Timeout parameter passed to DtsProcOutput() in us */
#define OUTPUT_PROC_TIMEOUT 50
@@ -118,6 +119,7 @@ typedef struct OpaqueList {
} OpaqueList;
typedef struct {
+ AVClass *av_class;
AVCodecContext *avctx;
AVFrame pic;
HANDLE dev;
@@ -137,8 +139,20 @@ typedef struct {
OpaqueList *head;
OpaqueList *tail;
+
+ /* Options */
+ uint32_t sWidth;
} CHDContext;
+static const AVOption options[] = {
+ { "crystalhd_downscale_width",
+ "Turn on downscaling to the specified width",
+ offsetof(CHDContext, sWidth),
+ FF_OPT_TYPE_INT, 0, 0, UINT32_MAX,
+ AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM, },
+ { NULL, },
+};
+
/*****************************************************************************
* Helper functions
@@ -434,6 +448,11 @@ static av_cold int init(AVCodecContext *avctx)
}
format.mSubtype = subtype;
+ if (priv->sWidth) {
+ format.bEnableScaling = 1;
+ format.ScalingParams.sWidth = priv->sWidth;
+ }
+
/* Get a decoder instance */
av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: starting up\n");
// Initialize the Link and Decoder devices
@@ -948,6 +967,13 @@ static int decode(AVCodecContext *avctx, void *data, int *data_size, AVPacket *a
#if CONFIG_H264_CRYSTALHD_DECODER
+static AVClass h264_class = {
+ "h264_crystalhd",
+ av_default_item_name,
+ options,
+ LIBAVUTIL_VERSION_INT,
+};
+
AVCodec ff_h264_crystalhd_decoder = {
.name = "h264_crystalhd",
.type = AVMEDIA_TYPE_VIDEO,
@@ -960,10 +986,18 @@ AVCodec ff_h264_crystalhd_decoder = {
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (CrystalHD acceleration)"),
.pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUYV422, PIX_FMT_NONE},
+ .priv_class = &h264_class,
};
#endif
#if CONFIG_MPEG2_CRYSTALHD_DECODER
+static AVClass mpeg2_class = {
+ "mpeg2_crystalhd",
+ av_default_item_name,
+ options,
+ LIBAVUTIL_VERSION_INT,
+};
+
AVCodec ff_mpeg2_crystalhd_decoder = {
.name = "mpeg2_crystalhd",
.type = AVMEDIA_TYPE_VIDEO,
@@ -976,10 +1010,18 @@ AVCodec ff_mpeg2_crystalhd_decoder = {
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MPEG-2 Video (CrystalHD acceleration)"),
.pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUYV422, PIX_FMT_NONE},
+ .priv_class = &mpeg2_class,
};
#endif
#if CONFIG_MPEG4_CRYSTALHD_DECODER
+static AVClass mpeg4_class = {
+ "mpeg4_crystalhd",
+ av_default_item_name,
+ options,
+ LIBAVUTIL_VERSION_INT,
+};
+
AVCodec ff_mpeg4_crystalhd_decoder = {
.name = "mpeg4_crystalhd",
.type = AVMEDIA_TYPE_VIDEO,
@@ -992,10 +1034,18 @@ AVCodec ff_mpeg4_crystalhd_decoder = {
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MPEG-4 Part 2 (CrystalHD acceleration)"),
.pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUYV422, PIX_FMT_NONE},
+ .priv_class = &mpeg4_class,
};
#endif
#if CONFIG_MSMPEG4_CRYSTALHD_DECODER
+static AVClass msmpeg4_class = {
+ "msmpeg4_crystalhd",
+ av_default_item_name,
+ options,
+ LIBAVUTIL_VERSION_INT,
+};
+
AVCodec ff_msmpeg4_crystalhd_decoder = {
.name = "msmpeg4_crystalhd",
.type = AVMEDIA_TYPE_VIDEO,
@@ -1008,10 +1058,18 @@ AVCodec ff_msmpeg4_crystalhd_decoder = {
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MPEG-4 Part 2 Microsoft variant version 3 (CrystalHD acceleration)"),
.pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUYV422, PIX_FMT_NONE},
+ .priv_class = &msmpeg4_class,
};
#endif
#if CONFIG_VC1_CRYSTALHD_DECODER
+static AVClass vc1_class = {
+ "vc1_crystalhd",
+ av_default_item_name,
+ options,
+ LIBAVUTIL_VERSION_INT,
+};
+
AVCodec ff_vc1_crystalhd_decoder = {
.name = "vc1_crystalhd",
.type = AVMEDIA_TYPE_VIDEO,
@@ -1024,10 +1082,18 @@ AVCodec ff_vc1_crystalhd_decoder = {
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 (CrystalHD acceleration)"),
.pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUYV422, PIX_FMT_NONE},
+ .priv_class = &vc1_class,
};
#endif
#if CONFIG_WMV3_CRYSTALHD_DECODER
+static AVClass wmv3_class = {
+ "wmv3_crystalhd",
+ av_default_item_name,
+ options,
+ LIBAVUTIL_VERSION_INT,
+};
+
AVCodec ff_wmv3_crystalhd_decoder = {
.name = "wmv3_crystalhd",
.type = AVMEDIA_TYPE_VIDEO,
@@ -1040,5 +1106,6 @@ AVCodec ff_wmv3_crystalhd_decoder = {
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 (CrystalHD acceleration)"),
.pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUYV422, PIX_FMT_NONE},
+ .priv_class = &wmv3_class,
};
#endif
diff --git a/libavcodec/dnxhdenc.c b/libavcodec/dnxhdenc.c
index bd73290031..6d1a17fd0a 100644
--- a/libavcodec/dnxhdenc.c
+++ b/libavcodec/dnxhdenc.c
@@ -869,6 +869,7 @@ AVCodec ff_dnxhd_encoder = {
dnxhd_encode_init,
dnxhd_encode_picture,
dnxhd_encode_end,
+ .capabilities = CODEC_CAP_SLICE_THREADS,
.pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("VC3/DNxHD"),
.priv_class = &class,
diff --git a/libavcodec/dv.c b/libavcodec/dv.c
index 6a4914768d..5fca22f9f7 100644
--- a/libavcodec/dv.c
+++ b/libavcodec/dv.c
@@ -1297,6 +1297,7 @@ AVCodec ff_dvvideo_encoder = {
sizeof(DVVideoContext),
dvvideo_init_encoder,
dvvideo_encode_frame,
+ .capabilities = CODEC_CAP_SLICE_THREADS,
.pix_fmts = (const enum PixelFormat[]) {PIX_FMT_YUV411P, PIX_FMT_YUV422P, PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"),
};
@@ -1312,7 +1313,7 @@ AVCodec ff_dvvideo_decoder = {
NULL,
dvvideo_close,
dvvideo_decode_frame,
- CODEC_CAP_DR1,
+ CODEC_CAP_DR1 | CODEC_CAP_SLICE_THREADS,
NULL,
.max_lowres = 3,
.long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"),
diff --git a/libavcodec/ffv1.c b/libavcodec/ffv1.c
index b00b463e16..0a982e8754 100644
--- a/libavcodec/ffv1.c
+++ b/libavcodec/ffv1.c
@@ -1795,7 +1795,7 @@ AVCodec ff_ffv1_decoder = {
NULL,
common_end,
decode_frame,
- CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/,
+ CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/ | CODEC_CAP_SLICE_THREADS,
NULL,
.long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
};
@@ -1809,6 +1809,7 @@ AVCodec ff_ffv1_encoder = {
encode_init,
encode_frame,
common_end,
+ .capabilities = CODEC_CAP_SLICE_THREADS,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV444P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_RGB32, PIX_FMT_YUV420P16, PIX_FMT_YUV422P16, PIX_FMT_YUV444P16, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
};
diff --git a/libavcodec/flac.c b/libavcodec/flac.c
index 484a44efb3..6e94c2c5ff 100644
--- a/libavcodec/flac.c
+++ b/libavcodec/flac.c
@@ -22,6 +22,7 @@
#include "libavutil/crc.h"
#include "flac.h"
#include "flacdata.h"
+#include "vorbis.h"
static const int8_t sample_size_table[] = { 0, 8, 12, 0, 16, 20, 24, 0 };
@@ -54,6 +55,8 @@ int ff_flac_decode_frame_header(AVCodecContext *avctx, GetBitContext *gb,
fi->ch_mode = get_bits(gb, 4);
if (fi->ch_mode < FLAC_MAX_CHANNELS) {
fi->channels = fi->ch_mode + 1;
+ if (fi->ch_mode <= 5)
+ avctx->channel_layout = ff_vorbis_channel_layouts[fi->ch_mode];
fi->ch_mode = FLAC_CHMODE_INDEPENDENT;
} else if (fi->ch_mode <= FLAC_CHMODE_MID_SIDE) {
fi->channels = 2;
diff --git a/libavcodec/flicvideo.c b/libavcodec/flicvideo.c
index 913617d120..7d2fd87647 100644
--- a/libavcodec/flicvideo.c
+++ b/libavcodec/flicvideo.c
@@ -61,9 +61,9 @@
#define CHECK_PIXEL_PTR(n) \
if (pixel_ptr + n > pixel_limit) { \
- av_log (s->avctx, AV_LOG_INFO, "Problem: pixel_ptr >= pixel_limit (%d >= %d)\n", \
+ av_log (s->avctx, AV_LOG_ERROR, "Invalid pixel_ptr = %d > pixel_limit = %d\n", \
pixel_ptr + n, pixel_limit); \
- return -1; \
+ return AVERROR_INVALIDDATA; \
} \
typedef struct FlicDecodeContext {
@@ -181,6 +181,11 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
/* iterate through the chunks */
while ((frame_size > 0) && (num_chunks > 0)) {
chunk_size = AV_RL32(&buf[stream_ptr]);
+ if (chunk_size > frame_size) {
+ av_log(avctx, AV_LOG_WARNING,
+ "Invalid chunk_size = %u > frame_size = %u\n", chunk_size, frame_size);
+ chunk_size = frame_size;
+ }
stream_ptr += 4;
chunk_type = AV_RL16(&buf[stream_ptr]);
stream_ptr += 2;
diff --git a/libavcodec/fraps.c b/libavcodec/fraps.c
index dd086d2e3f..8d40ed2fa7 100644
--- a/libavcodec/fraps.c
+++ b/libavcodec/fraps.c
@@ -46,6 +46,7 @@ typedef struct FrapsContext{
AVCodecContext *avctx;
AVFrame frame;
uint8_t *tmpbuf;
+ int tmpbuf_size;
DSPContext dsp;
} FrapsContext;
@@ -272,7 +273,9 @@ static int decode_frame(AVCodecContext *avctx,
offs[planes] = buf_size;
for(i = 0; i < planes; i++){
is_chroma = !!i;
- s->tmpbuf = av_realloc(s->tmpbuf, offs[i + 1] - offs[i] - 1024 + FF_INPUT_BUFFER_PADDING_SIZE);
+ av_fast_malloc(&s->tmpbuf, &s->tmpbuf_size, offs[i + 1] - offs[i] - 1024 + FF_INPUT_BUFFER_PADDING_SIZE);
+ if (!s->tmpbuf)
+ return AVERROR(ENOMEM);
if(fraps2_decode_plane(s, f->data[i], f->linesize[i], avctx->width >> is_chroma,
avctx->height >> is_chroma, buf + offs[i], offs[i + 1] - offs[i], is_chroma, 1) < 0) {
av_log(avctx, AV_LOG_ERROR, "Error decoding plane %i\n", i);
@@ -314,7 +317,9 @@ static int decode_frame(AVCodecContext *avctx,
}
offs[planes] = buf_size;
for(i = 0; i < planes; i++){
- s->tmpbuf = av_realloc(s->tmpbuf, offs[i + 1] - offs[i] - 1024 + FF_INPUT_BUFFER_PADDING_SIZE);
+ av_fast_malloc(&s->tmpbuf, &s->tmpbuf_size, offs[i + 1] - offs[i] - 1024 + FF_INPUT_BUFFER_PADDING_SIZE);
+ if (!s->tmpbuf)
+ return AVERROR(ENOMEM);
if(fraps2_decode_plane(s, f->data[0] + i + (f->linesize[0] * (avctx->height - 1)), -f->linesize[0],
avctx->width, avctx->height, buf + offs[i], offs[i + 1] - offs[i], 0, 3) < 0) {
av_log(avctx, AV_LOG_ERROR, "Error decoding plane %i\n", i);
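The av_fast_malloc() change above is the usual grow-only scratch-buffer pattern; here is a generic sketch of that pattern with an illustrative context struct (the names and the avcodec.h header location are assumptions, not the decoder's own code):

#include <stdint.h>
#include "libavcodec/avcodec.h"   /* av_fast_malloc() was declared here at the time */

/* Illustrative scratch-buffer holder, not the Fraps context itself. */
typedef struct {
    uint8_t     *tmpbuf;
    unsigned int tmpbuf_size;
} Scratch;

/* Grow the buffer only when the request exceeds the current allocation;
 * on allocation failure av_fast_malloc() leaves the pointer NULL. */
static int ensure_scratch(Scratch *s, unsigned int needed)
{
    av_fast_malloc(&s->tmpbuf, &s->tmpbuf_size, needed + FF_INPUT_BUFFER_PADDING_SIZE);
    return s->tmpbuf ? 0 : AVERROR(ENOMEM);
}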
diff --git a/libavcodec/h261_parser.c b/libavcodec/h261_parser.c
index defc1cb46f..3fb86db125 100644
--- a/libavcodec/h261_parser.c
+++ b/libavcodec/h261_parser.c
@@ -70,11 +70,15 @@ static int h261_parse(AVCodecParserContext *s,
ParseContext *pc = s->priv_data;
int next;
- next= h261_find_frame_end(pc,avctx, buf, buf_size);
- if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) {
- *poutbuf = NULL;
- *poutbuf_size = 0;
- return buf_size;
+ if (s->flags & PARSER_FLAG_COMPLETE_FRAMES) {
+ next = buf_size;
+ } else {
+ next= h261_find_frame_end(pc,avctx, buf, buf_size);
+ if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) {
+ *poutbuf = NULL;
+ *poutbuf_size = 0;
+ return buf_size;
+ }
}
*poutbuf = buf;
*poutbuf_size = buf_size;
diff --git a/libavcodec/h263_parser.c b/libavcodec/h263_parser.c
index e08198324b..a3d24ea433 100644
--- a/libavcodec/h263_parser.c
+++ b/libavcodec/h263_parser.c
@@ -70,12 +70,16 @@ static int h263_parse(AVCodecParserContext *s,
ParseContext *pc = s->priv_data;
int next;
- next= ff_h263_find_frame_end(pc, buf, buf_size);
+ if (s->flags & PARSER_FLAG_COMPLETE_FRAMES) {
+ next = buf_size;
+ } else {
+ next= ff_h263_find_frame_end(pc, buf, buf_size);
- if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) {
- *poutbuf = NULL;
- *poutbuf_size = 0;
- return buf_size;
+ if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) {
+ *poutbuf = NULL;
+ *poutbuf_size = 0;
+ return buf_size;
+ }
}
*poutbuf = buf;
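Both parser changes honor the same flag; from the caller's side the new behavior is requested roughly like this (sketch, not part of the patch):

#include "libavcodec/avcodec.h"

/* Caller-side sketch: when the container already delivers complete frames,
 * tell the parser to skip its frame-end search. */
static AVCodecParserContext *open_preframed_h263_parser(void)
{
    AVCodecParserContext *pc = av_parser_init(CODEC_ID_H263);
    if (pc)
        pc->flags |= PARSER_FLAG_COMPLETE_FRAMES;
    return pc;
}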
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index c64e6fb3f9..22a57866d4 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -3467,7 +3467,9 @@ AVCodec ff_h264_decoder = {
NULL,
ff_h264_decode_end,
decode_frame,
- /*CODEC_CAP_DRAW_HORIZ_BAND |*/ CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_FRAME_THREADS,
+ /*CODEC_CAP_DRAW_HORIZ_BAND |*/ CODEC_CAP_DR1 | CODEC_CAP_DELAY |
+ CODEC_CAP_FRAME_THREADS |
+ CODEC_CAP_SLICE_THREADS,
.flush= flush_dpb,
.long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
.init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
diff --git a/libavcodec/iff.c b/libavcodec/iff.c
index e64ce1eeb6..2467781537 100644
--- a/libavcodec/iff.c
+++ b/libavcodec/iff.c
@@ -30,10 +30,72 @@
#include "avcodec.h"
#include "get_bits.h"
+// TODO: masking bits
+typedef enum {
+ MASK_NONE,
+ MASK_HAS_MASK,
+ MASK_HAS_TRANSPARENT_COLOR,
+ MASK_LASSO
+} mask_type;
+
+/**
+ * Gets the actual extra data after video properties which contains
+ * the raw CMAP palette data beyond the IFF extra context.
+ *
+ * @param avctx the AVCodecContext where to extract raw palette data from
+ * @return pointer to raw CMAP palette data
+ */
+static av_always_inline uint8_t *get_palette_data(const AVCodecContext *const avctx) {
+ return avctx->extradata + AV_RB16(avctx->extradata);
+}
+
+/**
+ * Gets the size of CMAP palette data beyond the IFF extra context.
+ * Please note that any value < 2 of IFF extra context or
+ * raw extradata < 0 is considered as illegal extradata.
+ *
+ * @param avctx the AVCodecContext where to extract palette data size from
+ * @return size of raw palette data in bytes
+ */
+static av_always_inline int get_palette_size(const AVCodecContext *const avctx) {
+ return avctx->extradata_size - AV_RB16(avctx->extradata);
+}
+
+/**
+ * Gets the actual raw image data after video properties which
+ * contains the raw image data beyond the IFF extra context.
+ *
+ * @param avpkt the AVPacket where to extract raw image data from
+ * @return pointer to raw image data
+ */
+static av_always_inline uint8_t *get_image_data(const AVPacket *const avpkt) {
+ return avpkt->data + AV_RB16(avpkt->data);
+}
+
+/**
+ * Gets the size of raw image data beyond the IFF extra context.
+ * Please note that any value < 2 of either IFF extra context
+ * or raw image data is considered as an illegal packet.
+ *
+ * @param avpkt the AVPacket where to extract image data size from
+ * @return size of raw image data in bytes
+ */
+static av_always_inline int get_image_size(const AVPacket *const avpkt) {
+ return avpkt->size - AV_RB16(avpkt->data);
+}
+
typedef struct {
AVFrame frame;
int planesize;
uint8_t * planebuf;
+ uint8_t * ham_buf; ///< temporary buffer for planar to chunky conversion
+ uint32_t *ham_palbuf; ///< HAM decode table
+ unsigned compression; ///< delta compression method used
+ unsigned bpp; ///< bits per plane to decode (differs from bits_per_coded_sample if HAM)
+ unsigned ham; ///< 0 if non-HAM or number of hold bits (6 for bpp > 6, 4 otherwise)
+ unsigned flags; ///< 1 for EHB, 0 is no extra half darkening
+ unsigned transparency; ///< TODO: transparency color index in palette
+ unsigned masking; ///< TODO: masking method used
int init; // 1 if buffer and palette data already initialized, 0 otherwise
} IffContext;
@@ -122,6 +184,7 @@ static av_always_inline uint32_t gray2rgb(const uint32_t x) {
static int ff_cmap_read_palette(AVCodecContext *avctx, uint32_t *pal)
{
int count, i;
+ const uint8_t *const extradata = get_palette_data(avctx);
if (avctx->bits_per_coded_sample > 8) {
av_log(avctx, AV_LOG_ERROR, "bit_per_coded_sample > 8 not supported\n");
@@ -130,10 +193,10 @@ static int ff_cmap_read_palette(AVCodecContext *avctx, uint32_t *pal)
count = 1 << avctx->bits_per_coded_sample;
// If extradata is smaller than actually needed, fill the remaining with black.
- count = FFMIN(avctx->extradata_size / 3, count);
+ count = FFMIN(get_palette_size(avctx) / 3, count);
if (count) {
for (i=0; i < count; i++) {
- pal[i] = 0xFF000000 | AV_RB24( avctx->extradata + i*3 );
+ pal[i] = 0xFF000000 | AV_RB24(extradata + i*3);
}
} else { // Create gray-scale color palette for bps < 8
count = 1 << avctx->bits_per_coded_sample;
@@ -145,15 +208,123 @@ static int ff_cmap_read_palette(AVCodecContext *avctx, uint32_t *pal)
return 0;
}
+/**
+ * Extracts the IFF extra context and updates internal
+ * decoder structures.
+ *
+ * @param avctx the AVCodecContext where to extract extra context to
+ * @param avpkt the AVPacket to extract extra context from or NULL to use avctx
+ * @return 0 in case of success, a negative error code otherwise
+ */
+static int extract_header(AVCodecContext *const avctx,
+ const AVPacket *const avpkt) {
+ const uint8_t *buf;
+ unsigned buf_size;
+ IffContext *s = avctx->priv_data;
+ if (avpkt) {
+ if (avpkt->size < 2)
+ return AVERROR_INVALIDDATA;
+ buf = avpkt->data;
+ buf_size = bytestream_get_be16(&buf);
+ if (buf_size <= 1 || get_image_size(avpkt) <= 1) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Invalid image size received: %u -> image data offset: %d\n",
+ buf_size, get_image_size(avpkt));
+ return AVERROR_INVALIDDATA;
+ }
+ } else {
+ if (avctx->extradata_size < 2)
+ return AVERROR_INVALIDDATA;
+ buf = avctx->extradata;
+ buf_size = bytestream_get_be16(&buf);
+ if (buf_size <= 1 || get_palette_size(avctx) < 0) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Invalid palette size received: %u -> palette data offset: %d\n",
+ buf_size, get_palette_size(avctx));
+ return AVERROR_INVALIDDATA;
+ }
+ }
+
+ if (buf_size > 8) {
+ s->compression = bytestream_get_byte(&buf);
+ s->bpp = bytestream_get_byte(&buf);
+ s->ham = bytestream_get_byte(&buf);
+ s->flags = bytestream_get_byte(&buf);
+ s->transparency = bytestream_get_be16(&buf);
+ s->masking = bytestream_get_byte(&buf);
+ if (s->masking == MASK_HAS_TRANSPARENT_COLOR) {
+ av_log(avctx, AV_LOG_ERROR, "Transparency not supported\n");
+ return AVERROR_PATCHWELCOME;
+ } else if (s->masking != MASK_NONE) {
+ av_log(avctx, AV_LOG_ERROR, "Masking not supported\n");
+ return AVERROR_PATCHWELCOME;
+ }
+ if (!s->bpp || s->bpp > 32) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid number of bitplanes: %u\n", s->bpp);
+ return AVERROR_INVALIDDATA;
+ } else if (s->ham >= 8) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid number of hold bits for HAM: %u\n", s->ham);
+ return AVERROR_INVALIDDATA;
+ }
+
+ av_freep(&s->ham_buf);
+ av_freep(&s->ham_palbuf);
+
+ if (s->ham) {
+ int i, count = FFMIN(get_palette_size(avctx) / 3, 1 << s->ham);
+ const uint8_t *const extradata = get_palette_data(avctx);
+ s->ham_buf = av_malloc((s->planesize * 8) + FF_INPUT_BUFFER_PADDING_SIZE);
+ if (!s->ham_buf)
+ return AVERROR(ENOMEM);
+
+ s->ham_palbuf = av_malloc((8 * (1 << s->ham) * sizeof (uint32_t)) + FF_INPUT_BUFFER_PADDING_SIZE);
+ if (!s->ham_palbuf) {
+ av_freep(&s->ham_buf);
+ return AVERROR(ENOMEM);
+ }
+
+ if (count) { // HAM with color palette attached
+ // prefill with black and palette and set HAM take direct value mask to zero
+ memset(s->ham_palbuf, 0, (1 << s->ham) * 2 * sizeof (uint32_t));
+ for (i=0; i < count; i++) {
+ s->ham_palbuf[i*2+1] = AV_RL24(extradata + i*3);
+ }
+ count = 1 << s->ham;
+ } else { // HAM with grayscale color palette
+ count = 1 << s->ham;
+ for (i=0; i < count; i++) {
+ s->ham_palbuf[i*2] = 0; // take direct color value from palette
+ s->ham_palbuf[i*2+1] = av_le2ne32(gray2rgb((i * 255) >> s->ham));
+ }
+ }
+ for (i=0; i < count; i++) {
+ uint32_t tmp = i << (8 - s->ham);
+ tmp |= tmp >> s->ham;
+ s->ham_palbuf[(i+count)*2] = 0x00FFFF; // just modify blue color component
+ s->ham_palbuf[(i+count*2)*2] = 0xFFFF00; // just modify red color component
+ s->ham_palbuf[(i+count*3)*2] = 0xFF00FF; // just modify green color component
+ s->ham_palbuf[(i+count)*2+1] = tmp << 16;
+ s->ham_palbuf[(i+count*2)*2+1] = tmp;
+ s->ham_palbuf[(i+count*3)*2+1] = tmp << 8;
+ }
+ } else if (s->flags & 1) { // EHB (ExtraHalfBrite) color palette
+ av_log(avctx, AV_LOG_ERROR, "ExtraHalfBrite (EHB) mode not supported\n");
+ return AVERROR_PATCHWELCOME;
+ }
+ }
+
+ return 0;
+}
+
static av_cold int decode_init(AVCodecContext *avctx)
{
IffContext *s = avctx->priv_data;
int err;
if (avctx->bits_per_coded_sample <= 8) {
- avctx->pix_fmt = (avctx->bits_per_coded_sample < 8 ||
- avctx->extradata_size) ? PIX_FMT_PAL8
- : PIX_FMT_GRAY8;
+ avctx->pix_fmt = (avctx->bits_per_coded_sample < 8) ||
+ (avctx->extradata_size >= 2 && get_palette_size(avctx)) ? PIX_FMT_PAL8
+ : PIX_FMT_GRAY8;
} else if (avctx->bits_per_coded_sample <= 32) {
avctx->pix_fmt = PIX_FMT_BGR32;
} else {
@@ -167,6 +338,10 @@ static av_cold int decode_init(AVCodecContext *avctx)
if (!s->planebuf)
return AVERROR(ENOMEM);
+ s->bpp = avctx->bits_per_coded_sample;
+
+ if ((err = extract_header(avctx, NULL)) < 0)
+ return err;
s->frame.reference = 1;
return 0;
@@ -214,6 +389,39 @@ static void decodeplane32(uint32_t *dst, const uint8_t *buf, int buf_size, int p
} while (--buf_size);
}
+#define DECODE_HAM_PLANE32(x) \
+ first = buf[x] << 1; \
+ second = buf[(x)+1] << 1; \
+ delta &= pal[first++]; \
+ delta |= pal[first]; \
+ dst[x] = delta; \
+ delta &= pal[second++]; \
+ delta |= pal[second]; \
+ dst[(x)+1] = delta
+
+/**
+ * Converts one line of HAM6/8-encoded chunky buffer to 24bpp.
+ *
+ * @param dst the destination 24bpp buffer
+ * @param buf the source 8bpp chunky buffer
+ * @param pal the HAM decode table
+ * @param buf_size the plane size in bytes
+ */
+static void decode_ham_plane32(uint32_t *dst, const uint8_t *buf,
+ const uint32_t *const pal, unsigned buf_size)
+{
+ uint32_t delta = 0;
+ do {
+ uint32_t first, second;
+ DECODE_HAM_PLANE32(0);
+ DECODE_HAM_PLANE32(2);
+ DECODE_HAM_PLANE32(4);
+ DECODE_HAM_PLANE32(6);
+ buf += 8;
+ dst += 8;
+ } while (--buf_size);
+}
+
/**
* Decode one complete byterun1 encoded line.
*
@@ -250,11 +458,14 @@ static int decode_frame_ilbm(AVCodecContext *avctx,
AVPacket *avpkt)
{
IffContext *s = avctx->priv_data;
- const uint8_t *buf = avpkt->data;
- int buf_size = avpkt->size;
+ const uint8_t *buf = avpkt->size >= 2 ? get_image_data(avpkt) : NULL;
+ const int buf_size = avpkt->size >= 2 ? get_image_size(avpkt) : 0;
const uint8_t *buf_end = buf+buf_size;
int y, plane, res;
+ if ((res = extract_header(avctx, avpkt)) < 0)
+ return res;
+
if (s->init) {
if ((res = avctx->reget_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
@@ -274,16 +485,26 @@ static int decode_frame_ilbm(AVCodecContext *avctx,
for(y = 0; y < avctx->height; y++ ) {
uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ];
memset(row, 0, avctx->width);
- for (plane = 0; plane < avctx->bits_per_coded_sample && buf < buf_end; plane++) {
+ for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
decodeplane8(row, buf, FFMIN(s->planesize, buf_end - buf), plane);
buf += s->planesize;
}
}
+ } else if (s->ham) { // HAM to PIX_FMT_BGR32
+ for (y = 0; y < avctx->height; y++) {
+ uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ];
+ memset(s->ham_buf, 0, avctx->width);
+ for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
+ decodeplane8(s->ham_buf, buf, FFMIN(s->planesize, buf_end - buf), plane);
+ buf += s->planesize;
+ }
+ decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, s->planesize);
+ }
} else { // PIX_FMT_BGR32
for(y = 0; y < avctx->height; y++ ) {
uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
memset(row, 0, avctx->width << 2);
- for (plane = 0; plane < avctx->bits_per_coded_sample && buf < buf_end; plane++) {
+ for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
decodeplane32((uint32_t *) row, buf, FFMIN(s->planesize, buf_end - buf), plane);
buf += s->planesize;
}
@@ -295,6 +516,13 @@ static int decode_frame_ilbm(AVCodecContext *avctx,
memcpy(row, buf, FFMIN(avctx->width, buf_end - buf));
buf += avctx->width + (avctx->width % 2); // padding if odd
}
+ } else { // IFF-PBM: HAM to PIX_FMT_BGR32
+ for (y = 0; y < avctx->height; y++) {
+ uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ];
+ memcpy(s->ham_buf, buf, FFMIN(avctx->width, buf_end - buf));
+ buf += avctx->width + (avctx->width & 1); // padding if odd
+ decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, avctx->width);
+ }
}
*data_size = sizeof(AVFrame);
@@ -307,11 +535,13 @@ static int decode_frame_byterun1(AVCodecContext *avctx,
AVPacket *avpkt)
{
IffContext *s = avctx->priv_data;
- const uint8_t *buf = avpkt->data;
- int buf_size = avpkt->size;
+ const uint8_t *buf = avpkt->size >= 2 ? get_image_data(avpkt) : NULL;
+ const int buf_size = avpkt->size >= 2 ? get_image_size(avpkt) : 0;
const uint8_t *buf_end = buf+buf_size;
int y, plane, res;
+ if ((res = extract_header(avctx, avpkt)) < 0)
+ return res;
if (s->init) {
if ((res = avctx->reget_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
@@ -331,26 +561,42 @@ static int decode_frame_byterun1(AVCodecContext *avctx,
for(y = 0; y < avctx->height ; y++ ) {
uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ];
memset(row, 0, avctx->width);
- for (plane = 0; plane < avctx->bits_per_coded_sample; plane++) {
+ for (plane = 0; plane < s->bpp; plane++) {
buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
decodeplane8(row, s->planebuf, s->planesize, plane);
}
}
+ } else if (s->ham) { // HAM to PIX_FMT_BGR32
+ for (y = 0; y < avctx->height ; y++) {
+ uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
+ memset(s->ham_buf, 0, avctx->width);
+ for (plane = 0; plane < s->bpp; plane++) {
+ buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
+ decodeplane8(s->ham_buf, s->planebuf, s->planesize, plane);
+ }
+ decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, s->planesize);
+ }
} else { //PIX_FMT_BGR32
for(y = 0; y < avctx->height ; y++ ) {
uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
memset(row, 0, avctx->width << 2);
- for (plane = 0; plane < avctx->bits_per_coded_sample; plane++) {
+ for (plane = 0; plane < s->bpp; plane++) {
buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
decodeplane32((uint32_t *) row, s->planebuf, s->planesize, plane);
}
}
}
- } else {
+ } else if (avctx->pix_fmt == PIX_FMT_PAL8 || avctx->pix_fmt == PIX_FMT_GRAY8) { // IFF-PBM
for(y = 0; y < avctx->height ; y++ ) {
uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
buf += decode_byterun(row, avctx->width, buf, buf_end);
}
+ } else { // IFF-PBM: HAM to PIX_FMT_BGR32
+ for (y = 0; y < avctx->height ; y++) {
+ uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
+ buf += decode_byterun(s->ham_buf, avctx->width, buf, buf_end);
+ decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, avctx->width);
+ }
}
*data_size = sizeof(AVFrame);
@@ -364,6 +610,8 @@ static av_cold int decode_end(AVCodecContext *avctx)
if (s->frame.data[0])
avctx->release_buffer(avctx, &s->frame);
av_freep(&s->planebuf);
+ av_freep(&s->ham_buf);
+ av_freep(&s->ham_palbuf);
return 0;
}
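
The IFF hunks above route every HAM path through decode_ham_plane32(), whose body lies outside this excerpt. As a rough orientation only, and not the routine added by this patch, classic HAM6 expansion can be sketched as follows (a 16-entry base palette, 4-bit components and 0x00RRGGBB packing with blue in the low byte are assumptions of this example; the real code must match the target BGR32 layout):

    #include <stdint.h>

    /* Illustrative HAM6 sketch, not decode_ham_plane32() from this patch:
     * the top two bits of each 6-bit index either load a base palette
     * entry or modify one channel of the previous pixel ("hold and modify"). */
    static void ham6_expand_sketch(uint32_t *dst, const uint8_t *index,
                                   const uint32_t *palette /* 16 entries */,
                                   int count)
    {
        uint32_t prev = palette[0];
        int i;
        for (i = 0; i < count; i++) {
            unsigned v    = index[i] & 0x3F;
            unsigned data = (v & 0x0F) * 0x11;                  /* expand 4 bits to 8 */
            switch (v >> 4) {
            case 0: prev = palette[v & 0x0F];                   break;
            case 1: prev = (prev & 0xFFFFFF00u) |  data;        break; /* modify blue  */
            case 2: prev = (prev & 0xFF00FFFFu) | (data << 16); break; /* modify red   */
            case 3: prev = (prev & 0xFFFF00FFu) | (data <<  8); break; /* modify green */
            }
            dst[i] = prev;
        }
    }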
diff --git a/libavcodec/libcelt_dec.c b/libavcodec/libcelt_dec.c
new file mode 100644
index 0000000000..6f3965401c
--- /dev/null
+++ b/libavcodec/libcelt_dec.c
@@ -0,0 +1,136 @@
+/*
+ * Xiph CELT / Opus decoder using libcelt
+ * Copyright (c) 2011 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <celt/celt.h>
+#include <celt/celt_header.h>
+#include "avcodec.h"
+#include "libavutil/intreadwrite.h"
+
+struct libcelt_context {
+ CELTMode *mode;
+ CELTDecoder *dec;
+ int frame_bytes;
+ int discard;
+};
+
+static int ff_celt_error_to_averror(int err)
+{
+ switch(err) {
+ case CELT_BAD_ARG: return AVERROR(EINVAL);
+#ifdef CELT_BUFFER_TOO_SMALL
+ case CELT_BUFFER_TOO_SMALL: return AVERROR(ENOBUFS);
+#endif
+ case CELT_INTERNAL_ERROR: return AVERROR(EFAULT);
+ case CELT_CORRUPTED_DATA: return AVERROR_INVALIDDATA;
+ case CELT_UNIMPLEMENTED: return AVERROR(ENOTSUP);
+#ifdef ENOTRECOVERABLE
+ case CELT_INVALID_STATE: return AVERROR(ENOTRECOVERABLE);
+#endif
+ case CELT_ALLOC_FAIL: return AVERROR(ENOMEM);
+ default: return AVERROR(EINVAL);
+ }
+}
+
+static int ff_celt_bitstream_version_hack(CELTMode *mode)
+{
+ CELTHeader header = { .version_id = 0 };
+ celt_header_init(&header, mode, 960, 2);
+ return header.version_id;
+}
+
+static av_cold int libcelt_dec_init(AVCodecContext *c)
+{
+ struct libcelt_context *celt = c->priv_data;
+ int err;
+
+ if (!c->channels || !c->frame_size ||
+ c->frame_size > INT_MAX / sizeof(int16_t) / c->channels)
+ return AVERROR(EINVAL);
+ celt->frame_bytes = c->frame_size * c->channels * sizeof(int16_t);
+ celt->mode = celt_mode_create(c->sample_rate, c->frame_size, &err);
+ if (!celt->mode)
+ return ff_celt_error_to_averror(err);
+ celt->dec = celt_decoder_create_custom(celt->mode, c->channels, &err);
+ if (!celt->dec) {
+ celt_mode_destroy(celt->mode);
+ return ff_celt_error_to_averror(err);
+ }
+ if (c->extradata_size >= 4) {
+ celt->discard = AV_RL32(c->extradata);
+ if (celt->discard < 0 || celt->discard >= c->frame_size) {
+ av_log(c, AV_LOG_WARNING,
+ "Invalid overlap (%d), ignored.\n", celt->discard);
+ celt->discard = 0;
+ }
+ celt->discard *= c->channels * sizeof(int16_t);
+ }
+ if(c->extradata_size >= 8) {
+ unsigned version = AV_RL32(c->extradata + 4);
+ unsigned lib_version = ff_celt_bitstream_version_hack(celt->mode);
+ if (version != lib_version)
+ av_log(c, AV_LOG_WARNING,
+ "CELT bitstream version 0x%x may be "
+ "improperly decoded by libcelt for version 0x%x.\n",
+ version, lib_version);
+ }
+ return 0;
+}
+
+static av_cold int libcelt_dec_close(AVCodecContext *c)
+{
+ struct libcelt_context *celt = c->priv_data;
+
+ celt_decoder_destroy(celt->dec);
+ celt_mode_destroy(celt->mode);
+ return 0;
+}
+
+static int libcelt_dec_decode(AVCodecContext *c, void *pcm, int *pcm_size,
+ AVPacket *pkt)
+{
+ struct libcelt_context *celt = c->priv_data;
+ int err;
+
+ if (*pcm_size < celt->frame_bytes)
+ return AVERROR(ENOBUFS);
+ err = celt_decode(celt->dec, pkt->data, pkt->size, pcm, c->frame_size);
+ if (err < 0)
+ return ff_celt_error_to_averror(err);
+ *pcm_size = celt->frame_bytes;
+ if (celt->discard) {
+ *pcm_size = celt->frame_bytes - celt->discard;
+ memmove(pcm, (char *)pcm + celt->discard, *pcm_size);
+ celt->discard = 0;
+ }
+ return pkt->size;
+}
+
+AVCodec ff_libcelt_decoder = {
+ .name = "libcelt",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = CODEC_ID_CELT,
+ .priv_data_size = sizeof(struct libcelt_context),
+ .init = libcelt_dec_init,
+ .close = libcelt_dec_close,
+ .decode = libcelt_dec_decode,
+ .capabilities = 0,
+ .long_name = NULL_IF_CONFIG_SMALL("Xiph CELT/Opus decoder using libcelt"),
+};
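
For orientation, libcelt_dec_init() above reads an optional 8-byte extradata blob: a little-endian count of overlap samples to discard at offset 0 and the CELT bitstream version at offset 4. A demuxer exporting CELT might fill it along these lines (a sketch under that assumption, not code from this merge; overlap_samples and bitstream_version stand in for values taken from the container header):

    #include "avcodec.h"
    #include "libavutil/intreadwrite.h"

    /* Sketch: build the 8-byte extradata consumed by libcelt_dec_init(). */
    static int put_celt_extradata(AVCodecContext *c,
                                  uint32_t overlap_samples,
                                  uint32_t bitstream_version)
    {
        c->extradata = av_mallocz(8 + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!c->extradata)
            return AVERROR(ENOMEM);
        c->extradata_size = 8;
        AV_WL32(c->extradata,     overlap_samples);   /* discarded from the first decoded frame */
        AV_WL32(c->extradata + 4, bitstream_version); /* compared against libcelt's own version */
        return 0;
    }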
diff --git a/libavcodec/libmp3lame.c b/libavcodec/libmp3lame.c
index db0bc8259c..5cfb122903 100644
--- a/libavcodec/libmp3lame.c
+++ b/libavcodec/libmp3lame.c
@@ -34,6 +34,10 @@ typedef struct Mp3AudioContext {
int stereo;
uint8_t buffer[BUFFER_SIZE];
int buffer_index;
+ struct {
+ int *left;
+ int *right;
+ } s32_data;
} Mp3AudioContext;
static av_cold int MP3lame_encode_init(AVCodecContext *avctx)
@@ -69,9 +73,26 @@ static av_cold int MP3lame_encode_init(AVCodecContext *avctx)
avctx->frame_size = lame_get_framesize(s->gfp);
- avctx->coded_frame= avcodec_alloc_frame();
+ if(!(avctx->coded_frame= avcodec_alloc_frame())) {
+ lame_close(s->gfp);
+
+ return AVERROR(ENOMEM);
+ }
avctx->coded_frame->key_frame= 1;
+ if(AV_SAMPLE_FMT_S32 == avctx->sample_fmt && s->stereo) {
+ int nelem = 2 * avctx->frame_size;
+
+ if(! (s->s32_data.left = av_malloc(nelem * sizeof(int)))) {
+ av_freep(&avctx->coded_frame);
+ lame_close(s->gfp);
+
+ return AVERROR(ENOMEM);
+ }
+
+ s->s32_data.right = s->s32_data.left + avctx->frame_size;
+ }
+
return 0;
err_close:
@@ -146,17 +167,35 @@ static int MP3lame_encode_frame(AVCodecContext *avctx,
/* lame 3.91 dies on '1-channel interleaved' data */
- if(data){
+ if(!data){
+ lame_result= lame_encode_flush(
+ s->gfp,
+ s->buffer + s->buffer_index,
+ BUFFER_SIZE - s->buffer_index
+ );
+#if 2147483647 == INT_MAX
+ }else if(AV_SAMPLE_FMT_S32 == avctx->sample_fmt){
if (s->stereo) {
- lame_result = lame_encode_buffer_interleaved(
+ int32_t *rp = data;
+ int32_t *mp = rp + 2*avctx->frame_size;
+ int *wpl = s->s32_data.left;
+ int *wpr = s->s32_data.right;
+
+ while (rp < mp) {
+ *wpl++ = *rp++;
+ *wpr++ = *rp++;
+ }
+
+ lame_result = lame_encode_buffer_int(
s->gfp,
- data,
+ s->s32_data.left,
+ s->s32_data.right,
avctx->frame_size,
s->buffer + s->buffer_index,
BUFFER_SIZE - s->buffer_index
);
} else {
- lame_result = lame_encode_buffer(
+ lame_result = lame_encode_buffer_int(
s->gfp,
data,
data,
@@ -165,12 +204,26 @@ static int MP3lame_encode_frame(AVCodecContext *avctx,
BUFFER_SIZE - s->buffer_index
);
}
+#endif
}else{
- lame_result= lame_encode_flush(
+ if (s->stereo) {
+ lame_result = lame_encode_buffer_interleaved(
+ s->gfp,
+ data,
+ avctx->frame_size,
+ s->buffer + s->buffer_index,
+ BUFFER_SIZE - s->buffer_index
+ );
+ } else {
+ lame_result = lame_encode_buffer(
s->gfp,
+ data,
+ data,
+ avctx->frame_size,
s->buffer + s->buffer_index,
BUFFER_SIZE - s->buffer_index
);
+ }
}
if(lame_result < 0){
@@ -206,6 +259,7 @@ static av_cold int MP3lame_encode_close(AVCodecContext *avctx)
{
Mp3AudioContext *s = avctx->priv_data;
+ av_freep(&s->s32_data.left);
av_freep(&avctx->coded_frame);
lame_close(s->gfp);
@@ -222,7 +276,11 @@ AVCodec ff_libmp3lame_encoder = {
MP3lame_encode_frame,
MP3lame_encode_close,
.capabilities= CODEC_CAP_DELAY,
- .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
+ .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,
+#if 2147483647 == INT_MAX
+ AV_SAMPLE_FMT_S32,
+#endif
+ AV_SAMPLE_FMT_NONE},
.supported_samplerates= sSampleRates,
.long_name= NULL_IF_CONFIG_SMALL("libmp3lame MP3 (MPEG audio layer 3)"),
};
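
The "#if 2147483647 == INT_MAX" guards above enable the S32 path only where int is exactly 32 bits, because lame_encode_buffer_int() takes plain int buffers and the samples are copied across without rescaling. Isolated as a sketch (helper name and signature are this example's, not the patch's):

    #include <limits.h>
    #include <stdint.h>

    #if INT_MAX == 2147483647
    /* Split packed stereo S32 samples into the planar int buffers that
     * lame_encode_buffer_int() expects; a 32-bit int is required so the
     * sample values pass through unchanged. */
    static void split_stereo_s32(const int32_t *in, int *left, int *right,
                                 int nb_frames)
    {
        int i;
        for (i = 0; i < nb_frames; i++) {
            left[i]  = in[2 * i];
            right[i] = in[2 * i + 1];
        }
    }
    #endif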
diff --git a/libavcodec/libvo-aacenc.c b/libavcodec/libvo-aacenc.c
index 3475a5cfe6..0efb79b1c1 100644
--- a/libavcodec/libvo-aacenc.c
+++ b/libavcodec/libvo-aacenc.c
@@ -59,7 +59,7 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
if (s->codec_api.SetParam(s->handle, VO_PID_AAC_ENCPARAM, &params)
!= VO_ERR_NONE) {
av_log(avctx, AV_LOG_ERROR, "Unable to set encoding parameters\n");
- return AVERROR_UNKNOWN;
+ return AVERROR(EINVAL);
}
for (index = 0; index < 16; index++)
@@ -68,7 +68,7 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
if (index == 16) {
av_log(avctx, AV_LOG_ERROR, "Unsupported sample rate %d\n",
avctx->sample_rate);
- return AVERROR_NOTSUPP;
+ return AVERROR(ENOSYS);
}
if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
avctx->extradata_size = 2;
@@ -110,14 +110,14 @@ static int aac_encode_frame(AVCodecContext *avctx,
if (s->codec_api.GetOutputData(s->handle, &output, &output_info)
!= VO_ERR_NONE) {
av_log(avctx, AV_LOG_ERROR, "Unable to encode frame\n");
- return AVERROR_UNKNOWN;
+ return AVERROR(EINVAL);
}
return output.Length;
}
AVCodec ff_libvo_aacenc_encoder = {
"libvo_aacenc",
- CODEC_TYPE_AUDIO,
+ AVMEDIA_TYPE_AUDIO,
CODEC_ID_AAC,
sizeof(AACContext),
aac_encode_init,
diff --git a/libavcodec/libvo-amrwbenc.c b/libavcodec/libvo-amrwbenc.c
index 7b9e66a247..ec23aeb7f8 100644
--- a/libavcodec/libvo-amrwbenc.c
+++ b/libavcodec/libvo-amrwbenc.c
@@ -119,7 +119,7 @@ static int amr_wb_encode_frame(AVCodecContext *avctx,
AVCodec ff_libvo_amrwbenc_encoder = {
"libvo_amrwbenc",
- CODEC_TYPE_AUDIO,
+ AVMEDIA_TYPE_AUDIO,
CODEC_ID_AMR_WB,
sizeof(AMRWBContext),
amr_wb_encode_init,
diff --git a/libavcodec/libx264.c b/libavcodec/libx264.c
index db57eead78..1b4e88b75b 100644
--- a/libavcodec/libx264.c
+++ b/libavcodec/libx264.c
@@ -38,7 +38,9 @@ typedef struct X264Context {
const char *preset;
const char *tune;
const char *profile;
+ const char *level;
int fastfirstpass;
+ const char *stats;
} X264Context;
static void X264_log(void *p, int level, const char *fmt, va_list args)
@@ -144,7 +146,8 @@ static int X264_frame(AVCodecContext *ctx, uint8_t *buf,
}
x4->out_pic.key_frame = pic_out.b_keyframe;
- x4->out_pic.quality = (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA;
+ if (bufsize)
+ x4->out_pic.quality = (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA;
return bufsize;
}
@@ -162,6 +165,40 @@ static av_cold int X264_close(AVCodecContext *avctx)
return 0;
}
+/**
+ * Detect default settings and use default profile to avoid libx264 failure.
+ */
+static void check_default_settings(AVCodecContext *avctx)
+{
+ X264Context *x4 = avctx->priv_data;
+
+ int score = 0;
+ score += x4->params.analyse.i_me_range == 0;
+ score += x4->params.rc.i_qp_step == 3;
+ score += x4->params.i_keyint_max == 12;
+ score += x4->params.rc.i_qp_min == 2;
+ score += x4->params.rc.i_qp_max == 31;
+ score += x4->params.rc.f_qcompress == 0.5;
+ score += fabs(x4->params.rc.f_ip_factor - 1.25) < 0.01;
+ score += fabs(x4->params.rc.f_pb_factor - 1.25) < 0.01;
+ score += x4->params.analyse.inter == 0 && x4->params.analyse.i_subpel_refine == 8;
+ if (score >= 5) {
+ av_log(avctx, AV_LOG_ERROR, "Default settings detected, using medium profile\n");
+ x4->preset = "medium";
+ if (avctx->bit_rate == 200*1000)
+ avctx->crf = 23;
+ }
+}
+
+#define OPT_STR(opt, param) \
+ do { \
+ if (param && x264_param_parse(&x4->params, opt, param) < 0) { \
+ av_log(avctx, AV_LOG_ERROR, \
+ "bad value for '%s': '%s'\n", opt, param); \
+ return -1; \
+ } \
+ } while (0); \
+
static av_cold int X264_init(AVCodecContext *avctx)
{
X264Context *x4 = avctx->priv_data;
@@ -248,26 +285,19 @@ static av_cold int X264_init(AVCodecContext *avctx)
x4->params.analyse.i_trellis = avctx->trellis;
x4->params.analyse.i_noise_reduction = avctx->noise_reduction;
- if (avctx->level > 0)
- x4->params.i_level_idc = avctx->level;
-
x4->params.rc.b_mb_tree = !!(avctx->flags2 & CODEC_FLAG2_MBTREE);
x4->params.rc.f_ip_factor = 1 / fabs(avctx->i_quant_factor);
x4->params.rc.f_pb_factor = avctx->b_quant_factor;
x4->params.analyse.i_chroma_qp_offset = avctx->chromaoffset;
+ if (!x4->preset)
+ check_default_settings(avctx);
+
if (x4->preset || x4->tune) {
if (x264_param_default_preset(&x4->params, x4->preset, x4->tune) < 0)
return -1;
}
- if (x4->fastfirstpass)
- x264_param_apply_fastfirstpass(&x4->params);
-
- if (x4->profile)
- if (x264_param_apply_profile(&x4->params, x4->profile) < 0)
- return -1;
-
x4->params.pf_log = X264_log;
x4->params.p_log_private = avctx;
x4->params.i_log_level = X264_LOG_DEBUG;
@@ -290,6 +320,8 @@ static av_cold int X264_init(AVCodecContext *avctx)
}
}
+ OPT_STR("stats", x4->stats);
+
// if neither crf nor cqp modes are selected we have to enable the RC
// we do it this way because we cannot check if the bitrate has been set
if (!(avctx->crf || (avctx->cqp > -1)))
@@ -301,6 +333,15 @@ static av_cold int X264_init(AVCodecContext *avctx)
(float)avctx->rc_initial_buffer_occupancy / avctx->rc_buffer_size;
}
+ OPT_STR("level", x4->level);
+
+ if (x4->fastfirstpass)
+ x264_param_apply_fastfirstpass(&x4->params);
+
+ if (x4->profile)
+ if (x264_param_apply_profile(&x4->params, x4->profile) < 0)
+ return -1;
+
x4->params.i_width = avctx->width;
x4->params.i_height = avctx->height;
x4->params.vui.i_sar_width = avctx->sample_aspect_ratio.num;
@@ -325,7 +366,8 @@ static av_cold int X264_init(AVCodecContext *avctx)
x4->params.b_repeat_headers = 0;
// update AVCodecContext with x264 parameters
- avctx->has_b_frames = x4->params.i_bframe_pyramid ? 2 : !!x4->params.i_bframe;
+ avctx->has_b_frames = x4->params.i_bframe ?
+ x4->params.i_bframe_pyramid ? 2 : 1 : 0;
avctx->bit_rate = x4->params.rc.i_bitrate*1000;
avctx->crf = x4->params.rc.f_rf_constant;
@@ -360,6 +402,8 @@ static const AVOption options[] = {
{"tune", "Tune the encoding params", OFFSET(tune), FF_OPT_TYPE_STRING, 0, 0, 0, VE},
{"fastfirstpass", "Use fast settings when encoding first pass", OFFSET(fastfirstpass), FF_OPT_TYPE_INT, 1, 0, 1, VE},
{"profile", "Set profile restrictions", OFFSET(profile), FF_OPT_TYPE_STRING, 0, 0, 0, VE},
+ {"level", "Specify level (as defined by Annex A)", OFFSET(level), FF_OPT_TYPE_STRING, 0, 0, 0, VE},
+ {"passlogfile", "Filename for 2 pass stats", OFFSET(stats), FF_OPT_TYPE_STRING, 0, 0, 0, VE},
{ NULL },
};
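
OPT_STR() above is only a thin wrapper around x264_param_parse(), which accepts the same key/value strings as the x264 command line. Expanded by hand inside X264_init(), OPT_STR("level", x4->level) behaves roughly like this:

    /* Rough hand-expansion of OPT_STR("level", x4->level); the error
     * handling mirrors the macro added above. */
    if (x4->level && x264_param_parse(&x4->params, "level", x4->level) < 0) {
        av_log(avctx, AV_LOG_ERROR, "bad value for '%s': '%s'\n",
               "level", x4->level);
        return -1;
    }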
diff --git a/libavcodec/loco.c b/libavcodec/loco.c
index 48fa829914..8f2b8ff325 100644
--- a/libavcodec/loco.c
+++ b/libavcodec/loco.c
@@ -248,7 +248,7 @@ static av_cold int decode_init(AVCodecContext *avctx){
break;
default:
l->lossy = AV_RL32(avctx->extradata + 8);
- av_log(avctx, AV_LOG_INFO, "This is LOCO codec version %i, please upload file for study\n", version);
+ av_log_ask_for_sample(avctx, "This is LOCO codec version %i.\n", version);
}
l->mode = AV_RL32(avctx->extradata + 4);
diff --git a/libavcodec/mjpegdec.c b/libavcodec/mjpegdec.c
index d0fc524471..2bdd2a814b 100644
--- a/libavcodec/mjpegdec.c
+++ b/libavcodec/mjpegdec.c
@@ -797,6 +797,10 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, i
if (s->restart_interval && !s->restart_count)
s->restart_count = s->restart_interval;
+ if(get_bits_count(&s->gb)>s->gb.size_in_bits){
+ av_log(s->avctx, AV_LOG_ERROR, "overread %d\n", get_bits_count(&s->gb) - s->gb.size_in_bits);
+ return -1;
+ }
for(i=0;i<nb_components;i++) {
uint8_t *ptr;
int n, h, v, x, y, c, j;
diff --git a/libavcodec/mlp_parser.c b/libavcodec/mlp_parser.c
index 5f0b49a0af..3b87f432ad 100644
--- a/libavcodec/mlp_parser.c
+++ b/libavcodec/mlp_parser.c
@@ -43,7 +43,7 @@ static const uint8_t mlp_channels[32] = {
5, 6, 5, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
-static const uint64_t mlp_layout[32] = {
+const uint64_t ff_mlp_layout[32] = {
AV_CH_LAYOUT_MONO,
AV_CH_LAYOUT_STEREO,
AV_CH_LAYOUT_2_1,
@@ -107,7 +107,7 @@ static int truehd_channels(int chanmap)
return channels;
}
-static int64_t truehd_layout(int chanmap)
+int64_t ff_truehd_layout(int chanmap)
{
int layout = 0, i;
@@ -316,19 +316,15 @@ static int mlp_parse(AVCodecParserContext *s,
if (mh.stream_type == 0xbb) {
/* MLP stream */
avctx->channels = mlp_channels[mh.channels_mlp];
- avctx->channel_layout = mlp_layout[mh.channels_mlp];
+ avctx->channel_layout = ff_mlp_layout[mh.channels_mlp];
} else { /* mh.stream_type == 0xba */
/* TrueHD stream */
if (mh.channels_thd_stream2) {
avctx->channels = truehd_channels(mh.channels_thd_stream2);
- avctx->channel_layout = truehd_layout(mh.channels_thd_stream2);
+ avctx->channel_layout = ff_truehd_layout(mh.channels_thd_stream2);
} else {
avctx->channels = truehd_channels(mh.channels_thd_stream1);
- avctx->channel_layout = truehd_layout(mh.channels_thd_stream1);
- }
- if (av_get_channel_layout_nb_channels(avctx->channel_layout) != avctx->channels) {
- avctx->channel_layout = 0;
- av_log_ask_for_sample(avctx, "Unknown channel layout.");
+ avctx->channel_layout = ff_truehd_layout(mh.channels_thd_stream1);
}
}
diff --git a/libavcodec/mlp_parser.h b/libavcodec/mlp_parser.h
index d7ce2b8311..6e43bc38dc 100644
--- a/libavcodec/mlp_parser.h
+++ b/libavcodec/mlp_parser.h
@@ -54,6 +54,9 @@ typedef struct MLPHeaderInfo
int ff_mlp_read_major_sync(void *log, MLPHeaderInfo *mh, GetBitContext *gb);
+int64_t ff_truehd_layout(int chanmap);
+
+extern const uint64_t ff_mlp_layout[32];
#endif /* AVCODEC_MLP_PARSER_H */
diff --git a/libavcodec/mlpdec.c b/libavcodec/mlpdec.c
index 044df5bc6a..7b3bd710bb 100644
--- a/libavcodec/mlpdec.c
+++ b/libavcodec/mlpdec.c
@@ -329,6 +329,23 @@ static int read_major_sync(MLPDecodeContext *m, GetBitContext *gb)
for (substr = 0; substr < MAX_SUBSTREAMS; substr++)
m->substream[substr].restart_seen = 0;
+ if (mh.stream_type == 0xbb) {
+ /* MLP stream */
+ m->avctx->channel_layout = ff_mlp_layout[mh.channels_mlp];
+ } else { /* mh.stream_type == 0xba */
+ /* TrueHD stream */
+ if (mh.channels_thd_stream2) {
+ m->avctx->channel_layout = ff_truehd_layout(mh.channels_thd_stream2);
+ } else {
+ m->avctx->channel_layout = ff_truehd_layout(mh.channels_thd_stream1);
+ }
+ if (m->avctx->channels &&
+ av_get_channel_layout_nb_channels(m->avctx->channel_layout) != m->avctx->channels) {
+ m->avctx->channel_layout = 0;
+ av_log_ask_for_sample(m->avctx, "Unknown channel layout.");
+ }
+ }
+
m->needs_reordering = mh.channels_mlp >= 18 && mh.channels_mlp <= 20;
return 0;
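
The new layout check in read_major_sync() simply cross-validates the AV_CH_* mask against the channel count the decoder already trusts; av_get_channel_layout_nb_channels() counts the speakers in the mask. For example (a sketch, not code from the patch; avctx stands in for the decoder context):

    #include "libavutil/audioconvert.h"

    /* Sketch: a 5.1 mask must agree with avctx->channels, otherwise the
     * layout is dropped as unknown, exactly as read_major_sync() now does. */
    int64_t layout = AV_CH_LAYOUT_5POINT1;
    if (av_get_channel_layout_nb_channels(layout) != avctx->channels)
        layout = 0;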
diff --git a/libavcodec/mpeg12.c b/libavcodec/mpeg12.c
index 9afe8c3385..192ecdd478 100644
--- a/libavcodec/mpeg12.c
+++ b/libavcodec/mpeg12.c
@@ -2602,7 +2602,7 @@ AVCodec ff_mpeg2video_decoder = {
NULL,
mpeg_decode_end,
mpeg_decode_frame,
- CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY,
+ CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
.flush= flush,
.max_lowres= 3,
.long_name= NULL_IF_CONFIG_SMALL("MPEG-2 video"),
@@ -2619,7 +2619,7 @@ AVCodec ff_mpegvideo_decoder = {
NULL,
mpeg_decode_end,
mpeg_decode_frame,
- CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY,
+ CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
.flush= flush,
.max_lowres= 3,
.long_name= NULL_IF_CONFIG_SMALL("MPEG-1 video"),
diff --git a/libavcodec/mpeg12enc.c b/libavcodec/mpeg12enc.c
index b2526f6b7a..cc1d9c8230 100644
--- a/libavcodec/mpeg12enc.c
+++ b/libavcodec/mpeg12enc.c
@@ -940,7 +940,7 @@ AVCodec ff_mpeg1video_encoder = {
MPV_encode_end,
.supported_framerates= ff_frame_rate_tab+1,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
- .capabilities= CODEC_CAP_DELAY,
+ .capabilities= CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
.long_name= NULL_IF_CONFIG_SMALL("MPEG-1 video"),
};
@@ -954,6 +954,6 @@ AVCodec ff_mpeg2video_encoder = {
MPV_encode_end,
.supported_framerates= ff_frame_rate_tab+1,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE},
- .capabilities= CODEC_CAP_DELAY,
+ .capabilities= CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
.long_name= NULL_IF_CONFIG_SMALL("MPEG-2 video"),
};
diff --git a/libavcodec/mpeg4videoenc.c b/libavcodec/mpeg4videoenc.c
index d6834a6e40..f6a18b77cc 100644
--- a/libavcodec/mpeg4videoenc.c
+++ b/libavcodec/mpeg4videoenc.c
@@ -1359,6 +1359,6 @@ AVCodec ff_mpeg4_encoder = {
MPV_encode_picture,
MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
- .capabilities= CODEC_CAP_DELAY,
+ .capabilities= CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
.long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
};
diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c
index 9255fa872a..7760ee58b6 100644
--- a/libavcodec/mpegvideo_enc.c
+++ b/libavcodec/mpegvideo_enc.c
@@ -3802,6 +3802,7 @@ AVCodec ff_h263p_encoder = {
MPV_encode_init,
MPV_encode_picture,
MPV_encode_end,
+ .capabilities = CODEC_CAP_SLICE_THREADS,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
};
diff --git a/libavcodec/opt.c b/libavcodec/opt.c
deleted file mode 100644
index ffa422ec93..0000000000
--- a/libavcodec/opt.c
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * AVOptions ABI compatibility wrapper
- * Copyright (c) 2010 Michael Niedermayer <michaelni@gmx.at>
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "avcodec.h"
-#include "opt.h"
-
-#if LIBAVCODEC_VERSION_MAJOR < 53 && CONFIG_SHARED && HAVE_SYMVER
-
-FF_SYMVER(const AVOption *, av_find_opt, (void *obj, const char *name, const char *unit, int mask, int flags), "LIBAVCODEC_52"){
- return av_find_opt(obj, name, unit, mask, flags);
-}
-FF_SYMVER(int, av_set_string3, (void *obj, const char *name, const char *val, int alloc, const AVOption **o_out), "LIBAVCODEC_52"){
- return av_set_string3(obj, name, val, alloc, o_out);
-}
-FF_SYMVER(const AVOption *, av_set_double, (void *obj, const char *name, double n), "LIBAVCODEC_52"){
- return av_set_double(obj, name, n);
-}
-FF_SYMVER(const AVOption *, av_set_q, (void *obj, const char *name, AVRational n), "LIBAVCODEC_52"){
- return av_set_q(obj, name, n);
-}
-FF_SYMVER(const AVOption *, av_set_int, (void *obj, const char *name, int64_t n), "LIBAVCODEC_52"){
- return av_set_int(obj, name, n);
-}
-FF_SYMVER(double, av_get_double, (void *obj, const char *name, const AVOption **o_out), "LIBAVCODEC_52"){
- return av_get_double(obj, name, o_out);
-}
-FF_SYMVER(AVRational, av_get_q, (void *obj, const char *name, const AVOption **o_out), "LIBAVCODEC_52"){
- return av_get_q(obj, name, o_out);
-}
-FF_SYMVER(int64_t, av_get_int, (void *obj, const char *name, const AVOption **o_out), "LIBAVCODEC_52"){
- return av_get_int(obj, name, o_out);
-}
-FF_SYMVER(const char *, av_get_string, (void *obj, const char *name, const AVOption **o_out, char *buf, int buf_len), "LIBAVCODEC_52"){
- return av_get_string(obj, name, o_out, buf, buf_len);
-}
-FF_SYMVER(const AVOption *, av_next_option, (void *obj, const AVOption *last), "LIBAVCODEC_52"){
- return av_next_option(obj, last);
-}
-FF_SYMVER(int, av_opt_show2, (void *obj, void *av_log_obj, int req_flags, int rej_flags), "LIBAVCODEC_52"){
- return av_opt_show2(obj, av_log_obj, req_flags, rej_flags);
-}
-FF_SYMVER(void, av_opt_set_defaults, (void *s), "LIBAVCODEC_52"){
- return av_opt_set_defaults(s);
-}
-FF_SYMVER(void, av_opt_set_defaults2, (void *s, int mask, int flags), "LIBAVCODEC_52"){
- return av_opt_set_defaults2(s, mask, flags);
-}
-#endif
-
-#if FF_API_SET_STRING_OLD
-const AVOption *av_set_string2(void *obj, const char *name, const char *val, int alloc){
- const AVOption *o;
- if (av_set_string3(obj, name, val, alloc, &o) < 0)
- return NULL;
- return o;
-}
-
-const AVOption *av_set_string(void *obj, const char *name, const char *val){
- const AVOption *o;
- if (av_set_string3(obj, name, val, 0, &o) < 0)
- return NULL;
- return o;
-}
-#endif
-
-#if FF_API_OPT_SHOW
-int av_opt_show(void *obj, void *av_log_obj){
- return av_opt_show2(obj, av_log_obj,
- AV_OPT_FLAG_ENCODING_PARAM|AV_OPT_FLAG_DECODING_PARAM, 0);
-}
-#endif
diff --git a/libavcodec/pcm.c b/libavcodec/pcm.c
index cdc11f80d0..111ce6193f 100644
--- a/libavcodec/pcm.c
+++ b/libavcodec/pcm.c
@@ -71,7 +71,7 @@ static av_cold int pcm_encode_close(AVCodecContext *avctx)
* @param offset Sample value offset
*/
#define ENCODE(type, endian, src, dst, n, shift, offset) \
- samples_##type = (type*)src; \
+ samples_##type = (const type*) src; \
for(;n>0;n--) { \
register type v = (*samples_##type++ >> shift) + offset; \
bytestream_put_##endian(&dst, v); \
diff --git a/libavcodec/pthread.c b/libavcodec/pthread.c
index 82b0732592..e0eea4b1f1 100644
--- a/libavcodec/pthread.c
+++ b/libavcodec/pthread.c
@@ -877,7 +877,8 @@ static void validate_thread_parameters(AVCodecContext *avctx)
avctx->active_thread_type = 0;
} else if (frame_threading_supported && (avctx->thread_type & FF_THREAD_FRAME)) {
avctx->active_thread_type = FF_THREAD_FRAME;
- } else if (avctx->thread_type & FF_THREAD_SLICE) {
+ } else if (avctx->codec->capabilities & CODEC_CAP_SLICE_THREADS &&
+ avctx->thread_type & FF_THREAD_SLICE) {
avctx->active_thread_type = FF_THREAD_SLICE;
}
}
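
With the pthread.c change, slice threading is only activated when the selected codec advertises CODEC_CAP_SLICE_THREADS, which is why the MPEG-1/2/4 and H.263+ encoders above now set that flag. Requesting it from an application looks roughly like this (a sketch against the libavcodec 52 API of this tree; codec is assumed to come from avcodec_find_decoder() or avcodec_find_encoder()):

    /* Sketch: request slice threading; after this patch the request is
     * silently ignored for codecs without CODEC_CAP_SLICE_THREADS. */
    AVCodecContext *avctx = avcodec_alloc_context();
    avctx->thread_count = 4;
    avctx->thread_type  = FF_THREAD_SLICE;
    if (avcodec_open(avctx, codec) < 0)
        return -1;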
diff --git a/libavcodec/raw.c b/libavcodec/raw.c
index 26b4df6d57..e607148a2b 100644
--- a/libavcodec/raw.c
+++ b/libavcodec/raw.c
@@ -61,6 +61,7 @@ const PixelFormatTag ff_raw_pix_fmt_tags[] = {
{ PIX_FMT_UYVY422, MKTAG('A', 'V', '1', 'x') }, /* Avid 1:1x */
{ PIX_FMT_UYVY422, MKTAG('A', 'V', 'u', 'p') },
{ PIX_FMT_UYVY422, MKTAG('V', 'D', 'T', 'Z') }, /* SoftLab-NSK VideoTizer */
+ { PIX_FMT_UYVY422, MKTAG('a', 'u', 'v', '2') },
{ PIX_FMT_UYYVYY411, MKTAG('Y', '4', '1', '1') },
{ PIX_FMT_GRAY8, MKTAG('G', 'R', 'E', 'Y') },
{ PIX_FMT_NV12, MKTAG('N', 'V', '1', '2') },
diff --git a/libavcodec/rv10.c b/libavcodec/rv10.c
index e6e9afafb0..8185b75546 100644
--- a/libavcodec/rv10.c
+++ b/libavcodec/rv10.c
@@ -656,6 +656,8 @@ static int rv10_decode_frame(AVCodecContext *avctx,
const uint8_t *slices_hdr = NULL;
av_dlog(avctx, "*****frame %d size=%d\n", avctx->frame_number, buf_size);
+ s->flags = avctx->flags;
+ s->flags2 = avctx->flags2;
/* no supplementary picture */
if (buf_size == 0) {
diff --git a/libavcodec/sp5xdec.c b/libavcodec/sp5xdec.c
index 8bcdbe41b4..dd31edaf85 100644
--- a/libavcodec/sp5xdec.c
+++ b/libavcodec/sp5xdec.c
@@ -86,7 +86,6 @@ static int sp5x_decode_frame(AVCodecContext *avctx,
recoded[j++] = 0xFF;
recoded[j++] = 0xD9;
- avctx->flags &= ~CODEC_FLAG_EMU_EDGE;
av_init_packet(&avpkt_recoded);
avpkt_recoded.data = recoded;
avpkt_recoded.size = j;
@@ -121,6 +120,6 @@ AVCodec ff_amv_decoder = {
NULL,
ff_mjpeg_decode_end,
sp5x_decode_frame,
- CODEC_CAP_DR1,
+ 0,
.long_name = NULL_IF_CONFIG_SMALL("AMV Video"),
};
diff --git a/libavcodec/truemotion1.c b/libavcodec/truemotion1.c
index c7d414a648..b1b14319c5 100644
--- a/libavcodec/truemotion1.c
+++ b/libavcodec/truemotion1.c
@@ -353,7 +353,7 @@ static int truemotion1_decode_header(TrueMotion1Context *s)
s->flags = FLAG_KEYFRAME;
if (s->flags & FLAG_SPRITE) {
- av_log(s->avctx, AV_LOG_INFO, "SPRITE frame found, please report the sample to the developers\n");
+ av_log_ask_for_sample(s->avctx, "SPRITE frame found.\n");
/* FIXME header.width, height, xoffset and yoffset aren't initialized */
#if 0
s->w = header.width;
@@ -370,7 +370,7 @@ static int truemotion1_decode_header(TrueMotion1Context *s)
if ((s->w < 213) && (s->h >= 176))
{
s->flags |= FLAG_INTERPOLATED;
- av_log(s->avctx, AV_LOG_INFO, "INTERPOLATION selected, please report the sample to the developers\n");
+ av_log_ask_for_sample(s->avctx, "INTERPOLATION selected.\n");
}
}
}
diff --git a/libavcodec/tta.c b/libavcodec/tta.c
index eb4d71ff85..ece5c1c43b 100644
--- a/libavcodec/tta.c
+++ b/libavcodec/tta.c
@@ -247,7 +247,7 @@ static av_cold int tta_decode_init(AVCodecContext * avctx)
if (s->is_float)
{
avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
- av_log(s->avctx, AV_LOG_ERROR, "Unsupported sample format. Please contact the developers.\n");
+ av_log_ask_for_sample(s->avctx, "Unsupported sample format.\n");
return -1;
}
else switch(s->bps) {
@@ -256,7 +256,8 @@ static av_cold int tta_decode_init(AVCodecContext * avctx)
// case 3: avctx->sample_fmt = AV_SAMPLE_FMT_S24; break;
case 4: avctx->sample_fmt = AV_SAMPLE_FMT_S32; break;
default:
- av_log(s->avctx, AV_LOG_ERROR, "Invalid/unsupported sample format. Please contact the developers.\n");
+ av_log_ask_for_sample(s->avctx,
+ "Invalid/unsupported sample format.\n");
return -1;
}
diff --git a/libavcodec/twinvq.c b/libavcodec/twinvq.c
index 857ca5bae5..f8e75bb933 100644
--- a/libavcodec/twinvq.c
+++ b/libavcodec/twinvq.c
@@ -234,7 +234,7 @@ static void memset_float(float *buf, float val, int size)
* be a multiple of four.
* @return the LPC value
*
- * @todo reuse code from vorbis_dec.c: vorbis_floor0_decode
+ * @todo reuse code from Vorbis decoder: vorbis_floor0_decode
*/
static float eval_lpc_spectrum(const float *lsp, float cos_val, int order)
{
diff --git a/libavcodec/utils.c b/libavcodec/utils.c
index a6fb871ade..6eb3d3061f 100644
--- a/libavcodec/utils.c
+++ b/libavcodec/utils.c
@@ -482,7 +482,7 @@ static void avcodec_get_subtitle_defaults(AVSubtitle *sub)
int attribute_align_arg avcodec_open(AVCodecContext *avctx, AVCodec *codec)
{
- int ret= -1;
+ int ret = 0;
/* If there is a user-supplied mutex locking routine, call it. */
if (ff_lockmgr_cb) {
@@ -493,11 +493,14 @@ int attribute_align_arg avcodec_open(AVCodecContext *avctx, AVCodec *codec)
entangled_thread_counter++;
if(entangled_thread_counter != 1){
av_log(avctx, AV_LOG_ERROR, "insufficient thread locking around avcodec_open/close()\n");
+ ret = -1;
goto end;
}
- if(avctx->codec || !codec)
+ if(avctx->codec || !codec) {
+ ret = AVERROR(EINVAL);
goto end;
+ }
if (codec->priv_data_size > 0) {
if(!avctx->priv_data){
@@ -547,6 +550,7 @@ int attribute_align_arg avcodec_open(AVCodecContext *avctx, AVCodec *codec)
if (avctx->codec_id != codec->id || (avctx->codec_type != codec->type
&& avctx->codec_type != AVMEDIA_TYPE_ATTACHMENT)) {
av_log(avctx, AV_LOG_ERROR, "codec type or id mismatches\n");
+ ret = AVERROR(EINVAL);
goto free_and_end;
}
avctx->frame_number = 0;
@@ -561,6 +565,7 @@ int attribute_align_arg avcodec_open(AVCodecContext *avctx, AVCodec *codec)
if (avctx->codec->max_lowres < avctx->lowres) {
av_log(avctx, AV_LOG_ERROR, "The maximum value for lowres supported by the decoder is %d\n",
avctx->codec->max_lowres);
+ ret = AVERROR(EINVAL);
goto free_and_end;
}
if (avctx->codec->sample_fmts && avctx->codec->encode) {
@@ -570,6 +575,7 @@ int attribute_align_arg avcodec_open(AVCodecContext *avctx, AVCodec *codec)
break;
if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) {
av_log(avctx, AV_LOG_ERROR, "Specified sample_fmt is not supported.\n");
+ ret = AVERROR(EINVAL);
goto free_and_end;
}
}
@@ -1257,13 +1263,19 @@ void av_log_missing_feature(void *avc, const char *feature, int want_sample)
av_log(avc, AV_LOG_WARNING, "\n");
}
-void av_log_ask_for_sample(void *avc, const char *msg)
+void av_log_ask_for_sample(void *avc, const char *msg, ...)
{
+ va_list argument_list;
+
+ va_start(argument_list, msg);
+
if (msg)
- av_log(avc, AV_LOG_WARNING, "%s ", msg);
+ av_vlog(avc, AV_LOG_WARNING, msg, argument_list);
av_log(avc, AV_LOG_WARNING, "If you want to help, upload a sample "
"of this file to ftp://upload.ffmpeg.org/MPlayer/incoming/ "
"and contact the ffmpeg-devel mailing list.\n");
+
+ va_end(argument_list);
}
static AVHWAccel *first_hwaccel = NULL;
@@ -1345,8 +1357,7 @@ void ff_thread_await_progress(AVFrame *f, int progress, int field)
#endif
-#if LIBAVCODEC_VERSION_MAJOR < 53
-
+#if FF_API_THREAD_INIT
int avcodec_thread_init(AVCodecContext *s, int thread_count)
{
s->thread_count = thread_count;
diff --git a/libavcodec/v210x.c b/libavcodec/v210x.c
index 0d6a681b78..962678dde6 100644
--- a/libavcodec/v210x.c
+++ b/libavcodec/v210x.c
@@ -52,7 +52,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
}
if(avpkt->size > avctx->width * avctx->height * 8 / 3){
- av_log(avctx, AV_LOG_ERROR, "Probably padded data, need sample!\n");
+ av_log_ask_for_sample(avctx, "Probably padded data\n");
}
pic->reference= 0;
diff --git a/libavcodec/version.h b/libavcodec/version.h
index 0e2d766bfe..0482501da7 100644
--- a/libavcodec/version.h
+++ b/libavcodec/version.h
@@ -21,8 +21,8 @@
#define AVCODEC_VERSION_H
#define LIBAVCODEC_VERSION_MAJOR 52
-#define LIBAVCODEC_VERSION_MINOR 119
-#define LIBAVCODEC_VERSION_MICRO 1
+#define LIBAVCODEC_VERSION_MINOR 120
+#define LIBAVCODEC_VERSION_MICRO 0
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
LIBAVCODEC_VERSION_MINOR, \
@@ -86,5 +86,11 @@
#ifndef FF_API_REQUEST_CHANNELS
#define FF_API_REQUEST_CHANNELS (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
+#ifndef FF_API_OPT_H
+#define FF_API_OPT_H (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_THREAD_INIT
+#define FF_API_THREAD_INIT (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
#endif /* AVCODEC_VERSION_H */
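
The two new FF_API_* guards follow the existing pattern: each deprecated entry point stays compiled only while its macro evaluates to 1, i.e. until the major bump to 54. Callers can key off the same macro, for example (a sketch, assuming avctx is set up before avcodec_open()):

    /* Sketch: prefer the field over the deprecated call once
     * FF_API_THREAD_INIT turns off. */
    #if FF_API_THREAD_INIT
        avcodec_thread_init(avctx, 4);
    #else
        avctx->thread_count = 4;     /* set before avcodec_open() */
    #endif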
diff --git a/libavcodec/vorbis_dec.c b/libavcodec/vorbisdec.c
index 5ed3631950..eacfa5f6a4 100644
--- a/libavcodec/vorbis_dec.c
+++ b/libavcodec/vorbisdec.c
@@ -539,7 +539,7 @@ static int vorbis_parse_setup_hdr_floors(vorbis_context *vc)
rangemax = (1 << rangebits);
if (rangemax > vc->blocksize[1] / 2) {
av_log(vc->avccontext, AV_LOG_ERROR,
- "Floor value is too large for blocksize: %d (%d)\n",
+ "Floor value is too large for blocksize: %"PRIuFAST32" (%"PRIuFAST32")\n",
rangemax, vc->blocksize[1] / 2);
return -1;
}
diff --git a/libavcodec/vorbis_enc.c b/libavcodec/vorbisenc.c
index d15d3454eb..d15d3454eb 100644
--- a/libavcodec/vorbis_enc.c
+++ b/libavcodec/vorbisenc.c
diff --git a/libavcodec/vqavideo.c b/libavcodec/vqavideo.c
index f0ecda6217..109faab4fb 100644
--- a/libavcodec/vqavideo.c
+++ b/libavcodec/vqavideo.c
@@ -464,8 +464,6 @@ static void vqa_decode_chunk(VqaContext *s)
switch (s->vqa_version) {
case 1:
-/* still need sample media for this case (only one game, "Legend of
- * Kyrandia III : Malcolm's Revenge", is known to use this version) */
lobyte = s->decode_buffer[lobytes * 2];
hibyte = s->decode_buffer[(lobytes * 2) + 1];
vector_index = ((hibyte << 8) | lobyte) >> 3;
diff --git a/libavcodec/wnv1.c b/libavcodec/wnv1.c
index 2f96039053..15d90c1a89 100644
--- a/libavcodec/wnv1.c
+++ b/libavcodec/wnv1.c
@@ -96,11 +96,13 @@ static int decode_frame(AVCodecContext *avctx,
else {
l->shift = 8 - (buf[2] >> 4);
if (l->shift > 4) {
- av_log(avctx, AV_LOG_ERROR, "Unknown WNV1 frame header value %i, please upload file for study\n", buf[2] >> 4);
+ av_log_ask_for_sample(avctx, "Unknown WNV1 frame header value %i\n",
+ buf[2] >> 4);
l->shift = 4;
}
if (l->shift < 1) {
- av_log(avctx, AV_LOG_ERROR, "Unknown WNV1 frame header value %i, please upload file for study\n", buf[2] >> 4);
+ av_log_ask_for_sample(avctx, "Unknown WNV1 frame header value %i\n",
+ buf[2] >> 4);
l->shift = 1;
}
}