author     Michael Niedermayer <michaelni@gmx.at>   2013-08-27 19:08:02 +0200
committer  Michael Niedermayer <michaelni@gmx.at>   2013-08-27 19:13:15 +0200
commit     333e708520a53df727d951a0672829d1d5414b07 (patch)
tree       9c5cbd38cd49c742f72c674e38791af91047632d
parent     0930a562e7f2f7198f654dc268c71871da047c29 (diff)
parent     cbc6ded5b780195edc93d37f8b9e8eb6e00e8f0a (diff)
download   ffmpeg-333e708520a53df727d951a0672829d1d5414b07.tar.gz
Merge remote-tracking branch 'qatar/release/9' into release/1.1
* qatar/release/9: (21 commits)
  ogg: Fix potential infinite discard loop
  dxa: Make sure the reference frame exists
  h261: check the mtype index
  segafilm: Error out on impossible packet size
  ogg: Always alloc the private context in vorbis_header
  rtjpeg: Use init_get_bits8
  nuv: Reset the frame on resize
  nuv: Use av_fast_realloc
  nuv: return meaningful error codes.
  nuv: Pad the lzo outbuf
  nuv: Do not ignore lzo decompression failures
  rtmp: Do not misuse memcmp
  rtmp: rename data_size to size
  vc1: check mb_height validity.
  vc1: check the source buffer in vc1_mc functions
  bink: Bound check the quantization matrix.
  aac: Check init_get_bits return value
  aac: return meaningful errors
  aac: K&R formatting cosmetics
  oma: correctly mark and decrypt partial packets
  ...

Conflicts:
    libavcodec/aacdec.c
    libavcodec/h261dec.c
    libavcodec/nuv.c
    libavcodec/vc1dec.c
    libavformat/oggparsevorbis.c
    libavformat/omadec.c
    libavformat/rtmpproto.c
    tests/ref/fate/nuv-rtjpeg

Merged-by: Michael Niedermayer <michaelni@gmx.at>
-rw-r--r--  libavcodec/aacdec.c            327
-rw-r--r--  libavcodec/bink.c                3
-rw-r--r--  libavcodec/dxa.c                 6
-rw-r--r--  libavcodec/h261dec.c             4
-rw-r--r--  libavcodec/nuv.c                39
-rw-r--r--  libavcodec/rtjpeg.c              7
-rw-r--r--  libavcodec/vc1dec.c             18
-rw-r--r--  libavformat/oggparsevorbis.c     6
-rw-r--r--  libavformat/omadec.c            46
-rw-r--r--  libavformat/rtmppkt.c          107
-rw-r--r--  libavformat/rtmppkt.h            9
-rw-r--r--  libavformat/rtmpproto.c         95
-rw-r--r--  libavformat/segafilm.c           2
-rw-r--r--  tests/ref/fate/nuv-rtjpeg        1
14 files changed, 425 insertions, 245 deletions
diff --git a/libavcodec/aacdec.c b/libavcodec/aacdec.c
index 18928482cd..3379f8b701 100644
--- a/libavcodec/aacdec.c
+++ b/libavcodec/aacdec.c
@@ -215,28 +215,39 @@ struct elem_to_channel {
static int assign_pair(struct elem_to_channel e2c_vec[MAX_ELEM_ID],
uint8_t (*layout_map)[3], int offset, uint64_t left,
- uint64_t right, int pos)
+ uint64_t right, int pos)
{
if (layout_map[offset][0] == TYPE_CPE) {
e2c_vec[offset] = (struct elem_to_channel) {
- .av_position = left | right, .syn_ele = TYPE_CPE,
- .elem_id = layout_map[offset ][1], .aac_position = pos };
+ .av_position = left | right,
+ .syn_ele = TYPE_CPE,
+ .elem_id = layout_map[offset][1],
+ .aac_position = pos
+ };
return 1;
} else {
- e2c_vec[offset] = (struct elem_to_channel) {
- .av_position = left, .syn_ele = TYPE_SCE,
- .elem_id = layout_map[offset ][1], .aac_position = pos };
+ e2c_vec[offset] = (struct elem_to_channel) {
+ .av_position = left,
+ .syn_ele = TYPE_SCE,
+ .elem_id = layout_map[offset][1],
+ .aac_position = pos
+ };
e2c_vec[offset + 1] = (struct elem_to_channel) {
- .av_position = right, .syn_ele = TYPE_SCE,
- .elem_id = layout_map[offset + 1][1], .aac_position = pos };
+ .av_position = right,
+ .syn_ele = TYPE_SCE,
+ .elem_id = layout_map[offset + 1][1],
+ .aac_position = pos
+ };
return 2;
}
}
-static int count_paired_channels(uint8_t (*layout_map)[3], int tags, int pos, int *current) {
+static int count_paired_channels(uint8_t (*layout_map)[3], int tags, int pos,
+ int *current)
+{
int num_pos_channels = 0;
- int first_cpe = 0;
- int sce_parity = 0;
+ int first_cpe = 0;
+ int sce_parity = 0;
int i;
for (i = *current; i < tags; i++) {
if (layout_map[i][2] != pos)
@@ -250,7 +261,7 @@ static int count_paired_channels(uint8_t (*layout_map)[3], int tags, int pos, in
}
}
num_pos_channels += 2;
- first_cpe = 1;
+ first_cpe = 1;
} else {
num_pos_channels++;
sce_parity ^= 1;
@@ -258,7 +269,7 @@ static int count_paired_channels(uint8_t (*layout_map)[3], int tags, int pos, in
}
if (sce_parity &&
((pos == AAC_CHANNEL_FRONT && first_cpe) || pos == AAC_CHANNEL_SIDE))
- return -1;
+ return -1;
*current = i;
return num_pos_channels;
}
@@ -266,7 +277,7 @@ static int count_paired_channels(uint8_t (*layout_map)[3], int tags, int pos, in
static uint64_t sniff_channel_order(uint8_t (*layout_map)[3], int tags)
{
int i, n, total_non_cc_elements;
- struct elem_to_channel e2c_vec[4*MAX_ELEM_ID] = {{ 0 }};
+ struct elem_to_channel e2c_vec[4 * MAX_ELEM_ID] = { { 0 } };
int num_front_channels, num_side_channels, num_back_channels;
uint64_t layout;
@@ -290,8 +301,11 @@ static uint64_t sniff_channel_order(uint8_t (*layout_map)[3], int tags)
i = 0;
if (num_front_channels & 1) {
e2c_vec[i] = (struct elem_to_channel) {
- .av_position = AV_CH_FRONT_CENTER, .syn_ele = TYPE_SCE,
- .elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_FRONT };
+ .av_position = AV_CH_FRONT_CENTER,
+ .syn_ele = TYPE_SCE,
+ .elem_id = layout_map[i][1],
+ .aac_position = AAC_CHANNEL_FRONT
+ };
i++;
num_front_channels--;
}
@@ -348,22 +362,31 @@ static uint64_t sniff_channel_order(uint8_t (*layout_map)[3], int tags)
}
if (num_back_channels) {
e2c_vec[i] = (struct elem_to_channel) {
- .av_position = AV_CH_BACK_CENTER, .syn_ele = TYPE_SCE,
- .elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_BACK };
+ .av_position = AV_CH_BACK_CENTER,
+ .syn_ele = TYPE_SCE,
+ .elem_id = layout_map[i][1],
+ .aac_position = AAC_CHANNEL_BACK
+ };
i++;
num_back_channels--;
}
if (i < tags && layout_map[i][2] == AAC_CHANNEL_LFE) {
e2c_vec[i] = (struct elem_to_channel) {
- .av_position = AV_CH_LOW_FREQUENCY, .syn_ele = TYPE_LFE,
- .elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_LFE };
+ .av_position = AV_CH_LOW_FREQUENCY,
+ .syn_ele = TYPE_LFE,
+ .elem_id = layout_map[i][1],
+ .aac_position = AAC_CHANNEL_LFE
+ };
i++;
}
while (i < tags && layout_map[i][2] == AAC_CHANNEL_LFE) {
e2c_vec[i] = (struct elem_to_channel) {
- .av_position = UINT64_MAX, .syn_ele = TYPE_LFE,
- .elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_LFE };
+ .av_position = UINT64_MAX,
+ .syn_ele = TYPE_LFE,
+ .elem_id = layout_map[i][1],
+ .aac_position = AAC_CHANNEL_LFE
+ };
i++;
}
@@ -371,12 +394,11 @@ static uint64_t sniff_channel_order(uint8_t (*layout_map)[3], int tags)
total_non_cc_elements = n = i;
do {
int next_n = 0;
- for (i = 1; i < n; i++) {
- if (e2c_vec[i-1].av_position > e2c_vec[i].av_position) {
- FFSWAP(struct elem_to_channel, e2c_vec[i-1], e2c_vec[i]);
+ for (i = 1; i < n; i++)
+ if (e2c_vec[i - 1].av_position > e2c_vec[i].av_position) {
+ FFSWAP(struct elem_to_channel, e2c_vec[i - 1], e2c_vec[i]);
next_n = i;
}
- }
n = next_n;
} while (n > 0);
@@ -418,12 +440,13 @@ static void pop_output_configuration(AACContext *ac) {
}
/**
- * Configure output channel order based on the current program configuration element.
+ * Configure output channel order based on the current program
+ * configuration element.
*
* @return Returns error status. 0 - OK, !0 - error
*/
static int output_configure(AACContext *ac,
- uint8_t layout_map[MAX_ELEM_ID*4][3], int tags,
+ uint8_t layout_map[MAX_ELEM_ID * 4][3], int tags,
enum OCStatus oc_type, int get_new_frame)
{
AVCodecContext *avctx = ac->avctx;
@@ -495,36 +518,40 @@ static void flush(AVCodecContext *avctx)
* @return Returns error status. 0 - OK, !0 - error
*/
static int set_default_channel_config(AVCodecContext *avctx,
- uint8_t (*layout_map)[3],
- int *tags,
- int channel_config)
+ uint8_t (*layout_map)[3],
+ int *tags,
+ int channel_config)
{
if (channel_config < 1 || channel_config > 7) {
- av_log(avctx, AV_LOG_ERROR, "invalid default channel configuration (%d)\n",
+ av_log(avctx, AV_LOG_ERROR,
+ "invalid default channel configuration (%d)\n",
channel_config);
- return -1;
+ return AVERROR_INVALIDDATA;
}
*tags = tags_per_config[channel_config];
- memcpy(layout_map, aac_channel_layout_map[channel_config-1], *tags * sizeof(*layout_map));
+ memcpy(layout_map, aac_channel_layout_map[channel_config - 1],
+ *tags * sizeof(*layout_map));
return 0;
}
static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
{
- // For PCE based channel configurations map the channels solely based on tags.
+ /* For PCE based channel configurations map the channels solely based
+ * on tags. */
if (!ac->oc[1].m4ac.chan_config) {
return ac->tag_che_map[type][elem_id];
}
// Allow single CPE stereo files to be signalled with mono configuration.
- if (!ac->tags_mapped && type == TYPE_CPE && ac->oc[1].m4ac.chan_config == 1) {
+ if (!ac->tags_mapped && type == TYPE_CPE &&
+ ac->oc[1].m4ac.chan_config == 1) {
uint8_t layout_map[MAX_ELEM_ID*4][3];
int layout_map_tags;
push_output_configuration(ac);
av_log(ac->avctx, AV_LOG_DEBUG, "mono with CPE\n");
- if (set_default_channel_config(ac->avctx, layout_map, &layout_map_tags,
- 2) < 0)
+ if (set_default_channel_config(ac->avctx, layout_map,
+ &layout_map_tags, 2) < 0)
return NULL;
if (output_configure(ac, layout_map, layout_map_tags,
OC_TRIAL_FRAME, 1) < 0)
@@ -534,15 +561,16 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
ac->oc[1].m4ac.ps = 0;
}
// And vice-versa
- if (!ac->tags_mapped && type == TYPE_SCE && ac->oc[1].m4ac.chan_config == 2) {
- uint8_t layout_map[MAX_ELEM_ID*4][3];
+ if (!ac->tags_mapped && type == TYPE_SCE &&
+ ac->oc[1].m4ac.chan_config == 2) {
+ uint8_t layout_map[MAX_ELEM_ID * 4][3];
int layout_map_tags;
push_output_configuration(ac);
av_log(ac->avctx, AV_LOG_DEBUG, "stereo with SCE\n");
- if (set_default_channel_config(ac->avctx, layout_map, &layout_map_tags,
- 1) < 0)
+ if (set_default_channel_config(ac->avctx, layout_map,
+ &layout_map_tags, 1) < 0)
return NULL;
if (output_configure(ac, layout_map, layout_map_tags,
OC_TRIAL_FRAME, 1) < 0)
@@ -552,7 +580,8 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
if (ac->oc[1].m4ac.sbr)
ac->oc[1].m4ac.ps = -1;
}
- // For indexed channel configurations map the channels solely based on position.
+ /* For indexed channel configurations map the channels solely based
+ * on position. */
switch (ac->oc[1].m4ac.chan_config) {
case 7:
if (ac->tags_mapped == 3 && type == TYPE_CPE) {
@@ -560,9 +589,12 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][2];
}
case 6:
- /* Some streams incorrectly code 5.1 audio as SCE[0] CPE[0] CPE[1] SCE[1]
- instead of SCE[0] CPE[0] CPE[1] LFE[0]. If we seem to have
- encountered such a stream, transfer the LFE[0] element to the SCE[1]'s mapping */
+ /* Some streams incorrectly code 5.1 audio as
+ * SCE[0] CPE[0] CPE[1] SCE[1]
+ * instead of
+ * SCE[0] CPE[0] CPE[1] LFE[0].
+ * If we seem to have encountered such a stream, transfer
+ * the LFE[0] element to the SCE[1]'s mapping */
if (ac->tags_mapped == tags_per_config[ac->oc[1].m4ac.chan_config] - 1 && (type == TYPE_LFE || type == TYPE_SCE)) {
ac->tags_mapped++;
return ac->tag_che_map[type][elem_id] = ac->che[TYPE_LFE][0];
@@ -573,13 +605,16 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][1];
}
case 4:
- if (ac->tags_mapped == 2 && ac->oc[1].m4ac.chan_config == 4 && type == TYPE_SCE) {
+ if (ac->tags_mapped == 2 &&
+ ac->oc[1].m4ac.chan_config == 4 &&
+ type == TYPE_SCE) {
ac->tags_mapped++;
return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][1];
}
case 3:
case 2:
- if (ac->tags_mapped == (ac->oc[1].m4ac.chan_config != 2) && type == TYPE_CPE) {
+ if (ac->tags_mapped == (ac->oc[1].m4ac.chan_config != 2) &&
+ type == TYPE_CPE) {
ac->tags_mapped++;
return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][0];
} else if (ac->oc[1].m4ac.chan_config == 2) {
@@ -596,7 +631,8 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
}
/**
- * Decode an array of 4 bit element IDs, optionally interleaved with a stereo/mono switching bit.
+ * Decode an array of 4 bit element IDs, optionally interleaved with a
+ * stereo/mono switching bit.
*
* @param type speaker type/position for these channels
*/
@@ -638,7 +674,8 @@ static int decode_pce(AVCodecContext *avctx, MPEG4AudioConfig *m4ac,
uint8_t (*layout_map)[3],
GetBitContext *gb)
{
- int num_front, num_side, num_back, num_lfe, num_assoc_data, num_cc, sampling_index;
+ int num_front, num_side, num_back, num_lfe, num_assoc_data, num_cc;
+ int sampling_index;
int comment_len;
int tags;
@@ -646,7 +683,9 @@ static int decode_pce(AVCodecContext *avctx, MPEG4AudioConfig *m4ac,
sampling_index = get_bits(gb, 4);
if (m4ac->sampling_index != sampling_index)
- av_log(avctx, AV_LOG_WARNING, "Sample rate index in program config element does not match the sample rate index configured by the container.\n");
+ av_log(avctx, AV_LOG_WARNING,
+ "Sample rate index in program config element does not "
+ "match the sample rate index configured by the container.\n");
num_front = get_bits(gb, 4);
num_side = get_bits(gb, 4);
@@ -687,7 +726,7 @@ static int decode_pce(AVCodecContext *avctx, MPEG4AudioConfig *m4ac,
comment_len = get_bits(gb, 8) * 8;
if (get_bits_left(gb) < comment_len) {
av_log(avctx, AV_LOG_ERROR, "decode_pce: " overread_err);
- return -1;
+ return AVERROR_INVALIDDATA;
}
skip_bits_long(gb, comment_len);
return tags;
@@ -729,7 +768,8 @@ static int decode_ga_specific_config(AACContext *ac, AVCodecContext *avctx,
if (tags < 0)
return tags;
} else {
- if ((ret = set_default_channel_config(avctx, layout_map, &tags, channel_config)))
+ if ((ret = set_default_channel_config(avctx, layout_map,
+ &tags, channel_config)))
return ret;
}
@@ -751,7 +791,7 @@ static int decode_ga_specific_config(AACContext *ac, AVCodecContext *avctx,
case AOT_ER_AAC_LTP:
case AOT_ER_AAC_SCALABLE:
case AOT_ER_AAC_LD:
- skip_bits(gb, 3); /* aacSectionDataResilienceFlag
+ skip_bits(gb, 3); /* aacSectionDataResilienceFlag
* aacScalefactorDataResilienceFlag
* aacSpectralDataResilienceFlag
*/
@@ -781,20 +821,24 @@ static int decode_audio_specific_config(AACContext *ac,
int sync_extension)
{
GetBitContext gb;
- int i;
+ int i, ret;
av_dlog(avctx, "audio specific config size %d\n", bit_size >> 3);
for (i = 0; i < bit_size >> 3; i++)
av_dlog(avctx, "%02x ", data[i]);
av_dlog(avctx, "\n");
- init_get_bits(&gb, data, bit_size);
+ if ((ret = init_get_bits(&gb, data, bit_size)) < 0)
+ return ret;
- if ((i = avpriv_mpeg4audio_get_config(m4ac, data, bit_size, sync_extension)) < 0)
- return -1;
+ if ((i = avpriv_mpeg4audio_get_config(m4ac, data, bit_size,
+ sync_extension)) < 0)
+ return AVERROR_INVALIDDATA;
if (m4ac->sampling_index > 12) {
- av_log(avctx, AV_LOG_ERROR, "invalid sampling rate index %d\n", m4ac->sampling_index);
- return -1;
+ av_log(avctx, AV_LOG_ERROR,
+ "invalid sampling rate index %d\n",
+ m4ac->sampling_index);
+ return AVERROR_INVALIDDATA;
}
skip_bits_long(&gb, i);
@@ -803,18 +847,23 @@ static int decode_audio_specific_config(AACContext *ac,
case AOT_AAC_MAIN:
case AOT_AAC_LC:
case AOT_AAC_LTP:
- if (decode_ga_specific_config(ac, avctx, &gb, m4ac, m4ac->chan_config))
- return -1;
+ if ((ret = decode_ga_specific_config(ac, avctx, &gb,
+ m4ac, m4ac->chan_config)) < 0)
+ return ret;
break;
default:
- av_log(avctx, AV_LOG_ERROR, "Audio object type %s%d is not supported.\n",
- m4ac->sbr == 1? "SBR+" : "", m4ac->object_type);
- return -1;
+ av_log(avctx, AV_LOG_ERROR,
+ "Audio object type %s%d is not supported.\n",
+ m4ac->sbr == 1 ? "SBR+" : "",
+ m4ac->object_type);
+ return AVERROR(ENOSYS);
}
- av_dlog(avctx, "AOT %d chan config %d sampling index %d (%d) SBR %d PS %d\n",
+ av_dlog(avctx,
+ "AOT %d chan config %d sampling index %d (%d) SBR %d PS %d\n",
m4ac->object_type, m4ac->chan_config, m4ac->sampling_index,
- m4ac->sample_rate, m4ac->sbr, m4ac->ps);
+ m4ac->sample_rate, m4ac->sbr,
+ m4ac->ps);
return get_bits_count(&gb);
}
@@ -872,15 +921,18 @@ static void reset_predictor_group(PredictorState *ps, int group_num)
reset_predict_state(&ps[i]);
}
-#define AAC_INIT_VLC_STATIC(num, size) \
- INIT_VLC_STATIC(&vlc_spectral[num], 8, ff_aac_spectral_sizes[num], \
- ff_aac_spectral_bits[num], sizeof( ff_aac_spectral_bits[num][0]), sizeof( ff_aac_spectral_bits[num][0]), \
- ff_aac_spectral_codes[num], sizeof(ff_aac_spectral_codes[num][0]), sizeof(ff_aac_spectral_codes[num][0]), \
+#define AAC_INIT_VLC_STATIC(num, size) \
+ INIT_VLC_STATIC(&vlc_spectral[num], 8, ff_aac_spectral_sizes[num], \
+ ff_aac_spectral_bits[num], sizeof(ff_aac_spectral_bits[num][0]), \
+ sizeof(ff_aac_spectral_bits[num][0]), \
+ ff_aac_spectral_codes[num], sizeof(ff_aac_spectral_codes[num][0]), \
+ sizeof(ff_aac_spectral_codes[num][0]), \
size);
static av_cold int aac_decode_init(AVCodecContext *avctx)
{
AACContext *ac = avctx->priv_data;
+ int ret;
ac->avctx = avctx;
ac->oc[1].m4ac.sample_rate = avctx->sample_rate;
@@ -888,10 +940,11 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
if (avctx->extradata_size > 0) {
- if (decode_audio_specific_config(ac, ac->avctx, &ac->oc[1].m4ac,
- avctx->extradata,
- avctx->extradata_size*8, 1) < 0)
- return -1;
+ if ((ret = decode_audio_specific_config(ac, ac->avctx, &ac->oc[1].m4ac,
+ avctx->extradata,
+ avctx->extradata_size * 8,
+ 1)) < 0)
+ return ret;
} else {
int sr, i;
uint8_t layout_map[MAX_ELEM_ID*4][3];
@@ -949,9 +1002,14 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
ff_aac_tableinit();
- INIT_VLC_STATIC(&vlc_scalefactors,7,FF_ARRAY_ELEMS(ff_aac_scalefactor_code),
- ff_aac_scalefactor_bits, sizeof(ff_aac_scalefactor_bits[0]), sizeof(ff_aac_scalefactor_bits[0]),
- ff_aac_scalefactor_code, sizeof(ff_aac_scalefactor_code[0]), sizeof(ff_aac_scalefactor_code[0]),
+ INIT_VLC_STATIC(&vlc_scalefactors, 7,
+ FF_ARRAY_ELEMS(ff_aac_scalefactor_code),
+ ff_aac_scalefactor_bits,
+ sizeof(ff_aac_scalefactor_bits[0]),
+ sizeof(ff_aac_scalefactor_bits[0]),
+ ff_aac_scalefactor_code,
+ sizeof(ff_aac_scalefactor_code[0]),
+ sizeof(ff_aac_scalefactor_code[0]),
352);
ff_mdct_init(&ac->mdct, 11, 1, 1.0 / (32768.0 * 1024.0));
@@ -985,7 +1043,7 @@ static int skip_data_stream_element(AACContext *ac, GetBitContext *gb)
if (get_bits_left(gb) < 8 * count) {
av_log(ac->avctx, AV_LOG_ERROR, "skip_data_stream_element: "overread_err);
- return -1;
+ return AVERROR_INVALIDDATA;
}
skip_bits_long(gb, 8 * count);
return 0;
@@ -997,9 +1055,11 @@ static int decode_prediction(AACContext *ac, IndividualChannelStream *ics,
int sfb;
if (get_bits1(gb)) {
ics->predictor_reset_group = get_bits(gb, 5);
- if (ics->predictor_reset_group == 0 || ics->predictor_reset_group > 30) {
- av_log(ac->avctx, AV_LOG_ERROR, "Invalid Predictor Reset Group.\n");
- return -1;
+ if (ics->predictor_reset_group == 0 ||
+ ics->predictor_reset_group > 30) {
+ av_log(ac->avctx, AV_LOG_ERROR,
+ "Invalid Predictor Reset Group.\n");
+ return AVERROR_INVALIDDATA;
}
}
for (sfb = 0; sfb < FFMIN(ics->max_sfb, ff_aac_pred_sfb_max[ac->oc[1].m4ac.sampling_index]); sfb++) {
@@ -1068,7 +1128,8 @@ static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
goto fail;
}
} else if (ac->oc[1].m4ac.object_type == AOT_AAC_LC) {
- av_log(ac->avctx, AV_LOG_ERROR, "Prediction is not allowed in AAC-LC.\n");
+ av_log(ac->avctx, AV_LOG_ERROR,
+ "Prediction is not allowed in AAC-LC.\n");
goto fail;
} else {
if ((ics->ltp.present = get_bits(gb, 1)))
@@ -1079,7 +1140,8 @@ static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
if (ics->max_sfb > ics->num_swb) {
av_log(ac->avctx, AV_LOG_ERROR,
- "Number of scalefactor bands in group (%d) exceeds limit (%d).\n",
+ "Number of scalefactor bands in group (%d) "
+ "exceeds limit (%d).\n",
ics->max_sfb, ics->num_swb);
goto fail;
}
@@ -1112,20 +1174,20 @@ static int decode_band_types(AACContext *ac, enum BandType band_type[120],
int sect_band_type = get_bits(gb, 4);
if (sect_band_type == 12) {
av_log(ac->avctx, AV_LOG_ERROR, "invalid band type\n");
- return -1;
+ return AVERROR_INVALIDDATA;
}
do {
sect_len_incr = get_bits(gb, bits);
sect_end += sect_len_incr;
if (get_bits_left(gb) < 0) {
av_log(ac->avctx, AV_LOG_ERROR, "decode_band_types: "overread_err);
- return -1;
+ return AVERROR_INVALIDDATA;
}
if (sect_end > ics->max_sfb) {
av_log(ac->avctx, AV_LOG_ERROR,
"Number of bands (%d) exceeds limit (%d).\n",
sect_end, ics->max_sfb);
- return -1;
+ return AVERROR_INVALIDDATA;
}
} while (sect_len_incr == (1 << bits) - 1);
for (; k < sect_end; k++) {
@@ -1163,7 +1225,8 @@ static int decode_scalefactors(AACContext *ac, float sf[120], GetBitContext *gb,
if (band_type[idx] == ZERO_BT) {
for (; i < run_end; i++, idx++)
sf[idx] = 0.;
- } else if ((band_type[idx] == INTENSITY_BT) || (band_type[idx] == INTENSITY_BT2)) {
+ } else if ((band_type[idx] == INTENSITY_BT) ||
+ (band_type[idx] == INTENSITY_BT2)) {
for (; i < run_end; i++, idx++) {
offset[2] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
clipped_offset = av_clip(offset[2], -155, 100);
@@ -1196,7 +1259,7 @@ static int decode_scalefactors(AACContext *ac, float sf[120], GetBitContext *gb,
if (offset[0] > 255U) {
av_log(ac->avctx, AV_LOG_ERROR,
"Scalefactor (%d) out of range.\n", offset[0]);
- return -1;
+ return AVERROR_INVALIDDATA;
}
sf[idx] = -ff_aac_pow2sf_tab[offset[0] - 100 + POW_SF2_ZERO];
}
@@ -1251,10 +1314,11 @@ static int decode_tns(AACContext *ac, TemporalNoiseShaping *tns,
tns->length[w][filt] = get_bits(gb, 6 - 2 * is8);
if ((tns->order[w][filt] = get_bits(gb, 5 - 2 * is8)) > tns_max_order) {
- av_log(ac->avctx, AV_LOG_ERROR, "TNS filter order %d is greater than maximum %d.\n",
+ av_log(ac->avctx, AV_LOG_ERROR,
+ "TNS filter order %d is greater than maximum %d.\n",
tns->order[w][filt], tns_max_order);
tns->order[w][filt] = 0;
- return -1;
+ return AVERROR_INVALIDDATA;
}
if (tns->order[w][filt]) {
tns->direction[w][filt] = get_bits1(gb);
@@ -1283,7 +1347,9 @@ static void decode_mid_side_stereo(ChannelElement *cpe, GetBitContext *gb,
{
int idx;
if (ms_present == 1) {
- for (idx = 0; idx < cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb; idx++)
+ for (idx = 0;
+ idx < cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb;
+ idx++)
cpe->ms_mask[idx] = get_bits1(gb);
} else if (ms_present == 2) {
memset(cpe->ms_mask, 1, sizeof(cpe->ms_mask[0]) * cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb);
@@ -1382,7 +1448,8 @@ static int decode_spectrum_and_dequant(AACContext *ac, float coef[1024],
float *coef_base = coef;
for (g = 0; g < ics->num_windows; g++)
- memset(coef + g * 128 + offsets[ics->max_sfb], 0, sizeof(float) * (c - offsets[ics->max_sfb]));
+ memset(coef + g * 128 + offsets[ics->max_sfb], 0,
+ sizeof(float) * (c - offsets[ics->max_sfb]));
for (g = 0; g < ics->num_window_groups; g++) {
unsigned g_len = ics->group_len[g];
@@ -1537,7 +1604,7 @@ static int decode_spectrum_and_dequant(AACContext *ac, float coef[1024],
if (b > 8) {
av_log(ac->avctx, AV_LOG_ERROR, "error in spectral data, ESC overflow\n");
- return -1;
+ return AVERROR_INVALIDDATA;
}
SKIP_BITS(re, gb, b + 1);
@@ -1652,14 +1719,20 @@ static void apply_prediction(AACContext *ac, SingleChannelElement *sce)
}
if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
- for (sfb = 0; sfb < ff_aac_pred_sfb_max[ac->oc[1].m4ac.sampling_index]; sfb++) {
- for (k = sce->ics.swb_offset[sfb]; k < sce->ics.swb_offset[sfb + 1]; k++) {
+ for (sfb = 0;
+ sfb < ff_aac_pred_sfb_max[ac->oc[1].m4ac.sampling_index];
+ sfb++) {
+ for (k = sce->ics.swb_offset[sfb];
+ k < sce->ics.swb_offset[sfb + 1];
+ k++) {
predict(&sce->predictor_state[k], &sce->coeffs[k],
- sce->ics.predictor_present && sce->ics.prediction_used[sfb]);
+ sce->ics.predictor_present &&
+ sce->ics.prediction_used[sfb]);
}
}
if (sce->ics.predictor_reset_group)
- reset_predictor_group(sce->predictor_state, sce->ics.predictor_reset_group);
+ reset_predictor_group(sce->predictor_state,
+ sce->ics.predictor_reset_group);
} else
reset_all_predictors(sce->predictor_state);
}
@@ -1680,6 +1753,7 @@ static int decode_ics(AACContext *ac, SingleChannelElement *sce,
IndividualChannelStream *ics = &sce->ics;
float *out = sce->coeffs;
int global_gain, pulse_present = 0;
+ int ret;
/* This assignment is to silence a GCC warning about the variable being used
* uninitialized when in fact it always is.
@@ -1693,33 +1767,38 @@ static int decode_ics(AACContext *ac, SingleChannelElement *sce,
return AVERROR_INVALIDDATA;
}
- if (decode_band_types(ac, sce->band_type, sce->band_type_run_end, gb, ics) < 0)
- return -1;
- if (decode_scalefactors(ac, sce->sf, gb, global_gain, ics, sce->band_type, sce->band_type_run_end) < 0)
- return -1;
+ if ((ret = decode_band_types(ac, sce->band_type,
+ sce->band_type_run_end, gb, ics)) < 0)
+ return ret;
+ if ((ret = decode_scalefactors(ac, sce->sf, gb, global_gain, ics,
+ sce->band_type, sce->band_type_run_end)) < 0)
+ return ret;
pulse_present = 0;
if (!scale_flag) {
if ((pulse_present = get_bits1(gb))) {
if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
- av_log(ac->avctx, AV_LOG_ERROR, "Pulse tool not allowed in eight short sequence.\n");
- return -1;
+ av_log(ac->avctx, AV_LOG_ERROR,
+ "Pulse tool not allowed in eight short sequence.\n");
+ return AVERROR_INVALIDDATA;
}
if (decode_pulses(&pulse, gb, ics->swb_offset, ics->num_swb)) {
- av_log(ac->avctx, AV_LOG_ERROR, "Pulse data corrupt or invalid.\n");
- return -1;
+ av_log(ac->avctx, AV_LOG_ERROR,
+ "Pulse data corrupt or invalid.\n");
+ return AVERROR_INVALIDDATA;
}
}
if ((tns->present = get_bits1(gb)) && decode_tns(ac, tns, gb, ics))
- return -1;
+ return AVERROR_INVALIDDATA;
if (get_bits1(gb)) {
av_log_missing_feature(ac->avctx, "SSR", 1);
return AVERROR_PATCHWELCOME;
}
}
- if (decode_spectrum_and_dequant(ac, out, gb, sce->sf, pulse_present, &pulse, ics, sce->band_type) < 0)
- return -1;
+ if (decode_spectrum_and_dequant(ac, out, gb, sce->sf, pulse_present,
+ &pulse, ics, sce->band_type) < 0)
+ return AVERROR_INVALIDDATA;
if (ac->oc[1].m4ac.object_type == AOT_AAC_MAIN && !common_window)
apply_prediction(ac, sce);
@@ -1740,7 +1819,8 @@ static void apply_mid_side_stereo(AACContext *ac, ChannelElement *cpe)
for (g = 0; g < ics->num_window_groups; g++) {
for (i = 0; i < ics->max_sfb; i++, idx++) {
if (cpe->ms_mask[idx] &&
- cpe->ch[0].band_type[idx] < NOISE_BT && cpe->ch[1].band_type[idx] < NOISE_BT) {
+ cpe->ch[0].band_type[idx] < NOISE_BT &&
+ cpe->ch[1].band_type[idx] < NOISE_BT) {
for (group = 0; group < ics->group_len[g]; group++) {
ac->dsp.butterflies_float(ch0 + group * 128 + offsets[i],
ch1 + group * 128 + offsets[i],
@@ -1760,7 +1840,8 @@ static void apply_mid_side_stereo(AACContext *ac, ChannelElement *cpe)
* [1] mask is decoded from bitstream; [2] mask is all 1s;
* [3] reserved for scalable AAC
*/
-static void apply_intensity_stereo(AACContext *ac, ChannelElement *cpe, int ms_present)
+static void apply_intensity_stereo(AACContext *ac,
+ ChannelElement *cpe, int ms_present)
{
const IndividualChannelStream *ics = &cpe->ch[1].ics;
SingleChannelElement *sce1 = &cpe->ch[1];
@@ -1771,7 +1852,8 @@ static void apply_intensity_stereo(AACContext *ac, ChannelElement *cpe, int ms_p
float scale;
for (g = 0; g < ics->num_window_groups; g++) {
for (i = 0; i < ics->max_sfb;) {
- if (sce1->band_type[idx] == INTENSITY_BT || sce1->band_type[idx] == INTENSITY_BT2) {
+ if (sce1->band_type[idx] == INTENSITY_BT ||
+ sce1->band_type[idx] == INTENSITY_BT2) {
const int bt_run_end = sce1->band_type_run_end[idx];
for (; i < bt_run_end; i++, idx++) {
c = -1 + 2 * (sce1->band_type[idx] - 14);
@@ -1811,13 +1893,14 @@ static int decode_cpe(AACContext *ac, GetBitContext *gb, ChannelElement *cpe)
i = cpe->ch[1].ics.use_kb_window[0];
cpe->ch[1].ics = cpe->ch[0].ics;
cpe->ch[1].ics.use_kb_window[1] = i;
- if (cpe->ch[1].ics.predictor_present && (ac->oc[1].m4ac.object_type != AOT_AAC_MAIN))
+ if (cpe->ch[1].ics.predictor_present &&
+ (ac->oc[1].m4ac.object_type != AOT_AAC_MAIN))
if ((cpe->ch[1].ics.ltp.present = get_bits(gb, 1)))
decode_ltp(&cpe->ch[1].ics.ltp, gb, cpe->ch[1].ics.max_sfb);
ms_present = get_bits(gb, 2);
if (ms_present == 3) {
av_log(ac->avctx, AV_LOG_ERROR, "ms_present = 3 is reserved.\n");
- return -1;
+ return AVERROR_INVALIDDATA;
} else if (ms_present)
decode_mid_side_stereo(cpe, gb, ms_present);
}
@@ -2678,7 +2761,8 @@ static int aac_decode_frame(AVCodecContext *avctx, void *data,
if (ac->force_dmono_mode >= 0)
ac->dmono_mode = ac->force_dmono_mode;
- init_get_bits(&gb, buf, buf_size * 8);
+ if ((err = init_get_bits(&gb, buf, buf_size * 8)) < 0)
+ return err;
if ((err = aac_decode_frame_int(avctx, data, got_frame_ptr, &gb, avpkt)) < 0)
return err;
@@ -2714,13 +2798,13 @@ static av_cold int aac_decode_close(AVCodecContext *avctx)
#define LOAS_SYNC_WORD 0x2b7 ///< 11 bits LOAS sync word
struct LATMContext {
- AACContext aac_ctx; ///< containing AACContext
- int initialized; ///< initialized after a valid extradata was seen
+ AACContext aac_ctx; ///< containing AACContext
+ int initialized; ///< initialized after a valid extradata was seen
// parser data
- int audio_mux_version_A; ///< LATM syntax version
- int frame_length_type; ///< 0/1 variable/fixed frame length
- int frame_length; ///< frame length for fixed frame length
+ int audio_mux_version_A; ///< LATM syntax version
+ int frame_length_type; ///< 0/1 variable/fixed frame length
+ int frame_length; ///< frame length for fixed frame length
};
static inline uint32_t latm_get_value(GetBitContext *b)
@@ -2928,7 +3012,8 @@ static int latm_decode_frame(AVCodecContext *avctx, void *out,
int muxlength, err;
GetBitContext gb;
- init_get_bits(&gb, avpkt->data, avpkt->size * 8);
+ if ((err = init_get_bits(&gb, avpkt->data, avpkt->size * 8)) < 0)
+ return err;
// check for LOAS sync word
if (get_bits(&gb, 11) != LOAS_SYNC_WORD)
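
The aacdec.c hunks above replace bare -1 returns with AVERROR codes and start checking the value returned by init_get_bits(). A minimal standalone sketch of that propagation pattern, assuming a hypothetical parse_header() helper rather than any real FFmpeg function:

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for FFmpeg's AVERROR(): negative errno-style return codes. */
    #define ERR(e) (-(e))

    /* Hypothetical parser: fails with a meaningful code, never a bare -1. */
    static int parse_header(const unsigned char *buf, size_t size)
    {
        if (size < 4)
            return ERR(EINVAL);   /* corresponds to AVERROR_INVALIDDATA */
        return buf[0];            /* success: some non-negative value */
    }

    int main(void)
    {
        const unsigned char buf[8] = { 0x12, 0x34 };
        int ret;

        /* Capture the return value once and forward it unchanged, so every
         * caller up the chain sees *why* the parse failed. */
        if ((ret = parse_header(buf, sizeof(buf))) < 0) {
            fprintf(stderr, "parse failed: %d\n", ret);
            return 1;
        }
        printf("parsed value: %d\n", ret);
        return 0;
    }
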
diff --git a/libavcodec/bink.c b/libavcodec/bink.c
index 9c04d05e20..d90f79548f 100644
--- a/libavcodec/bink.c
+++ b/libavcodec/bink.c
@@ -685,6 +685,9 @@ static int read_dct_coeffs(GetBitContext *gb, int32_t block[64], const uint8_t *
}
}
+ if (quant_idx >= 16)
+ return AVERROR_INVALIDDATA;
+
quant = quant_matrices[quant_idx];
block[0] = (block[0] * quant[0]) >> 11;
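
The bink.c change rejects quant_idx values that would read past the quantizer tables. A standalone sketch of the bound-check-before-index pattern, with an illustrative 16-entry table whose contents and names are not taken from the Bink decoder:

    #include <stdio.h>

    #define NUM_QUANT_TABLES 16

    static const int quant_tables[NUM_QUANT_TABLES][2] = { { 11, 12 } /* ... */ };

    /* Return the selected table, or NULL when the index decoded from the
     * bitstream is out of range; the caller then errors out instead of
     * reading past the end of the array. */
    static const int *select_quant_table(int quant_idx)
    {
        if (quant_idx < 0 || quant_idx >= NUM_QUANT_TABLES)
            return NULL;
        return quant_tables[quant_idx];
    }

    int main(void)
    {
        printf("idx 3:  %s\n", select_quant_table(3)  ? "ok" : "rejected");
        printf("idx 16: %s\n", select_quant_table(16) ? "ok" : "rejected");
        return 0;
    }
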
diff --git a/libavcodec/dxa.c b/libavcodec/dxa.c
index 3f489aeab9..3fda64cfef 100644
--- a/libavcodec/dxa.c
+++ b/libavcodec/dxa.c
@@ -255,6 +255,12 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
case 5:
c->pic.key_frame = !(compr & 1);
c->pic.pict_type = (compr & 1) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
+
+ if (!tmpptr && !c->pic.key_frame) {
+ av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
+ return AVERROR_INVALIDDATA;
+ }
+
for(j = 0; j < avctx->height; j++){
if((compr & 1) && tmpptr){
for(i = 0; i < avctx->width; i++)
diff --git a/libavcodec/h261dec.c b/libavcodec/h261dec.c
index 7cca7bb82e..2637aae236 100644
--- a/libavcodec/h261dec.c
+++ b/libavcodec/h261dec.c
@@ -291,9 +291,11 @@ static int h261_decode_mb(H261Context *h){
// Read mtype
h->mtype = get_vlc2(&s->gb, h261_mtype_vlc.table, H261_MTYPE_VLC_BITS, 2);
if (h->mtype < 0) {
- av_log(s->avctx, AV_LOG_ERROR, "illegal mtype %d\n", h->mtype);
+ av_log(s->avctx, AV_LOG_ERROR, "Invalid mtype index %d\n",
+ h->mtype);
return SLICE_ERROR;
}
+ av_assert0(h->mtype < FF_ARRAY_ELEMS(ff_h261_mtype_map));
h->mtype = ff_h261_mtype_map[h->mtype];
// Read mquant
diff --git a/libavcodec/nuv.c b/libavcodec/nuv.c
index ff11d007c4..bb46f0174c 100644
--- a/libavcodec/nuv.c
+++ b/libavcodec/nuv.c
@@ -88,7 +88,7 @@ static int get_quant(AVCodecContext *avctx, NuvContext *c, const uint8_t *buf,
int i;
if (size < 2 * 64 * 4) {
av_log(avctx, AV_LOG_ERROR, "insufficient rtjpeg quant data\n");
- return -1;
+ return AVERROR_INVALIDDATA;
}
for (i = 0; i < 64; i++, buf += 4)
c->lq[i] = AV_RL32(buf);
@@ -114,6 +114,8 @@ static int codec_reinit(AVCodecContext *avctx, int width, int height,
int quality)
{
NuvContext *c = avctx->priv_data;
+ int ret;
+
width = FFALIGN(width, 2);
height = FFALIGN(height, 2);
if (quality >= 0)
@@ -121,9 +123,10 @@ static int codec_reinit(AVCodecContext *avctx, int width, int height,
if (width != c->width || height != c->height) {
// also reserve space for a possible additional header
int buf_size = 24 + height * width * 3 / 2 + AV_LZO_OUTPUT_PADDING;
- if (av_image_check_size(height, width, 0, avctx) < 0 ||
- buf_size > INT_MAX/8)
+ if (buf_size > INT_MAX/8)
return -1;
+ if ((ret = av_image_check_size(height, width, 0, avctx)) < 0)
+ return ret;
avctx->width = c->width = width;
avctx->height = c->height = height;
av_fast_malloc(&c->decomp_buf, &c->decomp_size,
@@ -165,7 +168,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
if (buf_size < 12) {
av_log(avctx, AV_LOG_ERROR, "coded frame too small\n");
- return -1;
+ return AVERROR_INVALIDDATA;
}
// codec data (rtjpeg quant tables)
@@ -184,7 +187,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
if (buf_size < 12 || buf[0] != 'V') {
av_log(avctx, AV_LOG_ERROR, "not a nuv video frame\n");
- return -1;
+ return AVERROR_INVALIDDATA;
}
comptype = buf[1];
switch (comptype) {
@@ -204,11 +207,14 @@ retry:
buf = &buf[12];
buf_size -= 12;
if (comptype == NUV_RTJPEG_IN_LZO || comptype == NUV_LZO) {
- int outlen = c->decomp_size - AV_LZO_OUTPUT_PADDING, inlen = buf_size;
- if (av_lzo1x_decode(c->decomp_buf, &outlen, buf, &inlen))
+ int outlen = c->decomp_size - FFMAX(FF_INPUT_BUFFER_PADDING_SIZE, AV_LZO_OUTPUT_PADDING);
+ int inlen = buf_size;
+ if (av_lzo1x_decode(c->decomp_buf, &outlen, buf, &inlen)) {
av_log(avctx, AV_LOG_ERROR, "error during lzo decompression\n");
+ return AVERROR_INVALIDDATA;
+ }
buf = c->decomp_buf;
- buf_size = c->decomp_size - AV_LZO_OUTPUT_PADDING - outlen;
+ buf_size = c->decomp_size - FFMAX(FF_INPUT_BUFFER_PADDING_SIZE, AV_LZO_OUTPUT_PADDING) - outlen;
}
if (c->codec_frameheader) {
int w, h, q;
@@ -227,10 +233,9 @@ retry:
w = AV_RL16(&buf[6]);
h = AV_RL16(&buf[8]);
q = buf[10];
- res = codec_reinit(avctx, w, h, q);
- if (res < 0)
- return res;
- if (res) {
+ if ((result = codec_reinit(avctx, w, h, q)) < 0)
+ return result;
+ if (result) {
buf = avpkt->data;
buf_size = avpkt->size;
size_change = 1;
@@ -248,7 +253,7 @@ retry:
result = avctx->reget_buffer(avctx, &c->pic);
if (result < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
- return -1;
+ return result;
}
c->pic.pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
@@ -280,7 +285,7 @@ retry:
break;
default:
av_log(avctx, AV_LOG_ERROR, "unknown compression\n");
- return -1;
+ return AVERROR_INVALIDDATA;
}
*picture = c->pic;
@@ -291,6 +296,8 @@ retry:
static av_cold int decode_init(AVCodecContext *avctx)
{
NuvContext *c = avctx->priv_data;
+ int ret;
+
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
c->pic.data[0] = NULL;
c->decomp_buf = NULL;
@@ -305,8 +312,8 @@ static av_cold int decode_init(AVCodecContext *avctx)
ff_dsputil_init(&c->dsp, avctx);
- if (codec_reinit(avctx, avctx->width, avctx->height, -1) < 0)
- return 1;
+ if ((ret = codec_reinit(avctx, avctx->width, avctx->height, -1)) < 0)
+ return ret;
return 0;
}
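
Several nuv.c changes revolve around the LZO path: the decompression buffer is allocated with extra padding, the writable length offered to the decompressor excludes that padding, and a failing av_lzo1x_decode() now aborts decoding instead of being ignored. A standalone sketch of the same bookkeeping, with a hypothetical decompress() standing in for the real LZO call:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define OUTPUT_PADDING 8   /* stand-in for AV_LZO_OUTPUT_PADDING */

    /* Hypothetical decompressor: returns 0 on success and leaves in *outlen
     * the number of output bytes it did NOT use, mimicking the convention
     * of av_lzo1x_decode(). */
    static int decompress(unsigned char *out, int *outlen,
                          const unsigned char *in, int *inlen)
    {
        int produced = *inlen;               /* pretend 1:1 "decompression" */
        if (produced > *outlen)
            return 1;                        /* would overflow the buffer */
        memcpy(out, in, produced);
        *outlen -= produced;
        *inlen   = 0;
        return 0;
    }

    int main(void)
    {
        const unsigned char packet[16] = { 0 };
        int payload  = 24;                               /* expected output size */
        int buf_size = payload + OUTPUT_PADDING;         /* allocate with padding */
        unsigned char *buf = malloc(buf_size);
        int outlen = buf_size - OUTPUT_PADDING;          /* but only offer the payload */
        int inlen  = sizeof(packet);

        if (!buf)
            return 1;
        if (decompress(buf, &outlen, packet, &inlen)) {  /* do not ignore failure */
            fprintf(stderr, "lzo decompression failed\n");
            free(buf);
            return 1;
        }
        printf("decompressed %d bytes\n", (buf_size - OUTPUT_PADDING) - outlen);
        free(buf);
        return 0;
    }
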
diff --git a/libavcodec/rtjpeg.c b/libavcodec/rtjpeg.c
index 7797a655c1..affacf86f5 100644
--- a/libavcodec/rtjpeg.c
+++ b/libavcodec/rtjpeg.c
@@ -108,10 +108,13 @@ int ff_rtjpeg_decode_frame_yuv420(RTJpegContext *c, AVFrame *f,
const uint8_t *buf, int buf_size) {
GetBitContext gb;
int w = c->w / 16, h = c->h / 16;
- int x, y;
+ int x, y, ret;
uint8_t *y1 = f->data[0], *y2 = f->data[0] + 8 * f->linesize[0];
uint8_t *u = f->data[1], *v = f->data[2];
- init_get_bits(&gb, buf, buf_size * 8);
+
+ if ((ret = init_get_bits8(&gb, buf, buf_size)) < 0)
+ return ret;
+
for (y = 0; y < h; y++) {
for (x = 0; x < w; x++) {
#define BLOCK(quant, dst, stride) do { \
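
rtjpeg.c switches from init_get_bits(&gb, buf, buf_size * 8) to init_get_bits8(&gb, buf, buf_size) and checks the result; the byte-based variant can validate the size before converting it to bits, and a failure is now propagated. A standalone sketch of that guard, with a simplified reader struct that is not the real GetBitContext:

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>

    struct bit_reader { const unsigned char *buf; int size_in_bits; };

    /* Bit-based init: the caller already multiplied by 8. */
    static int reader_init_bits(struct bit_reader *r, const unsigned char *buf,
                                int bit_size)
    {
        if (bit_size < 0 || !buf)
            return -EINVAL;
        r->buf = buf;
        r->size_in_bits = bit_size;
        return 0;
    }

    /* Byte-based init: do the *8 internally, where overflow can be checked. */
    static int reader_init_bytes(struct bit_reader *r, const unsigned char *buf,
                                 int byte_size)
    {
        if (byte_size > INT_MAX / 8)
            return -EINVAL;
        return reader_init_bits(r, buf, byte_size * 8);
    }

    int main(void)
    {
        struct bit_reader r;
        unsigned char buf[4] = { 0 };
        int ret;

        /* The pattern adopted in the merge: initialize by byte count and
         * propagate a failure instead of reading from a bad context. */
        if ((ret = reader_init_bytes(&r, buf, sizeof(buf))) < 0)
            return 1;
        printf("reader holds %d bits\n", r.size_in_bits);
        return 0;
    }
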
diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c
index fa25161d75..883beaf429 100644
--- a/libavcodec/vc1dec.c
+++ b/libavcodec/vc1dec.c
@@ -395,8 +395,10 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
}
}
- if(!srcY)
+ if (!srcY || !srcU) {
+ av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
return;
+ }
src_x = s->mb_x * 16 + (mx >> 2);
src_y = s->mb_y * 16 + (my >> 2);
@@ -573,8 +575,10 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
} else
srcY = s->next_picture.f.data[0];
- if(!srcY)
+ if (!srcY) {
+ av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
return;
+ }
if (v->field_mode) {
if (v->cur_field_type != v->ref_field_type[dir])
@@ -865,8 +869,10 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
srcV = s->next_picture.f.data[2];
}
- if(!srcU)
+ if (!srcU) {
+ av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
return;
+ }
srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
@@ -5666,6 +5672,12 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
v->mv_f[1] = tmp[1];
}
mb_height = s->mb_height >> v->field_mode;
+
+ if (!mb_height) {
+ av_log(v->s.avctx, AV_LOG_ERROR, "Invalid mb_height.\n");
+ goto err;
+ }
+
for (i = 0; i <= n_slices; i++) {
if (i > 0 && slices[i - 1].mby_start >= mb_height) {
if (v->field_mode <= 0) {
diff --git a/libavformat/oggparsevorbis.c b/libavformat/oggparsevorbis.c
index 16bcaefd4d..371c798a50 100644
--- a/libavformat/oggparsevorbis.c
+++ b/libavformat/oggparsevorbis.c
@@ -212,15 +212,15 @@ vorbis_header (AVFormatContext * s, int idx)
struct oggvorbis_private *priv;
int pkt_type = os->buf[os->pstart];
- if (!(pkt_type & 1))
- return os->private ? 0 : -1;
-
if (!os->private) {
os->private = av_mallocz(sizeof(struct oggvorbis_private));
if (!os->private)
return -1;
}
+ if (!(pkt_type & 1))
+ return 0;
+
if (os->psize < 1 || pkt_type > 5)
return -1;
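
The oggparsevorbis.c reordering makes sure the per-stream private context is allocated even when the first packet seen is not a header packet, so later code can rely on it existing. A standalone sketch of the allocate-before-early-return ordering, with hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>

    struct stream { void *priv; };

    /* Handle one packet.  The private context is created on first use,
     * before any early return, so a non-header packet can no longer leave
     * it NULL for the code that runs afterwards. */
    static int handle_packet(struct stream *st, int pkt_type)
    {
        if (!st->priv) {
            st->priv = calloc(1, 64);      /* hypothetical context size */
            if (!st->priv)
                return -1;
        }

        if (!(pkt_type & 1))               /* not a header packet: nothing to do */
            return 0;

        /* ... parse the header into st->priv ... */
        return 0;
    }

    int main(void)
    {
        struct stream st = { NULL };
        handle_packet(&st, 0);             /* a data packet arrives first */
        printf("private context allocated: %s\n", st.priv ? "yes" : "no");
        free(st.priv);
        return 0;
    }
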
diff --git a/libavformat/omadec.c b/libavformat/omadec.c
index 6d69195426..334d9ee6ff 100644
--- a/libavformat/omadec.c
+++ b/libavformat/omadec.c
@@ -112,13 +112,18 @@ static int kset(AVFormatContext *s, const uint8_t *r_val, const uint8_t *n_val,
return 0;
}
-static int rprobe(AVFormatContext *s, uint8_t *enc_header, const uint8_t *r_val)
+#define OMA_RPROBE_M_VAL 48 + 1
+
+static int rprobe(AVFormatContext *s, uint8_t *enc_header, unsigned size,
+ const uint8_t *r_val)
{
OMAContext *oc = s->priv_data;
unsigned int pos;
struct AVDES av_des;
- if (!enc_header || !r_val)
+ if (!enc_header || !r_val ||
+ size < OMA_ENC_HEADER_SIZE + oc->k_size + oc->e_size + oc->i_size ||
+ size < OMA_RPROBE_M_VAL)
return -1;
/* m_val */
@@ -139,35 +144,41 @@ static int rprobe(AVFormatContext *s, uint8_t *enc_header, const uint8_t *r_val)
return memcmp(&enc_header[pos], oc->sm_val, 8) ? -1 : 0;
}
-static int nprobe(AVFormatContext *s, uint8_t *enc_header, int size, const uint8_t *n_val)
+static int nprobe(AVFormatContext *s, uint8_t *enc_header, unsigned size,
+ const uint8_t *n_val)
{
OMAContext *oc = s->priv_data;
- uint32_t pos, taglen, datalen;
+ uint64_t pos;
+ uint32_t taglen, datalen;
struct AVDES av_des;
- if (!enc_header || !n_val)
+ if (!enc_header || !n_val ||
+ size < OMA_ENC_HEADER_SIZE + oc->k_size + 4)
return -1;
pos = OMA_ENC_HEADER_SIZE + oc->k_size;
if (!memcmp(&enc_header[pos], "EKB ", 4))
pos += 32;
+ if (size < pos + 44)
+ return -1;
+
if (AV_RB32(&enc_header[pos]) != oc->rid)
av_log(s, AV_LOG_DEBUG, "Mismatching RID\n");
taglen = AV_RB32(&enc_header[pos+32]);
datalen = AV_RB32(&enc_header[pos+36]) >> 4;
- if(pos + (uint64_t)taglen + (((uint64_t)datalen)<<4) + 44 > size)
- return -1;
-
pos += 44 + taglen;
+ if (pos + (((uint64_t)datalen) << 4) > size)
+ return -1;
+
av_des_init(&av_des, n_val, 192, 1);
while (datalen-- > 0) {
av_des_crypt(&av_des, oc->r_val, &enc_header[pos], 2, NULL, 1);
kset(s, oc->r_val, NULL, 16);
- if (!rprobe(s, enc_header, oc->r_val))
+ if (!rprobe(s, enc_header, size, oc->r_val))
return 0;
pos += 16;
}
@@ -236,7 +247,7 @@ static int decrypt_init(AVFormatContext *s, ID3v2ExtraMeta *em, uint8_t *header)
kset(s, s->key, s->key, s->keylen);
}
if (!memcmp(oc->r_val, (const uint8_t[8]){0}, 8) ||
- rprobe(s, gdata, oc->r_val) < 0 &&
+ rprobe(s, gdata, geob->datasize, oc->r_val) < 0 &&
nprobe(s, gdata, geob->datasize, oc->n_val) < 0) {
int i;
for (i = 0; i < FF_ARRAY_ELEMS(leaf_table); i += 2) {
@@ -244,7 +255,8 @@ static int decrypt_init(AVFormatContext *s, ID3v2ExtraMeta *em, uint8_t *header)
AV_WL64(buf, leaf_table[i]);
AV_WL64(&buf[8], leaf_table[i+1]);
kset(s, buf, buf, 16);
- if (!rprobe(s, gdata, oc->r_val) || !nprobe(s, gdata, geob->datasize, oc->n_val))
+ if (!rprobe(s, gdata, geob->datasize, oc->r_val) ||
+ !nprobe(s, gdata, geob->datasize, oc->n_val))
break;
}
if (i >= FF_ARRAY_ELEMS(leaf_table)) {
@@ -386,6 +398,9 @@ static int oma_read_packet(AVFormatContext *s, AVPacket *pkt)
int packet_size = s->streams[0]->codec->block_align;
int ret = av_get_packet(s->pb, pkt, packet_size);
+ if (ret < packet_size)
+ pkt->flags |= AV_PKT_FLAG_CORRUPT;
+
if (ret < 0)
return ret;
if (!ret)
@@ -394,8 +409,13 @@ static int oma_read_packet(AVFormatContext *s, AVPacket *pkt)
pkt->stream_index = 0;
if (oc->encrypted) {
- /* previous unencrypted block saved in IV for the next packet (CBC mode) */
- av_des_crypt(&oc->av_des, pkt->data, pkt->data, (ret >> 3), oc->iv, 1);
+ /* previous unencrypted block saved in IV for
+ * the next packet (CBC mode) */
+ if (ret == packet_size)
+ av_des_crypt(&oc->av_des, pkt->data, pkt->data,
+ (packet_size >> 3), oc->iv, 1);
+ else
+ memset(oc->iv, 0, 8);
}
return ret;
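
For encrypted OMA streams the demuxer now marks short reads as corrupt and only runs the DES-CBC decryption when a complete packet arrived, resetting the IV otherwise. A standalone sketch of that decision, with a stub decrypt() standing in for av_des_crypt():

    #include <stdio.h>
    #include <string.h>

    #define FLAG_CORRUPT 0x1

    /* Stub for av_des_crypt(): DES-CBC works on whole 8-byte blocks. */
    static void decrypt(unsigned char *data, int nblocks, unsigned char *iv)
    {
        (void)data; (void)nblocks; (void)iv;   /* real cipher omitted */
    }

    static int read_packet(unsigned char *data, int got, int packet_size,
                           unsigned char iv[8], int *flags)
    {
        if (got < packet_size)
            *flags |= FLAG_CORRUPT;              /* short read: tell the caller */

        if (got == packet_size)
            decrypt(data, packet_size >> 3, iv); /* full packet: decrypt its blocks */
        else
            memset(iv, 0, 8);                    /* partial: the CBC chain is broken */
        return got;
    }

    int main(void)
    {
        unsigned char data[2048] = { 0 }, iv[8] = { 0 };
        int flags = 0;

        read_packet(data, 1000, 2048, iv, &flags);   /* truncated last packet */
        printf("corrupt flag set: %s\n", (flags & FLAG_CORRUPT) ? "yes" : "no");
        return 0;
    }
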
diff --git a/libavformat/rtmppkt.c b/libavformat/rtmppkt.c
index 3bd28eb614..d1c38192c7 100644
--- a/libavformat/rtmppkt.c
+++ b/libavformat/rtmppkt.c
@@ -145,25 +145,25 @@ int ff_rtmp_packet_read_internal(URLContext *h, RTMPPacket *p, int chunk_size,
{
uint8_t t, buf[16];
- int channel_id, timestamp, data_size, offset = 0;
+ int channel_id, timestamp, size, offset = 0;
uint32_t extra = 0;
enum RTMPPacketType type;
- int size = 0;
+ int written = 0;
int ret;
- size++;
+ written++;
channel_id = hdr & 0x3F;
if (channel_id < 2) { //special case for channel number >= 64
buf[1] = 0;
if (ffurl_read_complete(h, buf, channel_id + 1) != channel_id + 1)
return AVERROR(EIO);
- size += channel_id + 1;
+ written += channel_id + 1;
channel_id = AV_RL16(buf) + 64;
}
- data_size = prev_pkt[channel_id].data_size;
- type = prev_pkt[channel_id].type;
- extra = prev_pkt[channel_id].extra;
+ size = prev_pkt[channel_id].size;
+ type = prev_pkt[channel_id].type;
+ extra = prev_pkt[channel_id].extra;
hdr >>= 6;
if (hdr == RTMP_PS_ONEBYTE) {
@@ -171,21 +171,21 @@ int ff_rtmp_packet_read_internal(URLContext *h, RTMPPacket *p, int chunk_size,
} else {
if (ffurl_read_complete(h, buf, 3) != 3)
return AVERROR(EIO);
- size += 3;
+ written += 3;
timestamp = AV_RB24(buf);
if (hdr != RTMP_PS_FOURBYTES) {
if (ffurl_read_complete(h, buf, 3) != 3)
return AVERROR(EIO);
- size += 3;
- data_size = AV_RB24(buf);
+ written += 3;
+ size = AV_RB24(buf);
if (ffurl_read_complete(h, buf, 1) != 1)
return AVERROR(EIO);
- size++;
+ written++;
type = buf[0];
if (hdr == RTMP_PS_TWELVEBYTES) {
if (ffurl_read_complete(h, buf, 4) != 4)
return AVERROR(EIO);
- size += 4;
+ written += 4;
extra = AV_RL32(buf);
}
}
@@ -199,36 +199,36 @@ int ff_rtmp_packet_read_internal(URLContext *h, RTMPPacket *p, int chunk_size,
timestamp += prev_pkt[channel_id].timestamp;
if ((ret = ff_rtmp_packet_create(p, channel_id, type, timestamp,
- data_size)) < 0)
+ size)) < 0)
return ret;
p->extra = extra;
// save history
prev_pkt[channel_id].channel_id = channel_id;
prev_pkt[channel_id].type = type;
- prev_pkt[channel_id].data_size = data_size;
+ prev_pkt[channel_id].size = size;
prev_pkt[channel_id].ts_delta = timestamp - prev_pkt[channel_id].timestamp;
prev_pkt[channel_id].timestamp = timestamp;
prev_pkt[channel_id].extra = extra;
- while (data_size > 0) {
- int toread = FFMIN(data_size, chunk_size);
+ while (size > 0) {
+ int toread = FFMIN(size, chunk_size);
if (ffurl_read_complete(h, p->data + offset, toread) != toread) {
ff_rtmp_packet_destroy(p);
return AVERROR(EIO);
}
- data_size -= chunk_size;
- offset += chunk_size;
- size += chunk_size;
- if (data_size > 0) {
+ size -= chunk_size;
+ offset += chunk_size;
+ written += chunk_size;
+ if (size > 0) {
if ((ret = ffurl_read_complete(h, &t, 1)) < 0) { // marker
ff_rtmp_packet_destroy(p);
return ret;
}
- size++;
+ written++;
if (t != (0xC0 + channel_id))
return -1;
}
}
- return size;
+ return written;
}
int ff_rtmp_packet_write(URLContext *h, RTMPPacket *pkt,
@@ -237,7 +237,7 @@ int ff_rtmp_packet_write(URLContext *h, RTMPPacket *pkt,
uint8_t pkt_hdr[16], *p = pkt_hdr;
int mode = RTMP_PS_TWELVEBYTES;
int off = 0;
- int size = 0;
+ int written = 0;
int ret;
pkt->ts_delta = pkt->timestamp - prev_pkt[pkt->channel_id].timestamp;
@@ -246,7 +246,7 @@ int ff_rtmp_packet_write(URLContext *h, RTMPPacket *pkt,
if (prev_pkt[pkt->channel_id].channel_id &&
pkt->extra == prev_pkt[pkt->channel_id].extra) {
if (pkt->type == prev_pkt[pkt->channel_id].type &&
- pkt->data_size == prev_pkt[pkt->channel_id].data_size) {
+ pkt->size == prev_pkt[pkt->channel_id].size) {
mode = RTMP_PS_FOURBYTES;
if (pkt->ts_delta == prev_pkt[pkt->channel_id].ts_delta)
mode = RTMP_PS_ONEBYTE;
@@ -270,7 +270,7 @@ int ff_rtmp_packet_write(URLContext *h, RTMPPacket *pkt,
timestamp = pkt->ts_delta;
bytestream_put_be24(&p, timestamp >= 0xFFFFFF ? 0xFFFFFF : timestamp);
if (mode != RTMP_PS_FOURBYTES) {
- bytestream_put_be24(&p, pkt->data_size);
+ bytestream_put_be24(&p, pkt->size);
bytestream_put_byte(&p, pkt->type);
if (mode == RTMP_PS_TWELVEBYTES)
bytestream_put_le32(&p, pkt->extra);
@@ -281,7 +281,7 @@ int ff_rtmp_packet_write(URLContext *h, RTMPPacket *pkt,
// save history
prev_pkt[pkt->channel_id].channel_id = pkt->channel_id;
prev_pkt[pkt->channel_id].type = pkt->type;
- prev_pkt[pkt->channel_id].data_size = pkt->data_size;
+ prev_pkt[pkt->channel_id].size = pkt->size;
prev_pkt[pkt->channel_id].timestamp = pkt->timestamp;
if (mode != RTMP_PS_TWELVEBYTES) {
prev_pkt[pkt->channel_id].ts_delta = pkt->ts_delta;
@@ -292,20 +292,20 @@ int ff_rtmp_packet_write(URLContext *h, RTMPPacket *pkt,
if ((ret = ffurl_write(h, pkt_hdr, p - pkt_hdr)) < 0)
return ret;
- size = p - pkt_hdr + pkt->data_size;
- while (off < pkt->data_size) {
- int towrite = FFMIN(chunk_size, pkt->data_size - off);
+ written = p - pkt_hdr + pkt->size;
+ while (off < pkt->size) {
+ int towrite = FFMIN(chunk_size, pkt->size - off);
if ((ret = ffurl_write(h, pkt->data + off, towrite)) < 0)
return ret;
off += towrite;
- if (off < pkt->data_size) {
+ if (off < pkt->size) {
uint8_t marker = 0xC0 | pkt->channel_id;
if ((ret = ffurl_write(h, &marker, 1)) < 0)
return ret;
- size++;
+ written++;
}
}
- return size;
+ return written;
}
int ff_rtmp_packet_create(RTMPPacket *pkt, int channel_id, RTMPPacketType type,
@@ -316,7 +316,7 @@ int ff_rtmp_packet_create(RTMPPacket *pkt, int channel_id, RTMPPacketType type,
if (!pkt->data)
return AVERROR(ENOMEM);
}
- pkt->data_size = size;
+ pkt->size = size;
pkt->channel_id = channel_id;
pkt->type = type;
pkt->timestamp = timestamp;
@@ -331,7 +331,7 @@ void ff_rtmp_packet_destroy(RTMPPacket *pkt)
if (!pkt)
return;
av_freep(&pkt->data);
- pkt->data_size = 0;
+ pkt->size = 0;
}
int ff_amf_tag_size(const uint8_t *data, const uint8_t *data_end)
@@ -502,9 +502,9 @@ static void ff_amf_tag_contents(void *ctx, const uint8_t *data, const uint8_t *d
void ff_rtmp_packet_dump(void *ctx, RTMPPacket *p)
{
av_log(ctx, AV_LOG_DEBUG, "RTMP packet type '%s'(%d) for channel %d, timestamp %d, extra field %d size %d\n",
- rtmp_packet_type(p->type), p->type, p->channel_id, p->timestamp, p->extra, p->data_size);
+ rtmp_packet_type(p->type), p->type, p->channel_id, p->timestamp, p->extra, p->size);
if (p->type == RTMP_PT_INVOKE || p->type == RTMP_PT_NOTIFY) {
- uint8_t *src = p->data, *src_end = p->data + p->data_size;
+ uint8_t *src = p->data, *src_end = p->data + p->size;
while (src < src_end) {
int sz;
ff_amf_tag_contents(ctx, src, src_end);
@@ -519,8 +519,41 @@ void ff_rtmp_packet_dump(void *ctx, RTMPPacket *p)
av_log(ctx, AV_LOG_DEBUG, "Client BW = %d\n", AV_RB32(p->data));
} else if (p->type != RTMP_PT_AUDIO && p->type != RTMP_PT_VIDEO && p->type != RTMP_PT_METADATA) {
int i;
- for (i = 0; i < p->data_size; i++)
+ for (i = 0; i < p->size; i++)
av_log(ctx, AV_LOG_DEBUG, " %02X", p->data[i]);
av_log(ctx, AV_LOG_DEBUG, "\n");
}
}
+
+int ff_amf_match_string(const uint8_t *data, int size, const char *str)
+{
+ int len = strlen(str);
+ int amf_len, type;
+
+ if (size < 1)
+ return 0;
+
+ type = *data++;
+
+ if (type != AMF_DATA_TYPE_LONG_STRING &&
+ type != AMF_DATA_TYPE_STRING)
+ return 0;
+
+ if (type == AMF_DATA_TYPE_LONG_STRING) {
+ if ((size -= 4 + 1) < 0)
+ return 0;
+ amf_len = bytestream_get_be32(&data);
+ } else {
+ if ((size -= 2 + 1) < 0)
+ return 0;
+ amf_len = bytestream_get_be16(&data);
+ }
+
+ if (amf_len > size)
+ return 0;
+
+ if (amf_len != len)
+ return 0;
+
+ return !memcmp(data, str, len);
+}
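
The new ff_amf_match_string() above replaces comparisons of pkt->data against fixed-length literals such as "\002\000\007_result", which never verified that the packet was actually that long. A standalone sketch of the same idea, encoding an AMF short string and matching it with every read bounded by the available size (the encoder and matcher below are illustrative, not FFmpeg API):

    #include <stdio.h>
    #include <string.h>

    #define AMF_STRING 0x02

    /* Encode a short AMF string: 1-byte type, 16-bit big-endian length, bytes. */
    static int amf_put_string(unsigned char *dst, const char *str)
    {
        size_t len = strlen(str);
        dst[0] = AMF_STRING;
        dst[1] = (unsigned char)(len >> 8);
        dst[2] = (unsigned char)(len & 0xff);
        memcpy(dst + 3, str, len);
        return 3 + (int)len;
    }

    /* Length-aware match in the spirit of ff_amf_match_string(): a truncated
     * packet is rejected instead of being read past its end. */
    static int amf_match(const unsigned char *data, int size, const char *str)
    {
        int len = (int)strlen(str), amf_len;
        if (size < 3 || data[0] != AMF_STRING)
            return 0;
        amf_len = (data[1] << 8) | data[2];
        if (amf_len > size - 3 || amf_len != len)
            return 0;
        return !memcmp(data + 3, str, len);
    }

    int main(void)
    {
        unsigned char buf[64];
        int size = amf_put_string(buf, "_result");

        printf("full packet:      %d\n", amf_match(buf, size, "_result"));
        printf("truncated packet: %d\n", amf_match(buf, 4, "_result"));
        return 0;
    }
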
diff --git a/libavformat/rtmppkt.h b/libavformat/rtmppkt.h
index a9422954f5..421ad37934 100644
--- a/libavformat/rtmppkt.h
+++ b/libavformat/rtmppkt.h
@@ -81,7 +81,7 @@ typedef struct RTMPPacket {
uint32_t ts_delta; ///< timestamp increment to the previous one in milliseconds (latter only for media packets)
uint32_t extra; ///< probably an additional channel ID used during streaming data
uint8_t *data; ///< packet payload
- int data_size; ///< packet payload size
+ int size; ///< packet payload size
} RTMPPacket;
/**
@@ -282,6 +282,13 @@ int ff_amf_read_string(GetByteContext *gbc, uint8_t *str,
*/
int ff_amf_read_null(GetByteContext *gbc);
+/**
+ * Match AMF string with a NULL-terminated string.
+ *
+ * @return 0 if the strings do not match.
+ */
+
+int ff_amf_match_string(const uint8_t *data, int size, const char *str);
/** @} */ // AMF funcs
diff --git a/libavformat/rtmpproto.c b/libavformat/rtmpproto.c
index fa661ee552..26e3e96382 100644
--- a/libavformat/rtmpproto.c
+++ b/libavformat/rtmpproto.c
@@ -186,7 +186,7 @@ static int find_tracked_method(URLContext *s, RTMPPacket *pkt, int offset,
int ret;
int i;
- bytestream2_init(&gbc, pkt->data + offset, pkt->data_size - offset);
+ bytestream2_init(&gbc, pkt->data + offset, pkt->size - offset);
if ((ret = ff_amf_read_number(&gbc, &pkt_id)) < 0)
return ret;
@@ -224,7 +224,7 @@ static int rtmp_send_packet(RTMPContext *rt, RTMPPacket *pkt, int track)
double pkt_id;
int len;
- bytestream2_init(&gbc, pkt->data, pkt->data_size);
+ bytestream2_init(&gbc, pkt->data, pkt->size);
if ((ret = ff_amf_read_string(&gbc, name, sizeof(name), &len)) < 0)
goto fail;
@@ -385,7 +385,7 @@ static int gen_connect(URLContext *s, RTMPContext *rt)
}
}
- pkt.data_size = p - pkt.data;
+ pkt.size = p - pkt.data;
return rtmp_send_packet(rt, &pkt, 1);
}
@@ -406,7 +406,7 @@ static int read_connect(URLContext *s, RTMPContext *rt)
rt->prev_pkt[1])) < 0)
return ret;
cp = pkt.data;
- bytestream2_init(&gbc, cp, pkt.data_size);
+ bytestream2_init(&gbc, cp, pkt.size);
if (ff_amf_read_string(&gbc, command, sizeof(command), &stringlen)) {
av_log(s, AV_LOG_ERROR, "Unable to read command string\n");
ff_rtmp_packet_destroy(&pkt);
@@ -437,7 +437,7 @@ static int read_connect(URLContext *s, RTMPContext *rt)
return ret;
p = pkt.data;
bytestream_put_be32(&p, rt->server_bw);
- pkt.data_size = p - pkt.data;
+ pkt.size = p - pkt.data;
ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
rt->prev_pkt[1]);
ff_rtmp_packet_destroy(&pkt);
@@ -450,7 +450,7 @@ static int read_connect(URLContext *s, RTMPContext *rt)
p = pkt.data;
bytestream_put_be32(&p, rt->server_bw);
bytestream_put_byte(&p, 2); // dynamic
- pkt.data_size = p - pkt.data;
+ pkt.size = p - pkt.data;
ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
rt->prev_pkt[1]);
ff_rtmp_packet_destroy(&pkt);
@@ -512,7 +512,7 @@ static int read_connect(URLContext *s, RTMPContext *rt)
ff_amf_write_number(&p, 0);
ff_amf_write_object_end(&p);
- pkt.data_size = p - pkt.data;
+ pkt.size = p - pkt.data;
ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
rt->prev_pkt[1]);
ff_rtmp_packet_destroy(&pkt);
@@ -527,7 +527,7 @@ static int read_connect(URLContext *s, RTMPContext *rt)
ff_amf_write_number(&p, 0);
ff_amf_write_null(&p);
ff_amf_write_number(&p, 8192);
- pkt.data_size = p - pkt.data;
+ pkt.size = p - pkt.data;
ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
rt->prev_pkt[1]);
ff_rtmp_packet_destroy(&pkt);
@@ -742,9 +742,9 @@ static int gen_pong(URLContext *s, RTMPContext *rt, RTMPPacket *ppkt)
uint8_t *p;
int ret;
- if (ppkt->data_size < 6) {
+ if (ppkt->size < 6) {
av_log(s, AV_LOG_ERROR, "Too short ping packet (%d)\n",
- ppkt->data_size);
+ ppkt->size);
return AVERROR_INVALIDDATA;
}
@@ -1418,10 +1418,10 @@ static int handle_chunk_size(URLContext *s, RTMPPacket *pkt)
RTMPContext *rt = s->priv_data;
int ret;
- if (pkt->data_size < 4) {
+ if (pkt->size < 4) {
av_log(s, AV_LOG_ERROR,
"Too short chunk size change packet (%d)\n",
- pkt->data_size);
+ pkt->size);
return AVERROR_INVALIDDATA;
}
@@ -1451,9 +1451,9 @@ static int handle_ping(URLContext *s, RTMPPacket *pkt)
RTMPContext *rt = s->priv_data;
int t, ret;
- if (pkt->data_size < 2) {
+ if (pkt->size < 2) {
av_log(s, AV_LOG_ERROR, "Too short ping packet (%d)\n",
- pkt->data_size);
+ pkt->size);
return AVERROR_INVALIDDATA;
}
@@ -1477,10 +1477,10 @@ static int handle_client_bw(URLContext *s, RTMPPacket *pkt)
{
RTMPContext *rt = s->priv_data;
- if (pkt->data_size < 4) {
+ if (pkt->size < 4) {
av_log(s, AV_LOG_ERROR,
"Client bandwidth report packet is less than 4 bytes long (%d)\n",
- pkt->data_size);
+ pkt->size);
return AVERROR_INVALIDDATA;
}
@@ -1501,10 +1501,10 @@ static int handle_server_bw(URLContext *s, RTMPPacket *pkt)
{
RTMPContext *rt = s->priv_data;
- if (pkt->data_size < 4) {
+ if (pkt->size < 4) {
av_log(s, AV_LOG_ERROR,
"Too short server bandwidth report packet (%d)\n",
- pkt->data_size);
+ pkt->size);
return AVERROR_INVALIDDATA;
}
@@ -1704,7 +1704,7 @@ static int handle_connect_error(URLContext *s, const char *desc)
static int handle_invoke_error(URLContext *s, RTMPPacket *pkt)
{
RTMPContext *rt = s->priv_data;
- const uint8_t *data_end = pkt->data + pkt->data_size;
+ const uint8_t *data_end = pkt->data + pkt->size;
char *tracked_method = NULL;
int level = AV_LOG_ERROR;
uint8_t tmpstr[256];
@@ -1752,7 +1752,7 @@ static int send_invoke_response(URLContext *s, RTMPPacket *pkt)
GetByteContext gbc;
int ret;
- bytestream2_init(&gbc, p, pkt->data_size);
+ bytestream2_init(&gbc, p, pkt->size);
if (ff_amf_read_string(&gbc, command, sizeof(command),
&stringlen)) {
av_log(s, AV_LOG_ERROR, "Error in PT_INVOKE\n");
@@ -1804,7 +1804,7 @@ static int send_invoke_response(URLContext *s, RTMPPacket *pkt)
return ret;
}
pp = spkt.data;
- bytestream2_init_writer(&pbc, pp, spkt.data_size);
+ bytestream2_init_writer(&pbc, pp, spkt.size);
bytestream2_put_be16(&pbc, 0); // 0 -> Stream Begin
bytestream2_put_be32(&pbc, rt->nb_streamid);
ret = ff_rtmp_packet_write(rt->stream, &spkt, rt->out_chunk_size,
@@ -1863,7 +1863,7 @@ static int send_invoke_response(URLContext *s, RTMPPacket *pkt)
* if a client creates more than 2^32 - 2 streams. */
}
}
- spkt.data_size = pp - spkt.data;
+ spkt.size = pp - spkt.data;
ret = ff_rtmp_packet_write(rt->stream, &spkt, rt->out_chunk_size,
rt->prev_pkt[1]);
ff_rtmp_packet_destroy(&spkt);
@@ -1884,7 +1884,7 @@ static int handle_invoke_result(URLContext *s, RTMPPacket *pkt)
return ret;
}
- if (!memcmp(tracked_method, "connect", 7)) {
+ if (!strcmp(tracked_method, "connect")) {
if (!rt->is_input) {
if ((ret = gen_release_stream(s, rt)) < 0)
goto fail;
@@ -1910,7 +1910,7 @@ static int handle_invoke_result(URLContext *s, RTMPPacket *pkt)
goto fail;
}
}
- } else if (!memcmp(tracked_method, "createStream", 12)) {
+ } else if (!strcmp(tracked_method, "createStream")) {
//extract a number from the result
if (pkt->data[10] || pkt->data[19] != 5 || pkt->data[20]) {
av_log(s, AV_LOG_WARNING, "Unexpected reply on connect()\n");
@@ -1937,7 +1937,7 @@ fail:
static int handle_invoke_status(URLContext *s, RTMPPacket *pkt)
{
RTMPContext *rt = s->priv_data;
- const uint8_t *data_end = pkt->data + pkt->data_size;
+ const uint8_t *data_end = pkt->data + pkt->size;
const uint8_t *ptr = pkt->data + 11;
uint8_t tmpstr[256];
int i, t;
@@ -1972,23 +1972,23 @@ static int handle_invoke(URLContext *s, RTMPPacket *pkt)
int ret = 0;
//TODO: check for the messages sent for wrong state?
- if (!memcmp(pkt->data, "\002\000\006_error", 9)) {
+ if (ff_amf_match_string(pkt->data, pkt->size, "_error")) {
if ((ret = handle_invoke_error(s, pkt)) < 0)
return ret;
- } else if (!memcmp(pkt->data, "\002\000\007_result", 10)) {
+ } else if (ff_amf_match_string(pkt->data, pkt->size, "_result")) {
if ((ret = handle_invoke_result(s, pkt)) < 0)
return ret;
- } else if (!memcmp(pkt->data, "\002\000\010onStatus", 11)) {
+ } else if (ff_amf_match_string(pkt->data, pkt->size, "onStatus")) {
if ((ret = handle_invoke_status(s, pkt)) < 0)
return ret;
- } else if (!memcmp(pkt->data, "\002\000\010onBWDone", 11)) {
+ } else if (ff_amf_match_string(pkt->data, pkt->size, "onBWDone")) {
if ((ret = gen_check_bw(s, rt)) < 0)
return ret;
- } else if (!memcmp(pkt->data, "\002\000\015releaseStream", 16) ||
- !memcmp(pkt->data, "\002\000\011FCPublish", 12) ||
- !memcmp(pkt->data, "\002\000\007publish", 10) ||
- !memcmp(pkt->data, "\002\000\010_checkbw", 11) ||
- !memcmp(pkt->data, "\002\000\014createStream", 15)) {
+ } else if (ff_amf_match_string(pkt->data, pkt->size, "releaseStream") ||
+ ff_amf_match_string(pkt->data, pkt->size, "FCPublish") ||
+ ff_amf_match_string(pkt->data, pkt->size, "publish") ||
+ ff_amf_match_string(pkt->data, pkt->size, "_checkbw") ||
+ ff_amf_match_string(pkt->data, pkt->size, "createStream")) {
if ((ret = send_invoke_response(s, pkt)) < 0)
return ret;
}
@@ -2011,7 +2011,7 @@ static int handle_notify(URLContext *s, RTMPPacket *pkt) {
unsigned datatowritelength;
p = pkt->data;
- bytestream2_init(&gbc, p, pkt->data_size);
+ bytestream2_init(&gbc, p, pkt->size);
if (ff_amf_read_string(&gbc, commandbuffer, sizeof(commandbuffer),
&stringlen))
return AVERROR_INVALIDDATA;
@@ -2125,7 +2125,7 @@ static int get_packet(URLContext *s, int for_header)
int ret;
uint8_t *p;
const uint8_t *next;
- uint32_t data_size;
+ uint32_t size;
uint32_t ts, cts, pts=0;
if (rt->state == STATE_STOPPED)
@@ -2168,24 +2168,25 @@ static int get_packet(URLContext *s, int for_header)
ff_rtmp_packet_destroy(&rpkt);
return 0;
}
- if (!rpkt.data_size || !rt->is_input) {
+ if (!rpkt.size || !rt->is_input) {
ff_rtmp_packet_destroy(&rpkt);
continue;
}
if (rpkt.type == RTMP_PT_VIDEO || rpkt.type == RTMP_PT_AUDIO ||
- (rpkt.type == RTMP_PT_NOTIFY && !memcmp("\002\000\012onMetaData", rpkt.data, 13))) {
+ (rpkt.type == RTMP_PT_NOTIFY &&
+ ff_amf_match_string(rpkt.data, rpkt.size, "onMetaData"))) {
ts = rpkt.timestamp;
// generate packet header and put data into buffer for FLV demuxer
rt->flv_off = 0;
- rt->flv_size = rpkt.data_size + 15;
+ rt->flv_size = rpkt.size + 15;
rt->flv_data = p = av_realloc(rt->flv_data, rt->flv_size);
bytestream_put_byte(&p, rpkt.type);
- bytestream_put_be24(&p, rpkt.data_size);
+ bytestream_put_be24(&p, rpkt.size);
bytestream_put_be24(&p, ts);
bytestream_put_byte(&p, ts >> 24);
bytestream_put_be24(&p, 0);
- bytestream_put_buffer(&p, rpkt.data, rpkt.data_size);
+ bytestream_put_buffer(&p, rpkt.data, rpkt.size);
bytestream_put_be32(&p, 0);
ff_rtmp_packet_destroy(&rpkt);
return 0;
@@ -2200,14 +2201,14 @@ static int get_packet(URLContext *s, int for_header)
} else if (rpkt.type == RTMP_PT_METADATA) {
// we got raw FLV data, make it available for FLV demuxer
rt->flv_off = 0;
- rt->flv_size = rpkt.data_size;
+ rt->flv_size = rpkt.size;
rt->flv_data = av_realloc(rt->flv_data, rt->flv_size);
/* rewrite timestamps */
next = rpkt.data;
ts = rpkt.timestamp;
- while (next - rpkt.data < rpkt.data_size - 11) {
+ while (next - rpkt.data < rpkt.size - 11) {
next++;
- data_size = bytestream_get_be24(&next);
+ size = bytestream_get_be24(&next);
p=next;
cts = bytestream_get_be24(&next);
cts |= bytestream_get_byte(&next) << 24;
@@ -2217,9 +2218,9 @@ static int get_packet(URLContext *s, int for_header)
pts = cts;
bytestream_put_be24(&p, ts);
bytestream_put_byte(&p, ts >> 24);
- next += data_size + 3 + 4;
+ next += size + 3 + 4;
}
- memcpy(rt->flv_data, rpkt.data, rpkt.data_size);
+ memcpy(rt->flv_data, rpkt.data, rpkt.size);
ff_rtmp_packet_destroy(&rpkt);
return 0;
}
@@ -2234,7 +2235,7 @@ static int rtmp_close(URLContext *h)
if (!rt->is_input) {
rt->flv_data = NULL;
- if (rt->out_pkt.data_size)
+ if (rt->out_pkt.size)
ff_rtmp_packet_destroy(&rt->out_pkt);
if (rt->state > STATE_FCPUBLISH)
ret = gen_fcunpublish_stream(h, rt);
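
Besides switching the packet-data tests to ff_amf_match_string(), the rtmpproto.c hunks change the comparison of the NUL-terminated tracked_method from memcmp() to strcmp(): a fixed-length memcmp only checks a prefix, so "connect" would also accept a longer method name beginning with "connect". A standalone illustration (the method name "connect2" is hypothetical):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *tracked_method = "connect2";   /* hypothetical method name */

        /* Prefix comparison: wrongly treats "connect2" as "connect". */
        printf("memcmp says connect: %s\n",
               !memcmp(tracked_method, "connect", 7) ? "yes" : "no");

        /* Full-string comparison: only an exact match passes. */
        printf("strcmp says connect: %s\n",
               !strcmp(tracked_method, "connect") ? "yes" : "no");
        return 0;
    }
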
diff --git a/libavformat/segafilm.c b/libavformat/segafilm.c
index a33ad858f5..232a93117f 100644
--- a/libavformat/segafilm.c
+++ b/libavformat/segafilm.c
@@ -215,6 +215,8 @@ static int film_read_header(AVFormatContext *s)
film->sample_table[i].sample_offset =
data_offset + AV_RB32(&scratch[0]);
film->sample_table[i].sample_size = AV_RB32(&scratch[4]);
+ if (film->sample_table[i].sample_size > INT_MAX / 4)
+ return AVERROR_INVALIDDATA;
if (AV_RB32(&scratch[8]) == 0xFFFFFFFF) {
film->sample_table[i].stream = film->audio_stream_index;
film->sample_table[i].pts = audio_frame_counter;
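
segafilm.c rejects sample sizes above INT_MAX / 4 so that later arithmetic on the size stays within signed-int range. A standalone sketch of checking against a limit before scaling, with an illustrative factor of 4 (the exact constraint inside the demuxer may differ):

    #include <limits.h>
    #include <stdio.h>

    /* Return the scaled size, or -1 if multiplying by 4 would overflow int. */
    static int scaled_size(unsigned int sample_size)
    {
        if (sample_size > INT_MAX / 4)
            return -1;                    /* reject impossible packet sizes */
        return (int)(sample_size * 4);
    }

    int main(void)
    {
        printf("small sample: %d\n", scaled_size(1000));
        printf("bogus sample: %d\n", scaled_size(0x7FFFFFFFu));
        return 0;
    }
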
diff --git a/tests/ref/fate/nuv-rtjpeg b/tests/ref/fate/nuv-rtjpeg
index 8838fbb646..96ead33cbd 100644
--- a/tests/ref/fate/nuv-rtjpeg
+++ b/tests/ref/fate/nuv-rtjpeg
@@ -7,4 +7,3 @@
0, 9, 9, 1, 460800, 0x4e091ee2
0, 10, 10, 1, 460800, 0x2ea88828
0, 11, 11, 1, 460800, 0x4b7f4df0
-0, 12, 12, 1, 460800, 0xa57f20d0