path: root/ffplay.c
author     Michael Niedermayer <michaelni@gmx.at>  2011-12-06 01:37:27 +0100
committer  Michael Niedermayer <michaelni@gmx.at>  2011-12-06 01:37:27 +0100
commit     b404ab9e74d3bca12d5989c366f5cfd746279067 (patch)
tree       fdbba6fdf7a4694fe7b7ecda6401ea6a2e01f95e /ffplay.c
parent     a448a5d1c4620aa58ec138fbffd46d18d42d53e0 (diff)
parent     52401b82bd2ed30d4c4353cb084bf4ee679d0c22 (diff)
Merge remote-tracking branch 'qatar/master'
* qatar/master:
  mov: Don't av_malloc(0).
  avconv: only allocate 1 AVFrame per input stream
  avconv: fix memleaks due to not freeing the AVFrame for audio
  h264-fate: remove -strict 1 except where necessary (mr4/5-tandberg).
  misc Doxygen markup improvements
  doxygen: eliminate Qt-style doxygen syntax
  g722: Add a regression test for muxing/demuxing in wav
  g722: Change bits per sample to 4
  g722dec: Signal skipping the lower bits via AVOptions instead of bits_per_coded_sample
  api-example: update to use avcodec_decode_audio4()
  avplay: use avcodec_decode_audio4()
  avplay: use a separate buffer for playing silence
  avformat: use avcodec_decode_audio4() in avformat_find_stream_info()
  avconv: use avcodec_decode_audio4() instead of avcodec_decode_audio3()
  mov: Allow empty stts atom.
  doc: document preferred Doxygen syntax and make patcheck detect it

Conflicts:
  avconv.c
  ffplay.c
  libavcodec/mlpdec.c
  libavcodec/version.h
  libavformat/mov.c
  tests/codec-regression.sh
  tests/fate/h264.mak

Merged-by: Michael Niedermayer <michaelni@gmx.at>
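The ffplay.c portion of this merge is the switch from avcodec_decode_audio3() to avcodec_decode_audio4() in audio_decode_frame(): the decoder now fills a reusable AVFrame and reports output through a got_frame flag, and the caller derives the decoded byte count with av_samples_get_buffer_size() instead of receiving it through an out-parameter. The following is a minimal sketch of that calling pattern, not code from the patch; the helper name decode_audio_step() is made up for illustration.

    #include <libavcodec/avcodec.h>
    #include <libavutil/samplefmt.h>

    /* Sketch: feed one packet (or the remainder of one) to the decoder.
     * On success, *data_size holds the number of decoded bytes sitting in
     * frame->data[0] (0 if the decoder produced no output yet);
     * on error the negative error code is returned. */
    static int decode_audio_step(AVCodecContext *dec, AVFrame *frame,
                                 AVPacket *pkt, int *data_size)
    {
        int got_frame = 0;
        int len1 = avcodec_decode_audio4(dec, frame, &got_frame, pkt);

        *data_size = 0;
        if (len1 < 0)
            return len1;              /* decode error: caller skips the packet */

        pkt->data += len1;            /* consume the bytes the decoder used */
        pkt->size -= len1;

        if (got_frame)                /* may stay 0 for codecs with delay */
            *data_size = av_samples_get_buffer_size(NULL, dec->channels,
                                                    frame->nb_samples,
                                                    dec->sample_fmt, 1);
        return 0;
    }

As the audio_decode_frame() hunks below show, ffplay allocates the AVFrame lazily with avcodec_alloc_frame() and resets it with avcodec_get_frame_defaults() before each decode call.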
Diffstat (limited to 'ffplay.c')
-rw-r--r--  ffplay.c  46
1 file changed, 25 insertions(+), 21 deletions(-)
diff --git a/ffplay.c b/ffplay.c
index c1fe937ec4..976ac06d7b 100644
--- a/ffplay.c
+++ b/ffplay.c
@@ -151,11 +151,10 @@ typedef struct VideoState {
AVStream *audio_st;
PacketQueue audioq;
int audio_hw_buf_size;
- /* samples output by the codec. we reserve more space for avsync
- compensation, resampling and format conversion */
- DECLARE_ALIGNED(16,uint8_t,audio_buf1)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
+ uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
uint8_t *audio_buf;
+ uint8_t *audio_buf1;
unsigned int audio_buf_size; /* in bytes */
int audio_buf_index; /* in bytes */
int audio_write_buf_size;
@@ -174,6 +173,7 @@ typedef struct VideoState {
double audio_current_pts_drift;
int frame_drops_early;
int frame_drops_late;
+ AVFrame *frame;
enum ShowMode {
SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
@@ -1998,8 +1998,8 @@ static int synchronize_audio(VideoState *is, short *samples,
max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
if (wanted_size < min_size)
wanted_size = min_size;
- else if (wanted_size > FFMIN3(max_size, sizeof(is->audio_buf1), sizeof(is->audio_buf2)))
- wanted_size = FFMIN3(max_size, sizeof(is->audio_buf1), sizeof(is->audio_buf2));
+ else if (wanted_size > FFMIN3(max_size, samples_size, sizeof(is->audio_buf2)))
+ wanted_size = FFMIN3(max_size, samples_size, sizeof(is->audio_buf2));
/* add or remove samples to correction the synchro */
if (wanted_size < samples_size) {
@@ -2043,7 +2043,7 @@ static int audio_decode_frame(VideoState *is, double *pts_ptr)
AVPacket *pkt = &is->audio_pkt;
AVCodecContext *dec= is->audio_st->codec;
int len1, len2, data_size, resampled_data_size;
- int64_t dec_channel_layout;
+ int64_t dec_channel_layout, got_frame;
double pts;
int new_packet = 0;
int flush_complete = 0;
@@ -2051,13 +2051,16 @@ static int audio_decode_frame(VideoState *is, double *pts_ptr)
for(;;) {
/* NOTE: the audio packet can contain several frames */
while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
+ if (!is->frame) {
+ if (!(is->frame = avcodec_alloc_frame()))
+ return AVERROR(ENOMEM);
+ } else
+ avcodec_get_frame_defaults(is->frame);
+
if (flush_complete)
break;
new_packet = 0;
- data_size = sizeof(is->audio_buf1);
- len1 = avcodec_decode_audio3(dec,
- (int16_t *)is->audio_buf1, &data_size,
- pkt_temp);
+ len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
if (len1 < 0) {
/* if error, we skip the frame */
pkt_temp->size = 0;
@@ -2067,12 +2070,15 @@ static int audio_decode_frame(VideoState *is, double *pts_ptr)
pkt_temp->data += len1;
pkt_temp->size -= len1;
- if (data_size <= 0) {
+ if (!got_frame) {
/* stop sending empty packets if the decoder is finished */
if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
flush_complete = 1;
continue;
}
+ data_size = av_samples_get_buffer_size(NULL, dec->channels,
+ is->frame->nb_samples,
+ dec->sample_fmt, 1);
dec_channel_layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);
@@ -2101,7 +2107,7 @@ static int audio_decode_frame(VideoState *is, double *pts_ptr)
resampled_data_size = data_size;
if (is->swr_ctx) {
- const uint8_t *in[] = {is->audio_buf1};
+ const uint8_t *in[] = { is->frame->data[0] };
uint8_t *out[] = {is->audio_buf2};
len2 = swr_convert(is->swr_ctx, out, sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt),
in, data_size / dec->channels / av_get_bytes_per_sample(dec->sample_fmt));
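A note on the swr_convert() input count just above: with align set to 1 and a packed (non-planar) sample format, av_samples_get_buffer_size() returns nb_samples * channels * bytes_per_sample, so dividing data_size back down recovers the per-channel sample count the resampler expects. A sketch of that arithmetic, assuming a non-planar dec->sample_fmt:

    int bps       = av_get_bytes_per_sample(dec->sample_fmt);
    int data_size = av_samples_get_buffer_size(NULL, dec->channels,
                                               is->frame->nb_samples,
                                               dec->sample_fmt, 1);
    /* for packed formats: data_size == nb_samples * channels * bps, hence */
    int in_count  = data_size / dec->channels / bps;  /* == is->frame->nb_samples */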
@@ -2116,7 +2122,7 @@ static int audio_decode_frame(VideoState *is, double *pts_ptr)
is->audio_buf = is->audio_buf2;
resampled_data_size = len2 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
} else {
- is->audio_buf= is->audio_buf1;
+ is->audio_buf = is->frame->data[0];
}
/* if no pts, then compute it */
@@ -2150,11 +2156,7 @@ static int audio_decode_frame(VideoState *is, double *pts_ptr)
if (pkt->data == flush_pkt.data)
avcodec_flush_buffers(dec);
- pkt_temp->data = pkt->data;
- pkt_temp->size = pkt->size;
- pkt_temp->flags = pkt->flags;
- pkt_temp->side_data = pkt->side_data;
- pkt_temp->side_data_elems = pkt->side_data_elems;
+ *pkt_temp = *pkt;
/* if update the audio clock with the pts */
if (pkt->pts != AV_NOPTS_VALUE) {
@@ -2178,9 +2180,8 @@ static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
audio_size = audio_decode_frame(is, &pts);
if (audio_size < 0) {
/* if error, just output silence */
- is->audio_buf = is->audio_buf1;
- is->audio_buf_size = 256 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
- memset(is->audio_buf, 0, is->audio_buf_size);
+ is->audio_buf = is->silence_buf;
+ is->audio_buf_size = sizeof(is->silence_buf);
} else {
if (is->show_mode != SHOW_MODE_VIDEO)
update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
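With audio_buf1 no longer a fixed in-struct array, the error path in sdl_audio_callback() above can no longer simply memset() a buffer that is guaranteed to exist (audio_buf may now alias decoder-owned frame data), so the merge plays silence from a small dedicated buffer instead. Roughly, using the field names from the struct hunk above and presuming VideoState is allocated zeroed as elsewhere in ffplay:

    if (audio_size < 0) {
        /* silence_buf is never written to, so it stays all zeroes:
           cheap, safe silence whenever decoding fails. */
        is->audio_buf      = is->silence_buf;
        is->audio_buf_size = sizeof(is->silence_buf);   /* SDL_AUDIO_BUFFER_SIZE */
    }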
@@ -2356,6 +2357,9 @@ static void stream_component_close(VideoState *is, int stream_index)
if (is->swr_ctx)
swr_free(&is->swr_ctx);
av_free_packet(&is->audio_pkt);
+ av_freep(&is->audio_buf1);
+ is->audio_buf = NULL;
+ av_freep(&is->frame);
if (is->rdft) {
av_rdft_end(is->rdft);