author     Michael Niedermayer <michaelni@gmx.at>    2011-11-23 04:02:17 +0100
committer  Michael Niedermayer <michaelni@gmx.at>    2011-11-23 04:02:17 +0100
commit     0b9a69f244e399565d67100a6862886201a594a4 (patch)
tree       22d82b11955930051b34da252f95b992095e91fd /avconv.c
parent     a8e6d4d403c9174b4f57475b80bb5f80e1c57a1f (diff)
parent     963f6855356fa527a27b08b55e026f683a12cebc (diff)
download   ffmpeg-0b9a69f244e399565d67100a6862886201a594a4.tar.gz
Merge remote-tracking branch 'qatar/master'
* qatar/master: (22 commits)
  aacdec: Fix PS in ADTS.
  avconv: Consistently use PIX_FMT_NONE.
  dsputil: use cpuflags in x86 emu_edge_core
  dsputil: use movups instead of movdqu in ff_emu_edge_core_sse()
  wma: initialize prev_block_len_bits, next_block_len_bits, and block_len_bits.
  mov: Remove some redundant and obsolete comments.
  Add libavutil/mathematics.h #includes for INFINITY
  doxy: structure libavformat groups
  doxy: introduce an empty structure in libavcodec
  doxy: provide a start page and document libavutil
  doxy: cleanup pixfmt.h
  regtest: split video encode/decode tests into individual targets
  ARM: add explicit .arch and .fpu directives to asm.S
  pthread: do not touch has_b_frames
  avconv: cleanup the transcoding loop in output_packet().
  avconv: split subtitle transcoding out of output_packet().
  avconv: split video transcoding out of output_packet().
  avconv: split audio transcoding out of output_packet().
  avconv: reindent.
  avconv: move streamcopy-only code out of decoding loop.
  ...

Conflicts:
	avconv.c
	libavcodec/aaccoder.c
	libavcodec/pthread.c
	libavcodec/version.h
	libavutil/audioconvert.h
	libavutil/avutil.h
	libavutil/mem.h
	tests/ref/vsynth1/dv
	tests/ref/vsynth1/mpeg2thread
	tests/ref/vsynth2/dv
	tests/ref/vsynth2/mpeg2thread

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Diffstat (limited to 'avconv.c')
-rw-r--r--  avconv.c | 696
1 file changed, 366 insertions(+), 330 deletions(-)
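
Note: the avconv-side commits in this merge restructure output_packet() so that decoding for each
media type lives in its own helper and stream copy is handled outside the decode loop. As a rough
orientation aid for reading the diff below, the resulting control flow looks roughly like this
(function names are the ones introduced by the patch; error handling and the surrounding loop are
elided):

    /* inside the decode loop of output_packet() */
    switch (ist->st->codec->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        ret = transcode_audio    (ist, &avpkt, &got_output);            /* decode + volume + do_audio_out() */
        break;
    case AVMEDIA_TYPE_VIDEO:
        ret = transcode_video    (ist, &avpkt, &got_output, &pkt_pts);  /* decode + filters + do_video_out() */
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        ret = transcode_subtitles(ist, &avpkt, &got_output);            /* decode + do_subtitle_out() */
        break;
    }

    /* after the loop: streams that are not re-encoded go through
     * check_output_constraints() and do_streamcopy() instead of the
     * old inline packet-forwarding code */
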
diff --git a/avconv.c b/avconv.c
index 81f63eca88..ec2c573d7b 100644
--- a/avconv.c
+++ b/avconv.c
@@ -722,11 +722,11 @@ static void choose_pixel_fmt(AVStream *st, AVCodec *codec)
p= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_BGRA, PIX_FMT_NONE};
}
}
- for(; *p!=-1; p++){
+ for (; *p != PIX_FMT_NONE; p++) {
if(*p == st->codec->pix_fmt)
break;
}
- if (*p == -1) {
+ if (*p == PIX_FMT_NONE) {
if(st->codec->pix_fmt != PIX_FMT_NONE)
av_log(NULL, AV_LOG_WARNING,
"Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
@@ -1596,26 +1596,338 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
}
}
+/*
+ * Check whether a packet from ist should be written into ost at this time
+ */
+static int check_output_constraints(InputStream *ist, OutputStream *ost)
+{
+ OutputFile *of = &output_files[ost->file_index];
+ int ist_index = ist - input_streams;
+
+ if (ost->source_index != ist_index)
+ return 0;
+
+ if (of->start_time && ist->pts < of->start_time)
+ return 0;
+
+ if (of->recording_time != INT64_MAX &&
+ av_compare_ts(ist->pts, AV_TIME_BASE_Q, of->recording_time + of->start_time,
+ (AVRational){1, 1000000}) >= 0) {
+ ost->is_past_recording_time = 1;
+ return 0;
+ }
+
+ return 1;
+}
+
+static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
+{
+ OutputFile *of = &output_files[ost->file_index];
+ int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
+ AVPicture pict;
+ AVPacket opkt;
+
+ av_init_packet(&opkt);
+
+ if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
+ !ost->copy_initial_nonkeyframes)
+ return;
+
+ /* force the input stream PTS */
+ if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
+ audio_size += pkt->size;
+ else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
+ video_size += pkt->size;
+ ost->sync_opts++;
+ }
+
+ opkt.stream_index = ost->index;
+ if (pkt->pts != AV_NOPTS_VALUE)
+ opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
+ else
+ opkt.pts = AV_NOPTS_VALUE;
+
+ if (pkt->dts == AV_NOPTS_VALUE)
+ opkt.dts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->st->time_base);
+ else
+ opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
+ opkt.dts -= ost_tb_start_time;
+
+ opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
+ opkt.flags = pkt->flags;
+
+ //FIXME remove the following 2 lines they shall be replaced by the bitstream filters
+ if( ost->st->codec->codec_id != CODEC_ID_H264
+ && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
+ && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
+ ) {
+ if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
+ opkt.destruct = av_destruct_packet;
+ } else {
+ opkt.data = pkt->data;
+ opkt.size = pkt->size;
+ }
+ if (of->ctx->oformat->flags & AVFMT_RAWPICTURE) {
+ /* store AVPicture in AVPacket, as expected by the output format */
+ avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
+ opkt.data = (uint8_t *)&pict;
+ opkt.size = sizeof(AVPicture);
+ opkt.flags |= AV_PKT_FLAG_KEY;
+ }
+
+ write_frame(of->ctx, &opkt, ost->st->codec, ost->bitstream_filters);
+ ost->st->codec->frame_number++;
+ ost->frame_number++;
+ av_free_packet(&opkt);
+}
+
+static void rate_emu_sleep(InputStream *ist)
+{
+ if (input_files[ist->file_index].rate_emu) {
+ int64_t pts = av_rescale(ist->pts, 1000000, AV_TIME_BASE);
+ int64_t now = av_gettime() - ist->start;
+ if (pts > now)
+ usleep(pts - now);
+ }
+}
+
+static int transcode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
+{
+ static unsigned int samples_size = 0;
+ int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
+ uint8_t *decoded_data_buf = NULL;
+ int decoded_data_size = 0;
+ int i, ret;
+
+ if (pkt && samples_size < FFMAX(pkt->size * bps, AVCODEC_MAX_AUDIO_FRAME_SIZE)) {
+ av_free(samples);
+ samples_size = FFMAX(pkt->size * bps, AVCODEC_MAX_AUDIO_FRAME_SIZE);
+ samples = av_malloc(samples_size);
+ }
+ decoded_data_size = samples_size;
+
+ ret = avcodec_decode_audio3(ist->st->codec, samples, &decoded_data_size,
+ pkt);
+ if (ret < 0)
+ return ret;
+ pkt->data += ret;
+ pkt->size -= ret;
+ *got_output = decoded_data_size > 0;
+
+ /* Some bug in mpeg audio decoder gives */
+ /* decoded_data_size < 0, it seems they are overflows */
+ if (!*got_output) {
+ /* no audio frame */
+ return 0;
+ }
+
+ decoded_data_buf = (uint8_t *)samples;
+ ist->next_pts += ((int64_t)AV_TIME_BASE/bps * decoded_data_size) /
+ (ist->st->codec->sample_rate * ist->st->codec->channels);
+
+ // preprocess audio (volume)
+ if (audio_volume != 256) {
+ switch (ist->st->codec->sample_fmt) {
+ case AV_SAMPLE_FMT_U8:
+ {
+ uint8_t *volp = samples;
+ for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
+ int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128;
+ *volp++ = av_clip_uint8(v);
+ }
+ break;
+ }
+ case AV_SAMPLE_FMT_S16:
+ {
+ int16_t *volp = samples;
+ for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
+ int v = ((*volp) * audio_volume + 128) >> 8;
+ *volp++ = av_clip_int16(v);
+ }
+ break;
+ }
+ case AV_SAMPLE_FMT_S32:
+ {
+ int32_t *volp = samples;
+ for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
+ int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8);
+ *volp++ = av_clipl_int32(v);
+ }
+ break;
+ }
+ case AV_SAMPLE_FMT_FLT:
+ {
+ float *volp = samples;
+ float scale = audio_volume / 256.f;
+ for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
+ *volp++ *= scale;
+ }
+ break;
+ }
+ case AV_SAMPLE_FMT_DBL:
+ {
+ double *volp = samples;
+ double scale = audio_volume / 256.;
+ for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
+ *volp++ *= scale;
+ }
+ break;
+ }
+ default:
+ av_log(NULL, AV_LOG_FATAL,
+ "Audio volume adjustment on sample format %s is not supported.\n",
+ av_get_sample_fmt_name(ist->st->codec->sample_fmt));
+ exit_program(1);
+ }
+ }
+
+ rate_emu_sleep(ist);
+
+ for (i = 0; i < nb_output_streams; i++) {
+ OutputStream *ost = &output_streams[i];
+
+ if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
+ continue;
+ do_audio_out(output_files[ost->file_index].ctx, ost, ist,
+ decoded_data_buf, decoded_data_size);
+ }
+ return 0;
+}
+
+static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *pkt_pts)
+{
+ AVFrame *decoded_frame, *filtered_frame = NULL;
+ void *buffer_to_free = NULL;
+ int i, ret = 0;
+ float quality;
+#if CONFIG_AVFILTER
+ int frame_available = 1;
+#endif
+
+ if (!(decoded_frame = avcodec_alloc_frame()))
+ return AVERROR(ENOMEM);
+ pkt->pts = *pkt_pts;
+ pkt->dts = ist->pts;
+ *pkt_pts = AV_NOPTS_VALUE;
+
+ ret = avcodec_decode_video2(ist->st->codec,
+ decoded_frame, got_output, pkt);
+ if (ret < 0)
+ goto fail;
+
+ quality = same_quant ? decoded_frame->quality : 0;
+ if (!*got_output) {
+ /* no picture yet */
+ av_freep(&decoded_frame);
+ return 0;
+ }
+ ist->next_pts = ist->pts = decoded_frame->best_effort_timestamp;
+ if (ist->st->codec->time_base.num != 0) {
+ int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
+ ist->st->codec->ticks_per_frame;
+ ist->next_pts += ((int64_t)AV_TIME_BASE *
+ ist->st->codec->time_base.num * ticks) /
+ ist->st->codec->time_base.den;
+ }
+ pkt->size = 0;
+ pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
+
+ rate_emu_sleep(ist);
+
+ for (i = 0; i < nb_output_streams; i++) {
+ OutputStream *ost = &output_streams[i];
+ int frame_size;
+
+ if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
+ continue;
+
+#if CONFIG_AVFILTER
+ if (ost->input_video_filter) {
+ if (!decoded_frame->sample_aspect_ratio.num)
+ decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
+ decoded_frame->pts = ist->pts;
+
+ av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame, AV_VSRC_BUF_FLAG_OVERWRITE);
+ if (!(filtered_frame = avcodec_alloc_frame())) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ frame_available = avfilter_poll_frame(ost->output_video_filter->inputs[0]);
+ }
+ while (frame_available) {
+ if (ost->output_video_filter) {
+ AVRational ist_pts_tb = ost->output_video_filter->inputs[0]->time_base;
+ if (av_buffersink_get_buffer_ref(ost->output_video_filter, &ost->picref, 0) < 0)
+ goto cont;
+ if (ost->picref) {
+ avfilter_fill_frame_from_video_buffer_ref(filtered_frame, ost->picref);
+ ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
+ }
+ }
+ if (ost->picref->video && !ost->frame_aspect_ratio)
+ ost->st->codec->sample_aspect_ratio = ost->picref->video->sample_aspect_ratio;
+#else
+ filtered_frame = decoded_frame;
+#endif
+
+ do_video_out(output_files[ost->file_index].ctx, ost, ist, filtered_frame, &frame_size,
+ same_quant ? quality : ost->st->codec->global_quality);
+ if (vstats_filename && frame_size)
+ do_video_stats(output_files[ost->file_index].ctx, ost, frame_size);
+#if CONFIG_AVFILTER
+ cont:
+ frame_available = ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]);
+ if (ost->picref)
+ avfilter_unref_buffer(ost->picref);
+ }
+ av_freep(&filtered_frame);
+#endif
+ }
+
+fail:
+ av_free(buffer_to_free);
+ av_freep(&decoded_frame);
+ return ret;
+}
+
+static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
+{
+ AVSubtitle subtitle;
+ int i, ret = avcodec_decode_subtitle2(ist->st->codec,
+ &subtitle, got_output, pkt);
+ if (ret < 0)
+ return ret;
+ if (!*got_output)
+ return 0;
+
+ pkt->size = 0;
+
+ rate_emu_sleep(ist);
+
+ for (i = 0; i < nb_output_streams; i++) {
+ OutputStream *ost = &output_streams[i];
+
+ if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
+ continue;
+
+ do_subtitle_out(output_files[ost->file_index].ctx, ost, ist, &subtitle, pkt->pts);
+ }
+
+ avsubtitle_free(&subtitle);
+ return 0;
+}
+
/* pkt = NULL means EOF (needed to flush decoder buffers) */
static int output_packet(InputStream *ist, int ist_index,
OutputStream *ost_table, int nb_ostreams,
const AVPacket *pkt)
{
- AVFormatContext *os;
OutputStream *ost;
int ret = 0, i;
int got_output;
- void *buffer_to_free = NULL;
- static unsigned int samples_size= 0;
- AVSubtitle subtitle, *subtitle_to_free;
int64_t pkt_pts = AV_NOPTS_VALUE;
-#if CONFIG_AVFILTER
- int frame_available;
-#endif
- float quality;
AVPacket avpkt;
- int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
if(ist->next_pts == AV_NOPTS_VALUE)
ist->next_pts= ist->pts;
@@ -1636,10 +1948,7 @@ static int output_packet(InputStream *ist, int ist_index,
pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
//while we have more to decode or while the decoder did output something on EOF
- while (avpkt.size > 0 || (!pkt && got_output)) {
- uint8_t *data_buf, *decoded_data_buf;
- int data_size, decoded_data_size;
- AVFrame *decoded_frame, *filtered_frame;
+ while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
handle_eof:
ist->pts= ist->next_pts;
@@ -1648,329 +1957,56 @@ static int output_packet(InputStream *ist, int ist_index,
"Multiple frames in a packet from stream %d\n", pkt->stream_index);
ist->showed_multi_packet_warning=1;
- /* decode the packet if needed */
- decoded_frame = filtered_frame = NULL;
- decoded_data_buf = NULL; /* fail safe */
- decoded_data_size= 0;
- data_buf = avpkt.data;
- data_size = avpkt.size;
- subtitle_to_free = NULL;
- if (ist->decoding_needed) {
- switch(ist->st->codec->codec_type) {
- case AVMEDIA_TYPE_AUDIO:{
- if(pkt && samples_size < FFMAX(pkt->size * bps, AVCODEC_MAX_AUDIO_FRAME_SIZE)) {
- samples_size = FFMAX(pkt->size * bps, AVCODEC_MAX_AUDIO_FRAME_SIZE);
- av_free(samples);
- samples= av_malloc(samples_size);
- }
- decoded_data_size= samples_size;
- /* XXX: could avoid copy if PCM 16 bits with same
- endianness as CPU */
- ret = avcodec_decode_audio3(ist->st->codec, samples, &decoded_data_size,
- &avpkt);
- if (ret < 0)
- return ret;
- avpkt.data += ret;
- avpkt.size -= ret;
- data_size = ret;
- got_output = decoded_data_size > 0;
- /* Some bug in mpeg audio decoder gives */
- /* decoded_data_size < 0, it seems they are overflows */
- if (!got_output) {
- /* no audio frame */
- continue;
- }
- decoded_data_buf = (uint8_t *)samples;
- ist->next_pts += ((int64_t)AV_TIME_BASE/bps * decoded_data_size) /
- (ist->st->codec->sample_rate * ist->st->codec->channels);
- break;}
- case AVMEDIA_TYPE_VIDEO:
- if (!(decoded_frame = avcodec_alloc_frame()))
- return AVERROR(ENOMEM);
- avpkt.pts = pkt_pts;
- avpkt.dts = ist->pts;
- pkt_pts = AV_NOPTS_VALUE;
-
- ret = avcodec_decode_video2(ist->st->codec,
- decoded_frame, &got_output, &avpkt);
- quality = same_quant ? decoded_frame->quality : 0;
- if (ret < 0)
- goto fail;
- if (!got_output) {
- /* no picture yet */
- av_freep(&decoded_frame);
- goto discard_packet;
- }
- ist->next_pts = ist->pts = decoded_frame->best_effort_timestamp;
- if (ist->st->codec->time_base.num != 0) {
- int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
- ist->next_pts += ((int64_t)AV_TIME_BASE *
- ist->st->codec->time_base.num * ticks) /
- ist->st->codec->time_base.den;
- }
- avpkt.size = 0;
- buffer_to_free = NULL;
- pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
- break;
- case AVMEDIA_TYPE_SUBTITLE:
- ret = avcodec_decode_subtitle2(ist->st->codec,
- &subtitle, &got_output, &avpkt);
- if (ret < 0)
- return ret;
- if (!got_output) {
- goto discard_packet;
- }
- subtitle_to_free = &subtitle;
- avpkt.size = 0;
- break;
- default:
- return -1;
- }
- } else {
- switch(ist->st->codec->codec_type) {
- case AVMEDIA_TYPE_AUDIO:
- ist->next_pts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
- ist->st->codec->sample_rate;
- break;
- case AVMEDIA_TYPE_VIDEO:
- if (ist->st->codec->time_base.num != 0) {
- int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
- ist->next_pts += ((int64_t)AV_TIME_BASE *
- ist->st->codec->time_base.num * ticks) /
- ist->st->codec->time_base.den;
- }
- break;
- }
- avpkt.size = 0;
- }
-
- // preprocess audio (volume)
- if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
- if (audio_volume != 256) {
- switch (ist->st->codec->sample_fmt) {
- case AV_SAMPLE_FMT_U8:
- {
- uint8_t *volp = samples;
- for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
- int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128;
- *volp++ = av_clip_uint8(v);
- }
- break;
- }
- case AV_SAMPLE_FMT_S16:
- {
- int16_t *volp = samples;
- for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
- int v = ((*volp) * audio_volume + 128) >> 8;
- *volp++ = av_clip_int16(v);
- }
- break;
- }
- case AV_SAMPLE_FMT_S32:
- {
- int32_t *volp = samples;
- for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
- int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8);
- *volp++ = av_clipl_int32(v);
- }
- break;
- }
- case AV_SAMPLE_FMT_FLT:
- {
- float *volp = samples;
- float scale = audio_volume / 256.f;
- for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
- *volp++ *= scale;
- }
- break;
- }
- case AV_SAMPLE_FMT_DBL:
- {
- double *volp = samples;
- double scale = audio_volume / 256.;
- for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
- *volp++ *= scale;
- }
- break;
- }
- default:
- av_log(NULL, AV_LOG_FATAL,
- "Audio volume adjustment on sample format %s is not supported.\n",
- av_get_sample_fmt_name(ist->st->codec->sample_fmt));
- exit_program(1);
- }
- }
- }
-
- /* frame rate emulation */
- if (input_files[ist->file_index].rate_emu) {
- int64_t pts = av_rescale(ist->pts, 1000000, AV_TIME_BASE);
- int64_t now = av_gettime() - ist->start;
- if (pts > now)
- usleep(pts - now);
+ switch(ist->st->codec->codec_type) {
+ case AVMEDIA_TYPE_AUDIO:
+ ret = transcode_audio (ist, &avpkt, &got_output);
+ break;
+ case AVMEDIA_TYPE_VIDEO:
+ ret = transcode_video (ist, &avpkt, &got_output, &pkt_pts);
+ break;
+ case AVMEDIA_TYPE_SUBTITLE:
+ ret = transcode_subtitles(ist, &avpkt, &got_output);
+ break;
+ default:
+ return -1;
}
- /* if output time reached then transcode raw format,
- encode packets and output them */
- for (i = 0; i < nb_ostreams; i++) {
- OutputFile *of = &output_files[ost_table[i].file_index];
- int frame_size;
-
- ost = &ost_table[i];
- if (ost->source_index != ist_index)
- continue;
-
- if (of->start_time && ist->pts < of->start_time)
- continue;
- if (of->recording_time != INT64_MAX &&
- av_compare_ts(ist->pts, AV_TIME_BASE_Q, of->recording_time + of->start_time,
- (AVRational){1, 1000000}) >= 0) {
- ost->is_past_recording_time = 1;
+ if (ret < 0)
+ return ret;
+ if (!got_output) {
+ if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
continue;
- }
+ goto discard_packet;
+ }
+ }
+ discard_packet:
-#if CONFIG_AVFILTER
- if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
- ost->input_video_filter) {
- if (!decoded_frame->sample_aspect_ratio.num)
- decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
- decoded_frame->pts = ist->pts;
-
- av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame, AV_VSRC_BUF_FLAG_OVERWRITE);
- if (!(filtered_frame = avcodec_alloc_frame())) {
- ret = AVERROR(ENOMEM);
- goto fail;
- }
+ /* handle stream copy */
+ if (!ist->decoding_needed) {
+ rate_emu_sleep(ist);
+ switch (ist->st->codec->codec_type) {
+ case AVMEDIA_TYPE_AUDIO:
+ ist->next_pts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
+ ist->st->codec->sample_rate;
+ break;
+ case AVMEDIA_TYPE_VIDEO:
+ if (ist->st->codec->time_base.num != 0) {
+ int ticks = ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
+ ist->next_pts += ((int64_t)AV_TIME_BASE *
+ ist->st->codec->time_base.num * ticks) /
+ ist->st->codec->time_base.den;
}
- frame_available = ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO ||
- !ost->output_video_filter || avfilter_poll_frame(ost->output_video_filter->inputs[0]);
- while (frame_available) {
- if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ost->output_video_filter) {
- AVRational ist_pts_tb = ost->output_video_filter->inputs[0]->time_base;
- if (av_buffersink_get_buffer_ref(ost->output_video_filter, &ost->picref, 0) < 0)
- goto cont;
- if (ost->picref) {
- avfilter_fill_frame_from_video_buffer_ref(filtered_frame, ost->picref);
- ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
- }
- }
-#else
- filtered_frame = decoded_frame;
-#endif
- os = output_files[ost->file_index].ctx;
-
- /* set the input output pts pairs */
- //ost->sync_ipts = (double)(ist->pts + input_files[ist->file_index].ts_offset - start_time)/ AV_TIME_BASE;
-
- if (ost->encoding_needed) {
- av_assert0(ist->decoding_needed);
- switch(ost->st->codec->codec_type) {
- case AVMEDIA_TYPE_AUDIO:
- do_audio_out(os, ost, ist, decoded_data_buf, decoded_data_size);
- break;
- case AVMEDIA_TYPE_VIDEO:
-#if CONFIG_AVFILTER
- if (ost->picref->video && !ost->frame_aspect_ratio)
- ost->st->codec->sample_aspect_ratio = ost->picref->video->sample_aspect_ratio;
-#endif
- do_video_out(os, ost, ist, filtered_frame, &frame_size,
- same_quant ? quality : ost->st->codec->global_quality);
- if (vstats_filename && frame_size)
- do_video_stats(os, ost, frame_size);
- break;
- case AVMEDIA_TYPE_SUBTITLE:
- do_subtitle_out(os, ost, ist, &subtitle,
- pkt->pts);
- break;
- default:
- abort();
- }
- } else {
- AVPicture pict;
- AVPacket opkt;
- int64_t ost_tb_start_time= av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
- av_init_packet(&opkt);
-
- if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
- !ost->copy_initial_nonkeyframes)
-#if !CONFIG_AVFILTER
- continue;
-#else
- goto cont;
-#endif
-
- /* no reencoding needed : output the packet directly */
- /* force the input stream PTS */
-
- if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
- audio_size += data_size;
- else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
- video_size += data_size;
- ost->sync_opts++;
- }
-
- opkt.stream_index= ost->index;
- if(pkt->pts != AV_NOPTS_VALUE)
- opkt.pts= av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
- else
- opkt.pts= AV_NOPTS_VALUE;
-
- if (pkt->dts == AV_NOPTS_VALUE)
- opkt.dts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->st->time_base);
- else
- opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
- opkt.dts -= ost_tb_start_time;
-
- opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
- opkt.flags= pkt->flags;
-
- //FIXME remove the following 2 lines they shall be replaced by the bitstream filters
- if( ost->st->codec->codec_id != CODEC_ID_H264
- && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
- && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
- ) {
- if(av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, data_buf, data_size, pkt->flags & AV_PKT_FLAG_KEY))
- opkt.destruct= av_destruct_packet;
- } else {
- opkt.data = data_buf;
- opkt.size = data_size;
- }
+ break;
+ }
+ }
+ for (i = 0; pkt && i < nb_ostreams; i++) {
+ ost = &ost_table[i];
- if (os->oformat->flags & AVFMT_RAWPICTURE) {
- /* store AVPicture in AVPacket, as expected by the output format */
- avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
- opkt.data = (uint8_t *)&pict;
- opkt.size = sizeof(AVPicture);
- opkt.flags |= AV_PKT_FLAG_KEY;
- }
- write_frame(os, &opkt, ost->st->codec, ost->bitstream_filters);
- ost->st->codec->frame_number++;
- ost->frame_number++;
- av_free_packet(&opkt);
- }
-#if CONFIG_AVFILTER
- cont:
- frame_available = (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) &&
- ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]);
- if (ost->picref)
- avfilter_unref_buffer(ost->picref);
- }
- av_freep(&filtered_frame);
-#endif
- }
+ if (!check_output_constraints(ist, ost) || ost->encoding_needed)
+ continue;
-fail:
- av_free(buffer_to_free);
- /* XXX: allocate the subtitles in the codec ? */
- if (subtitle_to_free) {
- avsubtitle_free(subtitle_to_free);
- subtitle_to_free = NULL;
- }
- av_freep(&decoded_frame);
- if (ret < 0)
- return ret;
+ do_streamcopy(ist, ost, pkt);
}
- discard_packet:
return 0;
}