| | | |
|---|---|---|
| author | Michael Niedermayer <michaelni@gmx.at> | 2014-01-26 23:35:38 +0100 |
| committer | Michael Niedermayer <michaelni@gmx.at> | 2014-01-26 23:35:38 +0100 |
| commit | 896d6a7736c82a181bf17eb240f37645f6b7fa91 (patch) | |
| tree | a4deb046f9d19f5c37c45278f49057ade8890d39 /libavformat/utils.c | |
| parent | c2871568cffe5c8a32ac7db35febf4267746395b (diff) | |
| parent | 50ecf15712354a1d5b3f4dc9a57ff90ed7ee9654 (diff) | |
| download | ffmpeg-896d6a7736c82a181bf17eb240f37645f6b7fa91.tar.gz | |
Merge commit '50ecf15712354a1d5b3f4dc9a57ff90ed7ee9654'
* commit '50ecf15712354a1d5b3f4dc9a57ff90ed7ee9654':
avformat: utils: K&R formatting cosmetics
Conflicts:
libavformat/utils.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
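The merged change is purely cosmetic: the hunks below reindent libavformat/utils.c to K&R-style formatting without changing behavior. As a quick, hypothetical illustration of the convention being applied (the helper below is invented for the example and does not appear in utils.c): a space after `if`/`for`/`while`, spaces around operators and `=`, and cuddled `} else` braces.

```c
/* Hypothetical before/after sketch of the K&R reformatting applied by
 * this merge; the function is made up for illustration only. */

/* Before: compact spacing common in older FFmpeg code. */
static int clamp_size_before(int size, int maxsize){
    if(size>maxsize){
        size= maxsize;
    }else if(size<0)
        size= 0;
    return size;
}

/* After: K&R formatting -- space after keywords, spaces around
 * operators and '=', and a cuddled "} else". */
static int clamp_size_after(int size, int maxsize)
{
    if (size > maxsize) {
        size = maxsize;
    } else if (size < 0)
        size = 0;
    return size;
}
```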
Diffstat (limited to 'libavformat/utils.c')
-rw-r--r-- | libavformat/utils.c | 1857 |
1 file changed, 998 insertions, 859 deletions
diff --git a/libavformat/utils.c b/libavformat/utils.c index 421dfef7df..d425b5bc61 100644 --- a/libavformat/utils.c +++ b/libavformat/utils.c @@ -19,36 +19,39 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +#undef NDEBUG +#include <assert.h> +#include <stdarg.h> #include <stdint.h> -#include "avformat.h" -#include "avio_internal.h" -#include "internal.h" -#include "libavcodec/internal.h" -#include "libavcodec/raw.h" -#include "libavcodec/bytestream.h" -#include "libavutil/opt.h" -#include "libavutil/dict.h" -#include "libavutil/internal.h" -#include "libavutil/pixdesc.h" -#include "metadata.h" -#include "id3v2.h" +#include "config.h" + #include "libavutil/avassert.h" #include "libavutil/avstring.h" +#include "libavutil/dict.h" +#include "libavutil/internal.h" #include "libavutil/mathematics.h" +#include "libavutil/opt.h" #include "libavutil/parseutils.h" +#include "libavutil/pixdesc.h" #include "libavutil/time.h" #include "libavutil/timestamp.h" -#include "riff.h" + +#include "libavcodec/bytestream.h" +#include "libavcodec/internal.h" +#include "libavcodec/raw.h" + #include "audiointerleave.h" -#include "url.h" -#include <stdarg.h> +#include "avformat.h" +#include "avio_internal.h" +#include "id3v2.h" +#include "internal.h" +#include "metadata.h" #if CONFIG_NETWORK #include "network.h" #endif - -#undef NDEBUG -#include <assert.h> +#include "riff.h" +#include "url.h" /** * @file @@ -91,10 +94,10 @@ static int64_t wrap_timestamp(AVStream *st, int64_t timestamp) st->pts_wrap_reference != AV_NOPTS_VALUE && timestamp != AV_NOPTS_VALUE) { if (st->pts_wrap_behavior == AV_PTS_WRAP_ADD_OFFSET && timestamp < st->pts_wrap_reference) - return timestamp + (1ULL<<st->pts_wrap_bits); + return timestamp + (1ULL << st->pts_wrap_bits); else if (st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET && timestamp >= st->pts_wrap_reference) - return timestamp - (1ULL<<st->pts_wrap_bits); + return timestamp - (1ULL << st->pts_wrap_bits); } return timestamp; } @@ -110,15 +113,15 @@ static AVCodec *find_decoder(AVFormatContext *s, AVStream *st, enum AVCodecID co if (st->codec->codec) return st->codec->codec; - switch(st->codec->codec_type){ + switch (st->codec->codec_type) { case AVMEDIA_TYPE_VIDEO: - if(s->video_codec) return s->video_codec; + if (s->video_codec) return s->video_codec; break; case AVMEDIA_TYPE_AUDIO: - if(s->audio_codec) return s->audio_codec; + if (s->audio_codec) return s->audio_codec; break; case AVMEDIA_TYPE_SUBTITLE: - if(s->subtitle_codec) return s->subtitle_codec; + if (s->subtitle_codec) return s->subtitle_codec; break; } @@ -135,28 +138,26 @@ int av_format_get_probe_score(const AVFormatContext *s) int ffio_limit(AVIOContext *s, int size) { - if(s->maxsize>=0){ + if (s->maxsize>= 0) { int64_t remaining= s->maxsize - avio_tell(s); - if(remaining < size){ - int64_t newsize= avio_size(s); - if(!s->maxsize || s->maxsize<newsize) - s->maxsize= newsize - !newsize; + if (remaining < size) { + int64_t newsize = avio_size(s); + if (!s->maxsize || s->maxsize<newsize) + s->maxsize = newsize - !newsize; remaining= s->maxsize - avio_tell(s); remaining= FFMAX(remaining, 0); } - if(s->maxsize>=0 && remaining+1 < size){ + if (s->maxsize>= 0 && remaining+1 < size) { av_log(NULL, remaining ? AV_LOG_ERROR : AV_LOG_DEBUG, "Truncating packet of size %d to %"PRId64"\n", size, remaining+1); - size= remaining+1; + size = remaining+1; } } return size; } -/* - * Read the data in sane-sized chunks and append to pkt. - * Return the number of bytes read or an error. 
- */ +/* Read the data in sane-sized chunks and append to pkt. + * Return the number of bytes read or an error. */ static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size) { int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos @@ -167,10 +168,8 @@ static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size) int prev_size = pkt->size; int read_size; - /* - * When the caller requests a lot of data, limit it to the amount left - * in file or SANE_CHUNK_SIZE when it is not known - */ + /* When the caller requests a lot of data, limit it to the amount + * left in file or SANE_CHUNK_SIZE when it is not known. */ read_size = size; if (read_size > SANE_CHUNK_SIZE/10) { read_size = ffio_limit(s, read_size); @@ -217,18 +216,19 @@ int av_append_packet(AVIOContext *s, AVPacket *pkt, int size) return append_packet_chunked(s, pkt, size); } - int av_filename_number_test(const char *filename) { char buf[1024]; - return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0); + return filename && + (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0); } -AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret) +AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, + int *score_ret) { AVProbeData lpd = *pd; AVInputFormat *fmt1 = NULL, *fmt; - int score, nodat = 0, score_max=0; + int score, nodat = 0, score_max = 0; const static uint8_t zerobuffer[AVPROBE_PADDING_SIZE]; if (!lpd.buf) @@ -237,9 +237,9 @@ AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) { int id3len = ff_id3v2_tag_len(lpd.buf); if (lpd.buf_size > id3len + 16) { - lpd.buf += id3len; + lpd.buf += id3len; lpd.buf_size -= id3len; - }else + } else nodat = 1; } @@ -250,22 +250,21 @@ AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score score = 0; if (fmt1->read_probe) { score = fmt1->read_probe(&lpd); - if(fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions)) + if (fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions)) score = FFMAX(score, nodat ? 
AVPROBE_SCORE_EXTENSION / 2 - 1 : 1); } else if (fmt1->extensions) { - if (av_match_ext(lpd.filename, fmt1->extensions)) { + if (av_match_ext(lpd.filename, fmt1->extensions)) score = AVPROBE_SCORE_EXTENSION; - } } if (score > score_max) { score_max = score; - fmt = fmt1; - }else if (score == score_max) + fmt = fmt1; + } else if (score == score_max) fmt = NULL; } - if(nodat) + if (nodat) score_max = FFMIN(AVPROBE_SCORE_EXTENSION / 2 - 1, score_max); - *score_ret= score_max; + *score_ret = score_max; return fmt; } @@ -273,32 +272,36 @@ AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max) { int score_ret; - AVInputFormat *fmt= av_probe_input_format3(pd, is_opened, &score_ret); - if(score_ret > *score_max){ - *score_max= score_ret; + AVInputFormat *fmt = av_probe_input_format3(pd, is_opened, &score_ret); + if (score_ret > *score_max) { + *score_max = score_ret; return fmt; - }else + } else return NULL; } -AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){ - int score=0; +AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened) +{ + int score = 0; return av_probe_input_format2(pd, is_opened, &score); } -static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd) +static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, + AVProbeData *pd) { static const struct { - const char *name; enum AVCodecID id; enum AVMediaType type; + const char *name; + enum AVCodecID id; + enum AVMediaType type; } fmt_id_type[] = { - { "aac" , AV_CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO }, - { "ac3" , AV_CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO }, - { "dts" , AV_CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO }, - { "eac3" , AV_CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO }, - { "h264" , AV_CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO }, - { "loas" , AV_CODEC_ID_AAC_LATM , AVMEDIA_TYPE_AUDIO }, - { "m4v" , AV_CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO }, - { "mp3" , AV_CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO }, + { "aac", AV_CODEC_ID_AAC, AVMEDIA_TYPE_AUDIO }, + { "ac3", AV_CODEC_ID_AC3, AVMEDIA_TYPE_AUDIO }, + { "dts", AV_CODEC_ID_DTS, AVMEDIA_TYPE_AUDIO }, + { "eac3", AV_CODEC_ID_EAC3, AVMEDIA_TYPE_AUDIO }, + { "h264", AV_CODEC_ID_H264, AVMEDIA_TYPE_VIDEO }, + { "loas", AV_CODEC_ID_AAC_LATM, AVMEDIA_TYPE_AUDIO }, + { "m4v", AV_CODEC_ID_MPEG4, AVMEDIA_TYPE_VIDEO }, + { "mp3", AV_CODEC_ID_MP3, AVMEDIA_TYPE_AUDIO }, { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO }, { 0 } }; @@ -307,8 +310,10 @@ static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeDa if (fmt && st->request_probe <= score) { int i; - av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n", - pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score); + av_log(s, AV_LOG_DEBUG, + "Probe with size=%d, packets=%d detected %s with score=%d\n", + pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, + fmt->name, score); for (i = 0; fmt_id_type[i].name; i++) { if (!strcmp(fmt->name, fmt_id_type[i].name)) { st->codec->codec_id = fmt_id_type[i].id; @@ -323,7 +328,7 @@ static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeDa /************************************************************/ /* input media file */ -int av_demuxer_open(AVFormatContext *ic){ +int av_demuxer_open(AVFormatContext *ic) { int err; if (ic->iformat->read_header) { @@ -349,19 +354,18 @@ int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt, int ret = 0, probe_size, 
buf_offset = 0; int score = 0; - if (!max_probe_size) { + if (!max_probe_size) max_probe_size = PROBE_BUF_MAX; - } else if (max_probe_size > PROBE_BUF_MAX) { + else if (max_probe_size > PROBE_BUF_MAX) max_probe_size = PROBE_BUF_MAX; - } else if (max_probe_size < PROBE_BUF_MIN) { + else if (max_probe_size < PROBE_BUF_MIN) { av_log(logctx, AV_LOG_ERROR, "Specified probe size value %u cannot be < %u\n", max_probe_size, PROBE_BUF_MIN); return AVERROR(EINVAL); } - if (offset >= max_probe_size) { + if (offset >= max_probe_size) return AVERROR(EINVAL); - } if (!*fmt && pb->av_class && av_opt_get(pb, "mime_type", AV_OPT_SEARCH_CHILDREN, &mime_type) >= 0 && mime_type) { if (!av_strcasecmp(mime_type, "audio/aacp")) { @@ -370,21 +374,23 @@ int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt, av_freep(&mime_type); } - for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt; - probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) { + for (probe_size = PROBE_BUF_MIN; probe_size <= max_probe_size && !*fmt; + probe_size = FFMIN(probe_size << 1, + FFMAX(max_probe_size, probe_size + 1))) { score = probe_size < max_probe_size ? AVPROBE_SCORE_RETRY : 0; - /* read probe data */ + /* Read probe data. */ if ((ret = av_reallocp(&buf, probe_size + AVPROBE_PADDING_SIZE)) < 0) return ret; - if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) { - /* fail if error was not end of file, otherwise, lower score */ + if ((ret = avio_read(pb, buf + buf_offset, + probe_size - buf_offset)) < 0) { + /* Fail if error was not end of file, otherwise, lower score. */ if (ret != AVERROR_EOF) { av_free(buf); return ret; } score = 0; - ret = 0; /* error was end of file, nothing read */ + ret = 0; /* error was end of file, nothing read */ } buf_offset += ret; if (buf_offset < offset) @@ -394,13 +400,18 @@ int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt, memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE); - /* guess file format */ + /* Guess file format. */ *fmt = av_probe_input_format2(&pd, 1, &score); - if(*fmt){ - if(score <= AVPROBE_SCORE_RETRY){ //this can only be true in the last iteration - av_log(logctx, AV_LOG_WARNING, "Format %s detected only with low score of %d, misdetection possible!\n", (*fmt)->name, score); - }else - av_log(logctx, AV_LOG_DEBUG, "Format %s probed with size=%d and score=%d\n", (*fmt)->name, probe_size, score); + if (*fmt) { + /* This can only be true in the last iteration. */ + if (score <= AVPROBE_SCORE_RETRY) { + av_log(logctx, AV_LOG_WARNING, + "Format %s detected only with low score of %d, " + "misdetection possible!\n", (*fmt)->name, score); + } else + av_log(logctx, AV_LOG_DEBUG, + "Format %s probed with size=%d and score=%d\n", + (*fmt)->name, probe_size, score); #if 0 FILE *f = fopen("probestat.tmp", "ab"); fprintf(f, "probe_size:%d format:%s score:%d filename:%s\n", probe_size, (*fmt)->name, score, filename); @@ -414,7 +425,7 @@ int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt, return AVERROR_INVALIDDATA; } - /* rewind. reuse probe buffer to avoid seeking */ + /* Rewind. Reuse probe buffer to avoid seeking. */ ret = ffio_rewind_with_probe_data(pb, &buf, buf_offset); return ret < 0 ? ret : score; @@ -428,25 +439,26 @@ int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt, return ret < 0 ? 
ret : 0; } - -/* open input file and probe the format if necessary */ -static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options) +/* Open input file and probe the format if necessary. */ +static int init_input(AVFormatContext *s, const char *filename, + AVDictionary **options) { int ret; - AVProbeData pd = {filename, NULL, 0}; + AVProbeData pd = { filename, NULL, 0 }; int score = AVPROBE_SCORE_RETRY; if (s->pb) { s->flags |= AVFMT_FLAG_CUSTOM_IO; if (!s->iformat) - return av_probe_input_buffer2(s->pb, &s->iformat, filename, s, 0, s->probesize); + return av_probe_input_buffer2(s->pb, &s->iformat, filename, + s, 0, s->probesize); else if (s->iformat->flags & AVFMT_NOFILE) av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and " "will be ignored with AVFMT_NOFILE format.\n"); return 0; } - if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) || + if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) || (!s->iformat && (s->iformat = av_probe_input_format2(&pd, 0, &score)))) return score; @@ -455,11 +467,13 @@ static int init_input(AVFormatContext *s, const char *filename, AVDictionary **o return ret; if (s->iformat) return 0; - return av_probe_input_buffer2(s->pb, &s->iformat, filename, s, 0, s->probesize); + return av_probe_input_buffer2(s->pb, &s->iformat, filename, + s, 0, s->probesize); } static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt, - AVPacketList **plast_pktl){ + AVPacketList **plast_pktl) +{ AVPacketList *pktl = av_mallocz(sizeof(AVPacketList)); if (!pktl) return NULL; @@ -469,9 +483,9 @@ static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt, else *packet_buffer = pktl; - /* add the packet in the buffered packet list */ + /* Add the packet in the buffered packet list. */ *plast_pktl = pktl; - pktl->pkt= *pkt; + pktl->pkt = *pkt; return &pktl->pkt; } @@ -482,16 +496,18 @@ int avformat_queue_attached_pictures(AVFormatContext *s) if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC && s->streams[i]->discard < AVDISCARD_ALL) { AVPacket copy = s->streams[i]->attached_pic; - copy.buf = av_buffer_ref(copy.buf); + copy.buf = av_buffer_ref(copy.buf); if (!copy.buf) return AVERROR(ENOMEM); - add_to_pktbuf(&s->raw_packet_buffer, ©, &s->raw_packet_buffer_end); + add_to_pktbuf(&s->raw_packet_buffer, ©, + &s->raw_packet_buffer_end); } return 0; } -int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options) +int avformat_open_input(AVFormatContext **ps, const char *filename, + AVInputFormat *fmt, AVDictionary **options) { AVFormatContext *s = *ps; int ret = 0; @@ -500,7 +516,7 @@ int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputForma if (!s && !(s = avformat_alloc_context())) return AVERROR(ENOMEM); - if (!s->av_class){ + if (!s->av_class) { av_log(NULL, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n"); return AVERROR(EINVAL); } @@ -518,7 +534,7 @@ int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputForma s->probe_score = ret; avio_skip(s->pb, s->skip_initial_bytes); - /* check filename in case an image number is expected */ + /* Check filename in case an image number is expected. 
*/ if (s->iformat->flags & AVFMT_NEEDNUMBER) { if (!av_filename_number_test(filename)) { ret = AVERROR(EINVAL); @@ -529,14 +545,14 @@ int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputForma s->duration = s->start_time = AV_NOPTS_VALUE; av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename)); - /* allocate private data */ + /* Allocate private data. */ if (s->iformat->priv_data_size > 0) { if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) { ret = AVERROR(ENOMEM); goto fail; } if (s->iformat->priv_class) { - *(const AVClass**)s->priv_data = s->iformat->priv_class; + *(const AVClass **) s->priv_data = s->iformat->priv_class; av_opt_set_defaults(s->priv_data); if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0) goto fail; @@ -554,7 +570,7 @@ int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputForma if (id3v2_extra_meta) { if (!strcmp(s->iformat->name, "mp3") || !strcmp(s->iformat->name, "aac") || !strcmp(s->iformat->name, "tta")) { - if((ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0) + if ((ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0) goto fail; } else av_log(s, AV_LOG_DEBUG, "demuxer does not support additional id3 data, skipping\n"); @@ -590,22 +606,25 @@ fail: static void force_codec_ids(AVFormatContext *s, AVStream *st) { - switch(st->codec->codec_type){ + switch (st->codec->codec_type) { case AVMEDIA_TYPE_VIDEO: - if(s->video_codec_id) st->codec->codec_id= s->video_codec_id; + if (s->video_codec_id) + st->codec->codec_id = s->video_codec_id; break; case AVMEDIA_TYPE_AUDIO: - if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id; + if (s->audio_codec_id) + st->codec->codec_id = s->audio_codec_id; break; case AVMEDIA_TYPE_SUBTITLE: - if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id; + if (s->subtitle_codec_id) + st->codec->codec_id = s->subtitle_codec_id; break; } } static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt) { - if(st->request_probe>0){ + if (st->request_probe>0) { AVProbeData *pd = &st->probe_data; int end; av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets); @@ -613,38 +632,38 @@ static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt) if (pkt) { uint8_t *new_buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE); - if(!new_buf) { + if (!new_buf) { av_log(s, AV_LOG_WARNING, "Failed to reallocate probe buffer for stream %d\n", st->index); goto no_packet; } pd->buf = new_buf; - memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size); + memcpy(pd->buf + pd->buf_size, pkt->data, pkt->size); pd->buf_size += pkt->size; - memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE); + memset(pd->buf + pd->buf_size, 0, AVPROBE_PADDING_SIZE); } else { no_packet: st->probe_packets = 0; if (!pd->buf_size) { - av_log(s, AV_LOG_WARNING, "nothing to probe for stream %d\n", - st->index); + av_log(s, AV_LOG_WARNING, + "nothing to probe for stream %d\n", st->index); } } end= s->raw_packet_buffer_remaining_size <= 0 - || st->probe_packets<=0; + || st->probe_packets<= 0; - if(end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){ - int score= set_codec_from_probe_data(s, st, pd); - if( (st->codec->codec_id != AV_CODEC_ID_NONE && score > AVPROBE_SCORE_RETRY) - || end){ - pd->buf_size=0; + if (end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)) { + int score = set_codec_from_probe_data(s, st, pd); + if ( (st->codec->codec_id != AV_CODEC_ID_NONE && score > AVPROBE_SCORE_RETRY) + 
|| end) { + pd->buf_size = 0; av_freep(&pd->buf); - st->request_probe= -1; - if(st->codec->codec_id != AV_CODEC_ID_NONE){ + st->request_probe = -1; + if (st->codec->codec_id != AV_CODEC_ID_NONE) { av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index); - }else + } else av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index); } force_codec_ids(s, st); @@ -664,13 +683,13 @@ static int update_wrap_reference(AVFormatContext *s, AVStream *st, int stream_in ref = pkt->pts; if (st->pts_wrap_reference != AV_NOPTS_VALUE || st->pts_wrap_bits >= 63 || ref == AV_NOPTS_VALUE || !s->correct_ts_overflow) return 0; - ref &= (1LL<<st->pts_wrap_bits)-1; + ref &= (1LL << st->pts_wrap_bits)-1; // reference time stamp should be 60 s before first time stamp pts_wrap_reference = ref - av_rescale(60, st->time_base.den, st->time_base.num); // if first time stamp is not more than 1/8 and 60s before the wrap point, subtract rather than add wrap offset - pts_wrap_behavior = (ref < (1LL<<st->pts_wrap_bits) - (1LL<<st->pts_wrap_bits-3)) || - (ref < (1LL<<st->pts_wrap_bits) - av_rescale(60, st->time_base.den, st->time_base.num)) ? + pts_wrap_behavior = (ref < (1LL << st->pts_wrap_bits) - (1LL << st->pts_wrap_bits-3)) || + (ref < (1LL << st->pts_wrap_bits) - av_rescale(60, st->time_base.den, st->time_base.num)) ? AV_PTS_WRAP_ADD_OFFSET : AV_PTS_WRAP_SUB_OFFSET; first_program = av_find_program_from_stream(s, NULL, stream_index); @@ -678,7 +697,7 @@ static int update_wrap_reference(AVFormatContext *s, AVStream *st, int stream_in if (!first_program) { int default_stream_index = av_find_default_stream_index(s); if (s->streams[default_stream_index]->pts_wrap_reference == AV_NOPTS_VALUE) { - for (i=0; i<s->nb_streams; i++) { + for (i = 0; i < s->nb_streams; i++) { s->streams[i]->pts_wrap_reference = pts_wrap_reference; s->streams[i]->pts_wrap_behavior = pts_wrap_behavior; } @@ -701,9 +720,9 @@ static int update_wrap_reference(AVFormatContext *s, AVStream *st, int stream_in // update every program with differing pts_wrap_reference program = first_program; - while(program) { + while (program) { if (program->pts_wrap_reference != pts_wrap_reference) { - for (i=0; i<program->nb_stream_indexes; i++) { + for (i = 0; i<program->nb_stream_indexes; i++) { s->streams[program->stream_index[i]]->pts_wrap_reference = pts_wrap_reference; s->streams[program->stream_index[i]]->pts_wrap_behavior = pts_wrap_behavior; } @@ -722,18 +741,17 @@ int ff_read_packet(AVFormatContext *s, AVPacket *pkt) int ret, i, err; AVStream *st; - for(;;){ + for (;;) { AVPacketList *pktl = s->raw_packet_buffer; if (pktl) { *pkt = pktl->pkt; - st = s->streams[pkt->stream_index]; - if (s->raw_packet_buffer_remaining_size <= 0) { + st = s->streams[pkt->stream_index]; + if (s->raw_packet_buffer_remaining_size <= 0) if ((err = probe_codec(s, st, NULL)) < 0) return err; - } - if(st->request_probe <= 0){ - s->raw_packet_buffer = pktl->next; + if (st->request_probe <= 0) { + s->raw_packet_buffer = pktl->next; s->raw_packet_buffer_remaining_size += pkt->size; av_free(pktl); return 0; @@ -743,16 +761,15 @@ int ff_read_packet(AVFormatContext *s, AVPacket *pkt) pkt->data = NULL; pkt->size = 0; av_init_packet(pkt); - ret= s->iformat->read_packet(s, pkt); + ret = s->iformat->read_packet(s, pkt); if (ret < 0) { if (!pktl || ret == AVERROR(EAGAIN)) return ret; for (i = 0; i < s->nb_streams; i++) { st = s->streams[i]; - if (st->probe_packets) { + if (st->probe_packets) if ((err = probe_codec(s, st, NULL)) < 0) return err; - } av_assert0(st->request_probe <= 0); } 
continue; @@ -767,12 +784,12 @@ int ff_read_packet(AVFormatContext *s, AVPacket *pkt) continue; } - if(pkt->stream_index >= (unsigned)s->nb_streams){ + if (pkt->stream_index >= (unsigned)s->nb_streams) { av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index); continue; } - st= s->streams[pkt->stream_index]; + st = s->streams[pkt->stream_index]; if (update_wrap_reference(s, st, pkt->stream_index, pkt) && st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET) { // correct first time stamps to negative values @@ -793,7 +810,7 @@ int ff_read_packet(AVFormatContext *s, AVPacket *pkt) if (s->use_wallclock_as_timestamps) pkt->dts = pkt->pts = av_rescale_q(av_gettime(), AV_TIME_BASE_Q, st->time_base); - if(!pktl && st->request_probe <= 0) + if (!pktl && st->request_probe <= 0) return ret; add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end); @@ -845,7 +862,7 @@ int ff_get_audio_frame_size(AVCodecContext *enc, int size, int mux) //For WMA we currently have no other means to calculate duration thus we //do it here by assuming CBR, which is true for all known cases. - if(!mux && enc->bit_rate>0 && size>0 && enc->sample_rate>0 && enc->block_align>1) { + if (!mux && enc->bit_rate>0 && size>0 && enc->sample_rate>0 && enc->block_align>1) { if (enc->codec_id == AV_CODEC_ID_WMAV1 || enc->codec_id == AV_CODEC_ID_WMAV2) return ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate; } @@ -853,7 +870,6 @@ int ff_get_audio_frame_size(AVCodecContext *enc, int size, int mux) return -1; } - /** * Return the frame duration in seconds. Return 0 if not available. */ @@ -864,15 +880,15 @@ void ff_compute_frame_duration(int *pnum, int *pden, AVStream *st, *pnum = 0; *pden = 0; - switch(st->codec->codec_type) { + switch (st->codec->codec_type) { case AVMEDIA_TYPE_VIDEO: if (st->r_frame_rate.num && !pc) { *pnum = st->r_frame_rate.den; *pden = st->r_frame_rate.num; - } else if(st->time_base.num*1000LL > st->time_base.den) { + } else if (st->time_base.num * 1000LL > st->time_base.den) { *pnum = st->time_base.num; *pden = st->time_base.den; - }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){ + } else if (st->codec->time_base.num * 1000LL > st->codec->time_base.den) { *pnum = st->codec->time_base.num; *pden = st->codec->time_base.den; if (pc && pc->repeat_pict) { @@ -881,11 +897,11 @@ void ff_compute_frame_duration(int *pnum, int *pden, AVStream *st, else *pnum *= 1 + pc->repeat_pict; } - //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet - //Thus if we have no parser in such case leave duration undefined. - if(st->codec->ticks_per_frame>1 && !pc){ + /* If this codec can be interlaced or progressive then we need + * a parser to compute duration of a packet. Thus if we have + * no parser in such case leave duration undefined. 
*/ + if (st->codec->ticks_per_frame > 1 && !pc) *pnum = *pden = 0; - } } break; case AVMEDIA_TYPE_AUDIO: @@ -900,10 +916,10 @@ void ff_compute_frame_duration(int *pnum, int *pden, AVStream *st, } } -static int is_intra_only(AVCodecContext *enc){ +static int is_intra_only(AVCodecContext *enc) { const AVCodecDescriptor *desc; - if(enc->codec_type != AVMEDIA_TYPE_VIDEO) + if (enc->codec_type != AVMEDIA_TYPE_VIDEO) return 1; desc = av_codec_get_codec_descriptor(enc); @@ -918,17 +934,17 @@ static int is_intra_only(AVCodecContext *enc){ static int has_decode_delay_been_guessed(AVStream *st) { - if(st->codec->codec_id != AV_CODEC_ID_H264) return 1; - if(!st->info) // if we have left find_stream_info then nb_decoded_frames won't increase anymore for stream copy + if (st->codec->codec_id != AV_CODEC_ID_H264) return 1; + if (!st->info) // if we have left find_stream_info then nb_decoded_frames won't increase anymore for stream copy return 1; #if CONFIG_H264_DECODER - if(st->codec->has_b_frames && + if (st->codec->has_b_frames && avpriv_h264_has_num_reorder_frames(st->codec) == st->codec->has_b_frames) return 1; #endif - if(st->codec->has_b_frames<3) + if (st->codec->has_b_frames<3) return st->nb_decoded_frames >= 7; - else if(st->codec->has_b_frames<4) + else if (st->codec->has_b_frames<4) return st->nb_decoded_frames >= 18; else return st->nb_decoded_frames >= 20; @@ -946,44 +962,47 @@ static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList static void update_initial_timestamps(AVFormatContext *s, int stream_index, int64_t dts, int64_t pts, AVPacket *pkt) { - AVStream *st= s->streams[stream_index]; - AVPacketList *pktl= s->packet_buffer ? s->packet_buffer : s->parse_queue; + AVStream *st = s->streams[stream_index]; + AVPacketList *pktl = s->packet_buffer ? 
s->packet_buffer : s->parse_queue; int64_t pts_buffer[MAX_REORDER_DELAY+1]; int64_t shift; int i, delay; - if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE || is_relative(dts)) + if (st->first_dts != AV_NOPTS_VALUE || + dts == AV_NOPTS_VALUE || + st->cur_dts == AV_NOPTS_VALUE || + is_relative(dts)) return; - delay = st->codec->has_b_frames; - st->first_dts= dts - (st->cur_dts - RELATIVE_TS_BASE); - st->cur_dts= dts; - shift = st->first_dts - RELATIVE_TS_BASE; + delay = st->codec->has_b_frames; + st->first_dts = dts - (st->cur_dts - RELATIVE_TS_BASE); + st->cur_dts = dts; + shift = st->first_dts - RELATIVE_TS_BASE; - for (i=0; i<MAX_REORDER_DELAY+1; i++) + for (i = 0; i<MAX_REORDER_DELAY+1; i++) pts_buffer[i] = AV_NOPTS_VALUE; if (is_relative(pts)) pts += shift; - for(; pktl; pktl= get_next_pkt(s, st, pktl)){ - if(pktl->pkt.stream_index != stream_index) + for (; pktl; pktl = get_next_pkt(s, st, pktl)) { + if (pktl->pkt.stream_index != stream_index) continue; - if(is_relative(pktl->pkt.pts)) + if (is_relative(pktl->pkt.pts)) pktl->pkt.pts += shift; - if(is_relative(pktl->pkt.dts)) + if (is_relative(pktl->pkt.dts)) pktl->pkt.dts += shift; - if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE) - st->start_time= pktl->pkt.pts; + if (st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE) + st->start_time = pktl->pkt.pts; - if(pktl->pkt.pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)){ - pts_buffer[0]= pktl->pkt.pts; - for(i=0; i<delay && pts_buffer[i] > pts_buffer[i+1]; i++) - FFSWAP(int64_t, pts_buffer[i], pts_buffer[i+1]); - if(pktl->pkt.dts == AV_NOPTS_VALUE) - pktl->pkt.dts= pts_buffer[0]; + if (pktl->pkt.pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)) { + pts_buffer[0] = pktl->pkt.pts; + for (i = 0; i<delay && pts_buffer[i] > pts_buffer[i + 1]; i++) + FFSWAP(int64_t, pts_buffer[i], pts_buffer[i + 1]); + if (pktl->pkt.dts == AV_NOPTS_VALUE) + pktl->pkt.dts = pts_buffer[0]; } } @@ -994,51 +1013,54 @@ static void update_initial_timestamps(AVFormatContext *s, int stream_index, static void update_initial_durations(AVFormatContext *s, AVStream *st, int stream_index, int duration) { - AVPacketList *pktl= s->packet_buffer ? s->packet_buffer : s->parse_queue; - int64_t cur_dts= RELATIVE_TS_BASE; + AVPacketList *pktl = s->packet_buffer ? 
s->packet_buffer : s->parse_queue; + int64_t cur_dts = RELATIVE_TS_BASE; - if(st->first_dts != AV_NOPTS_VALUE){ + if (st->first_dts != AV_NOPTS_VALUE) { if (st->update_initial_durations_done) return; st->update_initial_durations_done = 1; - cur_dts= st->first_dts; - for(; pktl; pktl= get_next_pkt(s, st, pktl)){ - if(pktl->pkt.stream_index == stream_index){ - if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration) + cur_dts = st->first_dts; + for (; pktl; pktl = get_next_pkt(s, st, pktl)) { + if (pktl->pkt.stream_index == stream_index) { + if (pktl->pkt.pts != pktl->pkt.dts || + pktl->pkt.dts != AV_NOPTS_VALUE || + pktl->pkt.duration) break; cur_dts -= duration; } } - if(pktl && pktl->pkt.dts != st->first_dts) { + if (pktl && pktl->pkt.dts != st->first_dts) { av_log(s, AV_LOG_DEBUG, "first_dts %s not matching first dts %s (pts %s, duration %d) in the queue\n", av_ts2str(st->first_dts), av_ts2str(pktl->pkt.dts), av_ts2str(pktl->pkt.pts), pktl->pkt.duration); return; } - if(!pktl) { + if (!pktl) { av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in the queue\n", av_ts2str(st->first_dts)); return; } - pktl= s->packet_buffer ? s->packet_buffer : s->parse_queue; + pktl = s->packet_buffer ? s->packet_buffer : s->parse_queue; st->first_dts = cur_dts; - }else if(st->cur_dts != RELATIVE_TS_BASE) + } else if (st->cur_dts != RELATIVE_TS_BASE) return; - for(; pktl; pktl= get_next_pkt(s, st, pktl)){ - if(pktl->pkt.stream_index != stream_index) + for (; pktl; pktl = get_next_pkt(s, st, pktl)) { + if (pktl->pkt.stream_index != stream_index) continue; - if(pktl->pkt.pts == pktl->pkt.dts && (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts) - && !pktl->pkt.duration){ - pktl->pkt.dts= cur_dts; - if(!st->codec->has_b_frames) - pktl->pkt.pts= cur_dts; + if (pktl->pkt.pts == pktl->pkt.dts && + (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts) && + !pktl->pkt.duration) { + pktl->pkt.dts = cur_dts; + if (!st->codec->has_b_frames) + pktl->pkt.pts = cur_dts; // if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO) pktl->pkt.duration = duration; - }else + } else break; cur_dts = pktl->pkt.dts + pktl->pkt.duration; } - if(!pktl) - st->cur_dts= cur_dts; + if (!pktl) + st->cur_dts = cur_dts; } static void compute_pkt_fields(AVFormatContext *s, AVStream *st, @@ -1051,8 +1073,8 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st, if (s->flags & AVFMT_FLAG_NOFILLIN) return; - if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE) - pkt->dts= AV_NOPTS_VALUE; + if ((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE) + pkt->dts = AV_NOPTS_VALUE; if (pc && pc->pict_type == AV_PICTURE_TYPE_B && !st->codec->has_b_frames) @@ -1060,11 +1082,11 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st, st->codec->has_b_frames = 1; /* do we have a video B-frame ? 
*/ - delay= st->codec->has_b_frames; + delay = st->codec->has_b_frames; presentation_delayed = 0; /* XXX: need has_b_frame, but cannot get it if the codec is - not initialized */ + * not initialized */ if (delay && pc && pc->pict_type != AV_PICTURE_TYPE_B) presentation_delayed = 1; @@ -1072,54 +1094,64 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st, if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && st->pts_wrap_bits < 63 && pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) { - if(is_relative(st->cur_dts) || pkt->dts - (1LL<<(st->pts_wrap_bits-1)) > st->cur_dts) { - pkt->dts -= 1LL<<st->pts_wrap_bits; + if (is_relative(st->cur_dts) || pkt->dts - (1LL<<(st->pts_wrap_bits - 1)) > st->cur_dts) { + pkt->dts -= 1LL << st->pts_wrap_bits; } else - pkt->pts += 1LL<<st->pts_wrap_bits; + pkt->pts += 1LL << st->pts_wrap_bits; } - // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg) - // we take the conservative approach and discard both - // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly. - if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){ + /* Some MPEG-2 in MPEG-PS lack dts (issue #171 / input_file.mpg). + * We take the conservative approach and discard both. + * Note: If this is misbehaving for an H.264 file, then possibly + * presentation_delayed is not set correctly. */ + if (delay == 1 && pkt->dts == pkt->pts && + pkt->dts != AV_NOPTS_VALUE && presentation_delayed) { av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts); if ( strcmp(s->iformat->name, "mov,mp4,m4a,3gp,3g2,mj2") && strcmp(s->iformat->name, "flv")) // otherwise we discard correct timestamps for vc1-wmapro.ism - pkt->dts= AV_NOPTS_VALUE; + pkt->dts = AV_NOPTS_VALUE; } - duration = av_mul_q((AVRational){pkt->duration, 1}, st->time_base); + duration = av_mul_q((AVRational) {pkt->duration, 1}, st->time_base); if (pkt->duration == 0) { ff_compute_frame_duration(&num, &den, st, pc, pkt); if (den && num) { - duration = (AVRational){num, den}; - pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN); + duration = (AVRational) {num, den}; + pkt->duration = av_rescale_rnd(1, + num * (int64_t) st->time_base.den, + den * (int64_t) st->time_base.num, + AV_ROUND_DOWN); } } - if(pkt->duration != 0 && (s->packet_buffer || s->parse_queue)) + if (pkt->duration != 0 && (s->packet_buffer || s->parse_queue)) update_initial_durations(s, st, pkt->stream_index, pkt->duration); - /* correct timestamps with byte offset if demuxers only have timestamps - on packet boundaries */ - if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){ + /* Correct timestamps with byte offset if demuxers only have timestamps + * on packet boundaries */ + if (pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size) { /* this will estimate bitrate based on this frame's duration and size */ offset = av_rescale(pc->offset, pkt->duration, pkt->size); - if(pkt->pts != AV_NOPTS_VALUE) + if (pkt->pts != AV_NOPTS_VALUE) pkt->pts += offset; - if(pkt->dts != AV_NOPTS_VALUE) + if (pkt->dts != AV_NOPTS_VALUE) pkt->dts += offset; } /* This may be redundant, but it should not hurt. 
*/ - if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts) + if (pkt->dts != AV_NOPTS_VALUE && + pkt->pts != AV_NOPTS_VALUE && + pkt->pts > pkt->dts) presentation_delayed = 1; - av_dlog(NULL, "IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%d\n", - presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), pkt->stream_index, pc, pkt->duration); - /* interpolate PTS and DTS if they are not present */ - //We skip H264 currently because delay and has_b_frames are not reliably set - if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != AV_CODEC_ID_H264){ + av_dlog(NULL, + "IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%d\n", + presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), + pkt->stream_index, pc, pkt->duration); + /* Interpolate PTS and DTS if they are not present. We skip H264 + * currently because delay and has_b_frames are not reliably set. */ + if ((delay == 0 || (delay == 1 && pc)) && + st->codec->codec_id != AV_CODEC_ID_H264) { if (presentation_delayed) { /* DTS = decompression timestamp */ /* PTS = presentation timestamp */ @@ -1129,16 +1161,16 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st, if (pkt->dts == AV_NOPTS_VALUE) pkt->dts = st->cur_dts; - /* this is tricky: the dts must be incremented by the duration - of the frame we are displaying, i.e. the last I- or P-frame */ + /* This is tricky: the dts must be incremented by the duration + * of the frame we are displaying, i.e. the last I- or P-frame. */ if (st->last_IP_duration == 0) st->last_IP_duration = pkt->duration; - if(pkt->dts != AV_NOPTS_VALUE) + if (pkt->dts != AV_NOPTS_VALUE) st->cur_dts = pkt->dts + st->last_IP_duration; - st->last_IP_duration = pkt->duration; - st->last_IP_pts= pkt->pts; - /* cannot compute PTS if not present (we can compute it only - by knowing the future */ + st->last_IP_duration = pkt->duration; + st->last_IP_pts = pkt->pts; + /* Cannot compute PTS if not present (we can compute it only + * by knowing the future. */ } else if (pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration ) { @@ -1156,17 +1188,18 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st, } } - if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)){ - st->pts_buffer[0]= pkt->pts; - for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++) - FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]); - if(pkt->dts == AV_NOPTS_VALUE) - pkt->dts= st->pts_buffer[0]; + if (pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)) { + st->pts_buffer[0] = pkt->pts; + for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++) + FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]); + if (pkt->dts == AV_NOPTS_VALUE) + pkt->dts = st->pts_buffer[0]; } - if(st->codec->codec_id == AV_CODEC_ID_H264){ // we skipped it above so we try here - update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt); // this should happen on the first packet - } - if(pkt->dts > st->cur_dts) + // We skipped it above so we try here. 
+ if (st->codec->codec_id == AV_CODEC_ID_H264) + // This should happen on the first packet + update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt); + if (pkt->dts > st->cur_dts) st->cur_dts = pkt->dts; av_dlog(NULL, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s\n", @@ -1191,21 +1224,21 @@ static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_en } /** - * Parse a packet, add all split parts to parse_queue + * Parse a packet, add all split parts to parse_queue. * - * @param pkt packet to parse, NULL when flushing the parser at end of stream + * @param pkt Packet to parse, NULL when flushing the parser at end of stream. */ static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index) { AVPacket out_pkt = { 0 }, flush_pkt = { 0 }; - AVStream *st = s->streams[stream_index]; - uint8_t *data = pkt ? pkt->data : NULL; - int size = pkt ? pkt->size : 0; + AVStream *st = s->streams[stream_index]; + uint8_t *data = pkt ? pkt->data : NULL; + int size = pkt ? pkt->size : 0; int ret = 0, got_output = 0; if (!pkt) { av_init_packet(&flush_pkt); - pkt = &flush_pkt; + pkt = &flush_pkt; got_output = 1; } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) { // preserve 0-size sync packets @@ -1216,7 +1249,7 @@ static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index) int len; av_init_packet(&out_pkt); - len = av_parser_parse2(st->parser, st->codec, + len = av_parser_parse2(st->parser, st->codec, &out_pkt.data, &out_pkt.size, data, size, pkt->pts, pkt->dts, pkt->pos); @@ -1234,18 +1267,19 @@ static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index) if (pkt->side_data) { out_pkt.side_data = pkt->side_data; out_pkt.side_data_elems = pkt->side_data_elems; - pkt->side_data = NULL; - pkt->side_data_elems = 0; + pkt->side_data = NULL; + pkt->side_data_elems = 0; } /* set the duration */ out_pkt.duration = 0; if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { if (st->codec->sample_rate > 0) { - out_pkt.duration = av_rescale_q_rnd(st->parser->duration, - (AVRational){ 1, st->codec->sample_rate }, - st->time_base, - AV_ROUND_DOWN); + out_pkt.duration = + av_rescale_q_rnd(st->parser->duration, + (AVRational) { 1, st->codec->sample_rate }, + st->time_base, + AV_ROUND_DOWN); } } else if (st->codec->time_base.num != 0 && st->codec->time_base.den != 0) { @@ -1256,11 +1290,11 @@ static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index) } out_pkt.stream_index = st->index; - out_pkt.pts = st->parser->pts; - out_pkt.dts = st->parser->dts; - out_pkt.pos = st->parser->pos; + out_pkt.pts = st->parser->pts; + out_pkt.dts = st->parser->dts; + out_pkt.pos = st->parser->pos; - if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) + if (st->need_parsing == AVSTREAM_PARSE_FULL_RAW) out_pkt.pos = st->parser->frame_offset; if (st->parser->key_frame == 1 || @@ -1268,14 +1302,14 @@ static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index) st->parser->pict_type == AV_PICTURE_TYPE_I)) out_pkt.flags |= AV_PKT_FLAG_KEY; - if(st->parser->key_frame == -1 && st->parser->pict_type==AV_PICTURE_TYPE_NONE && (pkt->flags&AV_PKT_FLAG_KEY)) + if (st->parser->key_frame == -1 && st->parser->pict_type ==AV_PICTURE_TYPE_NONE && (pkt->flags&AV_PKT_FLAG_KEY)) out_pkt.flags |= AV_PKT_FLAG_KEY; compute_pkt_fields(s, st, st->parser, &out_pkt); if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) { - out_pkt.buf = pkt->buf; - pkt->buf = NULL; + out_pkt.buf = pkt->buf; + pkt->buf = NULL; #if FF_API_DESTRUCT_PACKET 
FF_DISABLE_DEPRECATION_WARNINGS out_pkt.destruct = pkt->destruct; @@ -1293,7 +1327,6 @@ FF_ENABLE_DEPRECATION_WARNINGS } } - /* end of the stream => close and free the parser */ if (pkt == &flush_pkt) { av_parser_close(st->parser); @@ -1311,8 +1344,8 @@ static int read_from_packet_buffer(AVPacketList **pkt_buffer, { AVPacketList *pktl; av_assert0(*pkt_buffer); - pktl = *pkt_buffer; - *pkt = pktl->pkt; + pktl = *pkt_buffer; + *pkt = pktl->pkt; *pkt_buffer = pktl->next; if (!pktl->next) *pkt_buffer_end = NULL; @@ -1336,7 +1369,7 @@ static int read_frame_internal(AVFormatContext *s, AVPacket *pkt) if (ret == AVERROR(EAGAIN)) return ret; /* flush the parsers */ - for(i = 0; i < s->nb_streams; i++) { + for (i = 0; i < s->nb_streams; i++) { st = s->streams[i]; if (st->parser && st->need_parsing) parse_packet(s, NULL, st->index); @@ -1351,20 +1384,20 @@ static int read_frame_internal(AVFormatContext *s, AVPacket *pkt) if (cur_pkt.pts != AV_NOPTS_VALUE && cur_pkt.dts != AV_NOPTS_VALUE && cur_pkt.pts < cur_pkt.dts) { - av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n", + av_log(s, AV_LOG_WARNING, + "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n", cur_pkt.stream_index, av_ts2str(cur_pkt.pts), av_ts2str(cur_pkt.dts), cur_pkt.size); } if (s->debug & FF_FDEBUG_TS) - av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n", + av_log(s, AV_LOG_DEBUG, + "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n", cur_pkt.stream_index, av_ts2str(cur_pkt.pts), av_ts2str(cur_pkt.dts), - cur_pkt.size, - cur_pkt.duration, - cur_pkt.flags); + cur_pkt.size, cur_pkt.duration, cur_pkt.flags); if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) { st->parser = av_parser_init(st->codec->codec_id); @@ -1374,13 +1407,12 @@ static int read_frame_internal(AVFormatContext *s, AVPacket *pkt) avcodec_get_name(st->codec->codec_id)); /* no parser available: just output the raw packets */ st->need_parsing = AVSTREAM_PARSE_NONE; - } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) { + } else if (st->need_parsing == AVSTREAM_PARSE_HEADERS) st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES; - } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) { + else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) st->parser->flags |= PARSER_FLAG_ONCE; - } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) { + else if (st->need_parsing == AVSTREAM_PARSE_FULL_RAW) st->parser->flags |= PARSER_FLAG_USE_CODEC_TS; - } } if (!st->need_parsing || !st->parser) { @@ -1390,7 +1422,8 @@ static int read_frame_internal(AVFormatContext *s, AVPacket *pkt) if ((s->iformat->flags & AVFMT_GENERIC_INDEX) && (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) { ff_reduce_index(s, st->index); - av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME); + av_add_index_entry(st, pkt->pos, pkt->dts, + 0, 0, AVINDEX_KEYFRAME); } got_packet = 1; } else if (st->discard < AVDISCARD_ALL) { @@ -1426,17 +1459,17 @@ static int read_frame_internal(AVFormatContext *s, AVPacket *pkt) } } - if(ret >= 0 && !(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA)) + if (ret >= 0 && !(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA)) av_packet_merge_side_data(pkt); - if(s->debug & FF_FDEBUG_TS) - av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n", - pkt->stream_index, - av_ts2str(pkt->pts), - av_ts2str(pkt->dts), - pkt->size, - pkt->duration, - pkt->flags); + if (s->debug & FF_FDEBUG_TS) + av_log(s, 
AV_LOG_DEBUG, + "read_frame_internal stream=%d, pts=%s, dts=%s, " + "size=%d, duration=%d, flags=%d\n", + pkt->stream_index, + av_ts2str(pkt->pts), + av_ts2str(pkt->dts), + pkt->size, pkt->duration, pkt->flags); return ret; } @@ -1444,14 +1477,15 @@ static int read_frame_internal(AVFormatContext *s, AVPacket *pkt) int av_read_frame(AVFormatContext *s, AVPacket *pkt) { const int genpts = s->flags & AVFMT_FLAG_GENPTS; - int eof = 0; + int eof = 0; int ret; AVStream *st; if (!genpts) { - ret = s->packet_buffer ? - read_from_packet_buffer(&s->packet_buffer, &s->packet_buffer_end, pkt) : - read_frame_internal(s, pkt); + ret = s->packet_buffer + ? read_from_packet_buffer(&s->packet_buffer, + &s->packet_buffer_end, pkt) + : read_frame_internal(s, pkt); if (ret < 0) return ret; goto return_packet; @@ -1471,7 +1505,8 @@ int av_read_frame(AVFormatContext *s, AVPacket *pkt) while (pktl && next_pkt->pts == AV_NOPTS_VALUE) { if (pktl->pkt.stream_index == next_pkt->stream_index && (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0)) { - if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame + if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { + // not B-frame next_pkt->pts = pktl->pkt.dts; } if (last_dts != AV_NOPTS_VALUE) { @@ -1511,7 +1546,7 @@ int av_read_frame(AVFormatContext *s, AVPacket *pkt) } if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt, - &s->packet_buffer_end)) < 0) + &s->packet_buffer_end)) < 0) return AVERROR(ENOMEM); } @@ -1552,21 +1587,20 @@ int av_find_default_stream_index(AVFormatContext *s) if (s->nb_streams <= 0) return -1; - for(i = 0; i < s->nb_streams; i++) { + for (i = 0; i < s->nb_streams; i++) { st = s->streams[i]; if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) { return i; } - if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO) + if (first_audio_index < 0 && + st->codec->codec_type == AVMEDIA_TYPE_AUDIO) first_audio_index = i; } return first_audio_index >= 0 ? first_audio_index : 0; } -/** - * Flush the frame reader. - */ +/** Flush the frame reader. */ void ff_read_frame_flush(AVFormatContext *s) { AVStream *st; @@ -1574,8 +1608,8 @@ void ff_read_frame_flush(AVFormatContext *s) flush_packet_queue(s); - /* for each stream, reset read state */ - for(i = 0; i < s->nb_streams; i++) { + /* Reset read state for each stream. */ + for (i = 0; i < s->nb_streams; i++) { st = s->streams[i]; if (st->parser) { @@ -1583,13 +1617,16 @@ void ff_read_frame_flush(AVFormatContext *s) st->parser = NULL; } st->last_IP_pts = AV_NOPTS_VALUE; - if(st->first_dts == AV_NOPTS_VALUE) st->cur_dts = RELATIVE_TS_BASE; - else st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */ + if (st->first_dts == AV_NOPTS_VALUE) + st->cur_dts = RELATIVE_TS_BASE; + else + /* We set the current DTS to an unspecified origin. 
*/ + st->cur_dts = AV_NOPTS_VALUE; st->probe_packets = MAX_PROBE_PACKETS; - for(j=0; j<MAX_REORDER_DELAY+1; j++) - st->pts_buffer[j]= AV_NOPTS_VALUE; + for (j = 0; j < MAX_REORDER_DELAY + 1; j++) + st->pts_buffer[j] = AV_NOPTS_VALUE; } } @@ -1597,40 +1634,42 @@ void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp) { int i; - for(i = 0; i < s->nb_streams; i++) { + for (i = 0; i < s->nb_streams; i++) { AVStream *st = s->streams[i]; - st->cur_dts = av_rescale(timestamp, - st->time_base.den * (int64_t)ref_st->time_base.num, - st->time_base.num * (int64_t)ref_st->time_base.den); + st->cur_dts = + av_rescale(timestamp, + st->time_base.den * (int64_t) ref_st->time_base.num, + st->time_base.num * (int64_t) ref_st->time_base.den); } } void ff_reduce_index(AVFormatContext *s, int stream_index) { - AVStream *st= s->streams[stream_index]; - unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry); + AVStream *st = s->streams[stream_index]; + unsigned int max_entries = s->max_index_size / sizeof(AVIndexEntry); - if((unsigned)st->nb_index_entries >= max_entries){ + if ((unsigned) st->nb_index_entries >= max_entries) { int i; - for(i=0; 2*i<st->nb_index_entries; i++) - st->index_entries[i]= st->index_entries[2*i]; - st->nb_index_entries= i; + for (i = 0; 2 * i < st->nb_index_entries; i++) + st->index_entries[i] = st->index_entries[2 * i]; + st->nb_index_entries = i; } } int ff_add_index_entry(AVIndexEntry **index_entries, int *nb_index_entries, unsigned int *index_entries_allocated_size, - int64_t pos, int64_t timestamp, int size, int distance, int flags) + int64_t pos, int64_t timestamp, + int size, int distance, int flags) { AVIndexEntry *entries, *ie; int index; - if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry)) + if ((unsigned) *nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry)) return -1; - if(timestamp == AV_NOPTS_VALUE) + if (timestamp == AV_NOPTS_VALUE) return AVERROR(EINVAL); if (size < 0 || size > 0x3FFFFFFF) @@ -1643,39 +1682,42 @@ int ff_add_index_entry(AVIndexEntry **index_entries, index_entries_allocated_size, (*nb_index_entries + 1) * sizeof(AVIndexEntry)); - if(!entries) + if (!entries) return -1; - *index_entries= entries; + *index_entries = entries; - index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY); + index = ff_index_search_timestamp(*index_entries, *nb_index_entries, + timestamp, AVSEEK_FLAG_ANY); - if(index<0){ - index= (*nb_index_entries)++; - ie= &entries[index]; - av_assert0(index==0 || ie[-1].timestamp < timestamp); - }else{ - ie= &entries[index]; - if(ie->timestamp != timestamp){ - if(ie->timestamp <= timestamp) + if (index < 0) { + index = (*nb_index_entries)++; + ie = &entries[index]; + av_assert0(index == 0 || ie[-1].timestamp < timestamp); + } else { + ie = &entries[index]; + if (ie->timestamp != timestamp) { + if (ie->timestamp <= timestamp) return -1; - memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index)); + memmove(entries + index + 1, entries + index, + sizeof(AVIndexEntry) * (*nb_index_entries - index)); (*nb_index_entries)++; - }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance - distance= ie->min_distance; + } else if (ie->pos == pos && distance < ie->min_distance) + // do not reduce the distance + distance = ie->min_distance; } - ie->pos = pos; - ie->timestamp = timestamp; - ie->min_distance= distance; - ie->size= size; - ie->flags = flags; + ie->pos = pos; + ie->timestamp = 
timestamp; + ie->min_distance = distance; + ie->size = size; + ie->flags = flags; return index; } -int av_add_index_entry(AVStream *st, - int64_t pos, int64_t timestamp, int size, int distance, int flags) +int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp, + int size, int distance, int flags) { timestamp = wrap_timestamp(st, timestamp); return ff_add_index_entry(&st->index_entries, &st->nb_index_entries, @@ -1689,36 +1731,34 @@ int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries, int a, b, m; int64_t timestamp; - a = - 1; + a = -1; b = nb_entries; - //optimize appending index entries at the end - if(b && entries[b-1].timestamp < wanted_timestamp) - a= b-1; + // Optimize appending index entries at the end. + if (b && entries[b - 1].timestamp < wanted_timestamp) + a = b - 1; while (b - a > 1) { - m = (a + b) >> 1; + m = (a + b) >> 1; timestamp = entries[m].timestamp; - if(timestamp >= wanted_timestamp) + if (timestamp >= wanted_timestamp) b = m; - if(timestamp <= wanted_timestamp) + if (timestamp <= wanted_timestamp) a = m; } - m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b; + m = (flags & AVSEEK_FLAG_BACKWARD) ? a : b; - if(!(flags & AVSEEK_FLAG_ANY)){ - while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){ + if (!(flags & AVSEEK_FLAG_ANY)) + while (m >= 0 && m < nb_entries && + !(entries[m].flags & AVINDEX_KEYFRAME)) m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1; - } - } - if(m == nb_entries) + if (m == nb_entries) return -1; - return m; + return m; } -int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, - int flags) +int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, int flags) { return ff_index_search_timestamp(st->index_entries, st->nb_index_entries, wanted_timestamp, flags); @@ -1733,9 +1773,10 @@ static int64_t ff_read_timestamp(AVFormatContext *s, int stream_index, int64_t * return ts; } -int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags) +int ff_seek_frame_binary(AVFormatContext *s, int stream_index, + int64_t target_ts, int flags) { - AVInputFormat *avif= s->iformat; + AVInputFormat *avif = s->iformat; int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit; int64_t ts_min, ts_max, ts; int index; @@ -1747,42 +1788,47 @@ int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts av_dlog(s, "read_seek: %d %s\n", stream_index, av_ts2str(target_ts)); - ts_max= - ts_min= AV_NOPTS_VALUE; - pos_limit= -1; //gcc falsely says it may be uninitialized + ts_max = + ts_min = AV_NOPTS_VALUE; + pos_limit = -1; // GCC falsely says it may be uninitialized. - st= s->streams[stream_index]; - if(st->index_entries){ + st = s->streams[stream_index]; + if (st->index_entries) { AVIndexEntry *e; - index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp() - index= FFMAX(index, 0); - e= &st->index_entries[index]; + /* FIXME: Whole function must be checked for non-keyframe entries in + * index case, especially read_timestamp(). 
*/ + index = av_index_search_timestamp(st, target_ts, + flags | AVSEEK_FLAG_BACKWARD); + index = FFMAX(index, 0); + e = &st->index_entries[index]; - if(e->timestamp <= target_ts || e->pos == e->min_distance){ - pos_min= e->pos; - ts_min= e->timestamp; + if (e->timestamp <= target_ts || e->pos == e->min_distance) { + pos_min = e->pos; + ts_min = e->timestamp; av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%s\n", pos_min, av_ts2str(ts_min)); - }else{ - av_assert1(index==0); + } else { + av_assert1(index == 0); } - index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD); + index = av_index_search_timestamp(st, target_ts, + flags & ~AVSEEK_FLAG_BACKWARD); av_assert0(index < st->nb_index_entries); - if(index >= 0){ - e= &st->index_entries[index]; + if (index >= 0) { + e = &st->index_entries[index]; av_assert1(e->timestamp >= target_ts); - pos_max= e->pos; - ts_max= e->timestamp; - pos_limit= pos_max - e->min_distance; - av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%s\n", - pos_max, pos_limit, av_ts2str(ts_max)); + pos_max = e->pos; + ts_max = e->timestamp; + pos_limit = pos_max - e->min_distance; + av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64 + " dts_max=%s\n", pos_max, pos_limit, av_ts2str(ts_max)); } } - pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp); - if(pos<0) + pos = ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, + ts_min, ts_max, flags, &ts, avif->read_timestamp); + if (pos < 0) return -1; /* do the seek */ @@ -1798,28 +1844,30 @@ int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts int ff_find_last_ts(AVFormatContext *s, int stream_index, int64_t *ts, int64_t *pos, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )) { - int64_t step= 1024; + int64_t step = 1024; int64_t limit, ts_max; int64_t filesize = avio_size(s->pb); - int64_t pos_max = filesize - 1; - do{ + int64_t pos_max = filesize - 1; + do { limit = pos_max; pos_max = FFMAX(0, (pos_max) - step); - ts_max = ff_read_timestamp(s, stream_index, &pos_max, limit, read_timestamp); - step += step; - }while(ts_max == AV_NOPTS_VALUE && 2*limit > step); + ts_max = ff_read_timestamp(s, stream_index, + &pos_max, limit, read_timestamp); + step += step; + } while (ts_max == AV_NOPTS_VALUE && 2*limit > step); if (ts_max == AV_NOPTS_VALUE) return -1; - for(;;){ + for (;;) { int64_t tmp_pos = pos_max + 1; - int64_t tmp_ts = ff_read_timestamp(s, stream_index, &tmp_pos, INT64_MAX, read_timestamp); - if(tmp_ts == AV_NOPTS_VALUE) + int64_t tmp_ts = ff_read_timestamp(s, stream_index, + &tmp_pos, INT64_MAX, read_timestamp); + if (tmp_ts == AV_NOPTS_VALUE) break; av_assert0(tmp_pos > pos_max); ts_max = tmp_ts; pos_max = tmp_pos; - if(tmp_pos >= filesize) + if (tmp_pos >= filesize) break; } @@ -1833,8 +1881,10 @@ int ff_find_last_ts(AVFormatContext *s, int stream_index, int64_t *ts, int64_t * int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, - int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, - int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )) + int64_t ts_min, int64_t ts_max, + int flags, int64_t *ts_ret, + int64_t (*read_timestamp)(struct AVFormatContext *, int, + int64_t *, int64_t)) { int64_t pos, ts; int64_t start_pos; @@ -1843,107 +1893,114 @@ int64_t ff_gen_search(AVFormatContext *s, int 
stream_index, int64_t target_ts, av_dlog(s, "gen_seek: %d %s\n", stream_index, av_ts2str(target_ts)); - if(ts_min == AV_NOPTS_VALUE){ + if (ts_min == AV_NOPTS_VALUE) { pos_min = s->data_offset; - ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp); + ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp); if (ts_min == AV_NOPTS_VALUE) return -1; } - if(ts_min >= target_ts){ - *ts_ret= ts_min; + if (ts_min >= target_ts) { + *ts_ret = ts_min; return pos_min; } - if(ts_max == AV_NOPTS_VALUE){ + if (ts_max == AV_NOPTS_VALUE) { if ((ret = ff_find_last_ts(s, stream_index, &ts_max, &pos_max, read_timestamp)) < 0) return ret; - pos_limit= pos_max; + pos_limit = pos_max; } - if(ts_max <= target_ts){ - *ts_ret= ts_max; + if (ts_max <= target_ts) { + *ts_ret = ts_max; return pos_max; } - if(ts_min > ts_max){ + if (ts_min > ts_max) return -1; - }else if(ts_min == ts_max){ - pos_limit= pos_min; - } + else if (ts_min == ts_max) + pos_limit = pos_min; - no_change=0; + no_change = 0; while (pos_min < pos_limit) { - av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%s dts_max=%s\n", + av_dlog(s, + "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%s dts_max=%s\n", pos_min, pos_max, av_ts2str(ts_min), av_ts2str(ts_max)); assert(pos_limit <= pos_max); - if(no_change==0){ - int64_t approximate_keyframe_distance= pos_max - pos_limit; + if (no_change == 0) { + int64_t approximate_keyframe_distance = pos_max - pos_limit; // interpolate position (better than dichotomy) - pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min) - + pos_min - approximate_keyframe_distance; - }else if(no_change==1){ - // bisection, if interpolation failed to change min or max pos last time - pos = (pos_min + pos_limit)>>1; - }else{ + pos = av_rescale(target_ts - ts_min, pos_max - pos_min, + ts_max - ts_min) + + pos_min - approximate_keyframe_distance; + } else if (no_change == 1) { + // bisection if interpolation did not change min / max pos last time + pos = (pos_min + pos_limit) >> 1; + } else { /* linear search if bisection failed, can only happen if there - are very few or no keyframes between min/max */ - pos=pos_min; + * are very few or no keyframes between min/max */ + pos = pos_min; } - if(pos <= pos_min) - pos= pos_min + 1; - else if(pos > pos_limit) - pos= pos_limit; - start_pos= pos; - - ts = ff_read_timestamp(s, stream_index, &pos, INT64_MAX, read_timestamp); //may pass pos_limit instead of -1 - if(pos == pos_max) + if (pos <= pos_min) + pos = pos_min + 1; + else if (pos > pos_limit) + pos = pos_limit; + start_pos = pos; + + // May pass pos_limit instead of -1. + ts = ff_read_timestamp(s, stream_index, &pos, INT64_MAX, read_timestamp); + if (pos == pos_max) no_change++; else - no_change=0; - av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %s %s %s target:%s limit:%"PRId64" start:%"PRId64" noc:%d\n", + no_change = 0; + av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %s %s %s" + " target:%s limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, av_ts2str(ts_min), av_ts2str(ts), av_ts2str(ts_max), av_ts2str(target_ts), pos_limit, start_pos, no_change); - if(ts == AV_NOPTS_VALUE){ + if (ts == AV_NOPTS_VALUE) { av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n"); return -1; } assert(ts != AV_NOPTS_VALUE); if (target_ts <= ts) { pos_limit = start_pos - 1; - pos_max = pos; - ts_max = ts; + pos_max = pos; + ts_max = ts; } if (target_ts >= ts) { pos_min = pos; - ts_min = ts; + ts_min = ts; } } - pos = (flags & AVSEEK_FLAG_BACKWARD) ? 
pos_min : pos_max; - ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max; + pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max; + ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max; #if 0 pos_min = pos; - ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp); + ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp); pos_min++; ts_max = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp); av_dlog(s, "pos=0x%"PRIx64" %s<=%s<=%s\n", pos, av_ts2str(ts_min), av_ts2str(target_ts), av_ts2str(ts_max)); #endif - *ts_ret= ts; + *ts_ret = ts; return pos; } -static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){ +static int seek_frame_byte(AVFormatContext *s, int stream_index, + int64_t pos, int flags) +{ int64_t pos_min, pos_max; pos_min = s->data_offset; pos_max = avio_size(s->pb) - 1; - if (pos < pos_min) pos= pos_min; - else if(pos > pos_max) pos= pos_max; + if (pos < pos_min) + pos = pos_min; + else if (pos > pos_max) + pos = pos_max; avio_seek(s->pb, pos, SEEK_SET); @@ -1952,8 +2009,8 @@ static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, in return 0; } -static int seek_frame_generic(AVFormatContext *s, - int stream_index, int64_t timestamp, int flags) +static int seek_frame_generic(AVFormatContext *s, int stream_index, + int64_t timestamp, int flags) { int index; int64_t ret; @@ -1964,35 +2021,36 @@ static int seek_frame_generic(AVFormatContext *s, index = av_index_search_timestamp(st, timestamp, flags); - if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp) + if (index < 0 && st->nb_index_entries && + timestamp < st->index_entries[0].timestamp) return -1; - if(index < 0 || index==st->nb_index_entries-1){ + if (index < 0 || index == st->nb_index_entries - 1) { AVPacket pkt; - int nonkey=0; + int nonkey = 0; - if(st->nb_index_entries){ + if (st->nb_index_entries) { av_assert0(st->index_entries); - ie= &st->index_entries[st->nb_index_entries-1]; + ie = &st->index_entries[st->nb_index_entries - 1]; if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0) return ret; ff_update_cur_dts(s, st, ie->timestamp); - }else{ + } else { if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0) return ret; } for (;;) { int read_status; - do{ + do { read_status = av_read_frame(s, &pkt); } while (read_status == AVERROR(EAGAIN)); if (read_status < 0) break; av_free_packet(&pkt); - if(stream_index == pkt.stream_index && pkt.dts > timestamp){ - if(pkt.flags & AV_PKT_FLAG_KEY) + if (stream_index == pkt.stream_index && pkt.dts > timestamp) { + if (pkt.flags & AV_PKT_FLAG_KEY) break; - if(nonkey++ > 1000 && st->codec->codec_id != AV_CODEC_ID_CDGRAPHICS){ + if (nonkey++ > 1000 && st->codec->codec_id != AV_CODEC_ID_CDGRAPHICS) { av_log(s, AV_LOG_ERROR,"seek_frame_generic failed as this stream seems to contain no keyframes after the target timestamp, %d non keyframes found\n", nonkey); break; } @@ -2004,10 +2062,9 @@ static int seek_frame_generic(AVFormatContext *s, return -1; ff_read_frame_flush(s); - if (s->iformat->read_seek){ - if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0) + if (s->iformat->read_seek) + if (s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0) return 0; - } ie = &st->index_entries[index]; if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0) return ret; @@ -2029,14 +2086,15 @@ static int seek_frame_internal(AVFormatContext *s, int stream_index, return seek_frame_byte(s, stream_index, timestamp, 
flags); } - if(stream_index < 0){ - stream_index= av_find_default_stream_index(s); - if(stream_index < 0) + if (stream_index < 0) { + stream_index = av_find_default_stream_index(s); + if (stream_index < 0) return -1; - st= s->streams[stream_index]; + st = s->streams[stream_index]; /* timestamp for default must be expressed in AV_TIME_BASE units */ - timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num); + timestamp = av_rescale(timestamp, st->time_base.den, + AV_TIME_BASE * (int64_t) st->time_base.num); } /* first, we try the format specific seek */ @@ -2045,22 +2103,22 @@ static int seek_frame_internal(AVFormatContext *s, int stream_index, ret = s->iformat->read_seek(s, stream_index, timestamp, flags); } else ret = -1; - if (ret >= 0) { + if (ret >= 0) return 0; - } - if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) { + if (s->iformat->read_timestamp && + !(s->iformat->flags & AVFMT_NOBINSEARCH)) { ff_read_frame_flush(s); return ff_seek_frame_binary(s, stream_index, timestamp, flags); } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) { ff_read_frame_flush(s); return seek_frame_generic(s, stream_index, timestamp, flags); - } - else + } else return -1; } -int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) +int av_seek_frame(AVFormatContext *s, int stream_index, + int64_t timestamp, int flags) { int ret; @@ -2082,14 +2140,15 @@ int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int f return ret; } -int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags) +int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, + int64_t ts, int64_t max_ts, int flags) { - if(min_ts > ts || max_ts < ts) + if (min_ts > ts || max_ts < ts) return -1; if (stream_index < -1 || stream_index >= (int)s->nb_streams) return AVERROR(EINVAL); - if(s->seek2any>0) + if (s->seek2any>0) flags |= AVSEEK_FLAG_ANY; flags &= ~AVSEEK_FLAG_BACKWARD; @@ -2108,15 +2167,16 @@ int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int AV_ROUND_DOWN | AV_ROUND_PASS_MINMAX); } - ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags); + ret = s->iformat->read_seek2(s, stream_index, min_ts, + ts, max_ts, flags); if (ret >= 0) ret = avformat_queue_attached_pictures(s); return ret; } - if(s->iformat->read_timestamp){ - //try to seek via read_timestamp() + if (s->iformat->read_timestamp) { + // try to seek via read_timestamp() } // Fall back on old API if new is not implemented but old is. 
@@ -2148,7 +2208,7 @@ static int has_duration(AVFormatContext *ic) int i; AVStream *st; - for(i = 0;i < ic->nb_streams; i++) { + for (i = 0; i < ic->nb_streams; i++) { st = ic->streams[i]; if (st->duration != AV_NOPTS_VALUE) return 1; @@ -2173,47 +2233,50 @@ static void update_stream_timings(AVFormatContext *ic) start_time = INT64_MAX; start_time_text = INT64_MAX; - end_time = INT64_MIN; - duration = INT64_MIN; - for(i = 0;i < ic->nb_streams; i++) { + end_time = INT64_MIN; + duration = INT64_MIN; + for (i = 0; i < ic->nb_streams; i++) { st = ic->streams[i]; if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) { - start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q); + start_time1 = av_rescale_q(st->start_time, st->time_base, + AV_TIME_BASE_Q); if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE || st->codec->codec_type == AVMEDIA_TYPE_DATA) { if (start_time1 < start_time_text) start_time_text = start_time1; } else start_time = FFMIN(start_time, start_time1); - end_time1 = AV_NOPTS_VALUE; + end_time1 = AV_NOPTS_VALUE; if (st->duration != AV_NOPTS_VALUE) { - end_time1 = start_time1 - + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q); + end_time1 = start_time1 + + av_rescale_q(st->duration, st->time_base, + AV_TIME_BASE_Q); end_time = FFMAX(end_time, end_time1); } - for(p = NULL; (p = av_find_program_from_stream(ic, p, i)); ){ - if(p->start_time == AV_NOPTS_VALUE || p->start_time > start_time1) + for (p = NULL; (p = av_find_program_from_stream(ic, p, i)); ) { + if (p->start_time == AV_NOPTS_VALUE || p->start_time > start_time1) p->start_time = start_time1; - if(p->end_time < end_time1) + if (p->end_time < end_time1) p->end_time = end_time1; } } if (st->duration != AV_NOPTS_VALUE) { - duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q); - duration = FFMAX(duration, duration1); + duration1 = av_rescale_q(st->duration, st->time_base, + AV_TIME_BASE_Q); + duration = FFMAX(duration, duration1); } } if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE)) start_time = start_time_text; - else if(start_time > start_time_text) + else if (start_time > start_time_text) av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non primary stream starttime %f\n", start_time_text / (float)AV_TIME_BASE); if (start_time != INT64_MAX) { ic->start_time = start_time; if (end_time != INT64_MIN) { if (ic->nb_programs) { - for (i=0; i<ic->nb_programs; i++) { + for (i = 0; i < ic->nb_programs; i++) { p = ic->programs[i]; - if(p->start_time != AV_NOPTS_VALUE && p->end_time > p->start_time) + if (p->start_time != AV_NOPTS_VALUE && p->end_time > p->start_time) duration = FFMAX(duration, p->end_time - p->start_time); } } else @@ -2223,13 +2286,13 @@ static void update_stream_timings(AVFormatContext *ic) if (duration != INT64_MIN && duration > 0 && ic->duration == AV_NOPTS_VALUE) { ic->duration = duration; } - if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) { - /* compute the bitrate */ - double bitrate = (double)filesize * 8.0 * AV_TIME_BASE / - (double)ic->duration; - if (bitrate >= 0 && bitrate <= INT_MAX) - ic->bit_rate = bitrate; - } + if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) { + /* compute the bitrate */ + double bitrate = (double) filesize * 8.0 * AV_TIME_BASE / + (double) ic->duration; + if (bitrate >= 0 && bitrate <= INT_MAX) + ic->bit_rate = bitrate; + } } static void fill_all_stream_timings(AVFormatContext *ic) @@ -2238,13 +2301,15 @@ 
static void fill_all_stream_timings(AVFormatContext *ic) AVStream *st; update_stream_timings(ic); - for(i = 0;i < ic->nb_streams; i++) { + for (i = 0; i < ic->nb_streams; i++) { st = ic->streams[i]; if (st->start_time == AV_NOPTS_VALUE) { - if(ic->start_time != AV_NOPTS_VALUE) - st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base); - if(ic->duration != AV_NOPTS_VALUE) - st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base); + if (ic->start_time != AV_NOPTS_VALUE) + st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, + st->time_base); + if (ic->duration != AV_NOPTS_VALUE) + st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, + st->time_base); } } } @@ -2258,7 +2323,7 @@ static void estimate_timings_from_bit_rate(AVFormatContext *ic) /* if bit_rate is already set, we believe it */ if (ic->bit_rate <= 0) { int bit_rate = 0; - for(i=0;i<ic->nb_streams;i++) { + for (i = 0; i < ic->nb_streams; i++) { st = ic->streams[i]; if (st->codec->bit_rate > 0) { if (INT_MAX - st->codec->bit_rate < bit_rate) { @@ -2276,11 +2341,13 @@ static void estimate_timings_from_bit_rate(AVFormatContext *ic) ic->bit_rate != 0) { filesize = ic->pb ? avio_size(ic->pb) : 0; if (filesize > 0) { - for(i = 0; i < ic->nb_streams; i++) { - st = ic->streams[i]; + for (i = 0; i < ic->nb_streams; i++) { + st = ic->streams[i]; if ( st->time_base.num <= INT64_MAX / ic->bit_rate && st->duration == AV_NOPTS_VALUE) { - duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num); + duration = av_rescale(8 * filesize, st->time_base.den, + ic->bit_rate * + (int64_t) st->time_base.num); st->duration = duration; show_warning = 1; } @@ -2288,7 +2355,8 @@ static void estimate_timings_from_bit_rate(AVFormatContext *ic) } } if (show_warning) - av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n"); + av_log(ic, AV_LOG_WARNING, + "Estimating duration from bitrate, this may be inaccurate\n"); } #define DURATION_MAX_READ_SIZE 250000LL @@ -2302,19 +2370,20 @@ static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset) int read_size, i, ret; int64_t end_time; int64_t filesize, offset, duration; - int retry=0; + int retry = 0; /* flush packet queue */ flush_packet_queue(ic); - for (i=0; i<ic->nb_streams; i++) { + for (i = 0; i < ic->nb_streams; i++) { st = ic->streams[i]; if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE) - av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n"); + av_log(st->codec, AV_LOG_WARNING, + "start time is not set in estimate_timings_from_pts\n"); if (st->parser) { av_parser_close(st->parser); - st->parser= NULL; + st->parser = NULL; } } @@ -2322,24 +2391,24 @@ static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset) /* XXX: may need to support wrapping */ filesize = ic->pb ? 
avio_size(ic->pb) : 0; end_time = AV_NOPTS_VALUE; - do{ - offset = filesize - (DURATION_MAX_READ_SIZE<<retry); + do { + offset = filesize - (DURATION_MAX_READ_SIZE << retry); if (offset < 0) offset = 0; avio_seek(ic->pb, offset, SEEK_SET); read_size = 0; - for(;;) { - if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0))) + for (;;) { + if (read_size >= DURATION_MAX_READ_SIZE << (FFMAX(retry - 1, 0))) break; do { ret = ff_read_packet(ic, pkt); - } while(ret == AVERROR(EAGAIN)); + } while (ret == AVERROR(EAGAIN)); if (ret != 0) break; read_size += pkt->size; - st = ic->streams[pkt->stream_index]; + st = ic->streams[pkt->stream_index]; if (pkt->pts != AV_NOPTS_VALUE && (st->start_time != AV_NOPTS_VALUE || st->first_dts != AV_NOPTS_VALUE)) { @@ -2349,7 +2418,7 @@ static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset) else duration -= st->first_dts; if (duration > 0) { - if (st->duration == AV_NOPTS_VALUE || st->info->last_duration<=0 || + if (st->duration == AV_NOPTS_VALUE || st->info->last_duration<= 0 || (st->duration < duration && FFABS(duration - st->info->last_duration) < 60LL*st->time_base.den / st->time_base.num)) st->duration = duration; st->info->last_duration = duration; @@ -2357,16 +2426,16 @@ static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset) } av_free_packet(pkt); } - }while( end_time==AV_NOPTS_VALUE - && filesize > (DURATION_MAX_READ_SIZE<<retry) - && ++retry <= DURATION_MAX_RETRY); + } while (end_time == AV_NOPTS_VALUE && + filesize > (DURATION_MAX_READ_SIZE << retry) && + ++retry <= DURATION_MAX_RETRY); fill_all_stream_timings(ic); avio_seek(ic->pb, old_offset, SEEK_SET); - for (i=0; i<ic->nb_streams; i++) { - st= ic->streams[i]; - st->cur_dts= st->first_dts; + for (i = 0; i < ic->nb_streams; i++) { + st = ic->streams[i]; + st->cur_dts = st->first_dts; st->last_IP_pts = AV_NOPTS_VALUE; } } @@ -2391,7 +2460,7 @@ static void estimate_timings(AVFormatContext *ic, int64_t old_offset) ic->duration_estimation_method = AVFMT_DURATION_FROM_PTS; } else if (has_duration(ic)) { /* at least one component has timings - we use them for all - the components */ + * the components */ fill_all_stream_timings(ic); ic->duration_estimation_method = AVFMT_DURATION_FROM_STREAM; } else { @@ -2404,13 +2473,14 @@ static void estimate_timings(AVFormatContext *ic, int64_t old_offset) { int i; AVStream av_unused *st; - for(i = 0;i < ic->nb_streams; i++) { + for (i = 0; i < ic->nb_streams; i++) { st = ic->streams[i]; av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i, (double) st->start_time / AV_TIME_BASE, (double) st->duration / AV_TIME_BASE); } - av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n", + av_dlog(ic, + "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n", (double) ic->start_time / AV_TIME_BASE, (double) ic->duration / AV_TIME_BASE, ic->bit_rate / 1000); @@ -2431,7 +2501,8 @@ static int has_codec_parameters(AVStream *st, const char **errmsg_ptr) case AVMEDIA_TYPE_AUDIO: if (!avctx->frame_size && determinable_frame_size(avctx)) FAIL("unspecified frame size"); - if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE) + if (st->info->found_decoder >= 0 && + avctx->sample_fmt == AV_SAMPLE_FMT_NONE) FAIL("unspecified sample format"); if (!avctx->sample_rate) FAIL("unspecified sample rate"); @@ -2454,7 +2525,7 @@ static int has_codec_parameters(AVStream *st, const char **errmsg_ptr) FAIL("unspecified size"); break; case AVMEDIA_TYPE_DATA: - if(avctx->codec_id == AV_CODEC_ID_NONE) 
return 1; + if (avctx->codec_id == AV_CODEC_ID_NONE) return 1; } if (avctx->codec_id == AV_CODEC_ID_NONE) @@ -2463,7 +2534,8 @@ static int has_codec_parameters(AVStream *st, const char **errmsg_ptr) } /* returns 1 or 0 if or if not decoded data was returned, or a negative error */ -static int try_decode_frame(AVFormatContext *s, AVStream *st, AVPacket *avpkt, AVDictionary **options) +static int try_decode_frame(AVFormatContext *s, AVStream *st, AVPacket *avpkt, + AVDictionary **options) { const AVCodec *codec; int got_picture = 1, ret = 0; @@ -2481,12 +2553,12 @@ static int try_decode_frame(AVFormatContext *s, AVStream *st, AVPacket *avpkt, A if (!codec) { st->info->found_decoder = -1; - ret = -1; + ret = -1; goto fail; } - /* force thread count to 1 since the h264 decoder will not extract SPS - * and PPS to extradata during multi-threaded decoding */ + /* Force thread count to 1 since the H.264 decoder will not extract + * SPS and PPS to extradata during multi-threaded decoding. */ av_dict_set(options ? options : &thread_opt, "threads", "1", 0); ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt); if (!options) @@ -2506,11 +2578,11 @@ static int try_decode_frame(AVFormatContext *s, AVStream *st, AVPacket *avpkt, A while ((pkt.size > 0 || (!pkt.data && got_picture)) && ret >= 0 && - (!has_codec_parameters(st, NULL) || - !has_decode_delay_been_guessed(st) || - (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) { + (!has_codec_parameters(st, NULL) || !has_decode_delay_been_guessed(st) || + (!st->codec_info_nb_frames && + st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) { got_picture = 0; - switch(st->codec->codec_type) { + switch (st->codec->codec_type) { case AVMEDIA_TYPE_VIDEO: ret = avcodec_decode_video2(st->codec, frame, &got_picture, &pkt); @@ -2535,7 +2607,7 @@ static int try_decode_frame(AVFormatContext *s, AVStream *st, AVPacket *avpkt, A } } - if(!pkt.data && !got_picture) + if (!pkt.data && !got_picture) ret = -1; fail: @@ -2556,14 +2628,12 @@ unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id) enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag) { int i; - for(i=0; tags[i].id != AV_CODEC_ID_NONE;i++) { - if(tag == tags[i].tag) + for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++) + if (tag == tags[i].tag) return tags[i].id; - } - for(i=0; tags[i].id != AV_CODEC_ID_NONE; i++) { + for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++) if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag)) return tags[i].id; - } return AV_CODEC_ID_NONE; } @@ -2571,34 +2641,47 @@ enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags) { if (flt) { switch (bps) { - case 32: return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE; - case 64: return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE; - default: return AV_CODEC_ID_NONE; + case 32: + return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE; + case 64: + return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE; + default: + return AV_CODEC_ID_NONE; } } else { bps += 7; bps >>= 3; if (sflags & (1 << (bps - 1))) { switch (bps) { - case 1: return AV_CODEC_ID_PCM_S8; - case 2: return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE; - case 3: return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE; - case 4: return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE; - default: return AV_CODEC_ID_NONE; + case 1: + return AV_CODEC_ID_PCM_S8; + case 2: + return be ? 
AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE; + case 3: + return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE; + case 4: + return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE; + default: + return AV_CODEC_ID_NONE; } } else { switch (bps) { - case 1: return AV_CODEC_ID_PCM_U8; - case 2: return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE; - case 3: return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE; - case 4: return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE; - default: return AV_CODEC_ID_NONE; + case 1: + return AV_CODEC_ID_PCM_U8; + case 2: + return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE; + case 3: + return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE; + case 4: + return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE; + default: + return AV_CODEC_ID_NONE; } } } } -unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum AVCodecID id) +unsigned int av_codec_get_tag(const AVCodecTag *const *tags, enum AVCodecID id) { unsigned int tag; if (!av_codec_get_tag2(tags, id, &tag)) @@ -2610,7 +2693,7 @@ int av_codec_get_tag2(const AVCodecTag * const *tags, enum AVCodecID id, unsigned int *tag) { int i; - for(i=0; tags && tags[i]; i++){ + for (i = 0; tags && tags[i]; i++) { const AVCodecTag *codec_tags = tags[i]; while (codec_tags->id != AV_CODEC_ID_NONE) { if (codec_tags->id == id) { @@ -2623,12 +2706,13 @@ int av_codec_get_tag2(const AVCodecTag * const *tags, enum AVCodecID id, return 0; } -enum AVCodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag) +enum AVCodecID av_codec_get_id(const AVCodecTag *const *tags, unsigned int tag) { int i; - for(i=0; tags && tags[i]; i++){ - enum AVCodecID id= ff_codec_get_id(tags[i], tag); - if(id!=AV_CODEC_ID_NONE) return id; + for (i = 0; tags && tags[i]; i++) { + enum AVCodecID id = ff_codec_get_id(tags[i], tag); + if (id != AV_CODEC_ID_NONE) + return id; } return AV_CODEC_ID_NONE; } @@ -2636,17 +2720,20 @@ enum AVCodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag) static void compute_chapters_end(AVFormatContext *s) { unsigned int i, j; - int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time); + int64_t max_time = s->duration + + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time); for (i = 0; i < s->nb_chapters; i++) if (s->chapters[i]->end == AV_NOPTS_VALUE) { AVChapter *ch = s->chapters[i]; - int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base) - : INT64_MAX; + int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, + ch->time_base) + : INT64_MAX; for (j = 0; j < s->nb_chapters; j++) { - AVChapter *ch1 = s->chapters[j]; - int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base); + AVChapter *ch1 = s->chapters[j]; + int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, + ch->time_base); if (j != i && next_start > ch->start && next_start < end) end = next_start; } @@ -2654,28 +2741,29 @@ static void compute_chapters_end(AVFormatContext *s) } } -static int get_std_framerate(int i){ - if(i<60*12) return (i+1)*1001; - else return ((const int[]){24,30,60,12,15,48})[i-60*12]*1000*12; +static int get_std_framerate(int i) +{ + if (i < 60 * 12) + return (i + 1) * 1001; + else + return ((const int[]) { 24, 30, 60, 12, 15, 48 })[i - 60 * 12] * 1000 * 12; } -/* - * Is the time base unreliable. +/* Is the time base unreliable? * This is a heuristic to balance between quick acceptance of the values in * the headers vs. some extra checks. 
* Old DivX and Xvid often have nonsense timebases like 1fps or 2fps. * MPEG-2 commonly misuses field repeat flags to store different framerates. - * And there are "variable" fps files this needs to detect as well. - */ -static int tb_unreliable(AVCodecContext *c){ - if( c->time_base.den >= 101L*c->time_base.num - || c->time_base.den < 5L*c->time_base.num -/* || c->codec_tag == AV_RL32("DIVX") - || c->codec_tag == AV_RL32("XVID")*/ - || c->codec_tag == AV_RL32("mp4v") - || c->codec_id == AV_CODEC_ID_MPEG2VIDEO - || c->codec_id == AV_CODEC_ID_H264 - ) + * And there are "variable" fps files this needs to detect as well. */ +static int tb_unreliable(AVCodecContext *c) +{ + if (c->time_base.den >= 101L * c->time_base.num || + c->time_base.den < 5L * c->time_base.num || + // c->codec_tag == AV_RL32("DIVX") || + // c->codec_tag == AV_RL32("XVID") || + c->codec_tag == AV_RL32("mp4v") || + c->codec_id == AV_CODEC_ID_MPEG2VIDEO || + c->codec_id == AV_CODEC_ID_H264) return 1; return 0; } @@ -2728,24 +2816,24 @@ int ff_rfps_add_frame(AVFormatContext *ic, AVStream *st, int64_t ts) int i, j; int64_t last = st->info->last_dts; - if( ts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && ts > last - && ts - (uint64_t)last < INT64_MAX){ - double dts= (is_relative(ts) ? ts - RELATIVE_TS_BASE : ts) * av_q2d(st->time_base); - int64_t duration= ts - last; + if ( ts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && ts > last + && ts - (uint64_t)last < INT64_MAX) { + double dts = (is_relative(ts) ? ts - RELATIVE_TS_BASE : ts) * av_q2d(st->time_base); + int64_t duration = ts - last; if (!st->info->duration_error) st->info->duration_error = av_mallocz(sizeof(st->info->duration_error[0])*2); if (!st->info->duration_error) return AVERROR(ENOMEM); -// if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO) +// if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) // av_log(NULL, AV_LOG_ERROR, "%f\n", dts); - for (i=0; i<MAX_STD_TIMEBASES; i++) { + for (i = 0; i<MAX_STD_TIMEBASES; i++) { if (st->info->duration_error[0][1][i] < 1e10) { - int framerate= get_std_framerate(i); - double sdts= dts*framerate/(1001*12); - for(j=0; j<2; j++){ - int64_t ticks= llrint(sdts+j*0.5); + int framerate = get_std_framerate(i); + double sdts = dts*framerate/(1001*12); + for (j= 0; j<2; j++) { + int64_t ticks = llrint(sdts+j*0.5); double error= sdts - ticks + j*0.5; st->info->duration_error[j][0][i] += error; st->info->duration_error[j][1][i] += error*error; @@ -2757,7 +2845,7 @@ int ff_rfps_add_frame(AVFormatContext *ic, AVStream *st, int64_t ts) if (st->info->duration_count % 10 == 0) { int n = st->info->duration_count; - for (i=0; i<MAX_STD_TIMEBASES; i++) { + for (i = 0; i<MAX_STD_TIMEBASES; i++) { if (st->info->duration_error[0][1][i] < 1e10) { double a0 = st->info->duration_error[0][0][i] / n; double error0 = st->info->duration_error[0][1][i] / n - a0*a0; @@ -2785,7 +2873,7 @@ void ff_rfps_calculate(AVFormatContext *ic) { int i, j; - for (i = 0; i<ic->nb_streams; i++) { + for (i = 0; i < ic->nb_streams; i++) { AVStream *st = ic->streams[i]; if (st->codec->codec_type != AVMEDIA_TYPE_VIDEO) @@ -2800,27 +2888,27 @@ void ff_rfps_calculate(AVFormatContext *ic) int num = 0; double best_error= 0.01; - for (j=0; j<MAX_STD_TIMEBASES; j++) { + for (j= 0; j<MAX_STD_TIMEBASES; j++) { int k; - if(st->info->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j)) + if (st->info->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j)) continue; - 
if(!st->info->codec_info_duration && 1.0 < (1001*12.0)/get_std_framerate(j)) + if (!st->info->codec_info_duration && 1.0 < (1001*12.0)/get_std_framerate(j)) continue; if (av_q2d(st->time_base) * st->info->rfps_duration_sum / st->info->duration_count < (1001*12.0 * 0.8)/get_std_framerate(j)) continue; - for(k=0; k<2; k++){ - int n= st->info->duration_count; + for (k= 0; k<2; k++) { + int n = st->info->duration_count; double a= st->info->duration_error[k][0][j] / n; double error= st->info->duration_error[k][1][j]/n - a*a; - if(error < best_error && best_error> 0.000000001){ + if (error < best_error && best_error> 0.000000001) { best_error= error; num = get_std_framerate(j); } - if(error < 0.02) + if (error < 0.02) av_log(NULL, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error); } } @@ -2842,33 +2930,34 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) int64_t read_size; AVStream *st; AVPacket pkt1, *pkt; - int64_t old_offset = avio_tell(ic->pb); - int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those - int flush_codecs = ic->probesize > 0; + int64_t old_offset = avio_tell(ic->pb); + // new streams might appear, no options for those + int orig_nb_streams = ic->nb_streams; + int flush_codecs = ic->probesize > 0; - if(ic->pb) + if (ic->pb) av_log(ic, AV_LOG_DEBUG, "Before avformat_find_stream_info() pos: %"PRId64" bytes read:%"PRId64" seeks:%d\n", avio_tell(ic->pb), ic->pb->bytes_read, ic->pb->seek_count); - for(i=0;i<ic->nb_streams;i++) { + for (i = 0; i < ic->nb_streams; i++) { const AVCodec *codec; AVDictionary *thread_opt = NULL; st = ic->streams[i]; if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO || st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) { -/* if(!st->time_base.num) - st->time_base= */ - if(!st->codec->time_base.num) - st->codec->time_base= st->time_base; +/* if (!st->time_base.num) + st->time_base = */ + if (!st->codec->time_base.num) + st->codec->time_base = st->time_base; } - //only for the split stuff + // only for the split stuff if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) { st->parser = av_parser_init(st->codec->codec_id); - if(st->parser){ - if(st->need_parsing == AVSTREAM_PARSE_HEADERS){ + if (st->parser) { + if (st->need_parsing == AVSTREAM_PARSE_HEADERS) { st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES; - } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) { + } else if (st->need_parsing == AVSTREAM_PARSE_FULL_RAW) { st->parser->flags |= PARSER_FLAG_USE_CODEC_TS; } } else if (st->need_parsing) { @@ -2879,28 +2968,30 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) } codec = find_decoder(ic, st, st->codec->codec_id); - /* force thread count to 1 since the h264 decoder will not extract SPS - * and PPS to extradata during multi-threaded decoding */ + /* Force thread count to 1 since the H.264 decoder will not extract + * SPS and PPS to extradata during multi-threaded decoding. */ av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0); /* Ensure that subtitle_header is properly set. */ if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE && codec && !st->codec->codec) { if (avcodec_open2(st->codec, codec, options ? 
&options[i] : &thread_opt) < 0) - av_log(ic, AV_LOG_WARNING, "Failed to open codec in av_find_stream_info\n"); + av_log(ic, AV_LOG_WARNING, + "Failed to open codec in av_find_stream_info\n"); } - //try to just open decoders, in case this is enough to get parameters + // Try to just open decoders, in case this is enough to get parameters. if (!has_codec_parameters(st, NULL) && st->request_probe <= 0) { if (codec && !st->codec->codec) if (avcodec_open2(st->codec, codec, options ? &options[i] : &thread_opt) < 0) - av_log(ic, AV_LOG_WARNING, "Failed to open codec in av_find_stream_info\n"); + av_log(ic, AV_LOG_WARNING, + "Failed to open codec in av_find_stream_info\n"); } if (!options) av_dict_free(&thread_opt); } - for (i=0; i<ic->nb_streams; i++) { + for (i = 0; i < ic->nb_streams; i++) { #if FF_API_R_FRAME_RATE ic->streams[i]->info->last_dts = AV_NOPTS_VALUE; #endif @@ -2908,25 +2999,25 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE; } - count = 0; + count = 0; read_size = 0; - for(;;) { - if (ff_check_interrupt(&ic->interrupt_callback)){ - ret= AVERROR_EXIT; + for (;;) { + if (ff_check_interrupt(&ic->interrupt_callback)) { + ret = AVERROR_EXIT; av_log(ic, AV_LOG_DEBUG, "interrupted\n"); break; } /* check if one codec still needs to be handled */ - for(i=0;i<ic->nb_streams;i++) { + for (i = 0; i < ic->nb_streams; i++) { int fps_analyze_framecount = 20; st = ic->streams[i]; if (!has_codec_parameters(st, NULL)) break; - /* if the timebase is coarse (like the usual millisecond precision - of mkv), we need to analyze more frames to reliably arrive at - the correct fps */ + /* If the timebase is coarse (like the usual millisecond precision + * of mkv), we need to analyze more frames to reliably arrive at + * the correct fps. */ if (av_q2d(st->time_base) > 0.0005) fps_analyze_framecount *= 2; if (ic->fps_probe_size >= 0) @@ -2934,11 +3025,13 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) if (st->disposition & AV_DISPOSITION_ATTACHED_PIC) fps_analyze_framecount = 0; /* variable fps and no guess at the real fps */ - if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num) - && st->info->duration_count < fps_analyze_framecount - && st->codec->codec_type == AVMEDIA_TYPE_VIDEO) + if (tb_unreliable(st->codec) && + !(st->r_frame_rate.num && st->avg_frame_rate.num) && + st->info->duration_count < fps_analyze_framecount && + st->codec->codec_type == AVMEDIA_TYPE_VIDEO) break; - if(st->parser && st->parser->parser->split && !st->codec->extradata) + if (st->parser && st->parser->parser->split && + !st->codec->extradata) break; if (st->first_dts == AV_NOPTS_VALUE && (st->codec->codec_type == AVMEDIA_TYPE_VIDEO || @@ -2946,21 +3039,21 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) break; } if (i == ic->nb_streams) { - /* NOTE: if the format has no header, then we need to read - some packets to get most of the streams, so we cannot - stop here */ + /* NOTE: If the format has no header, then we need to read some + * packets to get most of the streams, so we cannot stop here. */ if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) { - /* if we found the info for all the codecs, we can stop */ + /* If we found the info for all the codecs, we can stop. 
*/ ret = count; av_log(ic, AV_LOG_DEBUG, "All info found\n"); flush_codecs = 0; break; } } - /* we did not get all the codec info, but we read too much data */ + /* We did not get all the codec info, but we read too much data. */ if (read_size >= ic->probesize) { ret = count; - av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit of %d bytes reached\n", ic->probesize); + av_log(ic, AV_LOG_DEBUG, + "Probe buffer size limit of %d bytes reached\n", ic->probesize); for (i = 0; i < ic->nb_streams; i++) if (!ic->streams[i]->r_frame_rate.num && ic->streams[i]->info->duration_count <= 1 && @@ -2971,8 +3064,8 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) break; } - /* NOTE: a new stream can be added there if no header in file - (AVFMTCTX_NOHEADER) */ + /* NOTE: A new stream can be added there if no header in file + * (AVFMTCTX_NOHEADER). */ ret = read_frame_internal(ic, &pkt1); if (ret == AVERROR(EAGAIN)) continue; @@ -3003,24 +3096,31 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) /* check for non-increasing dts */ if (st->info->fps_last_dts != AV_NOPTS_VALUE && st->info->fps_last_dts >= pkt->dts) { - av_log(ic, AV_LOG_DEBUG, "Non-increasing DTS in stream %d: " - "packet %d with DTS %"PRId64", packet %d with DTS " - "%"PRId64"\n", st->index, st->info->fps_last_dts_idx, - st->info->fps_last_dts, st->codec_info_nb_frames, pkt->dts); - st->info->fps_first_dts = st->info->fps_last_dts = AV_NOPTS_VALUE; + av_log(ic, AV_LOG_DEBUG, + "Non-increasing DTS in stream %d: packet %d with DTS " + "%"PRId64", packet %d with DTS %"PRId64"\n", + st->index, st->info->fps_last_dts_idx, + st->info->fps_last_dts, st->codec_info_nb_frames, + pkt->dts); + st->info->fps_first_dts = + st->info->fps_last_dts = AV_NOPTS_VALUE; } - /* check for a discontinuity in dts - if the difference in dts - * is more than 1000 times the average packet duration in the sequence, - * we treat it as a discontinuity */ + /* Check for a discontinuity in dts. If the difference in dts + * is more than 1000 times the average packet duration in the + * sequence, we treat it as a discontinuity. 
*/ if (st->info->fps_last_dts != AV_NOPTS_VALUE && st->info->fps_last_dts_idx > st->info->fps_first_dts_idx && (pkt->dts - st->info->fps_last_dts) / 1000 > - (st->info->fps_last_dts - st->info->fps_first_dts) / (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) { - av_log(ic, AV_LOG_WARNING, "DTS discontinuity in stream %d: " - "packet %d with DTS %"PRId64", packet %d with DTS " - "%"PRId64"\n", st->index, st->info->fps_last_dts_idx, - st->info->fps_last_dts, st->codec_info_nb_frames, pkt->dts); - st->info->fps_first_dts = st->info->fps_last_dts = AV_NOPTS_VALUE; + (st->info->fps_last_dts - st->info->fps_first_dts) / + (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) { + av_log(ic, AV_LOG_WARNING, + "DTS discontinuity in stream %d: packet %d with DTS " + "%"PRId64", packet %d with DTS %"PRId64"\n", + st->index, st->info->fps_last_dts_idx, + st->info->fps_last_dts, st->codec_info_nb_frames, + pkt->dts); + st->info->fps_first_dts = + st->info->fps_last_dts = AV_NOPTS_VALUE; } /* update stored dts values */ @@ -3028,17 +3128,17 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) st->info->fps_first_dts = pkt->dts; st->info->fps_first_dts_idx = st->codec_info_nb_frames; } - st->info->fps_last_dts = pkt->dts; + st->info->fps_last_dts = pkt->dts; st->info->fps_last_dts_idx = st->codec_info_nb_frames; } if (st->codec_info_nb_frames>1) { - int64_t t=0; + int64_t t = 0; if (st->time_base.den > 0) t = av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q); if (st->avg_frame_rate.num > 0) t = FFMAX(t, av_rescale_q(st->codec_info_nb_frames, av_inv_q(st->avg_frame_rate), AV_TIME_BASE_Q)); - if ( t==0 + if ( t == 0 && st->codec_info_nb_frames>30 && st->info->fps_first_dts != AV_NOPTS_VALUE && st->info->fps_last_dts != AV_NOPTS_VALUE) @@ -3050,31 +3150,33 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) } if (pkt->duration) { st->info->codec_info_duration += pkt->duration; - st->info->codec_info_duration_fields += st->parser && st->need_parsing && st->codec->ticks_per_frame==2 ? st->parser->repeat_pict + 1 : 2; + st->info->codec_info_duration_fields += st->parser && st->need_parsing && st->codec->ticks_per_frame ==2 ? st->parser->repeat_pict + 1 : 2; } } #if FF_API_R_FRAME_RATE ff_rfps_add_frame(ic, st, pkt->dts); #endif - if(st->parser && st->parser->parser->split && !st->codec->extradata){ - int i= st->parser->parser->split(st->codec, pkt->data, pkt->size); + if (st->parser && st->parser->parser->split && !st->codec->extradata) { + int i = st->parser->parser->split(st->codec, pkt->data, pkt->size); if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) { if (ff_alloc_extradata(st->codec, i)) return AVERROR(ENOMEM); - memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size); + memcpy(st->codec->extradata, pkt->data, + st->codec->extradata_size); } } - /* if still no information, we try to open the codec and to - decompress the frame. We try to avoid that in most cases as - it takes longer and uses more memory. For MPEG-4, we need to - decompress for QuickTime. - - If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at - least one frame of codec data, this makes sure the codec initializes - the channel configuration and does not only trust the values from the container. - */ - try_decode_frame(ic, st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL); + /* If still no information, we try to open the codec and to + * decompress the frame. 
We try to avoid that in most cases as + * it takes longer and uses more memory. For MPEG-4, we need to + * decompress for QuickTime. + * + * If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at + * least one frame of codec data, this makes sure the codec initializes + * the channel configuration and does not only trust the values from + * the container. */ + try_decode_frame(ic, st, pkt, + (options && i < orig_nb_streams) ? &options[i] : NULL); st->codec_info_nb_frames++; count++; @@ -3085,7 +3187,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) int err = 0; av_init_packet(&empty_pkt); - for(i=0;i<ic->nb_streams;i++) { + for (i = 0; i < ic->nb_streams; i++) { st = ic->streams[i]; @@ -3093,8 +3195,8 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) if (st->info->found_decoder == 1) { do { err = try_decode_frame(ic, st, &empty_pkt, - (options && i < orig_nb_streams) ? - &options[i] : NULL); + (options && i < orig_nb_streams) + ? &options[i] : NULL); } while (err > 0 && !has_codec_parameters(st, NULL)); if (err < 0) { @@ -3106,25 +3208,27 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) } // close codecs which were opened in try_decode_frame() - for(i=0;i<ic->nb_streams;i++) { + for (i = 0; i < ic->nb_streams; i++) { st = ic->streams[i]; avcodec_close(st->codec); } ff_rfps_calculate(ic); - for(i=0;i<ic->nb_streams;i++) { + for (i = 0; i < ic->nb_streams; i++) { st = ic->streams[i]; if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { - if(st->codec->codec_id == AV_CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample){ + if (st->codec->codec_id == AV_CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample) { uint32_t tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt); if (avpriv_find_pix_fmt(ff_raw_pix_fmt_tags, tag) == st->codec->pix_fmt) st->codec->codec_tag= tag; } /* estimate average framerate if not set by demuxer */ - if (st->info->codec_info_duration_fields && !st->avg_frame_rate.num && st->info->codec_info_duration) { - int best_fps = 0; + if (st->info->codec_info_duration_fields && + !st->avg_frame_rate.num && + st->info->codec_info_duration) { + int best_fps = 0; double best_error = 0.01; if (st->info->codec_info_duration >= INT64_MAX / st->time_base.num / 2|| @@ -3132,61 +3236,68 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) st->info->codec_info_duration < 0) continue; av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den, - st->info->codec_info_duration_fields*(int64_t)st->time_base.den, - st->info->codec_info_duration*2*(int64_t)st->time_base.num, 60000); + st->info->codec_info_duration_fields * (int64_t) st->time_base.den, + st->info->codec_info_duration * 2 * (int64_t) st->time_base.num, 60000); - /* round guessed framerate to a "standard" framerate if it's - * within 1% of the original estimate*/ + /* Round guessed framerate to a "standard" framerate if it's + * within 1% of the original estimate. 
*/ for (j = 1; j < MAX_STD_TIMEBASES; j++) { - AVRational std_fps = { get_std_framerate(j), 12*1001 }; - double error = fabs(av_q2d(st->avg_frame_rate) / av_q2d(std_fps) - 1); + AVRational std_fps = { get_std_framerate(j), 12 * 1001 }; + double error = fabs(av_q2d(st->avg_frame_rate) / + av_q2d(std_fps) - 1); if (error < best_error) { best_error = error; best_fps = std_fps.num; } } - if (best_fps) { + if (best_fps) av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den, - best_fps, 12*1001, INT_MAX); - } + best_fps, 12 * 1001, INT_MAX); } - if (!st->r_frame_rate.num){ - if( st->codec->time_base.den * (int64_t)st->time_base.num - <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){ + if (!st->r_frame_rate.num) { + if ( st->codec->time_base.den * (int64_t) st->time_base.num + <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t) st->time_base.den) { st->r_frame_rate.num = st->codec->time_base.den; st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame; - }else{ + } else { st->r_frame_rate.num = st->time_base.den; st->r_frame_rate.den = st->time_base.num; } } - }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { - if(!st->codec->bits_per_coded_sample) - st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id); + } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { + if (!st->codec->bits_per_coded_sample) + st->codec->bits_per_coded_sample = + av_get_bits_per_sample(st->codec->codec_id); // set stream disposition based on audio service type switch (st->codec->audio_service_type) { case AV_AUDIO_SERVICE_TYPE_EFFECTS: - st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break; + st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; + break; case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED: - st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break; + st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; + break; case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED: - st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break; + st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; + break; case AV_AUDIO_SERVICE_TYPE_COMMENTARY: - st->disposition = AV_DISPOSITION_COMMENT; break; + st->disposition = AV_DISPOSITION_COMMENT; + break; case AV_AUDIO_SERVICE_TYPE_KARAOKE: - st->disposition = AV_DISPOSITION_KARAOKE; break; + st->disposition = AV_DISPOSITION_KARAOKE; + break; } } } - if(ic->probesize) + if (ic->probesize) estimate_timings(ic, old_offset); if (ret >= 0 && ic->nb_streams) - ret = -1; /* we could not have all the codec parameters before EOF */ - for(i=0;i<ic->nb_streams;i++) { + /* We could not have all the codec parameters before EOF. 
*/ + ret = -1; + for (i = 0; i < ic->nb_streams; i++) { const char *errmsg; st = ic->streams[i]; if (!has_codec_parameters(st, &errmsg)) { @@ -3203,8 +3314,8 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) compute_chapters_end(ic); - find_stream_info_err: - for (i=0; i < ic->nb_streams; i++) { +find_stream_info_err: + for (i = 0; i < ic->nb_streams; i++) { st = ic->streams[i]; if (ic->streams[i]->codec->codec_type != AVMEDIA_TYPE_AUDIO) ic->streams[i]->codec->thread_count = 0; @@ -3212,7 +3323,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) av_freep(&st->info->duration_error); av_freep(&ic->streams[i]->info); } - if(ic->pb) + if (ic->pb) av_log(ic, AV_LOG_DEBUG, "After avformat_find_stream_info() pos: %"PRId64" bytes read:%"PRId64" seeks:%d frames:%d\n", avio_tell(ic->pb), ic->pb->bytes_read, ic->pb->seek_count, count); return ret; @@ -3235,12 +3346,9 @@ AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int return NULL; } -int av_find_best_stream(AVFormatContext *ic, - enum AVMediaType type, - int wanted_stream_nb, - int related_stream, - AVCodec **decoder_ret, - int flags) +int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, + int wanted_stream_nb, int related_stream, + AVCodec **decoder_ret, int flags) { int i, nb_streams = ic->nb_streams; int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1, best_bitrate = -1, best_multiframe = -1, count, bitrate, multiframe; @@ -3250,19 +3358,20 @@ int av_find_best_stream(AVFormatContext *ic, if (related_stream >= 0 && wanted_stream_nb < 0) { AVProgram *p = av_find_program_from_stream(ic, NULL, related_stream); if (p) { - program = p->stream_index; + program = p->stream_index; nb_streams = p->nb_stream_indexes; } } for (i = 0; i < nb_streams; i++) { int real_stream_index = program ? 
program[i] : i; - AVStream *st = ic->streams[real_stream_index]; + AVStream *st = ic->streams[real_stream_index]; AVCodecContext *avctx = st->codec; if (avctx->codec_type != type) continue; if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb) continue; - if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED)) + if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED | + AV_DISPOSITION_VISUAL_IMPAIRED)) continue; if (type == AVMEDIA_TYPE_AUDIO && !avctx->channels) continue; @@ -3281,15 +3390,16 @@ int av_find_best_stream(AVFormatContext *ic, (best_multiframe == multiframe && best_bitrate > bitrate) || (best_multiframe == multiframe && best_bitrate == bitrate && best_count >= count)) continue; - best_count = count; + best_count = count; best_bitrate = bitrate; best_multiframe = multiframe; - ret = real_stream_index; + ret = real_stream_index; best_decoder = decoder; if (program && i == nb_streams - 1 && ret < 0) { - program = NULL; + program = NULL; nb_streams = ic->nb_streams; - i = 0; /* no related stream found, try again with everything */ + /* no related stream found, try again with everything */ + i = 0; } } if (decoder_ret) @@ -3317,9 +3427,9 @@ int av_read_pause(AVFormatContext *s) return AVERROR(ENOSYS); } -void ff_free_stream(AVFormatContext *s, AVStream *st){ +void ff_free_stream(AVFormatContext *s, AVStream *st) { av_assert0(s->nb_streams>0); - av_assert0(s->streams[ s->nb_streams-1 ] == st); + av_assert0(s->streams[ s->nb_streams - 1 ] == st); if (st->parser) { av_parser_close(st->parser); @@ -3350,17 +3460,17 @@ void avformat_free_context(AVFormatContext *s) if (s->iformat && s->iformat->priv_class && s->priv_data) av_opt_free(s->priv_data); - for(i=s->nb_streams-1; i>=0; i--) { + for (i = s->nb_streams - 1; i >= 0; i--) { ff_free_stream(s, s->streams[i]); } - for(i=s->nb_programs-1; i>=0; i--) { + for (i = s->nb_programs - 1; i >= 0; i--) { av_dict_free(&s->programs[i]->metadata); av_freep(&s->programs[i]->stream_index); av_freep(&s->programs[i]); } av_freep(&s->programs); av_freep(&s->priv_data); - while(s->nb_chapters--) { + while (s->nb_chapters--) { av_dict_free(&s->chapters[s->nb_chapters]->metadata); av_freep(&s->chapters[s->nb_chapters]); } @@ -3385,7 +3495,7 @@ void avformat_close_input(AVFormatContext **ps) if (!ps || !*ps) return; - s = *ps; + s = *ps; pb = s->pb; if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) || @@ -3394,10 +3504,9 @@ void avformat_close_input(AVFormatContext **ps) flush_packet_queue(s); - if (s->iformat) { + if (s->iformat) if (s->iformat->read_close) s->iformat->read_close(s); - } avformat_free_context(s); @@ -3439,19 +3548,18 @@ AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c) st->info->last_dts = AV_NOPTS_VALUE; st->codec = avcodec_alloc_context3(c); - if (s->iformat) { + if (s->iformat) /* no default bitrate if decoding */ st->codec->bit_rate = 0; - } - st->index = s->nb_streams; + st->index = s->nb_streams; st->start_time = AV_NOPTS_VALUE; - st->duration = AV_NOPTS_VALUE; - /* we set the current DTS to 0 so that formats without any timestamps - but durations get some timestamps, formats with some unknown - timestamps have their first few packets buffered and the - timestamps corrected before they are returned to the user */ - st->cur_dts = s->iformat ? 
RELATIVE_TS_BASE : 0; - st->first_dts = AV_NOPTS_VALUE; + st->duration = AV_NOPTS_VALUE; + /* we set the current DTS to 0 so that formats without any timestamps + * but durations get some timestamps, formats with some unknown + * timestamps have their first few packets buffered and the + * timestamps corrected before they are returned to the user */ + st->cur_dts = s->iformat ? RELATIVE_TS_BASE : 0; + st->first_dts = AV_NOPTS_VALUE; st->probe_packets = MAX_PROBE_PACKETS; st->pts_wrap_reference = AV_NOPTS_VALUE; st->pts_wrap_behavior = AV_PTS_WRAP_IGNORE; @@ -3459,10 +3567,10 @@ AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c) /* default pts setting is MPEG-like */ avpriv_set_pts_info(st, 33, 1, 90000); st->last_IP_pts = AV_NOPTS_VALUE; - for(i=0; i<MAX_REORDER_DELAY+1; i++) - st->pts_buffer[i]= AV_NOPTS_VALUE; + for (i = 0; i < MAX_REORDER_DELAY + 1; i++) + st->pts_buffer[i] = AV_NOPTS_VALUE; - st->sample_aspect_ratio = (AVRational){0,1}; + st->sample_aspect_ratio = (AVRational) { 0, 1 }; #if FF_API_R_FRAME_RATE st->info->last_dts = AV_NOPTS_VALUE; @@ -3476,16 +3584,16 @@ AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c) AVProgram *av_new_program(AVFormatContext *ac, int id) { - AVProgram *program=NULL; + AVProgram *program = NULL; int i; av_dlog(ac, "new_program: id=0x%04x\n", id); - for(i=0; i<ac->nb_programs; i++) - if(ac->programs[i]->id == id) + for (i = 0; i < ac->nb_programs; i++) + if (ac->programs[i]->id == id) program = ac->programs[i]; - if(!program){ + if (!program) { program = av_mallocz(sizeof(AVProgram)); if (!program) return NULL; @@ -3502,34 +3610,35 @@ AVProgram *av_new_program(AVFormatContext *ac, int id) return program; } -AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title) +AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, + int64_t start, int64_t end, const char *title) { AVChapter *chapter = NULL; int i; - for(i=0; i<s->nb_chapters; i++) - if(s->chapters[i]->id == id) + for (i = 0; i < s->nb_chapters; i++) + if (s->chapters[i]->id == id) chapter = s->chapters[i]; - if(!chapter){ - chapter= av_mallocz(sizeof(AVChapter)); - if(!chapter) + if (!chapter) { + chapter = av_mallocz(sizeof(AVChapter)); + if (!chapter) return NULL; dynarray_add(&s->chapters, &s->nb_chapters, chapter); } av_dict_set(&chapter->metadata, "title", title, 0); - chapter->id = id; - chapter->time_base= time_base; - chapter->start = start; - chapter->end = end; + chapter->id = id; + chapter->time_base = time_base; + chapter->start = start; + chapter->end = end; return chapter; } -void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx) +void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned idx) { int i, j; - AVProgram *program=NULL; + AVProgram *program = NULL; void *tmp; if (idx >= ac->nb_streams) { @@ -3537,16 +3646,16 @@ void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int i return; } - for(i=0; i<ac->nb_programs; i++){ - if(ac->programs[i]->id != progid) + for (i = 0; i < ac->nb_programs; i++) { + if (ac->programs[i]->id != progid) continue; program = ac->programs[i]; - for(j=0; j<program->nb_stream_indexes; j++) - if(program->stream_index[j] == idx) + for (j = 0; j < program->nb_stream_indexes; j++) + if (program->stream_index[j] == idx) return; tmp = av_realloc_array(program->stream_index, program->nb_stream_indexes+1, sizeof(unsigned int)); - if(!tmp) + if (!tmp) return; 
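On the muxing side, avformat_new_stream() (whose default-initialisation code is reformatted above) is typically called once per output stream before writing the header. The codec id, dimensions and time base below are placeholders chosen for illustration; in this API generation the per-stream parameters still live in st->codec.

#include <libavformat/avformat.h>

int add_video_stream(AVFormatContext *oc)
{
    /* NULL codec: no encoder defaults are applied to the new stream. */
    AVStream *st = avformat_new_stream(oc, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_MPEG4;   /* placeholder codec */
    st->codec->width      = 640;
    st->codec->height     = 480;
    st->time_base         = (AVRational){ 1, 25 };
    return st->index;        /* index was assigned by avformat_new_stream() */
}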
program->stream_index = tmp; program->stream_index[program->nb_stream_indexes++] = idx; @@ -3554,24 +3663,29 @@ void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int i } } -static void print_fps(double d, const char *postfix){ - uint64_t v= lrintf(d*100); - if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix); - else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix); - else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix); +static void print_fps(double d, const char *postfix) +{ + uint64_t v = lrintf(d * 100); + if (v % 100) + av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix); + else if (v % (100 * 1000)) + av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix); + else + av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d / 1000, postfix); } static void dump_metadata(void *ctx, AVDictionary *m, const char *indent) { - if(m && !(av_dict_count(m) == 1 && av_dict_get(m, "language", NULL, 0))){ - AVDictionaryEntry *tag=NULL; + if (m && !(av_dict_count(m) == 1 && av_dict_get(m, "language", NULL, 0))) { + AVDictionaryEntry *tag = NULL; av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent); - while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) { - if(strcmp("language", tag->key)){ + while ((tag = av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) + if (strcmp("language", tag->key)) { const char *p = tag->value; - av_log(ctx, AV_LOG_INFO, "%s %-16s: ", indent, tag->key); - while(*p) { + av_log(ctx, AV_LOG_INFO, + "%s %-16s: ", indent, tag->key); + while (*p) { char tmp[256]; size_t len = strcspn(p, "\x8\xa\xb\xc\xd"); av_strlcpy(tmp, p, FFMIN(sizeof(tmp), len+1)); @@ -3583,12 +3697,12 @@ static void dump_metadata(void *ctx, AVDictionary *m, const char *indent) } av_log(ctx, AV_LOG_INFO, "\n"); } - } } } /* "user interface" functions */ -static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output) +static void dump_stream_format(AVFormatContext *ic, int i, + int index, int is_output) { char buf[256]; int flags = (is_output ? 
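The metadata dump being reindented here reduces to the standard AVDictionary iteration idiom; a stand-alone sketch (the function name is illustrative):

#include <libavutil/dict.h>
#include <stdio.h>

void print_all_tags(const AVDictionary *m)
{
    AVDictionaryEntry *tag = NULL;
    /* An empty key with AV_DICT_IGNORE_SUFFIX matches every entry in turn. */
    while ((tag = av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX)))
        printf("%-16s: %s\n", tag->key, tag->value);
}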
ic->oformat->flags : ic->iformat->flags); @@ -3603,30 +3717,31 @@ static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_out av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id); if (lang) av_log(NULL, AV_LOG_INFO, "(%s)", lang->value); - av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g); + av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, + st->time_base.num / g, st->time_base.den / g); av_log(NULL, AV_LOG_INFO, ": %s", buf); if (st->sample_aspect_ratio.num && // default av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) { AVRational display_aspect_ratio; av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den, - st->codec->width*st->sample_aspect_ratio.num, - st->codec->height*st->sample_aspect_ratio.den, - 1024*1024); + st->codec->width * st->sample_aspect_ratio.num, + st->codec->height * st->sample_aspect_ratio.den, + 1024 * 1024); av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d", - st->sample_aspect_ratio.num, st->sample_aspect_ratio.den, - display_aspect_ratio.num, display_aspect_ratio.den); + st->sample_aspect_ratio.num, st->sample_aspect_ratio.den, + display_aspect_ratio.num, display_aspect_ratio.den); } - if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){ - if(st->avg_frame_rate.den && st->avg_frame_rate.num) + if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { + if (st->avg_frame_rate.den && st->avg_frame_rate.num) print_fps(av_q2d(st->avg_frame_rate), "fps"); #if FF_API_R_FRAME_RATE - if(st->r_frame_rate.den && st->r_frame_rate.num) + if (st->r_frame_rate.den && st->r_frame_rate.num) print_fps(av_q2d(st->r_frame_rate), "tbr"); #endif - if(st->time_base.den && st->time_base.num) - print_fps(1/av_q2d(st->time_base), "tbn"); - if(st->codec->time_base.den && st->codec->time_base.num) - print_fps(1/av_q2d(st->codec->time_base), "tbc"); + if (st->time_base.den && st->time_base.num) + print_fps(1 / av_q2d(st->time_base), "tbn"); + if (st->codec->time_base.den && st->codec->time_base.num) + print_fps(1 / av_q2d(st->codec->time_base), "tbc"); } if (st->disposition & AV_DISPOSITION_DEFAULT) av_log(NULL, AV_LOG_INFO, " (default)"); @@ -3652,10 +3767,8 @@ static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_out dump_metadata(NULL, st->metadata, " "); } -void av_dump_format(AVFormatContext *ic, - int index, - const char *url, - int is_output) +void av_dump_format(AVFormatContext *ic, int index, + const char *url, int is_output) { int i; uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL; @@ -3663,19 +3776,19 @@ void av_dump_format(AVFormatContext *ic, return; av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n", - is_output ? "Output" : "Input", - index, - is_output ? ic->oformat->name : ic->iformat->name, - is_output ? "to" : "from", url); + is_output ? "Output" : "Input", + index, + is_output ? ic->oformat->name : ic->iformat->name, + is_output ? 
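The SAR/DAR line printed by dump_stream_format() follows directly from av_reduce(); as a stand-alone computation with sample dimensions (720x576 at a 16:15 pixel aspect ratio, chosen only for illustration):

#include <libavutil/rational.h>
#include <stdio.h>

int main(void)
{
    int w = 720, h = 576;               /* sample coded frame size */
    AVRational sar = { 16, 15 };        /* sample (pixel) aspect ratio */
    AVRational dar;

    /* DAR = (w * sar.num) : (h * sar.den), reduced with the same
     * 1024*1024 bound used in dump_stream_format(). */
    av_reduce(&dar.num, &dar.den, w * sar.num, h * sar.den, 1024 * 1024);
    printf("SAR %d:%d -> DAR %d:%d\n", sar.num, sar.den, dar.num, dar.den);
    return 0;                           /* prints SAR 16:15 -> DAR 4:3 */
}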
"to" : "from", url); dump_metadata(NULL, ic->metadata, " "); if (!is_output) { av_log(NULL, AV_LOG_INFO, " Duration: "); if (ic->duration != AV_NOPTS_VALUE) { int hours, mins, secs, us; int64_t duration = ic->duration + 5000; - secs = duration / AV_TIME_BASE; - us = duration % AV_TIME_BASE; - mins = secs / 60; + secs = duration / AV_TIME_BASE; + us = duration % AV_TIME_BASE; + mins = secs / 60; secs %= 60; hours = mins / 60; mins %= 60; @@ -3688,36 +3801,38 @@ void av_dump_format(AVFormatContext *ic, int secs, us; av_log(NULL, AV_LOG_INFO, ", start: "); secs = ic->start_time / AV_TIME_BASE; - us = abs(ic->start_time % AV_TIME_BASE); + us = abs(ic->start_time % AV_TIME_BASE); av_log(NULL, AV_LOG_INFO, "%d.%06d", - secs, (int)av_rescale(us, 1000000, AV_TIME_BASE)); + secs, (int) av_rescale(us, 1000000, AV_TIME_BASE)); } av_log(NULL, AV_LOG_INFO, ", bitrate: "); - if (ic->bit_rate) { - av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000); - } else { + if (ic->bit_rate) + av_log(NULL, AV_LOG_INFO, "%d kb/s", ic->bit_rate / 1000); + else av_log(NULL, AV_LOG_INFO, "N/A"); - } av_log(NULL, AV_LOG_INFO, "\n"); } for (i = 0; i < ic->nb_chapters; i++) { AVChapter *ch = ic->chapters[i]; av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i); - av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base)); - av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base)); + av_log(NULL, AV_LOG_INFO, + "start %f, ", ch->start * av_q2d(ch->time_base)); + av_log(NULL, AV_LOG_INFO, + "end %f\n", ch->end * av_q2d(ch->time_base)); dump_metadata(NULL, ch->metadata, " "); } - if(ic->nb_programs) { + if (ic->nb_programs) { int j, k, total = 0; - for(j=0; j<ic->nb_programs; j++) { + for (j = 0; j < ic->nb_programs; j++) { AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata, "name", NULL, 0); av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id, name ? 
name->value : ""); dump_metadata(NULL, ic->programs[j]->metadata, " "); - for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) { - dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output); + for (k = 0; k < ic->programs[j]->nb_stream_indexes; k++) { + dump_stream_format(ic, ic->programs[j]->stream_index[k], + index, is_output); printed[ic->programs[j]->stream_index[k]] = 1; } total += ic->programs[j]->nb_stream_indexes; @@ -3725,7 +3840,7 @@ void av_dump_format(AVFormatContext *ic, if (total < ic->nb_streams) av_log(NULL, AV_LOG_INFO, " No Program\n"); } - for(i=0;i<ic->nb_streams;i++) + for (i = 0; i < ic->nb_streams; i++) if (!printed[i]) dump_stream_format(ic, i, index, is_output); @@ -3734,11 +3849,10 @@ void av_dump_format(AVFormatContext *ic, uint64_t ff_ntp_time(void) { - return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US; + return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US; } -int av_get_frame_filename(char *buf, int buf_size, - const char *path, int number) +int av_get_frame_filename(char *buf, int buf_size, const char *path, int number) { const char *p; char *q, buf1[20], c; @@ -3747,20 +3861,19 @@ int av_get_frame_filename(char *buf, int buf_size, q = buf; p = path; percentd_found = 0; - for(;;) { + for (;;) { c = *p++; if (c == '\0') break; if (c == '%') { do { nd = 0; - while (av_isdigit(*p)) { + while (av_isdigit(*p)) nd = nd * 10 + *p++ - '0'; - } c = *p++; } while (av_isdigit(c)); - switch(c) { + switch (c) { case '%': goto addchar; case 'd': @@ -3778,7 +3891,7 @@ int av_get_frame_filename(char *buf, int buf_size, goto fail; } } else { - addchar: +addchar: if ((q - buf) < buf_size - 1) *q++ = c; } @@ -3787,7 +3900,7 @@ int av_get_frame_filename(char *buf, int buf_size, goto fail; *q = '\0'; return 0; - fail: +fail: *q = '\0'; return -1; } @@ -3796,22 +3909,28 @@ static void hex_dump_internal(void *avcl, FILE *f, int level, const uint8_t *buf, int size) { int len, i, j, c; -#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0) +#define PRINT(...) \ + do { \ + if (!f) \ + av_log(avcl, level, __VA_ARGS__); \ + else \ + fprintf(f, __VA_ARGS__); \ + } while (0) - for(i=0;i<size;i+=16) { + for (i = 0; i < size; i += 16) { len = size - i; if (len > 16) len = 16; PRINT("%08x ", i); - for(j=0;j<16;j++) { + for (j = 0; j < 16; j++) { if (j < len) - PRINT(" %02x", buf[i+j]); + PRINT(" %02x", buf[i + j]); else PRINT(" "); } PRINT(" "); - for(j=0;j<len;j++) { - c = buf[i+j]; + for (j = 0; j < len; j++) { + c = buf[i + j]; if (c < ' ' || c > '~') c = '.'; PRINT("%c", c); @@ -3831,9 +3950,16 @@ void av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size) hex_dump_internal(avcl, NULL, level, buf, size); } -static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base) +static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, + int dump_payload, AVRational time_base) { -#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0) +#define PRINT(...) 
\ + do { \ + if (!f) \ + av_log(avcl, level, __VA_ARGS__); \ + else \ + fprintf(f, __VA_ARGS__); \ + } while (0) PRINT("stream #%d:\n", pkt->stream_index); PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0)); PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base)); @@ -3870,24 +3996,29 @@ void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload, void av_url_split(char *proto, int proto_size, char *authorization, int authorization_size, char *hostname, int hostname_size, - int *port_ptr, - char *path, int path_size, - const char *url) + int *port_ptr, char *path, int path_size, const char *url) { const char *p, *ls, *ls2, *at, *at2, *col, *brk; - if (port_ptr) *port_ptr = -1; - if (proto_size > 0) proto[0] = 0; - if (authorization_size > 0) authorization[0] = 0; - if (hostname_size > 0) hostname[0] = 0; - if (path_size > 0) path[0] = 0; + if (port_ptr) + *port_ptr = -1; + if (proto_size > 0) + proto[0] = 0; + if (authorization_size > 0) + authorization[0] = 0; + if (hostname_size > 0) + hostname[0] = 0; + if (path_size > 0) + path[0] = 0; /* parse protocol */ if ((p = strchr(url, ':'))) { av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url)); p++; /* skip ':' */ - if (*p == '/') p++; - if (*p == '/') p++; + if (*p == '/') + p++; + if (*p == '/') + p++; } else { /* no protocol means plain filename */ av_strlcpy(path, url, path_size); @@ -3897,14 +4028,14 @@ void av_url_split(char *proto, int proto_size, /* separate path from hostname */ ls = strchr(p, '/'); ls2 = strchr(p, '?'); - if(!ls) + if (!ls) ls = ls2; else if (ls && ls2) ls = FFMIN(ls, ls2); - if(ls) + if (ls) av_strlcpy(path, ls, path_size); else - ls = &p[strlen(p)]; // XXX + ls = &p[strlen(p)]; // XXX /* the rest is hostname, use that to parse auth/port */ if (ls != p) { @@ -3925,7 +4056,8 @@ void av_url_split(char *proto, int proto_size, } else if ((col = strchr(p, ':')) && col < ls) { av_strlcpy(hostname, p, FFMIN(col + 1 - p, hostname_size)); - if (port_ptr) *port_ptr = atoi(col + 1); + if (port_ptr) + *port_ptr = atoi(col + 1); } else av_strlcpy(hostname, p, FFMIN(ls + 1 - p, hostname_size)); @@ -3945,7 +4077,7 @@ char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase) 'c', 'd', 'e', 'f' }; const char *hex_table = lowercase ? 
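av_url_split(), whose body is being reindented here, fills caller-provided fixed-size buffers and an optional port; a sketch with a made-up URL:

#include <libavformat/avformat.h>
#include <stdio.h>

int main(void)
{
    char proto[16], auth[64], host[128], path[256];
    int port;

    av_url_split(proto, sizeof(proto), auth, sizeof(auth),
                 host, sizeof(host), &port, path, sizeof(path),
                 "rtsp://user:pass@example.com:8554/stream?x=1");
    /* port stays -1 when the URL carries no explicit port */
    printf("proto=%s auth=%s host=%s port=%d path=%s\n",
           proto, auth, host, port, path);
    return 0;
}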
hex_table_lc : hex_table_uc; - for(i = 0; i < s; i++) { + for (i = 0; i < s; i++) { buff[i * 2] = hex_table[src[i] >> 4]; buff[i * 2 + 1] = hex_table[src[i] & 0xF]; } @@ -3958,7 +4090,7 @@ int ff_hex_to_data(uint8_t *data, const char *p) int c, len, v; len = 0; - v = 1; + v = 1; for (;;) { p += strspn(p, SPACE_CHARS); if (*p == '\0') @@ -3993,17 +4125,23 @@ void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits, unsigned int pts_num, unsigned int pts_den) { AVRational new_tb; - if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){ - if(new_tb.num != pts_num) - av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num); - }else - av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index); - - if(new_tb.num <= 0 || new_tb.den <= 0) { - av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase %d/%d for st:%d\n", new_tb.num, new_tb.den, s->index); + if (av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)) { + if (new_tb.num != pts_num) + av_log(NULL, AV_LOG_DEBUG, + "st:%d removing common factor %d from timebase\n", + s->index, pts_num / new_tb.num); + } else + av_log(NULL, AV_LOG_WARNING, + "st:%d has too large timebase, reducing\n", s->index); + + if (new_tb.num <= 0 || new_tb.den <= 0) { + av_log(NULL, AV_LOG_ERROR, + "Ignoring attempt to set invalid timebase %d/%d for st:%d\n", + new_tb.num, new_tb.den, + s->index); return; } - s->time_base = new_tb; + s->time_base = new_tb; av_codec_set_pkt_timebase(s->codec, new_tb); s->pts_wrap_bits = pts_wrap_bits; } @@ -4065,16 +4203,15 @@ void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf, int ff_find_stream_index(AVFormatContext *s, int id) { int i; - for (i = 0; i < s->nb_streams; i++) { + for (i = 0; i < s->nb_streams; i++) if (s->streams[i]->id == id) return i; - } return -1; } int64_t ff_iso8601_to_unix_time(const char *datestr) { - struct tm time1 = {0}, time2 = {0}; + struct tm time1 = { 0 }, time2 = { 0 }; char *ret1, *ret2; ret1 = av_small_strptime(datestr, "%Y - %m - %d %H:%M:%S", &time1); ret2 = av_small_strptime(datestr, "%Y - %m - %dT%H:%M:%S", &time2); @@ -4084,14 +4221,16 @@ int64_t ff_iso8601_to_unix_time(const char *datestr) return av_timegm(&time1); } -int avformat_query_codec(AVOutputFormat *ofmt, enum AVCodecID codec_id, int std_compliance) +int avformat_query_codec(AVOutputFormat *ofmt, enum AVCodecID codec_id, + int std_compliance) { if (ofmt) { if (ofmt->query_codec) return ofmt->query_codec(codec_id, std_compliance); else if (ofmt->codec_tag) return !!av_codec_get_tag(ofmt->codec_tag, codec_id); - else if (codec_id == ofmt->video_codec || codec_id == ofmt->audio_codec || + else if (codec_id == ofmt->video_codec || + codec_id == ofmt->audio_codec || codec_id == ofmt->subtitle_codec) return 1; } @@ -4129,19 +4268,19 @@ int ff_add_param_change(AVPacket *pkt, int32_t channels, if (!pkt) return AVERROR(EINVAL); if (channels) { - size += 4; + size += 4; flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT; } if (channel_layout) { - size += 8; + size += 8; flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT; } if (sample_rate) { - size += 4; + size += 4; flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE; } if (width || height) { - size += 8; + size += 8; flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS; } data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size); @@ -4330,7 +4469,7 @@ int ff_generate_avci_extradata(AVStream *st) }; const uint8_t *data = NULL; - int size = 0; + int size = 0; if 
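avformat_query_codec(), re-wrapped below, answers whether a muxer can store a given codec id. An illustrative check, assuming the MP4 muxer is compiled in (av_guess_format() may return NULL otherwise):

#include <libavformat/avformat.h>
#include <stdio.h>

int main(void)
{
    av_register_all();
    AVOutputFormat *ofmt = av_guess_format("mp4", NULL, NULL);
    if (!ofmt)
        return 1;
    /* > 0: supported, 0: not supported, < 0: unknown for this muxer */
    int ok = avformat_query_codec(ofmt, AV_CODEC_ID_H264, FF_COMPLIANCE_NORMAL);
    printf("H.264 in %s: %d\n", ofmt->name, ok);
    return 0;
}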
(st->codec->width == 1920) { if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {