| | | |
|---|---|---|
| author | Michael Niedermayer <michaelni@gmx.at> | 2012-01-04 01:12:34 +0100 |
| committer | Michael Niedermayer <michaelni@gmx.at> | 2012-01-04 01:12:34 +0100 |
| commit | ad1c8dd6734f0aa7a7a87b4669a166715c114b46 (patch) | |
| tree | c03c08f30bf1a7e8d9859abc48de3391bb05c6a0 | |
| parent | d6da16dca5a64ed7ab2db54710a0c703f179d3ba (diff) | |
| parent | fd16f567987524a769d5d4f1f69089f000386ac2 (diff) | |
| download | ffmpeg-ad1c8dd6734f0aa7a7a87b4669a166715c114b46.tar.gz | |
Merge remote-tracking branch 'qatar/master'
* qatar/master:
fate: add dxtory test
adx_parser: rewrite.
adxdec: Validate channel count to fix a division by zero.
adxdec: Do not require extradata.
cmdutils: K&R reformatting cosmetics
alacdec: implement the 2-pass prediction type.
alacenc: implement the 2-pass prediction type.
alacenc: do not generate invalid multi-channel ALAC files
alacdec: fill in missing or guessed info about the extradata format.
utvideo: proper median prediction for interlaced videos
lavu: bump lavu minor for av_popcount64
dca: K&R formatting cosmetics
dct: K&R formatting cosmetics
lavf: flush decoders in avformat_find_stream_info().
win32: detect number of CPUs using affinity
Add av_popcount64
snow: Restore three mistakenly removed casts.
Conflicts:
cmdutils.c
doc/APIchanges
libavcodec/adx_parser.c
libavcodec/adxdec.c
libavcodec/alacenc.c
libavutil/avutil.h
tests/fate/screen.mak
Merged-by: Michael Niedermayer <michaelni@gmx.at>
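Two of the merged commits above are related: "Add av_popcount64" introduces a 64-bit population count in libavutil, and "win32: detect number of CPUs using affinity" uses it to count the set bits of the process affinity mask (the configure hunk in the diff below swaps the GetSystemInfo check for GetProcessAffinityMask). The following is a minimal standalone C sketch of that idea, not the committed FFmpeg code; the helper names popcount32/popcount64/count_cpus are illustrative, and the "two 32-bit popcounts" shape of av_popcount64 is an assumption here, since the libavutil/common.h hunk is not shown on this page.

```c
/* Sketch only: a 64-bit popcount built from a 32-bit SWAR popcount, and a
 * Win32 CPU count derived from the process affinity mask, as described by
 * the merged commit subjects. Not the committed FFmpeg implementation. */
#include <stdint.h>
#include <stdio.h>

static int popcount32(uint32_t x)
{
    /* classic SWAR bit-count */
    x -= (x >> 1) & 0x55555555;
    x  = (x & 0x33333333) + ((x >> 2) & 0x33333333);
    x  = (x + (x >> 4)) & 0x0F0F0F0F;
    return (x * 0x01010101) >> 24;
}

/* assumed shape of av_popcount64: two 32-bit popcounts on the halves */
static int popcount64(uint64_t x)
{
    return popcount32((uint32_t)x) + popcount32((uint32_t)(x >> 32));
}

#ifdef _WIN32
#include <windows.h>
/* Count the CPUs this process may run on from its affinity mask,
 * rather than the number of CPUs installed in the machine. */
static int count_cpus(void)
{
    DWORD_PTR proc_aff, sys_aff;
    if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff, &sys_aff))
        return 1;
    return popcount64(proc_aff);
}
#endif

int main(void)
{
    printf("%d\n", popcount64(0xF0F0F0F0F0F0F0F0ULL)); /* prints 32 */
#ifdef _WIN32
    printf("cpus: %d\n", count_cpus());
#endif
    return 0;
}
```

Counting affinity bits rather than calling GetSystemInfo means a process restricted to a subset of cores sizes its thread pool to what it can actually use.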
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | cmdutils.c | 341 |
| -rwxr-xr-x | configure | 4 |
| -rw-r--r-- | doc/APIchanges | 11 |
| -rw-r--r-- | libavcodec/adx.c | 2 |
| -rw-r--r-- | libavcodec/adx_parser.c | 32 |
| -rw-r--r-- | libavcodec/adxdec.c | 17 |
| -rw-r--r-- | libavcodec/alac.c | 75 |
| -rw-r--r-- | libavcodec/alacenc.c | 18 |
| -rw-r--r-- | libavcodec/dca.c | 546 |
| -rw-r--r-- | libavcodec/dct.c | 122 |
| -rw-r--r-- | libavcodec/pthread.c | 11 |
| -rw-r--r-- | libavcodec/snow.c | 6 |
| -rw-r--r-- | libavcodec/utvideo.c | 101 |
| -rw-r--r-- | libavformat/utils.c | 38 |
| -rw-r--r-- | libavutil/avutil.h | 2 |
| -rw-r--r-- | libavutil/common.h | 13 |
| -rw-r--r-- | tests/fate/screen.mak | 3 |
| -rw-r--r-- | tests/ref/fate/dxtory | 1 |
18 files changed, 767 insertions, 576 deletions
diff --git a/cmdutils.c b/cmdutils.c index f7b8da6f92..e86f8455ec 100644 --- a/cmdutils.c +++ b/cmdutils.c @@ -61,7 +61,8 @@ static FILE *report_file; void init_opts(void) { #if CONFIG_SWSCALE - sws_opts = sws_getContext(16, 16, 0, 16, 16, 0, SWS_BICUBIC, NULL, NULL, NULL); + sws_opts = sws_getContext(16, 16, 0, 16, 16, 0, SWS_BICUBIC, + NULL, NULL, NULL); #endif } @@ -75,7 +76,7 @@ void uninit_opts(void) av_dict_free(&codec_opts); } -void log_callback_help(void* ptr, int level, const char* fmt, va_list vl) +void log_callback_help(void *ptr, int level, const char *fmt, va_list vl) { vfprintf(stdout, fmt, vl); } @@ -94,19 +95,20 @@ static void log_callback_report(void *ptr, int level, const char *fmt, va_list v fflush(report_file); } -double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max) +double parse_number_or_die(const char *context, const char *numstr, int type, + double min, double max) { char *tail; const char *error; double d = av_strtod(numstr, &tail); if (*tail) - error= "Expected number for %s but found: %s\n"; + error = "Expected number for %s but found: %s\n"; else if (d < min || d > max) - error= "The value for %s was %s which is not within %f - %f\n"; - else if(type == OPT_INT64 && (int64_t)d != d) - error= "Expected int64 for %s but found %s\n"; + error = "The value for %s was %s which is not within %f - %f\n"; + else if (type == OPT_INT64 && (int64_t)d != d) + error = "Expected int64 for %s but found %s\n"; else if (type == OPT_INT && (int)d != d) - error= "Expected int for %s but found %s\n"; + error = "Expected int for %s but found %s\n"; else return d; av_log(NULL, AV_LOG_FATAL, error, context, numstr, min, max); @@ -114,7 +116,8 @@ double parse_number_or_die(const char *context, const char *numstr, int type, do return 0; } -int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration) +int64_t parse_time_or_die(const char *context, const char *timestr, + int is_duration) { int64_t us; if (av_parse_time(&us, timestr, is_duration) < 0) { @@ -125,13 +128,14 @@ int64_t parse_time_or_die(const char *context, const char *timestr, int is_durat return us; } -void show_help_options(const OptionDef *options, const char *msg, int mask, int value) +void show_help_options(const OptionDef *options, const char *msg, int mask, + int value) { const OptionDef *po; int first; first = 1; - for(po = options; po->name != NULL; po++) { + for (po = options; po->name != NULL; po++) { char buf[64]; if ((po->flags & mask) == value) { if (first) { @@ -158,7 +162,8 @@ void show_help_children(const AVClass *class, int flags) show_help_children(child, flags); } -static const OptionDef* find_option(const OptionDef *po, const char *name){ +static const OptionDef *find_option(const OptionDef *po, const char *name) +{ const char *p = strchr(name, ':'); int len = p ? 
p - name : strlen(name); @@ -205,8 +210,8 @@ static void prepare_app_arguments(int *argc_ptr, char ***argv_ptr) buffsize += WideCharToMultiByte(CP_UTF8, 0, argv_w[i], -1, NULL, 0, NULL, NULL); - win32_argv_utf8 = av_mallocz(sizeof(char*) * (win32_argc + 1) + buffsize); - argstr_flat = (char*)win32_argv_utf8 + sizeof(char*) * (win32_argc + 1); + win32_argv_utf8 = av_mallocz(sizeof(char *) * (win32_argc + 1) + buffsize); + argstr_flat = (char *)win32_argv_utf8 + sizeof(char *) * (win32_argc + 1); if (win32_argv_utf8 == NULL) { LocalFree(argv_w); return; @@ -231,8 +236,8 @@ static inline void prepare_app_arguments(int *argc_ptr, char ***argv_ptr) } #endif /* WIN32 && !__MINGW32CE__ */ - -int parse_option(void *optctx, const char *opt, const char *arg, const OptionDef *options) +int parse_option(void *optctx, const char *opt, const char *arg, + const OptionDef *options) { const OptionDef *po; int bool_val = 1; @@ -261,13 +266,14 @@ unknown_opt: /* new-style options contain an offset into optctx, old-style address of * a global var*/ - dst = po->flags & (OPT_OFFSET|OPT_SPEC) ? (uint8_t*)optctx + po->u.off : po->u.dst_ptr; + dst = po->flags & (OPT_OFFSET | OPT_SPEC) ? (uint8_t *)optctx + po->u.off + : po->u.dst_ptr; if (po->flags & OPT_SPEC) { SpecifierOpt **so = dst; char *p = strchr(opt, ':'); - dstcount = (int*)(so + 1); + dstcount = (int *)(so + 1); *so = grow_array(*so, sizeof(**so), dstcount, *dstcount + 1); (*so)[*dstcount - 1].specifier = av_strdup(p ? p + 1 : ""); dst = &(*so)[*dstcount - 1].u; @@ -276,24 +282,25 @@ unknown_opt: if (po->flags & OPT_STRING) { char *str; str = av_strdup(arg); - *(char**)dst = str; + *(char **)dst = str; } else if (po->flags & OPT_BOOL) { - *(int*)dst = bool_val; + *(int *)dst = bool_val; } else if (po->flags & OPT_INT) { - *(int*)dst = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX); + *(int *)dst = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX); } else if (po->flags & OPT_INT64) { - *(int64_t*)dst = parse_number_or_die(opt, arg, OPT_INT64, INT64_MIN, INT64_MAX); + *(int64_t *)dst = parse_number_or_die(opt, arg, OPT_INT64, INT64_MIN, INT64_MAX); } else if (po->flags & OPT_TIME) { - *(int64_t*)dst = parse_time_or_die(opt, arg, 1); + *(int64_t *)dst = parse_time_or_die(opt, arg, 1); } else if (po->flags & OPT_FLOAT) { - *(float*)dst = parse_number_or_die(opt, arg, OPT_FLOAT, -INFINITY, INFINITY); + *(float *)dst = parse_number_or_die(opt, arg, OPT_FLOAT, -INFINITY, INFINITY); } else if (po->flags & OPT_DOUBLE) { - *(double*)dst = parse_number_or_die(opt, arg, OPT_DOUBLE, -INFINITY, INFINITY); + *(double *)dst = parse_number_or_die(opt, arg, OPT_DOUBLE, -INFINITY, INFINITY); } else if (po->u.func_arg) { - int ret = po->flags & OPT_FUNC2 ? po->u.func2_arg(optctx, opt, arg) : - po->u.func_arg(opt, arg); + int ret = po->flags & OPT_FUNC2 ? po->u.func2_arg(optctx, opt, arg) + : po->u.func_arg(opt, arg); if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Failed to set value '%s' for option '%s'\n", arg, opt); + av_log(NULL, AV_LOG_ERROR, + "Failed to set value '%s' for option '%s'\n", arg, opt); return ret; } } @@ -303,7 +310,7 @@ unknown_opt: } void parse_options(void *optctx, int argc, char **argv, const OptionDef *options, - void (* parse_arg_function)(void *, const char*)) + void (*parse_arg_function)(void *, const char*)) { const char *opt; int optindex, handleoptions = 1, ret; @@ -336,7 +343,8 @@ void parse_options(void *optctx, int argc, char **argv, const OptionDef *options /* * Return index of option opt in argv or 0 if not found. 
*/ -static int locate_option(int argc, char **argv, const OptionDef *options, const char *optname) +static int locate_option(int argc, char **argv, const OptionDef *options, + const char *optname) { const OptionDef *po; int i; @@ -419,15 +427,18 @@ int opt_default(const char *opt, const char *arg) p = opt + strlen(opt); av_strlcpy(opt_stripped, opt, FFMIN(sizeof(opt_stripped), p - opt + 1)); - if ((oc = av_opt_find(&cc, opt_stripped, NULL, 0, AV_OPT_SEARCH_CHILDREN|AV_OPT_SEARCH_FAKE_OBJ)) || - ((opt[0] == 'v' || opt[0] == 'a' || opt[0] == 's') && - (oc = av_opt_find(&cc, opt+1, NULL, 0, AV_OPT_SEARCH_FAKE_OBJ)))) + if ((oc = av_opt_find(&cc, opt_stripped, NULL, 0, + AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) || + ((opt[0] == 'v' || opt[0] == 'a' || opt[0] == 's') && + (oc = av_opt_find(&cc, opt + 1, NULL, 0, AV_OPT_SEARCH_FAKE_OBJ)))) av_dict_set(&codec_opts, opt, arg, FLAGS(oc)); - if ((of = av_opt_find(&fc, opt, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) + if ((of = av_opt_find(&fc, opt, NULL, 0, + AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) av_dict_set(&format_opts, opt, arg, FLAGS(of)); #if CONFIG_SWSCALE sc = sws_get_class(); - if ((os = av_opt_find(&sc, opt, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) { + if ((os = av_opt_find(&sc, opt, NULL, 0, + AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) { // XXX we only support sws_flags, not arbitrary sws options int ret = av_opt_set(sws_opts, opt, arg, 0); if (ret < 0) { @@ -604,7 +615,8 @@ void show_banner(int argc, char **argv, const OptionDef *options) if (idx) return; - av_log(NULL, AV_LOG_INFO, "%s version " FFMPEG_VERSION ", Copyright (c) %d-%d the FFmpeg developers\n", + av_log(NULL, AV_LOG_INFO, + "%s version " FFMPEG_VERSION ", Copyright (c) %d-%d the FFmpeg developers\n", program_name, program_birth_year, this_year); av_log(NULL, AV_LOG_INFO, " built on %s %s with %s %s\n", __DATE__, __TIME__, CC_TYPE, CC_VERSION); @@ -692,49 +704,47 @@ int opt_license(const char *opt, const char *arg) int opt_formats(const char *opt, const char *arg) { - AVInputFormat *ifmt=NULL; - AVOutputFormat *ofmt=NULL; + AVInputFormat *ifmt = NULL; + AVOutputFormat *ofmt = NULL; const char *last_name; - printf( - "File formats:\n" - " D. = Demuxing supported\n" - " .E = Muxing supported\n" - " --\n"); - last_name= "000"; - for(;;){ - int decode=0; - int encode=0; - const char *name=NULL; - const char *long_name=NULL; - - while((ofmt= av_oformat_next(ofmt))) { - if((name == NULL || strcmp(ofmt->name, name)<0) && - strcmp(ofmt->name, last_name)>0){ - name= ofmt->name; - long_name= ofmt->long_name; - encode=1; + printf("File formats:\n" + " D. 
= Demuxing supported\n" + " .E = Muxing supported\n" + " --\n"); + last_name = "000"; + for (;;) { + int decode = 0; + int encode = 0; + const char *name = NULL; + const char *long_name = NULL; + + while ((ofmt = av_oformat_next(ofmt))) { + if ((name == NULL || strcmp(ofmt->name, name) < 0) && + strcmp(ofmt->name, last_name) > 0) { + name = ofmt->name; + long_name = ofmt->long_name; + encode = 1; } } - while((ifmt= av_iformat_next(ifmt))) { - if((name == NULL || strcmp(ifmt->name, name)<0) && - strcmp(ifmt->name, last_name)>0){ - name= ifmt->name; - long_name= ifmt->long_name; - encode=0; + while ((ifmt = av_iformat_next(ifmt))) { + if ((name == NULL || strcmp(ifmt->name, name) < 0) && + strcmp(ifmt->name, last_name) > 0) { + name = ifmt->name; + long_name = ifmt->long_name; + encode = 0; } - if(name && strcmp(ifmt->name, name)==0) - decode=1; + if (name && strcmp(ifmt->name, name) == 0) + decode = 1; } - if(name==NULL) + if (name == NULL) break; - last_name= name; + last_name = name; - printf( - " %s%s %-15s %s\n", - decode ? "D":" ", - encode ? "E":" ", - name, + printf(" %s%s %-15s %s\n", + decode ? "D" : " ", + encode ? "E" : " ", + name, long_name ? long_name:" "); } return 0; @@ -742,44 +752,45 @@ int opt_formats(const char *opt, const char *arg) int opt_codecs(const char *opt, const char *arg) { - AVCodec *p=NULL, *p2; + AVCodec *p = NULL, *p2; const char *last_name; - printf( - "Codecs:\n" - " D..... = Decoding supported\n" - " .E.... = Encoding supported\n" - " ..V... = Video codec\n" - " ..A... = Audio codec\n" - " ..S... = Subtitle codec\n" - " ...S.. = Supports draw_horiz_band\n" - " ....D. = Supports direct rendering method 1\n" - " .....T = Supports weird frame truncation\n" - " ------\n"); + printf("Codecs:\n" + " D..... = Decoding supported\n" + " .E.... = Encoding supported\n" + " ..V... = Video codec\n" + " ..A... = Audio codec\n" + " ..S... = Subtitle codec\n" + " ...S.. = Supports draw_horiz_band\n" + " ....D. = Supports direct rendering method 1\n" + " .....T = Supports weird frame truncation\n" + " ------\n"); last_name= "000"; - for(;;){ - int decode=0; - int encode=0; - int cap=0; + for (;;) { + int decode = 0; + int encode = 0; + int cap = 0; const char *type_str; - p2=NULL; - while((p= av_codec_next(p))) { - if((p2==NULL || strcmp(p->name, p2->name)<0) && - strcmp(p->name, last_name)>0){ - p2= p; - decode= encode= cap=0; + p2 = NULL; + while ((p = av_codec_next(p))) { + if ((p2 == NULL || strcmp(p->name, p2->name) < 0) && + strcmp(p->name, last_name) > 0) { + p2 = p; + decode = encode = cap = 0; } - if(p2 && strcmp(p->name, p2->name)==0){ - if(p->decode) decode=1; - if(p->encode) encode=1; + if (p2 && strcmp(p->name, p2->name) == 0) { + if (p->decode) + decode = 1; + if (p->encode) + encode = 1; cap |= p->capabilities; } } - if(p2==NULL) + if (p2 == NULL) break; - last_name= p2->name; + last_name = p2->name; - switch(p2->type) { + switch (p2->type) { case AVMEDIA_TYPE_VIDEO: type_str = "V"; break; @@ -793,36 +804,36 @@ int opt_codecs(const char *opt, const char *arg) type_str = "?"; break; } - printf( - " %s%s%s%s%s%s %-15s %s", - decode ? "D": (/*p2->decoder ? "d":*/" "), - encode ? "E":" ", - type_str, - cap & CODEC_CAP_DRAW_HORIZ_BAND ? "S":" ", - cap & CODEC_CAP_DR1 ? "D":" ", - cap & CODEC_CAP_TRUNCATED ? "T":" ", - p2->name, - p2->long_name ? p2->long_name : ""); - /* if(p2->decoder && decode==0) - printf(" use %s for decoding", p2->decoder->name);*/ + printf(" %s%s%s%s%s%s %-15s %s", + decode ? "D" : (/* p2->decoder ? "d" : */ " "), + encode ? 
"E" : " ", + type_str, + cap & CODEC_CAP_DRAW_HORIZ_BAND ? "S" : " ", + cap & CODEC_CAP_DR1 ? "D" : " ", + cap & CODEC_CAP_TRUNCATED ? "T" : " ", + p2->name, + p2->long_name ? p2->long_name : ""); +#if 0 + if (p2->decoder && decode == 0) + printf(" use %s for decoding", p2->decoder->name); +#endif printf("\n"); } printf("\n"); - printf( -"Note, the names of encoders and decoders do not always match, so there are\n" -"several cases where the above table shows encoder only or decoder only entries\n" -"even though both encoding and decoding are supported. For example, the h263\n" -"decoder corresponds to the h263 and h263p encoders, for file formats it is even\n" -"worse.\n"); + printf("Note, the names of encoders and decoders do not always match, so there are\n" + "several cases where the above table shows encoder only or decoder only entries\n" + "even though both encoding and decoding are supported. For example, the h263\n" + "decoder corresponds to the h263 and h263p encoders, for file formats it is even\n" + "worse.\n"); return 0; } int opt_bsfs(const char *opt, const char *arg) { - AVBitStreamFilter *bsf=NULL; + AVBitStreamFilter *bsf = NULL; printf("Bitstream filters:\n"); - while((bsf = av_bitstream_filter_next(bsf))) + while ((bsf = av_bitstream_filter_next(bsf))) printf("%s\n", bsf->name); printf("\n"); return 0; @@ -863,15 +874,14 @@ int opt_pix_fmts(const char *opt, const char *arg) { enum PixelFormat pix_fmt; - printf( - "Pixel formats:\n" - "I.... = Supported Input format for conversion\n" - ".O... = Supported Output format for conversion\n" - "..H.. = Hardware accelerated format\n" - "...P. = Paletted format\n" - "....B = Bitstream format\n" - "FLAGS NAME NB_COMPONENTS BITS_PER_PIXEL\n" - "-----\n"); + printf("Pixel formats:\n" + "I.... = Supported Input format for conversion\n" + ".O... = Supported Output format for conversion\n" + "..H.. = Hardware accelerated format\n" + "...P. = Paletted format\n" + "....B = Bitstream format\n" + "FLAGS NAME NB_COMPONENTS BITS_PER_PIXEL\n" + "-----\n"); #if !CONFIG_SWSCALE # define sws_isSupportedInput(x) 0 @@ -921,7 +931,8 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size) FILE *f = fopen(filename, "rb"); if (!f) { - av_log(NULL, AV_LOG_ERROR, "Cannot read file '%s': %s\n", filename, strerror(errno)); + av_log(NULL, AV_LOG_ERROR, "Cannot read file '%s': %s\n", filename, + strerror(errno)); return AVERROR(errno); } fseek(f, 0, SEEK_END); @@ -952,14 +963,14 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size) } FILE *get_preset_file(char *filename, size_t filename_size, - const char *preset_name, int is_path, const char *codec_name) + const char *preset_name, int is_path, + const char *codec_name) { FILE *f = NULL; int i; - const char *base[3]= { getenv("FFMPEG_DATADIR"), - getenv("HOME"), - FFMPEG_DATADIR, - }; + const char *base[3] = { getenv("FFMPEG_DATADIR"), + getenv("HOME"), + FFMPEG_DATADIR, }; if (is_path) { av_strlcpy(filename, preset_name, filename_size); @@ -985,11 +996,14 @@ FILE *get_preset_file(char *filename, size_t filename_size, for (i = 0; i < 3 && !f; i++) { if (!base[i]) continue; - snprintf(filename, filename_size, "%s%s/%s.ffpreset", base[i], i != 1 ? "" : "/.ffmpeg", preset_name); + snprintf(filename, filename_size, "%s%s/%s.ffpreset", base[i], + i != 1 ? "" : "/.ffmpeg", preset_name); f = fopen(filename, "r"); if (!f && codec_name) { snprintf(filename, filename_size, - "%s%s/%s-%s.ffpreset", base[i], i != 1 ? 
"" : "/.ffmpeg", codec_name, preset_name); + "%s%s/%s-%s.ffpreset", + base[i], i != 1 ? "" : "/.ffmpeg", codec_name, + preset_name); f = fopen(filename, "r"); } } @@ -1000,22 +1014,23 @@ FILE *get_preset_file(char *filename, size_t filename_size, int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec) { - if (*spec <= '9' && *spec >= '0') /* opt:index */ + if (*spec <= '9' && *spec >= '0') /* opt:index */ return strtol(spec, NULL, 0) == st->index; - else if (*spec == 'v' || *spec == 'a' || *spec == 's' || *spec == 'd' || *spec == 't') { /* opt:[vasdt] */ + else if (*spec == 'v' || *spec == 'a' || *spec == 's' || *spec == 'd' || + *spec == 't') { /* opt:[vasdt] */ enum AVMediaType type; switch (*spec++) { - case 'v': type = AVMEDIA_TYPE_VIDEO; break; - case 'a': type = AVMEDIA_TYPE_AUDIO; break; - case 's': type = AVMEDIA_TYPE_SUBTITLE; break; - case 'd': type = AVMEDIA_TYPE_DATA; break; + case 'v': type = AVMEDIA_TYPE_VIDEO; break; + case 'a': type = AVMEDIA_TYPE_AUDIO; break; + case 's': type = AVMEDIA_TYPE_SUBTITLE; break; + case 'd': type = AVMEDIA_TYPE_DATA; break; case 't': type = AVMEDIA_TYPE_ATTACHMENT; break; default: abort(); // never reached, silence warning } if (type != st->codec->codec_type) return 0; - if (*spec++ == ':') { /* possibly followed by :index */ + if (*spec++ == ':') { /* possibly followed by :index */ int i, index = strtol(spec, NULL, 0); for (i = 0; i < s->nb_streams; i++) if (s->streams[i]->codec->codec_type == type && index-- == 0) @@ -1051,11 +1066,13 @@ int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec) return AVERROR(EINVAL); } -AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec, AVFormatContext *s, AVStream *st) +AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec, + AVFormatContext *s, AVStream *st) { AVDictionary *ret = NULL; AVDictionaryEntry *t = NULL; - int flags = s->oformat ? AV_OPT_FLAG_ENCODING_PARAM : AV_OPT_FLAG_DECODING_PARAM; + int flags = s->oformat ? 
AV_OPT_FLAG_ENCODING_PARAM + : AV_OPT_FLAG_DECODING_PARAM; char prefix = 0; const AVClass *cc = avcodec_get_class(); @@ -1063,9 +1080,18 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec, AVFormatCont return NULL; switch (codec->type) { - case AVMEDIA_TYPE_VIDEO: prefix = 'v'; flags |= AV_OPT_FLAG_VIDEO_PARAM; break; - case AVMEDIA_TYPE_AUDIO: prefix = 'a'; flags |= AV_OPT_FLAG_AUDIO_PARAM; break; - case AVMEDIA_TYPE_SUBTITLE: prefix = 's'; flags |= AV_OPT_FLAG_SUBTITLE_PARAM; break; + case AVMEDIA_TYPE_VIDEO: + prefix = 'v'; + flags |= AV_OPT_FLAG_VIDEO_PARAM; + break; + case AVMEDIA_TYPE_AUDIO: + prefix = 'a'; + flags |= AV_OPT_FLAG_AUDIO_PARAM; + break; + case AVMEDIA_TYPE_SUBTITLE: + prefix = 's'; + flags |= AV_OPT_FLAG_SUBTITLE_PARAM; + break; } while (t = av_dict_get(opts, "", t, AV_DICT_IGNORE_SUFFIX)) { @@ -1080,10 +1106,14 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec, AVFormatCont } if (av_opt_find(&cc, t->key, NULL, flags, AV_OPT_SEARCH_FAKE_OBJ) || - (codec && codec->priv_class && av_opt_find(&codec->priv_class, t->key, NULL, flags, AV_OPT_SEARCH_FAKE_OBJ))) + (codec && codec->priv_class && + av_opt_find(&codec->priv_class, t->key, NULL, flags, + AV_OPT_SEARCH_FAKE_OBJ))) av_dict_set(&ret, t->key, t->value, 0); - else if (t->key[0] == prefix && av_opt_find(&cc, t->key+1, NULL, flags, AV_OPT_SEARCH_FAKE_OBJ)) - av_dict_set(&ret, t->key+1, t->value, 0); + else if (t->key[0] == prefix && + av_opt_find(&cc, t->key + 1, NULL, flags, + AV_OPT_SEARCH_FAKE_OBJ)) + av_dict_set(&ret, t->key + 1, t->value, 0); if (p) *p = ':'; @@ -1091,7 +1121,8 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec, AVFormatCont return ret; } -AVDictionary **setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts) +AVDictionary **setup_find_stream_info_opts(AVFormatContext *s, + AVDictionary *codec_opts) { int i; AVDictionary **opts; @@ -1100,11 +1131,13 @@ AVDictionary **setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *cod return NULL; opts = av_mallocz(s->nb_streams * sizeof(*opts)); if (!opts) { - av_log(NULL, AV_LOG_ERROR, "Could not alloc memory for stream options.\n"); + av_log(NULL, AV_LOG_ERROR, + "Could not alloc memory for stream options.\n"); return NULL; } for (i = 0; i < s->nb_streams; i++) - opts[i] = filter_codec_opts(codec_opts, avcodec_find_decoder(s->streams[i]->codec->codec_id), s, s->streams[i]); + opts[i] = filter_codec_opts(codec_opts, avcodec_find_decoder(s->streams[i]->codec->codec_id), + s, s->streams[i]); return opts; } @@ -1175,9 +1175,9 @@ HAVE_LIST=" fork getaddrinfo gethrtime + GetProcessAffinityMask GetProcessMemoryInfo GetProcessTimes - GetSystemInfo getrusage gnu_as ibm_asm @@ -3016,8 +3016,8 @@ check_func_headers windows.h PeekNamedPipe check_func_headers io.h setmode check_func_headers lzo/lzo1x.h lzo1x_999_compress check_lib2 "windows.h psapi.h" GetProcessMemoryInfo -lpsapi +check_func_headers windows.h GetProcessAffinityMask check_func_headers windows.h GetProcessTimes -check_func_headers windows.h GetSystemInfo check_func_headers windows.h MapViewOfFile check_func_headers windows.h VirtualAlloc diff --git a/doc/APIchanges b/doc/APIchanges index d77706b401..9fa0e07640 100644 --- a/doc/APIchanges +++ b/doc/APIchanges @@ -31,21 +31,24 @@ API changes, most recent first: 2011-10-20 - b35e9e1 - lavu 51.22.0 Add av_strtok() to avstring.h. +2011-01-03 - b73ec05 - lavu 51.21.0 + Add av_popcount64 + 2011-12-18 - 8400b12 - lavc 53.28.1 Deprecate AVFrame.age. The field is unused. 
-2011-xx-xx - xxxxxxx - lavf 53.17.0 - Add avformat_open_input(). +2011-12-12 - 5266045 - lavf 53.17.0 + Add avformat_close_input(). Deprecate av_close_input_file() and av_close_input_stream(). -2011-xx-xx - xxxxxxx - lavc 53.25.0 +2011-12-02 - 0eea212 - lavc 53.25.0 Add nb_samples and extended_data fields to AVFrame. Deprecate AVCODEC_MAX_AUDIO_FRAME_SIZE. Deprecate avcodec_decode_audio3() in favor of avcodec_decode_audio4(). avcodec_decode_audio4() writes output samples to an AVFrame, which allows audio decoders to use get_buffer(). -2011-xx-xx - xxxxxxx - lavc 53.24.0 +2011-12-04 - 560f773 - lavc 53.24.0 Change AVFrame.data[4]/base[4]/linesize[4]/error[4] to [8] at next major bump. Change AVPicture.data[4]/linesize[4] to [8] at next major bump. Change AVCodecContext.error[4] to [8] at next major bump. diff --git a/libavcodec/adx.c b/libavcodec/adx.c index aa90fd89c3..1e5d89c991 100644 --- a/libavcodec/adx.c +++ b/libavcodec/adx.c @@ -58,7 +58,7 @@ int avpriv_adx_decode_header(AVCodecContext *avctx, const uint8_t *buf, /* channels */ avctx->channels = buf[7]; - if (avctx->channels > 2) + if (avctx->channels <= 0 || avctx->channels > 2) return AVERROR_INVALIDDATA; /* sample rate */ diff --git a/libavcodec/adx_parser.c b/libavcodec/adx_parser.c index bbd865454e..de3b1b073f 100644 --- a/libavcodec/adx_parser.c +++ b/libavcodec/adx_parser.c @@ -45,27 +45,31 @@ static int adx_parse(AVCodecParserContext *s1, ParseContext *pc = &s->pc; int next = END_NOT_FOUND; int i; - uint64_t state= pc->state64; + uint64_t state = pc->state64; - if(!s->header_size){ - for(i=0; i<buf_size; i++){ - state= (state<<8) | buf[i]; - if((state&0xFFFF0000FFFFFF00) == 0x8000000003120400ULL && (state&0xFF) && ((state>>32)&0xFFFF)>=4){ - s->header_size= ((state>>32)&0xFFFF) + 4; - s->block_size = BLOCK_SIZE * (state&0xFF); - s->remaining = i - 7 + s->header_size + s->block_size; - break; + if (!s->header_size) { + for (i = 0; i < buf_size; i++) { + state = (state << 8) | buf[i]; + /* check for fixed fields in ADX header for possible match */ + if ((state & 0xFFFF0000FFFFFF00) == 0x8000000003120400ULL) { + int channels = state & 0xFF; + int header_size = ((state >> 32) & 0xFFFF) + 4; + if (channels > 0 && header_size >= 8) { + s->header_size = header_size; + s->block_size = BLOCK_SIZE * channels; + s->remaining = i - 7 + s->header_size + s->block_size; + break; + } } } - pc->state64= state; + pc->state64 = state; } if (s->header_size) { - if (!s->remaining) { + if (!s->remaining) s->remaining = s->block_size; - } - if (s->remaining<=buf_size) { - next= s->remaining; + if (s->remaining <= buf_size) { + next = s->remaining; s->remaining = 0; } else s->remaining -= buf_size; diff --git a/libavcodec/adxdec.c b/libavcodec/adxdec.c index cf494c12d4..fdff6875e1 100644 --- a/libavcodec/adxdec.c +++ b/libavcodec/adxdec.c @@ -45,7 +45,8 @@ static av_cold int adx_decode_init(AVCodecContext *avctx) av_log(avctx, AV_LOG_ERROR, "error parsing ADX header\n"); return AVERROR_INVALIDDATA; } - c->channels = avctx->channels; + c->channels = avctx->channels; + c->header_parsed = 1; } avctx->sample_fmt = AV_SAMPLE_FMT_S16; @@ -106,21 +107,21 @@ static int adx_decode_frame(AVCodecContext *avctx, void *data, return buf_size; } - if(AV_RB16(buf) == 0x8000){ + if (!c->header_parsed && buf_size >= 2 && AV_RB16(buf) == 0x8000) { int header_size; - if ((ret = avpriv_adx_decode_header(avctx, buf, - buf_size, &header_size, + if ((ret = avpriv_adx_decode_header(avctx, buf, buf_size, &header_size, c->coeff)) < 0) { av_log(avctx, AV_LOG_ERROR, "error 
parsing ADX header\n"); return AVERROR_INVALIDDATA; } - c->channels = avctx->channels; - if(buf_size < header_size) + c->channels = avctx->channels; + c->header_parsed = 1; + if (buf_size < header_size) return AVERROR_INVALIDDATA; - buf += header_size; + buf += header_size; buf_size -= header_size; } - if(c->channels <= 0) + if (!c->header_parsed) return AVERROR_INVALIDDATA; /* calculate number of blocks in the packet */ diff --git a/libavcodec/alac.c b/libavcodec/alac.c index c08d2848ed..70e1a6438b 100644 --- a/libavcodec/alac.c +++ b/libavcodec/alac.c @@ -25,27 +25,23 @@ * @author 2005 David Hammerton * @see http://crazney.net/programs/itunes/alac.html * - * Note: This decoder expects a 36- (0x24-)byte QuickTime atom to be + * Note: This decoder expects a 36-byte QuickTime atom to be * passed through the extradata[_size] fields. This atom is tacked onto * the end of an 'alac' stsd atom and has the following format: - * bytes 0-3 atom size (0x24), big-endian - * bytes 4-7 atom type ('alac', not the 'alac' tag from start of stsd) - * bytes 8-35 data bytes needed by decoder * - * Extradata: - * 32bit size - * 32bit tag (=alac) - * 32bit zero? - * 32bit max sample per frame - * 8bit ?? (zero?) + * 32bit atom size + * 32bit tag ("alac") + * 32bit tag version (0) + * 32bit samples per frame (used when not set explicitly in the frames) + * 8bit compatible version (0) * 8bit sample size - * 8bit history mult - * 8bit initial history - * 8bit kmodifier - * 8bit channels? - * 16bit ?? - * 32bit max coded frame size - * 32bit bitrate? + * 8bit history mult (40) + * 8bit initial history (14) + * 8bit kmodifier (10) + * 8bit channels + * 16bit maxRun (255) + * 32bit max coded frame size (0 means unknown) + * 32bit average bitrate (0 means unknown) * 32bit samplerate */ @@ -464,24 +460,29 @@ static int alac_decode_frame(AVCodecContext *avctx, void *data, if(ret<0) return ret; - if (prediction_type[ch] == 0) { - /* adaptive fir */ - predictor_decompress_fir_adapt(alac->predicterror_buffer[ch], - alac->outputsamples_buffer[ch], - outputsamples, - readsamplesize, - predictor_coef_table[ch], - predictor_coef_num[ch], - prediction_quantitization[ch]); - } else { - av_log(avctx, AV_LOG_ERROR, "FIXME: unhandled prediction type: %i\n", prediction_type[ch]); - /* I think the only other prediction type (or perhaps this is - * just a boolean?) runs adaptive fir twice.. like: - * predictor_decompress_fir_adapt(predictor_error, tempout, ...) - * predictor_decompress_fir_adapt(predictor_error, outputsamples ...) - * little strange.. + /* adaptive FIR filter */ + if (prediction_type[ch] == 15) { + /* Prediction type 15 runs the adaptive FIR twice. + * The first pass uses the special-case coef_num = 31, while + * the second pass uses the coefs from the bitstream. + * + * However, this prediction type is not currently used by the + * reference encoder. */ + predictor_decompress_fir_adapt(alac->predicterror_buffer[ch], + alac->predicterror_buffer[ch], + outputsamples, readsamplesize, + NULL, 31, 0); + } else if (prediction_type[ch] > 0) { + av_log(avctx, AV_LOG_WARNING, "unknown prediction type: %i\n", + prediction_type[ch]); } + predictor_decompress_fir_adapt(alac->predicterror_buffer[ch], + alac->outputsamples_buffer[ch], + outputsamples, readsamplesize, + predictor_coef_table[ch], + predictor_coef_num[ch], + prediction_quantitization[ch]); } } else { /* not compressed, easy case */ @@ -584,7 +585,7 @@ static int alac_set_info(ALACContext *alac) ptr += 4; /* size */ ptr += 4; /* alac */ - ptr += 4; /* 0 ? 
*/ + ptr += 4; /* version */ if(AV_RB32(ptr) >= UINT_MAX/4){ av_log(alac->avctx, AV_LOG_ERROR, "setinfo_max_samples_per_frame too large\n"); @@ -593,15 +594,15 @@ static int alac_set_info(ALACContext *alac) /* buffer size / 2 ? */ alac->setinfo_max_samples_per_frame = bytestream_get_be32(&ptr); - ptr++; /* ??? */ + ptr++; /* compatible version */ alac->setinfo_sample_size = *ptr++; alac->setinfo_rice_historymult = *ptr++; alac->setinfo_rice_initialhistory = *ptr++; alac->setinfo_rice_kmodifier = *ptr++; alac->numchannels = *ptr++; - bytestream_get_be16(&ptr); /* ??? */ + bytestream_get_be16(&ptr); /* maxRun */ bytestream_get_be32(&ptr); /* max coded frame size */ - bytestream_get_be32(&ptr); /* bitrate ? */ + bytestream_get_be32(&ptr); /* average bitrate */ bytestream_get_be32(&ptr); /* samplerate */ return 0; diff --git a/libavcodec/alacenc.c b/libavcodec/alacenc.c index 305a5b825b..e8d1bc03f2 100644 --- a/libavcodec/alacenc.c +++ b/libavcodec/alacenc.c @@ -348,6 +348,7 @@ static void alac_entropy_coder(AlacEncodeContext *s) static void write_compressed_frame(AlacEncodeContext *s) { int i, j; + int prediction_type = 0; if (s->avctx->channels == 2) alac_stereo_decorrelation(s); @@ -358,7 +359,7 @@ static void write_compressed_frame(AlacEncodeContext *s) calc_predictor_params(s, i); - put_bits(&s->pbctx, 4, 0); // prediction type : currently only type 0 has been RE'd + put_bits(&s->pbctx, 4, prediction_type); put_bits(&s->pbctx, 4, s->lpc[i].lpc_quant); put_bits(&s->pbctx, 3, s->rc.rice_modifier); @@ -373,6 +374,14 @@ static void write_compressed_frame(AlacEncodeContext *s) for (i = 0; i < s->avctx->channels; i++) { alac_linear_predictor(s, i); + + // TODO: determine when this will actually help. for now it's not used. + if (prediction_type == 15) { + // 2nd pass 1st order filter + for (j = s->avctx->frame_size - 1; j > 0; j--) + s->predictor_buf[j] -= s->predictor_buf[j - 1]; + } + alac_entropy_coder(s); } } @@ -391,8 +400,11 @@ static av_cold int alac_encode_init(AVCodecContext *avctx) return -1; } - if(avctx->channels > 2) { - av_log(avctx, AV_LOG_ERROR, "channels > 2 not supported\n"); + /* TODO: Correctly implement multi-channel ALAC. + It is similar to multi-channel AAC, in that it has a series of + single-channel (SCE), channel-pair (CPE), and LFE elements. 
*/ + if (avctx->channels > 2) { + av_log(avctx, AV_LOG_ERROR, "only mono or stereo input is currently supported\n"); return AVERROR_PATCHWELCOME; } diff --git a/libavcodec/dca.c b/libavcodec/dca.c index 7c43b922b1..8622587efe 100644 --- a/libavcodec/dca.c +++ b/libavcodec/dca.c @@ -48,13 +48,13 @@ //#define TRACE -#define DCA_PRIM_CHANNELS_MAX (7) -#define DCA_SUBBANDS (32) -#define DCA_ABITS_MAX (32) /* Should be 28 */ -#define DCA_SUBSUBFRAMES_MAX (4) -#define DCA_SUBFRAMES_MAX (16) -#define DCA_BLOCKS_MAX (16) -#define DCA_LFE_MAX (3) +#define DCA_PRIM_CHANNELS_MAX (7) +#define DCA_SUBBANDS (32) +#define DCA_ABITS_MAX (32) /* Should be 28 */ +#define DCA_SUBSUBFRAMES_MAX (4) +#define DCA_SUBFRAMES_MAX (16) +#define DCA_BLOCKS_MAX (16) +#define DCA_LFE_MAX (3) enum DCAMode { DCA_MONO = 0, @@ -127,28 +127,45 @@ static const int dca_ext_audio_descr_mask[] = { * OV -> center back * All 2 channel configurations -> AV_CH_LAYOUT_STEREO */ - static const uint64_t dca_core_channel_layout[] = { - AV_CH_FRONT_CENTER, ///< 1, A - AV_CH_LAYOUT_STEREO, ///< 2, A + B (dual mono) - AV_CH_LAYOUT_STEREO, ///< 2, L + R (stereo) - AV_CH_LAYOUT_STEREO, ///< 2, (L+R) + (L-R) (sum-difference) - AV_CH_LAYOUT_STEREO, ///< 2, LT +RT (left and right total) - AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER, ///< 3, C+L+R - AV_CH_LAYOUT_STEREO|AV_CH_BACK_CENTER, ///< 3, L+R+S - AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER, ///< 4, C + L + R+ S - AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, ///< 4, L + R +SL+ SR - AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, ///< 5, C + L + R+ SL+SR - AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER, ///< 6, CL + CR + L + R + SL + SR - AV_CH_LAYOUT_STEREO|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER, ///< 6, C + L + R+ LR + RR + OV - AV_CH_FRONT_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_BACK_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT, ///< 6, CF+ CR+LF+ RF+LR + RR - AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER|AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, ///< 7, CL + C + CR + L + R + SL + SR - AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER|AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT, ///< 8, CL + CR + L + R + SL1 + SL2+ SR1 + SR2 - AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER|AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_BACK_CENTER|AV_CH_SIDE_RIGHT, ///< 8, CL + C+ CR + L + R + SL + S+ SR + AV_CH_FRONT_CENTER, ///< 1, A + AV_CH_LAYOUT_STEREO, ///< 2, A + B (dual mono) + AV_CH_LAYOUT_STEREO, ///< 2, L + R (stereo) + AV_CH_LAYOUT_STEREO, ///< 2, (L + R) + (L - R) (sum-difference) + AV_CH_LAYOUT_STEREO, ///< 2, LT + RT (left and right total) + AV_CH_LAYOUT_STEREO | AV_CH_FRONT_CENTER, ///< 3, C + L + R + AV_CH_LAYOUT_STEREO | AV_CH_BACK_CENTER, ///< 3, L + R + S + AV_CH_LAYOUT_STEREO | AV_CH_FRONT_CENTER | AV_CH_BACK_CENTER, ///< 4, C + L + R + S + AV_CH_LAYOUT_STEREO | AV_CH_SIDE_LEFT | AV_CH_SIDE_RIGHT, ///< 4, L + R + SL + SR + + AV_CH_LAYOUT_STEREO | AV_CH_FRONT_CENTER | AV_CH_SIDE_LEFT | + AV_CH_SIDE_RIGHT, ///< 5, C + L + R + SL + SR + + AV_CH_LAYOUT_STEREO | AV_CH_SIDE_LEFT | AV_CH_SIDE_RIGHT | + AV_CH_FRONT_LEFT_OF_CENTER | AV_CH_FRONT_RIGHT_OF_CENTER, ///< 6, CL + CR + L + R + SL + SR + + AV_CH_LAYOUT_STEREO | AV_CH_BACK_LEFT | AV_CH_BACK_RIGHT | + AV_CH_FRONT_CENTER | AV_CH_BACK_CENTER, ///< 6, C + L + R + LR + RR + OV 
+ + AV_CH_FRONT_CENTER | AV_CH_FRONT_RIGHT_OF_CENTER | + AV_CH_FRONT_LEFT_OF_CENTER | AV_CH_BACK_CENTER | + AV_CH_BACK_LEFT | AV_CH_BACK_RIGHT, ///< 6, CF + CR + LF + RF + LR + RR + + AV_CH_FRONT_LEFT_OF_CENTER | AV_CH_FRONT_CENTER | + AV_CH_FRONT_RIGHT_OF_CENTER | AV_CH_LAYOUT_STEREO | + AV_CH_SIDE_LEFT | AV_CH_SIDE_RIGHT, ///< 7, CL + C + CR + L + R + SL + SR + + AV_CH_FRONT_LEFT_OF_CENTER | AV_CH_FRONT_RIGHT_OF_CENTER | + AV_CH_LAYOUT_STEREO | AV_CH_SIDE_LEFT | AV_CH_SIDE_RIGHT | + AV_CH_BACK_LEFT | AV_CH_BACK_RIGHT, ///< 8, CL + CR + L + R + SL1 + SL2 + SR1 + SR2 + + AV_CH_FRONT_LEFT_OF_CENTER | AV_CH_FRONT_CENTER | + AV_CH_FRONT_RIGHT_OF_CENTER | AV_CH_LAYOUT_STEREO | + AV_CH_SIDE_LEFT | AV_CH_BACK_CENTER | AV_CH_SIDE_RIGHT, ///< 8, CL + C + CR + L + R + SL + S + SR }; static const int8_t dca_lfe_index[] = { - 1,2,2,2,2,3,2,3,2,3,2,3,1,3,2,3 + 1, 2, 2, 2, 2, 3, 2, 3, 2, 3, 2, 3, 1, 3, 2, 3 }; static const int8_t dca_channel_reorder_lfe[][9] = { @@ -227,19 +244,19 @@ static const int8_t dca_channel_reorder_nolfe_xch[][9] = { { 3, 2, 4, 0, 1, 5, 8, 7, 6}, }; -#define DCA_DOLBY 101 /* FIXME */ +#define DCA_DOLBY 101 /* FIXME */ -#define DCA_CHANNEL_BITS 6 -#define DCA_CHANNEL_MASK 0x3F +#define DCA_CHANNEL_BITS 6 +#define DCA_CHANNEL_MASK 0x3F -#define DCA_LFE 0x80 +#define DCA_LFE 0x80 -#define HEADER_SIZE 14 +#define HEADER_SIZE 14 -#define DCA_MAX_FRAME_SIZE 16384 -#define DCA_MAX_EXSS_HEADER_SIZE 4096 +#define DCA_MAX_FRAME_SIZE 16384 +#define DCA_MAX_EXSS_HEADER_SIZE 4096 -#define DCA_BUFFER_PADDING_SIZE 1024 +#define DCA_BUFFER_PADDING_SIZE 1024 /** Bit allocation */ typedef struct { @@ -254,9 +271,11 @@ static BitAlloc dca_tmode; ///< transition mode VLCs static BitAlloc dca_scalefactor; ///< scalefactor VLCs static BitAlloc dca_smpl_bitalloc[11]; ///< samples VLCs -static av_always_inline int get_bitalloc(GetBitContext *gb, BitAlloc *ba, int idx) +static av_always_inline int get_bitalloc(GetBitContext *gb, BitAlloc *ba, + int idx) { - return get_vlc2(gb, ba->vlc[idx].table, ba->vlc[idx].bits, ba->wrap) + ba->offset; + return get_vlc2(gb, ba->vlc[idx].table, ba->vlc[idx].bits, ba->wrap) + + ba->offset; } typedef struct { @@ -306,8 +325,8 @@ typedef struct { float scalefactor_adj[DCA_PRIM_CHANNELS_MAX][DCA_ABITS_MAX]; ///< scale factor adjustment /* Primary audio coding side information */ - int subsubframes[DCA_SUBFRAMES_MAX]; ///< number of subsubframes - int partial_samples[DCA_SUBFRAMES_MAX]; ///< partial subsubframe samples count + int subsubframes[DCA_SUBFRAMES_MAX]; ///< number of subsubframes + int partial_samples[DCA_SUBFRAMES_MAX]; ///< partial subsubframe samples count int prediction_mode[DCA_PRIM_CHANNELS_MAX][DCA_SUBBANDS]; ///< prediction mode (ADPCM used or not) int prediction_vq[DCA_PRIM_CHANNELS_MAX][DCA_SUBBANDS]; ///< prediction VQ coefs int bitalloc[DCA_PRIM_CHANNELS_MAX][DCA_SUBBANDS]; ///< bit allocation index @@ -334,13 +353,13 @@ typedef struct { float scale_bias; ///< output scale DECLARE_ALIGNED(32, float, subband_samples)[DCA_BLOCKS_MAX][DCA_PRIM_CHANNELS_MAX][DCA_SUBBANDS][8]; - DECLARE_ALIGNED(32, float, samples)[(DCA_PRIM_CHANNELS_MAX+1)*256]; - const float *samples_chanptr[DCA_PRIM_CHANNELS_MAX+1]; + DECLARE_ALIGNED(32, float, samples)[(DCA_PRIM_CHANNELS_MAX + 1) * 256]; + const float *samples_chanptr[DCA_PRIM_CHANNELS_MAX + 1]; uint8_t dca_buffer[DCA_MAX_FRAME_SIZE + DCA_MAX_EXSS_HEADER_SIZE + DCA_BUFFER_PADDING_SIZE]; int dca_buffer_size; ///< how much data is in the dca_buffer - const int8_t* channel_order_tab; ///< channel reordering table, lfe 
and non lfe + const int8_t *channel_order_tab; ///< channel reordering table, lfe and non lfe GetBitContext gb; /* Current position in DCA frame */ int current_subframe; @@ -415,13 +434,15 @@ static av_cold void dca_init_vlcs(void) } for (i = 0; i < 10; i++) - for (j = 0; j < 7; j++){ - if (!bitalloc_codes[i][j]) break; - dca_smpl_bitalloc[i+1].offset = bitalloc_offsets[i]; - dca_smpl_bitalloc[i+1].wrap = 1 + (j > 4); - dca_smpl_bitalloc[i+1].vlc[j].table = &dca_table[dca_vlc_offs[c]]; - dca_smpl_bitalloc[i+1].vlc[j].table_allocated = dca_vlc_offs[c + 1] - dca_vlc_offs[c]; - init_vlc(&dca_smpl_bitalloc[i+1].vlc[j], bitalloc_maxbits[i][j], + for (j = 0; j < 7; j++) { + if (!bitalloc_codes[i][j]) + break; + dca_smpl_bitalloc[i + 1].offset = bitalloc_offsets[i]; + dca_smpl_bitalloc[i + 1].wrap = 1 + (j > 4); + dca_smpl_bitalloc[i + 1].vlc[j].table = &dca_table[dca_vlc_offs[c]]; + dca_smpl_bitalloc[i + 1].vlc[j].table_allocated = dca_vlc_offs[c + 1] - dca_vlc_offs[c]; + + init_vlc(&dca_smpl_bitalloc[i + 1].vlc[j], bitalloc_maxbits[i][j], bitalloc_sizes[i], bitalloc_bits[i][j], 1, 1, bitalloc_codes[i][j], 2, 2, INIT_VLC_USE_NEW_STATIC); @@ -432,19 +453,19 @@ static av_cold void dca_init_vlcs(void) static inline void get_array(GetBitContext *gb, int *dst, int len, int bits) { - while(len--) + while (len--) *dst++ = get_bits(gb, bits); } -static int dca_parse_audio_coding_header(DCAContext * s, int base_channel) +static int dca_parse_audio_coding_header(DCAContext *s, int base_channel) { int i, j; static const float adj_table[4] = { 1.0, 1.1250, 1.2500, 1.4375 }; static const int bitlen[11] = { 0, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3 }; - static const int thr[11] = { 0, 1, 3, 3, 3, 3, 7, 7, 7, 7, 7 }; + static const int thr[11] = { 0, 1, 3, 3, 3, 3, 7, 7, 7, 7, 7 }; - s->total_channels = get_bits(&s->gb, 3) + 1 + base_channel; - s->prim_channels = s->total_channels; + s->total_channels = get_bits(&s->gb, 3) + 1 + base_channel; + s->prim_channels = s->total_channels; if (s->prim_channels > DCA_PRIM_CHANNELS_MAX) s->prim_channels = DCA_PRIM_CHANNELS_MAX; @@ -487,23 +508,28 @@ static int dca_parse_audio_coding_header(DCAContext * s, int base_channel) get_bits(&s->gb, 16); } - s->current_subframe = 0; + s->current_subframe = 0; s->current_subsubframe = 0; #ifdef TRACE av_log(s->avctx, AV_LOG_DEBUG, "subframes: %i\n", s->subframes); av_log(s->avctx, AV_LOG_DEBUG, "prim channels: %i\n", s->prim_channels); - for (i = base_channel; i < s->prim_channels; i++){ - av_log(s->avctx, AV_LOG_DEBUG, "subband activity: %i\n", s->subband_activity[i]); - av_log(s->avctx, AV_LOG_DEBUG, "vq start subband: %i\n", s->vq_start_subband[i]); - av_log(s->avctx, AV_LOG_DEBUG, "joint intensity: %i\n", s->joint_intensity[i]); - av_log(s->avctx, AV_LOG_DEBUG, "transient mode codebook: %i\n", s->transient_huffman[i]); - av_log(s->avctx, AV_LOG_DEBUG, "scale factor codebook: %i\n", s->scalefactor_huffman[i]); - av_log(s->avctx, AV_LOG_DEBUG, "bit allocation quantizer: %i\n", s->bitalloc_huffman[i]); + for (i = base_channel; i < s->prim_channels; i++) { + av_log(s->avctx, AV_LOG_DEBUG, "subband activity: %i\n", + s->subband_activity[i]); + av_log(s->avctx, AV_LOG_DEBUG, "vq start subband: %i\n", + s->vq_start_subband[i]); + av_log(s->avctx, AV_LOG_DEBUG, "joint intensity: %i\n", + s->joint_intensity[i]); + av_log(s->avctx, AV_LOG_DEBUG, "transient mode codebook: %i\n", + s->transient_huffman[i]); + av_log(s->avctx, AV_LOG_DEBUG, "scale factor codebook: %i\n", + s->scalefactor_huffman[i]); + av_log(s->avctx, AV_LOG_DEBUG, "bit allocation 
quantizer: %i\n", + s->bitalloc_huffman[i]); av_log(s->avctx, AV_LOG_DEBUG, "quant index huff:"); for (j = 0; j < 11; j++) - av_log(s->avctx, AV_LOG_DEBUG, " %i", - s->quant_index_huffman[i][j]); + av_log(s->avctx, AV_LOG_DEBUG, " %i", s->quant_index_huffman[i][j]); av_log(s->avctx, AV_LOG_DEBUG, "\n"); av_log(s->avctx, AV_LOG_DEBUG, "scalefac adj:"); for (j = 0; j < 11; j++) @@ -512,10 +538,10 @@ static int dca_parse_audio_coding_header(DCAContext * s, int base_channel) } #endif - return 0; + return 0; } -static int dca_parse_frame_header(DCAContext * s) +static int dca_parse_frame_header(DCAContext *s) { init_get_bits(&s->gb, s->dca_buffer, s->dca_buffer_size * 8); @@ -564,7 +590,8 @@ static int dca_parse_frame_header(DCAContext * s) /* FIXME: channels mixing levels */ s->output = s->amode; - if (s->lfe) s->output |= DCA_LFE; + if (s->lfe) + s->output |= DCA_LFE; #ifdef TRACE av_log(s->avctx, AV_LOG_DEBUG, "frame type: %i\n", s->frame_type); @@ -613,15 +640,15 @@ static int dca_parse_frame_header(DCAContext * s) static inline int get_scale(GetBitContext *gb, int level, int value) { - if (level < 5) { - /* huffman encoded */ - value += get_bitalloc(gb, &dca_scalefactor, level); - } else if (level < 8) - value = get_bits(gb, level + 1); - return value; + if (level < 5) { + /* huffman encoded */ + value += get_bitalloc(gb, &dca_scalefactor, level); + } else if (level < 8) + value = get_bits(gb, level + 1); + return value; } -static int dca_subframe_header(DCAContext * s, int base_channel, int block_index) +static int dca_subframe_header(DCAContext *s, int base_channel, int block_index) { /* Primary audio coding side information */ int j, k; @@ -630,7 +657,7 @@ static int dca_subframe_header(DCAContext * s, int base_channel, int block_index return AVERROR_INVALIDDATA; if (!base_channel) { - s->subsubframes[s->current_subframe] = get_bits(&s->gb, 2) + 1; + s->subsubframes[s->current_subframe] = get_bits(&s->gb, 2) + 1; s->partial_samples[s->current_subframe] = get_bits(&s->gb, 3); } @@ -666,8 +693,8 @@ static int dca_subframe_header(DCAContext * s, int base_channel, int block_index } if (s->bitalloc[j][k] > 26) { -// av_log(s->avctx,AV_LOG_DEBUG,"bitalloc index [%i][%i] too big (%i)\n", -// j, k, s->bitalloc[j][k]); + // av_log(s->avctx, AV_LOG_DEBUG, "bitalloc index [%i][%i] too big (%i)\n", + // j, k, s->bitalloc[j][k]); return AVERROR_INVALIDDATA; } } @@ -692,7 +719,8 @@ static int dca_subframe_header(DCAContext * s, int base_channel, int block_index const uint32_t *scale_table; int scale_sum; - memset(s->scale_factor[j], 0, s->subband_activity[j] * sizeof(s->scale_factor[0][0][0]) * 2); + memset(s->scale_factor[j], 0, + s->subband_activity[j] * sizeof(s->scale_factor[0][0][0]) * 2); if (s->scalefactor_huffman[j] == 6) scale_table = scale_factor_quant7; @@ -810,9 +838,11 @@ static int dca_subframe_header(DCAContext * s, int base_channel, int block_index } #ifdef TRACE - av_log(s->avctx, AV_LOG_DEBUG, "subsubframes: %i\n", s->subsubframes[s->current_subframe]); + av_log(s->avctx, AV_LOG_DEBUG, "subsubframes: %i\n", + s->subsubframes[s->current_subframe]); av_log(s->avctx, AV_LOG_DEBUG, "partial samples: %i\n", s->partial_samples[s->current_subframe]); + for (j = base_channel; j < s->prim_channels; j++) { av_log(s->avctx, AV_LOG_DEBUG, "prediction mode:"); for (k = 0; k < s->subband_activity[j]; k++) @@ -821,12 +851,12 @@ static int dca_subframe_header(DCAContext * s, int base_channel, int block_index } for (j = base_channel; j < s->prim_channels; j++) { for (k = 0; k < 
s->subband_activity[j]; k++) - av_log(s->avctx, AV_LOG_DEBUG, - "prediction coefs: %f, %f, %f, %f\n", - (float) adpcm_vb[s->prediction_vq[j][k]][0] / 8192, - (float) adpcm_vb[s->prediction_vq[j][k]][1] / 8192, - (float) adpcm_vb[s->prediction_vq[j][k]][2] / 8192, - (float) adpcm_vb[s->prediction_vq[j][k]][3] / 8192); + av_log(s->avctx, AV_LOG_DEBUG, + "prediction coefs: %f, %f, %f, %f\n", + (float) adpcm_vb[s->prediction_vq[j][k]][0] / 8192, + (float) adpcm_vb[s->prediction_vq[j][k]][1] / 8192, + (float) adpcm_vb[s->prediction_vq[j][k]][2] / 8192, + (float) adpcm_vb[s->prediction_vq[j][k]][3] / 8192); } for (j = base_channel; j < s->prim_channels; j++) { av_log(s->avctx, AV_LOG_DEBUG, "bitalloc index: "); @@ -862,8 +892,10 @@ static int dca_subframe_header(DCAContext * s, int base_channel, int block_index if (!base_channel && s->prim_channels > 2 && s->downmix) { av_log(s->avctx, AV_LOG_DEBUG, "Downmix coeffs:\n"); for (j = 0; j < s->prim_channels; j++) { - av_log(s->avctx, AV_LOG_DEBUG, "Channel 0,%d = %f\n", j, dca_downmix_coeffs[s->downmix_coef[j][0]]); - av_log(s->avctx, AV_LOG_DEBUG, "Channel 1,%d = %f\n", j, dca_downmix_coeffs[s->downmix_coef[j][1]]); + av_log(s->avctx, AV_LOG_DEBUG, "Channel 0, %d = %f\n", j, + dca_downmix_coeffs[s->downmix_coef[j][0]]); + av_log(s->avctx, AV_LOG_DEBUG, "Channel 1, %d = %f\n", j, + dca_downmix_coeffs[s->downmix_coef[j][1]]); } av_log(s->avctx, AV_LOG_DEBUG, "\n"); } @@ -884,7 +916,7 @@ static int dca_subframe_header(DCAContext * s, int base_channel, int block_index return 0; } -static void qmf_32_subbands(DCAContext * s, int chans, +static void qmf_32_subbands(DCAContext *s, int chans, float samples_in[32][8], float *samples_out, float scale) { @@ -894,7 +926,7 @@ static void qmf_32_subbands(DCAContext * s, int chans, int sb_act = s->subband_activity[chans]; int subindex; - scale *= sqrt(1/8.0); + scale *= sqrt(1 / 8.0); /* Select filter */ if (!s->multirate_inter) /* Non-perfect reconstruction */ @@ -908,18 +940,18 @@ static void qmf_32_subbands(DCAContext * s, int chans, /* Reconstructed channel sample index */ for (subindex = 0; subindex < 8; subindex++) { /* Load in one sample from each subband and clear inactive subbands */ - for (i = 0; i < sb_act; i++){ + for (i = 0; i < sb_act; i++) { unsigned sign = (i - 1) & 2; - uint32_t v = AV_RN32A(&samples_in[i][subindex]) ^ sign << 30; + uint32_t v = AV_RN32A(&samples_in[i][subindex]) ^ sign << 30; AV_WN32A(&s->raXin[i], v); } s->synth.synth_filter_float(&s->imdct, - s->subband_fir_hist[chans], &s->hist_index[chans], - s->subband_fir_noidea[chans], prCoeff, - samples_out, s->raXin, scale); - samples_out+= 32; - + s->subband_fir_hist[chans], + &s->hist_index[chans], + s->subband_fir_noidea[chans], prCoeff, + samples_out, s->raXin, scale); + samples_out += 32; } } @@ -949,45 +981,44 @@ static void lfe_interpolation_fir(DCAContext *s, int decimation_select, } /* Interpolation */ for (deciindex = 0; deciindex < num_deci_sample; deciindex++) { - s->dcadsp.lfe_fir(samples_out, samples_in, prCoeff, decifactor, - scale); + s->dcadsp.lfe_fir(samples_out, samples_in, prCoeff, decifactor, scale); samples_in++; samples_out += 2 * decifactor; } } /* downmixing routines */ -#define MIX_REAR1(samples, si1, rs, coef) \ - samples[i] += samples[si1] * coef[rs][0]; \ - samples[i+256] += samples[si1] * coef[rs][1]; - -#define MIX_REAR2(samples, si1, si2, rs, coef) \ - samples[i] += samples[si1] * coef[rs][0] + samples[si2] * coef[rs+1][0]; \ - samples[i+256] += samples[si1] * coef[rs][1] + samples[si2] * coef[rs+1][1]; - 
-#define MIX_FRONT3(samples, coef) \ - t = samples[i+c]; \ - u = samples[i+l]; \ - v = samples[i+r]; \ +#define MIX_REAR1(samples, si1, rs, coef) \ + samples[i] += samples[si1] * coef[rs][0]; \ + samples[i+256] += samples[si1] * coef[rs][1]; + +#define MIX_REAR2(samples, si1, si2, rs, coef) \ + samples[i] += samples[si1] * coef[rs][0] + samples[si2] * coef[rs + 1][0]; \ + samples[i+256] += samples[si1] * coef[rs][1] + samples[si2] * coef[rs + 1][1]; + +#define MIX_FRONT3(samples, coef) \ + t = samples[i + c]; \ + u = samples[i + l]; \ + v = samples[i + r]; \ samples[i] = t * coef[0][0] + u * coef[1][0] + v * coef[2][0]; \ samples[i+256] = t * coef[0][1] + u * coef[1][1] + v * coef[2][1]; -#define DOWNMIX_TO_STEREO(op1, op2) \ - for (i = 0; i < 256; i++){ \ - op1 \ - op2 \ +#define DOWNMIX_TO_STEREO(op1, op2) \ + for (i = 0; i < 256; i++) { \ + op1 \ + op2 \ } static void dca_downmix(float *samples, int srcfmt, int downmix_coef[DCA_PRIM_CHANNELS_MAX][2], const int8_t *channel_mapping) { - int c,l,r,sl,sr,s; + int c, l, r, sl, sr, s; int i; float t, u, v; float coef[DCA_PRIM_CHANNELS_MAX][2]; - for (i=0; i<DCA_PRIM_CHANNELS_MAX; i++) { + for (i = 0; i < DCA_PRIM_CHANNELS_MAX; i++) { coef[i][0] = dca_downmix_coeffs[downmix_coef[i][0]]; coef[i][1] = dca_downmix_coeffs[downmix_coef[i][1]]; } @@ -1006,11 +1037,11 @@ static void dca_downmix(float *samples, int srcfmt, c = channel_mapping[0] * 256; l = channel_mapping[1] * 256; r = channel_mapping[2] * 256; - DOWNMIX_TO_STEREO(MIX_FRONT3(samples, coef),); + DOWNMIX_TO_STEREO(MIX_FRONT3(samples, coef), ); break; case DCA_2F1R: s = channel_mapping[2] * 256; - DOWNMIX_TO_STEREO(MIX_REAR1(samples, i + s, 2, coef),); + DOWNMIX_TO_STEREO(MIX_REAR1(samples, i + s, 2, coef), ); break; case DCA_3F1R: c = channel_mapping[0] * 256; @@ -1023,12 +1054,12 @@ static void dca_downmix(float *samples, int srcfmt, case DCA_2F2R: sl = channel_mapping[2] * 256; sr = channel_mapping[3] * 256; - DOWNMIX_TO_STEREO(MIX_REAR2(samples, i + sl, i + sr, 2, coef),); + DOWNMIX_TO_STEREO(MIX_REAR2(samples, i + sl, i + sr, 2, coef), ); break; case DCA_3F2R: - c = channel_mapping[0] * 256; - l = channel_mapping[1] * 256; - r = channel_mapping[2] * 256; + c = channel_mapping[0] * 256; + l = channel_mapping[1] * 256; + r = channel_mapping[2] * 256; sl = channel_mapping[3] * 256; sr = channel_mapping[4] * 256; DOWNMIX_TO_STEREO(MIX_FRONT3(samples, coef), @@ -1048,7 +1079,7 @@ static int decode_blockcode(int code, int levels, int *values) for (i = 0; i < 4; i++) { int div = FASTDIV(code, levels); - values[i] = code - offset - div*levels; + values[i] = code - offset - div * levels; code = div; } @@ -1062,8 +1093,8 @@ static int decode_blockcodes(int code1, int code2, int levels, int *values) } #endif -static const uint8_t abits_sizes[7] = { 7, 10, 12, 13, 15, 17, 19 }; -static const uint8_t abits_levels[7] = { 3, 5, 7, 9, 13, 17, 25 }; +static const uint8_t abits_sizes[7] = { 7, 10, 12, 13, 15, 17, 19 }; +static const uint8_t abits_levels[7] = { 3, 5, 7, 9, 13, 17, 25 }; #ifndef int8x8_fmul_int32 static inline void int8x8_fmul_int32(float *dst, const int8_t *src, int scale) @@ -1075,7 +1106,7 @@ static inline void int8x8_fmul_int32(float *dst, const int8_t *src, int scale) } #endif -static int dca_subsubframe(DCAContext * s, int base_channel, int block_index) +static int dca_subsubframe(DCAContext *s, int base_channel, int block_index) { int k, l; int subsubframe = s->current_subsubframe; @@ -1118,20 +1149,21 @@ static int dca_subsubframe(DCAContext * s, int base_channel, int 
block_index) /* * Extract bits from the bit stream */ - if (!abits){ + if (!abits) { memset(subband_samples[k][l], 0, 8 * sizeof(subband_samples[0][0][0])); } else { /* Deal with transients */ int sfi = s->transition_mode[k][l] && subsubframe >= s->transition_mode[k][l]; - float rscale = quant_step_size * s->scale_factor[k][l][sfi] * s->scalefactor_adj[k][sel]; + float rscale = quant_step_size * s->scale_factor[k][l][sfi] * + s->scalefactor_adj[k][sel]; - if (abits >= 11 || !dca_smpl_bitalloc[abits].vlc[sel].table){ - if (abits <= 7){ + if (abits >= 11 || !dca_smpl_bitalloc[abits].vlc[sel].table) { + if (abits <= 7) { /* Block code */ int block_code1, block_code2, size, levels, err; - size = abits_sizes[abits-1]; - levels = abits_levels[abits-1]; + size = abits_sizes[abits - 1]; + levels = abits_levels[abits - 1]; block_code1 = get_bits(&s->gb, size); block_code2 = get_bits(&s->gb, size); @@ -1142,19 +1174,20 @@ static int dca_subsubframe(DCAContext * s, int base_channel, int block_index) "ERROR: block code look-up failed\n"); return AVERROR_INVALIDDATA; } - }else{ + } else { /* no coding */ for (m = 0; m < 8; m++) block[m] = get_sbits(&s->gb, abits - 3); } - }else{ + } else { /* Huffman coded */ for (m = 0; m < 8; m++) - block[m] = get_bitalloc(&s->gb, &dca_smpl_bitalloc[abits], sel); + block[m] = get_bitalloc(&s->gb, + &dca_smpl_bitalloc[abits], sel); } s->fmt_conv.int32_to_float_fmul_scalar(subband_samples[k][l], - block, rscale, 8); + block, rscale, 8); } /* @@ -1171,8 +1204,7 @@ static int dca_subsubframe(DCAContext * s, int base_channel, int block_index) else if (s->predictor_history) subband_samples[k][l][m] += (adpcm_vb[s->prediction_vq[k][l]][n - 1] * - s->subband_samples_hist[k][l][m - n + - 4] / 8192); + s->subband_samples_hist[k][l][m - n + 4] / 8192); } } } @@ -1186,7 +1218,8 @@ static int dca_subsubframe(DCAContext * s, int base_channel, int block_index) int hfvq = s->high_freq_vq[k][l]; if (!s->debug_flag & 0x01) { - av_log(s->avctx, AV_LOG_DEBUG, "Stream with high frequencies VQ coding\n"); + av_log(s->avctx, AV_LOG_DEBUG, + "Stream with high frequencies VQ coding\n"); s->debug_flag |= 0x01; } @@ -1210,23 +1243,25 @@ static int dca_subsubframe(DCAContext * s, int base_channel, int block_index) /* Backup predictor history for adpcm */ for (k = base_channel; k < s->prim_channels; k++) for (l = 0; l < s->vq_start_subband[k]; l++) - memcpy(s->subband_samples_hist[k][l], &subband_samples[k][l][4], - 4 * sizeof(subband_samples[0][0][0])); + memcpy(s->subband_samples_hist[k][l], + &subband_samples[k][l][4], + 4 * sizeof(subband_samples[0][0][0])); return 0; } -static int dca_filter_channels(DCAContext * s, int block_index) +static int dca_filter_channels(DCAContext *s, int block_index) { float (*subband_samples)[DCA_SUBBANDS][8] = s->subband_samples[block_index]; int k; /* 32 subbands QMF */ for (k = 0; k < s->prim_channels; k++) { -/* static float pcm_to_double[8] = - {32768.0, 32768.0, 524288.0, 524288.0, 0, 8388608.0, 8388608.0};*/ - qmf_32_subbands(s, k, subband_samples[k], &s->samples[256 * s->channel_order_tab[k]], - M_SQRT1_2*s->scale_bias /*pcm_to_double[s->source_pcm_res] */ ); +/* static float pcm_to_double[8] = { 32768.0, 32768.0, 524288.0, 524288.0, + 0, 8388608.0, 8388608.0 };*/ + qmf_32_subbands(s, k, subband_samples[k], + &s->samples[256 * s->channel_order_tab[k]], + M_SQRT1_2 * s->scale_bias /* pcm_to_double[s->source_pcm_res] */); } /* Down mixing */ @@ -1239,7 +1274,7 @@ static int dca_filter_channels(DCAContext * s, int block_index) lfe_interpolation_fir(s, 
s->lfe, 2 * s->lfe, s->lfe_data + 2 * s->lfe * (block_index + 4), &s->samples[256 * dca_lfe_index[s->amode]], - (1.0/256.0)*s->scale_bias); + (1.0 / 256.0) * s->scale_bias); /* Outputs 20bits pcm samples */ } @@ -1247,7 +1282,7 @@ static int dca_filter_channels(DCAContext * s, int block_index) } -static int dca_subframe_footer(DCAContext * s, int base_channel) +static int dca_subframe_footer(DCAContext *s, int base_channel) { int aux_data_count = 0, i; @@ -1279,7 +1314,7 @@ static int dca_subframe_footer(DCAContext * s, int base_channel) * @param s pointer to the DCAContext */ -static int dca_decode_block(DCAContext * s, int base_channel, int block_index) +static int dca_decode_block(DCAContext *s, int base_channel, int block_index) { int ret; @@ -1327,8 +1362,8 @@ static int dca_decode_block(DCAContext * s, int base_channel, int block_index) /** * Convert bitstream to one representation based on sync marker */ -static int dca_convert_bitstream(const uint8_t * src, int src_size, uint8_t * dst, - int max_size) +static int dca_convert_bitstream(const uint8_t *src, int src_size, uint8_t *dst, + int max_size) { uint32_t mrk; int i, tmp; @@ -1336,7 +1371,7 @@ static int dca_convert_bitstream(const uint8_t * src, int src_size, uint8_t * ds uint16_t *sdst = (uint16_t *) dst; PutBitContext pb; - if ((unsigned)src_size > (unsigned)max_size) { + if ((unsigned) src_size > (unsigned) max_size) { // av_log(NULL, AV_LOG_ERROR, "Input frame size larger than DCA_MAX_FRAME_SIZE!\n"); // return -1; src_size = max_size; @@ -1371,18 +1406,16 @@ static int dca_convert_bitstream(const uint8_t * src, int src_size, uint8_t * ds static int dca_exss_mask2count(int mask) { /* count bits that mean speaker pairs twice */ - return av_popcount(mask) - + av_popcount(mask & ( - DCA_EXSS_CENTER_LEFT_RIGHT - | DCA_EXSS_FRONT_LEFT_RIGHT - | DCA_EXSS_FRONT_HIGH_LEFT_RIGHT - | DCA_EXSS_WIDE_LEFT_RIGHT - | DCA_EXSS_SIDE_LEFT_RIGHT - | DCA_EXSS_SIDE_HIGH_LEFT_RIGHT - | DCA_EXSS_SIDE_REAR_LEFT_RIGHT - | DCA_EXSS_REAR_LEFT_RIGHT - | DCA_EXSS_REAR_HIGH_LEFT_RIGHT - )); + return av_popcount(mask) + + av_popcount(mask & (DCA_EXSS_CENTER_LEFT_RIGHT | + DCA_EXSS_FRONT_LEFT_RIGHT | + DCA_EXSS_FRONT_HIGH_LEFT_RIGHT | + DCA_EXSS_WIDE_LEFT_RIGHT | + DCA_EXSS_SIDE_LEFT_RIGHT | + DCA_EXSS_SIDE_HIGH_LEFT_RIGHT | + DCA_EXSS_SIDE_REAR_LEFT_RIGHT | + DCA_EXSS_REAR_LEFT_RIGHT | + DCA_EXSS_REAR_HIGH_LEFT_RIGHT)); } /** @@ -1408,7 +1441,7 @@ static int dca_exss_parse_asset_header(DCAContext *s) int header_size; int channels; int embedded_stereo = 0; - int embedded_6ch = 0; + int embedded_6ch = 0; int drc_code_present; int extensions_mask; int i, j; @@ -1543,7 +1576,8 @@ static int dca_exss_parse_asset_header(DCAContext *s) if (!(extensions_mask & DCA_EXT_CORE)) av_log(s->avctx, AV_LOG_WARNING, "DTS core detection mismatch.\n"); if ((extensions_mask & DCA_CORE_EXTS) != s->core_ext_mask) - av_log(s->avctx, AV_LOG_WARNING, "DTS extensions detection mismatch (%d, %d)\n", + av_log(s->avctx, AV_LOG_WARNING, + "DTS extensions detection mismatch (%d, %d)\n", extensions_mask & DCA_CORE_EXTS, s->core_ext_mask); return 0; @@ -1568,7 +1602,7 @@ static void dca_exss_parse_header(DCAContext *s) ss_index = get_bits(&s->gb, 2); blownup = get_bits1(&s->gb); - skip_bits(&s->gb, 8 + 4 * blownup); // header_size + skip_bits(&s->gb, 8 + 4 * blownup); // header_size skip_bits(&s->gb, 16 + 4 * blownup); // hd_size s->static_fields = get_bits1(&s->gb); @@ -1609,18 +1643,18 @@ static void dca_exss_parse_header(DCAContext *s) int mix_out_mask_size; skip_bits(&s->gb, 2); // 
adjustment level - mix_out_mask_size = (get_bits(&s->gb, 2) + 1) << 2; - s->num_mix_configs = get_bits(&s->gb, 2) + 1; + mix_out_mask_size = (get_bits(&s->gb, 2) + 1) << 2; + s->num_mix_configs = get_bits(&s->gb, 2) + 1; for (i = 0; i < s->num_mix_configs; i++) { - int mix_out_mask = get_bits(&s->gb, mix_out_mask_size); + int mix_out_mask = get_bits(&s->gb, mix_out_mask_size); s->mix_config_num_ch[i] = dca_exss_mask2count(mix_out_mask); } } } for (i = 0; i < num_assets; i++) - skip_bits_long(&s->gb, 16 + 4 * blownup); // asset size + skip_bits_long(&s->gb, 16 + 4 * blownup); // asset size for (i = 0; i < num_assets; i++) { if (dca_exss_parse_asset_header(s)) @@ -1667,8 +1701,8 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data, } //set AVCodec values with parsed data avctx->sample_rate = s->sample_rate; - avctx->bit_rate = s->bit_rate; - avctx->frame_size = s->sample_blocks * 32; + avctx->bit_rate = s->bit_rate; + avctx->frame_size = s->sample_blocks * 32; s->profile = FF_PROFILE_DTS; @@ -1700,72 +1734,71 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data, /* extensions start at 32-bit boundaries into bitstream */ skip_bits_long(&s->gb, (-get_bits_count(&s->gb)) & 31); - while(core_ss_end - get_bits_count(&s->gb) >= 32) { - uint32_t bits = get_bits_long(&s->gb, 32); + while (core_ss_end - get_bits_count(&s->gb) >= 32) { + uint32_t bits = get_bits_long(&s->gb, 32); - switch(bits) { - case 0x5a5a5a5a: { - int ext_amode, xch_fsize; + switch (bits) { + case 0x5a5a5a5a: { + int ext_amode, xch_fsize; - s->xch_base_channel = s->prim_channels; + s->xch_base_channel = s->prim_channels; - /* validate sync word using XCHFSIZE field */ - xch_fsize = show_bits(&s->gb, 10); - if((s->frame_size != (get_bits_count(&s->gb) >> 3) - 4 + xch_fsize) && - (s->frame_size != (get_bits_count(&s->gb) >> 3) - 4 + xch_fsize + 1)) - continue; - - /* skip length-to-end-of-frame field for the moment */ - skip_bits(&s->gb, 10); - - s->core_ext_mask |= DCA_EXT_XCH; + /* validate sync word using XCHFSIZE field */ + xch_fsize = show_bits(&s->gb, 10); + if ((s->frame_size != (get_bits_count(&s->gb) >> 3) - 4 + xch_fsize) && + (s->frame_size != (get_bits_count(&s->gb) >> 3) - 4 + xch_fsize + 1)) + continue; - /* extension amode should == 1, number of channels in extension */ - /* AFAIK XCh is not used for more channels */ - if ((ext_amode = get_bits(&s->gb, 4)) != 1) { - av_log(avctx, AV_LOG_ERROR, "XCh extension amode %d not" - " supported!\n",ext_amode); - continue; - } + /* skip length-to-end-of-frame field for the moment */ + skip_bits(&s->gb, 10); - /* much like core primary audio coding header */ - dca_parse_audio_coding_header(s, s->xch_base_channel); + s->core_ext_mask |= DCA_EXT_XCH; - for (i = 0; i < (s->sample_blocks / 8); i++) { - if ((ret = dca_decode_block(s, s->xch_base_channel, i))) { - av_log(avctx, AV_LOG_ERROR, "error decoding XCh extension\n"); + /* extension amode(number of channels in extension) should be 1 */ + /* AFAIK XCh is not used for more channels */ + if ((ext_amode = get_bits(&s->gb, 4)) != 1) { + av_log(avctx, AV_LOG_ERROR, "XCh extension amode %d not" + " supported!\n", ext_amode); continue; } + + /* much like core primary audio coding header */ + dca_parse_audio_coding_header(s, s->xch_base_channel); + + for (i = 0; i < (s->sample_blocks / 8); i++) + if ((ret = dca_decode_block(s, s->xch_base_channel, i))) { + av_log(avctx, AV_LOG_ERROR, "error decoding XCh extension\n"); + continue; + } + + s->xch_present = 1; + break; } + case 0x47004a03: + /* XXCh: extended 
channels */ + /* usually found either in core or HD part in DTS-HD HRA streams, + * but not in DTS-ES which contains XCh extensions instead */ + s->core_ext_mask |= DCA_EXT_XXCH; + break; + + case 0x1d95f262: { + int fsize96 = show_bits(&s->gb, 12) + 1; + if (s->frame_size != (get_bits_count(&s->gb) >> 3) - 4 + fsize96) + continue; - s->xch_present = 1; - break; - } - case 0x47004a03: - /* XXCh: extended channels */ - /* usually found either in core or HD part in DTS-HD HRA streams, - * but not in DTS-ES which contains XCh extensions instead */ - s->core_ext_mask |= DCA_EXT_XXCH; - break; - - case 0x1d95f262: { - int fsize96 = show_bits(&s->gb, 12) + 1; - if (s->frame_size != (get_bits_count(&s->gb) >> 3) - 4 + fsize96) - continue; - - av_log(avctx, AV_LOG_DEBUG, "X96 extension found at %d bits\n", get_bits_count(&s->gb)); - skip_bits(&s->gb, 12); - av_log(avctx, AV_LOG_DEBUG, "FSIZE96 = %d bytes\n", fsize96); - av_log(avctx, AV_LOG_DEBUG, "REVNO = %d\n", get_bits(&s->gb, 4)); - - s->core_ext_mask |= DCA_EXT_X96; - break; - } - } + av_log(avctx, AV_LOG_DEBUG, "X96 extension found at %d bits\n", + get_bits_count(&s->gb)); + skip_bits(&s->gb, 12); + av_log(avctx, AV_LOG_DEBUG, "FSIZE96 = %d bytes\n", fsize96); + av_log(avctx, AV_LOG_DEBUG, "REVNO = %d\n", get_bits(&s->gb, 4)); - skip_bits_long(&s->gb, (-get_bits_count(&s->gb)) & 31); - } + s->core_ext_mask |= DCA_EXT_X96; + break; + } + } + skip_bits_long(&s->gb, (-get_bits_count(&s->gb)) & 31); + } } else { /* no supported extensions, skip the rest of the core substream */ skip_bits_long(&s->gb, core_ss_end - get_bits_count(&s->gb)); @@ -1777,15 +1810,15 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data, s->profile = FF_PROFILE_DTS_ES; /* check for ExSS (HD part) */ - if (s->dca_buffer_size - s->frame_size > 32 - && get_bits_long(&s->gb, 32) == DCA_HD_MARKER) + if (s->dca_buffer_size - s->frame_size > 32 && + get_bits_long(&s->gb, 32) == DCA_HD_MARKER) dca_exss_parse_header(s); avctx->profile = s->profile; channels = s->prim_channels + !!s->lfe; - if (s->amode<16) { + if (s->amode < 16) { avctx->channel_layout = dca_core_channel_layout[s->amode]; if (s->xch_present && (!avctx->request_channels || @@ -1821,7 +1854,7 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data, s->channel_order_tab = dca_channel_order_native; } } else { - av_log(avctx, AV_LOG_ERROR, "Non standard configuration %d !\n",s->amode); + av_log(avctx, AV_LOG_ERROR, "Non standard configuration %d !\n", s->amode); return AVERROR_INVALIDDATA; } @@ -1837,8 +1870,8 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data, av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return ret; } - samples_flt = (float *)s->frame.data[0]; - samples_s16 = (int16_t *)s->frame.data[0]; + samples_flt = (float *) s->frame.data[0]; + samples_s16 = (int16_t *) s->frame.data[0]; /* filter to get final output */ for (i = 0; i < (s->sample_blocks / 8); i++) { @@ -1846,10 +1879,10 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data, /* If this was marked as a DTS-ES stream we need to subtract back- */ /* channel from SL & SR to remove matrixed back-channel signal */ - if((s->source_pcm_res & 1) && s->xch_present) { - float* back_chan = s->samples + s->channel_order_tab[s->xch_base_channel] * 256; - float* lt_chan = s->samples + s->channel_order_tab[s->xch_base_channel - 2] * 256; - float* rt_chan = s->samples + s->channel_order_tab[s->xch_base_channel - 1] * 256; + if ((s->source_pcm_res & 1) && s->xch_present) { + float *back_chan = s->samples + 
s->channel_order_tab[s->xch_base_channel] * 256; + float *lt_chan = s->samples + s->channel_order_tab[s->xch_base_channel - 2] * 256; + float *rt_chan = s->samples + s->channel_order_tab[s->xch_base_channel - 1] * 256; s->dsp.vector_fmac_scalar(lt_chan, back_chan, -M_SQRT1_2, 256); s->dsp.vector_fmac_scalar(rt_chan, back_chan, -M_SQRT1_2, 256); } @@ -1868,12 +1901,11 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data, /* update lfe history */ lfe_samples = 2 * s->lfe * (s->sample_blocks / 8); - for (i = 0; i < 2 * s->lfe * 4; i++) { + for (i = 0; i < 2 * s->lfe * 4; i++) s->lfe_data[i] = s->lfe_data[i + lfe_samples]; - } - *got_frame_ptr = 1; - *(AVFrame *)data = s->frame; + *got_frame_ptr = 1; + *(AVFrame *) data = s->frame; return buf_size; } @@ -1886,7 +1918,7 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data, * @param avctx pointer to the AVCodecContext */ -static av_cold int dca_decode_init(AVCodecContext * avctx) +static av_cold int dca_decode_init(AVCodecContext *avctx) { DCAContext *s = avctx->priv_data; int i; @@ -1900,15 +1932,15 @@ static av_cold int dca_decode_init(AVCodecContext * avctx) ff_dcadsp_init(&s->dcadsp); ff_fmt_convert_init(&s->fmt_conv, avctx); - for (i = 0; i < DCA_PRIM_CHANNELS_MAX+1; i++) + for (i = 0; i < DCA_PRIM_CHANNELS_MAX + 1; i++) s->samples_chanptr[i] = s->samples + i * 256; if (avctx->request_sample_fmt == AV_SAMPLE_FMT_FLT) { avctx->sample_fmt = AV_SAMPLE_FMT_FLT; - s->scale_bias = 1.0 / 32768.0; + s->scale_bias = 1.0 / 32768.0; } else { avctx->sample_fmt = AV_SAMPLE_FMT_S16; - s->scale_bias = 1.0; + s->scale_bias = 1.0; } /* allow downmixing to stereo */ @@ -1923,7 +1955,7 @@ static av_cold int dca_decode_init(AVCodecContext * avctx) return 0; } -static av_cold int dca_decode_end(AVCodecContext * avctx) +static av_cold int dca_decode_end(AVCodecContext *avctx) { DCAContext *s = avctx->priv_data; ff_mdct_end(&s->imdct); @@ -1940,17 +1972,17 @@ static const AVProfile profiles[] = { }; AVCodec ff_dca_decoder = { - .name = "dca", - .type = AVMEDIA_TYPE_AUDIO, - .id = CODEC_ID_DTS, - .priv_data_size = sizeof(DCAContext), - .init = dca_decode_init, - .decode = dca_decode_frame, - .close = dca_decode_end, - .long_name = NULL_IF_CONFIG_SMALL("DCA (DTS Coherent Acoustics)"), - .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1, - .sample_fmts = (const enum AVSampleFormat[]) { - AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE - }, - .profiles = NULL_IF_CONFIG_SMALL(profiles), + .name = "dca", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_DTS, + .priv_data_size = sizeof(DCAContext), + .init = dca_decode_init, + .decode = dca_decode_frame, + .close = dca_decode_end, + .long_name = NULL_IF_CONFIG_SMALL("DCA (DTS Coherent Acoustics)"), + .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1, + .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT, + AV_SAMPLE_FMT_S16, + AV_SAMPLE_FMT_NONE }, + .profiles = NULL_IF_CONFIG_SMALL(profiles), }; diff --git a/libavcodec/dct.c b/libavcodec/dct.c index c30cff664e..4800e13b36 100644 --- a/libavcodec/dct.c +++ b/libavcodec/dct.c @@ -28,15 +28,16 @@ */ #include <math.h> + #include "libavutil/mathematics.h" #include "dct.h" #include "dct32.h" -/* sin((M_PI * x / (2*n)) */ -#define SIN(s,n,x) (s->costab[(n) - (x)]) +/* sin((M_PI * x / (2 * n)) */ +#define SIN(s, n, x) (s->costab[(n) - (x)]) -/* cos((M_PI * x / (2*n)) */ -#define COS(s,n,x) (s->costab[x]) +/* cos((M_PI * x / (2 * n)) */ +#define COS(s, n, x) (s->costab[x]) static void ff_dst_calc_I_c(DCTContext *ctx, 
FFTSample *data) { @@ -44,28 +45,28 @@ static void ff_dst_calc_I_c(DCTContext *ctx, FFTSample *data) int i; data[0] = 0; - for(i = 1; i < n/2; i++) { - float tmp1 = data[i ]; - float tmp2 = data[n - i]; - float s = SIN(ctx, n, 2*i); - - s *= tmp1 + tmp2; - tmp1 = (tmp1 - tmp2) * 0.5f; - data[i ] = s + tmp1; - data[n - i] = s - tmp1; + for (i = 1; i < n / 2; i++) { + float tmp1 = data[i ]; + float tmp2 = data[n - i]; + float s = SIN(ctx, n, 2 * i); + + s *= tmp1 + tmp2; + tmp1 = (tmp1 - tmp2) * 0.5f; + data[i] = s + tmp1; + data[n - i] = s - tmp1; } - data[n/2] *= 2; + data[n / 2] *= 2; ctx->rdft.rdft_calc(&ctx->rdft, data); data[0] *= 0.5f; - for(i = 1; i < n-2; i += 2) { - data[i + 1] += data[i - 1]; - data[i ] = -data[i + 2]; + for (i = 1; i < n - 2; i += 2) { + data[i + 1] += data[i - 1]; + data[i] = -data[i + 2]; } - data[n-1] = 0; + data[n - 1] = 0; } static void ff_dct_calc_I_c(DCTContext *ctx, FFTSample *data) @@ -74,19 +75,19 @@ static void ff_dct_calc_I_c(DCTContext *ctx, FFTSample *data) int i; float next = -0.5f * (data[0] - data[n]); - for(i = 0; i < n/2; i++) { - float tmp1 = data[i ]; + for (i = 0; i < n / 2; i++) { + float tmp1 = data[i]; float tmp2 = data[n - i]; - float s = SIN(ctx, n, 2*i); - float c = COS(ctx, n, 2*i); + float s = SIN(ctx, n, 2 * i); + float c = COS(ctx, n, 2 * i); c *= tmp1 - tmp2; s *= tmp1 - tmp2; next += c; - tmp1 = (tmp1 + tmp2) * 0.5f; - data[i ] = tmp1 - s; + tmp1 = (tmp1 + tmp2) * 0.5f; + data[i] = tmp1 - s; data[n - i] = tmp1 + s; } @@ -94,7 +95,7 @@ static void ff_dct_calc_I_c(DCTContext *ctx, FFTSample *data) data[n] = data[1]; data[1] = next; - for(i = 3; i <= n; i += 2) + for (i = 3; i <= n; i += 2) data[i] = data[i - 2] - data[i]; } @@ -103,16 +104,16 @@ static void ff_dct_calc_III_c(DCTContext *ctx, FFTSample *data) int n = 1 << ctx->nbits; int i; - float next = data[n - 1]; + float next = data[n - 1]; float inv_n = 1.0f / n; for (i = n - 2; i >= 2; i -= 2) { - float val1 = data[i ]; + float val1 = data[i]; float val2 = data[i - 1] - data[i + 1]; - float c = COS(ctx, n, i); - float s = SIN(ctx, n, i); + float c = COS(ctx, n, i); + float s = SIN(ctx, n, i); - data[i ] = c * val1 + s * val2; + data[i] = c * val1 + s * val2; data[i + 1] = s * val1 - c * val2; } @@ -121,13 +122,13 @@ static void ff_dct_calc_III_c(DCTContext *ctx, FFTSample *data) ctx->rdft.rdft_calc(&ctx->rdft, data); for (i = 0; i < n / 2; i++) { - float tmp1 = data[i ] * inv_n; + float tmp1 = data[i] * inv_n; float tmp2 = data[n - i - 1] * inv_n; - float csc = ctx->csc2[i] * (tmp1 - tmp2); + float csc = ctx->csc2[i] * (tmp1 - tmp2); - tmp1 += tmp2; - data[i ] = tmp1 + csc; - data[n - i - 1] = tmp1 - csc; + tmp1 += tmp2; + data[i] = tmp1 + csc; + data[n - i - 1] = tmp1 - csc; } } @@ -137,34 +138,33 @@ static void ff_dct_calc_II_c(DCTContext *ctx, FFTSample *data) int i; float next; - for (i=0; i < n/2; i++) { - float tmp1 = data[i ]; + for (i = 0; i < n / 2; i++) { + float tmp1 = data[i]; float tmp2 = data[n - i - 1]; - float s = SIN(ctx, n, 2*i + 1); + float s = SIN(ctx, n, 2 * i + 1); - s *= tmp1 - tmp2; - tmp1 = (tmp1 + tmp2) * 0.5f; + s *= tmp1 - tmp2; + tmp1 = (tmp1 + tmp2) * 0.5f; - data[i ] = tmp1 + s; + data[i] = tmp1 + s; data[n-i-1] = tmp1 - s; } ctx->rdft.rdft_calc(&ctx->rdft, data); - next = data[1] * 0.5; + next = data[1] * 0.5; data[1] *= -1; for (i = n - 2; i >= 0; i -= 2) { float inr = data[i ]; float ini = data[i + 1]; - float c = COS(ctx, n, i); - float s = SIN(ctx, n, i); + float c = COS(ctx, n, i); + float s = SIN(ctx, n, i); - data[i ] = c * inr + s * 
ini; + data[i] = c * inr + s * ini; + data[i + 1] = next; - data[i+1] = next; - - next += s * inr - c * ini; + next += s * inr - c * ini; } } @@ -180,36 +180,36 @@ av_cold int ff_dct_init(DCTContext *s, int nbits, enum DCTTransformType inverse) memset(s, 0, sizeof(*s)); - s->nbits = nbits; - s->inverse = inverse; + s->nbits = nbits; + s->inverse = inverse; if (inverse == DCT_II && nbits == 5) { s->dct_calc = dct32_func; } else { - ff_init_ff_cos_tabs(nbits+2); - - s->costab = ff_cos_tabs[nbits+2]; + ff_init_ff_cos_tabs(nbits + 2); - s->csc2 = av_malloc(n/2 * sizeof(FFTSample)); + s->costab = ff_cos_tabs[nbits + 2]; + s->csc2 = av_malloc(n / 2 * sizeof(FFTSample)); if (ff_rdft_init(&s->rdft, nbits, inverse == DCT_III) < 0) { av_free(s->csc2); return -1; } - for (i = 0; i < n/2; i++) - s->csc2[i] = 0.5 / sin((M_PI / (2*n) * (2*i + 1))); + for (i = 0; i < n / 2; i++) + s->csc2[i] = 0.5 / sin((M_PI / (2 * n) * (2 * i + 1))); - switch(inverse) { - case DCT_I : s->dct_calc = ff_dct_calc_I_c; break; - case DCT_II : s->dct_calc = ff_dct_calc_II_c ; break; + switch (inverse) { + case DCT_I : s->dct_calc = ff_dct_calc_I_c; break; + case DCT_II : s->dct_calc = ff_dct_calc_II_c; break; case DCT_III: s->dct_calc = ff_dct_calc_III_c; break; - case DST_I : s->dct_calc = ff_dst_calc_I_c; break; + case DST_I : s->dct_calc = ff_dst_calc_I_c; break; } } s->dct32 = ff_dct32_float; - if (HAVE_MMX) ff_dct_init_mmx(s); + if (HAVE_MMX) + ff_dct_init_mmx(s); return 0; } diff --git a/libavcodec/pthread.c b/libavcodec/pthread.c index 717b865bae..d545f140ac 100644 --- a/libavcodec/pthread.c +++ b/libavcodec/pthread.c @@ -35,7 +35,7 @@ #define _GNU_SOURCE #include <sched.h> #endif -#if HAVE_GETSYSTEMINFO +#if HAVE_GETPROCESSAFFINITYMASK #include <windows.h> #endif #if HAVE_SYSCTL @@ -172,10 +172,11 @@ static int get_logical_cpus(AVCodecContext *avctx) if (!ret) { nb_cpus = CPU_COUNT(&cpuset); } -#elif HAVE_GETSYSTEMINFO - SYSTEM_INFO sysinfo; - GetSystemInfo(&sysinfo); - nb_cpus = sysinfo.dwNumberOfProcessors; +#elif HAVE_GETPROCESSAFFINITYMASK + DWORD_PTR proc_aff, sys_aff; + ret = GetProcessAffinityMask(GetCurrentProcess(), &proc_aff, &sys_aff); + if (ret) + nb_cpus = av_popcount64(proc_aff); #elif HAVE_SYSCTL && defined(HW_NCPU) int mib[2] = { CTL_HW, HW_NCPU }; size_t len = sizeof(nb_cpus); diff --git a/libavcodec/snow.c b/libavcodec/snow.c index 660162a1b3..a289ecfbce 100644 --- a/libavcodec/snow.c +++ b/libavcodec/snow.c @@ -516,9 +516,9 @@ static void halfpel_interpol(SnowContext *s, uint8_t *halfpel[4][4], AVFrame *fr int ls= frame->linesize[p]; uint8_t *src= frame->data[p]; - halfpel[1][p] = av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls); - halfpel[2][p] = av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls); - halfpel[3][p] = av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls); + halfpel[1][p] = (uint8_t*) av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls); + halfpel[2][p] = (uint8_t*) av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls); + halfpel[3][p] = (uint8_t*) av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls); halfpel[0][p]= src; for(y=0; y<h; y++){ diff --git a/libavcodec/utvideo.c b/libavcodec/utvideo.c index 6fb384a4e4..906a61f9f8 100644 --- a/libavcodec/utvideo.c +++ b/libavcodec/utvideo.c @@ -282,6 +282,77 @@ static void restore_median(uint8_t *src, int step, int stride, } } +/* UtVideo interlaced mode treats every two lines as a single one, + * so restoring function should take care of possible padding between + * two parts of the same 
"line". + */ +static void restore_median_il(uint8_t *src, int step, int stride, + int width, int height, int slices, int rmode) +{ + int i, j, slice; + int A, B, C; + uint8_t *bsrc; + int slice_start, slice_height; + const int cmask = ~(rmode ? 3 : 1); + const int stride2 = stride << 1; + + for (slice = 0; slice < slices; slice++) { + slice_start = ((slice * height) / slices) & cmask; + slice_height = ((((slice + 1) * height) / slices) & cmask) - slice_start; + slice_height >>= 1; + + bsrc = src + slice_start * stride; + + // first line - left neighbour prediction + bsrc[0] += 0x80; + A = bsrc[0]; + for (i = step; i < width * step; i += step) { + bsrc[i] += A; + A = bsrc[i]; + } + for (i = 0; i < width * step; i += step) { + bsrc[stride + i] += A; + A = bsrc[stride + i]; + } + bsrc += stride2; + if (slice_height == 1) + continue; + // second line - first element has top predition, the rest uses median + C = bsrc[-stride2]; + bsrc[0] += C; + A = bsrc[0]; + for (i = step; i < width * step; i += step) { + B = bsrc[i - stride2]; + bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C)); + C = B; + A = bsrc[i]; + } + for (i = 0; i < width * step; i += step) { + B = bsrc[i - stride]; + bsrc[stride + i] += mid_pred(A, B, (uint8_t)(A + B - C)); + C = B; + A = bsrc[stride + i]; + } + bsrc += stride2; + // the rest of lines use continuous median prediction + for (j = 2; j < slice_height; j++) { + for (i = 0; i < width * step; i += step) { + B = bsrc[i - stride2]; + bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C)); + C = B; + A = bsrc[i]; + } + for (i = 0; i < width * step; i += step) { + B = bsrc[i - stride]; + bsrc[i + stride] += mid_pred(A, B, (uint8_t)(A + B - C)); + C = B; + A = bsrc[i + stride]; + } + bsrc += stride2; + } + } +} + static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; @@ -381,10 +452,18 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac c->frame_pred == PRED_LEFT); if (ret) return ret; - if (c->frame_pred == PRED_MEDIAN) - restore_median(c->pic.data[i], 1, c->pic.linesize[i], - avctx->width >> !!i, avctx->height >> !!i, - c->slices, !i); + if (c->frame_pred == PRED_MEDIAN) { + if (!c->interlaced) { + restore_median(c->pic.data[i], 1, c->pic.linesize[i], + avctx->width >> !!i, avctx->height >> !!i, + c->slices, !i); + } else { + restore_median_il(c->pic.data[i], 1, c->pic.linesize[i], + avctx->width >> !!i, + avctx->height >> !!i, + c->slices, !i); + } + } } break; case PIX_FMT_YUV422P: @@ -395,9 +474,17 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac c->frame_pred == PRED_LEFT); if (ret) return ret; - if (c->frame_pred == PRED_MEDIAN) - restore_median(c->pic.data[i], 1, c->pic.linesize[i], - avctx->width >> !!i, avctx->height, c->slices, 0); + if (c->frame_pred == PRED_MEDIAN) { + if (!c->interlaced) { + restore_median(c->pic.data[i], 1, c->pic.linesize[i], + avctx->width >> !!i, avctx->height, + c->slices, 0); + } else { + restore_median_il(c->pic.data[i], 1, c->pic.linesize[i], + avctx->width >> !!i, avctx->height, + c->slices, 0); + } + } } break; } diff --git a/libavformat/utils.c b/libavformat/utils.c index 82b6b8a674..e8fefe6514 100644 --- a/libavformat/utils.c +++ b/libavformat/utils.c @@ -2235,7 +2235,7 @@ static int has_decode_delay_been_guessed(AVStream *st) static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options) { AVCodec *codec; - int got_picture, ret = 0; + int got_picture = 1, ret = 0; AVFrame picture; 
AVPacket pkt = *avpkt; @@ -2248,7 +2248,8 @@ static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **option return ret; } - while (pkt.size > 0 && ret >= 0 && + while ((pkt.size > 0 || (!pkt.data && got_picture)) && + ret >= 0 && (!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st) || (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) { @@ -2377,14 +2378,9 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) int i, count, ret, read_size, j; AVStream *st; AVPacket pkt1, *pkt; - AVDictionary *one_thread_opt = NULL; int64_t old_offset = avio_tell(ic->pb); int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those - /* this function doesn't flush the decoders, so force thread count - * to 1 to fix behavior when thread count > number of frames in the file */ - av_dict_set(&one_thread_opt, "threads", "1", 0); - for(i=0;i<ic->nb_streams;i++) { AVCodec *codec; st = ic->streams[i]; @@ -2406,21 +2402,15 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) assert(!st->codec->codec); codec = avcodec_find_decoder(st->codec->codec_id); - /* this function doesn't flush the decoders, so force thread count - * to 1 to fix behavior when thread count > number of frames in the file */ - if (options) - av_dict_set(&options[i], "threads", "1", 0); - /* Ensure that subtitle_header is properly set. */ if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE && codec && !st->codec->codec) - avcodec_open2(st->codec, codec, options ? &options[i] : &one_thread_opt); + avcodec_open2(st->codec, codec, options ? &options[i] : NULL); //try to just open decoders, in case this is enough to get parameters if(!has_codec_parameters(st->codec)){ if (codec && !st->codec->codec) - avcodec_open2(st->codec, codec, options ? &options[i] - : &one_thread_opt); + avcodec_open2(st->codec, codec, options ? &options[i] : NULL); } } @@ -2486,10 +2476,22 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) continue; if (ret < 0) { - /* EOF or error */ + /* EOF or error*/ + AVPacket empty_pkt = { 0 }; + int err; + av_init_packet(&empty_pkt); + ret = -1; /* we could not have all the codec parameters before EOF */ for(i=0;i<ic->nb_streams;i++) { st = ic->streams[i]; + + /* flush the decoders */ + while ((err = try_decode_frame(st, &empty_pkt, + (options && i < orig_nb_streams) ? + &options[i] : NULL)) >= 0) + if (has_codec_parameters(st->codec)) + break; + if (!has_codec_parameters(st->codec)){ char buf[256]; avcodec_string(buf, sizeof(buf), st->codec, 0); @@ -2562,8 +2564,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) least one frame of codec data, this makes sure the codec initializes the channel configuration and does not only trust the values from the container. */ - try_decode_frame(st, pkt, (options && i < orig_nb_streams )? &options[i] - : &one_thread_opt); + try_decode_frame(st, pkt, (options && i < orig_nb_streams ) ? 
&options[i] : NULL); st->codec_info_nb_frames++; count++; @@ -2689,7 +2690,6 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) ic->streams[i]->codec->thread_count = 0; av_freep(&ic->streams[i]->info); } - av_dict_free(&one_thread_opt); return ret; } diff --git a/libavutil/avutil.h b/libavutil/avutil.h index 4dc6eb0875..d6855a4020 100644 --- a/libavutil/avutil.h +++ b/libavutil/avutil.h @@ -154,7 +154,7 @@ */ #define LIBAVUTIL_VERSION_MAJOR 51 -#define LIBAVUTIL_VERSION_MINOR 33 +#define LIBAVUTIL_VERSION_MINOR 34 #define LIBAVUTIL_VERSION_MICRO 100 #define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \ diff --git a/libavutil/common.h b/libavutil/common.h index cf361ca15e..84290c6363 100644 --- a/libavutil/common.h +++ b/libavutil/common.h @@ -220,6 +220,16 @@ static av_always_inline av_const int av_popcount_c(uint32_t x) return (x + (x >> 16)) & 0x3F; } +/** + * Count number of bits set to one in x + * @param x value to count bits of + * @return the number of bits set to one in x + */ +static av_always_inline av_const int av_popcount64_c(uint64_t x) +{ + return av_popcount(x) + av_popcount(x >> 32); +} + #define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24)) #define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24)) @@ -385,3 +395,6 @@ static av_always_inline av_const int av_popcount_c(uint32_t x) #ifndef av_popcount # define av_popcount av_popcount_c #endif +#ifndef av_popcount64 +# define av_popcount64 av_popcount64_c +#endif diff --git a/tests/fate/screen.mak b/tests/fate/screen.mak index 3a71da2c0c..0bc06e85a0 100644 --- a/tests/fate/screen.mak +++ b/tests/fate/screen.mak @@ -1,6 +1,9 @@ FATE_SCREEN += fate-cscd fate-cscd: CMD = framecrc -i $(SAMPLES)/CSCD/sample_video.avi -an -vsync 0 -pix_fmt rgb24 +FATE_SCREEN += fate-dxtory +fate-dxtory: CMD = framecrc -i $(SAMPLES)/dxtory/dxtory_mic.avi + FATE_SCREEN += fate-fraps-v0 fate-fraps-v0: CMD = framecrc -i $(SAMPLES)/fraps/Griffin_Ragdoll01-partial.avi diff --git a/tests/ref/fate/dxtory b/tests/ref/fate/dxtory new file mode 100644 index 0000000000..5fab200f77 --- /dev/null +++ b/tests/ref/fate/dxtory @@ -0,0 +1 @@ +0, 0, 1382400, 0x44373645 |
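
The av_popcount64_c() added to libavutil/common.h in this merge is just two 32-bit popcounts, and libavcodec/pthread.c now feeds it the mask returned by GetProcessAffinityMask() instead of trusting GetSystemInfo(). A minimal standalone sketch of that combination follows; popcount32(), popcount64() and the example mask value are illustrative only, not FFmpeg identifiers.

    #include <stdint.h>
    #include <stdio.h>

    /* 32-bit population count, same bit-trick shape as av_popcount_c() */
    static int popcount32(uint32_t x)
    {
        x -= (x >> 1) & 0x55555555;
        x  = (x & 0x33333333) + ((x >> 2) & 0x33333333);
        x  = (x + (x >> 4)) & 0x0F0F0F0F;
        x += x >> 8;
        return (x + (x >> 16)) & 0x3F;
    }

    /* 64-bit variant: sum of the two halves, as in av_popcount64_c() */
    static int popcount64(uint64_t x)
    {
        return popcount32((uint32_t)x) + popcount32((uint32_t)(x >> 32));
    }

    int main(void)
    {
        /* stand-in for the process affinity mask that the new win32 code
         * obtains from GetProcessAffinityMask(); 6 bits set -> 6 CPUs */
        uint64_t proc_aff = 0x3f;
        printf("logical cpus: %d\n", popcount64(proc_aff));
        return 0;
    }

Counting affinity bits rather than reading dwNumberOfProcessors reports only the CPUs the process is actually allowed to run on, which is the point of the pthread.c change.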
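
The new restore_median_il() in libavcodec/utvideo.c undoes left/gradient/median prediction while treating two interlaced lines as one, so the "row above" used for prediction is two lines up (same field) instead of one. A rough sketch of the median step is below; mid_pred3(), restore_median_row() and the sample values are made up for illustration, and only the inner "continuous median" loop of the real function is mirrored.

    #include <stdint.h>
    #include <stdio.h>

    /* median of three values, same idea as FFmpeg's mid_pred() */
    static int mid_pred3(int a, int b, int c)
    {
        if (a > b) { int t = a; a = b; b = t; }  /* ensure a <= b   */
        if (b > c) b = c;                        /* b = min(b, c)   */
        return a > b ? a : b;                    /* median          */
    }

    /* Undo median prediction for one row of residuals, given the already
     * reconstructed row above.  A (left) and C (top-left) carry across
     * calls, like the "continuous" prediction in restore_median_il();
     * in the interlaced path "above" would point two lines up. */
    static void restore_median_row(uint8_t *row, const uint8_t *above,
                                   int width, int *A, int *C)
    {
        int x, B;
        for (x = 0; x < width; x++) {
            B      = above[x];
            row[x] = (uint8_t)(row[x] +
                               mid_pred3(*A, B, (uint8_t)(*A + B - *C)));
            *A = row[x];
            *C = B;
        }
    }

    int main(void)
    {
        uint8_t above[4] = { 10, 12, 11, 13 };  /* reconstructed row above */
        uint8_t row[4]   = {  1, 255,  2,  0 }; /* residuals to restore    */
        int x, A = above[3], C = above[3];      /* illustrative carry-in   */
        restore_median_row(row, above, 4, &A, &C);
        for (x = 0; x < 4; x++)
            printf("%d ", row[x]);
        printf("\n");
        return 0;
    }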
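
dca_exss_mask2count() in the dca.c hunk turns a speaker-activity mask into a channel count by counting every set bit once and the bits that denote left/right pairs a second time. The same pattern reduced to a toy mask is shown below; the SPK_* bit assignments and mask2count() are invented for the example and do not match the real DCA ExSS layout.

    #include <stdint.h>
    #include <stdio.h>

    /* toy speaker mask: bit values chosen only for this example */
    #define SPK_C      0x01  /* centre              - one channel */
    #define SPK_L_R    0x02  /* front left/right    - a pair      */
    #define SPK_LFE    0x04  /* LFE                 - one channel */
    #define SPK_SL_SR  0x08  /* surround left/right - a pair      */

    #define SPK_PAIRS  (SPK_L_R | SPK_SL_SR)

    static int popcount32(uint32_t x)
    {
        int n = 0;
        while (x) { x &= x - 1; n++; }  /* clear lowest set bit */
        return n;
    }

    /* pair bits are counted twice: once in the full mask,
     * once in the pair-only subset */
    static int mask2count(uint32_t mask)
    {
        return popcount32(mask) + popcount32(mask & SPK_PAIRS);
    }

    int main(void)
    {
        /* C + L/R + LFE + SL/SR -> 1 + 2 + 1 + 2 = 6 channels */
        printf("%d\n", mask2count(SPK_C | SPK_L_R | SPK_LFE | SPK_SL_SR));
        return 0;
    }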