author     Michael Niedermayer <michaelni@gmx.at>  2011-08-05 12:39:51 +0200
committer  Michael Niedermayer <michaelni@gmx.at>  2011-08-05 13:02:20 +0200
commit     22d841becd89f21fcd88a76bcafb675fd051ce86
tree       04f910119420fabb5557217eb8c7cd6c1a0d24ea
parent     25c2f13d00fcfdb81c33a459795c33d29f8690e8
parent     6e19cfb083eda83aaf4b49ae765ab2b3e578d32d
Merge branch 'master' into oldabi
* master: (780 commits)
ffmpeg: Fix doxygen comments for MetadataMap
filters.texi: fix wrong references in the "Filtergraph syntax" section
yadif: correct documentation on the parity parameter
mpegvideo.h: remove the 1 line difference to qatar
applehttp: fix variant discard logic
Fix possible crash when decoding mpeg streams.
h263dec: Fix asserts broken by the elimination of FF_COMMON_FRAME.
avidec: skip seek pos adjustment for non interleaved files. Fixes Ticket327
lsws: remove deprecated and unused stuff after the 0->1 major bump
cosmetics: remove some stray comments from AVCodec declarations
cosmetics: fix indentation/alignment in AVCodec declarations
Abort if command offset decreases, avoids potential endless loop.
Warn when falling back to unreliable UMF fps tag.
Detect NI-avi at playtime like mplayer. Fixes Ticket333
avidec: Fix XAN DPCM demuxing.
Fix a possible miscompilation of cabac with old (broken) compilers.
Fix -loop_input.
Set bits_per_coded_sample when encoding ADPCM.
vf_boxblur: call avfilter_draw_slice() at the end of draw_slice()
vf_boxblur: fix out-of-buffer access when h > w
...
Conflicts:
ffmpeg.c
libavcodec/avcodec.h
libavcodec/opt.h
libavcodec/version.h
libavdevice/avdevice.h
libavfilter/avfilter.h
libavformat/avformat.h
libavformat/aviobuf.c
libavformat/rtsp.c
libavformat/udp.c
libavformat/utils.c
libavformat/version.h
libavutil/avutil.h
Merged-by: Michael Niedermayer <michaelni@gmx.at>
783 files changed, 22592 insertions, 17503 deletions
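Among the changes pulled in here, cmdutils drops the old opt_names/opt_values string arrays in favour of AVDictionary-based option handling and gains two helpers, filter_codec_opts() and setup_find_stream_info_opts() (see the cmdutils.c and cmdutils.h hunks below). The following is a minimal sketch of how a caller might use them after this merge; the wrapper function open_and_probe(), its name and its error handling are illustrative assumptions — only the two helpers and the format_opts/codec_opts globals come from this commit.

```c
#include "libavformat/avformat.h"
#include "libavutil/dict.h"
#include "libavutil/mem.h"
#include "cmdutils.h"

/* Hypothetical helper: open an input file and probe its streams,
 * applying the command-line codec options collected in the global
 * codec_opts dictionary on a per-stream basis. */
static int open_and_probe(const char *filename, AVFormatContext **pic)
{
    AVFormatContext *ic = NULL;
    AVDictionary **opts = NULL;
    int i, err;

    /* format_opts and codec_opts are the cmdutils globals used by this merge */
    err = avformat_open_input(&ic, filename, NULL, &format_opts);
    if (err < 0)
        return err;

    /* one filtered options dictionary per stream in ic */
    opts = setup_find_stream_info_opts(ic, codec_opts);

    err = avformat_find_stream_info(ic, opts);

    if (opts) {
        for (i = 0; i < ic->nb_streams; i++)
            av_dict_free(&opts[i]);
        av_freep(&opts);
    }

    if (err < 0) {
        av_close_input_file(ic);
        return err;
    }

    *pic = ic;
    return 0;
}
```

Per the cmdutils.c hunk, filter_codec_opts() copies into each per-stream dictionary only those codec_opts entries that match the stream's codec options, either directly or via a leading 'v'/'a'/'s' prefix on the option name.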
diff --git a/.gitignore b/.gitignore index 3e8ed0046e..dd81f54e4e 100644 --- a/.gitignore +++ b/.gitignore @@ -32,7 +32,6 @@ tests/audiogen tests/base64 tests/data tests/rotozoom -tests/seek_test tests/tiny_psnr tests/videogen tests/vsynth1 @@ -44,5 +43,4 @@ tools/pktdumper tools/probetest tools/qt-faststart tools/trasher -tools/trasher*.d version.h @@ -1,45 +1,18 @@ Entries are sorted chronologically from oldest to youngest within each release, releases are sorted from youngest to oldest. +version next: -version 0.7: - -- many many things we forgot because we rather write code than changelogs -- libmpcodecs video filter support (3 times as many filters than before) -- mpeg2 aspect ratio dection fixed -- libxvid aspect pickiness fixed -- Frame multithreaded decoding -- E-AC-3 audio encoder -- ac3enc: add channel coupling support -- floating-point sample format support to the ac3, eac3, dca, aac, and vorbis decoders. -- H264/MPEG frame-level multi-threading -- All av_metadata_* functions renamed to av_dict_* and moved to libavutil -- 4:4:4 H.264 decoding support -- 10-bit H.264 optimizations for x86 -- lut, lutrgb, and lutyuv filters added -- buffersink libavfilter sink added -- Bump libswscale for recently reported ABI break - +- openal input device added +- boxblur filter added +- BWF muxer +- Flash Screen Video 2 decoder -version 0.7_beta2: - -- VP8 frame-multithreading -- NEON optimizations for VP8 -- Lots of deprecated API cruft removed -- fft and imdct optimizations for AVX (Sandy Bridge) processors -- showinfo filter added -- DPX image encoder -- SMPTE 302M AES3 audio decoder -- Apple Core Audio Format muxer -- 9bit and 10bit per sample support in the H.264 decoder -- 9bit and 10bit FFV1 encoding / decoding -- split filter added -- select filter added -- sdl output device added +version 0.8: -version 0.7_beta1: +- many many things we forgot because we rather write code than changelogs - WebM support in Matroska de/muxer - low overhead Ogg muxing - MMS-TCP support @@ -58,7 +31,7 @@ version 0.7_beta1: - RTP depacketization of QDM2 - ANSI/ASCII art playback system - Lego Mindstorms RSO de/muxer -- libavcore added +- libavcore added (and subsequently removed) - SubRip subtitle file muxer and demuxer - Chinese AVS encoding via libxavs - ffprobe -show_packets option added @@ -88,10 +61,10 @@ version 0.7_beta1: - demuxer for receiving raw rtp:// URLs without an SDP description - single stream LATM/LOAS decoder - setpts filter added -- Win64 support for optimized asm functions +- Win64 support for optimized x86 assembly functions - MJPEG/AVI1 to JPEG/JFIF bitstream filter - ASS subtitle encoder and decoder -- IEC 61937 encapsulation for E-AC3, TrueHD, DTS-HD (for HDMI passthrough) +- IEC 61937 encapsulation for E-AC-3, TrueHD, DTS-HD (for HDMI passthrough) - overlay filter added - rename aspect filter to setdar, and pixelaspect to setsar - IEC 61937 demuxer @@ -126,6 +99,37 @@ version 0.7_beta1: - AMR-WB encoding via libvo-amrwbenc - xWMA demuxer - Mobotix MxPEG decoder +- VP8 frame-multithreading +- NEON optimizations for VP8 +- Lots of deprecated API cruft removed +- fft and imdct optimizations for AVX (Sandy Bridge) processors +- showinfo filter added +- SMPTE 302M AES3 audio decoder +- Apple Core Audio Format muxer +- 9bit and 10bit per sample support in the H.264 decoder +- 9bit and 10bit FFV1 encoding / decoding +- split filter added +- select filter added +- sdl output device added +- libmpcodecs video filter support (3 times as many filters than before) +- mpeg2 aspect ratio dection 
fixed +- libxvid aspect pickiness fixed +- Frame multithreaded decoding +- E-AC-3 audio encoder +- ac3enc: add channel coupling support +- floating-point sample format support to the ac3, eac3, dca, aac, and vorbis decoders. +- H264/MPEG frame-level multi-threading +- All av_metadata_* functions renamed to av_dict_* and moved to libavutil +- 4:4:4 H.264 decoding support +- 10-bit H.264 optimizations for x86 +- lut, lutrgb, and lutyuv filters added +- buffersink libavfilter sink added +- Bump libswscale for recently reported ABI break + + +version 0.7: + +- all the changes for 0.8, but keeping API/ABI compatibility with the 0.6 release version 0.6: @@ -1,4 +1,4 @@ -# Doxyfile 1.5.6 +# Doxyfile 1.7.1 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project @@ -54,11 +54,11 @@ CREATE_SUBDIRS = NO # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, -# Croatian, Czech, Danish, Dutch, Farsi, Finnish, French, German, Greek, -# Hungarian, Italian, Japanese, Japanese-en (Japanese with English messages), -# Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, Polish, -# Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, Swedish, -# and Ukrainian. +# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, +# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English +# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, +# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak, +# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. OUTPUT_LANGUAGE = English @@ -155,13 +155,6 @@ QT_AUTOBRIEF = NO MULTILINE_CPP_IS_BRIEF = NO -# If the DETAILS_AT_TOP tag is set to YES then Doxygen -# will output the detailed description near the top, like JavaDoc. -# If set to NO, the detailed description appears after the member -# documentation. - -DETAILS_AT_TOP = NO - # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. @@ -214,6 +207,18 @@ OPTIMIZE_FOR_FORTRAN = NO OPTIMIZE_OUTPUT_VHDL = NO +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given extension. +# Doxygen has a built-in mapping, but you can override or extend it using this +# tag. The format is ext=language, where ext is a file extension, and language +# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C, +# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make +# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C +# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions +# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. + +EXTENSION_MAPPING = + # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and @@ -268,6 +273,22 @@ SUBGROUPING = YES TYPEDEF_HIDES_STRUCT = NO +# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to +# determine which symbols to keep in memory and which to flush to disk. 
+# When the cache is full, less often used symbols will be written to disk. +# For small to medium size projects (<1000 input files) the default value is +# probably good enough. For larger projects a too small cache size can cause +# doxygen to be busy swapping symbols to and from disk most of the time +# causing a significant performance penality. +# If the system has enough physical memory increasing the cache will improve the +# performance by keeping more symbols in memory. Note that the value works on +# a logarithmic scale so increasing the size by one will rougly double the +# memory usage. The cache size is given by this formula: +# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, +# corresponding to a cache size of 2^16 = 65536 symbols + +SYMBOL_CACHE_SIZE = 0 + #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- @@ -366,6 +387,12 @@ HIDE_SCOPE_NAMES = NO SHOW_INCLUDE_FILES = YES +# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen +# will list include files with double quotes in the documentation +# rather than with sharp brackets. + +FORCE_LOCAL_INCLUDES = NO + # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. @@ -385,6 +412,16 @@ SORT_MEMBER_DOCS = YES SORT_BRIEF_DOCS = NO +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen +# will sort the (brief and detailed) documentation of class members so that +# constructors and destructors are listed first. If set to NO (the default) +# the constructors will appear in the respective orders defined by +# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. +# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO +# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. + +SORT_MEMBERS_CTORS_1ST = NO + # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. @@ -459,7 +496,8 @@ SHOW_DIRECTORIES = NO SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the -# Namespaces page. This will remove the Namespaces entry from the Quick Index +# Namespaces page. +# This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. SHOW_NAMESPACES = YES @@ -474,6 +512,15 @@ SHOW_NAMESPACES = YES FILE_VERSION_FILTER = +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. The create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. +# You can optionally specify a file name after the option, if omitted +# DoxygenLayout.xml will be used as the name of the layout file. 
+ +LAYOUT_FILE = + #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- @@ -577,7 +624,8 @@ EXCLUDE_SYMLINKS = NO # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* -EXCLUDE_PATTERNS = *.git *.d +EXCLUDE_PATTERNS = *.git \ + *.d # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the @@ -591,14 +639,15 @@ EXCLUDE_SYMBOLS = # directories that contain example code fragments that are included (see # the \include command). -EXAMPLE_PATH = +EXAMPLE_PATH = libavcodec/ \ + libavformat/ # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. -EXAMPLE_PATTERNS = +EXAMPLE_PATTERNS = *-example.c # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude @@ -618,14 +667,17 @@ IMAGE_PATH = # by executing (via popen()) the command <filter> <input-file>, where <filter> # is the value of the INPUT_FILTER tag, and <input-file> is the name of an # input file. Doxygen will then use the output that the filter program writes -# to standard output. If FILTER_PATTERNS is specified, this tag will be +# to standard output. +# If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern -# basis. Doxygen will compare the file name with each pattern and apply the -# filter if there is a match. The filters are a list of the form: +# basis. +# Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. +# The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER # is applied to all files. @@ -675,7 +727,8 @@ REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will -# link to the source code. Otherwise they will link to the documentstion. +# link to the source code. +# Otherwise they will link to the documentation. REFERENCES_LINK_SOURCE = YES @@ -758,18 +811,50 @@ HTML_FOOTER = HTML_STYLESHEET = +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. +# Doxygen will adjust the colors in the stylesheet and background images +# according to this color. Hue is specified as an angle on a colorwheel, +# see http://en.wikipedia.org/wiki/Hue for more information. +# For instance the value 0 represents red, 60 is yellow, 120 is green, +# 180 is cyan, 240 is blue, 300 purple, and 360 is red again. +# The allowed range is 0 to 359. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of +# the colors in the HTML output. For a value of 0 the output will use +# grayscales only. A value of 255 will produce the most vivid colors. 
+ +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to +# the luminance component of the colors in the HTML output. Values below +# 100 gradually make the output lighter, whereas values above 100 make +# the output darker. The value divided by 100 is the actual gamma applied, +# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, +# and 100 does not change the gamma. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting +# this to NO can help when comparing the output of multiple runs. + +HTML_TIMESTAMP = YES + # If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, # files or namespaces will be aligned in HTML using tables. If set to # NO a bullet list will be used. HTML_ALIGN_MEMBERS = YES -# If the GENERATE_HTMLHELP tag is set to YES, additional index files -# will be generated that can be used as input for tools like the -# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) -# of the generated HTML documentation. +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. For this to work a browser that supports +# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox +# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). -GENERATE_HTMLHELP = NO +HTML_DYNAMIC_SECTIONS = NO # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 @@ -779,6 +864,8 @@ GENERATE_HTMLHELP = NO # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. +# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. GENERATE_DOCSET = NO @@ -796,13 +883,22 @@ DOCSET_FEEDNAME = "Doxygen generated docs" DOCSET_BUNDLE_ID = org.doxygen.Project -# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML -# documentation will contain sections that can be hidden and shown after the -# page has loaded. For this to work a browser that supports -# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox -# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). +# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. -HTML_DYNAMIC_SECTIONS = NO +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES, additional index files +# will be generated that can be used as input for tools like the +# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) +# of the generated HTML documentation. + +GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. 
You @@ -841,6 +937,76 @@ BINARY_TOC = NO TOC_EXPAND = NO +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated +# that can be used as input for Qt's qhelpgenerator to generate a +# Qt Compressed Help (.qch) of the generated HTML documentation. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can +# be used to specify the file name of the resulting .qch file. +# The path specified is relative to the HTML output folder. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#namespace + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#virtual-folders + +QHP_VIRTUAL_FOLDER = doc + +# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to +# add. For more information please see +# http://doc.trolltech.com/qthelpproject.html#custom-filters + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see +# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters"> +# Qt Help Project / Custom Filters</a>. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's +# filter section matches. +# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes"> +# Qt Help Project / Filter Attributes</a>. + +QHP_SECT_FILTER_ATTRS = + +# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can +# be used to specify the location of Qt's qhelpgenerator. +# If non-empty doxygen will try to run qhelpgenerator on the generated +# .qhp file. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files +# will be generated, which together with the HTML files, form an Eclipse help +# plugin. To install this plugin and make it available under the help contents +# menu in Eclipse, the contents of the directory containing the HTML and XML +# files needs to be copied into the plugins directory of eclipse. The name of +# the directory within the plugins directory should be the same as +# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before +# the help appears. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have +# this name. + +ECLIPSE_DOC_ID = org.doxygen.Project + # The DISABLE_INDEX tag can be used to turn on/off the condensed index at # top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. @@ -854,27 +1020,30 @@ ENUM_VALUES_PER_LINE = 4 # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. -# If the tag value is set to FRAME, a side panel will be generated +# If the tag value is set to YES, a side panel will be generated # containing a tree-like index structure (just like the one that # is generated for HTML Help). 
For this to work a browser that supports -# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, -# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are -# probably better off using the HTML help feature. Other possible values -# for this tag are: HIERARCHIES, which will generate the Groups, Directories, -# and Class Hiererachy pages using a tree view instead of an ordered list; -# ALL, which combines the behavior of FRAME and HIERARCHIES; and NONE, which -# disables this behavior completely. For backwards compatibility with previous -# releases of Doxygen, the values YES and NO are equivalent to FRAME and NONE -# respectively. +# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). +# Windows users are probably better off using the HTML help feature. GENERATE_TREEVIEW = NO +# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories, +# and Class Hierarchy pages using a tree view instead of an ordered list. + +USE_INLINE_TREES = NO + # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 +# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open +# links to external symbols imported via tag files in a separate window. + +EXT_LINKS_IN_WINDOW = NO + # Use this tag to change the font size of Latex formulas included # as images in the HTML documentation. The default is 10. Note that # when you change the font size after a successful doxygen run you need @@ -883,6 +1052,34 @@ TREEVIEW_WIDTH = 250 FORMULA_FONTSIZE = 10 +# Use the FORMULA_TRANPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are +# not supported properly for IE 6.0, but are supported on all modern browsers. +# Note that when changing this option you need to delete any form_*.png files +# in the HTML output before the changes have effect. + +FORMULA_TRANSPARENT = YES + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box +# for the HTML output. The underlying search engine uses javascript +# and DHTML and should work on any modern browser. Note that when using +# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets +# (GENERATE_DOCSET) there is already a search function so this one should +# typically be disabled. For large projects the javascript based search engine +# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. + +SEARCHENGINE = NO + +# When the SERVER_BASED_SEARCH tag is enabled the search engine will be +# implemented using a PHP enabled web server instead of at the web client +# using Javascript. Doxygen will generate the search PHP script and index +# file to put on the web server. The advantage of the server +# based approach is that it scales better to large projects and allows +# full text search. The disadvances is that it is more difficult to setup +# and does not have live searching capabilities. + +SERVER_BASED_SEARCH = NO + #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- @@ -900,6 +1097,9 @@ LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. 
+# Note that when enabling USE_PDFLATEX this option is only used for +# generating bitmaps for formulas in the HTML output, but not in the +# Makefile that is written to the output directory. LATEX_CMD_NAME = latex @@ -959,6 +1159,13 @@ LATEX_BATCHMODE = NO LATEX_HIDE_INDICES = NO +# If LATEX_SOURCE_CODE is set to YES then doxygen will include +# source code with syntax highlighting in the LaTeX output. +# Note that which sources are shown also depends on other settings +# such as SOURCE_BROWSER. + +LATEX_SOURCE_CODE = NO + #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- @@ -1095,8 +1302,10 @@ GENERATE_PERLMOD = NO PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be -# nicely formatted so it can be parsed by a human reader. This is useful -# if you want to understand what is going on. On the other hand, if this +# nicely formatted so it can be parsed by a human reader. +# This is useful +# if you want to understand what is going on. +# On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. @@ -1158,16 +1367,23 @@ INCLUDE_FILE_PATTERNS = # undefined via #undef or recursively expanded use the := operator # instead of the = operator. -PREDEFINED = __attribute__(x)="" "RENAME(x)=x ## _TMPL" "DEF(x)=x ## _TMPL" \ - HAVE_AV_CONFIG_H HAVE_MMX HAVE_MMX2 HAVE_AMD3DNOW \ +PREDEFINED = "__attribute__(x)=" \ + "RENAME(x)=x ## _TMPL" \ + "DEF(x)=x ## _TMPL" \ + HAVE_AV_CONFIG_H \ + HAVE_MMX \ + HAVE_MMX2 \ + HAVE_AMD3DNOW \ + "DECLARE_ALIGNED(a,t,n)=t n" \ + "offsetof(x,y)=0x42" # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition. -#EXPAND_AS_DEFINED = FF_COMMON_FRAME -EXPAND_AS_DEFINED = declare_idct(idct, table, idct_row_head, idct_row, idct_row_tail, idct_row_mid) +EXPAND_AS_DEFINED = declare_idct \ + READ_PAR_DATA \ # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all function-like macros that are alone @@ -1185,9 +1401,11 @@ SKIP_FUNCTION_MACROS = YES # Optionally an initial location of the external documentation # can be added for each tagfile. The format of a tag file without # this location is as follows: -# TAGFILES = file1 file2 ... +# +# TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: -# TAGFILES = file1=loc1 "file2 = loc2" ... +# +# TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths or # URLs. If a location is present for each tag, the installdox tool # does not have to be run to correct the links. @@ -1255,6 +1473,14 @@ HIDE_UNDOC_RELATIONS = YES HAVE_DOT = NO +# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is +# allowed to run in parallel. When set to 0 (the default) doxygen will +# base this on the number of processors available in the system. You can set it +# explicitly to a value larger than 0 to get control over the balance +# between CPU load and processing speed. 
+ +DOT_NUM_THREADS = 0 + # By default doxygen will write a font called FreeSans.ttf to the output # directory and reference it in all dot files that doxygen generates. This # font does not include all possible unicode characters however, so when you need @@ -1266,6 +1492,11 @@ HAVE_DOT = NO DOT_FONTNAME = FreeSans +# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. +# The default size is 10pt. + +DOT_FONTSIZE = 10 + # By default doxygen will tell dot to use the output directory to look for the # FreeSans.ttf font (which doxygen will put there itself). If you specify a # different font using DOT_FONTNAME you can set the path where dot @@ -1383,10 +1614,10 @@ DOT_GRAPH_MAX_NODES = 50 MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent -# background. This is enabled by default, which results in a transparent -# background. Warning: Depending on the platform used, enabling this option -# may lead to badly anti-aliased labels on the edges of a graph (i.e. they -# become hard to read). +# background. This is disabled by default, because dot on Windows does not +# seem to support this out of the box. Warning: Depending on the platform used, +# enabling this option may lead to badly anti-aliased labels on the edges of +# a graph (i.e. they become hard to read). DOT_TRANSPARENT = YES @@ -1408,12 +1639,3 @@ GENERATE_LEGEND = YES # the various graphs. DOT_CLEANUP = YES - -#--------------------------------------------------------------------------- -# Configuration::additions related to the search engine -#--------------------------------------------------------------------------- - -# The SEARCHENGINE tag specifies whether or not a search engine should be -# used. If set to NO the values of all tags below this one will be ignored. 
- -SEARCHENGINE = NO diff --git a/MAINTAINERS b/MAINTAINERS index 8588ba73ad..e5ba8d2fac 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -362,6 +362,16 @@ Sparc Roman Shaposhnik x86 Michael Niedermayer +Releases +======== + +0.5 *Deprecated/Unmaintained* +0.6 Stefano Sabatini +0.7 Michael Niedermayer +0.8 Michael Niedermayer + + + GnuPG Fingerprints of maintainers and others who have svn write access ====================================================================== @@ -1,13 +1,13 @@ +MAIN_MAKEFILE=1 include config.mak -SRC_DIR = $(SRC_PATH_BARE) +vpath %.c $(SRC_PATH) +vpath %.h $(SRC_PATH) +vpath %.S $(SRC_PATH) +vpath %.asm $(SRC_PATH) +vpath %.v $(SRC_PATH) +vpath %.texi $(SRC_PATH) -vpath %.c $(SRC_DIR) -vpath %.h $(SRC_DIR) -vpath %.S $(SRC_DIR) -vpath %.asm $(SRC_DIR) -vpath %.v $(SRC_DIR) -vpath %.texi $(SRC_PATH_BARE) PROGS-$(CONFIG_FFMPEG) += ffmpeg PROGS-$(CONFIG_FFPLAY) += ffplay @@ -17,20 +17,16 @@ PROGS-$(CONFIG_FFSERVER) += ffserver PROGS := $(PROGS-yes:%=%$(EXESUF)) PROGS_G = $(PROGS-yes:%=%_g$(EXESUF)) OBJS = $(PROGS-yes:%=%.o) cmdutils.o -MANPAGES = $(PROGS-yes:%=doc/%.1) -PODPAGES = $(PROGS-yes:%=doc/%.pod) -HTMLPAGES = $(PROGS-yes:%=doc/%.html) -TOOLS = $(addprefix tools/, $(addsuffix $(EXESUF), cws2fws graph2dot lavfi-showfiltfmts pktdumper probetest qt-faststart trasher)) TESTTOOLS = audiogen videogen rotozoom tiny_psnr base64 HOSTPROGS := $(TESTTOOLS:%=tests/%) +TOOLS = qt-faststart trasher +TOOLS-$(CONFIG_ZLIB) += cws2fws BASENAMES = ffmpeg ffplay ffprobe ffserver ALLPROGS = $(BASENAMES:%=%$(EXESUF)) ALLPROGS_G = $(BASENAMES:%=%_g$(EXESUF)) ALLMANPAGES = $(BASENAMES:%=%.1) -ALLFFLIBS = avcodec avdevice avfilter avformat avutil postproc swscale - FFLIBS-$(CONFIG_AVDEVICE) += avdevice FFLIBS-$(CONFIG_AVFILTER) += avfilter FFLIBS-$(CONFIG_AVFORMAT) += avformat @@ -40,33 +36,35 @@ FFLIBS-$(CONFIG_SWSCALE) += swscale FFLIBS := avutil -DATA_FILES := $(wildcard $(SRC_DIR)/ffpresets/*.ffpreset) +DATA_FILES := $(wildcard $(SRC_PATH)/ffpresets/*.ffpreset) SKIPHEADERS = cmdutils_common_opts.h -include common.mak +include $(SRC_PATH)/common.mak -FF_LDFLAGS := $(FFLDFLAGS) FF_EXTRALIBS := $(FFEXTRALIBS) FF_DEP_LIBS := $(DEP_LIBS) -all-$(CONFIG_DOC): documentation - -all: $(FF_DEP_LIBS) $(PROGS) +all: $(PROGS) $(PROGS): %$(EXESUF): %_g$(EXESUF) $(CP) $< $@ $(STRIP) $@ +$(TOOLS): %$(EXESUF): %.o + $(LD) $(LDFLAGS) -o $@ $< $(ELIBS) + +tools/cws2fws$(EXESUF): ELIBS = -lz + config.h: .config -.config: $(wildcard $(FFLIBS:%=$(SRC_DIR)/lib%/all*.c)) +.config: $(wildcard $(FFLIBS:%=$(SRC_PATH)/lib%/all*.c)) @-tput bold 2>/dev/null @-printf '\nWARNING: $(?F) newer than config.h, rerun configure\n\n' @-tput sgr0 2>/dev/null SUBDIR_VARS := OBJS FFLIBS CLEANFILES DIRS TESTPROGS EXAMPLES SKIPHEADERS \ ALTIVEC-OBJS MMX-OBJS NEON-OBJS X86-OBJS YASM-OBJS-FFT YASM-OBJS \ - HOSTPROGS BUILT_HEADERS TESTOBJS ARCH_HEADERS ARMV6-OBJS + HOSTPROGS BUILT_HEADERS TESTOBJS ARCH_HEADERS ARMV6-OBJS TOOLS define RESET $(1) := @@ -76,31 +74,24 @@ endef define DOSUBDIR $(foreach V,$(SUBDIR_VARS),$(eval $(call RESET,$(V)))) SUBDIR := $(1)/ -include $(1)/Makefile +include $(SRC_PATH)/$(1)/Makefile endef $(foreach D,$(FFLIBS),$(eval $(call DOSUBDIR,lib$(D)))) ffplay.o: CFLAGS += $(SDL_CFLAGS) ffplay_g$(EXESUF): FF_EXTRALIBS += $(SDL_LIBS) -ffserver_g$(EXESUF): FF_LDFLAGS += $(FFSERVERLDFLAGS) +ffserver_g$(EXESUF): LDFLAGS += $(FFSERVERLDFLAGS) %_g$(EXESUF): %.o cmdutils.o $(FF_DEP_LIBS) - $(LD) $(FF_LDFLAGS) -o $@ $< cmdutils.o $(FF_EXTRALIBS) - -alltools: $(TOOLS) + $(LD) $(LDFLAGS) -o $@ $< cmdutils.o 
$(FF_EXTRALIBS) -tools/%$(EXESUF): tools/%.o - $(LD) $(FF_LDFLAGS) -o $@ $< $(FF_EXTRALIBS) - -tools/%.o: tools/%.c - $(CC) $(CPPFLAGS) $(CFLAGS) -c $(CC_O) $< +OBJDIRS += tools -include $(wildcard tools/*.d) --include $(wildcard tests/*.d) -VERSION_SH = $(SRC_PATH_BARE)/version.sh -GIT_LOG = $(SRC_PATH_BARE)/.git/logs/HEAD +VERSION_SH = $(SRC_PATH)/version.sh +GIT_LOG = $(SRC_PATH)/.git/logs/HEAD .version: $(wildcard $(GIT_LOG)) $(VERSION_SH) config.mak .version: M=@ @@ -112,28 +103,6 @@ version.h .version: # force version.sh to run whenever version might have changed -include .version -DOCS = $(addprefix doc/, developer.html faq.html general.html libavfilter.html) $(HTMLPAGES) $(MANPAGES) $(PODPAGES) - -documentation: $(DOCS) - --include $(wildcard $(DOCS:%=%.d)) - -TEXIDEP = awk '/^@include/ { printf "$@: $(@D)/%s\n", $$2 }' <$< >$(@:%=%.d) - -doc/%.html: TAG = HTML -doc/%.html: doc/%.texi $(SRC_PATH_BARE)/doc/t2h.init - $(Q)$(TEXIDEP) - $(M)texi2html -monolithic --init-file $(SRC_PATH_BARE)/doc/t2h.init --output $@ $< - -doc/%.pod: TAG = POD -doc/%.pod: doc/%.texi - $(Q)$(TEXIDEP) - $(M)doc/texi2pod.pl $< $@ - -doc/%.1: TAG = MAN -doc/%.1: doc/%.pod - $(M)pod2man --section=1 --center=" " --release=" " $< > $@ - ifdef PROGS install: install-progs install-data endif @@ -143,7 +112,6 @@ install: install-libs install-headers install-libs: install-libs-yes install-progs-yes: -install-progs-$(CONFIG_DOC): install-man install-progs-$(CONFIG_SHARED): install-libs install-progs: install-progs-yes $(PROGS) @@ -154,11 +122,7 @@ install-data: $(DATA_FILES) $(Q)mkdir -p "$(DATADIR)" $(INSTALL) -m 644 $(DATA_FILES) "$(DATADIR)" -install-man: $(MANPAGES) - $(Q)mkdir -p "$(MANDIR)/man1" - $(INSTALL) -m 644 $(MANPAGES) "$(MANDIR)/man1" - -uninstall: uninstall-libs uninstall-headers uninstall-progs uninstall-data uninstall-man +uninstall: uninstall-libs uninstall-headers uninstall-progs uninstall-data uninstall-progs: $(RM) $(addprefix "$(BINDIR)/", $(ALLPROGS)) @@ -166,19 +130,9 @@ uninstall-progs: uninstall-data: $(RM) -r "$(DATADIR)" -uninstall-man: - $(RM) $(addprefix "$(MANDIR)/man1/",$(ALLMANPAGES)) - -testclean: - $(RM) -r tests/vsynth1 tests/vsynth2 tests/data - $(RM) $(addprefix tests/,$(CLEANSUFFIXES)) - $(RM) tests/seek_test$(EXESUF) tests/seek_test.o - $(RM) $(TESTTOOLS:%=tests/%$(HOSTEXESUF)) - -clean:: testclean +clean:: $(RM) $(ALLPROGS) $(ALLPROGS_G) $(RM) $(CLEANSUFFIXES) - $(RM) doc/*.html doc/*.pod doc/*.1 $(RM) $(TOOLS) $(RM) $(CLEANSUFFIXES:%=tools/%) @@ -189,119 +143,19 @@ distclean:: config: $(SRC_PATH)/configure $(value FFMPEG_CONFIGURATION) -# regression tests - -check: test - -fulltest test: codectest lavftest lavfitest seektest - -FFSERVER_REFFILE = $(SRC_PATH)/tests/ffserver.regression.ref - -codectest: fate-codec -lavftest: fate-lavf -lavfitest: fate-lavfi -seektest: fate-seek - -AREF = fate-acodec-aref -VREF = fate-vsynth1-vref fate-vsynth2-vref -REFS = $(AREF) $(VREF) - -$(VREF): ffmpeg$(EXESUF) tests/vsynth1/00.pgm tests/vsynth2/00.pgm -$(AREF): ffmpeg$(EXESUF) tests/data/asynth1.sw - -ffservertest: ffserver$(EXESUF) tests/vsynth1/00.pgm tests/data/asynth1.sw - @echo - @echo "Unfortunately ffserver is broken and therefore its regression" - @echo "test fails randomly. Treat the results accordingly." 
- @echo - $(SRC_PATH)/tests/ffserver-regression.sh $(FFSERVER_REFFILE) $(SRC_PATH)/tests/ffserver.conf - -tests/vsynth1/00.pgm: tests/videogen$(HOSTEXESUF) - @mkdir -p tests/vsynth1 - $(M)./$< 'tests/vsynth1/' - -tests/vsynth2/00.pgm: tests/rotozoom$(HOSTEXESUF) - @mkdir -p tests/vsynth2 - $(M)./$< 'tests/vsynth2/' $(SRC_PATH)/tests/lena.pnm - -tests/data/asynth1.sw: tests/audiogen$(HOSTEXESUF) - @mkdir -p tests/data - $(M)./$< $@ - -tests/data/asynth1.sw tests/vsynth%/00.pgm: TAG = GEN - -tests/seek_test$(EXESUF): tests/seek_test.o $(FF_DEP_LIBS) - $(LD) $(FF_LDFLAGS) -o $@ $< $(FF_EXTRALIBS) - -tools/lavfi-showfiltfmts$(EXESUF): tools/lavfi-showfiltfmts.o $(FF_DEP_LIBS) - $(LD) $(FF_LDFLAGS) -o $@ $< $(FF_EXTRALIBS) - -include $(SRC_PATH_BARE)/tests/fate.mak -include $(SRC_PATH_BARE)/tests/fate2.mak - -include $(SRC_PATH_BARE)/tests/fate/aac.mak -include $(SRC_PATH_BARE)/tests/fate/als.mak -include $(SRC_PATH_BARE)/tests/fate/fft.mak -include $(SRC_PATH_BARE)/tests/fate/h264.mak -include $(SRC_PATH_BARE)/tests/fate/mp3.mak -include $(SRC_PATH_BARE)/tests/fate/vorbis.mak -include $(SRC_PATH_BARE)/tests/fate/vp8.mak - -FATE_ACODEC = $(ACODEC_TESTS:%=fate-acodec-%) -FATE_VSYNTH1 = $(VCODEC_TESTS:%=fate-vsynth1-%) -FATE_VSYNTH2 = $(VCODEC_TESTS:%=fate-vsynth2-%) -FATE_VCODEC = $(FATE_VSYNTH1) $(FATE_VSYNTH2) -FATE_LAVF = $(LAVF_TESTS:%=fate-lavf-%) -FATE_LAVFI = $(LAVFI_TESTS:%=fate-lavfi-%) -FATE_SEEK = $(SEEK_TESTS:seek_%=fate-seek-%) - -FATE = $(FATE_ACODEC) \ - $(FATE_VCODEC) \ - $(FATE_LAVF) \ - $(FATE_LAVFI) \ - $(FATE_SEEK) \ - -$(filter-out %-aref,$(FATE_ACODEC)): $(AREF) -$(filter-out %-vref,$(FATE_VCODEC)): $(VREF) -$(FATE_LAVF): $(REFS) -$(FATE_LAVFI): $(REFS) tools/lavfi-showfiltfmts$(EXESUF) -$(FATE_SEEK): fate-codec fate-lavf tests/seek_test$(EXESUF) - -$(FATE_ACODEC): CMD = codectest acodec -$(FATE_VSYNTH1): CMD = codectest vsynth1 -$(FATE_VSYNTH2): CMD = codectest vsynth2 -$(FATE_LAVF): CMD = lavftest -$(FATE_LAVFI): CMD = lavfitest -$(FATE_SEEK): CMD = seektest - -fate-codec: fate-acodec fate-vcodec -fate-acodec: $(FATE_ACODEC) -fate-vcodec: $(FATE_VCODEC) -fate-lavf: $(FATE_LAVF) -fate-lavfi: $(FATE_LAVFI) -fate-seek: $(FATE_SEEK) - -ifdef SAMPLES -FATE += $(FATE_TESTS) -fate-rsync: - rsync -vaLW rsync://fate-suite.libav.org/fate-suite/ $(SAMPLES) -else -fate-rsync: - @echo "use 'make fate-rsync SAMPLES=/path/to/samples' to sync the fate suite" -$(FATE_TESTS): - @echo "SAMPLES not specified, cannot run FATE. See doc/fate.txt for more information." -endif - -FATE_UTILS = base64 tiny_psnr +include $(SRC_PATH)/doc/Makefile +include $(SRC_PATH)/tests/Makefile -fate: $(FATE) +$(sort $(OBJDIRS)): + $(Q)mkdir -p $@ -$(FATE): ffmpeg$(EXESUF) $(FATE_UTILS:%=tests/%$(HOSTEXESUF)) - @echo "TEST $(@:fate-%=%)" - $(Q)$(SRC_PATH)/tests/fate-run.sh $@ "$(SAMPLES)" "$(TARGET_EXEC)" "$(TARGET_PATH)" '$(CMD)' '$(CMP)' '$(REF)' '$(FUZZ)' '$(THREADS)' '$(THREAD_TYPE)' +# Dummy rule to stop make trying to rebuild removed or renamed headers +%.h: + @: -fate-list: - @printf '%s\n' $(sort $(FATE)) +# Disable suffix rules. Most of the builtin rules are suffix rules, +# so this saves some time on slow systems. 
+.SUFFIXES: -.PHONY: all alltools *clean check config documentation examples install* -.PHONY: *test testprogs uninstall* +.PHONY: all all-yes alltools *clean config examples install* +.PHONY: testprogs uninstall* @@ -1 +1 @@ -0.7 +0.8.git diff --git a/cmdutils.c b/cmdutils.c index cd6d13346d..084c441fa4 100644 --- a/cmdutils.c +++ b/cmdutils.c @@ -49,13 +49,10 @@ #include <sys/resource.h> #endif -const char **opt_names; -const char **opt_values; -static int opt_name_count; AVCodecContext *avcodec_opts[AVMEDIA_TYPE_NB]; AVFormatContext *avformat_opts; struct SwsContext *sws_opts; -AVDictionary *format_opts, *video_opts, *audio_opts, *sub_opts; +AVDictionary *format_opts, *codec_opts; static const int this_year = 2011; @@ -63,7 +60,7 @@ void init_opts(void) { int i; for (i = 0; i < AVMEDIA_TYPE_NB; i++) - avcodec_opts[i] = avcodec_alloc_context2(i); + avcodec_opts[i] = avcodec_alloc_context3(NULL); avformat_opts = avformat_alloc_context(); #if CONFIG_SWSCALE sws_opts = sws_getContext(16, 16, 0, 16, 16, 0, SWS_BICUBIC, NULL, NULL, NULL); @@ -81,17 +78,8 @@ void uninit_opts(void) sws_freeContext(sws_opts); sws_opts = NULL; #endif - for (i = 0; i < opt_name_count; i++) { - av_freep(&opt_names[i]); - av_freep(&opt_values[i]); - } - av_freep(&opt_names); - av_freep(&opt_values); - opt_name_count = 0; av_dict_free(&format_opts); - av_dict_free(&video_opts); - av_dict_free(&audio_opts); - av_dict_free(&sub_opts); + av_dict_free(&codec_opts); } void log_callback_help(void* ptr, int level, const char* fmt, va_list vl) @@ -281,7 +269,7 @@ unknown_opt: *po->u.float_arg = parse_number_or_die(opt, arg, OPT_FLOAT, -INFINITY, INFINITY); } else if (po->u.func_arg) { if (po->u.func_arg(opt, arg) < 0) { - fprintf(stderr, "%s: failed to set value '%s' for option '%s'\n", argv[0], arg, opt); + fprintf(stderr, "%s: failed to set value '%s' for option '%s'\n", argv[0], arg ? arg : "[null]", opt); exit(1); } } @@ -297,20 +285,14 @@ unknown_opt: } #define FLAGS (o->type == FF_OPT_TYPE_FLAGS) ? 
AV_DICT_APPEND : 0 -#define SET_PREFIXED_OPTS(ch, flag, output) \ - if (opt[0] == ch && avcodec_opts[0] && (o = av_opt_find(avcodec_opts[0], opt+1, NULL, flag, 0)))\ - av_dict_set(&output, opt+1, arg, FLAGS); -static int opt_default2(const char *opt, const char *arg) +int opt_default(const char *opt, const char *arg) { const AVOption *o; - if ((o = av_opt_find(avcodec_opts[0], opt, NULL, 0, AV_OPT_SEARCH_CHILDREN))) { - if (o->flags & AV_OPT_FLAG_VIDEO_PARAM) - av_dict_set(&video_opts, opt, arg, FLAGS); - if (o->flags & AV_OPT_FLAG_AUDIO_PARAM) - av_dict_set(&audio_opts, opt, arg, FLAGS); - if (o->flags & AV_OPT_FLAG_SUBTITLE_PARAM) - av_dict_set(&sub_opts, opt, arg, FLAGS); - } else if ((o = av_opt_find(avformat_opts, opt, NULL, 0, AV_OPT_SEARCH_CHILDREN))) + if ((o = av_opt_find(avcodec_opts[0], opt, NULL, 0, AV_OPT_SEARCH_CHILDREN)) || + ((opt[0] == 'v' || opt[0] == 'a' || opt[0] == 's') && + (o = av_opt_find(avcodec_opts[0], opt+1, NULL, 0, 0)))) + av_dict_set(&codec_opts, opt, arg, FLAGS); + else if ((o = av_opt_find(avformat_opts, opt, NULL, 0, AV_OPT_SEARCH_CHILDREN))) av_dict_set(&format_opts, opt, arg, FLAGS); else if ((o = av_opt_find(sws_opts, opt, NULL, 0, AV_OPT_SEARCH_CHILDREN))) { // XXX we only support sws_flags, not arbitrary sws options @@ -321,93 +303,12 @@ static int opt_default2(const char *opt, const char *arg) } } - if (!o) { - SET_PREFIXED_OPTS('v', AV_OPT_FLAG_VIDEO_PARAM, video_opts) - SET_PREFIXED_OPTS('a', AV_OPT_FLAG_AUDIO_PARAM, audio_opts) - SET_PREFIXED_OPTS('s', AV_OPT_FLAG_SUBTITLE_PARAM, sub_opts) - } - if (o) return 0; fprintf(stderr, "Unrecognized option '%s'\n", opt); return AVERROR_OPTION_NOT_FOUND; } -int opt_default(const char *opt, const char *arg){ - int type; - int ret= 0; - const AVOption *o= NULL; - int opt_types[]={AV_OPT_FLAG_VIDEO_PARAM, AV_OPT_FLAG_AUDIO_PARAM, 0, AV_OPT_FLAG_SUBTITLE_PARAM, 0}; - AVCodec *p = NULL; - AVOutputFormat *oformat = NULL; - AVInputFormat *iformat = NULL; - - while ((p = av_codec_next(p))) { - const AVClass *c = p->priv_class; - if (c && av_find_opt(&c, opt, NULL, 0, 0)) - break; - } - if (p) - goto out; - while ((oformat = av_oformat_next(oformat))) { - const AVClass *c = oformat->priv_class; - if (c && av_find_opt(&c, opt, NULL, 0, 0)) - break; - } - if (oformat) - goto out; - while ((iformat = av_iformat_next(iformat))) { - const AVClass *c = iformat->priv_class; - if (c && av_find_opt(&c, opt, NULL, 0, 0)) - break; - } - if (iformat) - goto out; - - for(type=0; *avcodec_opts && type<AVMEDIA_TYPE_NB && ret>= 0; type++){ - const AVOption *o2 = av_opt_find(avcodec_opts[0], opt, NULL, opt_types[type], 0); - if(o2) - ret = av_set_string3(avcodec_opts[type], opt, arg, 1, &o); - } - if(!o && avformat_opts) - ret = av_set_string3(avformat_opts, opt, arg, 1, &o); - if(!o && sws_opts) - ret = av_set_string3(sws_opts, opt, arg, 1, &o); - if(!o){ - if (opt[0] == 'a' && avcodec_opts[AVMEDIA_TYPE_AUDIO]) - ret = av_set_string3(avcodec_opts[AVMEDIA_TYPE_AUDIO], opt+1, arg, 1, &o); - else if(opt[0] == 'v' && avcodec_opts[AVMEDIA_TYPE_VIDEO]) - ret = av_set_string3(avcodec_opts[AVMEDIA_TYPE_VIDEO], opt+1, arg, 1, &o); - else if(opt[0] == 's' && avcodec_opts[AVMEDIA_TYPE_SUBTITLE]) - ret = av_set_string3(avcodec_opts[AVMEDIA_TYPE_SUBTITLE], opt+1, arg, 1, &o); - if (ret >= 0) - opt += 1; - } - if (o && ret < 0) { - fprintf(stderr, "Invalid value '%s' for option '%s'\n", arg, opt); - exit(1); - } - if (!o) { - fprintf(stderr, "Unrecognized option '%s'\n", opt); - exit(1); - } - - out: - if ((ret = opt_default2(opt, arg)) < 0) - 
return ret; - -// av_log(NULL, AV_LOG_ERROR, "%s:%s: %f 0x%0X\n", opt, arg, av_get_double(avcodec_opts, opt, NULL), (int)av_get_int(avcodec_opts, opt, NULL)); - - opt_values= av_realloc(opt_values, sizeof(void*)*(opt_name_count+1)); - opt_values[opt_name_count] = av_strdup(arg); - opt_names= av_realloc(opt_names, sizeof(void*)*(opt_name_count+1)); - opt_names[opt_name_count++] = av_strdup(opt); - - if ((*avcodec_opts && avcodec_opts[0]->debug) || (avformat_opts && avformat_opts->debug)) - av_log_set_level(AV_LOG_DEBUG); - return 0; -} - int opt_loglevel(const char *opt, const char *arg) { const struct { const char *name; int level; } log_levels[] = { @@ -456,59 +357,6 @@ int opt_timelimit(const char *opt, const char *arg) return 0; } -static void *alloc_priv_context(int size, const AVClass *class) -{ - void *p = av_mallocz(size); - if (p) { - *(const AVClass **)p = class; - av_opt_set_defaults(p); - } - return p; -} - -void set_context_opts(void *ctx, void *opts_ctx, int flags, AVCodec *codec) -{ - int i; - void *priv_ctx=NULL; - if(!strcmp("AVCodecContext", (*(AVClass**)ctx)->class_name)){ - AVCodecContext *avctx= ctx; - if(codec && codec->priv_class){ - if(!avctx->priv_data && codec->priv_data_size) - avctx->priv_data= alloc_priv_context(codec->priv_data_size, codec->priv_class); - priv_ctx= avctx->priv_data; - } - } else if (!strcmp("AVFormatContext", (*(AVClass**)ctx)->class_name)) { - AVFormatContext *avctx = ctx; - if (avctx->oformat && avctx->oformat->priv_class) { - priv_ctx = avctx->priv_data; - } else if (avctx->iformat && avctx->iformat->priv_class) { - priv_ctx = avctx->priv_data; - } - } - - for(i=0; i<opt_name_count; i++){ - char buf[256]; - const AVOption *opt; - const char *str; - if (priv_ctx) { - if (av_find_opt(priv_ctx, opt_names[i], NULL, flags, flags)) { - if (av_set_string3(priv_ctx, opt_names[i], opt_values[i], 1, NULL) < 0) { - fprintf(stderr, "Invalid value '%s' for option '%s'\n", - opt_names[i], opt_values[i]); - exit(1); - } - } else - goto global; - } else { - global: - str = av_get_string(opts_ctx, opt_names[i], &opt, buf, sizeof(buf)); - /* if an option with name opt_names[i] is present in opts_ctx then str is non-NULL */ - if (str && ((opt->flags & flags) == flags)) - av_set_string3(ctx, opt_names[i], str, 1, NULL); - } - } -} - void print_error(const char *filename, int err) { char errbuf[128]; @@ -574,12 +422,13 @@ void show_banner(void) print_all_libs_info(stderr, INDENT|SHOW_VERSION); } -void show_version(void) { +int opt_version(const char *opt, const char *arg) { printf("%s " FFMPEG_VERSION "\n", program_name); print_all_libs_info(stdout, SHOW_VERSION); + return 0; } -void show_license(void) +int opt_license(const char *opt, const char *arg) { printf( #if CONFIG_NONFREE @@ -646,9 +495,10 @@ void show_license(void) program_name, program_name, program_name #endif ); + return 0; } -void show_formats(void) +int opt_formats(const char *opt, const char *arg) { AVInputFormat *ifmt=NULL; AVOutputFormat *ofmt=NULL; @@ -695,9 +545,10 @@ void show_formats(void) name, long_name ? long_name:" "); } + return 0; } -void show_codecs(void) +int opt_codecs(const char *opt, const char *arg) { AVCodec *p=NULL, *p2; const char *last_name; @@ -771,9 +622,10 @@ void show_codecs(void) "even though both encoding and decoding are supported. 
For example, the h263\n" "decoder corresponds to the h263 and h263p encoders, for file formats it is even\n" "worse.\n"); + return 0; } -void show_bsfs(void) +int opt_bsfs(const char *opt, const char *arg) { AVBitStreamFilter *bsf=NULL; @@ -781,9 +633,10 @@ void show_bsfs(void) while((bsf = av_bitstream_filter_next(bsf))) printf("%s\n", bsf->name); printf("\n"); + return 0; } -void show_protocols(void) +int opt_protocols(const char *opt, const char *arg) { URLProtocol *up=NULL; @@ -799,9 +652,10 @@ void show_protocols(void) up->url_write ? 'O' : '.', up->url_seek ? 'S' : '.', up->name); + return 0; } -void show_filters(void) +int opt_filters(const char *opt, const char *arg) { AVFilter av_unused(**filter) = NULL; @@ -810,9 +664,10 @@ void show_filters(void) while ((filter = av_filter_next(filter)) && *filter) printf("%-16s %s\n", (*filter)->name, (*filter)->description); #endif + return 0; } -void show_pix_fmts(void) +int opt_pix_fmts(const char *opt, const char *arg) { enum PixelFormat pix_fmt; @@ -843,6 +698,7 @@ void show_pix_fmts(void) pix_desc->nb_components, av_get_bits_per_pixel(pix_desc)); } + return 0; } int read_yesno(void) @@ -926,3 +782,48 @@ FILE *get_preset_file(char *filename, size_t filename_size, return f; } + +AVDictionary *filter_codec_opts(AVDictionary *opts, enum CodecID codec_id, int encoder) +{ + AVDictionary *ret = NULL; + AVDictionaryEntry *t = NULL; + AVCodec *codec = encoder ? avcodec_find_encoder(codec_id) : avcodec_find_decoder(codec_id); + int flags = encoder ? AV_OPT_FLAG_ENCODING_PARAM : AV_OPT_FLAG_DECODING_PARAM; + char prefix = 0; + + if (!codec) + return NULL; + + switch (codec->type) { + case AVMEDIA_TYPE_VIDEO: prefix = 'v'; flags |= AV_OPT_FLAG_VIDEO_PARAM; break; + case AVMEDIA_TYPE_AUDIO: prefix = 'a'; flags |= AV_OPT_FLAG_AUDIO_PARAM; break; + case AVMEDIA_TYPE_SUBTITLE: prefix = 's'; flags |= AV_OPT_FLAG_SUBTITLE_PARAM; break; + } + + while (t = av_dict_get(opts, "", t, AV_DICT_IGNORE_SUFFIX)) { + if (av_opt_find(avcodec_opts[0], t->key, NULL, flags, 0) || + (codec && codec->priv_class && av_opt_find(&codec->priv_class, t->key, NULL, flags, 0))) + av_dict_set(&ret, t->key, t->value, 0); + else if (t->key[0] == prefix && av_opt_find(avcodec_opts[0], t->key+1, NULL, flags, 0)) + av_dict_set(&ret, t->key+1, t->value, 0); + } + return ret; +} + +AVDictionary **setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts) +{ + int i; + AVDictionary **opts; + + if (!s->nb_streams) + return NULL; + opts = av_mallocz(s->nb_streams * sizeof(*opts)); + if (!opts) { + av_log(NULL, AV_LOG_ERROR, "Could not alloc memory for stream options.\n"); + return NULL; + } + for (i = 0; i < s->nb_streams; i++) + opts[i] = filter_codec_opts(codec_opts, s->streams[i]->codec->codec_id, 0); + return opts; +} + diff --git a/cmdutils.h b/cmdutils.h index e001ab9201..8348bf266a 100644 --- a/cmdutils.h +++ b/cmdutils.h @@ -43,11 +43,10 @@ extern const char program_name[]; */ extern const int program_birth_year; -extern const char **opt_names; extern AVCodecContext *avcodec_opts[AVMEDIA_TYPE_NB]; extern AVFormatContext *avformat_opts; extern struct SwsContext *sws_opts; -extern AVDictionary *format_opts, *video_opts, *audio_opts, *sub_opts; +extern AVDictionary *format_opts, *codec_opts; /** * Initialize the cmdutils option system, in particular @@ -62,7 +61,7 @@ void uninit_opts(void); /** * Trivial log callback. - * Only suitable for show_help and similar since it lacks prefix handling. + * Only suitable for opt_help and similar since it lacks prefix handling. 
*/ void log_callback_help(void* ptr, int level, const char* fmt, va_list vl); @@ -153,7 +152,29 @@ void show_help_options(const OptionDef *options, const char *msg, int mask, int void parse_options(int argc, char **argv, const OptionDef *options, int (* parse_arg_function)(const char *opt, const char *arg)); -void set_context_opts(void *ctx, void *opts_ctx, int flags, AVCodec *codec); +/** + * Filter out options for given codec. + * + * Create a new options dictionary containing only the options from + * opts which apply to the codec with ID codec_id. + * + * @param encoder if non-zero the codec is an encoder, otherwise is a decoder + * @return a pointer to the created dictionary + */ +AVDictionary *filter_codec_opts(AVDictionary *opts, enum CodecID codec_id, int encoder); + +/** + * Setup AVCodecContext options for avformat_find_stream_info(). + * + * Create an array of dictionaries, one dictionary for each stream + * contained in s. + * Each dictionary will contain the options from codec_opts which can + * be applied to the corresponding stream codec context. + * + * @return pointer to the created array of dictionaries, NULL if it + * cannot be created + */ +AVDictionary **setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts); /** * Print an error message to stderr, indicating filename and a human @@ -177,50 +198,58 @@ void show_banner(void); * Print the version of the program to stdout. The version message * depends on the current versions of the repository and of the libav* * libraries. + * This option processing function does not utilize the arguments. */ -void show_version(void); +int opt_version(const char *opt, const char *arg); /** * Print the license of the program to stdout. The license depends on * the license of the libraries compiled into the program. + * This option processing function does not utilize the arguments. */ -void show_license(void); +int opt_license(const char *opt, const char *arg); /** * Print a listing containing all the formats supported by the * program. + * This option processing function does not utilize the arguments. */ -void show_formats(void); +int opt_formats(const char *opt, const char *arg); /** * Print a listing containing all the codecs supported by the * program. + * This option processing function does not utilize the arguments. */ -void show_codecs(void); +int opt_codecs(const char *opt, const char *arg); /** * Print a listing containing all the filters supported by the * program. + * This option processing function does not utilize the arguments. */ -void show_filters(void); +int opt_filters(const char *opt, const char *arg); /** * Print a listing containing all the bit stream filters supported by the * program. + * This option processing function does not utilize the arguments. */ -void show_bsfs(void); +int opt_bsfs(const char *opt, const char *arg); /** * Print a listing containing all the protocols supported by the * program. + * This option processing function does not utilize the arguments. */ -void show_protocols(void); +int opt_protocols(const char *opt, const char *arg); /** * Print a listing containing all the pixel formats supported by the * program. + * This option processing function does not utilize the arguments. 
*/ -void show_pix_fmts(void); +int opt_pix_fmts(const char *opt, const char *arg); /** * Return a positive value if a line read from standard input diff --git a/cmdutils_common_opts.h b/cmdutils_common_opts.h index 9b5e5d22cd..8e680490fe 100644 --- a/cmdutils_common_opts.h +++ b/cmdutils_common_opts.h @@ -1,13 +1,13 @@ - { "L", OPT_EXIT, {(void*)show_license}, "show license" }, - { "h", OPT_EXIT, {(void*)show_help}, "show help" }, - { "?", OPT_EXIT, {(void*)show_help}, "show help" }, - { "help", OPT_EXIT, {(void*)show_help}, "show help" }, - { "-help", OPT_EXIT, {(void*)show_help}, "show help" }, - { "version", OPT_EXIT, {(void*)show_version}, "show version" }, - { "formats" , OPT_EXIT, {(void*)show_formats }, "show available formats" }, - { "codecs" , OPT_EXIT, {(void*)show_codecs }, "show available codecs" }, - { "bsfs" , OPT_EXIT, {(void*)show_bsfs }, "show available bit stream filters" }, - { "protocols", OPT_EXIT, {(void*)show_protocols}, "show available protocols" }, - { "filters", OPT_EXIT, {(void*)show_filters }, "show available filters" }, - { "pix_fmts" , OPT_EXIT, {(void*)show_pix_fmts }, "show available pixel formats" }, + { "L", OPT_EXIT, {(void*)opt_license}, "show license" }, + { "h", OPT_EXIT, {(void*)opt_help}, "show help" }, + { "?", OPT_EXIT, {(void*)opt_help}, "show help" }, + { "help", OPT_EXIT, {(void*)opt_help}, "show help" }, + { "-help", OPT_EXIT, {(void*)opt_help}, "show help" }, + { "version", OPT_EXIT, {(void*)opt_version}, "show version" }, + { "formats" , OPT_EXIT, {(void*)opt_formats }, "show available formats" }, + { "codecs" , OPT_EXIT, {(void*)opt_codecs }, "show available codecs" }, + { "bsfs" , OPT_EXIT, {(void*)opt_bsfs }, "show available bit stream filters" }, + { "protocols", OPT_EXIT, {(void*)opt_protocols}, "show available protocols" }, + { "filters", OPT_EXIT, {(void*)opt_filters }, "show available filters" }, + { "pix_fmts" , OPT_EXIT, {(void*)opt_pix_fmts }, "show available pixel formats" }, { "loglevel", HAS_ARG, {(void*)opt_loglevel}, "set libav* logging level", "loglevel" }, diff --git a/common.mak b/common.mak index a293987272..b1f62d90f7 100644 --- a/common.mak +++ b/common.mak @@ -20,20 +20,30 @@ $(foreach VAR,$(SILENT),$(eval override $(VAR) = @$($(VAR)))) $(eval INSTALL = @$(call ECHO,INSTALL,$$(^:$(SRC_DIR)/%=%)); $(INSTALL)) endif -IFLAGS := -I. -I$(SRC_PATH) -CPPFLAGS := $(IFLAGS) $(CPPFLAGS) -CFLAGS += $(ECFLAGS) -YASMFLAGS += $(IFLAGS) -Pconfig.asm - +ALLFFLIBS = avcodec avdevice avfilter avformat avutil postproc swscale + +# NASM requires -I path terminated with / +IFLAGS := -I. 
-I$(SRC_PATH)/ +CPPFLAGS := $(IFLAGS) $(CPPFLAGS) +CFLAGS += $(ECFLAGS) +CCFLAGS = $(CFLAGS) +YASMFLAGS += $(IFLAGS) -Pconfig.asm HOSTCFLAGS += $(IFLAGS) +LDFLAGS := $(ALLFFLIBS:%=-Llib%) $(LDFLAGS) + +define COMPILE + $($(1)DEP) + $($(1)) $(CPPFLAGS) $($(1)FLAGS) $($(1)_DEPFLAGS) -c $($(1)_O) $< +endef + +COMPILE_C = $(call COMPILE,CC) +COMPILE_S = $(call COMPILE,AS) %.o: %.c - $(CCDEP) - $(CC) $(CPPFLAGS) $(CFLAGS) $(CC_DEPFLAGS) -c $(CC_O) $< + $(COMPILE_C) %.o: %.S - $(ASDEP) - $(AS) $(CPPFLAGS) $(ASFLAGS) $(AS_DEPFLAGS) -c -o $@ $< + $(COMPILE_S) %.ho: %.h $(CC) $(CPPFLAGS) $(CFLAGS) -Wno-unused -c -o $@ -x c $< @@ -61,29 +71,40 @@ OBJS += $(OBJS-yes) FFLIBS := $(FFLIBS-yes) $(FFLIBS) TESTPROGS += $(TESTPROGS-yes) -FFEXTRALIBS := $(addprefix -l,$(addsuffix $(BUILDSUF),$(FFLIBS))) $(EXTRALIBS) -FFLDFLAGS := $(addprefix -Llib,$(ALLFFLIBS)) $(LDFLAGS) +FFEXTRALIBS := $(FFLIBS:%=-l%$(BUILDSUF)) $(EXTRALIBS) -EXAMPLES := $(addprefix $(SUBDIR),$(addsuffix -example$(EXESUF),$(EXAMPLES))) -OBJS := $(addprefix $(SUBDIR),$(sort $(OBJS))) -TESTOBJS := $(addprefix $(SUBDIR),$(TESTOBJS) $(TESTPROGS:%=%-test.o)) -TESTPROGS := $(addprefix $(SUBDIR),$(addsuffix -test$(EXESUF),$(TESTPROGS))) -HOSTOBJS := $(addprefix $(SUBDIR),$(addsuffix .o,$(HOSTPROGS))) -HOSTPROGS := $(addprefix $(SUBDIR),$(addsuffix $(HOSTEXESUF),$(HOSTPROGS))) +EXAMPLES := $(EXAMPLES:%=$(SUBDIR)%-example$(EXESUF)) +OBJS := $(sort $(OBJS:%=$(SUBDIR)%)) +TESTOBJS := $(TESTOBJS:%=$(SUBDIR)%) $(TESTPROGS:%=$(SUBDIR)%-test.o) +TESTPROGS := $(TESTPROGS:%=$(SUBDIR)%-test$(EXESUF)) +HOSTOBJS := $(HOSTPROGS:%=$(SUBDIR)%.o) +HOSTPROGS := $(HOSTPROGS:%=$(SUBDIR)%$(HOSTEXESUF)) +TOOLS += $(TOOLS-yes) +TOOLOBJS := $(TOOLS:%=tools/%.o) +TOOLS := $(TOOLS:%=tools/%$(EXESUF)) DEP_LIBS := $(foreach NAME,$(FFLIBS),lib$(NAME)/$($(CONFIG_SHARED:yes=S)LIBNAME)) ALLHEADERS := $(subst $(SRC_DIR)/,$(SUBDIR),$(wildcard $(SRC_DIR)/*.h $(SRC_DIR)/$(ARCH)/*.h)) -SKIPHEADERS += $(addprefix $(ARCH)/,$(ARCH_HEADERS)) -SKIPHEADERS := $(addprefix $(SUBDIR),$(SKIPHEADERS-) $(SKIPHEADERS)) +SKIPHEADERS += $(ARCH_HEADERS:%=$(ARCH)/%) $(SKIPHEADERS-) +SKIPHEADERS := $(SKIPHEADERS:%=$(SUBDIR)%) checkheaders: $(filter-out $(SKIPHEADERS:.h=.ho),$(ALLHEADERS:.h=.ho)) +alltools: $(TOOLS) + $(HOSTOBJS): %.o: %.c $(HOSTCC) $(HOSTCFLAGS) -c -o $@ $< $(HOSTPROGS): %$(HOSTEXESUF): %.o $(HOSTCC) $(HOSTLDFLAGS) -o $@ $< $(HOSTLIBS) +$(OBJS): | $(sort $(dir $(OBJS))) +$(HOSTOBJS): | $(sort $(dir $(HOSTOBJS))) +$(TESTOBJS): | $(sort $(dir $(TESTOBJS))) +$(TOOLOBJS): | tools + +OBJDIRS := $(OBJDIRS) $(dir $(OBJS) $(HOSTOBJS) $(TESTOBJS)) + CLEANSUFFIXES = *.d *.o *~ *.ho *.map *.ver DISTCLEANSUFFIXES = *.pc LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a *.exp @@ -106,8 +106,8 @@ Configuration options: --disable-lpc disable LPC code --disable-mdct disable MDCT code --disable-rdft disable RDFT code - --enable-vaapi enable VAAPI code - --enable-vdpau enable VDPAU code + --enable-vaapi enable VAAPI code [autodetect] + --enable-vdpau enable VDPAU code [autodetect] --disable-dxva2 disable DXVA2 code --enable-runtime-cpudetect detect cpu capabilities at runtime (bigger binary) --enable-hardcoded-tables use hardcoded tables instead of runtime generation @@ -188,6 +188,7 @@ External library support: --enable-libxavs enable AVS encoding via xavs [no] --enable-libxvid enable Xvid encoding via xvidcore, native MPEG-4/Xvid encoder exists [no] + --enable-openal enable OpenAL 1.1 capture support [no] --enable-mlib enable Sun medialib [no] --enable-zlib enable zlib [autodetect] @@ 
-238,6 +239,7 @@ Advanced options (experts only): --malloc-prefix=PFX prefix malloc and related names with PFX --enable-sram allow use of on-chip SRAM --disable-symver disable symbol versioning + --optflags override optimization-related compiler flags Developer options (useful when working on FFmpeg itself): --disable-debug disable debugging symbols @@ -876,9 +878,9 @@ apply(){ } cp_if_changed(){ - cmp -s "$1" "$2" && - echo "$2 is unchanged" || - cp -f "$1" "$2" + cmp -s "$1" "$2" && echo "$2 is unchanged" && return + mkdir -p "$(dirname $2)" + cp -f "$1" "$2" } # CONFIG_LIST contains configurable options, while HAVE_LIST is for @@ -958,6 +960,7 @@ CONFIG_LIST=" mpegaudiodsp network nonfree + openal pic postproc rdft @@ -970,6 +973,7 @@ CONFIG_LIST=" static swscale swscale_alpha + thumb vaapi vdpau version3 @@ -1094,7 +1098,6 @@ HAVE_LIST=" memalign mkstemp mmap - pld posix_memalign round roundf @@ -1107,6 +1110,7 @@ HAVE_LIST=" poll_h setrlimit strerror_r + strptime strtok_r struct_addrinfo struct_ipv6_mreq @@ -1121,7 +1125,6 @@ HAVE_LIST=" sys_select_h sys_soundcard_h sys_videoio_h - ten_operands termios_h threads trunc @@ -1187,6 +1190,7 @@ CMDLINE_SET=" logfile malloc_prefix nm + optflags pkg_config samples strip @@ -1276,6 +1280,7 @@ flac_encoder_select="golomb lpc" flashsv_decoder_select="zlib" flashsv_encoder_select="zlib" flashsv2_encoder_select="zlib" +flashsv2_decoder_select="zlib" flv_decoder_select="h263_decoder" flv_encoder_select="h263_encoder" fraps_decoder_select="huffman" @@ -1468,6 +1473,7 @@ dv1394_indev_deps="dv1394 dv_demuxer" fbdev_indev_deps="linux_fb_h" jack_indev_deps="jack_jack_h sem_timedwait" libdc1394_indev_deps="libdc1394" +openal_indev_deps="openal" oss_indev_deps_any="soundcard_h sys_soundcard_h" oss_outdev_deps_any="soundcard_h sys_soundcard_h" sdl_outdev_deps="sdl" @@ -1493,6 +1499,7 @@ udp_protocol_deps="network" # filters blackframe_filter_deps="gpl" +boxblur_filter_deps="gpl" cropdetect_filter_deps="gpl" drawtext_filter_deps="libfreetype" frei0r_filter_deps="frei0r dlopen strtok_r" @@ -1512,9 +1519,9 @@ postproc_deps="gpl" # programs ffmpeg_deps="avcodec avformat swscale" -ffmpeg_select="buffer_filter" +ffmpeg_select="buffer_filter buffersink_filter" ffplay_deps="avcodec avformat swscale sdl" -ffplay_select="rdft" +ffplay_select="buffersink_filter rdft" ffprobe_deps="avcodec avformat" ffserver_deps="avformat ffm_muxer fork rtp_protocol rtsp_demuxer" ffserver_extralibs='$ldl' @@ -1536,11 +1543,6 @@ test_deps(){ done } -set_ne_test_deps(){ - eval ${1}_be_test_deps="bigendian" - eval ${1}_le_test_deps="!bigendian" -} - test_deps _encoder _decoder \ adpcm_g726=g726 \ adpcm_ima_qt \ @@ -1603,7 +1605,7 @@ test_deps _muxer _demuxer \ mmf \ mov \ pcm_mulaw=mulaw \ - mxf \ + mxf="mxf mxf_d10" \ nut \ ogg \ rawvideo=pixfmt \ @@ -1617,15 +1619,6 @@ test_deps _muxer _demuxer \ ac3_fixed_test_deps="ac3_fixed_encoder ac3_decoder rm_muxer rm_demuxer" mpg_test_deps="mpeg1system_muxer mpegps_demuxer" -set_ne_test_deps pixdesc -set_ne_test_deps pixfmts_copy -set_ne_test_deps pixfmts_crop -set_ne_test_deps pixfmts_hflip -set_ne_test_deps pixfmts_null -set_ne_test_deps pixfmts_pad -set_ne_test_deps pixfmts_scale -set_ne_test_deps pixfmts_vflip - # default parameters logfile="config.log" @@ -1700,7 +1693,10 @@ SLIBNAME='$(SLIBPREF)$(FULLNAME)$(SLIBSUF)' SLIBNAME_WITH_VERSION='$(SLIBNAME).$(LIBVERSION)' SLIBNAME_WITH_MAJOR='$(SLIBNAME).$(LIBMAJOR)' LIB_INSTALL_EXTRA_CMD='$$(RANLIB) "$(LIBDIR)/$(LIBNAME)"' +SLIB_INSTALL_NAME='$(SLIBNAME_WITH_VERSION)' 
+SLIB_INSTALL_LINKS='$(SLIBNAME_WITH_MAJOR) $(SLIBNAME)' +AS_O='-o $@' CC_O='-o $@' host_cflags='-D_ISOC99_SOURCE -O3 -g' @@ -1715,11 +1711,9 @@ DEPFLAGS='$(CPPFLAGS) $(CFLAGS) -MM' # find source path if test -f configure; then - source_path="$(pwd)" - disable source_path_used + source_path=. else source_path=$(cd $(dirname "$0"); pwd) - enable source_path_used echo "$source_path" | grep -q '[[:blank:]]' && die "Out of tree builds are impossible with whitespace in source path." test -e "$source_path/config.h" && @@ -2056,6 +2050,7 @@ elif $cc -V 2>&1 | grep -q Sun; then cc_ident=$($cc -V 2>&1 | head -n1 | cut -d' ' -f 2-) DEPEND_CMD='$(DEPCC) $(DEPFLAGS) $< | sed -e "1s,^.*: ,$@: ," -e "\$$!s,\$$, \\\," -e "1!s,^.*: , ," > $(@:.o=.d)' DEPFLAGS='$(CPPFLAGS) $(CFLAGS) -xM1' + add_ldflags -xc99 speed_cflags='-O5' size_cflags='-O5 -xspace' filter_cflags=suncc_flags @@ -2146,7 +2141,7 @@ if test "$cpu" = host; then gcc|llvm_gcc) check_native(){ $cc $1=native -v -c -o $TMPO $TMPC >$TMPE 2>&1 || return - sed -n "/$1=/{ + sed -n "/cc1.*$1=/{ s/.*$1=\\([^ ]*\\).*/\\1/ p q @@ -2261,7 +2256,7 @@ elif enabled x86; then disable cmov ;; # targets that do support conditional mov (cmov) - i686|pentiumpro|pentium[23]|pentium-m|athlon|athlon-tbird|athlon-4|athlon-[mx]p|athlon64|k8|opteron|athlon-fx|core2|amdfam10|barcelona|atom) + i686|pentiumpro|pentium[23]|pentium-m|athlon|athlon-tbird|athlon-4|athlon-[mx]p|athlon64*|k8*|opteron*|athlon-fx|core2|amdfam10|barcelona|atom) cpuflags="-march=$cpu" enable cmov enable fast_cmov @@ -2298,7 +2293,7 @@ elif enabled arm; then case $cpu in cortex-a*) subarch=armv7a ;; cortex-r*) subarch=armv7r ;; - cortex-m*) subarch=armv7m ;; + cortex-m*) enable thumb; subarch=armv7m ;; arm11*) subarch=armv6 ;; arm[79]*e*|arm9[24]6*|arm96*|arm102[26]) subarch=armv5te ;; armv4*|arm7*|arm9[24]*) subarch=armv4 ;; @@ -2466,12 +2461,11 @@ case $target_os in SLIBSUF=".dll" SLIBNAME_WITH_VERSION='$(SLIBPREF)$(FULLNAME)-$(LIBVERSION)$(SLIBSUF)' SLIBNAME_WITH_MAJOR='$(SLIBPREF)$(FULLNAME)-$(LIBMAJOR)$(SLIBSUF)' - SLIB_EXTRA_CMD='-lib.exe /machine:$(LIBTARGET) /def:$$(@:$(SLIBSUF)=.def) /out:$(SUBDIR)$(SLIBNAME_WITH_MAJOR:$(SLIBSUF)=.lib)' - SLIB_INSTALL_EXTRA_CMD='-install -m 644 $(SUBDIR)$(SLIBNAME_WITH_MAJOR:$(SLIBSUF)=.lib) "$(SHLIBDIR)/$(SLIBNAME:$(SLIBSUF)=.lib)"; \ - install -m 644 $(SUBDIR)$(SLIBNAME_WITH_MAJOR:$(SLIBSUF)=.lib) "$(SHLIBDIR)/$(SLIBNAME_WITH_MAJOR:$(SLIBSUF)=.lib)"; \ - install -d "$(LIBDIR)"; \ - install -m 644 $(SUBDIR)lib$(SLIBNAME:$(SLIBSUF)=.dll.a) "$(LIBDIR)/lib$(SLIBNAME:$(SLIBSUF)=.dll.a)"' - SLIB_UNINSTALL_EXTRA_CMD='rm -f "$(SHLIBDIR)/$(SLIBNAME:$(SLIBSUF)=.lib)"' + SLIB_EXTRA_CMD='-lib.exe /machine:$(LIBTARGET) /def:$$(@:$(SLIBSUF)=.def) /out:$(SUBDIR)$(SLIBNAME:$(SLIBSUF)=.lib)' + SLIB_INSTALL_NAME='$(SLIBNAME_WITH_MAJOR)' + SLIB_INSTALL_LINKS= + SLIB_INSTALL_EXTRA_SHLIB='$(SLIBNAME:$(SLIBSUF)=.lib)' + SLIB_INSTALL_EXTRA_LIB='lib$(SLIBNAME:$(SLIBSUF)=.dll.a) $(SLIBNAME_WITH_MAJOR:$(SLIBSUF)=.def)' SHFLAGS='-shared -Wl,--output-def,$$(@:$(SLIBSUF)=.def) -Wl,--out-implib,$(SUBDIR)lib$(SLIBNAME:$(SLIBSUF)=.dll.a) -Wl,--enable-runtime-pseudo-reloc -Wl,--enable-auto-image-base' objformat="win32" enable dos_paths @@ -2529,8 +2523,7 @@ case $target_os in emxexp -o $(OBJS) >> $(SUBDIR)$(NAME).def' SLIB_EXTRA_CMD='emximp -o $(SUBDIR)$(LIBPREF)$(NAME)_dll.a $(SUBDIR)$(NAME).def; \ emximp -o $(SUBDIR)$(LIBPREF)$(NAME)_dll.lib $(SUBDIR)$(NAME).def;' - SLIB_INSTALL_EXTRA_CMD='install -m 644 $(SUBDIR)$(LIBPREF)$(NAME)_dll.a $(SUBDIR)$(LIBPREF)$(NAME)_dll.lib "$(LIBDIR)"' 
- SLIB_UNINSTALL_EXTRA_CMD='rm -f "$(LIBDIR)"/$(LIBPREF)$(NAME)_dll.a "$(LIBDIR)"/$(LIBPREF)$(NAME)_dll.lib' + SLIB_INSTALL_EXTRA_LIB='$(LIBPREF)$(NAME)_dll.a $(LIBPREF)$(NAME)_dll.lib' enable dos_paths ;; gnu/kfreebsd) @@ -2545,7 +2538,14 @@ case $target_os in symbian) SLIBSUF=".dll" enable dos_paths - add_cflags --include=$sysinclude/gcce/gcce.h + add_cflags --include=$sysinclude/gcce/gcce.h -fvisibility=default + add_cppflags -D__GCCE__ -D__SYMBIAN32__ -DSYMBIAN_OE_POSIX_SIGNALS + add_ldflags -Wl,--target1-abs,--no-undefined \ + -Wl,-Ttext,0x80000,-Tdata,0x1000000 -shared \ + -Wl,--entry=_E32Startup -Wl,-u,_E32Startup + add_extralibs -l:eexe.lib -l:usrt2_2.lib -l:dfpaeabi.dso \ + -l:drtaeabi.dso -l:scppnwdl.dso -lsupc++ -lgcc \ + -l:libc.dso -l:libm.dso -l:euser.dso -l:libcrt0.lib ;; none) ;; @@ -2571,8 +2571,6 @@ EOF exit 1; fi -disabled static && LIBNAME="" - die_license_disabled() { enabled $1 || { enabled $2 && die "$2 is $1 and --enable-$1 is not specified."; } } @@ -2638,7 +2636,7 @@ if enabled alpha; then elif enabled arm; then - check_cflags -marm + enabled thumb && check_cflags -mthumb || check_cflags -marm nogas=die if check_cpp_condition stddef.h "defined __ARM_PCS_VFP"; then @@ -2654,9 +2652,6 @@ EOF warn "Compiler does not indicate floating-point ABI, guessing $fpabi." fi - # We have to check if pld is a nop and disable it. - check_asm pld '"pld [r0]"' - enabled armv5te && check_asm armv5te '"qadd r0, r0, r0"' enabled armv6 && check_asm armv6 '"sadd16 r0, r0, r0"' enabled armv6t2 && check_asm armv6t2 '"movt r0, #0"' @@ -2733,18 +2728,6 @@ EOF # check whether xmm clobbers are supported check_asm xmm_clobbers '"":::"%xmm0"' - # check whether more than 10 operands are supported - check_cc <<EOF && enable ten_operands -int main(void) { - int x=0; - __asm__ volatile( - "" - :"+&rm"(x), "+&rm"(x), "+&rm"(x), "+&rm"(x), "+&rm"(x), "+&rm"(x) - ); - return 0; -} -EOF - # check whether binutils is new enough to compile SSSE3/MMX2 enabled ssse3 && check_asm ssse3 '"pabsw %xmm0, %xmm0"' enabled mmx2 && check_asm mmx2 '"pmaxub %mm0, %mm1"' @@ -2839,6 +2822,7 @@ check_func mmap check_func ${malloc_prefix}posix_memalign && enable posix_memalign check_func setrlimit check_func strerror_r +check_func strptime check_func strtok_r check_func_headers conio.h kbhit check_func_headers io.h setmode @@ -2944,6 +2928,11 @@ enabled libx264 && require libx264 x264.h x264_encoder_encode -lx264 && die "ERROR: libx264 version must be >= 0.115."; } enabled libxavs && require libxavs xavs.h xavs_encoder_encode -lxavs enabled libxvid && require libxvid xvid.h xvid_global -lxvidcore +enabled openal && { { for al_libs in "${OPENAL_LIBS}" "-lopenal" "-lOpenAL32"; do + check_lib 'AL/al.h' alGetError "${al_libs}" && break; done } || + die "ERROR: openal not found"; } && + { check_cpp_condition "AL/al.h" "defined(AL_VERSION_1_1)" || + die "ERROR: openal version must be 1.1 or compatible"; } enabled mlib && require mediaLib mlib_types.h mlib_VectorSub_S16_U8_Mod -lmlib SDL_CONFIG="${cross_prefix}sdl-config" @@ -3032,6 +3021,7 @@ check_cflags -Wdeclaration-after-statement check_cflags -Wall check_cflags -Wno-parentheses check_cflags -Wno-switch +check_cflags -Wno-format-zero-length check_cflags -Wdisabled-optimization check_cflags -Wpointer-arith check_cflags -Wredundant-decls @@ -3046,7 +3036,7 @@ enabled extra_warnings && check_cflags -Winline # add some linker flags check_ldflags -Wl,--warn-common -check_ldflags -Wl,-rpath-link,libpostproc -Wl,-rpath-link,libswscale -Wl,-rpath-link,libavfilter 
-Wl,-rpath-link,libavdevice -Wl,-rpath-link,libavformat -Wl,-rpath-link,libavcodec -Wl,-rpath-link,libavutil +check_ldflags -Wl,-rpath-link=libpostproc:libswscale:libavfilter:libavdevice:libavformat:libavcodec:libavutil test_ldflags -Wl,-Bsymbolic && append SHFLAGS -Wl,-Bsymbolic echo "X{};" > $TMPV @@ -3062,9 +3052,10 @@ void ff_foo(void) {} EOF fi -if enabled small; then +if [ -n "$optflags" ]; then + add_cflags $optflags +elif enabled small; then add_cflags $size_cflags - optimizations="small" elif enabled optimizations; then add_cflags $speed_cflags else @@ -3118,6 +3109,8 @@ elif enabled armcc; then add_cflags -W${armcc_opt},--diag_suppress=1207 add_cflags -W${armcc_opt},--diag_suppress=1293 # assignment in condition add_cflags -W${armcc_opt},--diag_suppress=3343 # hardfp compat + add_cflags -W${armcc_opt},--diag_suppress=167 # pointer sign + add_cflags -W${armcc_opt},--diag_suppress=513 # pointer sign elif enabled tms470; then add_cflags -pds=824 -pds=837 elif enabled pathscale; then @@ -3181,7 +3174,6 @@ if enabled x86; then echo "CMOV is fast ${fast_cmov-no}" echo "EBX available ${ebx_available-no}" echo "EBP available ${ebp_available-no}" - echo "10 operands supported ${ten_operands-no}" fi if enabled arm; then echo "ARMv5TE enabled ${armv5te-no}" @@ -3204,6 +3196,7 @@ if enabled sparc; then fi echo "debug symbols ${debug-no}" echo "strip symbols ${stripping-no}" +echo "optimize for size ${small-no}" echo "optimizations ${optimizations-no}" echo "static ${static-no}" echo "shared ${shared-no}" @@ -3213,6 +3206,9 @@ echo "network support ${network-no}" echo "threading support ${thread_type-no}" echo "SDL support ${sdl-no}" echo "Sun medialib support ${mlib-no}" +echo "libdxva2 enabled ${dxva2-no}" +echo "libva enabled ${vaapi-no}" +echo "libvdpau enabled ${vdpau-no}" echo "AVISynth enabled ${avisynth-no}" echo "libcelt enabled ${libcelt-no}" echo "frei0r enabled ${frei0r-no}" @@ -3230,7 +3226,6 @@ echo "librtmp enabled ${librtmp-no}" echo "libschroedinger enabled ${libschroedinger-no}" echo "libspeex enabled ${libspeex-no}" echo "libtheora enabled ${libtheora-no}" -echo "libva enabled ${vaapi-no}" echo "libvo-aacenc support ${libvo_aacenc-no}" echo "libvo-amrwbenc support ${libvo_amrwbenc-no}" echo "libvorbis enabled ${libvorbis-no}" @@ -3238,6 +3233,7 @@ echo "libvpx enabled ${libvpx-no}" echo "libx264 enabled ${libx264-no}" echo "libxavs enabled ${libxavs-no}" echo "libxvid enabled ${libxvid-no}" +echo "openal enabled ${openal-no}" echo "zlib enabled ${zlib-no}" echo "bzlib enabled ${bzlib-no}" echo @@ -3264,44 +3260,7 @@ echo "License: $license" echo "Creating config.mak and config.h..." -# build tree in object directory if source path is different from current one -if enabled source_path_used; then - DIRS=" - doc - libavcodec - libavcodec/$arch - libavdevice - libavfilter - libavfilter/$arch - libavfilter/libmpcodecs - libavfilter/libmpcodecs/libvo - libavformat - libavutil - libavutil/$arch - libpostproc - libswscale - libswscale/$arch - tests - tools - " - FILES=" - Makefile - common.mak - subdir.mak - doc/texi2pod.pl - libavcodec/Makefile - libavcodec/${arch}/Makefile - libavdevice/Makefile - libavfilter/Makefile - libavfilter/${arch}/Makefile - libavformat/Makefile - libavutil/Makefile - libpostproc/Makefile - libswscale/Makefile - " - map 'mkdir -p $v' $DIRS; - map 'test -f "$source_path/$v" && $ln_s "$source_path/$v" $v' $FILES -fi +test -e Makefile || $ln_s "$source_path/Makefile" . 
enabled stripping || strip="echo skipping strip" @@ -3319,8 +3278,10 @@ INCDIR=\$(DESTDIR)$incdir BINDIR=\$(DESTDIR)$bindir DATADIR=\$(DESTDIR)$datadir MANDIR=\$(DESTDIR)$mandir -SRC_PATH="$source_path" -SRC_PATH_BARE=$source_path +SRC_PATH=$source_path +ifndef MAIN_MAKEFILE +SRC_PATH:=\$(SRC_PATH:.%=..%) +endif CC_IDENT=$cc_ident ARCH=$arch CC=$cc @@ -3337,6 +3298,7 @@ STRIP=$strip CPPFLAGS=$CPPFLAGS CFLAGS=$CFLAGS ASFLAGS=$ASFLAGS +AS_O=$CC_O CC_O=$CC_O LDFLAGS=$LDFLAGS FFSERVERLDFLAGS=$FFSERVERLDFLAGS @@ -3374,8 +3336,10 @@ SLIBNAME_WITH_VERSION=${SLIBNAME_WITH_VERSION} SLIBNAME_WITH_MAJOR=${SLIBNAME_WITH_MAJOR} SLIB_CREATE_DEF_CMD=${SLIB_CREATE_DEF_CMD} SLIB_EXTRA_CMD=${SLIB_EXTRA_CMD} -SLIB_INSTALL_EXTRA_CMD=${SLIB_INSTALL_EXTRA_CMD} -SLIB_UNINSTALL_EXTRA_CMD=${SLIB_UNINSTALL_EXTRA_CMD} +SLIB_INSTALL_NAME=${SLIB_INSTALL_NAME} +SLIB_INSTALL_LINKS=${SLIB_INSTALL_LINKS} +SLIB_INSTALL_EXTRA_LIB=${SLIB_INSTALL_EXTRA_LIB} +SLIB_INSTALL_EXTRA_SHLIB=${SLIB_INSTALL_EXTRA_SHLIB} SAMPLES:=${samples:-\$(FATE_SAMPLES)} EOF @@ -3486,6 +3450,7 @@ version=$3 libs=$4 requires=$5 enabled ${name#lib} || return 0 +mkdir -p $name cat <<EOF > $name/$name.pc prefix=$prefix exec_prefix=\${prefix} @@ -3523,5 +3488,5 @@ pkgconfig_generate libavcodec "FFmpeg codec library" "$LIBAVCODEC_VERSION" "$ext pkgconfig_generate libavformat "FFmpeg container format library" "$LIBAVFORMAT_VERSION" "$extralibs" "libavcodec = $LIBAVCODEC_VERSION" pkgconfig_generate libavdevice "FFmpeg device handling library" "$LIBAVDEVICE_VERSION" "$extralibs" "libavformat = $LIBAVFORMAT_VERSION" pkgconfig_generate libavfilter "FFmpeg video filtering library" "$LIBAVFILTER_VERSION" "$extralibs" -pkgconfig_generate libpostproc "FFmpeg post processing library" "$LIBPOSTPROC_VERSION" "" "libavutil = $LIBAVUTIL_VERSION" +pkgconfig_generate libpostproc "FFmpeg postprocessing library" "$LIBPOSTPROC_VERSION" "" "libavutil = $LIBAVUTIL_VERSION" pkgconfig_generate libswscale "FFmpeg image rescaling library" "$LIBSWSCALE_VERSION" "$LIBM" "libavutil = $LIBAVUTIL_VERSION" diff --git a/doc/APIchanges b/doc/APIchanges index ec76a7bb2a..eba7f8416b 100644 --- a/doc/APIchanges +++ b/doc/APIchanges @@ -13,6 +13,29 @@ libavutil: 2011-04-18 API changes, most recent first: +2011-08-02 - 9d39cbf - lavc 53.7.1 + Add AV_PKT_FLAG_CORRUPT AVPacket flag. + +2011-07-16 - xxxxxx - lavfi 2.27.0 + Add audio packing negotiation fields and helper functions. + + In particular, add AVFilterPacking enum, planar, in_packings and + out_packings fields to AVFilterLink, and the functions: + avfilter_set_common_packing_formats() + avfilter_all_packing_formats() + +2011-07-10 - a67c061 - lavf 53.3.0 + Add avformat_find_stream_info(), deprecate av_find_stream_info(). + +2011-07-10 - 0b950fe - lavc 53.6.0 + Add avcodec_open2(), deprecate avcodec_open(). + +2011-07-01 - b442ca6 - lavf 53.5.0 - avformat.h + Add function av_get_output_timestamp(). + +2011-06-28 - 5129336 - lavu 51.11.0 - avutil.h + Define the AV_PICTURE_TYPE_NONE value in AVPictureType enum. + 2011-06-19 - xxxxxxx - lavfi 2.23.0 - avfilter.h Add layout negotiation fields and helper functions. @@ -43,17 +66,20 @@ API changes, most recent first: 2011-06-12 - xxxxxxx - lavfi 2.16.0 - avfilter_graph_parse() Change avfilter_graph_parse() signature. -2011-06-xx - xxxxxxx - lavf 53.2.0 - avformat.h +2011-06-23 - 67e9ae1 - lavu 51.8.0 - attributes.h + Add av_printf_format(). + +2011-06-16 - 05e84c9, 25de595 - lavf 53.2.0 - avformat.h Add avformat_open_input and avformat_write_header(). 
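To make these entries concrete, a minimal sketch (not taken from the patch) of the dictionary-based open path they describe: avformat_open_input(), avformat_find_stream_info() and avcodec_open2() each accept AVDictionary options, and demuxed packets can carry the new AV_PKT_FLAG_CORRUPT flag. The option keys below are examples only, error handling is abbreviated, and the comments mentioning filter_codec_opts()/setup_find_stream_info_opts() refer to the cmdutils helpers documented earlier in this patch.

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>

static int open_and_scan(const char *filename)
{
    AVFormatContext *ic = NULL;
    AVDictionary *fmt_opts = NULL, *dec_opts = NULL;
    AVCodec *dec;
    AVPacket pkt;
    int stream, ret;

    av_register_all();

    /* Demuxer options travel in a dictionary instead of AVFormatParameters. */
    av_dict_set(&fmt_opts, "analyzeduration", "1000000", 0);
    if ((ret = avformat_open_input(&ic, filename, NULL, &fmt_opts)) < 0)
        return ret;
    av_dict_free(&fmt_opts);

    /* A per-stream array of dictionaries may be passed here (NULL = defaults);
     * in the ffmpeg tools, setup_find_stream_info_opts() builds that array. */
    if ((ret = avformat_find_stream_info(ic, NULL)) < 0)
        return ret;

    stream = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
    if (stream < 0)
        return stream;

    /* Decoder options; the tools narrow a user-supplied dictionary with
     * filter_codec_opts() before passing it here. */
    av_dict_set(&dec_opts, "threads", "2", 0);
    if ((ret = avcodec_open2(ic->streams[stream]->codec, dec, &dec_opts)) < 0)
        return ret;
    av_dict_free(&dec_opts);

    while (av_read_frame(ic, &pkt) >= 0) {
        if (pkt.flags & AV_PKT_FLAG_CORRUPT)    /* new in lavc 53.7.1 */
            av_log(NULL, AV_LOG_WARNING, "corrupt packet in stream %d\n",
                   pkt.stream_index);
        /* ... decode or remux here ... */
        av_free_packet(&pkt);
    }

    avcodec_close(ic->streams[stream]->codec);
    av_close_input_file(ic);
    return 0;
}

On the muxing side, avformat_write_header() serves the same purpose: muxer options are passed in an AVDictionary, where the deprecated av_write_header() took none.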
Deprecate av_open_input_stream, av_open_input_file, AVFormatParameters and av_write_header. -2011-06-xx - xxxxxxx - lavu 51.7.0 - opt.h +2011-06-16 - 7e83e1c, dc59ec5 - lavu 51.7.0 - opt.h Add av_opt_set_dict() and av_opt_find(). Deprecate av_find_opt(). Add AV_DICT_APPEND flag. -2011-06-xx - xxxxxxx - lavu 51.6.0 - opt.h +2011-06-10 - cb7c11c - lavu 51.6.0 - opt.h Add av_opt_flag_is_set(). 2011-06-10 - c381960 - lavfi 2.15.0 - avfilter_get_audio_buffer_ref_from_arrays diff --git a/doc/Makefile b/doc/Makefile new file mode 100644 index 0000000000..01960b74b0 --- /dev/null +++ b/doc/Makefile @@ -0,0 +1,46 @@ +MANPAGES = $(PROGS-yes:%=doc/%.1) +PODPAGES = $(PROGS-yes:%=doc/%.pod) +HTMLPAGES = $(PROGS-yes:%=doc/%.html) + +DOCS = $(addprefix doc/, developer.html faq.html general.html libavfilter.html) $(HTMLPAGES) $(MANPAGES) $(PODPAGES) + +all-$(CONFIG_DOC): documentation + +documentation: $(DOCS) + +TEXIDEP = awk '/^@include/ { printf "$@: $(@D)/%s\n", $$2 }' <$< >$(@:%=%.d) + +doc/%.html: TAG = HTML +doc/%.html: doc/%.texi $(SRC_PATH)/doc/t2h.init + $(Q)$(TEXIDEP) + $(M)texi2html -monolithic --init-file $(SRC_PATH)/doc/t2h.init --output $@ $< + +doc/%.pod: TAG = POD +doc/%.pod: doc/%.texi + $(Q)$(TEXIDEP) + $(M)$(SRC_PATH)/doc/texi2pod.pl $< $@ + +doc/%.1: TAG = MAN +doc/%.1: doc/%.pod + $(M)pod2man --section=1 --center=" " --release=" " $< > $@ + +$(DOCS): | doc +OBJDIRS += doc + +install-progs-$(CONFIG_DOC): install-man + +install-man: $(MANPAGES) + $(Q)mkdir -p "$(MANDIR)/man1" + $(INSTALL) -m 644 $(MANPAGES) "$(MANDIR)/man1" + +uninstall: uninstall-man + +uninstall-man: + $(RM) $(addprefix "$(MANDIR)/man1/",$(ALLMANPAGES)) + +clean:: + $(RM) doc/*.html doc/*.pod doc/*.1 $(CLEANSUFFIXES:%=doc/%) + +-include $(wildcard $(DOCS:%=%.d)) + +.PHONY: documentation diff --git a/doc/TODO b/doc/TODO deleted file mode 100644 index 8ff8a6b388..0000000000 --- a/doc/TODO +++ /dev/null @@ -1,82 +0,0 @@ -ffmpeg TODO list: ----------------- - -Fabrice's TODO list: (unordered) -------------------- -Short term: - -- use AVFMTCTX_DISCARD_PKT in ffplay so that DV has a chance to work -- add RTSP regression test (both client and server) -- make ffserver allocate AVFormatContext -- clean up (incompatible change, for 0.5.0): - * AVStream -> AVComponent - * AVFormatContext -> AVInputStream/AVOutputStream - * suppress rate_emu from AVCodecContext -- add new float/integer audio filterting and conversion : suppress - CODEC_ID_PCM_xxc and use CODEC_ID_RAWAUDIO. -- fix telecine and frame rate conversion - -Long term (ask me if you want to help): - -- commit new imgconvert API and new PIX_FMT_xxx alpha formats -- commit new LGPL'ed float and integer-only AC3 decoder -- add WMA integer-only decoder -- add new MPEG4-AAC audio decoder (both integer-only and float version) - -Michael's TODO list: (unordered) (if anyone wanna help with sth, just ask) -------------------- -- optimize H264 CABAC -- more optimizations -- simper rate control - -Philip'a TODO list: (alphabetically ordered) (please help) ------------------- -- Add a multi-ffm filetype so that feeds can be recorded into multiple files rather - than one big file. -- Authenticated users support -- where the authentication is in the URL -- Change ASF files so that the embedded timestamp in the frames is right rather - than being an offset from the start of the stream -- Make ffm files more resilient to changes in the codec structures so that you - can play old ffm files. 
- -Baptiste's TODO list: ------------------ -- mov edit list support (AVEditList) -- YUV 10 bit per component support "2vuy" -- mxf muxer -- mpeg2 non linear quantizer - -unassigned TODO: (unordered) ---------------- -- use AVFrame for audio codecs too -- rework aviobuf.c buffering strategy and fix url_fskip -- generate optimal huffman tables for mjpeg encoding -- fix ffserver regression tests -- support xvids motion estimation -- support x264s motion estimation -- support x264s rate control -- SNOW: non translational motion compensation -- SNOW: more optimal quantization -- SNOW: 4x4 block support -- SNOW: 1/8 pel motion compensation support -- SNOW: iterative motion estimation based on subsampled images -- SNOW: try B frames and MCTF and see how their PSNR/bitrate/complexity behaves -- SNOW: try to use the wavelet transformed MC-ed reference frame as context for the entropy coder -- SNOW: think about/analyize how to make snow use multiple cpus/threads -- SNOW: finish spec -- FLAC: lossy encoding (viterbi and naive scalar quantization) -- libavfilter -- JPEG2000 decoder & encoder -- MPEG4 GMC encoding support -- macroblock based pixel format (better cache locality, somewhat complex, one paper claimed it faster for high res) -- regression tests for codecs which do not have an encoder (I+P-frame bitstream in the 'master' branch) -- add support for using mplayers video filters to ffmpeg -- H264 encoder -- per MB ratecontrol (so VCD and such do work better) -- write a script which iteratively changes all functions between always_inline and noinline and benchmarks the result to find the best set of inlined functions -- convert all the non SIMD asm into small asm vs. C testcases and submit them to the gcc devels so they can improve gcc -- generic audio mixing API -- extract PES packetizer from PS muxer and use it for new TS muxer -- implement automatic AVBistreamFilter activation -- make cabac encoder use bytestream (see http://trac.videolan.org/x264/changeset/?format=diff&new=651) -- merge imdct and windowing, the current code does considerable amounts of redundant work diff --git a/doc/developer.texi b/doc/developer.texi index 69c2951620..6bfbbba523 100644 --- a/doc/developer.texi +++ b/doc/developer.texi @@ -34,6 +34,7 @@ You can use libavcodec or libavformat in your commercial program, but @emph{any patch you make must be published}. The best way to proceed is to send your patches to the FFmpeg mailing list. + @anchor{Coding Rules} @section Coding Rules @@ -54,10 +55,8 @@ These features are supported by all compilers we care about, so we will not accept patches to remove their use unless they absolutely do not impair clarity and performance. -All code must compile with GCC 2.95 and GCC 3.3. Currently, FFmpeg also -compiles with several other compilers, such as the Compaq ccc compiler -or Sun Studio 9, and we would like to keep it that way unless it would -be exceedingly involved. To ensure compatibility, please do not use any +All code must compile with recent versions of GCC and a number of other +currently supported compilers. To ensure compatibility, please do not use additional C99 features or GCC extensions. Especially watch out for: @itemize @bullet @item @@ -86,7 +85,7 @@ above them explaining what the function does, even if it is just one sentence. All structures and their member variables should be documented, too. @example /** - * @@file mpeg.c + * @@file * MPEG codec. * @@author ... */ @@ -244,7 +243,8 @@ Note, these rules are mostly borrowed from the MPlayer project. 
@section Submitting patches -First, read the (@pxref{Coding Rules}) above if you did not yet. +First, read the @ref{Coding Rules} above if you did not yet, in particular +the rules regarding patch submission. When you submit your patch, please use @code{git format-patch} or @code{git send-email}. We cannot read other diffs :-) @@ -259,8 +259,8 @@ for us and greatly increases your chances of getting your patch applied. Use the patcheck tool of FFmpeg to check your patch. The tool is located in the tools directory. -Run the regression tests before submitting a patch so that you can -verify that there are no big problems. +Run the @ref{Regression Tests} before submitting a patch in order to verify +it does not cause unexpected problems. Patches should be posted as base64 encoded attachments (or any other encoding which ensures that the patch will not be trashed during @@ -339,7 +339,7 @@ send a reminder by email. Your patch should eventually be dealt with. @item Is the patch against latest FFmpeg git master branch? @item - Are you subscribed to ffmpeg-dev? + Are you subscribed to ffmpeg-devel? (the list is subscribers only due to spam) @item Have you checked that the changes are minimal, so that the same cannot be diff --git a/doc/examples/Makefile b/doc/examples/Makefile index c32d524da4..fde2256fca 100644 --- a/doc/examples/Makefile +++ b/doc/examples/Makefile @@ -3,7 +3,7 @@ FFMPEG_LIBS=libavdevice libavformat libavfilter libavcodec libswscale libavutil CFLAGS+=$(shell pkg-config --cflags $(FFMPEG_LIBS)) LDFLAGS+=$(shell pkg-config --libs $(FFMPEG_LIBS)) -EXAMPLES=encoding-example muxing-example +EXAMPLES=encoding filtering metadata muxing OBJS=$(addsuffix .o,$(EXAMPLES)) diff --git a/doc/examples/encoding-example.c b/doc/examples/encoding.c index a32c09ab69..7efd5facce 100644 --- a/doc/examples/encoding-example.c +++ b/doc/examples/encoding.c @@ -1,40 +1,34 @@ /* * Copyright (c) 2001 Fabrice Bellard * - * This file is part of FFmpeg. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. */ /** * @file - * avcodec API use example. + * libavcodec API use example. * - * Note that this library only handles codecs (mpeg, mpeg4, etc...), + * Note that libavcodec only handles codecs (mpeg, mpeg4, etc...), * not file formats (avi, vob, etc...). See library 'libavformat' for the * format handling */ -#include <stdlib.h> -#include <stdio.h> -#include <string.h> - -#ifdef HAVE_AV_CONFIG_H -#undef HAVE_AV_CONFIG_H -#endif - #include "libavcodec/avcodec.h" #include "libavutil/mathematics.h" @@ -64,12 +58,13 @@ static void audio_encode_example(const char *filename) exit(1); } - c= avcodec_alloc_context(); + c = avcodec_alloc_context3(codec); /* put sample parameters */ c->bit_rate = 64000; c->sample_rate = 44100; c->channels = 2; + c->sample_fmt = AV_SAMPLE_FMT_S16; /* open it */ if (avcodec_open(c, codec) < 0) { @@ -134,7 +129,7 @@ static void audio_decode_example(const char *outfilename, const char *filename) exit(1); } - c= avcodec_alloc_context(); + c = avcodec_alloc_context3(codec); /* open it */ if (avcodec_open(c, codec) < 0) { @@ -204,7 +199,7 @@ static void video_encode_example(const char *filename) int i, out_size, size, x, y, outbuf_size; FILE *f; AVFrame *picture; - uint8_t *outbuf, *picture_buf; + uint8_t *outbuf; printf("Video encoding\n"); @@ -215,7 +210,7 @@ static void video_encode_example(const char *filename) exit(1); } - c= avcodec_alloc_context(); + c = avcodec_alloc_context3(codec); picture= avcodec_alloc_frame(); /* put sample parameters */ @@ -244,15 +239,11 @@ static void video_encode_example(const char *filename) /* alloc image and output buffer */ outbuf_size = 100000; outbuf = malloc(outbuf_size); - size = c->width * c->height; - picture_buf = malloc((size * 3) / 2); /* size for YUV 420 */ - picture->data[0] = picture_buf; - picture->data[1] = picture->data[0] + size; - picture->data[2] = picture->data[1] + size / 4; - picture->linesize[0] = c->width; - picture->linesize[1] = c->width / 2; - picture->linesize[2] = c->width / 2; + /* the image can be allocated by any means and av_image_alloc() is + * just the most convenient way if av_malloc() is to be used */ + av_image_alloc(picture->data, picture->linesize, + c->width, c->height, c->pix_fmt, 1); /* encode 1 second of video */ for(i=0;i<25;i++) { @@ -295,11 +286,11 @@ static void video_encode_example(const char *filename) outbuf[3] = 0xb7; fwrite(outbuf, 1, 4, f); fclose(f); - free(picture_buf); free(outbuf); avcodec_close(c); av_free(c); + av_free(picture->data[0]); av_free(picture); printf("\n"); } @@ -346,7 +337,7 @@ static void video_decode_example(const char *outfilename, const char *filename) exit(1); } - c= avcodec_alloc_context(); + c = avcodec_alloc_context3(codec); picture= avcodec_alloc_frame(); if(codec->capabilities&CODEC_CAP_TRUNCATED) diff --git a/doc/examples/filtering.c b/doc/examples/filtering.c new file mode 100644 index 0000000000..369cc03e80 --- /dev/null +++ b/doc/examples/filtering.c @@ -0,0 +1,230 @@ +/* + * Copyright (c) 2010 Nicolas George + * Copyright (c) 2011 Stefano Sabatini + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation 
the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/** + * @file + * API example for decoding and filtering + */ + +#define _XOPEN_SOURCE 600 /* for usleep */ + +#include <libavcodec/avcodec.h> +#include <libavformat/avformat.h> +#include <libavfilter/avfiltergraph.h> +#include <libavfilter/vsink_buffer.h> +#include <libavfilter/vsrc_buffer.h> + +const char *filter_descr = "scale=78:24"; + +static AVFormatContext *fmt_ctx; +static AVCodecContext *dec_ctx; +AVFilterContext *buffersink_ctx; +AVFilterContext *buffersrc_ctx; +AVFilterGraph *filter_graph; +static int video_stream_index = -1; +static int64_t last_pts = AV_NOPTS_VALUE; + +static int open_input_file(const char *filename) +{ + int ret, i; + AVCodec *dec; + + if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n"); + return ret; + } + + if ((ret = av_find_stream_info(fmt_ctx)) < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n"); + return ret; + } + + /* select the video stream */ + ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n"); + return ret; + } + video_stream_index = ret; + dec_ctx = fmt_ctx->streams[video_stream_index]->codec; + + /* init the video decoder */ + if ((ret = avcodec_open(dec_ctx, dec)) < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n"); + return ret; + } + + return 0; +} + +static int init_filters(const char *filters_descr) +{ + char args[512]; + int ret; + AVFilter *buffersrc = avfilter_get_by_name("buffer"); + AVFilter *buffersink = avfilter_get_by_name("buffersink"); + AVFilterInOut *outputs = avfilter_inout_alloc(); + AVFilterInOut *inputs = avfilter_inout_alloc(); + enum PixelFormat pix_fmts[] = { PIX_FMT_GRAY8, PIX_FMT_NONE }; + filter_graph = avfilter_graph_alloc(); + + /* buffer video source: the decoded frames from the decoder will be inserted here. */ + snprintf(args, sizeof(args), "%d:%d:%d:%d:%d:%d:%d", + dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt, + dec_ctx->time_base.num, dec_ctx->time_base.den, + dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den); + ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", + args, NULL, filter_graph); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n"); + return ret; + } + + /* buffer video sink: to terminate the filter chain. */ + ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", + NULL, pix_fmts, filter_graph); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n"); + return ret; + } + + /* Endpoints for the filter graph. 
*/ + outputs->name = av_strdup("in"); + outputs->filter_ctx = buffersrc_ctx; + outputs->pad_idx = 0; + outputs->next = NULL; + + inputs->name = av_strdup("out"); + inputs->filter_ctx = buffersink_ctx; + inputs->pad_idx = 0; + inputs->next = NULL; + + if ((ret = avfilter_graph_parse(filter_graph, filter_descr, + &inputs, &outputs, NULL)) < 0) + return ret; + + if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0) + return ret; +} + +static void display_picref(AVFilterBufferRef *picref, AVRational time_base) +{ + int x, y; + uint8_t *p0, *p; + int64_t delay; + + if (picref->pts != AV_NOPTS_VALUE) { + if (last_pts != AV_NOPTS_VALUE) { + /* sleep roughly the right amount of time; + * usleep is in microseconds, just like AV_TIME_BASE. */ + delay = av_rescale_q(picref->pts - last_pts, + time_base, AV_TIME_BASE_Q); + if (delay > 0 && delay < 1000000) + usleep(delay); + } + last_pts = picref->pts; + } + + /* Trivial ASCII grayscale display. */ + p0 = picref->data[0]; + puts("\033c"); + for (y = 0; y < picref->video->h; y++) { + p = p0; + for (x = 0; x < picref->video->w; x++) + putchar(" .-+#"[*(p++) / 52]); + putchar('\n'); + p0 += picref->linesize[0]; + } + fflush(stdout); +} + +int main(int argc, char **argv) +{ + int ret; + AVPacket packet; + AVFrame frame; + int got_frame; + + if (argc != 2) { + fprintf(stderr, "Usage: %s file\n", argv[0]); + exit(1); + } + + avcodec_register_all(); + av_register_all(); + avfilter_register_all(); + + if ((ret = open_input_file(argv[1]) < 0)) + goto end; + if ((ret = init_filters(filter_descr)) < 0) + goto end; + + /* read all packets */ + while (1) { + AVFilterBufferRef *picref; + if ((ret = av_read_frame(fmt_ctx, &packet)) < 0) + break; + + if (packet.stream_index == video_stream_index) { + avcodec_get_frame_defaults(&frame); + got_frame = 0; + ret = avcodec_decode_video2(dec_ctx, &frame, &got_frame, &packet); + av_free_packet(&packet); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Error decoding video\n"); + break; + } + + if (got_frame) { + if (frame.pts == AV_NOPTS_VALUE) + frame.pts = frame.pkt_dts == AV_NOPTS_VALUE ? 
+ frame.pkt_dts : frame.pkt_pts; + /* push the decoded frame into the filtergraph */ + av_vsrc_buffer_add_frame(buffersrc_ctx, &frame); + + /* pull filtered pictures from the filtergraph */ + while (avfilter_poll_frame(buffersink_ctx->inputs[0])) { + av_vsink_buffer_get_video_buffer_ref(buffersink_ctx, &picref, 0); + if (picref) { + display_picref(picref, buffersink_ctx->inputs[0]->time_base); + avfilter_unref_buffer(picref); + } + } + } + } + } +end: + avfilter_graph_free(&filter_graph); + if (dec_ctx) + avcodec_close(dec_ctx); + av_close_input_file(fmt_ctx); + + if (ret < 0 && ret != AVERROR_EOF) { + char buf[1024]; + av_strerror(ret, buf, sizeof(buf)); + fprintf(stderr, "Error occurred: %s\n", buf); + exit(1); + } + + exit(0); +} diff --git a/doc/examples/metadata.c b/doc/examples/metadata.c new file mode 100644 index 0000000000..7d29be7049 --- /dev/null +++ b/doc/examples/metadata.c @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2011 Reinhard Tartler + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/** + * @file + * Shows how the metadata API can be used in application programs. + */ + +#include <stdio.h> + +#include <libavformat/avformat.h> +#include <libavutil/dict.h> + +int main (int argc, char **argv) +{ + AVFormatContext *fmt_ctx = NULL; + AVDictionaryEntry *tag = NULL; + int ret; + + if (argc != 2) { + printf("usage: %s <input_file>\n" + "example program to demonstrate the use of the libavformat metadata API.\n" + "\n", argv[0]); + return 1; + } + + av_register_all(); + if ((ret = avformat_open_input(&fmt_ctx, argv[1], NULL, NULL))) + return ret; + + while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) + printf("%s=%s\n", tag->key, tag->value); + + avformat_free_context(fmt_ctx); + return 0; +} diff --git a/doc/examples/muxing-example.c b/doc/examples/muxing.c index a1f19a47bf..2491e96d3d 100644 --- a/doc/examples/muxing-example.c +++ b/doc/examples/muxing.c @@ -22,8 +22,10 @@ /** * @file - * Libavformat API example: Output a media file in any supported - * libavformat format. The default codecs are used. + * libavformat API example. + * + * Output a media file in any supported libavformat format. + * The default codecs are used. 
*/ #include <stdlib.h> @@ -31,6 +33,7 @@ #include <string.h> #include <math.h> +#include "libavutil/mathematics.h" #include "libavformat/avformat.h" #include "libswscale/swscale.h" @@ -78,7 +81,7 @@ static AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id) c->channels = 2; // some formats want stream headers to be separate - if(oc->oformat->flags & AVFMT_GLOBALHEADER) + if (oc->oformat->flags & AVFMT_GLOBALHEADER) c->flags |= CODEC_FLAG_GLOBAL_HEADER; return st; @@ -141,7 +144,7 @@ static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels) int16_t *q; q = samples; - for(j=0;j<frame_size;j++) { + for (j = 0; j < frame_size; j++) { v = (int)(sin(t) * 10000); for(i = 0; i < nb_channels; i++) *q++ = v; @@ -160,13 +163,13 @@ static void write_audio_frame(AVFormatContext *oc, AVStream *st) get_audio_frame(samples, audio_input_frame_size, c->channels); - pkt.size= avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples); + pkt.size = avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples); if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE) pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base); pkt.flags |= AV_PKT_FLAG_KEY; - pkt.stream_index= st->index; - pkt.data= audio_outbuf; + pkt.stream_index = st->index; + pkt.data = audio_outbuf; /* write the compressed frame in the media file */ if (av_interleaved_write_frame(oc, &pkt) != 0) { @@ -230,7 +233,7 @@ static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id) c->mb_decision=2; } // some formats want stream headers to be separate - if(oc->oformat->flags & AVFMT_GLOBALHEADER) + if (oc->oformat->flags & AVFMT_GLOBALHEADER) c->flags |= CODEC_FLAG_GLOBAL_HEADER; return st; @@ -316,15 +319,15 @@ static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height i = frame_index; /* Y */ - for(y=0;y<height;y++) { - for(x=0;x<width;x++) { + for (y = 0; y < height; y++) { + for (x = 0; x < width; x++) { pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3; } } /* Cb and Cr */ - for(y=0;y<height/2;y++) { - for(x=0;x<width/2;x++) { + for (y = 0; y < height/2; y++) { + for (x = 0; x < width/2; x++) { pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2; pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5; } @@ -369,14 +372,14 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st) if (oc->oformat->flags & AVFMT_RAWPICTURE) { /* raw video case. The API will change slightly in the near - futur for that */ + future for that. 
*/ AVPacket pkt; av_init_packet(&pkt); pkt.flags |= AV_PKT_FLAG_KEY; - pkt.stream_index= st->index; - pkt.data= (uint8_t *)picture; - pkt.size= sizeof(AVPicture); + pkt.stream_index = st->index; + pkt.data = (uint8_t *)picture; + pkt.size = sizeof(AVPicture); ret = av_interleaved_write_frame(oc, &pkt); } else { @@ -391,9 +394,9 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st) pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base); if(c->coded_frame->key_frame) pkt.flags |= AV_PKT_FLAG_KEY; - pkt.stream_index= st->index; - pkt.data= video_outbuf; - pkt.size= out_size; + pkt.stream_index = st->index; + pkt.data = video_outbuf; + pkt.size = out_size; /* write the compressed frame in the media file */ ret = av_interleaved_write_frame(oc, &pkt); @@ -455,7 +458,7 @@ int main(int argc, char **argv) if (!oc) { exit(1); } - fmt= oc->oformat; + fmt = oc->oformat; /* add the audio and video streams using the default format codecs and initialize the codecs */ diff --git a/doc/faq.texi b/doc/faq.texi index 225f139b3c..535ca31446 100644 --- a/doc/faq.texi +++ b/doc/faq.texi @@ -47,7 +47,7 @@ Likely reasons @item We are busy and haven't had time yet to read your report or investigate the issue. @item You didn't follow @url{http://ffmpeg.org/bugreports.html}. -@item You didn't use git HEAD. +@item You didn't use git master. @item You reported a segmentation fault without gdb output. @item You describe a problem but not how to reproduce it. @item It's unclear if you use ffmpeg as command line tool or use @@ -123,7 +123,8 @@ problem and an NP-hard problem... @section ffmpeg does not work; what is wrong? -Try a @code{make distclean} in the ffmpeg source directory before the build. If this does not help see +Try a @code{make distclean} in the ffmpeg source directory before the build. +If this does not help see (@url{http://ffmpeg.org/bugreports.html}). @section How do I encode single pictures into movies? @@ -285,7 +286,8 @@ Just create an "input.avs" text file with this single line ... ffmpeg -i input.avs @end example -For ANY other help on Avisynth, please visit @url{http://www.avisynth.org/}. +For ANY other help on Avisynth, please visit the +@uref{http://www.avisynth.org/, Avisynth homepage}. @section How can I join video files? @@ -417,7 +419,7 @@ No. These tools are too bloated and they complicate the build. FFmpeg is already organized in a highly modular manner and does not need to be rewritten in a formal object language. Further, many of the developers favor straight C; it works for them. For more arguments on this matter, -read "Programming Religion" at (@url{http://www.tux.org/lkml/#s15}). +read @uref{http://www.tux.org/lkml/#s15, "Programming Religion"}. @section Why are the ffmpeg programs devoid of debugging symbols? diff --git a/doc/ffmpeg.texi b/doc/ffmpeg.texi index 6f21451219..832e3d8fe2 100644 --- a/doc/ffmpeg.texi +++ b/doc/ffmpeg.texi @@ -159,8 +159,6 @@ Set the ISO 639 language code (3 letters) of the current subtitle stream. @section Video Options @table @option -@item -b @var{bitrate} -Set the video bitrate in bit/s (default = 200 kb/s). @item -vframes @var{number} Set the number of video frames to record. @item -r @var{fps} @@ -476,7 +474,7 @@ FF_ER_COMPLIANT @item 3 FF_ER_AGGRESSIVE @item 4 -FF_ER_VERY_AGGRESSIVE +FF_ER_EXPLODE @end table @item -ec @var{bit_mask} @@ -556,16 +554,17 @@ The timestamps must be specified in ascending order. @item -aframes @var{number} Set the number of audio frames to record. 
@item -ar @var{freq} -Set the audio sampling frequency. there is no default for input streams, -for output streams it is set by default to the frequency of the input stream. -@item -ab @var{bitrate} -Set the audio bitrate in bit/s (default = 64k). +Set the audio sampling frequency. For output streams it is set by +default to the frequency of the corresponding input stream. For input +streams this option only makes sense for audio grabbing devices and raw +demuxers and is mapped to the corresponding demuxer options. @item -aq @var{q} Set the audio quality (codec-specific, VBR). @item -ac @var{channels} -Set the number of audio channels. For input streams it is set by -default to 1, for output streams it is set by default to the same -number of audio channels in input. +Set the number of audio channels. For output streams it is set by +default to the number of input audio channels. For input streams +this option only makes sense for audio grabbing devices and raw demuxers +and is mapped to the corresponding demuxer options. @item -an Disable audio recording. @item -acodec @var{codec} @@ -733,9 +732,11 @@ Read input at native frame rate. Mainly used to simulate a grab device. @item -loop_input Loop over the input stream. Currently it works only for image streams. This option is used for automatic FFserver testing. +This option is deprecated, use -loop 1. @item -loop_output @var{number_of_times} Repeatedly loop output for formats that support looping such as animated GIF (0 will loop the output infinitely). +This option is deprecated, use -loop. @item -threads @var{count} Thread count. @item -vsync @var{parameter} @@ -883,8 +884,8 @@ ffmpeg -f oss -i /dev/dsp -f video4linux2 -i /dev/video0 /tmp/out.mpg @end example Note that you must activate the right video source and channel before -launching ffmpeg with any TV viewer such as xawtv -(@url{http://linux.bytesex.org/xawtv/}) by Gerd Knorr. You also +launching ffmpeg with any TV viewer such as +@uref{http://linux.bytesex.org/xawtv/, xawtv} by Gerd Knorr. You also have to set the audio recording levels correctly with a standard mixer. @@ -903,8 +904,34 @@ the DISPLAY environment variable. ffmpeg -f x11grab -s cif -r 25 -i :0.0+10,20 /tmp/out.mpg @end example -0.0 is display.screen number of your X11 server, same as the DISPLAY environment -variable. 10 is the x-offset and 20 the y-offset for the grabbing. +10 is the x-offset and 20 the y-offset for the grabbing. + +@example +ffmpeg -f x11grab -follow_mouse centered -s cif -r 25 -i :0.0 /tmp/out.mpg +@end example + +The grabbing region follows the mouse pointer, which stays at the center of +region. + +@example +ffmpeg -f x11grab -follow_mouse 100 -s cif -r 25 -i :0.0 /tmp/out.mpg +@end example + +Only follows when mouse pointer reaches within 100 pixels to the edge of +region. + +@example +ffmpeg -f x11grab -show_region 1 -s cif -r 25 -i :0.0+10,20 /tmp/out.mpg +@end example + +The grabbing region will be indicated on screen. + +@example +ffmpeg -f x11grab -follow_mouse centered -show_region 1 -s cif -r 25 -i :0.0 /tmp/out.mpg +@end example + +The grabbing region indication will follow the mouse pointer. + @section Video and Audio file format conversion diff --git a/doc/ffplay.texi b/doc/ffplay.texi index f9f5e07cda..9390e14741 100644 --- a/doc/ffplay.texi +++ b/doc/ffplay.texi @@ -38,8 +38,9 @@ Force displayed width. @item -y @var{height} Force displayed height. 
@item -s @var{size} -Set frame size (WxH or abbreviation), needed for videos which don't -contain a header with the frame size like raw YUV. +Set frame size (WxH or abbreviation), needed for videos which do +not contain a header with the frame size like raw YUV. This option +has been deprecated in favor of private options, try -video_size. @item -an Disable audio. @item -vn @@ -90,6 +91,7 @@ Read @var{input_file}. @table @option @item -pix_fmt @var{format} Set pixel format. +This option has been deprecated in favor of private options, try -pixel_format. @item -stats Show the stream duration, the codec parameters, the current position in the stream and the audio/video synchronisation drift. diff --git a/doc/fftools-common-opts.texi b/doc/fftools-common-opts.texi index d72ca5cc00..bcf036b65f 100644 --- a/doc/fftools-common-opts.texi +++ b/doc/fftools-common-opts.texi @@ -91,3 +91,28 @@ The use of the environment variable @env{NO_COLOR} is deprecated and will be dropped in a following FFmpeg version. @end table + +@section AVOptions + +These options are provided directly by the libavformat, libavdevice and +libavcodec libraries. To see the list of available AVOptions, use the +@option{-help} option. They are separated into two categories: +@table @option +@item generic +These options can be set for any container, codec or device. Generic options are +listed under AVFormatContext options for containers/devices and under +AVCodecContext options for codecs. +@item private +These options are specific to the given container, device or codec. Private +options are listed under their corresponding containers/devices/codecs. +@end table + +For example to write an ID3v2.3 header instead of a default ID3v2.4 to +an MP3 file, use the @option{id3v2_version} private option of the MP3 +muxer: +@example +ffmpeg -i input.flac -id3v2_version 3 out.mp3 +@end example + +Note -nooption syntax cannot be used for boolean AVOptions, use -option +0/-option 1. diff --git a/doc/filters.texi b/doc/filters.texi index eb31714486..b41fce7ced 100644 --- a/doc/filters.texi +++ b/doc/filters.texi @@ -18,8 +18,8 @@ output pads is called a "sink". A filtergraph can be represented using a textual representation, which is recognized by the @code{-vf} and @code{-af} options of the ff* -tools, and by the @code{av_parse_graph()} function defined in -@file{libavfilter/avfiltergraph}. +tools, and by the @code{avfilter_graph_parse()} function defined in +@file{libavfilter/avfiltergraph.h}. A filterchain consists of a sequence of connected filters, each one connected to the previous one in the sequence. A filterchain is @@ -183,6 +183,66 @@ threshold, and defaults to 98. @var{threshold} is the threshold below which a pixel value is considered black, and defaults to 32. +@section boxblur + +Apply boxblur algorithm to the input video. + +This filter accepts the parameters: +@var{luma_power}:@var{luma_radius}:@var{chroma_radius}:@var{chroma_power}:@var{alpha_radius}:@var{alpha_power} + +Chroma and alpha parameters are optional, if not specified they default +to the corresponding values set for @var{luma_radius} and +@var{luma_power}. + +@var{luma_radius}, @var{chroma_radius}, and @var{alpha_radius} represent +the radius in pixels of the box used for blurring the corresponding +input plane. 
They are expressions, and can contain the following +constants: +@table @option +@item w, h +the input width and heigth in pixels + +@item cw, ch +the input chroma image width and height in pixels + +@item hsub, vsub +horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1. +@end table + +The radius must be a non-negative number, and must be not greater than +the value of the expression @code{min(w,h)/2} for the luma and alpha planes, +and of @code{min(cw,ch)/2} for the chroma planes. + +@var{luma_power}, @var{chroma_power}, and @var{alpha_power} represent +how many times the boxblur filter is applied to the corresponding +plane. + +Some examples follow: + +@itemize + +@item +Apply a boxblur filter with luma, chroma, and alpha radius +set to 2: +@example +boxblur=2:1 +@end example + +@item +Set luma radius to 2, alpha and chroma radius to 0 +@example +boxblur=2:1:0:0:0:0 +@end example + +@item +Set luma and chroma radius to a fraction of the video dimension +@example +boxblur=min(h\,w)/10:1:min(cw\,ch)/10:1 +@end example + +@end itemize + @section copy Copy the input source unchanged to the output. Mainly useful for @@ -215,6 +275,19 @@ the output (cropped) width and heigth @item ow, oh same as @var{out_w} and @var{out_h} +@item a +same as @var{iw} / @var{ih} + +@item sar +input sample aspect ratio + +@item dar +input display aspect ratio, it is the same as (@var{iw} / @var{ih}) * @var{sar} + +@item hsub, vsub +horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1. + @item n the number of input frame, starting from 0 @@ -835,9 +908,6 @@ the named filter. The list of the currently supported filters follows: @table @var @item 2xsai -@item blackframe -@item boxblur -@item cropdetect @item decimate @item delogo @item denoise3d @@ -874,7 +944,6 @@ The list of the currently supported filters follows: @item qp @item rectangle @item remove-logo -@item rgbtest @item rotate @item sab @item screenshot @@ -1013,8 +1082,7 @@ Erode an image by using a specific structuring element. This filter corresponds to the libopencv function @code{cvErode}. The filter accepts the parameters: @var{struct_el}:@var{nb_iterations}, -with the same meaning and use of those of the dilate filter -(@pxref{dilate}). +with the same syntax and semantics as the @ref{dilate} filter. @subsection smooth @@ -1130,7 +1198,13 @@ x and y offsets as specified by the @var{x} and @var{y} expressions, or NAN if not yet specified @item a -input display aspect ratio, same as @var{iw} / @var{ih} +same as @var{iw} / @var{ih} + +@item sar +input sample aspect ratio + +@item dar +input display aspect ratio, it is the same as (@var{iw} / @var{ih}) * @var{sar} @item hsub, vsub horizontal and vertical chroma subsample values. 
For example for the @@ -1190,6 +1264,12 @@ pad="max(iw\,ih):ow:(ow-iw)/2:(oh-ih)/2" # pad the input to get a final w/h ratio of 16:9 pad="ih*16/9:ih:(ow-iw)/2:(oh-ih)/2" +# for anamorphic video, in order to set the output display aspect ratio, +# it is necessary to use sar in the expression, according to the relation: +# (ih * X / ih) * sar = output_dar +# X = output_dar / sar +pad="ih*16/9/sar:ih:(ow-iw)/2:(oh-ih)/2" + # double output size and put the input video in the bottom-right # corner of the output padded area pad="2*iw:2*ih:ow-iw:oh-ih" @@ -1232,7 +1312,13 @@ the output (cropped) width and heigth same as @var{out_w} and @var{out_h} @item a -input display aspect ratio, same as @var{iw} / @var{ih} +same as @var{iw} / @var{ih} + +@item sar +input sample aspect ratio + +@item dar +input display aspect ratio, it is the same as (@var{iw} / @var{ih}) * @var{sar} @item hsub, vsub horizontal and vertical chroma subsample values. For example for the @@ -1426,7 +1512,7 @@ setdar=16:9 setdar=1.77777 @end example -See also the "setsar" filter documentation (@pxref{setsar}). +See also the @ref{setsar} filter documentation. @section setpts @@ -1737,7 +1823,7 @@ Flip the input video vertically. Deinterlace the input video ("yadif" means "yet another deinterlacing filter"). -It accepts the optional parameters: @var{mode}:@var{parity}. +It accepts the optional parameters: @var{mode}:@var{parity}:@var{auto}. @var{mode} specifies the interlacing mode to adopt, accepts one of the following values: @@ -1760,9 +1846,9 @@ interlaced video, accepts one of the following values: @table @option @item 0 -assume bottom field first -@item 1 assume top field first +@item 1 +assume bottom field first @item -1 enable automatic detection @end table @@ -1771,6 +1857,18 @@ Default value is -1. If interlacing is unknown or decoder does not export this information, top field first will be assumed. +@var{auto} specifies if deinterlacer should trust the interlaced flag +and only deinterlace frames marked as interlaced + +@table @option +@item 0 +deinterlace all frames +@item 1 +only deinterlace frames marked as interlaced +@end table + +Default value is 0. + @c man end VIDEO FILTERS @chapter Video Sources @@ -1960,8 +2058,7 @@ form @var{width}x@var{height} or a frame size abbreviation. the form @var{num}/@var{den} or a frame rate abbreviation. @var{src_name} is the name to the frei0r source to load. For more information regarding frei0r and how to set the parameters read the -section "frei0r" (@pxref{frei0r}) in the description of the video -filters. +section @ref{frei0r} in the description of the video filters. Some examples follow: @example @@ -1970,6 +2067,53 @@ Some examples follow: frei0r_src=200x200:10:partik0l=1234 [overlay]; [in][overlay] overlay @end example +@section rgbtestsrc, testsrc + +The @code{rgbtestsrc} source generates an RGB test pattern useful for +detecting RGB vs BGR issues. You should see a red, green and blue +stripe from top to bottom. + +The @code{testsrc} source generates a test video pattern, showing a +color pattern, a scrolling gradient and a timestamp. This is mainly +intended for testing purposes. + +Both sources accept an optional sequence of @var{key}=@var{value} pairs, +separated by ":". The description of the accepted options follows. + +@table @option + +@item size, s +Specify the size of the sourced video, it may be a string of the form +@var{width}x@var{heigth}, or the name of a size abbreviation. The +default value is "320x240". 
+ +@item rate, r +Specify the frame rate of the sourced video, as the number of frames +generated per second. It has to be a string in the format +@var{frame_rate_num}/@var{frame_rate_den}, an integer number, a float +number or a valid video frame rate abbreviation. The default value is +"25". + +@item duration +Set the video duration of the sourced video. The accepted syntax is: +@example +[-]HH[:MM[:SS[.m...]]] +[-]S+[.m...] +@end example +See also the function @code{av_parse_time()}. + +If not specified, or the expressed duration is negative, the video is +supposed to be generated forever. +@end table + +For example the following: +@example +testsrc=duration=5.3:size=qcif:rate=10 +@end example + +will generate a video with a duration of 5.3 seconds, with size +176x144 and a framerate of 10 frames per second. + @c man end VIDEO SOURCES @chapter Video Sinks diff --git a/doc/general.texi b/doc/general.texi index e5e76db056..c5045b1095 100644 --- a/doc/general.texi +++ b/doc/general.texi @@ -66,6 +66,7 @@ library: @tab Used in Z and Z95 games. @item Brute Force & Ignorance @tab @tab X @tab Used in the game Flash Traffic: City of Angels. +@item BWF @tab X @tab X @item Interplay C93 @tab @tab X @tab Used in the game Cyberia from Interplay. @item Delphine Software International CIN @tab @tab X @@ -400,7 +401,7 @@ following image formats are supported: @tab experimental lossless codec (fourcc: FFV1) @item Flash Screen Video v1 @tab X @tab X @tab fourcc: FSV1 -@item Flash Screen Video v2 @tab X +@item Flash Screen Video v2 @tab X @tab X @item Flash Video (FLV) @tab X @tab X @tab Sorenson H.263 used in Flash @item Fraps @tab @tab X @@ -819,8 +820,8 @@ the FFmpeg Windows Help Forum at FFmpeg can be built to run natively on Windows using the MinGW tools. Install the latest versions of MSYS and MinGW from @url{http://www.mingw.org/}. -You can find detailed installation -instructions in the download section and the FAQ. +You can find detailed installation instructions in the download +section and the FAQ. FFmpeg does not build out-of-the-box with the packages the automated MinGW installer provides. It also requires coreutils to be installed and many other @@ -848,13 +849,14 @@ noticeable when running make for a second time (for example in @code{make install}). @item In order to compile FFplay, you must have the MinGW development library -of SDL. Get it from @url{http://www.libsdl.org}. +of @uref{http://www.libsdl.org/, SDL}. Edit the @file{bin/sdl-config} script so that it points to the correct prefix where SDL was installed. Verify that @file{sdl-config} can be launched from the MSYS command line. @item By using @code{./configure --enable-shared} when configuring FFmpeg, -you can build libavutil, libavcodec and libavformat as DLLs. +you can build the FFmpeg libraries (e.g. libavutil, libavcodec, +libavformat) as DLLs. @end itemize @@ -961,7 +963,7 @@ MSVC++-compatible import libraries. @item Build FFmpeg with @example -./configure --enable-shared --enable-memalign-hack +./configure --enable-shared make make install @end example @@ -969,7 +971,8 @@ make install Your install path (@file{/usr/local/} by default) should now have the necessary DLL and LIB files under the @file{bin} directory. -@end enumerate +Alternatively, build the libraries with a cross compiler, according to +the instructions below in @ref{Cross compilation for Windows with Linux}. To use those files with MSVC++, do the same as you would do with the static libraries, as described above. 
But in Step 4, @@ -982,10 +985,7 @@ libraries (@file{libxxx.a} files) you should add the MSVC import libraries libraries (@file{libxxx.dll.a} files), as these will give you undefined reference errors. There should be no need for @file{libmingwex.a}, @file{libgcc.a}, and @file{wsock32.lib}, nor any other external library -statically linked into the DLLs. The @file{bin} directory contains a bunch -of DLL files, but the ones that are actually used to run your application -are the ones with a major version number in their filenames -(i.e. @file{avcodec-51.dll}). +statically linked into the DLLs. FFmpeg headers do not declare global data for Windows DLLs through the usual dllexport/dllimport interface. Such data will be exported properly while @@ -998,10 +998,41 @@ extern __declspec(dllimport) const AVPixFmtDescriptor av_pix_fmt_descriptors[]; Note that using import libraries created by dlltool requires the linker optimization option to be set to -"References: Keep Unreferenced Data (/OPT:NOREF)", otherwise +"References: Keep Unreferenced Data (@code{/OPT:NOREF})", otherwise the resulting binaries will fail during runtime. This isn't required when using import libraries generated by lib.exe. +This issue is reported upstream at +@url{http://sourceware.org/bugzilla/show_bug.cgi?id=12633}. + +To create import libraries that work with the @code{/OPT:REF} option +(which is enabled by default in Release mode), follow these steps: + +@enumerate + +@item Open @file{Visual Studio 2005 Command Prompt}. + +Alternatively, in a normal command line prompt, call @file{vcvars32.bat} +which sets up the environment variables for the Visual C++ tools +(the standard location for this file is +@file{C:\Program Files\Microsoft Visual Studio 8\VC\bin\vcvars32.bat}). + +@item Enter the @file{bin} directory where the created LIB and DLL files +are stored. + +@item Generate new import libraries with @file{lib.exe}: + +@example +lib /machine:i386 /def:..\lib\avcodec-53.def /out:avcodec.lib +lib /machine:i386 /def:..\lib\avdevice-53.def /out:avdevice.lib +lib /machine:i386 /def:..\lib\avfilter-2.def /out:avfilter.lib +lib /machine:i386 /def:..\lib\avformat-53.def /out:avformat.lib +lib /machine:i386 /def:..\lib\avutil-51.def /out:avutil.lib +lib /machine:i386 /def:..\lib\swscale-2.def /out:swscale.lib +@end example + +@end enumerate +@anchor{Cross compilation for Windows with Linux} @subsection Cross compilation for Windows with Linux You must use the MinGW cross compilation tools available at @@ -1014,8 +1045,7 @@ Then configure FFmpeg with the following options: (you can change the cross-prefix according to the prefix chosen for the MinGW tools). -Then you can easily test FFmpeg with Wine -(@url{http://www.winehq.com/}). +Then you can easily test FFmpeg with @uref{http://www.winehq.com/, Wine}. @subsection Compilation under Cygwin @@ -1036,7 +1066,7 @@ diffutils Then run @example -./configure --enable-static --disable-shared +./configure @end example to make a static build. @@ -1054,8 +1084,8 @@ If you want to build FFmpeg with additional libraries, download Cygwin libogg-devel, libvorbis-devel @end example -These library packages are only available from Cygwin Ports -(@url{http://sourceware.org/cygwinports/}) : +These library packages are only available from +@uref{http://sourceware.org/cygwinports/, Cygwin Ports}: @example yasm, libSDL-devel, libdirac-devel, libfaac-devel, libgsm-devel, @@ -1083,12 +1113,12 @@ and add some special flags to your configure invocation. 
For a static build run @example -./configure --target-os=mingw32 --enable-memalign-hack --enable-static --disable-shared --extra-cflags=-mno-cygwin --extra-libs=-mno-cygwin +./configure --target-os=mingw32 --extra-cflags=-mno-cygwin --extra-libs=-mno-cygwin @end example and for a build with shared libraries @example -./configure --target-os=mingw32 --enable-memalign-hack --enable-shared --disable-static --extra-cflags=-mno-cygwin --extra-libs=-mno-cygwin +./configure --target-os=mingw32 --enable-shared --disable-static --extra-cflags=-mno-cygwin --extra-libs=-mno-cygwin @end example @bye diff --git a/doc/indevs.texi b/doc/indevs.texi index 04871081dd..af9b1a680f 100644 --- a/doc/indevs.texi +++ b/doc/indevs.texi @@ -137,6 +137,95 @@ For more information read: IIDC1394 input device, based on libdc1394 and libraw1394. +@section openal + +The OpenAL input device provides audio capture on all systems with a +working OpenAL 1.1 implementation. + +To enable this input device during configuration, you need OpenAL +headers and libraries installed on your system, and need to configure +FFmpeg with @code{--enable-openal}. + +OpenAL headers and libraries should be provided as part of your OpenAL +implementation, or as an additional download (an SDK). Depending on your +installation you may need to specify additional flags via the +@code{--extra-cflags} and @code{--extra-ldflags} for allowing the build +system to locate the OpenAL headers and libraries. + +An incomplete list of OpenAL implementations follows: + +@table @strong +@item Creative +The official Windows implementation, providing hardware acceleration +with supported devices and software fallback. +See @url{http://openal.org/}. +@item OpenAL Soft +Portable, open source (LGPL) software implementation. Includes +backends for the most common sound APIs on the Windows, Linux, +Solaris, and BSD operating systems. +See @url{http://kcat.strangesoft.net/openal.html}. +@item Apple +OpenAL is part of Core Audio, the official Mac OS X Audio interface. +See @url{http://developer.apple.com/technologies/mac/audio-and-video.html} +@end table + +This device allows to capture from an audio input device handled +through OpenAL. + +You need to specify the name of the device to capture in the provided +filename. If the empty string is provided, the device will +automatically select the default device. You can get the list of the +supported devices by using the option @var{list_devices}. + +@subsection Options + +@table @option + +@item channels +Set the number of channels in the captured audio. Only the values +@option{1} (monaural) and @option{2} (stereo) are currently supported. +Defaults to @option{2}. + +@item sample_size +Set the sample size (in bits) of the captured audio. Only the values +@option{8} and @option{16} are currently supported. Defaults to +@option{16}. + +@item sample_rate +Set the sample rate (in Hz) of the captured audio. +Defaults to @option{44.1k}. + +@item list_devices +If set to @option{true}, print a list of devices and exit. +Defaults to @option{false}. 
+ +@end table + +@subsection Examples + +Print the list of OpenAL supported devices and exit: +@example +$ ffmpeg -list_devices true -f openal -i dummy out.ogg +@end example + +Capture from the OpenAL device @file{DR-BT101 via PulseAudio}: +@example +$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out.ogg +@end example + +Capture from the default device (note the empty string '' as filename): +@example +$ ffmpeg -f openal -i '' out.ogg +@end example + +Capture from two devices simultaneously, writing to two different files, +within the same @file{ffmpeg} command: +@example +$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out1.ogg -f openal -i 'ALSA Default' out2.ogg +@end example +Note: not all OpenAL implementations support multiple simultaneous capture - +try the latest OpenAL Soft if the above does not work. + @section oss Open Sound System input device. @@ -248,7 +337,46 @@ For example to grab from @file{:0.0} using @file{ffmpeg}: ffmpeg -f x11grab -r 25 -s cif -i :0.0 out.mpg # Grab at position 10,20. -ffmpeg -f x11grab -25 -s cif -i :0.0+10,20 out.mpg +ffmpeg -f x11grab -r 25 -s cif -i :0.0+10,20 out.mpg +@end example + +@subsection @var{follow_mouse} AVOption + +The syntax is: +@example +-follow_mouse centered|@var{PIXELS} +@end example + +When it is specified with "centered", the grabbing region follows the mouse +pointer and keeps the pointer at the center of region; otherwise, the region +follows only when the mouse pointer reaches within @var{PIXELS} (greater than +zero) to the edge of region. + +For example: +@example +ffmpeg -f x11grab -follow_mouse centered -r 25 -s cif -i :0.0 out.mpg + +# Follows only when the mouse pointer reaches within 100 pixels to edge +ffmpeg -f x11grab -follow_mouse 100 -r 25 -s cif -i :0.0 out.mpg +@end example + +@subsection @var{show_region} AVOption + +The syntax is: +@example +-show_region 1 +@end example + +If @var{show_region} AVOption is specified with @var{1}, then the grabbing +region will be indicated on screen. With this option, it's easy to know what is +being grabbed if only a portion of the screen is grabbed. + +For example: +@example +ffmpeg -f x11grab -show_region 1 -r 25 -s cif -i :0.0+10,20 out.mpg + +# With follow_mouse +ffmpeg -f x11grab -follow_mouse centered -show_region 1 -r 25 -s cif -i :0.0 out.mpg @end example @c man end INPUT DEVICES diff --git a/doc/muxers.texi b/doc/muxers.texi index 55b44d1018..66ea95ec18 100644 --- a/doc/muxers.texi +++ b/doc/muxers.texi @@ -51,7 +51,7 @@ and the input video converted to MPEG-2 video, use the command: ffmpeg -i INPUT -acodec pcm_u8 -vcodec mpeg2video -f crc - @end example -See also the @code{framecrc} muxer (@pxref{framecrc}). +See also the @ref{framecrc} muxer. @anchor{framecrc} @section framecrc @@ -88,7 +88,7 @@ MPEG-2 video, use the command: ffmpeg -i INPUT -acodec pcm_u8 -vcodec mpeg2video -f framecrc - @end example -See also the @code{crc} muxer (@pxref{crc}). +See also the @ref{crc} muxer. @section image2 diff --git a/doc/protocols.texi b/doc/protocols.texi index a71c262368..991ec888af 100644 --- a/doc/protocols.texi +++ b/doc/protocols.texi @@ -242,7 +242,7 @@ data transferred over RDT). The muxer can be used to send a stream using RTSP ANNOUNCE to a server supporting it (currently Darwin Streaming Server and Mischa Spiegelmock's -RTSP server, @url{http://github.com/revmischa/rtsp-server}). +@uref{http://github.com/revmischa/rtsp-server, RTSP server}). 
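For readers following the API side of this merge, the ANNOUNCE path described above can be driven directly from libavformat using the avformat_open_input()/avformat_write_header() calls this patch migrates to. The following is a minimal sketch, not part of the patch: it remuxes an existing file to the placeholder URL rtsp://server/live.sdp, assumes the input codecs can be carried over RTP, and keeps error handling to the bare minimum.

/*
 * Minimal sketch: remux an input file and publish it to an RTSP server via
 * ANNOUNCE. The rtsp:// URL is a placeholder, not taken from the patch.
 */
#include <libavformat/avformat.h>
#include <libavutil/avstring.h>
#include <libavutil/mathematics.h>

int main(int argc, char **argv)
{
    AVFormatContext *in = NULL, *out;
    AVPacket pkt;
    int i;

    if (argc < 2)
        return 1;
    av_register_all();

    /* open and probe the input with the dictionary-based API */
    if (avformat_open_input(&in, argv[1], NULL, NULL) < 0 ||
        avformat_find_stream_info(in, NULL) < 0)
        return 1;

    /* the RTSP muxer is AVFMT_NOFILE: no avio_open(), the URL goes in filename */
    out = avformat_alloc_context();
    if (!(out->oformat = av_guess_format("rtsp", NULL, NULL)))
        return 1;
    av_strlcpy(out->filename, "rtsp://server/live.sdp", sizeof(out->filename));

    /* copy the codec parameters of every input stream */
    for (i = 0; i < in->nb_streams; i++) {
        AVStream *ost = av_new_stream(out, i);
        avcodec_copy_context(ost->codec, in->streams[i]->codec);
        ost->codec->codec_tag = 0;
    }

    /* avformat_write_header() performs the ANNOUNCE/SETUP handshake */
    if (avformat_write_header(out, NULL) < 0)
        return 1;

    while (av_read_frame(in, &pkt) >= 0) {
        AVStream *ist = in->streams[pkt.stream_index];
        AVStream *ost = out->streams[pkt.stream_index];
        /* rescale timestamps from the demuxer to the muxer time base */
        if (pkt.pts != AV_NOPTS_VALUE)
            pkt.pts = av_rescale_q(pkt.pts, ist->time_base, ost->time_base);
        if (pkt.dts != AV_NOPTS_VALUE)
            pkt.dts = av_rescale_q(pkt.dts, ist->time_base, ost->time_base);
        av_interleaved_write_frame(out, &pkt);
        av_free_packet(&pkt);
    }

    av_write_trailer(out);
    av_close_input_file(in);
    avformat_free_context(out);
    return 0;
}

Note that the muxer does not pace the packets itself; a caller feeding a live server from a file has to throttle reads the same way ffmpeg's -re option does.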
The required syntax for a RTSP url is: @example diff --git a/doc/texi2pod.pl b/doc/texi2pod.pl index 84c36ff1e1..0eb5e8d9fe 100755 --- a/doc/texi2pod.pl +++ b/doc/texi2pod.pl @@ -352,6 +352,7 @@ sub postprocess s/\(?\@xref\{(?:[^\}]*)\}(?:[^.<]|(?:<[^<>]*>))*\.\)?//g; s/\s+\(\@pxref\{(?:[^\}]*)\}\)//g; s/;\s+\@pxref\{(?:[^\}]*)\}//g; + s/\@ref\{([^\}]*)\}/$1/g; s/\@noindent\s*//g; s/\@refill//g; s/\@gol//g; @@ -40,6 +40,7 @@ #include "libavutil/fifo.h" #include "libavutil/intreadwrite.h" #include "libavutil/dict.h" +#include "libavutil/mathematics.h" #include "libavutil/pixdesc.h" #include "libavutil/avstring.h" #include "libavutil/libm.h" @@ -89,26 +90,26 @@ const char program_name[] = "ffmpeg"; const int program_birth_year = 2000; /* select an input stream for an output stream */ -typedef struct AVStreamMap { +typedef struct StreamMap { int file_index; int stream_index; int sync_file_index; int sync_stream_index; -} AVStreamMap; +} StreamMap; /** * select an input file for an output file */ -typedef struct AVMetaDataMap { - int file; //< file index - char type; //< type of metadata to copy -- (g)lobal, (s)tream, (c)hapter or (p)rogram - int index; //< stream/chapter/program number -} AVMetaDataMap; +typedef struct MetadataMap { + int file; ///< file index + char type; ///< type of metadata to copy -- (g)lobal, (s)tream, (c)hapter or (p)rogram + int index; ///< stream/chapter/program number +} MetadataMap; -typedef struct AVChapterMap { +typedef struct ChapterMap { int in_file; int out_file; -} AVChapterMap; +} ChapterMap; static const OptionDef options[]; @@ -116,28 +117,25 @@ static const OptionDef options[]; #if !FF_API_MAX_STREAMS #define MAX_STREAMS 1024 /* arbitrary sanity check value */ #endif - static const char *last_asked_format = NULL; -static int64_t input_files_ts_offset[MAX_FILES]; -static double *input_files_ts_scale[MAX_FILES] = {NULL}; -static AVCodec **input_codecs = NULL; -static int nb_input_codecs = 0; -static int nb_input_files_ts_scale[MAX_FILES] = {0}; +static double *ts_scale; +static int nb_ts_scale; static AVFormatContext *output_files[MAX_FILES]; +static AVDictionary *output_opts[MAX_FILES]; static int nb_output_files = 0; -static AVStreamMap *stream_maps = NULL; +static StreamMap *stream_maps = NULL; static int nb_stream_maps; /* first item specifies output metadata, second is input */ -static AVMetaDataMap (*meta_data_maps)[2] = NULL; +static MetadataMap (*meta_data_maps)[2] = NULL; static int nb_meta_data_maps; static int metadata_global_autocopy = 1; static int metadata_streams_autocopy = 1; static int metadata_chapters_autocopy = 1; -static AVChapterMap *chapter_maps = NULL; +static ChapterMap *chapter_maps = NULL; static int nb_chapter_maps; /* indexed by output file stream index */ @@ -175,7 +173,6 @@ static char *vfilters = NULL; static int intra_only = 0; static int audio_sample_rate = 0; -static int64_t channel_layout = 0; #define QSCALE_NONE -99999 static float audio_qscale = QSCALE_NONE; static int audio_disable = 0; @@ -198,7 +195,6 @@ static float mux_max_delay= 0.7; static int64_t recording_time = INT64_MAX; static int64_t start_time = 0; -static int64_t recording_timestamp = 0; static int64_t input_ts_offset = 0; static int file_overwrite = 0; static AVDictionary *metadata; @@ -225,9 +221,6 @@ static int copy_initial_nonkeyframes = 0; static int rate_emu = 0; -static int video_channel = 0; -static char *video_standard; - static int audio_volume = 256; static int exit_on_error = 0; @@ -262,19 +255,19 @@ static AVBitStreamFilterContext 
*subtitle_bitstream_filters=NULL; #define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass" -struct AVInputStream; +struct InputStream; -typedef struct AVOutputStream { +typedef struct OutputStream { int file_index; /* file index */ int index; /* stream index in the output file */ - int source_index; /* AVInputStream index */ + int source_index; /* InputStream index */ AVStream *st; /* stream in the output file */ int encoding_needed; /* true if encoding needed for this stream */ int frame_number; /* input pts and corresponding output pts for A/V sync */ //double sync_ipts; /* dts from the AVPacket of the demuxer in second units */ - struct AVInputStream *sync_ist; /* input stream to sync against */ + struct InputStream *sync_ist; /* input stream to sync against */ int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ //FIXME look at frame_number AVBitStreamFilterContext *bitstream_filters; AVCodec *enc; @@ -315,37 +308,38 @@ typedef struct AVOutputStream { #endif int sws_flags; -} AVOutputStream; + AVDictionary *opts; +} OutputStream; -static AVOutputStream **output_streams_for_file[MAX_FILES] = { NULL }; +static OutputStream **output_streams_for_file[MAX_FILES] = { NULL }; static int nb_output_streams_for_file[MAX_FILES] = { 0 }; -typedef struct AVInputStream { +typedef struct InputStream { int file_index; AVStream *st; int discard; /* true if stream data should be discarded */ int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */ - int64_t sample_index; /* current sample */ + AVCodec *dec; int64_t start; /* time when read started */ int64_t next_pts; /* synthetic pts for cases where pkt.pts is not defined */ int64_t pts; /* current pts */ + double ts_scale; int is_start; /* is 1 at the start and after a discontinuity */ int showed_multi_packet_warning; int is_past_recording_time; -#if CONFIG_AVFILTER - AVFrame *filter_frame; - int has_filter_frame; -#endif -} AVInputStream; + AVDictionary *opts; +} InputStream; -typedef struct AVInputFile { +typedef struct InputFile { AVFormatContext *ctx; int eof_reached; /* true if eof reached */ int ist_index; /* index of first stream in ist_table */ int buffer_size; /* current total buffer size */ -} AVInputFile; + int nb_streams; + int64_t ts_offset; +} InputFile; #if HAVE_TERMIOS_H @@ -353,14 +347,14 @@ typedef struct AVInputFile { static struct termios oldtty; #endif -static AVInputStream *input_streams = NULL; +static InputStream *input_streams = NULL; static int nb_input_streams = 0; -static AVInputFile *input_files = NULL; +static InputFile *input_files = NULL; static int nb_input_files = 0; #if CONFIG_AVFILTER -static int configure_video_filters(AVInputStream *ist, AVOutputStream *ost) +static int configure_video_filters(InputStream *ist, OutputStream *ost) { AVFilterContext *last_filter, *filter; /** filter graph containing all filters including input & output */ @@ -542,11 +536,13 @@ static int ffmpeg_exit(int ret) avio_close(s->pb); avformat_free_context(s); av_free(output_streams_for_file[i]); + av_dict_free(&output_opts[i]); } for(i=0;i<nb_input_files;i++) { av_close_input_file(input_files[i].ctx); - av_free(input_files_ts_scale[i]); } + for (i = 0; i < nb_input_streams; i++) + av_dict_free(&input_streams[i].opts); av_free(intra_matrix); av_free(inter_matrix); @@ -556,7 +552,6 @@ static int ffmpeg_exit(int ret) av_free(vstats_filename); av_free(streamid_map); - av_free(input_codecs); av_free(stream_maps); av_free(meta_data_maps); @@ -568,8 +563,6 @@ static int ffmpeg_exit(int ret) 
av_free(subtitle_codec_name); av_free(data_codec_name); - av_free(video_standard); - uninit_opts(); av_free(audio_buf); av_free(audio_out); @@ -591,6 +584,32 @@ static int ffmpeg_exit(int ret) return ret; } +static void assert_avoptions(AVDictionary *m) +{ + AVDictionaryEntry *t; + if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) { + av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key); + ffmpeg_exit(1); + } +} + +static void assert_codec_experimental(AVCodecContext *c, int encoder) +{ + const char *codec_string = encoder ? "encoder" : "decoder"; + AVCodec *codec; + if (c->codec->capabilities & CODEC_CAP_EXPERIMENTAL && + c->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) { + av_log(NULL, AV_LOG_ERROR, "%s '%s' is experimental and might produce bad " + "results.\nAdd '-strict experimental' if you want to use it.\n", + codec_string, c->codec->name); + codec = encoder ? avcodec_find_encoder(c->codec->id) : avcodec_find_decoder(c->codec->id); + if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL)) + av_log(NULL, AV_LOG_ERROR, "Or use the non experimental %s '%s'.\n", + codec_string, codec->name); + ffmpeg_exit(1); + } +} + /* similar to ff_dynarray_add() and av_fast_realloc() */ static void *grow_array(void *array, int elem_size, int *size, int new_size) { @@ -679,10 +698,16 @@ static void choose_pixel_fmt(AVStream *st, AVCodec *codec) } } -static AVOutputStream *new_output_stream(AVFormatContext *oc, int file_idx) +static OutputStream *new_output_stream(AVFormatContext *oc, int file_idx, AVCodec *codec) { - int idx = oc->nb_streams - 1; - AVOutputStream *ost; + OutputStream *ost; + AVStream *st = av_new_stream(oc, oc->nb_streams < nb_streamid_map ? streamid_map[oc->nb_streams] : 0); + int idx = oc->nb_streams - 1; + + if (!st) { + av_log(NULL, AV_LOG_ERROR, "Could not alloc stream.\n"); + ffmpeg_exit(1); + } output_streams_for_file[file_idx] = grow_array(output_streams_for_file[file_idx], @@ -690,13 +715,19 @@ static AVOutputStream *new_output_stream(AVFormatContext *oc, int file_idx) &nb_output_streams_for_file[file_idx], oc->nb_streams); ost = output_streams_for_file[file_idx][idx] = - av_mallocz(sizeof(AVOutputStream)); + av_mallocz(sizeof(OutputStream)); if (!ost) { fprintf(stderr, "Could not alloc output stream\n"); ffmpeg_exit(1); } ost->file_index = file_idx; ost->index = idx; + ost->st = st; + ost->enc = codec; + if (codec) + ost->opts = filter_codec_opts(codec_opts, codec->id, 1); + + avcodec_get_context_defaults3(st->codec, codec); ost->sws_flags = av_get_int(sws_opts, "sws_flags", NULL); return ost; @@ -705,34 +736,28 @@ static AVOutputStream *new_output_stream(AVFormatContext *oc, int file_idx) static int read_ffserver_streams(AVFormatContext *s, const char *filename) { int i, err; - AVFormatContext *ic; + AVFormatContext *ic = NULL; int nopts = 0; - err = av_open_input_file(&ic, filename, NULL, FFM_PACKET_SIZE, NULL); + err = avformat_open_input(&ic, filename, NULL, NULL); if (err < 0) return err; /* copy stream format */ - s->nb_streams = 0; for(i=0;i<ic->nb_streams;i++) { AVStream *st; + OutputStream *ost; AVCodec *codec; - s->nb_streams++; + codec = avcodec_find_encoder(ic->streams[i]->codec->codec_id); + ost = new_output_stream(s, nb_output_files, codec); + st = ost->st; // FIXME: a more elegant solution is needed - st = av_mallocz(sizeof(AVStream)); - memcpy(st, ic->streams[i], sizeof(AVStream)); + memcpy(st, &ic->streams[i], sizeof(AVStream)); st->info = av_malloc(sizeof(*st->info)); memcpy(st->info, ic->streams[i]->info, sizeof(*st->info)); - 
st->codec = avcodec_alloc_context(); - if (!st->codec) { - print_error(filename, AVERROR(ENOMEM)); - ffmpeg_exit(1); - } avcodec_copy_context(st->codec, ic->streams[i]->codec); - s->streams[i] = st; - codec = avcodec_find_encoder(st->codec->codec_id); if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { if (audio_stream_copy) { st->stream_copy = 1; @@ -747,21 +772,16 @@ static int read_ffserver_streams(AVFormatContext *s, const char *filename) if(st->codec->flags & CODEC_FLAG_BITEXACT) nopts = 1; - - new_output_stream(s, nb_output_files); } - if (!nopts) - s->timestamp = av_gettime(); - av_close_input_file(ic); return 0; } static double -get_sync_ipts(const AVOutputStream *ost) +get_sync_ipts(const OutputStream *ost) { - const AVInputStream *ist = ost->sync_ist; + const InputStream *ist = ost->sync_ist; return (double)(ist->pts - start_time)/AV_TIME_BASE; } @@ -800,8 +820,8 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx #define MAX_AUDIO_PACKET_SIZE (128 * 1024) static void do_audio_out(AVFormatContext *s, - AVOutputStream *ost, - AVInputStream *ist, + OutputStream *ost, + InputStream *ist, unsigned char *buf, int size) { uint8_t *buftmp; @@ -1043,7 +1063,7 @@ need_realloc: } } -static void pre_process_video_frame(AVInputStream *ist, AVPicture *picture, void **bufp) +static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp) { AVCodecContext *dec; AVPicture *picture2; @@ -1086,8 +1106,8 @@ static void pre_process_video_frame(AVInputStream *ist, AVPicture *picture, void #define AV_DELAY_MAX 0.100 static void do_subtitle_out(AVFormatContext *s, - AVOutputStream *ost, - AVInputStream *ist, + OutputStream *ost, + InputStream *ist, AVSubtitle *sub, int64_t pts) { @@ -1152,10 +1172,10 @@ static int bit_buffer_size= 1024*256; static uint8_t *bit_buffer= NULL; static void do_video_out(AVFormatContext *s, - AVOutputStream *ost, - AVInputStream *ist, + OutputStream *ost, + InputStream *ist, AVFrame *in_picture, - int *frame_size) + int *frame_size, float quality) { int nb_frames, i, ret, av_unused resample_changed; AVFrame *final_picture, *formatted_picture; @@ -1286,7 +1306,7 @@ static void do_video_out(AVFormatContext *s, /* handles sameq here. This is not correct because it may not be a global option */ - big_picture.quality = same_quality ? 
ist->st->quality : ost->st->quality; + big_picture.quality = quality; if(!me_threshold) big_picture.pict_type = 0; // big_picture.pts = AV_NOPTS_VALUE; @@ -1337,7 +1357,7 @@ static double psnr(double d){ return -10.0*log(d)/log(10.0); } -static void do_video_stats(AVFormatContext *os, AVOutputStream *ost, +static void do_video_stats(AVFormatContext *os, OutputStream *ost, int frame_size) { AVCodecContext *enc; @@ -1375,11 +1395,11 @@ static void do_video_stats(AVFormatContext *os, AVOutputStream *ost, } static void print_report(AVFormatContext **output_files, - AVOutputStream **ost_table, int nb_ostreams, + OutputStream **ost_table, int nb_ostreams, int is_last_report) { char buf[1024]; - AVOutputStream *ost; + OutputStream *ost; AVFormatContext *oc; int64_t total_size; AVCodecContext *enc; @@ -1514,12 +1534,12 @@ static void generate_silence(uint8_t* buf, enum AVSampleFormat sample_fmt, size_ } /* pkt = NULL means EOF (needed to flush decoder buffers) */ -static int output_packet(AVInputStream *ist, int ist_index, - AVOutputStream **ost_table, int nb_ostreams, +static int output_packet(InputStream *ist, int ist_index, + OutputStream **ost_table, int nb_ostreams, const AVPacket *pkt) { AVFormatContext *os; - AVOutputStream *ost; + OutputStream *ost; int ret, i; int got_output; AVFrame picture; @@ -1530,6 +1550,7 @@ static int output_packet(AVInputStream *ist, int ist_index, #if CONFIG_AVFILTER int frame_available; #endif + float quality; AVPacket avpkt; int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt); @@ -1585,7 +1606,7 @@ static int output_packet(AVInputStream *ist, int ist_index, ret = avcodec_decode_audio3(ist->st->codec, samples, &decoded_data_size, &avpkt); if (ret < 0) - goto fail_decode; + return ret; avpkt.data += ret; avpkt.size -= ret; data_size = ret; @@ -1610,9 +1631,9 @@ static int output_packet(AVInputStream *ist, int ist_index, ret = avcodec_decode_video2(ist->st->codec, &picture, &got_output, &avpkt); - ist->st->quality= picture.quality; + quality = same_quality ? picture.quality : 0; if (ret < 0) - goto fail_decode; + return ret; if (!got_output) { /* no picture yet */ goto discard_packet; @@ -1632,7 +1653,7 @@ static int output_packet(AVInputStream *ist, int ist_index, ret = avcodec_decode_subtitle2(ist->st->codec, &subtitle, &got_output, &avpkt); if (ret < 0) - goto fail_decode; + return ret; if (!got_output) { goto discard_packet; } @@ -1640,7 +1661,7 @@ static int output_packet(AVInputStream *ist, int ist_index, avpkt.size = 0; break; default: - goto fail_decode; + return -1; } } else { switch(ist->st->codec->codec_type) { @@ -1723,7 +1744,7 @@ static int output_packet(AVInputStream *ist, int ist_index, os = output_files[ost->file_index]; /* set the input output pts pairs */ - //ost->sync_ipts = (double)(ist->pts + input_files_ts_offset[ist->file_index] - start_time)/ AV_TIME_BASE; + //ost->sync_ipts = (double)(ist->pts + input_files[ist->file_index].ts_offset - start_time)/ AV_TIME_BASE; if (ost->encoding_needed) { av_assert0(ist->decoding_needed); @@ -1736,7 +1757,8 @@ static int output_packet(AVInputStream *ist, int ist_index, if (ost->picref->video && !ost->frame_aspect_ratio) ost->st->codec->sample_aspect_ratio = ost->picref->video->sample_aspect_ratio; #endif - do_video_out(os, ost, ist, &picture, &frame_size); + do_video_out(os, ost, ist, &picture, &frame_size, + same_quality ? 
quality : ost->st->codec->global_quality); if (vstats_filename && frame_size) do_video_stats(os, ost, frame_size); break; @@ -1860,7 +1882,7 @@ static int output_packet(AVInputStream *ist, int ist_index, ret = 0; /* encode any samples remaining in fifo */ if (fifo_bytes > 0) { - int osize = av_get_bits_per_sample_fmt(enc->sample_fmt) >> 3; + int osize = av_get_bytes_per_sample(enc->sample_fmt); int fs_tmp = enc->frame_size; av_fifo_generic_read(ost->fifo, audio_buf, fifo_bytes, NULL); @@ -1919,8 +1941,6 @@ static int output_packet(AVInputStream *ist, int ist_index, } return 0; - fail_decode: - return -1; } static void print_sdp(AVFormatContext **avc, int n) @@ -1940,7 +1960,7 @@ static int copy_chapters(int infile, int outfile) for (i = 0; i < is->nb_chapters; i++) { AVChapter *in_ch = is->chapters[i], *out_ch; - int64_t ts_off = av_rescale_q(start_time - input_files_ts_offset[infile], + int64_t ts_off = av_rescale_q(start_time - input_files[infile].ts_offset, AV_TIME_BASE_Q, in_ch->time_base); int64_t rt = (recording_time == INT64_MAX) ? INT64_MAX : av_rescale_q(recording_time, AV_TIME_BASE_Q, in_ch->time_base); @@ -1972,7 +1992,7 @@ static int copy_chapters(int infile, int outfile) return 0; } -static void parse_forced_key_frames(char *kf, AVOutputStream *ost, +static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx) { char *p; @@ -2000,16 +2020,16 @@ static void parse_forced_key_frames(char *kf, AVOutputStream *ost, */ static int transcode(AVFormatContext **output_files, int nb_output_files, - AVInputFile *input_files, + InputFile *input_files, int nb_input_files, - AVStreamMap *stream_maps, int nb_stream_maps) + StreamMap *stream_maps, int nb_stream_maps) { int ret = 0, i, j, k, n, nb_ostreams = 0, step; AVFormatContext *is, *os; AVCodecContext *codec, *icodec; - AVOutputStream *ost, **ost_table = NULL; - AVInputStream *ist; + OutputStream *ost, **ost_table = NULL; + InputStream *ist; char error[1024]; int key; int want_sdp = 1; @@ -2046,7 +2066,7 @@ static int transcode(AVFormatContext **output_files, int si = stream_maps[i].stream_index; if (fi < 0 || fi > nb_input_files - 1 || - si < 0 || si > input_files[fi].ctx->nb_streams - 1) { + si < 0 || si > input_files[fi].nb_streams - 1) { fprintf(stderr,"Could not find input stream #%d.%d\n", fi, si); ret = AVERROR(EINVAL); goto fail; @@ -2061,7 +2081,7 @@ static int transcode(AVFormatContext **output_files, } } - ost_table = av_mallocz(sizeof(AVOutputStream *) * nb_ostreams); + ost_table = av_mallocz(sizeof(OutputStream *) * nb_ostreams); if (!ost_table) goto fail; @@ -2107,7 +2127,6 @@ static int transcode(AVFormatContext **output_files, for(i=0;i<os->nb_streams;i++,n++) { int found; ost = ost_table[n] = output_streams_for_file[k][i]; - ost->st = os->streams[i]; if (nb_stream_maps > 0) { ost->source_index = input_files[stream_maps[n].file_index].ist_index + stream_maps[n].stream_index; @@ -2221,13 +2240,23 @@ static int transcode(AVFormatContext **output_files, goto fail; memcpy(codec->extradata, icodec->extradata, icodec->extradata_size); codec->extradata_size= icodec->extradata_size; - if(!copy_tb && av_q2d(icodec->time_base)*icodec->ticks_per_frame > av_q2d(ist->st->time_base) && av_q2d(ist->st->time_base) < 1.0/500){ - codec->time_base = icodec->time_base; - codec->time_base.num *= icodec->ticks_per_frame; - av_reduce(&codec->time_base.num, &codec->time_base.den, - codec->time_base.num, codec->time_base.den, INT_MAX); - }else - codec->time_base = ist->st->time_base; + + codec->time_base = 
ist->st->time_base; + if(!strcmp(os->oformat->name, "avi")) { + if(!copy_tb && av_q2d(icodec->time_base)*icodec->ticks_per_frame > 2*av_q2d(ist->st->time_base) && av_q2d(ist->st->time_base) < 1.0/500){ + codec->time_base = icodec->time_base; + codec->time_base.num *= icodec->ticks_per_frame; + codec->time_base.den *= 2; + } + } else if(!(os->oformat->flags & AVFMT_VARIABLE_FPS)) { + if(!copy_tb && av_q2d(icodec->time_base)*icodec->ticks_per_frame > av_q2d(ist->st->time_base) && av_q2d(ist->st->time_base) < 1.0/500){ + codec->time_base = icodec->time_base; + codec->time_base.num *= icodec->ticks_per_frame; + } + } + av_reduce(&codec->time_base.num, &codec->time_base.den, + codec->time_base.num, codec->time_base.den, INT_MAX); + switch(codec->codec_type) { case AVMEDIA_TYPE_AUDIO: if(audio_volume != 256) { @@ -2278,13 +2307,16 @@ static int transcode(AVFormatContext **output_files, ost->reformat_pair = MAKE_SFMT_PAIR(AV_SAMPLE_FMT_NONE,AV_SAMPLE_FMT_NONE); if (!codec->sample_rate) { codec->sample_rate = icodec->sample_rate; - if (icodec->lowres) - codec->sample_rate >>= icodec->lowres; } choose_sample_rate(ost->st, ost->enc); codec->time_base = (AVRational){1, codec->sample_rate}; - if (!codec->channels) + if (codec->sample_fmt == AV_SAMPLE_FMT_NONE) + codec->sample_fmt = icodec->sample_fmt; + choose_sample_fmt(ost->st, ost->enc); + if (!codec->channels) { codec->channels = icodec->channels; + codec->channel_layout = icodec->channel_layout; + } if (av_get_channel_layout_nb_channels(codec->channel_layout) != codec->channels) codec->channel_layout = 0; ost->audio_resample = codec->sample_rate != icodec->sample_rate || audio_sync_method > 1; @@ -2304,16 +2336,19 @@ static int transcode(AVFormatContext **output_files, fprintf(stderr, "Video pixel format is unknown, stream cannot be encoded\n"); ffmpeg_exit(1); } + + if (!codec->width || !codec->height) { + codec->width = icodec->width; + codec->height = icodec->height; + } + ost->video_resample = codec->width != icodec->width || codec->height != icodec->height || codec->pix_fmt != icodec->pix_fmt; if (ost->video_resample) { codec->bits_per_raw_sample= frame_bits_per_raw_sample; } - if (!codec->width || !codec->height) { - codec->width = icodec->width; - codec->height = icodec->height; - } + ost->resample_height = icodec->height; ost->resample_width = icodec->width; ost->resample_pix_fmt= icodec->pix_fmt; @@ -2412,12 +2447,17 @@ static int transcode(AVFormatContext **output_files, memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size); ost->st->codec->subtitle_header_size = dec->subtitle_header_size; } - if (avcodec_open(ost->st->codec, codec) < 0) { + if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) { snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d.%d - maybe incorrect parameters such as bit_rate, rate, width or height", ost->file_index, ost->index); ret = AVERROR(EINVAL); goto dump_format; } + assert_codec_experimental(ost->st->codec, 1); + assert_avoptions(ost->opts); + if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000) + av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low." + "It takes bits/s as argument, not kbits/s\n"); extra_size += ost->st->codec->extradata_size; } } @@ -2426,7 +2466,7 @@ static int transcode(AVFormatContext **output_files, for (i = 0; i < nb_input_streams; i++) { ist = &input_streams[i]; if (ist->decoding_needed) { - AVCodec *codec = i < nb_input_codecs ? 
input_codecs[i] : NULL; + AVCodec *codec = ist->dec; if (!codec) codec = avcodec_find_decoder(ist->st->codec->codec_id); if (!codec) { @@ -2435,12 +2475,14 @@ static int transcode(AVFormatContext **output_files, ret = AVERROR(EINVAL); goto dump_format; } - if (avcodec_open(ist->st->codec, codec) < 0) { + if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) { snprintf(error, sizeof(error), "Error while opening decoder for input stream #%d.%d", ist->file_index, ist->st->index); ret = AVERROR(EINVAL); goto dump_format; } + assert_codec_experimental(ist->st->codec, 0); + assert_avoptions(ost->opts); //if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) // ist->st->codec->flags |= CODEC_FLAG_REPEAT_FIELD; } @@ -2481,7 +2523,7 @@ static int transcode(AVFormatContext **output_files, files[1] = input_files[in_file_index].ctx; for (j = 0; j < 2; j++) { - AVMetaDataMap *map = &meta_data_maps[i][j]; + MetadataMap *map = &meta_data_maps[i][j]; switch (map->type) { case 'g': @@ -2548,11 +2590,12 @@ static int transcode(AVFormatContext **output_files, /* open files and write file headers */ for(i=0;i<nb_output_files;i++) { os = output_files[i]; - if (av_write_header(os) < 0) { + if (avformat_write_header(os, &output_opts[i]) < 0) { snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?)", i); ret = AVERROR(EINVAL); goto dump_format; } +// assert_avoptions(output_opts[i]); if (strcmp(output_files[i]->oformat->name, "rtp")) { want_sdp = 0; } @@ -2729,7 +2772,7 @@ static int transcode(AVFormatContext **output_files, } /* the following test is needed in case new streams appear dynamically in stream : we ignore them */ - if (pkt.stream_index >= input_files[file_index].ctx->nb_streams) + if (pkt.stream_index >= input_files[file_index].nb_streams) goto discard_packet; ist_index = input_files[file_index].ist_index + pkt.stream_index; ist = &input_streams[ist_index]; @@ -2737,27 +2780,27 @@ static int transcode(AVFormatContext **output_files, goto discard_packet; if (pkt.dts != AV_NOPTS_VALUE) - pkt.dts += av_rescale_q(input_files_ts_offset[ist->file_index], AV_TIME_BASE_Q, ist->st->time_base); + pkt.dts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base); if (pkt.pts != AV_NOPTS_VALUE) - pkt.pts += av_rescale_q(input_files_ts_offset[ist->file_index], AV_TIME_BASE_Q, ist->st->time_base); + pkt.pts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base); - if (pkt.stream_index < nb_input_files_ts_scale[file_index] - && input_files_ts_scale[file_index][pkt.stream_index]){ + if (ist->ts_scale) { if(pkt.pts != AV_NOPTS_VALUE) - pkt.pts *= input_files_ts_scale[file_index][pkt.stream_index]; + pkt.pts *= ist->ts_scale; if(pkt.dts != AV_NOPTS_VALUE) - pkt.dts *= input_files_ts_scale[file_index][pkt.stream_index]; + pkt.dts *= ist->ts_scale; } -// fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n", ist->next_pts, pkt.dts, input_files_ts_offset[ist->file_index], ist->st->codec->codec_type); +// fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n", ist->next_pts, pkt.dts, input_files[ist->file_index].ts_offset, ist->st->codec->codec_type); if (pkt.dts != AV_NOPTS_VALUE && ist->next_pts != AV_NOPTS_VALUE && (is->iformat->flags & AVFMT_TS_DISCONT)) { int64_t pkt_dts= av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q); int64_t delta= pkt_dts - ist->next_pts; if((FFABS(delta) > 1LL*dts_delta_threshold*AV_TIME_BASE || pkt_dts+1<ist->pts)&& !copy_ts){ - 
input_files_ts_offset[ist->file_index]-= delta; + input_files[ist->file_index].ts_offset -= delta; if (verbose > 2) - fprintf(stderr, "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n", delta, input_files_ts_offset[ist->file_index]); + fprintf(stderr, "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n", + delta, input_files[ist->file_index].ts_offset); pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); if(pkt.pts != AV_NOPTS_VALUE) pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); @@ -2860,6 +2903,7 @@ static int transcode(AVFormatContext **output_files, audio_resample_close(ost->resample); if (ost->reformat_ctx) av_audio_convert_free(ost->reformat_ctx); + av_dict_free(&ost->opts); av_free(ost); } } @@ -2901,18 +2945,6 @@ static int opt_frame_rate(const char *opt, const char *arg) return 0; } -static int opt_bitrate(const char *opt, const char *arg) -{ - int codec_type = opt[0]=='a' ? AVMEDIA_TYPE_AUDIO : AVMEDIA_TYPE_VIDEO; - - opt_default(opt, arg); - - if (av_get_int(avcodec_opts[codec_type], "b", NULL) < 1000) - fprintf(stderr, "WARNING: The bitrate parameter is set too low. It takes bits/s as argument, not kbits/s\n"); - - return 0; -} - static int opt_frame_crop(const char *opt, const char *arg) { fprintf(stderr, "Option '%s' has been removed, use the crop filter instead\n", opt); @@ -2942,7 +2974,7 @@ static int opt_frame_pix_fmt(const char *opt, const char *arg) return AVERROR(EINVAL); } } else { - show_pix_fmts(); + opt_pix_fmts(NULL, NULL); ffmpeg_exit(0); } return 0; @@ -3001,8 +3033,7 @@ static int opt_qscale(const char *opt, const char *arg) static int opt_top_field_first(const char *opt, const char *arg) { top_field_first = parse_number_or_die(opt, arg, OPT_INT, 0, 1); - opt_default(opt, arg); - return 0; + return opt_default(opt, arg); } static int opt_thread_count(const char *opt, const char *arg) @@ -3047,14 +3078,14 @@ static int opt_audio_channels(const char *opt, const char *arg) static int opt_video_channel(const char *opt, const char *arg) { - video_channel = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX); - return 0; + av_log(NULL, AV_LOG_WARNING, "This option is deprecated, use -channel.\n"); + return opt_default("channel", arg); } static int opt_video_standard(const char *opt, const char *arg) { - video_standard = av_strdup(arg); - return 0; + av_log(NULL, AV_LOG_WARNING, "This option is deprecated, use -standard.\n"); + return opt_default("standard", arg); } static int opt_codec(const char *opt, const char *arg) @@ -3095,7 +3126,7 @@ static int opt_codec_tag(const char *opt, const char *arg) static int opt_map(const char *opt, const char *arg) { - AVStreamMap *m; + StreamMap *m; char *p; stream_maps = grow_array(stream_maps, sizeof(*stream_maps), &nb_stream_maps, nb_stream_maps + 1); @@ -3142,7 +3173,7 @@ static void parse_meta_type(char *arg, char *type, int *index, char **endptr) static int opt_map_metadata(const char *opt, const char *arg) { - AVMetaDataMap *m, *m1; + MetadataMap *m, *m1; char *p; meta_data_maps = grow_array(meta_data_maps, sizeof(*meta_data_maps), @@ -3177,7 +3208,7 @@ static int opt_map_meta_data(const char *opt, const char *arg) static int opt_map_chapters(const char *opt, const char *arg) { - AVChapterMap *c; + ChapterMap *c; char *p; chapter_maps = grow_array(chapter_maps, sizeof(*chapter_maps), &nb_chapter_maps, @@ -3205,8 +3236,8 @@ static int opt_input_ts_scale(const char *opt, const char *arg) if(stream >= MAX_STREAMS) ffmpeg_exit(1); - input_files_ts_scale[nb_input_files] = 
grow_array(input_files_ts_scale[nb_input_files], sizeof(*input_files_ts_scale[nb_input_files]), &nb_input_files_ts_scale[nb_input_files], stream + 1); - input_files_ts_scale[nb_input_files][stream]= scale; + ts_scale = grow_array(ts_scale, sizeof(*ts_scale), &nb_ts_scale, stream + 1); + ts_scale[stream] = scale; return 0; } @@ -3224,7 +3255,14 @@ static int opt_start_time(const char *opt, const char *arg) static int opt_recording_timestamp(const char *opt, const char *arg) { - recording_timestamp = parse_time_or_die(opt, arg, 0) / 1000000; + char buf[128]; + int64_t recording_timestamp = parse_time_or_die(opt, arg, 0) / 1E6; + struct tm time = *gmtime((time_t*)&recording_timestamp); + strftime(buf, sizeof(buf), "creation_time=%FT%T%z", &time); + opt_metadata("metadata", buf); + + av_log(NULL, AV_LOG_WARNING, "%s is deprecated, set the 'creation_time' metadata " + "tag instead.\n", opt); return 0; } @@ -3234,7 +3272,7 @@ static int opt_input_ts_offset(const char *opt, const char *arg) return 0; } -static enum CodecID find_codec_or_die(const char *name, int type, int encoder, int strict) +static enum CodecID find_codec_or_die(const char *name, int type, int encoder) { const char *codec_string = encoder ? "encoder" : "decoder"; AVCodec *codec; @@ -3252,29 +3290,18 @@ static enum CodecID find_codec_or_die(const char *name, int type, int encoder, i fprintf(stderr, "Invalid %s type '%s'\n", codec_string, name); ffmpeg_exit(1); } - if(codec->capabilities & CODEC_CAP_EXPERIMENTAL && - strict > FF_COMPLIANCE_EXPERIMENTAL) { - fprintf(stderr, "%s '%s' is experimental and might produce bad " - "results.\nAdd '-strict experimental' if you want to use it.\n", - codec_string, codec->name); - codec = encoder ? - avcodec_find_encoder(codec->id) : - avcodec_find_decoder(codec->id); - if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL)) - fprintf(stderr, "Or use the non experimental %s '%s'.\n", - codec_string, codec->name); - ffmpeg_exit(1); - } return codec->id; } static int opt_input_file(const char *opt, const char *filename) { AVFormatContext *ic; - AVFormatParameters params, *ap = ¶ms; AVInputFormat *file_iformat = NULL; int err, i, ret, rfps, rfps_base; int64_t timestamp; + uint8_t buf[128]; + AVDictionary **opts; + int orig_nb_streams; // number of streams before avformat_find_stream_info if (last_asked_format) { if (!(file_iformat = av_find_input_format(last_asked_format))) { @@ -3296,45 +3323,46 @@ static int opt_input_file(const char *opt, const char *filename) print_error(filename, AVERROR(ENOMEM)); ffmpeg_exit(1); } - - memset(ap, 0, sizeof(*ap)); - ap->prealloced_context = 1; - ap->sample_rate = audio_sample_rate; - ap->channels = audio_channels; - ap->time_base.den = frame_rate.num; - ap->time_base.num = frame_rate.den; - ap->width = frame_width; - ap->height = frame_height; - ap->pix_fmt = frame_pix_fmt; - // ap->sample_fmt = audio_sample_fmt; //FIXME:not implemented in libavformat - ap->channel = video_channel; - ap->standard = video_standard; - - set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL); + if (audio_sample_rate) { + snprintf(buf, sizeof(buf), "%d", audio_sample_rate); + av_dict_set(&format_opts, "sample_rate", buf, 0); + } + if (audio_channels) { + snprintf(buf, sizeof(buf), "%d", audio_channels); + av_dict_set(&format_opts, "channels", buf, 0); + } + if (frame_rate.num) { + snprintf(buf, sizeof(buf), "%d/%d", frame_rate.num, frame_rate.den); + av_dict_set(&format_opts, "framerate", buf, 0); + } + if (frame_width && frame_height) { + snprintf(buf, sizeof(buf), 
"%dx%d", frame_width, frame_height); + av_dict_set(&format_opts, "video_size", buf, 0); + } + if (frame_pix_fmt != PIX_FMT_NONE) + av_dict_set(&format_opts, "pixel_format", av_get_pix_fmt_name(frame_pix_fmt), 0); ic->video_codec_id = - find_codec_or_die(video_codec_name , AVMEDIA_TYPE_VIDEO , 0, - avcodec_opts[AVMEDIA_TYPE_VIDEO ]->strict_std_compliance); + find_codec_or_die(video_codec_name , AVMEDIA_TYPE_VIDEO , 0); ic->audio_codec_id = - find_codec_or_die(audio_codec_name , AVMEDIA_TYPE_AUDIO , 0, - avcodec_opts[AVMEDIA_TYPE_AUDIO ]->strict_std_compliance); + find_codec_or_die(audio_codec_name , AVMEDIA_TYPE_AUDIO , 0); ic->subtitle_codec_id= - find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 0, - avcodec_opts[AVMEDIA_TYPE_SUBTITLE]->strict_std_compliance); - ic->flags |= AVFMT_FLAG_NONBLOCK | AVFMT_FLAG_PRIV_OPT; + find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 0); + ic->flags |= AVFMT_FLAG_NONBLOCK; - /* open the input file with generic libav function */ - err = av_open_input_file(&ic, filename, file_iformat, 0, ap); - if(err >= 0){ - set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL); - err = av_demuxer_open(ic, ap); - if(err < 0) - avformat_free_context(ic); + if (loop_input) { + av_log(NULL, AV_LOG_WARNING, "-loop_input is deprecated, use -loop 1\n"); + ic->loop_input = loop_input; } + + /* open the input file with generic libav function */ + err = avformat_open_input(&ic, filename, file_iformat, &format_opts); if (err < 0) { print_error(filename, err); ffmpeg_exit(1); } + assert_avoptions(format_opts); + if(opt_programid) { int i, j; int found=0; @@ -3359,11 +3387,13 @@ static int opt_input_file(const char *opt, const char *filename) opt_programid=0; } - ic->loop_input = loop_input; + /* Set AVCodecContext options for avformat_find_stream_info */ + opts = setup_find_stream_info_opts(ic, codec_opts); + orig_nb_streams = ic->nb_streams; /* If not enough info to get the stream parameters, we decode the first frames to get it. 
(used in mpeg case for example) */ - ret = av_find_stream_info(ic); + ret = avformat_find_stream_info(ic, opts); if (ret < 0 && verbose >= 0) { fprintf(stderr, "%s: could not find codec parameters\n", filename); av_close_input_file(ic); @@ -3390,39 +3420,36 @@ static int opt_input_file(const char *opt, const char *filename) for(i=0;i<ic->nb_streams;i++) { AVStream *st = ic->streams[i]; AVCodecContext *dec = st->codec; - AVInputStream *ist; + InputStream *ist; dec->thread_count = thread_count; - input_codecs = grow_array(input_codecs, sizeof(*input_codecs), &nb_input_codecs, nb_input_codecs + 1); input_streams = grow_array(input_streams, sizeof(*input_streams), &nb_input_streams, nb_input_streams + 1); ist = &input_streams[nb_input_streams - 1]; ist->st = st; ist->file_index = nb_input_files; ist->discard = 1; + ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, 0); + + if (i < nb_ts_scale) + ist->ts_scale = ts_scale[i]; switch (dec->codec_type) { case AVMEDIA_TYPE_AUDIO: - input_codecs[nb_input_codecs-1] = avcodec_find_decoder_by_name(audio_codec_name); - if(!input_codecs[nb_input_codecs-1]) - input_codecs[nb_input_codecs-1] = avcodec_find_decoder(dec->codec_id); - set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_AUDIO], AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM, input_codecs[nb_input_codecs-1]); - channel_layout = dec->channel_layout; - audio_sample_fmt = dec->sample_fmt; + ist->dec = avcodec_find_decoder_by_name(audio_codec_name); + if(!ist->dec) + ist->dec = avcodec_find_decoder(dec->codec_id); if(audio_disable) st->discard= AVDISCARD_ALL; break; case AVMEDIA_TYPE_VIDEO: - input_codecs[nb_input_codecs-1] = avcodec_find_decoder_by_name(video_codec_name); - if(!input_codecs[nb_input_codecs-1]) - input_codecs[nb_input_codecs-1] = avcodec_find_decoder(dec->codec_id); - set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_VIDEO], AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM, input_codecs[nb_input_codecs-1]); + ist->dec= avcodec_find_decoder_by_name(video_codec_name); + if(!ist->dec) + ist->dec = avcodec_find_decoder(dec->codec_id); rfps = ic->streams[i]->r_frame_rate.num; rfps_base = ic->streams[i]->r_frame_rate.den; if (dec->lowres) { dec->flags |= CODEC_FLAG_EMU_EDGE; - dec->height >>= dec->lowres; - dec->width >>= dec->lowres; } if(me_threshold) dec->debug |= FF_DEBUG_MV; @@ -3444,9 +3471,9 @@ static int opt_input_file(const char *opt, const char *filename) case AVMEDIA_TYPE_DATA: break; case AVMEDIA_TYPE_SUBTITLE: - input_codecs[nb_input_codecs-1] = avcodec_find_decoder_by_name(subtitle_codec_name); - if(!input_codecs[nb_input_codecs-1]) - input_codecs[nb_input_codecs-1] = avcodec_find_decoder(dec->codec_id); + ist->dec = avcodec_find_decoder_by_name(subtitle_codec_name); + if(!ist->dec) + ist->dec = avcodec_find_decoder(dec->codec_id); if(subtitle_disable) st->discard = AVDISCARD_ALL; break; @@ -3458,7 +3485,6 @@ static int opt_input_file(const char *opt, const char *filename) } } - input_files_ts_offset[nb_input_files] = input_ts_offset - (copy_ts ? 0 : timestamp); /* dump the file content */ if (verbose >= 0) av_dump_format(ic, nb_input_files, filename, 0); @@ -3466,16 +3492,23 @@ static int opt_input_file(const char *opt, const char *filename) input_files = grow_array(input_files, sizeof(*input_files), &nb_input_files, nb_input_files + 1); input_files[nb_input_files - 1].ctx = ic; input_files[nb_input_files - 1].ist_index = nb_input_streams - ic->nb_streams; + input_files[nb_input_files - 1].ts_offset = input_ts_offset - (copy_ts ? 
0 : timestamp); + input_files[nb_input_files - 1].nb_streams = ic->nb_streams; top_field_first = -1; - video_channel = 0; frame_rate = (AVRational){0, 0}; frame_pix_fmt = PIX_FMT_NONE; frame_height = 0; frame_width = 0; audio_sample_rate = 0; audio_channels = 0; + audio_sample_fmt = AV_SAMPLE_FMT_NONE; + av_freep(&ts_scale); + nb_ts_scale = 0; + for (i = 0; i < orig_nb_streams; i++) + av_dict_free(&opts[i]); + av_freep(&opts); av_freep(&video_codec_name); av_freep(&audio_codec_name); av_freep(&subtitle_codec_name); @@ -3530,28 +3563,24 @@ static void check_inputs(int *has_video_ptr, static void new_video_stream(AVFormatContext *oc, int file_idx) { AVStream *st; - AVOutputStream *ost; + OutputStream *ost; AVCodecContext *video_enc; enum CodecID codec_id = CODEC_ID_NONE; AVCodec *codec= NULL; - st = av_new_stream(oc, oc->nb_streams < nb_streamid_map ? streamid_map[oc->nb_streams] : 0); - if (!st) { - fprintf(stderr, "Could not alloc stream\n"); - ffmpeg_exit(1); - } - ost = new_output_stream(oc, file_idx); - if(!video_stream_copy){ if (video_codec_name) { - codec_id = find_codec_or_die(video_codec_name, AVMEDIA_TYPE_VIDEO, 1, - avcodec_opts[AVMEDIA_TYPE_VIDEO]->strict_std_compliance); + codec_id = find_codec_or_die(video_codec_name, AVMEDIA_TYPE_VIDEO, 1); codec = avcodec_find_encoder_by_name(video_codec_name); - ost->enc = codec; } else { codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, AVMEDIA_TYPE_VIDEO); codec = avcodec_find_encoder(codec_id); } + } + + ost = new_output_stream(oc, file_idx, codec); + st = ost->st; + if (!video_stream_copy) { ost->frame_aspect_ratio = frame_aspect_ratio; frame_aspect_ratio = 0; #if CONFIG_AVFILTER @@ -3560,7 +3589,6 @@ static void new_video_stream(AVFormatContext *oc, int file_idx) #endif } - avcodec_get_context_defaults3(st->codec, codec); ost->bitstream_filters = video_bitstream_filters; video_bitstream_filters= NULL; @@ -3573,12 +3601,11 @@ static void new_video_stream(AVFormatContext *oc, int file_idx) if(oc->oformat->flags & AVFMT_GLOBALHEADER) { video_enc->flags |= CODEC_FLAG_GLOBAL_HEADER; - avcodec_opts[AVMEDIA_TYPE_VIDEO]->flags|= CODEC_FLAG_GLOBAL_HEADER; } + video_enc->codec_type = AVMEDIA_TYPE_VIDEO; if (video_stream_copy) { st->stream_copy = 1; - video_enc->codec_type = AVMEDIA_TYPE_VIDEO; video_enc->sample_aspect_ratio = st->sample_aspect_ratio = av_d2q(frame_aspect_ratio*frame_height/frame_width, 255); } else { @@ -3588,7 +3615,6 @@ static void new_video_stream(AVFormatContext *oc, int file_idx) if (frame_rate.num) ost->frame_rate = frame_rate; video_enc->codec_id = codec_id; - set_context_opts(video_enc, avcodec_opts[AVMEDIA_TYPE_VIDEO], AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, codec); video_enc->width = frame_width; video_enc->height = frame_height; @@ -3600,8 +3626,7 @@ static void new_video_stream(AVFormatContext *oc, int file_idx) video_enc->gop_size = 0; if (video_qscale || same_quality) { video_enc->flags |= CODEC_FLAG_QSCALE; - video_enc->global_quality= - st->quality = FF_QP2LAMBDA * video_qscale; + video_enc->global_quality = FF_QP2LAMBDA * video_qscale; } if(intra_matrix) @@ -3670,31 +3695,22 @@ static void new_video_stream(AVFormatContext *oc, int file_idx) static void new_audio_stream(AVFormatContext *oc, int file_idx) { AVStream *st; - AVOutputStream *ost; + OutputStream *ost; AVCodec *codec= NULL; AVCodecContext *audio_enc; enum CodecID codec_id = CODEC_ID_NONE; - st = av_new_stream(oc, oc->nb_streams < nb_streamid_map ? 
streamid_map[oc->nb_streams] : 0); - if (!st) { - fprintf(stderr, "Could not alloc stream\n"); - ffmpeg_exit(1); - } - ost = new_output_stream(oc, file_idx); - if(!audio_stream_copy){ if (audio_codec_name) { - codec_id = find_codec_or_die(audio_codec_name, AVMEDIA_TYPE_AUDIO, 1, - avcodec_opts[AVMEDIA_TYPE_AUDIO]->strict_std_compliance); + codec_id = find_codec_or_die(audio_codec_name, AVMEDIA_TYPE_AUDIO, 1); codec = avcodec_find_encoder_by_name(audio_codec_name); - ost->enc = codec; } else { codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, AVMEDIA_TYPE_AUDIO); codec = avcodec_find_encoder(codec_id); } } - - avcodec_get_context_defaults3(st->codec, codec); + ost = new_output_stream(oc, file_idx, codec); + st = ost->st; ost->bitstream_filters = audio_bitstream_filters; audio_bitstream_filters= NULL; @@ -3709,25 +3725,22 @@ static void new_audio_stream(AVFormatContext *oc, int file_idx) if (oc->oformat->flags & AVFMT_GLOBALHEADER) { audio_enc->flags |= CODEC_FLAG_GLOBAL_HEADER; - avcodec_opts[AVMEDIA_TYPE_AUDIO]->flags|= CODEC_FLAG_GLOBAL_HEADER; } if (audio_stream_copy) { st->stream_copy = 1; } else { audio_enc->codec_id = codec_id; - set_context_opts(audio_enc, avcodec_opts[AVMEDIA_TYPE_AUDIO], AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, codec); if (audio_qscale > QSCALE_NONE) { audio_enc->flags |= CODEC_FLAG_QSCALE; - audio_enc->global_quality = st->quality = FF_QP2LAMBDA * audio_qscale; + audio_enc->global_quality = FF_QP2LAMBDA * audio_qscale; } if (audio_channels) audio_enc->channels = audio_channels; - audio_enc->sample_fmt = audio_sample_fmt; + if (audio_sample_fmt != AV_SAMPLE_FMT_NONE) + audio_enc->sample_fmt = audio_sample_fmt; if (audio_sample_rate) audio_enc->sample_rate = audio_sample_rate; - audio_enc->channel_layout = channel_layout; - choose_sample_fmt(st, codec); } if (audio_language) { av_dict_set(&st->metadata, "language", audio_language, 0); @@ -3743,21 +3756,16 @@ static void new_audio_stream(AVFormatContext *oc, int file_idx) static void new_data_stream(AVFormatContext *oc, int file_idx) { AVStream *st; - AVCodec *codec=NULL; + OutputStream *ost; AVCodecContext *data_enc; - st = av_new_stream(oc, oc->nb_streams < nb_streamid_map ? streamid_map[oc->nb_streams] : 0); - if (!st) { - fprintf(stderr, "Could not alloc stream\n"); - ffmpeg_exit(1); - } - new_output_stream(oc, file_idx); + ost = new_output_stream(oc, file_idx, NULL); + st = ost->st; data_enc = st->codec; if (!data_stream_copy) { fprintf(stderr, "Data stream encoding not supported yet (only streamcopy)\n"); ffmpeg_exit(1); } - avcodec_get_context_defaults3(st->codec, codec); data_enc->codec_type = AVMEDIA_TYPE_DATA; @@ -3766,7 +3774,6 @@ static void new_data_stream(AVFormatContext *oc, int file_idx) if (oc->oformat->flags & AVFMT_GLOBALHEADER) { data_enc->flags |= CODEC_FLAG_GLOBAL_HEADER; - avcodec_opts[AVMEDIA_TYPE_DATA]->flags |= CODEC_FLAG_GLOBAL_HEADER; } if (data_stream_copy) { st->stream_copy = 1; @@ -3780,30 +3787,23 @@ static void new_data_stream(AVFormatContext *oc, int file_idx) static void new_subtitle_stream(AVFormatContext *oc, int file_idx) { AVStream *st; - AVOutputStream *ost; + OutputStream *ost; AVCodec *codec=NULL; AVCodecContext *subtitle_enc; enum CodecID codec_id = CODEC_ID_NONE; - st = av_new_stream(oc, oc->nb_streams < nb_streamid_map ? 
streamid_map[oc->nb_streams] : 0); - if (!st) { - fprintf(stderr, "Could not alloc stream\n"); - ffmpeg_exit(1); - } - ost = new_output_stream(oc, file_idx); - subtitle_enc = st->codec; if(!subtitle_stream_copy){ if (subtitle_codec_name) { - codec_id = find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 1, - avcodec_opts[AVMEDIA_TYPE_SUBTITLE]->strict_std_compliance); + codec_id = find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 1); codec = avcodec_find_encoder_by_name(subtitle_codec_name); - ost->enc = codec; } else { codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, AVMEDIA_TYPE_SUBTITLE); codec = avcodec_find_encoder(codec_id); } } - avcodec_get_context_defaults3(st->codec, codec); + ost = new_output_stream(oc, file_idx, codec); + st = ost->st; + subtitle_enc = st->codec; ost->bitstream_filters = subtitle_bitstream_filters; subtitle_bitstream_filters= NULL; @@ -3815,13 +3815,11 @@ static void new_subtitle_stream(AVFormatContext *oc, int file_idx) if (oc->oformat->flags & AVFMT_GLOBALHEADER) { subtitle_enc->flags |= CODEC_FLAG_GLOBAL_HEADER; - avcodec_opts[AVMEDIA_TYPE_SUBTITLE]->flags |= CODEC_FLAG_GLOBAL_HEADER; } if (subtitle_stream_copy) { st->stream_copy = 1; } else { subtitle_enc->codec_id = codec_id; - set_context_opts(avcodec_opts[AVMEDIA_TYPE_SUBTITLE], subtitle_enc, AV_OPT_FLAG_SUBTITLE_PARAM | AV_OPT_FLAG_ENCODING_PARAM, codec); } if (subtitle_language) { @@ -3879,7 +3877,6 @@ static int opt_output_file(const char *opt, const char *filename) AVFormatContext *oc; int err, use_video, use_audio, use_subtitle, use_data; int input_has_video, input_has_audio, input_has_subtitle, input_has_data; - AVFormatParameters params, *ap = ¶ms; AVOutputFormat *file_oformat; if(nb_output_files >= FF_ARRAY_ELEMS(output_files)){ @@ -3913,23 +3910,20 @@ static int opt_output_file(const char *opt, const char *filename) use_subtitle = file_oformat->subtitle_codec != CODEC_ID_NONE || subtitle_stream_copy || subtitle_codec_name; use_data = data_stream_copy || data_codec_name; /* XXX once generic data codec will be available add a ->data_codec reference and use it here */ - /* disable if no corresponding type found and at least one - input file */ - if (nb_input_files > 0) { - check_inputs(&input_has_video, - &input_has_audio, - &input_has_subtitle, - &input_has_data); - - if (!input_has_video) - use_video = 0; - if (!input_has_audio) - use_audio = 0; - if (!input_has_subtitle) - use_subtitle = 0; - if (!input_has_data) - use_data = 0; - } + /* disable if no corresponding type found */ + check_inputs(&input_has_video, + &input_has_audio, + &input_has_subtitle, + &input_has_data); + + if (!input_has_video) + use_video = 0; + if (!input_has_audio) + use_audio = 0; + if (!input_has_subtitle) + use_subtitle = 0; + if (!input_has_data) + use_data = 0; /* manual disable */ if (audio_disable) use_audio = 0; @@ -3942,12 +3936,11 @@ static int opt_output_file(const char *opt, const char *filename) if (use_subtitle) new_subtitle_stream(oc, nb_output_files); if (use_data) new_data_stream(oc, nb_output_files); - oc->timestamp = recording_timestamp; - av_dict_copy(&oc->metadata, metadata, 0); av_dict_free(&metadata); } + av_dict_copy(&output_opts[nb_output_files], format_opts, 0); output_files[nb_output_files++] = oc; /* check filename in case of an image number is expected */ @@ -3987,24 +3980,20 @@ static int opt_output_file(const char *opt, const char *filename) } } - memset(ap, 0, sizeof(*ap)); - if (av_set_parameters(oc, ap) < 0) { - fprintf(stderr, "%s: Invalid encoding 
parameters\n", - oc->filename); - ffmpeg_exit(1); - } - oc->preload= (int)(mux_preload*AV_TIME_BASE); oc->max_delay= (int)(mux_max_delay*AV_TIME_BASE); - oc->loop_output = loop_output; - set_context_opts(oc, avformat_opts, AV_OPT_FLAG_ENCODING_PARAM, NULL); + if (loop_output >= 0) { + av_log(NULL, AV_LOG_WARNING, "-loop_output is deprecated, use -loop\n"); + oc->loop_output = loop_output; + } frame_rate = (AVRational){0, 0}; frame_width = 0; frame_height = 0; audio_sample_rate = 0; audio_channels = 0; + audio_sample_fmt = AV_SAMPLE_FMT_NONE; av_freep(&forced_key_frames); uninit_opts(); @@ -4072,16 +4061,18 @@ static void parse_matrix_coeffs(uint16_t *dest, const char *str) } } -static void opt_inter_matrix(const char *arg) +static int opt_inter_matrix(const char *opt, const char *arg) { inter_matrix = av_mallocz(sizeof(uint16_t) * 64); parse_matrix_coeffs(inter_matrix, arg); + return 0; } -static void opt_intra_matrix(const char *arg) +static int opt_intra_matrix(const char *opt, const char *arg) { intra_matrix = av_mallocz(sizeof(uint16_t) * 64); parse_matrix_coeffs(intra_matrix, arg); + return 0; } static void show_usage(void) @@ -4091,10 +4082,11 @@ static void show_usage(void) printf("\n"); } -static void show_help(void) +static int opt_help(const char *opt, const char *arg) { AVCodec *c; AVOutputFormat *oformat = NULL; + AVInputFormat *iformat = NULL; av_log_set_callback(log_callback_help); show_usage(); @@ -4145,7 +4137,16 @@ static void show_help(void) } } + /* individual demuxer options */ + while ((iformat = av_iformat_next(iformat))) { + if (iformat->priv_class) { + av_opt_show2(&iformat->priv_class, NULL, AV_OPT_FLAG_DECODING_PARAM, 0); + printf("\n"); + } + } + av_opt_show2(sws_opts, NULL, AV_OPT_FLAG_ENCODING_PARAM|AV_OPT_FLAG_DECODING_PARAM, 0); + return 0; } static int opt_target(const char *opt, const char *arg) @@ -4376,10 +4377,10 @@ static void log_callback_null(void* ptr, int level, const char* fmt, va_list vl) { } -static void opt_passlogfile(const char *arg) +static int opt_passlogfile(const char *opt, const char *arg) { pass_logfilename_prefix = arg; - opt_default("passlogfile", arg); + return opt_default("passlogfile", arg); } static const OptionDef options[] = { @@ -4410,8 +4411,8 @@ static const OptionDef options[] = { { "hex", OPT_BOOL | OPT_EXPERT, {(void*)&do_hex_dump}, "when dumping packets, also dump the payload" }, { "re", OPT_BOOL | OPT_EXPERT, {(void*)&rate_emu}, "read input at native frame rate", "" }, - { "loop_input", OPT_BOOL | OPT_EXPERT, {(void*)&loop_input}, "loop (current only works with images)" }, - { "loop_output", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&loop_output}, "number of times to loop output in formats that support looping (0 loops forever)", "" }, + { "loop_input", OPT_BOOL | OPT_EXPERT, {(void*)&loop_input}, "deprecated, use -loop" }, + { "loop_output", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&loop_output}, "deprecated, use -loop", "" }, { "v", HAS_ARG, {(void*)opt_verbose}, "set ffmpeg verbosity level", "number" }, { "target", HAS_ARG, {(void*)opt_target}, "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\", \"dv50\", \"pal-vcd\", \"ntsc-svcd\", ...)", "type" }, { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" }, @@ -4427,8 +4428,6 @@ static const OptionDef options[] = { { "copyinkf", OPT_BOOL | OPT_EXPERT, {(void*)©_initial_nonkeyframes}, "copy initial non-keyframes" }, /* video options */ - { "b", HAS_ARG | OPT_VIDEO, {(void*)opt_bitrate}, "set bitrate (in bits/s)", "bitrate" }, - { 
"vb", HAS_ARG | OPT_VIDEO, {(void*)opt_bitrate}, "set bitrate (in bits/s)", "bitrate" }, { "vframes", OPT_INT | HAS_ARG | OPT_VIDEO, {(void*)&max_frames[AVMEDIA_TYPE_VIDEO]}, "set the number of video frames to record", "number" }, { "r", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_rate}, "set frame rate (Hz value, fraction or abbreviation)", "rate" }, { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" }, @@ -4476,7 +4475,6 @@ static const OptionDef options[] = { { "force_key_frames", OPT_STRING | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void *)&forced_key_frames}, "force key frames at specified timestamps", "timestamps" }, /* audio options */ - { "ab", HAS_ARG | OPT_AUDIO, {(void*)opt_bitrate}, "set bitrate (in bits/s)", "bitrate" }, { "aframes", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&max_frames[AVMEDIA_TYPE_AUDIO]}, "set the number of audio frames to record", "number" }, { "aq", OPT_FLOAT | HAS_ARG | OPT_AUDIO, {(void*)&audio_qscale}, "set audio quality (codec-specific)", "quality", }, { "ar", HAS_ARG | OPT_AUDIO, {(void*)opt_audio_rate}, "set audio sampling rate (in Hz)", "rate" }, @@ -4497,8 +4495,8 @@ static const OptionDef options[] = { { "stag", HAS_ARG | OPT_EXPERT | OPT_SUBTITLE, {(void*)opt_codec_tag}, "force subtitle tag/fourcc", "fourcc/tag" }, /* grab options */ - { "vc", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_GRAB, {(void*)opt_video_channel}, "set video grab channel (DV1394 only)", "channel" }, - { "tvstd", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_GRAB, {(void*)opt_video_standard}, "set television standard (NTSC, PAL (SECAM))", "standard" }, + { "vc", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_GRAB, {(void*)opt_video_channel}, "deprecated, use -channel", "channel" }, + { "tvstd", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_GRAB, {(void*)opt_video_standard}, "deprecated, use -standard", "standard" }, { "isync", OPT_BOOL | OPT_EXPERT | OPT_GRAB, {(void*)&input_sync}, "sync read on input", "" }, /* muxer options */ @@ -25,8 +25,10 @@ #include <limits.h> #include "libavutil/avstring.h" #include "libavutil/colorspace.h" +#include "libavutil/mathematics.h" #include "libavutil/pixdesc.h" #include "libavutil/imgutils.h" +#include "libavutil/dict.h" #include "libavutil/parseutils.h" #include "libavutil/samplefmt.h" #include "libavutil/avassert.h" @@ -211,7 +213,7 @@ typedef struct VideoState { int refresh; } VideoState; -static void show_help(void); +static int opt_help(const char *opt, const char *arg); /* options specified by the user */ static AVInputFormat *file_iformat; @@ -221,9 +223,6 @@ static int fs_screen_width; static int fs_screen_height; static int screen_width = 0; static int screen_height = 0; -static int frame_width = 0; -static int frame_height = 0; -static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE; static int audio_disable; static int video_disable; static int wanted_stream[AVMEDIA_TYPE_NB]={ @@ -1429,7 +1428,7 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_ static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt) { - int len1 av_unused, got_picture, i; + int got_picture, i; if (packet_queue_get(&is->videoq, pkt, 1) < 0) return -1; @@ -1456,9 +1455,7 @@ static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacke return 0; } - len1 = avcodec_decode_video2(is->video_st->codec, - frame, &got_picture, - pkt); + avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt); if (got_picture) { if (decoder_reorder_pts == -1) { @@ -1655,6 +1652,7 @@ 
static int input_config_props(AVFilterLink *link) link->w = c->width; link->h = c->height; + link->sample_aspect_ratio = priv->is->video_st->sample_aspect_ratio; link->time_base = priv->is->video_st->time_base; return 0; @@ -1690,10 +1688,10 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src", NULL, is, graph)) < 0) - goto the_end; + return ret; if ((ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out", NULL, pix_fmts, graph)) < 0) - goto the_end; + return ret; if(vfilters) { AVFilterInOut *outputs = avfilter_inout_alloc(); @@ -1710,18 +1708,18 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c inputs->next = NULL; if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0) - goto the_end; + return ret; av_freep(&vfilters); } else { if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0) - goto the_end; + return ret; } if ((ret = avfilter_graph_config(graph, NULL)) < 0) - goto the_end; + return ret; is->out_video_filter = filt_out; -the_end: + return ret; } @@ -1805,7 +1803,7 @@ static int subtitle_thread(void *arg) VideoState *is = arg; SubPicture *sp; AVPacket pkt1, *pkt = &pkt1; - int len1 av_unused, got_subtitle; + int got_subtitle; double pts; int i, j; int r, g, b, y, u, v, a; @@ -1829,7 +1827,7 @@ static int subtitle_thread(void *arg) SDL_UnlockMutex(is->subpq_mutex); if (is->subtitleq.abort_request) - goto the_end; + return 0; sp = &is->subpq[is->subpq_windex]; @@ -1839,9 +1837,9 @@ static int subtitle_thread(void *arg) if (pkt->pts != AV_NOPTS_VALUE) pts = av_q2d(is->subtitle_st->time_base)*pkt->pts; - len1 = avcodec_decode_subtitle2(is->subtitle_st->codec, - &sp->sub, &got_subtitle, - pkt); + avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub, + &got_subtitle, pkt); + if (got_subtitle && sp->sub.format == 0) { sp->pts = pts; @@ -1866,7 +1864,6 @@ static int subtitle_thread(void *arg) } av_free_packet(pkt); } - the_end: return 0; } @@ -2112,11 +2109,15 @@ static int stream_component_open(VideoState *is, int stream_index) AVCodecContext *avctx; AVCodec *codec; SDL_AudioSpec wanted_spec, spec; + AVDictionary *opts; + AVDictionaryEntry *t = NULL; if (stream_index < 0 || stream_index >= ic->nb_streams) return -1; avctx = ic->streams[stream_index]->codec; + opts = filter_codec_opts(codec_opts, avctx->codec_id, 0); + /* prepare audio output */ if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) { if (avctx->channels > 0) { @@ -2142,13 +2143,16 @@ static int stream_component_open(VideoState *is, int stream_index) avctx->error_concealment= error_concealment; avctx->thread_count= thread_count; - set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec); - if(codec->capabilities & CODEC_CAP_DR1) avctx->flags |= CODEC_FLAG_EMU_EDGE; - if (avcodec_open(avctx, codec) < 0) + if (!codec || + avcodec_open2(avctx, codec, &opts) < 0) return -1; + if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) { + av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key); + return AVERROR_OPTION_NOT_FOUND; + } /* prepare audio output */ if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) { @@ -2295,15 +2299,15 @@ static int decode_interrupt_cb(void) static int read_thread(void *arg) { VideoState *is = arg; - AVFormatContext *ic; + AVFormatContext *ic = NULL; int err, i, ret; int st_index[AVMEDIA_TYPE_NB]; AVPacket pkt1, *pkt = &pkt1; - AVFormatParameters params, *ap = ¶ms; int eof=0; int pkt_in_play_range = 
0; - - ic = avformat_alloc_context(); + AVDictionaryEntry *t; + AVDictionary **opts; + int orig_nb_streams; memset(st_index, -1, sizeof(st_index)); is->video_stream = -1; @@ -2313,41 +2317,35 @@ static int read_thread(void *arg) global_video_state = is; avio_set_interrupt_cb(decode_interrupt_cb); - memset(ap, 0, sizeof(*ap)); - - ap->prealloced_context = 1; - ap->width = frame_width; - ap->height= frame_height; - ap->time_base= (AVRational){1, 25}; - ap->pix_fmt = frame_pix_fmt; - ic->flags |= AVFMT_FLAG_PRIV_OPT; - - - err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap); - if (err >= 0) { - set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL); - err = av_demuxer_open(ic, ap); - if(err < 0){ - avformat_free_context(ic); - ic= NULL; - } - } + err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts); if (err < 0) { print_error(is->filename, err); ret = -1; goto fail; } + if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) { + av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key); + ret = AVERROR_OPTION_NOT_FOUND; + goto fail; + } is->ic = ic; if(genpts) ic->flags |= AVFMT_FLAG_GENPTS; - err = av_find_stream_info(ic); + opts = setup_find_stream_info_opts(ic, codec_opts); + orig_nb_streams = ic->nb_streams; + + err = avformat_find_stream_info(ic, opts); if (err < 0) { fprintf(stderr, "%s: could not find codec parameters\n", is->filename); ret = -1; goto fail; } + for (i = 0; i < orig_nb_streams; i++) + av_dict_free(&opts[i]); + av_freep(&opts); + if(ic->pb) ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end @@ -2813,15 +2811,8 @@ static void event_loop(void) static int opt_frame_size(const char *opt, const char *arg) { - if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) { - fprintf(stderr, "Incorrect frame size\n"); - return AVERROR(EINVAL); - } - if ((frame_width % 2) != 0 || (frame_height % 2) != 0) { - fprintf(stderr, "Frame size must be a multiple of 2\n"); - return AVERROR(EINVAL); - } - return 0; + av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n"); + return opt_default("video_size", arg); } static int opt_width(const char *opt, const char *arg) @@ -2848,8 +2839,8 @@ static int opt_format(const char *opt, const char *arg) static int opt_frame_pix_fmt(const char *opt, const char *arg) { - frame_pix_fmt = av_get_pix_fmt(arg); - return 0; + av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n"); + return opt_default("pixel_format", arg); } static int opt_sync(const char *opt, const char *arg) @@ -2964,7 +2955,7 @@ static void show_usage(void) printf("\n"); } -static void show_help(void) +static int opt_help(const char *opt, const char *arg) { av_log_set_callback(log_callback_help); show_usage(); @@ -2996,6 +2987,7 @@ static void show_help(void) "down/up seek backward/forward 1 minute\n" "mouse click seek to percentage in file corresponding to fraction of width\n" ); + return 0; } /* Called from the main */ @@ -3039,6 +3031,7 @@ int main(int argc, char **argv) #endif if (SDL_Init (flags)) { fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); + fprintf(stderr, "(Did you set the DISPLAY variable?)\n"); exit(1); } diff --git a/ffpresets/libx264-baseline.ffpreset b/ffpresets/libx264-baseline.ffpreset deleted file mode 100644 index ee7654bdec..0000000000 --- a/ffpresets/libx264-baseline.ffpreset +++ /dev/null @@ -1,4 +0,0 @@ -coder=0 -bf=0 -flags2=-wpred-dct8x8 -wpredp=0 @@ -143,6 +143,7 @@ static void 
show_packet(AVFormatContext *fmt_ctx, AVPacket *pkt) printf("pos=%"PRId64"\n" , pkt->pos); printf("flags=%c\n" , pkt->flags & AV_PKT_FLAG_KEY ? 'K' : '_'); printf("[/PACKET]\n"); + fflush(stdout); } static void show_packets(AVFormatContext *fmt_ctx) @@ -201,6 +202,7 @@ static void show_stream(AVFormatContext *fmt_ctx, int stream_idx) } printf("pix_fmt=%s\n", dec_ctx->pix_fmt != PIX_FMT_NONE ? av_pix_fmt_descriptors[dec_ctx->pix_fmt].name : "unknown"); + printf("level=%d\n", dec_ctx->level); break; case AVMEDIA_TYPE_AUDIO: @@ -231,6 +233,7 @@ static void show_stream(AVFormatContext *fmt_ctx, int stream_idx) printf("TAG:%s=%s\n", tag->key, tag->value); printf("[/STREAM]\n"); + fflush(stdout); } static void show_format(AVFormatContext *fmt_ctx) @@ -257,6 +260,7 @@ static void show_format(AVFormatContext *fmt_ctx) printf("TAG:%s=%s\n", tag->key, tag->value); printf("[/FORMAT]\n"); + fflush(stdout); } static int open_input_file(AVFormatContext **fmt_ctx_ptr, const char *filename) @@ -291,7 +295,7 @@ static int open_input_file(AVFormatContext **fmt_ctx_ptr, const char *filename) if (!(codec = avcodec_find_decoder(stream->codec->codec_id))) { fprintf(stderr, "Unsupported codec with id %d for input stream %d\n", stream->codec->codec_id, stream->index); - } else if (avcodec_open(stream->codec, codec) < 0) { + } else if (avcodec_open2(stream->codec, codec, NULL) < 0) { fprintf(stderr, "Error while opening codec for input stream %d\n", stream->index); } @@ -353,7 +357,7 @@ static int opt_input_file(const char *opt, const char *arg) return 0; } -static void show_help(void) +static int opt_help(const char *opt, const char *arg) { av_log_set_callback(log_callback_help); show_usage(); @@ -361,14 +365,16 @@ static void show_help(void) printf("\n"); av_opt_show2(avformat_opts, NULL, AV_OPT_FLAG_DECODING_PARAM, 0); + return 0; } -static void opt_pretty(void) +static int opt_pretty(const char *opt, const char *arg) { show_value_unit = 1; use_value_prefix = 1; use_byte_value_binary_prefix = 1; use_value_sexagesimal_format = 1; + return 0; } static const OptionDef options[] = { diff --git a/ffserver.c b/ffserver.c index 0eac639137..538f5c0f29 100644 --- a/ffserver.c +++ b/ffserver.c @@ -37,6 +37,7 @@ #include "libavutil/avstring.h" #include "libavutil/lfg.h" #include "libavutil/dict.h" +#include "libavutil/mathematics.h" #include "libavutil/random_seed.h" #include "libavutil/parseutils.h" #include "libavcodec/opt.h" @@ -2118,7 +2119,7 @@ static void open_parser(AVFormatContext *s, int i) codec = avcodec_find_decoder(st->codec->codec_id); if (codec && (codec->capabilities & CODEC_CAP_PARSE_ONLY)) { st->codec->parse_only = 1; - if (avcodec_open(st->codec, codec) < 0) + if (avcodec_open2(st->codec, codec, NULL) < 0) st->codec->parse_only = 0; } } @@ -2129,13 +2130,12 @@ static int open_input_stream(HTTPContext *c, const char *info) char buf[128]; char input_filename[1024]; AVFormatContext *s = NULL; - int buf_size, i, ret; + int i, ret; int64_t stream_pos; /* find file name */ if (c->stream->feed) { strcpy(input_filename, c->stream->feed->feed_filename); - buf_size = FFM_PACKET_SIZE; /* compute position (absolute time) */ if (av_find_info_tag(buf, sizeof(buf), "date", info)) { if ((ret = av_parse_time(&stream_pos, buf, 0)) < 0) @@ -2147,7 +2147,6 @@ static int open_input_stream(HTTPContext *c, const char *info) stream_pos = av_gettime() - c->stream->prebuffer * (int64_t)1000; } else { strcpy(input_filename, c->stream->feed_filename); - buf_size = 0; /* compute position (relative time) */ if (av_find_info_tag(buf, 
sizeof(buf), "date", info)) { if ((ret = av_parse_time(&stream_pos, buf, 1)) < 0) @@ -3472,7 +3471,7 @@ static AVStream *add_av_stream1(FFStream *stream, AVCodecContext *codec, int cop if (!fst) return NULL; if (copy) { - fst->codec= avcodec_alloc_context(); + fst->codec = avcodec_alloc_context3(NULL); memcpy(fst->codec, codec, sizeof(AVCodecContext)); if (codec->extradata_size) { fst->codec->extradata = av_malloc(codec->extradata_size); @@ -3512,7 +3511,7 @@ static int add_av_stream(FFStream *feed, AVStream *st) case AVMEDIA_TYPE_AUDIO: if (av1->channels == av->channels && av1->sample_rate == av->sample_rate) - goto found; + return i; break; case AVMEDIA_TYPE_VIDEO: if (av1->width == av->width && @@ -3520,7 +3519,7 @@ static int add_av_stream(FFStream *feed, AVStream *st) av1->time_base.den == av->time_base.den && av1->time_base.num == av->time_base.num && av1->gop_size == av->gop_size) - goto found; + return i; break; default: abort(); @@ -3532,8 +3531,6 @@ static int add_av_stream(FFStream *feed, AVStream *st) if (!fst) return -1; return feed->nb_streams - 1; - found: - return i; } static void remove_stream(FFStream *stream) @@ -3654,21 +3651,13 @@ static void build_feed_streams(void) for(stream = first_stream; stream != NULL; stream = stream->next) { feed = stream->feed; if (feed) { - if (!stream->is_feed) { - /* we handle a stream coming from a feed */ - for(i=0;i<stream->nb_streams;i++) - stream->feed_streams[i] = add_av_stream(feed, stream->streams[i]); - } - } - } - - /* gather all streams */ - for(stream = first_stream; stream != NULL; stream = stream->next) { - feed = stream->feed; - if (feed) { if (stream->is_feed) { for(i=0;i<stream->nb_streams;i++) stream->feed_streams[i] = i; + } else { + /* we handle a stream coming from a feed */ + for(i=0;i<stream->nb_streams;i++) + stream->feed_streams[i] = add_av_stream(feed, stream->streams[i]); } } } @@ -3894,7 +3883,7 @@ static void add_codec(FFStream *stream, AVCodecContext *av) st = av_mallocz(sizeof(AVStream)); if (!st) return; - st->codec = avcodec_alloc_context(); + st->codec = avcodec_alloc_context3(NULL); stream->streams[stream->nb_streams++] = st; memcpy(st->codec, av, sizeof(AVCodecContext)); } @@ -4662,12 +4651,13 @@ static void opt_debug(void) logfilename[0] = '-'; } -static void show_help(void) +static int opt_help(const char *opt, const char *arg) { printf("usage: ffserver [options]\n" "Hyper fast multi format Audio/Video streaming server\n"); printf("\n"); show_help_options(options, "Main options:\n", 0, 0); + return 0; } static const OptionDef options[] = { diff --git a/ffserver.h b/ffserver.h deleted file mode 100644 index c76752fa43..0000000000 --- a/ffserver.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Multiple format streaming server - * copyright (c) 2002 Fabrice Bellard - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef FFMPEG_FFSERVER_H -#define FFMPEG_FFSERVER_H - -/* interface between ffserver and modules */ - -void ffserver_module_init(void); - -#endif /* FFMPEG_FFSERVER_H */ diff --git a/libavcodec/4xm.c b/libavcodec/4xm.c index d89b494b09..cf98e91766 100644 --- a/libavcodec/4xm.c +++ b/libavcodec/4xm.c @@ -865,15 +865,14 @@ static av_cold int decode_end(AVCodecContext *avctx){ } AVCodec ff_fourxm_decoder = { - "4xm", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_4XM, - sizeof(FourXContext), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, + .name = "4xm", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_4XM, + .priv_data_size = sizeof(FourXContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("4X Movie"), }; diff --git a/libavcodec/8bps.c b/libavcodec/8bps.c index 390ce8f72f..92aca8163a 100644 --- a/libavcodec/8bps.c +++ b/libavcodec/8bps.c @@ -221,14 +221,13 @@ static av_cold int decode_end(AVCodecContext *avctx) AVCodec ff_eightbps_decoder = { - "8bps", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_8BPS, - sizeof(EightBpsContext), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, - .long_name = NULL_IF_CONFIG_SMALL("QuickTime 8BPS video"), + .name = "8bps", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_8BPS, + .priv_data_size = sizeof(EightBpsContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, + .long_name = NULL_IF_CONFIG_SMALL("QuickTime 8BPS video"), }; diff --git a/libavcodec/8svx.c b/libavcodec/8svx.c index 5d94e005a2..3864d61857 100644 --- a/libavcodec/8svx.c +++ b/libavcodec/8svx.c @@ -22,6 +22,8 @@ /** * @file * 8svx audio decoder + * @author Jaikrishnan Menon + * * supports: fibonacci delta encoding * : exponential encoding * diff --git a/libavcodec/Makefile b/libavcodec/Makefile index 4e9906e40c..5092c19761 100644 --- a/libavcodec/Makefile +++ b/libavcodec/Makefile @@ -94,7 +94,7 @@ OBJS-$(CONFIG_AURA2_DECODER) += aura.o OBJS-$(CONFIG_AVS_DECODER) += avs.o OBJS-$(CONFIG_BETHSOFTVID_DECODER) += bethsoftvideo.o OBJS-$(CONFIG_BFI_DECODER) += bfi.o -OBJS-$(CONFIG_BINK_DECODER) += bink.o binkidct.o +OBJS-$(CONFIG_BINK_DECODER) += bink.o binkdsp.o OBJS-$(CONFIG_BINKAUDIO_DCT_DECODER) += binkaudio.o wma.o OBJS-$(CONFIG_BINKAUDIO_RDFT_DECODER) += binkaudio.o wma.o OBJS-$(CONFIG_BMP_DECODER) += bmp.o msrledec.o @@ -128,9 +128,9 @@ OBJS-$(CONFIG_DVDSUB_ENCODER) += dvdsubenc.o OBJS-$(CONFIG_DVVIDEO_DECODER) += dv.o dvdata.o OBJS-$(CONFIG_DVVIDEO_ENCODER) += dv.o dvdata.o OBJS-$(CONFIG_DXA_DECODER) += dxa.o -OBJS-$(CONFIG_EAC3_DECODER) += eac3dec.o eac3dec_data.o +OBJS-$(CONFIG_EAC3_DECODER) += eac3dec.o eac3_data.o OBJS-$(CONFIG_EAC3_ENCODER) += eac3enc.o ac3enc.o ac3enc_float.o \ - ac3tab.o ac3.o kbdwin.o + ac3tab.o ac3.o kbdwin.o eac3_data.o OBJS-$(CONFIG_EACMV_DECODER) += eacmv.o OBJS-$(CONFIG_EAMAD_DECODER) += eamad.o eaidct.o mpeg12.o \ mpeg12data.o mpegvideo.o \ @@ -154,6 +154,7 @@ OBJS-$(CONFIG_FLAC_ENCODER) += flacenc.o flacdata.o flac.o vorbis_dat OBJS-$(CONFIG_FLASHSV_DECODER) += flashsv.o OBJS-$(CONFIG_FLASHSV_ENCODER) += flashsvenc.o OBJS-$(CONFIG_FLASHSV2_ENCODER) += flashsv2enc.o +OBJS-$(CONFIG_FLASHSV2_DECODER) += flashsv.o OBJS-$(CONFIG_FLIC_DECODER) += flicvideo.o 
OBJS-$(CONFIG_FOURXM_DECODER) += 4xm.o OBJS-$(CONFIG_FRAPS_DECODER) += fraps.o @@ -551,6 +552,7 @@ OBJS-$(CONFIG_MATROSKA_DEMUXER) += mpeg4audio.o mpegaudiodata.o OBJS-$(CONFIG_MATROSKA_MUXER) += xiph.o mpeg4audio.o \ flacdec.o flacdata.o flac.o \ mpegaudiodata.o vorbis_data.o +OBJS-$(CONFIG_MP3_MUXER) += mpegaudiodata.o mpegaudiodecheader.o OBJS-$(CONFIG_MOV_DEMUXER) += mpeg4audio.o mpegaudiodata.o OBJS-$(CONFIG_MOV_MUXER) += mpeg4audio.o mpegaudiodata.o OBJS-$(CONFIG_MPEGTS_MUXER) += mpegvideo.o mpeg4audio.o @@ -667,7 +669,7 @@ OBJS-$(CONFIG_MLIB) += mlib/dsputil_mlib.o \ # well. OBJS-$(!CONFIG_SMALL) += inverse.o --include $(SUBDIR)$(ARCH)/Makefile +-include $(SRC_PATH)/$(SUBDIR)$(ARCH)/Makefile SKIPHEADERS += %_tablegen.h \ %_tables.h \ @@ -694,7 +696,7 @@ DIRS = alpha arm bfin mlib ppc ps2 sh4 sparc x86 CLEANFILES = *_tables.c *_tables.h *_tablegen$(HOSTEXESUF) -include $(SUBDIR)../subdir.mak +include $(SRC_PATH)/subdir.mak $(SUBDIR)dct-test$(EXESUF): $(SUBDIR)dctref.o diff --git a/libavcodec/aac.h b/libavcodec/aac.h index f089ee9a29..477acb5d4e 100644 --- a/libavcodec/aac.h +++ b/libavcodec/aac.h @@ -258,7 +258,7 @@ typedef struct { DynamicRangeControl che_drc; /** - * @defgroup elements Channel element related data. + * @name Channel element related data * @{ */ enum ChannelPosition che_pos[4][MAX_ELEM_ID]; /**< channel element channel mapping with the @@ -270,14 +270,15 @@ typedef struct { /** @} */ /** - * @defgroup temporary aligned temporary buffers (We do not want to have these on the stack.) + * @name temporary aligned temporary buffers + * (We do not want to have these on the stack.) * @{ */ DECLARE_ALIGNED(32, float, buf_mdct)[1024]; /** @} */ /** - * @defgroup tables Computed / set up during initialization. + * @name Computed / set up during initialization * @{ */ FFTContext mdct; @@ -289,7 +290,7 @@ typedef struct { /** @} */ /** - * @defgroup output Members used for output interleaving. + * @name Members used for output interleaving * @{ */ float *output_data[MAX_CHANNELS]; ///< Points to each element's 'ret' buffer (PCM output). 
diff --git a/libavcodec/aaccoder.c b/libavcodec/aaccoder.c index a68aa025de..b61af18056 100644 --- a/libavcodec/aaccoder.c +++ b/libavcodec/aaccoder.c @@ -346,7 +346,7 @@ static void encode_window_bands_info(AACEncContext *s, SingleChannelElement *sce float cost_stay_here, cost_get_here; float rd = 0.0f; for (w = 0; w < group_len; w++) { - FFPsyBand *band = &s->psy.psy_bands[s->cur_channel*PSY_MAX_BANDS+(win+w)*16+swb]; + FFPsyBand *band = &s->psy.ch[s->cur_channel].psy_bands[(win+w)*16+swb]; rd += quantize_band_cost(s, sce->coeffs + start + w*128, s->scoefs + start + w*128, size, sce->sf_idx[(win+w)*16+swb], cb, @@ -433,10 +433,26 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce, for (swb = 0; swb < max_sfb; swb++) { size = sce->ics.swb_sizes[swb]; if (sce->zeroes[win*16 + swb]) { - for (cb = 0; cb < 12; cb++) { - path[swb+1][cb].prev_idx = cb; - path[swb+1][cb].cost = path[swb][cb].cost; - path[swb+1][cb].run = path[swb][cb].run + 1; + float cost_stay_here = path[swb][0].cost; + float cost_get_here = next_minrd + run_bits + 4; + if ( run_value_bits[sce->ics.num_windows == 8][path[swb][0].run] + != run_value_bits[sce->ics.num_windows == 8][path[swb][0].run+1]) + cost_stay_here += run_bits; + if (cost_get_here < cost_stay_here) { + path[swb+1][0].prev_idx = next_mincb; + path[swb+1][0].cost = cost_get_here; + path[swb+1][0].run = 1; + } else { + path[swb+1][0].prev_idx = 0; + path[swb+1][0].cost = cost_stay_here; + path[swb+1][0].run = path[swb][0].run + 1; + } + next_minrd = path[swb+1][0].cost; + next_mincb = 0; + for (cb = 1; cb < 12; cb++) { + path[swb+1][cb].cost = 61450; + path[swb+1][cb].prev_idx = -1; + path[swb+1][cb].run = 0; } } else { float minrd = next_minrd; @@ -610,7 +626,7 @@ static void search_for_quantizers_anmr(AVCodecContext *avctx, AACEncContext *s, qmin = INT_MAX; qmax = 0.0f; for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) { - FFPsyBand *band = &s->psy.psy_bands[s->cur_channel*PSY_MAX_BANDS+(w+w2)*16+g]; + FFPsyBand *band = &s->psy.ch[s->cur_channel].psy_bands[(w+w2)*16+g]; if (band->energy <= band->threshold || band->threshold == 0.0f) { sce->zeroes[(w+w2)*16+g] = 1; continue; @@ -639,7 +655,7 @@ static void search_for_quantizers_anmr(AVCodecContext *avctx, AACEncContext *s, float dist = 0; int cb = find_min_book(maxval, sce->sf_idx[w*16+g]); for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) { - FFPsyBand *band = &s->psy.psy_bands[s->cur_channel*PSY_MAX_BANDS+(w+w2)*16+g]; + FFPsyBand *band = &s->psy.ch[s->cur_channel].psy_bands[(w+w2)*16+g]; dist += quantize_band_cost(s, coefs + w2*128, s->scoefs + start + w2*128, sce->ics.swb_sizes[g], q + q0, cb, lambda / band->threshold, INFINITY, NULL); } @@ -712,7 +728,7 @@ static void search_for_quantizers_twoloop(AVCodecContext *avctx, int nz = 0; float uplim = 0.0f; for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) { - FFPsyBand *band = &s->psy.psy_bands[s->cur_channel*PSY_MAX_BANDS+(w+w2)*16+g]; + FFPsyBand *band = &s->psy.ch[s->cur_channel].psy_bands[(w+w2)*16+g]; uplim += band->threshold; if (band->energy <= band->threshold || band->threshold == 0.0f) { sce->zeroes[(w+w2)*16+g] = 1; @@ -1012,7 +1028,7 @@ static void search_for_quantizers_fast(AVCodecContext *avctx, AACEncContext *s, for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { for (g = 0; g < sce->ics.num_swb; g++) { for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) { - FFPsyBand *band = &s->psy.psy_bands[s->cur_channel*PSY_MAX_BANDS+(w+w2)*16+g]; + FFPsyBand *band = &s->psy.ch[s->cur_channel].psy_bands[(w+w2)*16+g]; if 
(band->energy <= band->threshold) { sce->sf_idx[(w+w2)*16+g] = 218; sce->zeroes[(w+w2)*16+g] = 1; @@ -1050,8 +1066,8 @@ static void search_for_ms(AACEncContext *s, ChannelElement *cpe, if (!cpe->ch[0].zeroes[w*16+g] && !cpe->ch[1].zeroes[w*16+g]) { float dist1 = 0.0f, dist2 = 0.0f; for (w2 = 0; w2 < sce0->ics.group_len[w]; w2++) { - FFPsyBand *band0 = &s->psy.psy_bands[(s->cur_channel+0)*PSY_MAX_BANDS+(w+w2)*16+g]; - FFPsyBand *band1 = &s->psy.psy_bands[(s->cur_channel+1)*PSY_MAX_BANDS+(w+w2)*16+g]; + FFPsyBand *band0 = &s->psy.ch[s->cur_channel+0].psy_bands[(w+w2)*16+g]; + FFPsyBand *band1 = &s->psy.ch[s->cur_channel+1].psy_bands[(w+w2)*16+g]; float minthr = FFMIN(band0->threshold, band1->threshold); float maxthr = FFMAX(band0->threshold, band1->threshold); for (i = 0; i < sce0->ics.swb_sizes[g]; i++) { diff --git a/libavcodec/aacdec.c b/libavcodec/aacdec.c index cb8760801a..927d4314fa 100644 --- a/libavcodec/aacdec.c +++ b/libavcodec/aacdec.c @@ -532,6 +532,22 @@ static void reset_all_predictors(PredictorState *ps) reset_predict_state(&ps[i]); } +static int sample_rate_idx (int rate) +{ + if (92017 <= rate) return 0; + else if (75132 <= rate) return 1; + else if (55426 <= rate) return 2; + else if (46009 <= rate) return 3; + else if (37566 <= rate) return 4; + else if (27713 <= rate) return 5; + else if (23004 <= rate) return 6; + else if (18783 <= rate) return 7; + else if (13856 <= rate) return 8; + else if (11502 <= rate) return 9; + else if (9391 <= rate) return 10; + else return 11; +} + static void reset_predictor_group(PredictorState *ps, int group_num) { int i; @@ -558,6 +574,26 @@ static av_cold int aac_decode_init(AVCodecContext *avctx) avctx->extradata, avctx->extradata_size) < 0) return -1; + } else { + int sr, i; + enum ChannelPosition new_che_pos[4][MAX_ELEM_ID]; + + sr = sample_rate_idx(avctx->sample_rate); + ac->m4ac.sampling_index = sr; + ac->m4ac.channels = avctx->channels; + + for (i = 0; i < FF_ARRAY_ELEMS(ff_mpeg4audio_channels); i++) + if (ff_mpeg4audio_channels[i] == avctx->channels) + break; + if (i == FF_ARRAY_ELEMS(ff_mpeg4audio_channels)) { + i = 0; + } + ac->m4ac.chan_config = i; + + if (ac->m4ac.chan_config) { + set_default_channel_config(avctx, new_che_pos, ac->m4ac.chan_config); + output_configure(ac, ac->che_pos, new_che_pos, ac->m4ac.chan_config, OC_GLOBAL_HDR); + } } if (avctx->request_sample_fmt == AV_SAMPLE_FMT_FLT) { @@ -1755,12 +1791,10 @@ static void windowing_and_mdct_ltp(AACContext *ac, float *out, } else { memset(in, 0, 448 * sizeof(float)); ac->dsp.vector_fmul(in + 448, in + 448, swindow_prev, 128); - memcpy(in + 576, in + 576, 448 * sizeof(float)); } if (ics->window_sequence[0] != LONG_START_SEQUENCE) { ac->dsp.vector_fmul_reverse(in + 1024, in + 1024, lwindow, 1024); } else { - memcpy(in + 1024, in + 1024, 448 * sizeof(float)); ac->dsp.vector_fmul_reverse(in + 1024 + 448, in + 1024 + 448, swindow, 128); memset(in + 1024 + 576, 0, 448 * sizeof(float)); } @@ -2049,6 +2083,7 @@ static int parse_adts_frame_header(AACContext *ac, GetBitContext *gb) if (output_configure(ac, ac->che_pos, new_che_pos, hdr_info.chan_config, OC_TRIAL_FRAME)) return -7; } else if (ac->output_configured != OC_LOCKED) { + ac->m4ac.chan_config = 0; ac->output_configured = OC_NONE; } if (ac->output_configured != OC_LOCKED) { @@ -2504,18 +2539,18 @@ av_cold static int latm_decode_init(AVCodecContext *avctx) AVCodec ff_aac_decoder = { - "aac", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_AAC, - sizeof(AACContext), - aac_decode_init, - NULL, - aac_decode_close, - aac_decode_frame, + .name 
= "aac", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_AAC, + .priv_data_size = sizeof(AACContext), + .init = aac_decode_init, + .close = aac_decode_close, + .decode = aac_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("Advanced Audio Coding"), .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE }, + .capabilities = CODEC_CAP_CHANNEL_CONF, .channel_layouts = aac_channel_layout, }; @@ -2536,5 +2571,6 @@ AVCodec ff_aac_latm_decoder = { .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE }, + .capabilities = CODEC_CAP_CHANNEL_CONF, .channel_layouts = aac_channel_layout, }; diff --git a/libavcodec/aacenc.c b/libavcodec/aacenc.c index 0de6622389..8205cf0bc2 100644 --- a/libavcodec/aacenc.c +++ b/libavcodec/aacenc.c @@ -208,8 +208,9 @@ static av_cold int aac_encode_init(AVCodecContext *avctx) ff_init_ff_sine_windows(10); ff_init_ff_sine_windows(7); + s->chan_map = aac_chan_configs[avctx->channels-1]; s->samples = av_malloc(2 * 1024 * avctx->channels * sizeof(s->samples[0])); - s->cpe = av_mallocz(sizeof(ChannelElement) * aac_chan_configs[avctx->channels-1][0]); + s->cpe = av_mallocz(sizeof(ChannelElement) * s->chan_map[0]); avctx->extradata = av_mallocz(5 + FF_INPUT_BUFFER_PADDING_SIZE); avctx->extradata_size = 5; put_audio_specific_config(avctx); @@ -218,7 +219,7 @@ static av_cold int aac_encode_init(AVCodecContext *avctx) sizes[1] = swb_size_128[i]; lengths[0] = ff_aac_num_swb_1024[i]; lengths[1] = ff_aac_num_swb_128[i]; - ff_psy_init(&s->psy, avctx, 2, sizes, lengths); + ff_psy_init(&s->psy, avctx, 2, sizes, lengths, s->chan_map[0], &s->chan_map[1]); s->psypp = ff_psy_preprocess_init(avctx); s->coder = &ff_aac_coders[2]; @@ -372,7 +373,7 @@ static void adjust_frame_information(AACEncContext *apc, ChannelElement *cpe, in if (msc == 0 || ics0->max_sfb == 0) cpe->ms_mode = 0; else - cpe->ms_mode = msc < ics0->max_sfb ? 1 : 2; + cpe->ms_mode = msc < ics0->max_sfb * ics0->num_windows ? 1 : 2; } } @@ -500,7 +501,6 @@ static int aac_encode_frame(AVCodecContext *avctx, int16_t *samples = s->samples, *samples2, *la; ChannelElement *cpe; int i, ch, w, g, chans, tag, start_ch; - const uint8_t *chan_map = aac_chan_configs[avctx->channels-1]; int chan_el_counter[4]; FFPsyWindowInfo windows[AAC_MAX_CHANNELS]; @@ -521,8 +521,8 @@ static int aac_encode_frame(AVCodecContext *avctx, } else { start_ch = 0; samples2 = s->samples + 1024 * avctx->channels; - for (i = 0; i < chan_map[0]; i++) { - tag = chan_map[i+1]; + for (i = 0; i < s->chan_map[0]; i++) { + tag = s->chan_map[i+1]; chans = tag == TYPE_CPE ? 2 : 1; ff_psy_preprocess(s->psypp, (uint16_t*)data + channel_maps[avctx->channels-1][start_ch], @@ -538,9 +538,9 @@ static int aac_encode_frame(AVCodecContext *avctx, } start_ch = 0; - for (i = 0; i < chan_map[0]; i++) { + for (i = 0; i < s->chan_map[0]; i++) { FFPsyWindowInfo* wi = windows + start_ch; - tag = chan_map[i+1]; + tag = s->chan_map[i+1]; chans = tag == TYPE_CPE ? 2 : 1; cpe = &s->cpe[i]; for (ch = 0; ch < chans; ch++) { @@ -580,16 +580,19 @@ static int aac_encode_frame(AVCodecContext *avctx, put_bitstream_info(avctx, s, LIBAVCODEC_IDENT); start_ch = 0; memset(chan_el_counter, 0, sizeof(chan_el_counter)); - for (i = 0; i < chan_map[0]; i++) { + for (i = 0; i < s->chan_map[0]; i++) { FFPsyWindowInfo* wi = windows + start_ch; - tag = chan_map[i+1]; + const float *coeffs[2]; + tag = s->chan_map[i+1]; chans = tag == TYPE_CPE ? 
2 : 1; cpe = &s->cpe[i]; put_bits(&s->pb, 3, tag); put_bits(&s->pb, 4, chan_el_counter[tag]++); + for (ch = 0; ch < chans; ch++) + coeffs[ch] = cpe->ch[ch].coeffs; + s->psy.model->analyze(&s->psy, start_ch, coeffs, wi); for (ch = 0; ch < chans; ch++) { - s->cur_channel = start_ch + ch; - s->psy.model->analyze(&s->psy, s->cur_channel, cpe->ch[ch].coeffs, &wi[ch]); + s->cur_channel = start_ch * 2 + ch; s->coder->search_for_quantizers(avctx, s, &cpe->ch[ch], s->lambda); } cpe->common_window = 0; @@ -605,7 +608,7 @@ static int aac_encode_frame(AVCodecContext *avctx, } } } - s->cur_channel = start_ch; + s->cur_channel = start_ch * 2; if (s->options.stereo_mode && cpe->common_window) { if (s->options.stereo_mode > 0) { IndividualChannelStream *ics = &cpe->ch[0].ics; @@ -689,13 +692,13 @@ static const AVClass aacenc_class = { }; AVCodec ff_aac_encoder = { - "aac", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_AAC, - sizeof(AACEncContext), - aac_encode_init, - aac_encode_frame, - aac_encode_end, + .name = "aac", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_AAC, + .priv_data_size = sizeof(AACEncContext), + .init = aac_encode_init, + .encode = aac_encode_frame, + .close = aac_encode_end, .capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY | CODEC_CAP_EXPERIMENTAL, .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("Advanced Audio Coding"), diff --git a/libavcodec/aacenc.h b/libavcodec/aacenc.h index 44ad50bf80..150c651665 100644 --- a/libavcodec/aacenc.h +++ b/libavcodec/aacenc.h @@ -61,6 +61,7 @@ typedef struct AACEncContext { int16_t *samples; ///< saved preprocessed input int samplerate_index; ///< MPEG-4 samplerate index + const uint8_t *chan_map; ///< channel configuration map ChannelElement *cpe; ///< channel elements FFPsyContext psy; diff --git a/libavcodec/aacps.c b/libavcodec/aacps.c index fc124d1972..d016cf4e10 100644 --- a/libavcodec/aacps.c +++ b/libavcodec/aacps.c @@ -69,19 +69,19 @@ static const int huff_iid[] = { static VLC vlc_ps[10]; -/** - * Read Inter-channel Intensity Difference/Inter-Channel Coherence/ - * Inter-channel Phase Difference/Overall Phase Difference parameters from the - * bitstream. - * - * @param avctx contains the current codec context - * @param gb pointer to the input bitstream - * @param ps pointer to the Parametric Stereo context - * @param par pointer to the parameter to be read - * @param e envelope to decode - * @param dt 1: time delta-coded, 0: frequency delta-coded - */ #define READ_PAR_DATA(PAR, OFFSET, MASK, ERR_CONDITION) \ +/** \ + * Read Inter-channel Intensity Difference/Inter-Channel Coherence/ \ + * Inter-channel Phase Difference/Overall Phase Difference parameters from the \ + * bitstream. \ + * \ + * @param avctx contains the current codec context \ + * @param gb pointer to the input bitstream \ + * @param ps pointer to the Parametric Stereo context \ + * @param PAR pointer to the parameter to be read \ + * @param e envelope to decode \ + * @param dt 1: time delta-coded, 0: frequency delta-coded \ + */ \ static int read_ ## PAR ## _data(AVCodecContext *avctx, GetBitContext *gb, PSContext *ps, \ int8_t (*PAR)[PS_MAX_NR_IIDICC], int table_idx, int e, int dt) \ { \ @@ -813,14 +813,17 @@ static void stereo_processing(PSContext *ps, float (*l)[32][2], float (*r)[32][2 const float (*H_LUT)[8][4] = (PS_BASELINE || ps->icc_mode < 3) ? 
HA : HB; //Remapping - memcpy(H11[0][0], H11[0][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H11[0][0][0])); - memcpy(H11[1][0], H11[1][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H11[1][0][0])); - memcpy(H12[0][0], H12[0][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H12[0][0][0])); - memcpy(H12[1][0], H12[1][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H12[1][0][0])); - memcpy(H21[0][0], H21[0][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H21[0][0][0])); - memcpy(H21[1][0], H21[1][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H21[1][0][0])); - memcpy(H22[0][0], H22[0][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H22[0][0][0])); - memcpy(H22[1][0], H22[1][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H22[1][0][0])); + if (ps->num_env_old) { + memcpy(H11[0][0], H11[0][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H11[0][0][0])); + memcpy(H11[1][0], H11[1][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H11[1][0][0])); + memcpy(H12[0][0], H12[0][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H12[0][0][0])); + memcpy(H12[1][0], H12[1][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H12[1][0][0])); + memcpy(H21[0][0], H21[0][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H21[0][0][0])); + memcpy(H21[1][0], H21[1][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H21[1][0][0])); + memcpy(H22[0][0], H22[0][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H22[0][0][0])); + memcpy(H22[1][0], H22[1][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H22[1][0][0])); + } + if (is34) { remap34(&iid_mapped, ps->iid_par, ps->nr_iid_par, ps->num_env, 1); remap34(&icc_mapped, ps->icc_par, ps->nr_icc_par, ps->num_env, 1); diff --git a/libavcodec/aacpsy.c b/libavcodec/aacpsy.c index baf9388398..4152b70bbf 100644 --- a/libavcodec/aacpsy.c +++ b/libavcodec/aacpsy.c @@ -377,9 +377,10 @@ static const uint8_t window_grouping[9] = { * Tell encoder which window types to use. * @see 3GPP TS26.403 5.4.1 "Blockswitching" */ -static FFPsyWindowInfo psy_3gpp_window(FFPsyContext *ctx, - const int16_t *audio, const int16_t *la, - int channel, int prev_type) +static av_unused FFPsyWindowInfo psy_3gpp_window(FFPsyContext *ctx, + const int16_t *audio, + const int16_t *la, + int channel, int prev_type) { int i, j; int br = ctx->avctx->bit_rate / ctx->avctx->channels; @@ -556,8 +557,8 @@ static float calc_reduced_thr_3gpp(AacPsyBand *band, float min_snr, /** * Calculate band thresholds as suggested in 3GPP TS26.403 */ -static void psy_3gpp_analyze(FFPsyContext *ctx, int channel, - const float *coefs, const FFPsyWindowInfo *wi) +static void psy_3gpp_analyze_channel(FFPsyContext *ctx, int channel, + const float *coefs, const FFPsyWindowInfo *wi) { AacPsyContext *pctx = (AacPsyContext*) ctx->model_priv_data; AacPsyChannel *pch = &pctx->ch[channel]; @@ -626,7 +627,7 @@ static void psy_3gpp_analyze(FFPsyContext *ctx, int channel, } /* 5.6.1.3.2 "Calculation of the desired perceptual entropy" */ - ctx->pe[channel] = pe; + ctx->ch[channel].entropy = pe; desired_bits = calc_bit_demand(pctx, pe, ctx->bitres.bits, ctx->bitres.size, wi->num_windows == 8); desired_pe = PSY_3GPP_BITS_TO_PE(desired_bits); /* NOTE: PE correction is kept simple. 
During initial testing it had very @@ -730,7 +731,7 @@ static void psy_3gpp_analyze(FFPsyContext *ctx, int channel, for (w = 0; w < wi->num_windows*16; w += 16) { for (g = 0; g < num_bands; g++) { AacPsyBand *band = &pch->band[w+g]; - FFPsyBand *psy_band = &ctx->psy_bands[channel*PSY_MAX_BANDS+w+g]; + FFPsyBand *psy_band = &ctx->ch[channel].psy_bands[w+g]; psy_band->threshold = band->thr; psy_band->energy = band->energy; @@ -740,6 +741,16 @@ static void psy_3gpp_analyze(FFPsyContext *ctx, int channel, memcpy(pch->prev_band, pch->band, sizeof(pch->band)); } +static void psy_3gpp_analyze(FFPsyContext *ctx, int channel, + const float **coeffs, const FFPsyWindowInfo *wi) +{ + int ch; + FFPsyChannelGroup *group = ff_psy_find_group(ctx, channel); + + for (ch = 0; ch < group->num_ch; ch++) + psy_3gpp_analyze_channel(ctx, channel + ch, coeffs[ch], &wi[ch]); +} + static av_cold void psy_3gpp_end(FFPsyContext *apc) { AacPsyContext *pctx = (AacPsyContext*) apc->model_priv_data; diff --git a/libavcodec/aasc.c b/libavcodec/aasc.c index e6f363de4c..f0407bc94b 100644 --- a/libavcodec/aasc.c +++ b/libavcodec/aasc.c @@ -110,14 +110,13 @@ static av_cold int aasc_decode_end(AVCodecContext *avctx) } AVCodec ff_aasc_decoder = { - "aasc", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_AASC, - sizeof(AascContext), - aasc_decode_init, - NULL, - aasc_decode_end, - aasc_decode_frame, - CODEC_CAP_DR1, + .name = "aasc", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_AASC, + .priv_data_size = sizeof(AascContext), + .init = aasc_decode_init, + .close = aasc_decode_end, + .decode = aasc_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Autodesk RLE"), }; diff --git a/libavcodec/ac3.h b/libavcodec/ac3.h index a53817e1b0..94d6652e78 100644 --- a/libavcodec/ac3.h +++ b/libavcodec/ac3.h @@ -94,7 +94,7 @@ typedef struct AC3BitAllocParameters { * Coded AC-3 header values up to the lfeon element, plus derived values. 
*/ typedef struct { - /** @defgroup coded Coded elements + /** @name Coded elements * @{ */ uint16_t sync_word; @@ -112,7 +112,7 @@ typedef struct { int num_blocks; ///< number of audio blocks /** @} */ - /** @defgroup derived Derived values + /** @name Derived values * @{ */ uint8_t sr_shift; @@ -156,6 +156,8 @@ typedef struct AC3EncOptions { int dolby_surround_ex_mode; int dolby_headphone_mode; int ad_converter_type; + int eac3_mixing_metadata; + int eac3_info_metadata; /* other encoding options */ int allow_per_frame_metadata; diff --git a/libavcodec/ac3dec.c b/libavcodec/ac3dec.c index ca6f18ff3c..6e378bb59c 100644 --- a/libavcodec/ac3dec.c +++ b/libavcodec/ac3dec.c @@ -30,6 +30,7 @@ #include <string.h> #include "libavutil/crc.h" +#include "libavutil/opt.h" #include "internal.h" #include "aac_ac3_parser.h" #include "ac3_parser.h" @@ -1438,6 +1439,20 @@ static av_cold int ac3_decode_end(AVCodecContext *avctx) return 0; } +#define OFFSET(x) offsetof(AC3DecodeContext, x) +#define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM) +static const AVOption options[] = { + { "drc_scale", "percentage of dynamic range compression to apply", OFFSET(drc_scale), FF_OPT_TYPE_FLOAT, {1.0}, 0.0, 1.0, PAR }, + { NULL}, +}; + +static const AVClass ac3_decoder_class = { + .class_name = "(E-)AC3 decoder", + .item_name = av_default_item_name, + .option = options, + .version = LIBAVUTIL_VERSION_INT, +}; + AVCodec ff_ac3_decoder = { .name = "ac3", .type = AVMEDIA_TYPE_AUDIO, @@ -1450,6 +1465,7 @@ AVCodec ff_ac3_decoder = { .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE }, + .priv_class = &ac3_decoder_class, }; #if CONFIG_EAC3_DECODER @@ -1465,5 +1481,6 @@ AVCodec ff_eac3_decoder = { .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE }, + .priv_class = &ac3_decoder_class, }; #endif diff --git a/libavcodec/ac3dec.h b/libavcodec/ac3dec.h index 377e5154d7..8e84c18f11 100644 --- a/libavcodec/ac3dec.h +++ b/libavcodec/ac3dec.h @@ -66,10 +66,11 @@ #define AC3_FRAME_BUFFER_SIZE 32768 typedef struct { + AVClass *class; ///< class for AVOptions AVCodecContext *avctx; ///< parent context GetBitContext gbc; ///< bitstream reader -///@defgroup bsi bit stream information +///@name Bit stream information ///@{ int frame_type; ///< frame type (strmtyp) int substreamid; ///< substream identification @@ -87,7 +88,7 @@ typedef struct { int eac3; ///< indicates if current frame is E-AC-3 ///@} -///@defgroup audfrm frame syntax parameters +///@name Frame syntax parameters int snr_offset_strategy; ///< SNR offset strategy (snroffststr) int block_switch_syntax; ///< block switch syntax enabled (blkswe) int dither_flag_syntax; ///< dither flag syntax enabled (dithflage) @@ -97,7 +98,7 @@ typedef struct { int skip_syntax; ///< skip field syntax enabled (skipflde) ///@} -///@defgroup cpl standard coupling +///@name Standard coupling int cpl_in_use[AC3_MAX_BLOCKS]; ///< coupling in use (cplinu) int cpl_strategy_exists[AC3_MAX_BLOCKS];///< coupling strategy exists (cplstre) int channel_in_cpl[AC3_MAX_CHANNELS]; ///< channel in coupling (chincpl) @@ -110,7 +111,7 @@ typedef struct { int cpl_coords[AC3_MAX_CHANNELS][AC3_MAX_CPL_BANDS]; ///< coupling coordinates (cplco) ///@} -///@defgroup spx spectral extension +///@name Spectral extension ///@{ int spx_in_use; ///< spectral extension in use (spxinu) uint8_t channel_uses_spx[AC3_MAX_CHANNELS]; ///< channel uses spectral extension (chinspx) @@ -126,12 +127,12 @@ typedef 
struct { float spx_signal_blend[AC3_MAX_CHANNELS][SPX_MAX_BANDS];///< spx signal blending factor (sblendfact) ///@} -///@defgroup aht adaptive hybrid transform +///@name Adaptive hybrid transform int channel_uses_aht[AC3_MAX_CHANNELS]; ///< channel AHT in use (chahtinu) int pre_mantissa[AC3_MAX_CHANNELS][AC3_MAX_COEFS][AC3_MAX_BLOCKS]; ///< pre-IDCT mantissas ///@} -///@defgroup channel channel +///@name Channel int fbw_channels; ///< number of full-bandwidth channels int channels; ///< number of total channels int lfe_ch; ///< index of LFE channel @@ -141,27 +142,28 @@ typedef struct { int out_channels; ///< number of output channels ///@} -///@defgroup dynrng dynamic range +///@name Dynamic range float dynamic_range[2]; ///< dynamic range + float drc_scale; ///< percentage of dynamic range compression to be applied ///@} -///@defgroup bandwidth bandwidth +///@name Bandwidth int start_freq[AC3_MAX_CHANNELS]; ///< start frequency bin (strtmant) int end_freq[AC3_MAX_CHANNELS]; ///< end frequency bin (endmant) ///@} -///@defgroup rematrixing rematrixing +///@name Rematrixing int num_rematrixing_bands; ///< number of rematrixing bands (nrematbnd) int rematrixing_flags[4]; ///< rematrixing flags (rematflg) ///@} -///@defgroup exponents exponents +///@name Exponents int num_exp_groups[AC3_MAX_CHANNELS]; ///< Number of exponent groups (nexpgrp) int8_t dexps[AC3_MAX_CHANNELS][AC3_MAX_COEFS]; ///< decoded exponents int exp_strategy[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS]; ///< exponent strategies (expstr) ///@} -///@defgroup bitalloc bit allocation +///@name Bit allocation AC3BitAllocParameters bit_alloc_params; ///< bit allocation parameters int first_cpl_leak; ///< first coupling leak state (firstcplleak) int snr_offset[AC3_MAX_CHANNELS]; ///< signal-to-noise ratio offsets (snroffst) @@ -177,25 +179,25 @@ typedef struct { uint8_t dba_values[AC3_MAX_CHANNELS][8]; ///< delta values for each segment ///@} -///@defgroup dithering zero-mantissa dithering +///@name Zero-mantissa dithering int dither_flag[AC3_MAX_CHANNELS]; ///< dither flags (dithflg) AVLFG dith_state; ///< for dither generation ///@} -///@defgroup imdct IMDCT +///@name IMDCT int block_switch[AC3_MAX_CHANNELS]; ///< block switch flags (blksw) FFTContext imdct_512; ///< for 512 sample IMDCT FFTContext imdct_256; ///< for 256 sample IMDCT ///@} -///@defgroup opt optimization +///@name Optimization DSPContext dsp; ///< for optimization AC3DSPContext ac3dsp; FmtConvertContext fmt_conv; ///< optimized conversion functions float mul_bias; ///< scaling for float_to_int16 conversion ///@} -///@defgroup arrays aligned arrays +///@name Aligned arrays DECLARE_ALIGNED(16, int, fixed_coeffs)[AC3_MAX_CHANNELS][AC3_MAX_COEFS]; ///< fixed-point transform coefficients DECLARE_ALIGNED(32, float, transform_coeffs)[AC3_MAX_CHANNELS][AC3_MAX_COEFS]; ///< transform coefficients DECLARE_ALIGNED(32, float, delay)[AC3_MAX_CHANNELS][AC3_BLOCK_SIZE]; ///< delay - added to the next block diff --git a/libavcodec/ac3dsp.c b/libavcodec/ac3dsp.c index 619addc3d5..96bd123e6f 100644 --- a/libavcodec/ac3dsp.c +++ b/libavcodec/ac3dsp.c @@ -164,21 +164,8 @@ static void ac3_extract_exponents_c(uint8_t *exp, int32_t *coef, int nb_coefs) int i; for (i = 0; i < nb_coefs; i++) { - int e; int v = abs(coef[i]); - if (v == 0) - e = 24; - else { - e = 23 - av_log2(v); - if (e >= 24) { - e = 24; - coef[i] = 0; - } else if (e < 0) { - e = 0; - coef[i] = av_clip(coef[i], -16777215, 16777215); - } - } - exp[i] = e; + exp[i] = v ? 
23 - av_log2(v) : 24; } } diff --git a/libavcodec/ac3enc.c b/libavcodec/ac3enc.c index 6ee8a7ac15..b17036cb68 100644 --- a/libavcodec/ac3enc.c +++ b/libavcodec/ac3enc.c @@ -46,7 +46,7 @@ #include "eac3enc.h" typedef struct AC3Mant { - uint16_t *qmant1_ptr, *qmant2_ptr, *qmant4_ptr; ///< mantissa pointers for bap=1,2,4 + int16_t *qmant1_ptr, *qmant2_ptr, *qmant4_ptr; ///< mantissa pointers for bap=1,2,4 int mant1_cnt, mant2_cnt, mant4_cnt; ///< mantissa counts for bap=1,2,4 } AC3Mant; @@ -177,7 +177,7 @@ static const int8_t ac3_coupling_start_tab[6][3][19] = { * Adjust the frame size to make the average bit rate match the target bit rate. * This is only needed for 11025, 22050, and 44100 sample rates or any E-AC-3. */ -static void adjust_frame_size(AC3EncodeContext *s) +void ff_ac3_adjust_frame_size(AC3EncodeContext *s) { while (s->bits_written >= s->bit_rate && s->samples_written >= s->sample_rate) { s->bits_written -= s->bit_rate; @@ -186,18 +186,19 @@ static void adjust_frame_size(AC3EncodeContext *s) s->frame_size = s->frame_size_min + 2 * (s->bits_written * s->sample_rate < s->samples_written * s->bit_rate); s->bits_written += s->frame_size * 8; - s->samples_written += AC3_FRAME_SIZE; + s->samples_written += AC3_BLOCK_SIZE * s->num_blocks; } -static void compute_coupling_strategy(AC3EncodeContext *s) +void ff_ac3_compute_coupling_strategy(AC3EncodeContext *s) { int blk, ch; int got_cpl_snr; + int num_cpl_blocks; /* set coupling use flags for each block/channel */ /* TODO: turn coupling on/off and adjust start band based on bit usage */ - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { + for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; for (ch = 1; ch <= s->fbw_channels; ch++) block->channel_in_cpl[ch] = s->cpl_on; @@ -206,12 +207,14 @@ static void compute_coupling_strategy(AC3EncodeContext *s) /* enable coupling for each block if at least 2 channels have coupling enabled for that block */ got_cpl_snr = 0; - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { + num_cpl_blocks = 0; + for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; block->num_cpl_channels = 0; for (ch = 1; ch <= s->fbw_channels; ch++) block->num_cpl_channels += block->channel_in_cpl[ch]; block->cpl_in_use = block->num_cpl_channels > 1; + num_cpl_blocks += block->cpl_in_use; if (!block->cpl_in_use) { block->num_cpl_channels = 0; for (ch = 1; ch <= s->fbw_channels; ch++) @@ -237,9 +240,11 @@ static void compute_coupling_strategy(AC3EncodeContext *s) block->new_snr_offsets = 0; } } + if (!num_cpl_blocks) + s->cpl_on = 0; /* set bandwidth for each channel */ - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { + for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; for (ch = 1; ch <= s->fbw_channels; ch++) { if (block->channel_in_cpl[ch]) @@ -254,7 +259,7 @@ static void compute_coupling_strategy(AC3EncodeContext *s) /** * Apply stereo rematrixing to coefficients based on rematrixing flags. 
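The ac3dsp.c change above simplifies ac3_extract_exponents_c(): the exponent is 23 - floor(log2(|coef|)), with 24 reserved for zero coefficients, and the old clamping branches go away, which is consistent with the new clip_coefficients() added later in this diff keeping coefficients within +/-(2^24 - 1). A standalone sketch (ilog2() is a stand-in for av_log2()):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static int ilog2(unsigned v)            /* floor(log2(v)) for v > 0 */
{
    int n = -1;
    while (v) { n++; v >>= 1; }
    return n;
}

static void extract_exponents(uint8_t *exp, const int32_t *coef, int nb)
{
    for (int i = 0; i < nb; i++) {
        int v = abs(coef[i]);
        exp[i] = v ? 23 - ilog2(v) : 24;   /* 24 marks a zero coefficient */
    }
}

int main(void)
{
    const int32_t coef[] = { 0, 1, 2, 8388607, 16777215 };
    uint8_t exp[5];
    extract_exponents(exp, coef, 5);
    for (int i = 0; i < 5; i++)
        printf("coef %8d -> exp %d\n", coef[i], exp[i]);
    return 0;
}
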
*/ -static void apply_rematrixing(AC3EncodeContext *s) +void ff_ac3_apply_rematrixing(AC3EncodeContext *s) { int nb_coefs; int blk, bnd, i; @@ -264,7 +269,7 @@ static void apply_rematrixing(AC3EncodeContext *s) if (!s->rematrixing_enabled) return; - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { + for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; if (block->new_rematrixing_strategy) flags = block->rematrixing_flags; @@ -301,18 +306,19 @@ static av_cold void exponent_init(AC3EncodeContext *s) } /* LFE */ exponent_group_tab[0][0][7] = 2; + + if (CONFIG_EAC3_ENCODER && s->eac3) + ff_eac3_exponent_init(); } /** * Extract exponents from the MDCT coefficients. - * This takes into account the normalization that was done to the input samples - * by adjusting the exponents by the exponent shift values. */ static void extract_exponents(AC3EncodeContext *s) { int ch = !s->cpl_on; - int chan_size = AC3_MAX_COEFS * AC3_MAX_BLOCKS * (s->channels - ch + 1); + int chan_size = AC3_MAX_COEFS * s->num_blocks * (s->channels - ch + 1); AC3Block *block = &s->blocks[0]; s->ac3dsp.extract_exponents(block->exp[ch], block->fixed_coef[ch], chan_size); @@ -325,6 +331,15 @@ static void extract_exponents(AC3EncodeContext *s) */ #define EXP_DIFF_THRESHOLD 500 +/** + * Table used to select exponent strategy based on exponent reuse block interval. + */ +static const uint8_t exp_strategy_reuse_tab[4][6] = { + { EXP_D15, EXP_D15, EXP_D15, EXP_D15, EXP_D15, EXP_D15 }, + { EXP_D15, EXP_D15, EXP_D15, EXP_D15, EXP_D15, EXP_D15 }, + { EXP_D25, EXP_D25, EXP_D15, EXP_D15, EXP_D15, EXP_D15 }, + { EXP_D45, EXP_D25, EXP_D25, EXP_D15, EXP_D15, EXP_D15 } +}; /** * Calculate exponent strategies for all channels. @@ -343,9 +358,16 @@ static void compute_exp_strategy(AC3EncodeContext *s) reused in the next frame */ exp_strategy[0] = EXP_NEW; exp += AC3_MAX_COEFS; - for (blk = 1; blk < AC3_MAX_BLOCKS; blk++, exp += AC3_MAX_COEFS) { - if ((ch == CPL_CH && (!s->blocks[blk].cpl_in_use || !s->blocks[blk-1].cpl_in_use)) || - (ch > CPL_CH && (s->blocks[blk].channel_in_cpl[ch] != s->blocks[blk-1].channel_in_cpl[ch]))) { + for (blk = 1; blk < s->num_blocks; blk++, exp += AC3_MAX_COEFS) { + if (ch == CPL_CH) { + if (!s->blocks[blk-1].cpl_in_use) { + exp_strategy[blk] = EXP_NEW; + continue; + } else if (!s->blocks[blk].cpl_in_use) { + exp_strategy[blk] = EXP_REUSE; + continue; + } + } else if (s->blocks[blk].channel_in_cpl[ch] != s->blocks[blk-1].channel_in_cpl[ch]) { exp_strategy[blk] = EXP_NEW; continue; } @@ -360,25 +382,24 @@ static void compute_exp_strategy(AC3EncodeContext *s) /* now select the encoding strategy type : if exponents are often recoded, we use a coarse encoding */ blk = 0; - while (blk < AC3_MAX_BLOCKS) { + while (blk < s->num_blocks) { blk1 = blk + 1; - while (blk1 < AC3_MAX_BLOCKS && exp_strategy[blk1] == EXP_REUSE) + while (blk1 < s->num_blocks && exp_strategy[blk1] == EXP_REUSE) blk1++; - switch (blk1 - blk) { - case 1: exp_strategy[blk] = EXP_D45; break; - case 2: - case 3: exp_strategy[blk] = EXP_D25; break; - default: exp_strategy[blk] = EXP_D15; break; - } + exp_strategy[blk] = exp_strategy_reuse_tab[s->num_blks_code][blk1-blk-1]; blk = blk1; } } if (s->lfe_on) { ch = s->lfe_channel; s->exp_strategy[ch][0] = EXP_D15; - for (blk = 1; blk < AC3_MAX_BLOCKS; blk++) + for (blk = 1; blk < s->num_blocks; blk++) s->exp_strategy[ch][blk] = EXP_REUSE; } + + /* for E-AC-3, determine frame exponent strategy */ + if (CONFIG_EAC3_ENCODER && s->eac3) + ff_eac3_get_frame_exp_strategy(s); } @@ -470,7 +491,7 @@ 
static void encode_exponents(AC3EncodeContext *s) cpl = (ch == CPL_CH); blk = 0; - while (blk < AC3_MAX_BLOCKS) { + while (blk < s->num_blocks) { AC3Block *block = &s->blocks[blk]; if (cpl && !block->cpl_in_use) { exp += AC3_MAX_COEFS; @@ -483,7 +504,7 @@ static void encode_exponents(AC3EncodeContext *s) /* count the number of EXP_REUSE blocks after the current block and set exponent reference block numbers */ s->exp_ref_block[ch][blk] = blk; - while (blk1 < AC3_MAX_BLOCKS && exp_strategy[blk1] == EXP_REUSE) { + while (blk1 < s->num_blocks && exp_strategy[blk1] == EXP_REUSE) { s->exp_ref_block[ch][blk1] = blk; blk1++; } @@ -519,7 +540,7 @@ static void group_exponents(AC3EncodeContext *s) int exp0, exp1; bit_count = 0; - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { + for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; for (ch = !block->cpl_in_use; ch <= s->channels; ch++) { int exp_strategy = s->exp_strategy[ch][blk]; @@ -570,7 +591,7 @@ static void group_exponents(AC3EncodeContext *s) * Extract exponents from MDCT coefficients, calculate exponent strategies, * and encode final exponents. */ -static void process_exponents(AC3EncodeContext *s) +void ff_ac3_process_exponents(AC3EncodeContext *s) { extract_exponents(s); @@ -608,26 +629,38 @@ static void count_frame_bits_fixed(AC3EncodeContext *s) if (s->eac3) { /* bitstream info header */ frame_bits += 35; - frame_bits += 1 + 1 + 1; + frame_bits += 1 + 1; + if (s->num_blocks != 0x6) + frame_bits++; + frame_bits++; /* audio frame header */ - frame_bits += 2; + if (s->num_blocks == 6) + frame_bits += 2; frame_bits += 10; /* exponent strategy */ - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) - frame_bits += 2 * s->fbw_channels + s->lfe_on; + if (s->use_frame_exp_strategy) + frame_bits += 5 * s->fbw_channels; + else + frame_bits += s->num_blocks * 2 * s->fbw_channels; + if (s->lfe_on) + frame_bits += s->num_blocks; /* converter exponent strategy */ - frame_bits += s->fbw_channels * 5; + if (s->num_blks_code != 0x3) + frame_bits++; + else + frame_bits += s->fbw_channels * 5; /* snr offsets */ frame_bits += 10; /* block start info */ - frame_bits++; + if (s->num_blocks != 1) + frame_bits++; } else { frame_bits += 49; frame_bits += frame_bits_inc[s->channel_mode]; } /* audio blocks */ - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { + for (blk = 0; blk < s->num_blocks; blk++) { if (!s->eac3) { /* block switch flags */ frame_bits += s->fbw_channels; @@ -726,10 +759,34 @@ static void count_frame_bits(AC3EncodeContext *s) /* header */ if (s->eac3) { + if (opt->eac3_mixing_metadata) { + if (s->channel_mode > AC3_CHMODE_STEREO) + frame_bits += 2; + if (s->has_center) + frame_bits += 6; + if (s->has_surround) + frame_bits += 6; + frame_bits += s->lfe_on; + frame_bits += 1 + 1 + 2; + if (s->channel_mode < AC3_CHMODE_STEREO) + frame_bits++; + frame_bits++; + } + if (opt->eac3_info_metadata) { + frame_bits += 3 + 1 + 1; + if (s->channel_mode == AC3_CHMODE_STEREO) + frame_bits += 2 + 2; + if (s->channel_mode >= AC3_CHMODE_2F2R) + frame_bits += 2; + frame_bits++; + if (opt->audio_production_info) + frame_bits += 5 + 2 + 1; + frame_bits++; + } /* coupling */ if (s->channel_mode > AC3_CHMODE_MONO) { frame_bits++; - for (blk = 1; blk < AC3_MAX_BLOCKS; blk++) { + for (blk = 1; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; frame_bits++; if (block->new_cpl_strategy) @@ -737,8 +794,14 @@ static void count_frame_bits(AC3EncodeContext *s) } } /* coupling exponent strategy */ - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) - 
frame_bits += 2 * s->blocks[blk].cpl_in_use; + if (s->cpl_on) { + if (s->use_frame_exp_strategy) { + frame_bits += 5 * s->cpl_on; + } else { + for (blk = 0; blk < s->num_blocks; blk++) + frame_bits += 2 * s->blocks[blk].cpl_in_use; + } + } } else { if (opt->audio_production_info) frame_bits += 7; @@ -751,7 +814,7 @@ static void count_frame_bits(AC3EncodeContext *s) } /* audio blocks */ - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { + for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; /* coupling strategy */ @@ -838,7 +901,7 @@ static void bit_alloc_masking(AC3EncodeContext *s) { int blk, ch; - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { + for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; for (ch = !block->cpl_in_use; ch <= s->channels; ch++) { /* We only need psd and mask for calculating bap. @@ -874,9 +937,9 @@ static void reset_block_bap(AC3EncodeContext *s) ref_bap = s->bap_buffer; for (ch = 0; ch <= s->channels; ch++) { - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) + for (blk = 0; blk < s->num_blocks; blk++) s->ref_bap[ch][blk] = ref_bap + AC3_MAX_COEFS * s->exp_ref_block[ch][blk]; - ref_bap += AC3_MAX_COEFS * AC3_MAX_BLOCKS; + ref_bap += AC3_MAX_COEFS * s->num_blocks; } s->ref_bap_set = 1; } @@ -909,7 +972,7 @@ static void count_mantissa_bits_update_ch(AC3EncodeContext *s, int ch, { int blk; - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { + for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; if (ch == CPL_CH && !block->cpl_in_use) continue; @@ -953,7 +1016,7 @@ static int bit_alloc(AC3EncodeContext *s, int snr_offset) snr_offset = (snr_offset - 240) << 2; reset_block_bap(s); - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { + for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; for (ch = !block->cpl_in_use; ch <= s->channels; ch++) { @@ -1023,87 +1086,18 @@ static int cbr_bit_allocation(AC3EncodeContext *s) /** - * Downgrade exponent strategies to reduce the bits used by the exponents. - * This is a fallback for when bit allocation fails with the normal exponent - * strategies. Each time this function is run it only downgrades the - * strategy in 1 channel of 1 block. - * @return non-zero if downgrade was unsuccessful - */ -static int downgrade_exponents(AC3EncodeContext *s) -{ - int ch, blk; - - for (blk = AC3_MAX_BLOCKS-1; blk >= 0; blk--) { - for (ch = !s->blocks[blk].cpl_in_use; ch <= s->fbw_channels; ch++) { - if (s->exp_strategy[ch][blk] == EXP_D15) { - s->exp_strategy[ch][blk] = EXP_D25; - return 0; - } - } - } - for (blk = AC3_MAX_BLOCKS-1; blk >= 0; blk--) { - for (ch = !s->blocks[blk].cpl_in_use; ch <= s->fbw_channels; ch++) { - if (s->exp_strategy[ch][blk] == EXP_D25) { - s->exp_strategy[ch][blk] = EXP_D45; - return 0; - } - } - } - /* block 0 cannot reuse exponents, so only downgrade D45 to REUSE if - the block number > 0 */ - for (blk = AC3_MAX_BLOCKS-1; blk > 0; blk--) { - for (ch = !s->blocks[blk].cpl_in_use; ch <= s->fbw_channels; ch++) { - if (s->exp_strategy[ch][blk] > EXP_REUSE) { - s->exp_strategy[ch][blk] = EXP_REUSE; - return 0; - } - } - } - return -1; -} - - -/** * Perform bit allocation search. * Finds the SNR offset value that maximizes quality and fits in the specified * frame size. Output is the SNR offset and a set of bit allocation pointers * used to quantize the mantissas. 
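The comment above describes the job of cbr_bit_allocation(), whose body is not part of this excerpt: find the largest SNR offset (0..1023) whose mantissa-bit cost still fits in the frame. One illustrative way such a search can go, with a toy cost model standing in for the real bit counting and hypothetical helper names:

#include <stdio.h>

/* toy stand-in: a higher SNR offset means finer quantization, more bits */
static int count_mantissa_bits(int snr_offset)
{
    return 2000 + snr_offset * 7;
}

static int find_snr_offset(int bits_available)
{
    int offset = 1023;

    /* coarse: come down in large steps until the frame fits */
    while (offset >= 0 && count_mantissa_bits(offset) > bits_available)
        offset -= 64;
    if (offset < 0)
        return -1;                       /* does not fit even at offset 0 */

    /* fine: climb back up in shrinking steps while it still fits */
    for (int step = 64; step > 0; step >>= 2)
        while (offset + step <= 1023 &&
               count_mantissa_bits(offset + step) <= bits_available)
            offset += step;

    return offset;
}

int main(void)
{
    printf("chosen snr_offset = %d\n", find_snr_offset(6000));
    return 0;
}
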
*/ -static int compute_bit_allocation(AC3EncodeContext *s) +int ff_ac3_compute_bit_allocation(AC3EncodeContext *s) { - int ret; - count_frame_bits(s); bit_alloc_masking(s); - ret = cbr_bit_allocation(s); - while (ret) { - /* fallback 1: disable channel coupling */ - if (s->cpl_on) { - s->cpl_on = 0; - compute_coupling_strategy(s); - s->compute_rematrixing_strategy(s); - apply_rematrixing(s); - process_exponents(s); - ret = compute_bit_allocation(s); - continue; - } - - /* fallback 2: downgrade exponents */ - if (!downgrade_exponents(s)) { - extract_exponents(s); - encode_exponents(s); - group_exponents(s); - ret = compute_bit_allocation(s); - continue; - } - - /* fallbacks were not enough... */ - break; - } - - return ret; + return cbr_bit_allocation(s); } @@ -1123,20 +1117,14 @@ static inline int sym_quant(int c, int e, int levels) */ static inline int asym_quant(int c, int e, int qbits) { - int lshift, m, v; + int m; - lshift = e + qbits - 24; - if (lshift >= 0) - v = c << lshift; - else - v = c >> (-lshift); - /* rounding */ - v = (v + 1) >> 1; + c = (((c << e) >> (24 - qbits)) + 1) >> 1; m = (1 << (qbits-1)); - if (v >= m) - v = m - 1; - av_assert2(v >= -m); - return v & ((1 << qbits)-1); + if (c >= m) + c = m - 1; + av_assert2(c >= -m); + return c; } @@ -1145,7 +1133,7 @@ static inline int asym_quant(int c, int e, int qbits) */ static void quantize_mantissas_blk_ch(AC3Mant *s, int32_t *fixed_coef, uint8_t *exp, uint8_t *bap, - uint16_t *qmant, int start_freq, + int16_t *qmant, int start_freq, int end_freq) { int i; @@ -1238,11 +1226,11 @@ static void quantize_mantissas_blk_ch(AC3Mant *s, int32_t *fixed_coef, /** * Quantize mantissas using coefficients, exponents, and bit allocation pointers. */ -static void quantize_mantissas(AC3EncodeContext *s) +void ff_ac3_quantize_mantissas(AC3EncodeContext *s) { int blk, ch, ch0=0, got_cpl; - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { + for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; AC3Mant m = { 0 }; @@ -1497,14 +1485,14 @@ static void output_audio_block(AC3EncodeContext *s, int blk) q = block->qmant[ch][i]; b = s->ref_bap[ch][blk][i]; switch (b) { - case 0: break; - case 1: if (q != 128) put_bits(&s->pb, 5, q); break; - case 2: if (q != 128) put_bits(&s->pb, 7, q); break; - case 3: put_bits(&s->pb, 3, q); break; - case 4: if (q != 128) put_bits(&s->pb, 7, q); break; - case 14: put_bits(&s->pb, 14, q); break; - case 15: put_bits(&s->pb, 16, q); break; - default: put_bits(&s->pb, b-1, q); break; + case 0: break; + case 1: if (q != 128) put_bits (&s->pb, 5, q); break; + case 2: if (q != 128) put_bits (&s->pb, 7, q); break; + case 3: put_sbits(&s->pb, 3, q); break; + case 4: if (q != 128) put_bits (&s->pb, 7, q); break; + case 14: put_sbits(&s->pb, 14, q); break; + case 15: put_sbits(&s->pb, 16, q); break; + default: put_sbits(&s->pb, b-1, q); break; } } if (ch == CPL_CH) @@ -1597,7 +1585,7 @@ static void output_frame_end(AC3EncodeContext *s) /** * Write the frame to the output bitstream. 
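A standalone version of the rewritten asym_quant() from the hunk above: the coefficient (|c| < 2^24) is scaled by 2^e, rounded to a signed qbits-bit level and clamped at the largest positive level. Because the result stays signed (the qmant buffers change to int16_t), output_audio_block() above now writes these levels with put_sbits() instead of put_bits(). Example inputs are arbitrary.

#include <stdio.h>

static int asym_quant(int c, int e, int qbits)
{
    int m = 1 << (qbits - 1);

    /* scale by 2^e, keep qbits of precision, round to nearest;
     * relies on arithmetic right shift of negative values, as the
     * original code does */
    c = (((c << e) >> (24 - qbits)) + 1) >> 1;
    if (c >= m)
        c = m - 1;          /* +m itself is not representable */
    return c;               /* signed value in [-m, m-1] */
}

int main(void)
{
    /* a few example quantizations at qbits = 5 (levels -16..15) */
    printf("%d\n", asym_quant( 8388608, 0, 5));   /*  0.5    ->  8          */
    printf("%d\n", asym_quant(-8388608, 0, 5));   /* -0.5    -> -8          */
    printf("%d\n", asym_quant(16777215, 0, 5));   /* near +1 -> clamped 15  */
    return 0;
}
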
*/ -static void output_frame(AC3EncodeContext *s, unsigned char *frame) +void ff_ac3_output_frame(AC3EncodeContext *s, unsigned char *frame) { int blk; @@ -1605,17 +1593,17 @@ static void output_frame(AC3EncodeContext *s, unsigned char *frame) s->output_frame_header(s); - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) + for (blk = 0; blk < s->num_blocks; blk++) output_audio_block(s, blk); output_frame_end(s); } -static void dprint_options(AVCodecContext *avctx) +static void dprint_options(AC3EncodeContext *s) { #ifdef DEBUG - AC3EncodeContext *s = avctx->priv_data; + AVCodecContext *avctx = s->avctx; AC3EncOptions *opt = &s->options; char strbuf[32]; @@ -1633,6 +1621,7 @@ static void dprint_options(AVCodecContext *avctx) av_dlog(avctx, "channel_layout: %s\n", strbuf); av_dlog(avctx, "sample_rate: %d\n", s->sample_rate); av_dlog(avctx, "bit_rate: %d\n", s->bit_rate); + av_dlog(avctx, "blocks/frame: %d (code=%d)\n", s->num_blocks, s->num_blks_code); if (s->cutoff) av_dlog(avctx, "cutoff: %d\n", s->cutoff); @@ -1766,98 +1755,167 @@ static void validate_mix_level(void *log_ctx, const char *opt_name, * Validate metadata options as set by AVOption system. * These values can optionally be changed per-frame. */ -static int validate_metadata(AVCodecContext *avctx) +int ff_ac3_validate_metadata(AC3EncodeContext *s) { - AC3EncodeContext *s = avctx->priv_data; + AVCodecContext *avctx = s->avctx; AC3EncOptions *opt = &s->options; - /* validate mixing levels */ - if (s->has_center) { - validate_mix_level(avctx, "center_mix_level", &opt->center_mix_level, - cmixlev_options, CMIXLEV_NUM_OPTIONS, 1, 0, - &s->center_mix_level); + opt->audio_production_info = 0; + opt->extended_bsi_1 = 0; + opt->extended_bsi_2 = 0; + opt->eac3_mixing_metadata = 0; + opt->eac3_info_metadata = 0; + + /* determine mixing metadata / xbsi1 use */ + if (s->channel_mode > AC3_CHMODE_STEREO && opt->preferred_stereo_downmix >= 0) { + opt->extended_bsi_1 = 1; + opt->eac3_mixing_metadata = 1; + } + if (s->has_center && + (opt->ltrt_center_mix_level >= 0 || opt->loro_center_mix_level >= 0)) { + opt->extended_bsi_1 = 1; + opt->eac3_mixing_metadata = 1; } - if (s->has_surround) { - validate_mix_level(avctx, "surround_mix_level", &opt->surround_mix_level, - surmixlev_options, SURMIXLEV_NUM_OPTIONS, 1, 0, - &s->surround_mix_level); + if (s->has_surround && + (opt->ltrt_surround_mix_level >= 0 || opt->loro_surround_mix_level >= 0)) { + opt->extended_bsi_1 = 1; + opt->eac3_mixing_metadata = 1; } - /* set audio production info flag */ - if (opt->mixing_level >= 0 || opt->room_type >= 0) { - if (opt->mixing_level < 0) { - av_log(avctx, AV_LOG_ERROR, "mixing_level must be set if " - "room_type is set\n"); - return AVERROR(EINVAL); - } - if (opt->mixing_level < 80) { - av_log(avctx, AV_LOG_ERROR, "invalid mixing level. 
must be between " - "80dB and 111dB\n"); - return AVERROR(EINVAL); + if (s->eac3) { + /* determine info metadata use */ + if (avctx->audio_service_type != AV_AUDIO_SERVICE_TYPE_MAIN) + opt->eac3_info_metadata = 1; + if (opt->copyright >= 0 || opt->original >= 0) + opt->eac3_info_metadata = 1; + if (s->channel_mode == AC3_CHMODE_STEREO && + (opt->dolby_headphone_mode >= 0 || opt->dolby_surround_mode >= 0)) + opt->eac3_info_metadata = 1; + if (s->channel_mode >= AC3_CHMODE_2F2R && opt->dolby_surround_ex_mode >= 0) + opt->eac3_info_metadata = 1; + if (opt->mixing_level >= 0 || opt->room_type >= 0 || opt->ad_converter_type >= 0) { + opt->audio_production_info = 1; + opt->eac3_info_metadata = 1; } - /* default room type */ - if (opt->room_type < 0) - opt->room_type = 0; - opt->audio_production_info = 1; } else { - opt->audio_production_info = 0; + /* determine audio production info use */ + if (opt->mixing_level >= 0 || opt->room_type >= 0) + opt->audio_production_info = 1; + + /* determine xbsi2 use */ + if (s->channel_mode >= AC3_CHMODE_2F2R && opt->dolby_surround_ex_mode >= 0) + opt->extended_bsi_2 = 1; + if (s->channel_mode == AC3_CHMODE_STEREO && opt->dolby_headphone_mode >= 0) + opt->extended_bsi_2 = 1; + if (opt->ad_converter_type >= 0) + opt->extended_bsi_2 = 1; } - /* set extended bsi 1 flag */ - if ((s->has_center || s->has_surround) && - (opt->preferred_stereo_downmix >= 0 || - opt->ltrt_center_mix_level >= 0 || - opt->ltrt_surround_mix_level >= 0 || - opt->loro_center_mix_level >= 0 || - opt->loro_surround_mix_level >= 0)) { + /* validate AC-3 mixing levels */ + if (!s->eac3) { + if (s->has_center) { + validate_mix_level(avctx, "center_mix_level", &opt->center_mix_level, + cmixlev_options, CMIXLEV_NUM_OPTIONS, 1, 0, + &s->center_mix_level); + } + if (s->has_surround) { + validate_mix_level(avctx, "surround_mix_level", &opt->surround_mix_level, + surmixlev_options, SURMIXLEV_NUM_OPTIONS, 1, 0, + &s->surround_mix_level); + } + } + + /* validate extended bsi 1 / mixing metadata */ + if (opt->extended_bsi_1 || opt->eac3_mixing_metadata) { /* default preferred stereo downmix */ if (opt->preferred_stereo_downmix < 0) opt->preferred_stereo_downmix = 0; - /* validate Lt/Rt center mix level */ - validate_mix_level(avctx, "ltrt_center_mix_level", - &opt->ltrt_center_mix_level, extmixlev_options, - EXTMIXLEV_NUM_OPTIONS, 5, 0, - &s->ltrt_center_mix_level); - /* validate Lt/Rt surround mix level */ - validate_mix_level(avctx, "ltrt_surround_mix_level", - &opt->ltrt_surround_mix_level, extmixlev_options, - EXTMIXLEV_NUM_OPTIONS, 6, 3, - &s->ltrt_surround_mix_level); - /* validate Lo/Ro center mix level */ - validate_mix_level(avctx, "loro_center_mix_level", - &opt->loro_center_mix_level, extmixlev_options, - EXTMIXLEV_NUM_OPTIONS, 5, 0, - &s->loro_center_mix_level); - /* validate Lo/Ro surround mix level */ - validate_mix_level(avctx, "loro_surround_mix_level", - &opt->loro_surround_mix_level, extmixlev_options, - EXTMIXLEV_NUM_OPTIONS, 6, 3, - &s->loro_surround_mix_level); - opt->extended_bsi_1 = 1; - } else { - opt->extended_bsi_1 = 0; + if (!s->eac3 || s->has_center) { + /* validate Lt/Rt center mix level */ + validate_mix_level(avctx, "ltrt_center_mix_level", + &opt->ltrt_center_mix_level, extmixlev_options, + EXTMIXLEV_NUM_OPTIONS, 5, 0, + &s->ltrt_center_mix_level); + /* validate Lo/Ro center mix level */ + validate_mix_level(avctx, "loro_center_mix_level", + &opt->loro_center_mix_level, extmixlev_options, + EXTMIXLEV_NUM_OPTIONS, 5, 0, + &s->loro_center_mix_level); + } + if (!s->eac3 || 
s->has_surround) { + /* validate Lt/Rt surround mix level */ + validate_mix_level(avctx, "ltrt_surround_mix_level", + &opt->ltrt_surround_mix_level, extmixlev_options, + EXTMIXLEV_NUM_OPTIONS, 6, 3, + &s->ltrt_surround_mix_level); + /* validate Lo/Ro surround mix level */ + validate_mix_level(avctx, "loro_surround_mix_level", + &opt->loro_surround_mix_level, extmixlev_options, + EXTMIXLEV_NUM_OPTIONS, 6, 3, + &s->loro_surround_mix_level); + } } - /* set extended bsi 2 flag */ - if (opt->dolby_surround_ex_mode >= 0 || - opt->dolby_headphone_mode >= 0 || - opt->ad_converter_type >= 0) { - /* default dolby surround ex mode */ - if (opt->dolby_surround_ex_mode < 0) - opt->dolby_surround_ex_mode = 0; + /* validate audio service type / channels combination */ + if ((avctx->audio_service_type == AV_AUDIO_SERVICE_TYPE_KARAOKE && + avctx->channels == 1) || + ((avctx->audio_service_type == AV_AUDIO_SERVICE_TYPE_COMMENTARY || + avctx->audio_service_type == AV_AUDIO_SERVICE_TYPE_EMERGENCY || + avctx->audio_service_type == AV_AUDIO_SERVICE_TYPE_VOICE_OVER) + && avctx->channels > 1)) { + av_log(avctx, AV_LOG_ERROR, "invalid audio service type for the " + "specified number of channels\n"); + return AVERROR(EINVAL); + } + + /* validate extended bsi 2 / info metadata */ + if (opt->extended_bsi_2 || opt->eac3_info_metadata) { /* default dolby headphone mode */ if (opt->dolby_headphone_mode < 0) opt->dolby_headphone_mode = 0; + /* default dolby surround ex mode */ + if (opt->dolby_surround_ex_mode < 0) + opt->dolby_surround_ex_mode = 0; /* default A/D converter type */ if (opt->ad_converter_type < 0) opt->ad_converter_type = 0; - opt->extended_bsi_2 = 1; - } else { - opt->extended_bsi_2 = 0; + } + + /* copyright & original defaults */ + if (!s->eac3 || opt->eac3_info_metadata) { + /* default copyright */ + if (opt->copyright < 0) + opt->copyright = 0; + /* default original */ + if (opt->original < 0) + opt->original = 1; + } + + /* dolby surround mode default */ + if (!s->eac3 || opt->eac3_info_metadata) { + if (opt->dolby_surround_mode < 0) + opt->dolby_surround_mode = 0; + } + + /* validate audio production info */ + if (opt->audio_production_info) { + if (opt->mixing_level < 0) { + av_log(avctx, AV_LOG_ERROR, "mixing_level must be set if " + "room_type is set\n"); + return AVERROR(EINVAL); + } + if (opt->mixing_level < 80) { + av_log(avctx, AV_LOG_ERROR, "invalid mixing level. must be between " + "80dB and 111dB\n"); + return AVERROR(EINVAL); + } + /* default room type */ + if (opt->room_type < 0) + opt->room_type = 0; } /* set bitstream id for alternate bitstream syntax */ - if (opt->extended_bsi_1 || opt->extended_bsi_2) { + if (!s->eac3 && (opt->extended_bsi_1 || opt->extended_bsi_2)) { if (s->bitstream_id > 8 && s->bitstream_id < 11) { static int warn_once = 1; if (warn_once) { @@ -1876,57 +1934,6 @@ static int validate_metadata(AVCodecContext *avctx) /** - * Encode a single AC-3 frame. 
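The shared ff_ac3_encode_frame() is removed here (its body follows) in favour of per-precision encode functions declared in ac3enc.h further down, and for E-AC-3 the frame no longer always spans 6 blocks: validate_options() below picks the largest block count from {1, 2, 3, 6} whose maximum representable bit rate still covers the requested one. A worked example of that arithmetic, assuming AC3_BLOCK_SIZE is 256 samples (the real code also checks a lower bound and rejects out-of-range rates):

#include <stdio.h>

int main(void)
{
    static const int blocks_tab[4] = { 1, 2, 3, 6 };
    const int sample_rate = 48000;
    const long long bit_rates[] = { 448000, 2000000 };

    for (int i = 0; i < 2; i++) {
        long long bit_rate = bit_rates[i];
        for (int code = 3; code >= 0; code--) {
            int blocks        = blocks_tab[code];
            int frame_samples = 256 * blocks;           /* AC3_BLOCK_SIZE * blocks */
            long long max_br  = 2048LL * sample_rate / frame_samples * 16;
            if (bit_rate <= max_br) {
                long long wpf = (bit_rate / 16) * frame_samples / sample_rate;
                printf("%lld bps @ %d Hz -> %d blocks/frame, %lld 16-bit words/frame\n",
                       bit_rate, sample_rate, blocks, wpf);
                break;
            }
        }
    }
    return 0;
}
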
- */ -int ff_ac3_encode_frame(AVCodecContext *avctx, unsigned char *frame, - int buf_size, void *data) -{ - AC3EncodeContext *s = avctx->priv_data; - const SampleType *samples = data; - int ret; - - if (!s->eac3 && s->options.allow_per_frame_metadata) { - ret = validate_metadata(avctx); - if (ret) - return ret; - } - - if (s->bit_alloc.sr_code == 1 || s->eac3) - adjust_frame_size(s); - - s->deinterleave_input_samples(s, samples); - - s->apply_mdct(s); - - s->scale_coefficients(s); - - s->cpl_on = s->cpl_enabled; - compute_coupling_strategy(s); - - if (s->cpl_on) - s->apply_channel_coupling(s); - - s->compute_rematrixing_strategy(s); - - apply_rematrixing(s); - - process_exponents(s); - - ret = compute_bit_allocation(s); - if (ret) { - av_log(avctx, AV_LOG_ERROR, "Bit allocation failed. Try increasing the bitrate.\n"); - return ret; - } - - quantize_mantissas(s); - - output_frame(s, frame); - - return s->frame_size; -} - - -/** * Finalize encoding and free any memory allocated by the encoder. */ av_cold int ff_ac3_encode_close(AVCodecContext *avctx) @@ -1948,7 +1955,9 @@ av_cold int ff_ac3_encode_close(AVCodecContext *avctx) av_freep(&s->band_psd_buffer); av_freep(&s->mask_buffer); av_freep(&s->qmant_buffer); - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { + av_freep(&s->cpl_coord_exp_buffer); + av_freep(&s->cpl_coord_mant_buffer); + for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; av_freep(&block->mdct_coef); av_freep(&block->fixed_coef); @@ -1958,10 +1967,11 @@ av_cold int ff_ac3_encode_close(AVCodecContext *avctx) av_freep(&block->band_psd); av_freep(&block->mask); av_freep(&block->qmant); + av_freep(&block->cpl_coord_exp); + av_freep(&block->cpl_coord_mant); } - s->mdct_end(s->mdct); - av_freep(&s->mdct); + s->mdct_end(s); av_freep(&avctx->coded_frame); return 0; @@ -2016,8 +2026,9 @@ static av_cold int set_channel_info(AC3EncodeContext *s, int channels, } -static av_cold int validate_options(AVCodecContext *avctx, AC3EncodeContext *s) +static av_cold int validate_options(AC3EncodeContext *s) { + AVCodecContext *avctx = s->avctx; int i, ret, max_sr; /* validate channel layout */ @@ -2053,18 +2064,30 @@ static av_cold int validate_options(AVCodecContext *avctx, AC3EncodeContext *s) /* validate bit rate */ if (s->eac3) { int max_br, min_br, wpf, min_br_dist, min_br_code; + int num_blks_code, num_blocks, frame_samples; /* calculate min/max bitrate */ - max_br = 2048 * s->sample_rate / AC3_FRAME_SIZE * 16; - min_br = ((s->sample_rate + (AC3_FRAME_SIZE-1)) / AC3_FRAME_SIZE) * 16; + /* TODO: More testing with 3 and 2 blocks. All E-AC-3 samples I've + found use either 6 blocks or 1 block, even though 2 or 3 blocks + would work as far as the bit rate is concerned. */ + for (num_blks_code = 3; num_blks_code >= 0; num_blks_code--) { + num_blocks = ((int[]){ 1, 2, 3, 6 })[num_blks_code]; + frame_samples = AC3_BLOCK_SIZE * num_blocks; + max_br = 2048 * s->sample_rate / frame_samples * 16; + min_br = ((s->sample_rate + (frame_samples-1)) / frame_samples) * 16; + if (avctx->bit_rate <= max_br) + break; + } if (avctx->bit_rate < min_br || avctx->bit_rate > max_br) { av_log(avctx, AV_LOG_ERROR, "invalid bit rate. 
must be %d to %d " "for this sample rate\n", min_br, max_br); return AVERROR(EINVAL); } + s->num_blks_code = num_blks_code; + s->num_blocks = num_blocks; /* calculate words-per-frame for the selected bitrate */ - wpf = (avctx->bit_rate / 16) * AC3_FRAME_SIZE / s->sample_rate; + wpf = (avctx->bit_rate / 16) * frame_samples / s->sample_rate; av_assert1(wpf > 0 && wpf <= 2048); /* find the closest AC-3 bitrate code to the selected bitrate. @@ -2096,6 +2119,8 @@ static av_cold int validate_options(AVCodecContext *avctx, AC3EncodeContext *s) } s->frame_size_code = i << 1; s->frame_size_min = 2 * ff_ac3_frame_size_tab[s->frame_size_code][s->bit_alloc.sr_code]; + s->num_blks_code = 0x3; + s->num_blocks = 6; } s->bit_rate = avctx->bit_rate; s->frame_size = s->frame_size_min; @@ -2109,23 +2134,9 @@ static av_cold int validate_options(AVCodecContext *avctx, AC3EncodeContext *s) if (s->cutoff > (s->sample_rate >> 1)) s->cutoff = s->sample_rate >> 1; - /* validate audio service type / channels combination */ - if ((avctx->audio_service_type == AV_AUDIO_SERVICE_TYPE_KARAOKE && - avctx->channels == 1) || - ((avctx->audio_service_type == AV_AUDIO_SERVICE_TYPE_COMMENTARY || - avctx->audio_service_type == AV_AUDIO_SERVICE_TYPE_EMERGENCY || - avctx->audio_service_type == AV_AUDIO_SERVICE_TYPE_VOICE_OVER) - && avctx->channels > 1)) { - av_log(avctx, AV_LOG_ERROR, "invalid audio service type for the " - "specified number of channels\n"); - return AVERROR(EINVAL); - } - - if (!s->eac3) { - ret = validate_metadata(avctx); - if (ret) - return ret; - } + ret = ff_ac3_validate_metadata(s); + if (ret) + return ret; s->rematrixing_enabled = s->options.stereo_rematrixing && (s->channel_mode == AC3_CHMODE_STEREO); @@ -2160,13 +2171,13 @@ static av_cold void set_bandwidth(AC3EncodeContext *s) /* set number of coefficients for each channel */ for (ch = 1; ch <= s->fbw_channels; ch++) { s->start_freq[ch] = 0; - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) + for (blk = 0; blk < s->num_blocks; blk++) s->blocks[blk].end_freq[ch] = s->bandwidth_code * 3 + 73; } /* LFE channel always has 7 coefs */ if (s->lfe_on) { s->start_freq[s->lfe_channel] = 0; - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) + for (blk = 0; blk < s->num_blocks; blk++) s->blocks[blk].end_freq[ch] = 7; } @@ -2203,46 +2214,48 @@ static av_cold void set_bandwidth(AC3EncodeContext *s) s->start_freq[CPL_CH] = cpl_start_band * 12 + 37; s->cpl_end_freq = cpl_end_band * 12 + 37; - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) + for (blk = 0; blk < s->num_blocks; blk++) s->blocks[blk].end_freq[CPL_CH] = s->cpl_end_freq; } } -static av_cold int allocate_buffers(AVCodecContext *avctx) +static av_cold int allocate_buffers(AC3EncodeContext *s) { + AVCodecContext *avctx = s->avctx; int blk, ch; - AC3EncodeContext *s = avctx->priv_data; int channels = s->channels + 1; /* includes coupling channel */ + int channel_blocks = channels * s->num_blocks; + int total_coefs = AC3_MAX_COEFS * channel_blocks; if (s->allocate_sample_buffers(s)) goto alloc_fail; - FF_ALLOC_OR_GOTO(avctx, s->bap_buffer, AC3_MAX_BLOCKS * channels * - AC3_MAX_COEFS * sizeof(*s->bap_buffer), alloc_fail); - FF_ALLOC_OR_GOTO(avctx, s->bap1_buffer, AC3_MAX_BLOCKS * channels * - AC3_MAX_COEFS * sizeof(*s->bap1_buffer), alloc_fail); - FF_ALLOCZ_OR_GOTO(avctx, s->mdct_coef_buffer, AC3_MAX_BLOCKS * channels * - AC3_MAX_COEFS * sizeof(*s->mdct_coef_buffer), alloc_fail); - FF_ALLOC_OR_GOTO(avctx, s->exp_buffer, AC3_MAX_BLOCKS * channels * - AC3_MAX_COEFS * sizeof(*s->exp_buffer), alloc_fail); - FF_ALLOC_OR_GOTO(avctx, 
s->grouped_exp_buffer, AC3_MAX_BLOCKS * channels * - 128 * sizeof(*s->grouped_exp_buffer), alloc_fail); - FF_ALLOC_OR_GOTO(avctx, s->psd_buffer, AC3_MAX_BLOCKS * channels * - AC3_MAX_COEFS * sizeof(*s->psd_buffer), alloc_fail); - FF_ALLOC_OR_GOTO(avctx, s->band_psd_buffer, AC3_MAX_BLOCKS * channels * - 64 * sizeof(*s->band_psd_buffer), alloc_fail); - FF_ALLOC_OR_GOTO(avctx, s->mask_buffer, AC3_MAX_BLOCKS * channels * - 64 * sizeof(*s->mask_buffer), alloc_fail); - FF_ALLOC_OR_GOTO(avctx, s->qmant_buffer, AC3_MAX_BLOCKS * channels * - AC3_MAX_COEFS * sizeof(*s->qmant_buffer), alloc_fail); + FF_ALLOC_OR_GOTO(avctx, s->bap_buffer, total_coefs * + sizeof(*s->bap_buffer), alloc_fail); + FF_ALLOC_OR_GOTO(avctx, s->bap1_buffer, total_coefs * + sizeof(*s->bap1_buffer), alloc_fail); + FF_ALLOCZ_OR_GOTO(avctx, s->mdct_coef_buffer, total_coefs * + sizeof(*s->mdct_coef_buffer), alloc_fail); + FF_ALLOC_OR_GOTO(avctx, s->exp_buffer, total_coefs * + sizeof(*s->exp_buffer), alloc_fail); + FF_ALLOC_OR_GOTO(avctx, s->grouped_exp_buffer, channel_blocks * 128 * + sizeof(*s->grouped_exp_buffer), alloc_fail); + FF_ALLOC_OR_GOTO(avctx, s->psd_buffer, total_coefs * + sizeof(*s->psd_buffer), alloc_fail); + FF_ALLOC_OR_GOTO(avctx, s->band_psd_buffer, channel_blocks * 64 * + sizeof(*s->band_psd_buffer), alloc_fail); + FF_ALLOC_OR_GOTO(avctx, s->mask_buffer, channel_blocks * 64 * + sizeof(*s->mask_buffer), alloc_fail); + FF_ALLOC_OR_GOTO(avctx, s->qmant_buffer, total_coefs * + sizeof(*s->qmant_buffer), alloc_fail); if (s->cpl_enabled) { - FF_ALLOC_OR_GOTO(avctx, s->cpl_coord_exp_buffer, AC3_MAX_BLOCKS * channels * - 16 * sizeof(*s->cpl_coord_exp_buffer), alloc_fail); - FF_ALLOC_OR_GOTO(avctx, s->cpl_coord_mant_buffer, AC3_MAX_BLOCKS * channels * - 16 * sizeof(*s->cpl_coord_mant_buffer), alloc_fail); + FF_ALLOC_OR_GOTO(avctx, s->cpl_coord_exp_buffer, channel_blocks * 16 * + sizeof(*s->cpl_coord_exp_buffer), alloc_fail); + FF_ALLOC_OR_GOTO(avctx, s->cpl_coord_mant_buffer, channel_blocks * 16 * + sizeof(*s->cpl_coord_mant_buffer), alloc_fail); } - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { + for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; FF_ALLOCZ_OR_GOTO(avctx, block->mdct_coef, channels * sizeof(*block->mdct_coef), alloc_fail); @@ -2278,23 +2291,23 @@ static av_cold int allocate_buffers(AVCodecContext *avctx) } /* arrangement: channel, block, coeff */ - block->exp[ch] = &s->exp_buffer [AC3_MAX_COEFS * (AC3_MAX_BLOCKS * ch + blk)]; - block->mdct_coef[ch] = &s->mdct_coef_buffer [AC3_MAX_COEFS * (AC3_MAX_BLOCKS * ch + blk)]; + block->exp[ch] = &s->exp_buffer [AC3_MAX_COEFS * (s->num_blocks * ch + blk)]; + block->mdct_coef[ch] = &s->mdct_coef_buffer [AC3_MAX_COEFS * (s->num_blocks * ch + blk)]; } } if (!s->fixed_point) { - FF_ALLOCZ_OR_GOTO(avctx, s->fixed_coef_buffer, AC3_MAX_BLOCKS * channels * - AC3_MAX_COEFS * sizeof(*s->fixed_coef_buffer), alloc_fail); - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { + FF_ALLOCZ_OR_GOTO(avctx, s->fixed_coef_buffer, total_coefs * + sizeof(*s->fixed_coef_buffer), alloc_fail); + for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; FF_ALLOCZ_OR_GOTO(avctx, block->fixed_coef, channels * sizeof(*block->fixed_coef), alloc_fail); for (ch = 0; ch < channels; ch++) - block->fixed_coef[ch] = &s->fixed_coef_buffer[AC3_MAX_COEFS * (AC3_MAX_BLOCKS * ch + blk)]; + block->fixed_coef[ch] = &s->fixed_coef_buffer[AC3_MAX_COEFS * (s->num_blocks * ch + blk)]; } } else { - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { + for (blk = 0; blk < 
s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; FF_ALLOCZ_OR_GOTO(avctx, block->fixed_coef, channels * sizeof(*block->fixed_coef), alloc_fail); @@ -2321,14 +2334,14 @@ av_cold int ff_ac3_encode_init(AVCodecContext *avctx) s->eac3 = avctx->codec_id == CODEC_ID_EAC3; - avctx->frame_size = AC3_FRAME_SIZE; - ff_ac3_common_init(); - ret = validate_options(avctx, s); + ret = validate_options(s); if (ret) return ret; + avctx->frame_size = AC3_BLOCK_SIZE * s->num_blocks; + s->bitstream_mode = avctx->audio_service_type; if (s->bitstream_mode == AV_AUDIO_SERVICE_TYPE_KARAOKE) s->bitstream_mode = 0x7; @@ -2348,24 +2361,11 @@ av_cold int ff_ac3_encode_init(AVCodecContext *avctx) if (CONFIG_AC3_FIXED_ENCODER && s->fixed_point) { s->mdct_end = ff_ac3_fixed_mdct_end; s->mdct_init = ff_ac3_fixed_mdct_init; - s->apply_window = ff_ac3_fixed_apply_window; - s->normalize_samples = ff_ac3_fixed_normalize_samples; - s->scale_coefficients = ff_ac3_fixed_scale_coefficients; s->allocate_sample_buffers = ff_ac3_fixed_allocate_sample_buffers; - s->deinterleave_input_samples = ff_ac3_fixed_deinterleave_input_samples; - s->apply_mdct = ff_ac3_fixed_apply_mdct; - s->apply_channel_coupling = ff_ac3_fixed_apply_channel_coupling; - s->compute_rematrixing_strategy = ff_ac3_fixed_compute_rematrixing_strategy; } else if (CONFIG_AC3_ENCODER || CONFIG_EAC3_ENCODER) { s->mdct_end = ff_ac3_float_mdct_end; s->mdct_init = ff_ac3_float_mdct_init; - s->apply_window = ff_ac3_float_apply_window; - s->scale_coefficients = ff_ac3_float_scale_coefficients; s->allocate_sample_buffers = ff_ac3_float_allocate_sample_buffers; - s->deinterleave_input_samples = ff_ac3_float_deinterleave_input_samples; - s->apply_mdct = ff_ac3_float_apply_mdct; - s->apply_channel_coupling = ff_ac3_float_apply_channel_coupling; - s->compute_rematrixing_strategy = ff_ac3_float_compute_rematrixing_strategy; } if (CONFIG_EAC3_ENCODER && s->eac3) s->output_frame_header = ff_eac3_output_frame_header; @@ -2378,12 +2378,11 @@ av_cold int ff_ac3_encode_init(AVCodecContext *avctx) bit_alloc_init(s); - FF_ALLOCZ_OR_GOTO(avctx, s->mdct, sizeof(AC3MDCTContext), init_fail); - ret = s->mdct_init(avctx, s->mdct, 9); + ret = s->mdct_init(s); if (ret) goto init_fail; - ret = allocate_buffers(avctx); + ret = allocate_buffers(s); if (ret) goto init_fail; @@ -2392,7 +2391,7 @@ av_cold int ff_ac3_encode_init(AVCodecContext *avctx) dsputil_init(&s->dsp, avctx); ff_ac3dsp_init(&s->ac3dsp, avctx->flags & CODEC_FLAG_BITEXACT); - dprint_options(avctx); + dprint_options(s); return 0; init_fail: diff --git a/libavcodec/ac3enc.h b/libavcodec/ac3enc.h index 2d04f26303..2647204d14 100644 --- a/libavcodec/ac3enc.h +++ b/libavcodec/ac3enc.h @@ -50,22 +50,22 @@ #if CONFIG_AC3ENC_FLOAT #define AC3_NAME(x) ff_ac3_float_ ## x #define MAC_COEF(d,a,b) ((d)+=(a)*(b)) +#define COEF_MIN (-16777215.0/16777216.0) +#define COEF_MAX ( 16777215.0/16777216.0) typedef float SampleType; typedef float CoefType; typedef float CoefSumType; #else #define AC3_NAME(x) ff_ac3_fixed_ ## x #define MAC_COEF(d,a,b) MAC64(d,a,b) +#define COEF_MIN -16777215 +#define COEF_MAX 16777215 typedef int16_t SampleType; typedef int32_t CoefType; typedef int64_t CoefSumType; #endif -typedef struct AC3MDCTContext { - const SampleType *window; ///< MDCT window function - FFTContext fft; ///< FFT context for MDCT calculation -} AC3MDCTContext; #if 0 /** * Encoding Options used by AVOption. 
@@ -92,6 +92,8 @@ typedef struct AC3EncOptions { int dolby_surround_ex_mode; int dolby_headphone_mode; int ad_converter_type; + int eac3_mixing_metadata; + int eac3_info_metadata; /* other encoding options */ int allow_per_frame_metadata; @@ -139,7 +141,8 @@ typedef struct AC3EncodeContext { PutBitContext pb; ///< bitstream writer context DSPContext dsp; AC3DSPContext ac3dsp; ///< AC-3 optimized functions - AC3MDCTContext *mdct; ///< MDCT context + FFTContext mdct; ///< FFT context for MDCT calculation + const SampleType *mdct_window; ///< MDCT window function array AC3Block blocks[AC3_MAX_BLOCKS]; ///< per-block info @@ -151,6 +154,8 @@ typedef struct AC3EncodeContext { int bit_rate; ///< target bit rate, in bits-per-second int sample_rate; ///< sampling frequency, in Hz + int num_blks_code; ///< number of blocks code (numblkscod) + int num_blocks; ///< number of blocks per frame int frame_size_min; ///< minimum frame size in case rounding is necessary int frame_size; ///< current frame size in bytes int frame_size_code; ///< frame size code (frmsizecod) @@ -212,31 +217,23 @@ typedef struct AC3EncodeContext { int16_t *psd_buffer; int16_t *band_psd_buffer; int16_t *mask_buffer; - uint16_t *qmant_buffer; + int16_t *qmant_buffer; uint8_t *cpl_coord_exp_buffer; uint8_t *cpl_coord_mant_buffer; uint8_t exp_strategy[AC3_MAX_CHANNELS][AC3_MAX_BLOCKS]; ///< exponent strategies + uint8_t frame_exp_strategy[AC3_MAX_CHANNELS]; ///< frame exp strategy index + int use_frame_exp_strategy; ///< indicates use of frame exp strategy uint8_t exp_ref_block[AC3_MAX_CHANNELS][AC3_MAX_BLOCKS]; ///< reference blocks for EXP_REUSE uint8_t *ref_bap [AC3_MAX_CHANNELS][AC3_MAX_BLOCKS]; ///< bit allocation pointers (bap) int ref_bap_set; ///< indicates if ref_bap pointers have been set /* fixed vs. float function pointers */ - void (*mdct_end)(AC3MDCTContext *mdct); - int (*mdct_init)(AVCodecContext *avctx, AC3MDCTContext *mdct, int nbits); - void (*apply_window)(DSPContext *dsp, SampleType *output, - const SampleType *input, const SampleType *window, - unsigned int len); - int (*normalize_samples)(struct AC3EncodeContext *s); - void (*scale_coefficients)(struct AC3EncodeContext *s); + void (*mdct_end)(struct AC3EncodeContext *s); + int (*mdct_init)(struct AC3EncodeContext *s); /* fixed vs. float templated function pointers */ int (*allocate_sample_buffers)(struct AC3EncodeContext *s); - void (*deinterleave_input_samples)(struct AC3EncodeContext *s, - const SampleType *samples); - void (*apply_mdct)(struct AC3EncodeContext *s); - void (*apply_channel_coupling)(struct AC3EncodeContext *s); - void (*compute_rematrixing_strategy)(struct AC3EncodeContext *s); /* AC-3 vs. 
E-AC-3 function pointers */ void (*output_frame_header)(struct AC3EncodeContext *s); @@ -245,52 +242,42 @@ typedef struct AC3EncodeContext { int ff_ac3_encode_init(AVCodecContext *avctx); -int ff_ac3_encode_frame(AVCodecContext *avctx, unsigned char *frame, - int buf_size, void *data); - int ff_ac3_encode_close(AVCodecContext *avctx); +int ff_ac3_validate_metadata(AC3EncodeContext *s); -/* prototypes for functions in ac3enc_fixed.c and ac3enc_float.c */ +void ff_ac3_adjust_frame_size(AC3EncodeContext *s); -void ff_ac3_fixed_mdct_end(AC3MDCTContext *mdct); -void ff_ac3_float_mdct_end(AC3MDCTContext *mdct); +void ff_ac3_compute_coupling_strategy(AC3EncodeContext *s); -int ff_ac3_fixed_mdct_init(AVCodecContext *avctx, AC3MDCTContext *mdct, - int nbits); -int ff_ac3_float_mdct_init(AVCodecContext *avctx, AC3MDCTContext *mdct, - int nbits); +void ff_ac3_apply_rematrixing(AC3EncodeContext *s); -void ff_ac3_fixed_apply_window(DSPContext *dsp, SampleType *output, - const SampleType *input, - const SampleType *window, unsigned int len); -void ff_ac3_float_apply_window(DSPContext *dsp, SampleType *output, - const SampleType *input, - const SampleType *window, unsigned int len); +void ff_ac3_process_exponents(AC3EncodeContext *s); -int ff_ac3_fixed_normalize_samples(AC3EncodeContext *s); +int ff_ac3_compute_bit_allocation(AC3EncodeContext *s); -void ff_ac3_fixed_scale_coefficients(AC3EncodeContext *s); -void ff_ac3_float_scale_coefficients(AC3EncodeContext *s); +void ff_ac3_quantize_mantissas(AC3EncodeContext *s); +void ff_ac3_output_frame(AC3EncodeContext *s, unsigned char *frame); -/* prototypes for functions in ac3enc_template.c */ -int ff_ac3_fixed_allocate_sample_buffers(AC3EncodeContext *s); -int ff_ac3_float_allocate_sample_buffers(AC3EncodeContext *s); +/* prototypes for functions in ac3enc_fixed.c and ac3enc_float.c */ -void ff_ac3_fixed_deinterleave_input_samples(AC3EncodeContext *s, - const SampleType *samples); -void ff_ac3_float_deinterleave_input_samples(AC3EncodeContext *s, - const SampleType *samples); +void ff_ac3_fixed_mdct_end(AC3EncodeContext *s); +void ff_ac3_float_mdct_end(AC3EncodeContext *s); -void ff_ac3_fixed_apply_mdct(AC3EncodeContext *s); -void ff_ac3_float_apply_mdct(AC3EncodeContext *s); +int ff_ac3_fixed_mdct_init(AC3EncodeContext *s); +int ff_ac3_float_mdct_init(AC3EncodeContext *s); -void ff_ac3_fixed_apply_channel_coupling(AC3EncodeContext *s); -void ff_ac3_float_apply_channel_coupling(AC3EncodeContext *s); -void ff_ac3_fixed_compute_rematrixing_strategy(AC3EncodeContext *s); -void ff_ac3_float_compute_rematrixing_strategy(AC3EncodeContext *s); +/* prototypes for functions in ac3enc_template.c */ + +int ff_ac3_fixed_allocate_sample_buffers(AC3EncodeContext *s); +int ff_ac3_float_allocate_sample_buffers(AC3EncodeContext *s); + +int ff_ac3_fixed_encode_frame(AVCodecContext *avctx, unsigned char *frame, + int buf_size, void *data); +int ff_ac3_float_encode_frame(AVCodecContext *avctx, unsigned char *frame, + int buf_size, void *data); #endif /* AVCODEC_AC3ENC_H */ diff --git a/libavcodec/ac3enc_fixed.c b/libavcodec/ac3enc_fixed.c index f4d447e3b2..c2e51552bc 100644 --- a/libavcodec/ac3enc_fixed.c +++ b/libavcodec/ac3enc_fixed.c @@ -32,8 +32,8 @@ #define AC3ENC_TYPE AC3ENC_TYPE_AC3_FIXED #include "ac3enc_opts_template.c" -static AVClass ac3enc_class = { "Fixed-Point AC-3 Encoder", av_default_item_name, - ac3fixed_options, LIBAVUTIL_VERSION_INT }; +static const AVClass ac3enc_class = { "Fixed-Point AC-3 Encoder", av_default_item_name, + ac3fixed_options, 
LIBAVUTIL_VERSION_INT }; #include "ac3enc_template.c" @@ -41,9 +41,9 @@ static AVClass ac3enc_class = { "Fixed-Point AC-3 Encoder", av_default_item_name /** * Finalize MDCT and free allocated memory. */ -av_cold void AC3_NAME(mdct_end)(AC3MDCTContext *mdct) +av_cold void AC3_NAME(mdct_end)(AC3EncodeContext *s) { - ff_mdct_end(&mdct->fft); + ff_mdct_end(&s->mdct); } @@ -51,11 +51,10 @@ av_cold void AC3_NAME(mdct_end)(AC3MDCTContext *mdct) * Initialize MDCT tables. * @param nbits log2(MDCT size) */ -av_cold int AC3_NAME(mdct_init)(AVCodecContext *avctx, AC3MDCTContext *mdct, - int nbits) +av_cold int AC3_NAME(mdct_init)(AC3EncodeContext *s) { - int ret = ff_mdct_init(&mdct->fft, nbits, 0, -1.0); - mdct->window = ff_ac3_window; + int ret = ff_mdct_init(&s->mdct, 9, 0, -1.0); + s->mdct_window = ff_ac3_window; return ret; } @@ -63,36 +62,23 @@ av_cold int AC3_NAME(mdct_init)(AVCodecContext *avctx, AC3MDCTContext *mdct, /** * Apply KBD window to input samples prior to MDCT. */ -void AC3_NAME(apply_window)(DSPContext *dsp, int16_t *output, - const int16_t *input, const int16_t *window, - unsigned int len) +static void apply_window(DSPContext *dsp, int16_t *output, const int16_t *input, + const int16_t *window, unsigned int len) { dsp->apply_window_int16(output, input, window, len); } /** - * Calculate the log2() of the maximum absolute value in an array. - * @param tab input array - * @param n number of values in the array - * @return log2(max(abs(tab[]))) - */ -static int log2_tab(AC3EncodeContext *s, int16_t *src, int len) -{ - int v = s->ac3dsp.ac3_max_msb_abs_int16(src, len); - return av_log2(v); -} - - -/** * Normalize the input samples to use the maximum available precision. * This assumes signed 16-bit input samples. * * @return exponent shift */ -int AC3_NAME(normalize_samples)(AC3EncodeContext *s) +static int normalize_samples(AC3EncodeContext *s) { - int v = 14 - log2_tab(s, s->windowed_samples, AC3_WINDOW_SIZE); + int v = s->ac3dsp.ac3_max_msb_abs_int16(s->windowed_samples, AC3_WINDOW_SIZE); + v = 14 - av_log2(v); if (v > 0) s->ac3dsp.ac3_lshift_int16(s->windowed_samples, AC3_WINDOW_SIZE, v); /* +6 to right-shift from 31-bit to 25-bit */ @@ -103,11 +89,11 @@ int AC3_NAME(normalize_samples)(AC3EncodeContext *s) /** * Scale MDCT coefficients to 25-bit signed fixed-point. */ -void AC3_NAME(scale_coefficients)(AC3EncodeContext *s) +static void scale_coefficients(AC3EncodeContext *s) { int blk, ch; - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { + for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; for (ch = 1; ch <= s->channels; ch++) { s->ac3dsp.ac3_rshift_int32(block->mdct_coef[ch], AC3_MAX_COEFS, @@ -117,6 +103,15 @@ void AC3_NAME(scale_coefficients)(AC3EncodeContext *s) } +/** + * Clip MDCT coefficients to allowable range. 
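The fixed-point path above normalizes each windowed block so the largest sample sits just below the 16-bit limit before the MDCT, and remembers the shift so exponents can be corrected afterwards (normalize_samples() also folds in the constant +6 noted in its comment for the 31-bit to 25-bit coefficient scaling). A standalone sketch of the normalization idea, with scalar code in place of the ac3dsp routines and ilog2() standing in for av_log2():

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static int ilog2(unsigned v)            /* floor(log2(v)) for v > 0 */
{
    int n = -1;
    while (v) { n++; v >>= 1; }
    return n;
}

static int normalize(int16_t *smp, int len)
{
    int max = 0, shift;

    for (int i = 0; i < len; i++) {
        int v = abs(smp[i]);
        if (v > max)
            max = v;
    }
    /* how far can everything shift left and still fit in 15 magnitude bits? */
    shift = max ? 14 - ilog2(max) : 14;
    if (shift > 0)
        for (int i = 0; i < len; i++)
            smp[i] <<= shift;
    return shift;                       /* caller folds this into the exponents */
}

int main(void)
{
    int16_t block[4] = { 1000, -750, 400, -1000 };
    int shift = normalize(block, 4);
    printf("shift=%d, first sample now %d\n", shift, block[0]);
    return 0;
}
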
+ */ +static void clip_coefficients(DSPContext *dsp, int32_t *coef, unsigned int len) +{ + dsp->vector_clip_int32(coef, coef, COEF_MIN, COEF_MAX, len); +} + + static av_cold int ac3_fixed_encode_init(AVCodecContext *avctx) { AC3EncodeContext *s = avctx->priv_data; @@ -126,14 +121,13 @@ static av_cold int ac3_fixed_encode_init(AVCodecContext *avctx) AVCodec ff_ac3_fixed_encoder = { - "ac3_fixed", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_AC3, - sizeof(AC3EncodeContext), - ac3_fixed_encode_init, - ff_ac3_encode_frame, - ff_ac3_encode_close, - NULL, + .name = "ac3_fixed", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_AC3, + .priv_data_size = sizeof(AC3EncodeContext), + .init = ac3_fixed_encode_init, + .encode = ff_ac3_fixed_encode_frame, + .close = ff_ac3_encode_close, .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"), .priv_class = &ac3enc_class, diff --git a/libavcodec/ac3enc_float.c b/libavcodec/ac3enc_float.c index 9e798106f3..8845518fe0 100644 --- a/libavcodec/ac3enc_float.c +++ b/libavcodec/ac3enc_float.c @@ -35,8 +35,8 @@ #if CONFIG_AC3_ENCODER #define AC3ENC_TYPE AC3ENC_TYPE_AC3 #include "ac3enc_opts_template.c" -static AVClass ac3enc_class = { "AC-3 Encoder", av_default_item_name, - ac3_options, LIBAVUTIL_VERSION_INT }; +static const AVClass ac3enc_class = { "AC-3 Encoder", av_default_item_name, + ac3_options, LIBAVUTIL_VERSION_INT }; #endif #include "ac3enc_template.c" @@ -45,10 +45,10 @@ static AVClass ac3enc_class = { "AC-3 Encoder", av_default_item_name, /** * Finalize MDCT and free allocated memory. */ -av_cold void ff_ac3_float_mdct_end(AC3MDCTContext *mdct) +av_cold void ff_ac3_float_mdct_end(AC3EncodeContext *s) { - ff_mdct_end(&mdct->fft); - av_freep(&mdct->window); + ff_mdct_end(&s->mdct); + av_freep(&s->mdct_window); } @@ -56,62 +56,78 @@ av_cold void ff_ac3_float_mdct_end(AC3MDCTContext *mdct) * Initialize MDCT tables. * @param nbits log2(MDCT size) */ -av_cold int ff_ac3_float_mdct_init(AVCodecContext *avctx, AC3MDCTContext *mdct, - int nbits) +av_cold int ff_ac3_float_mdct_init(AC3EncodeContext *s) { float *window; int i, n, n2; - n = 1 << nbits; + n = 1 << 9; n2 = n >> 1; window = av_malloc(n * sizeof(*window)); if (!window) { - av_log(avctx, AV_LOG_ERROR, "Cannot allocate memory.\n"); + av_log(s->avctx, AV_LOG_ERROR, "Cannot allocate memory.\n"); return AVERROR(ENOMEM); } ff_kbd_window_init(window, 5.0, n2); for (i = 0; i < n2; i++) window[n-1-i] = window[i]; - mdct->window = window; + s->mdct_window = window; - return ff_mdct_init(&mdct->fft, nbits, 0, -2.0 / n); + return ff_mdct_init(&s->mdct, 9, 0, -2.0 / n); } /** * Apply KBD window to input samples prior to MDCT. */ -void ff_ac3_float_apply_window(DSPContext *dsp, float *output, - const float *input, const float *window, - unsigned int len) +static void apply_window(DSPContext *dsp, float *output, const float *input, + const float *window, unsigned int len) { dsp->vector_fmul(output, input, window, len); } /** + * Normalize the input samples. + * Not needed for the floating-point encoder. + */ +static int normalize_samples(AC3EncodeContext *s) +{ + return 0; +} + + +/** * Scale MDCT coefficients from float to 24-bit fixed-point. 
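For the float path, scale_coefficients() below hands the MDCT output to the ac3dsp float_to_fixed24 routine. What that call boils down to, as a scalar sketch (the real routine is the optimized DSP version, and the function name here is only illustrative): coefficients lie in (-1.0, 1.0) after the MDCT and clipping, and the bit allocation and quantization work on 24-bit fixed-point, so each value is multiplied by 2^24 and rounded.

#include <stdio.h>
#include <stdint.h>
#include <math.h>

static void float_to_fixed24_scalar(int32_t *dst, const float *src, unsigned len)
{
    for (unsigned i = 0; i < len; i++)
        dst[i] = lrintf(src[i] * 16777216.0f);   /* * 2^24, round to nearest */
}

int main(void)
{
    const float coef[3] = { 0.5f, -0.25f, 16777215.0f / 16777216.0f };
    int32_t fixed[3];
    float_to_fixed24_scalar(fixed, coef, 3);
    for (int i = 0; i < 3; i++)
        printf("%f -> %d\n", coef[i], fixed[i]);
    return 0;
}
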
*/ -void ff_ac3_float_scale_coefficients(AC3EncodeContext *s) +static void scale_coefficients(AC3EncodeContext *s) { - int chan_size = AC3_MAX_COEFS * AC3_MAX_BLOCKS; + int chan_size = AC3_MAX_COEFS * s->num_blocks; s->ac3dsp.float_to_fixed24(s->fixed_coef_buffer + chan_size, s->mdct_coef_buffer + chan_size, chan_size * s->channels); } +/** + * Clip MDCT coefficients to allowable range. + */ +static void clip_coefficients(DSPContext *dsp, float *coef, unsigned int len) +{ + dsp->vector_clipf(coef, coef, COEF_MIN, COEF_MAX, len); +} + + #if CONFIG_AC3_ENCODER AVCodec ff_ac3_float_encoder = { - "ac3_float", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_AC3, - sizeof(AC3EncodeContext), - ff_ac3_encode_init, - ff_ac3_encode_frame, - ff_ac3_encode_close, - NULL, + .name = "ac3 float", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_AC3, + .priv_data_size = sizeof(AC3EncodeContext), + .init = ff_ac3_encode_init, + .encode = ff_ac3_float_encode_frame, + .close = ff_ac3_encode_close, .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"), .priv_class = &ac3enc_class, diff --git a/libavcodec/ac3enc_opts_template.c b/libavcodec/ac3enc_opts_template.c index 39138a1083..7c0eead011 100644 --- a/libavcodec/ac3enc_opts_template.c +++ b/libavcodec/ac3enc_opts_template.c @@ -29,12 +29,13 @@ static const AVOption ac3_options[] = { #else /* AC3ENC_TYPE_EAC3 */ static const AVOption eac3_options[] = { #endif -#if AC3ENC_TYPE != AC3ENC_TYPE_EAC3 /* Metadata Options */ {"per_frame_metadata", "Allow Changing Metadata Per-Frame", OFFSET(allow_per_frame_metadata), FF_OPT_TYPE_INT, {.dbl = 0 }, 0, 1, AC3ENC_PARAM}, -/* downmix levels */ +#if AC3ENC_TYPE != AC3ENC_TYPE_EAC3 +/* AC-3 downmix levels */ {"center_mixlev", "Center Mix Level", OFFSET(center_mix_level), FF_OPT_TYPE_FLOAT, {.dbl = LEVEL_MINUS_4POINT5DB }, 0.0, 1.0, AC3ENC_PARAM}, {"surround_mixlev", "Surround Mix Level", OFFSET(surround_mix_level), FF_OPT_TYPE_FLOAT, {.dbl = LEVEL_MINUS_6DB }, 0.0, 1.0, AC3ENC_PARAM}, +#endif /* audio production information */ {"mixing_level", "Mixing Level", OFFSET(mixing_level), FF_OPT_TYPE_INT, {.dbl = -1 }, -1, 111, AC3ENC_PARAM}, {"room_type", "Room Type", OFFSET(room_type), FF_OPT_TYPE_INT, {.dbl = -1 }, -1, 2, AC3ENC_PARAM, "room_type"}, @@ -42,15 +43,13 @@ static const AVOption eac3_options[] = { {"large", "Large Room", 0, FF_OPT_TYPE_CONST, {.dbl = 1 }, INT_MIN, INT_MAX, AC3ENC_PARAM, "room_type"}, {"small", "Small Room", 0, FF_OPT_TYPE_CONST, {.dbl = 2 }, INT_MIN, INT_MAX, AC3ENC_PARAM, "room_type"}, /* other metadata options */ -{"copyright", "Copyright Bit", OFFSET(copyright), FF_OPT_TYPE_INT, {.dbl = 0 }, 0, 1, AC3ENC_PARAM}, -#endif +{"copyright", "Copyright Bit", OFFSET(copyright), FF_OPT_TYPE_INT, {.dbl = -1 }, -1, 1, AC3ENC_PARAM}, {"dialnorm", "Dialogue Level (dB)", OFFSET(dialogue_level), FF_OPT_TYPE_INT, {.dbl = -31 }, -31, -1, AC3ENC_PARAM}, -#if AC3ENC_TYPE != AC3ENC_TYPE_EAC3 -{"dsur_mode", "Dolby Surround Mode", OFFSET(dolby_surround_mode), FF_OPT_TYPE_INT, {.dbl = 0 }, 0, 2, AC3ENC_PARAM, "dsur_mode"}, +{"dsur_mode", "Dolby Surround Mode", OFFSET(dolby_surround_mode), FF_OPT_TYPE_INT, {.dbl = -1 }, -1, 2, AC3ENC_PARAM, "dsur_mode"}, {"notindicated", "Not Indicated (default)", 0, FF_OPT_TYPE_CONST, {.dbl = 0 }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsur_mode"}, {"on", "Dolby Surround Encoded", 0, FF_OPT_TYPE_CONST, {.dbl = 1 }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsur_mode"}, {"off", "Not Dolby Surround Encoded", 0, FF_OPT_TYPE_CONST, 
{.dbl = 2 }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsur_mode"}, -{"original", "Original Bit Stream", OFFSET(original), FF_OPT_TYPE_INT, {.dbl = 1 }, 0, 1, AC3ENC_PARAM}, +{"original", "Original Bit Stream", OFFSET(original), FF_OPT_TYPE_INT, {.dbl = -1 }, -1, 1, AC3ENC_PARAM}, /* extended bitstream information */ {"dmix_mode", "Preferred Stereo Downmix Mode", OFFSET(preferred_stereo_downmix), FF_OPT_TYPE_INT, {.dbl = -1 }, -1, 2, AC3ENC_PARAM, "dmix_mode"}, {"notindicated", "Not Indicated (default)", 0, FF_OPT_TYPE_CONST, {.dbl = 0 }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dmix_mode"}, @@ -71,7 +70,6 @@ static const AVOption eac3_options[] = { {"ad_conv_type", "A/D Converter Type", OFFSET(ad_converter_type), FF_OPT_TYPE_INT, {.dbl = -1 }, -1, 1, AC3ENC_PARAM, "ad_conv_type"}, {"standard", "Standard (default)", 0, FF_OPT_TYPE_CONST, {.dbl = 0 }, INT_MIN, INT_MAX, AC3ENC_PARAM, "ad_conv_type"}, {"hdcd", "HDCD", 0, FF_OPT_TYPE_CONST, {.dbl = 1 }, INT_MIN, INT_MAX, AC3ENC_PARAM, "ad_conv_type"}, -#endif /* Other Encoding Options */ {"stereo_rematrixing", "Stereo Rematrixing", OFFSET(stereo_rematrixing), FF_OPT_TYPE_INT, {.dbl = 1 }, 0, 1, AC3ENC_PARAM}, #if AC3ENC_TYPE != AC3ENC_TYPE_AC3_FIXED diff --git a/libavcodec/ac3enc_template.c b/libavcodec/ac3enc_template.c index f6248a82c9..103e7b1a96 100644 --- a/libavcodec/ac3enc_template.c +++ b/libavcodec/ac3enc_template.c @@ -31,6 +31,19 @@ #include "ac3enc.h" +/* prototypes for static functions in ac3enc_fixed.c and ac3enc_float.c */ + +static void scale_coefficients(AC3EncodeContext *s); + +static void apply_window(DSPContext *dsp, SampleType *output, + const SampleType *input, const SampleType *window, + unsigned int len); + +static int normalize_samples(AC3EncodeContext *s); + +static void clip_coefficients(DSPContext *dsp, CoefType *coef, unsigned int len); + + int AC3_NAME(allocate_sample_buffers)(AC3EncodeContext *s) { int ch; @@ -55,8 +68,8 @@ alloc_fail: * Deinterleave input samples. * Channels are reordered from Libav's default order to AC-3 order. */ -void AC3_NAME(deinterleave_input_samples)(AC3EncodeContext *s, - const SampleType *samples) +static void deinterleave_input_samples(AC3EncodeContext *s, + const SampleType *samples) { int ch, i; @@ -66,13 +79,13 @@ void AC3_NAME(deinterleave_input_samples)(AC3EncodeContext *s, int sinc; /* copy last 256 samples of previous frame to the start of the current frame */ - memcpy(&s->planar_samples[ch][0], &s->planar_samples[ch][AC3_FRAME_SIZE], + memcpy(&s->planar_samples[ch][0], &s->planar_samples[ch][AC3_BLOCK_SIZE * s->num_blocks], AC3_BLOCK_SIZE * sizeof(s->planar_samples[0][0])); /* deinterleave */ sinc = s->channels; sptr = samples + s->channel_map[ch]; - for (i = AC3_BLOCK_SIZE; i < AC3_FRAME_SIZE+AC3_BLOCK_SIZE; i++) { + for (i = AC3_BLOCK_SIZE; i < AC3_BLOCK_SIZE * (s->num_blocks + 1); i++) { s->planar_samples[ch][i] = *sptr; sptr += sinc; } @@ -85,23 +98,23 @@ void AC3_NAME(deinterleave_input_samples)(AC3EncodeContext *s, * This applies the KBD window and normalizes the input to reduce precision * loss due to fixed-point calculations. 
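(A note on the AC3_MAX_BLOCKS -> s->num_blocks substitutions throughout this template: an AC-3 frame always carries 6 audio blocks of 256 samples, i.e. 1536 samples, while E-AC-3 may carry 1, 2, 3 or 6 blocks per frame, so the per-frame sample count has to come from the encoder context rather than the compile-time maximum. Sketched in isolation, with the constant written out:)

    /* Illustrative only: per-frame sample count as a function of the number
     * of 256-sample audio blocks. AC-3 always uses 6 blocks; E-AC-3 allows
     * 1, 2, 3 or 6, so frame_sample_count(6) == 1536 for plain AC-3. */
    static int frame_sample_count(int num_blocks)
    {
        return num_blocks * 256;   /* AC3_BLOCK_SIZE */
    }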
*/ -void AC3_NAME(apply_mdct)(AC3EncodeContext *s) +static void apply_mdct(AC3EncodeContext *s) { int blk, ch; for (ch = 0; ch < s->channels; ch++) { - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { + for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; const SampleType *input_samples = &s->planar_samples[ch][blk * AC3_BLOCK_SIZE]; - s->apply_window(&s->dsp, s->windowed_samples, input_samples, - s->mdct->window, AC3_WINDOW_SIZE); + apply_window(&s->dsp, s->windowed_samples, input_samples, + s->mdct_window, AC3_WINDOW_SIZE); if (s->fixed_point) - block->coeff_shift[ch+1] = s->normalize_samples(s); + block->coeff_shift[ch+1] = normalize_samples(s); - s->mdct->fft.mdct_calcw(&s->mdct->fft, block->mdct_coef[ch+1], - s->windowed_samples); + s->mdct.mdct_calcw(&s->mdct, block->mdct_coef[ch+1], + s->windowed_samples); } } } @@ -127,7 +140,7 @@ static inline float calc_cpl_coord(float energy_ch, float energy_cpl) * adaptive coupling strategy were to be implemented it might be useful * at that time to use coupling for the fixed-point encoder as well. */ -void AC3_NAME(apply_channel_coupling)(AC3EncodeContext *s) +static void apply_channel_coupling(AC3EncodeContext *s) { #if CONFIG_AC3ENC_FLOAT LOCAL_ALIGNED_16(float, cpl_coords, [AC3_MAX_BLOCKS], [AC3_MAX_CHANNELS][16]); @@ -146,7 +159,7 @@ void AC3_NAME(apply_channel_coupling)(AC3EncodeContext *s) cpl_start = FFMIN(256, cpl_start + num_cpl_coefs) - num_cpl_coefs; /* calculate coupling channel from fbw channels */ - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { + for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; CoefType *cpl_coef = &block->mdct_coef[CPL_CH][cpl_start]; if (!block->cpl_in_use) @@ -160,8 +173,8 @@ void AC3_NAME(apply_channel_coupling)(AC3EncodeContext *s) cpl_coef[i] += ch_coef[i]; } - /* coefficients must be clipped to +/- 1.0 in order to be encoded */ - s->dsp.vector_clipf(cpl_coef, cpl_coef, -1.0f, 1.0f, num_cpl_coefs); + /* coefficients must be clipped in order to be encoded */ + clip_coefficients(&s->dsp, cpl_coef, num_cpl_coefs); /* scale coupling coefficients from float to 24-bit fixed-point */ s->ac3dsp.float_to_fixed24(&block->fixed_coef[CPL_CH][cpl_start], @@ -175,7 +188,7 @@ void AC3_NAME(apply_channel_coupling)(AC3EncodeContext *s) while (i < s->cpl_end_freq) { int band_size = s->cpl_band_sizes[bnd]; for (ch = CPL_CH; ch <= s->fbw_channels; ch++) { - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { + for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; if (!block->cpl_in_use || (ch > CPL_CH && !block->channel_in_cpl[ch])) continue; @@ -190,7 +203,7 @@ void AC3_NAME(apply_channel_coupling)(AC3EncodeContext *s) } /* determine which blocks to send new coupling coordinates for */ - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { + for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; AC3Block *block0 = blk ? 
&s->blocks[blk-1] : NULL; int new_coords = 0; @@ -248,7 +261,7 @@ void AC3_NAME(apply_channel_coupling)(AC3EncodeContext *s) coordinates in successive blocks */ for (bnd = 0; bnd < s->num_cpl_bands; bnd++) { blk = 0; - while (blk < AC3_MAX_BLOCKS) { + while (blk < s->num_blocks) { int blk1; CoefSumType energy_cpl; AC3Block *block = &s->blocks[blk]; @@ -260,7 +273,7 @@ void AC3_NAME(apply_channel_coupling)(AC3EncodeContext *s) energy_cpl = energy[blk][CPL_CH][bnd]; blk1 = blk+1; - while (!s->blocks[blk1].new_cpl_coords && blk1 < AC3_MAX_BLOCKS) { + while (!s->blocks[blk1].new_cpl_coords && blk1 < s->num_blocks) { if (s->blocks[blk1].cpl_in_use) energy_cpl += energy[blk1][CPL_CH][bnd]; blk1++; @@ -272,7 +285,7 @@ void AC3_NAME(apply_channel_coupling)(AC3EncodeContext *s) continue; energy_ch = energy[blk][ch][bnd]; blk1 = blk+1; - while (!s->blocks[blk1].new_cpl_coords && blk1 < AC3_MAX_BLOCKS) { + while (!s->blocks[blk1].new_cpl_coords && blk1 < s->num_blocks) { if (s->blocks[blk1].cpl_in_use) energy_ch += energy[blk1][ch][bnd]; blk1++; @@ -284,11 +297,12 @@ void AC3_NAME(apply_channel_coupling)(AC3EncodeContext *s) } /* calculate exponents/mantissas for coupling coordinates */ - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { + for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; if (!block->cpl_in_use || !block->new_cpl_coords) continue; + clip_coefficients(&s->dsp, cpl_coords[blk][1], s->fbw_channels * 16); s->ac3dsp.float_to_fixed24(fixed_cpl_coords[blk][1], cpl_coords[blk][1], s->fbw_channels * 16); @@ -339,7 +353,7 @@ void AC3_NAME(apply_channel_coupling)(AC3EncodeContext *s) /** * Determine rematrixing flags for each block and band. */ -void AC3_NAME(compute_rematrixing_strategy)(AC3EncodeContext *s) +static void compute_rematrixing_strategy(AC3EncodeContext *s) { int nb_coefs; int blk, bnd, i; @@ -348,7 +362,7 @@ void AC3_NAME(compute_rematrixing_strategy)(AC3EncodeContext *s) if (s->channel_mode != AC3_CHMODE_STEREO) return; - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { + for (blk = 0; blk < s->num_blocks; blk++) { block = &s->blocks[blk]; block->new_rematrixing_strategy = !blk; @@ -397,3 +411,61 @@ void AC3_NAME(compute_rematrixing_strategy)(AC3EncodeContext *s) block0 = block; } } + + +/** + * Encode a single AC-3 frame. + */ +int AC3_NAME(encode_frame)(AVCodecContext *avctx, unsigned char *frame, + int buf_size, void *data) +{ + AC3EncodeContext *s = avctx->priv_data; + const SampleType *samples = data; + int ret; + + if (s->options.allow_per_frame_metadata) { + ret = ff_ac3_validate_metadata(s); + if (ret) + return ret; + } + + if (s->bit_alloc.sr_code == 1 || s->eac3) + ff_ac3_adjust_frame_size(s); + + deinterleave_input_samples(s, samples); + + apply_mdct(s); + + if (s->fixed_point) + scale_coefficients(s); + + clip_coefficients(&s->dsp, s->blocks[0].mdct_coef[1], + AC3_MAX_COEFS * s->num_blocks * s->channels); + + s->cpl_on = s->cpl_enabled; + ff_ac3_compute_coupling_strategy(s); + + if (s->cpl_on) + apply_channel_coupling(s); + + compute_rematrixing_strategy(s); + + if (!s->fixed_point) + scale_coefficients(s); + + ff_ac3_apply_rematrixing(s); + + ff_ac3_process_exponents(s); + + ret = ff_ac3_compute_bit_allocation(s); + if (ret) { + av_log(avctx, AV_LOG_ERROR, "Bit allocation failed. 
Try increasing the bitrate.\n"); + return ret; + } + + ff_ac3_quantize_mantissas(s); + + ff_ac3_output_frame(s, frame); + + return s->frame_size; +} diff --git a/libavcodec/acelp_pitch_delay.c b/libavcodec/acelp_pitch_delay.c index cddf7262b6..c2155a5c0c 100644 --- a/libavcodec/acelp_pitch_delay.c +++ b/libavcodec/acelp_pitch_delay.c @@ -20,6 +20,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +#include "libavutil/mathematics.h" #include "avcodec.h" #include "dsputil.h" #include "acelp_pitch_delay.h" @@ -104,20 +105,9 @@ int16_t ff_acelp_decode_gain_code( for(i=0; i<ma_pred_order; i++) mr_energy += quant_energy[i] * ma_prediction_coeff[i]; -#ifdef G729_BITEXACT - mr_energy += (((-6165LL * ff_log2(dsp->scalarproduct_int16(fc_v, fc_v, subframe_size, 0))) >> 3) & ~0x3ff); - - mr_energy = (5439 * (mr_energy >> 15)) >> 8; // (0.15) = (0.15) * (7.23) - - return bidir_sal( - ((ff_exp2(mr_energy & 0x7fff) + 16) >> 5) * (gain_corr_factor >> 1), - (mr_energy >> 15) - 25 - ); -#else mr_energy = gain_corr_factor * exp(M_LN10 / (20 << 23) * mr_energy) / sqrt(dsp->scalarproduct_int16(fc_v, fc_v, subframe_size, 0)); return mr_energy >> 12; -#endif } float ff_amr_set_fixed_gain(float fixed_gain_factor, float fixed_mean_energy, diff --git a/libavcodec/acelp_pitch_delay.h b/libavcodec/acelp_pitch_delay.h index ce06bc2539..72977f1f49 100644 --- a/libavcodec/acelp_pitch_delay.h +++ b/libavcodec/acelp_pitch_delay.h @@ -30,11 +30,11 @@ #define PITCH_DELAY_MAX 143 /** - * \brief Decode pitch delay of the first subframe encoded by 8 bits with 1/3 + * @brief Decode pitch delay of the first subframe encoded by 8 bits with 1/3 * resolution. - * \param ac_index adaptive codebook index (8 bits) + * @param ac_index adaptive codebook index (8 bits) * - * \return pitch delay in 1/3 units + * @return pitch delay in 1/3 units * * Pitch delay is coded: * with 1/3 resolution, 19 < pitch_delay < 85 @@ -43,18 +43,18 @@ int ff_acelp_decode_8bit_to_1st_delay3(int ac_index); /** - * \brief Decode pitch delay of the second subframe encoded by 5 or 6 bits + * @brief Decode pitch delay of the second subframe encoded by 5 or 6 bits * with 1/3 precision. - * \param ac_index adaptive codebook index (5 or 6 bits) - * \param pitch_delay_min lower bound (integer) of pitch delay interval + * @param ac_index adaptive codebook index (5 or 6 bits) + * @param pitch_delay_min lower bound (integer) of pitch delay interval * for second subframe * - * \return pitch delay in 1/3 units + * @return pitch delay in 1/3 units * * Pitch delay is coded: * with 1/3 resolution, -6 < pitch_delay - int(prev_pitch_delay) < 5 * - * \remark The routine is used in G.729 @@8k, AMR @@10.2k, AMR @@7.95k, + * @remark The routine is used in G.729 @@8k, AMR @@10.2k, AMR @@7.95k, * AMR @@7.4k for the second subframe. */ int ff_acelp_decode_5_6_bit_to_2nd_delay3( @@ -62,19 +62,19 @@ int ff_acelp_decode_5_6_bit_to_2nd_delay3( int pitch_delay_min); /** - * \brief Decode pitch delay with 1/3 precision. - * \param ac_index adaptive codebook index (4 bits) - * \param pitch_delay_min lower bound (integer) of pitch delay interval for + * @brief Decode pitch delay with 1/3 precision. 
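(Since these helpers return the pitch delay in 1/3-sample units, a caller typically splits the result into an integer lag and a fractional phase for the interpolation filter. A minimal sketch, with an illustrative helper name:)

    /* Split a pitch delay expressed in 1/3-sample units into its integer
     * part and a fractional part of 0, 1 or 2 thirds of a sample.
     * Illustrative only. */
    static void split_delay3(int delay3, int *lag_int, int *lag_frac)
    {
        *lag_int  = delay3 / 3;
        *lag_frac = delay3 - 3 * (*lag_int);
    }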
+ * @param ac_index adaptive codebook index (4 bits) + * @param pitch_delay_min lower bound (integer) of pitch delay interval for * second subframe * - * \return pitch delay in 1/3 units + * @return pitch delay in 1/3 units * * Pitch delay is coded: * integers only, -6 < pitch_delay - int(prev_pitch_delay) <= -2 * with 1/3 resolution, -2 < pitch_delay - int(prev_pitch_delay) < 1 * integers only, 1 <= pitch_delay - int(prev_pitch_delay) < 5 * - * \remark The routine is used in G.729 @@6.4k, AMR @@6.7k, AMR @@5.9k, + * @remark The routine is used in G.729 @@6.4k, AMR @@6.7k, AMR @@5.9k, * AMR @@5.15k, AMR @@4.75k for the second subframe. */ int ff_acelp_decode_4bit_to_2nd_delay3( @@ -82,44 +82,44 @@ int ff_acelp_decode_4bit_to_2nd_delay3( int pitch_delay_min); /** - * \brief Decode pitch delay of the first subframe encoded by 9 bits + * @brief Decode pitch delay of the first subframe encoded by 9 bits * with 1/6 precision. - * \param ac_index adaptive codebook index (9 bits) + * @param ac_index adaptive codebook index (9 bits) * - * \return pitch delay in 1/6 units + * @return pitch delay in 1/6 units * * Pitch delay is coded: * with 1/6 resolution, 17 < pitch_delay < 95 * integers only, 95 <= pitch_delay <= 143 * - * \remark The routine is used in AMR @@12.2k for the first and third subframes. + * @remark The routine is used in AMR @@12.2k for the first and third subframes. */ int ff_acelp_decode_9bit_to_1st_delay6(int ac_index); /** - * \brief Decode pitch delay of the second subframe encoded by 6 bits + * @brief Decode pitch delay of the second subframe encoded by 6 bits * with 1/6 precision. - * \param ac_index adaptive codebook index (6 bits) - * \param pitch_delay_min lower bound (integer) of pitch delay interval for + * @param ac_index adaptive codebook index (6 bits) + * @param pitch_delay_min lower bound (integer) of pitch delay interval for * second subframe * - * \return pitch delay in 1/6 units + * @return pitch delay in 1/6 units * * Pitch delay is coded: * with 1/6 resolution, -6 < pitch_delay - int(prev_pitch_delay) < 5 * - * \remark The routine is used in AMR @@12.2k for the second and fourth subframes. + * @remark The routine is used in AMR @@12.2k for the second and fourth subframes. */ int ff_acelp_decode_6bit_to_2nd_delay6( int ac_index, int pitch_delay_min); /** - * \brief Update past quantized energies - * \param[in,out] quant_energy past quantized energies (5.10) - * \param gain_corr_factor gain correction factor - * \param log2_ma_pred_order log2() of MA prediction order - * \param erasure frame erasure flag + * @brief Update past quantized energies + * @param[in,out] quant_energy past quantized energies (5.10) + * @param gain_corr_factor gain correction factor + * @param log2_ma_pred_order log2() of MA prediction order + * @param erasure frame erasure flag * * If frame erasure flag is not equal to zero, memory is updated with * averaged energy, attenuated by 4dB: @@ -128,7 +128,7 @@ int ff_acelp_decode_6bit_to_2nd_delay6( * In normal mode memory is updated with * Er - Ep = 20 * log10(gain_corr_factor) * - * \remark The routine is used in G.729 and AMR (all modes). + * @remark The routine is used in G.729 and AMR (all modes). */ void ff_acelp_update_past_gain( int16_t* quant_energy, @@ -137,16 +137,16 @@ void ff_acelp_update_past_gain( int erasure); /** - * \brief Decode the adaptive codebook gain and add + * @brief Decode the adaptive codebook gain and add * correction (4.1.5 and 3.9.1 of G.729). 
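(The floating-point path kept in acelp_pitch_delay.c above turns the predicted energy, a dB value in Q23 fixed point, into a linear gain via 10^(E/20) = exp(E * ln10 / 20). A hedged sketch of just that conversion, with an illustrative name and assuming M_LN10 from <math.h>:)

    #include <math.h>
    #include <stdint.h>

    /* Convert an energy given in dB with 23 fractional bits (Q23) into a
     * linear gain factor: 10^(E/20) == exp(M_LN10/20 * E). Illustrative only. */
    static double db_q23_to_linear(int64_t energy_db_q23)
    {
        return exp(M_LN10 / (20.0 * (double)(1 << 23)) * (double)energy_db_q23);
    }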
- * \param dsp initialized dsputil context - * \param gain_corr_factor gain correction factor (2.13) - * \param fc_v fixed-codebook vector (2.13) - * \param mr_energy mean innovation energy and fixed-point correction (7.13) - * \param[in,out] quant_energy past quantized energies (5.10) - * \param subframe_size length of subframe + * @param dsp initialized dsputil context + * @param gain_corr_factor gain correction factor (2.13) + * @param fc_v fixed-codebook vector (2.13) + * @param mr_energy mean innovation energy and fixed-point correction (7.13) + * @param[in,out] quant_energy past quantized energies (5.10) + * @param subframe_size length of subframe * - * \return quantized fixed-codebook gain (14.1) + * @return quantized fixed-codebook gain (14.1) * * The routine implements equations 69, 66 and 71 of the G.729 specification (3.9.1) * @@ -205,7 +205,7 @@ void ff_acelp_update_past_gain( * * mr_energy = Em + 10log(N) + 10log(2^26) * - * \remark The routine is used in G.729 and AMR (all modes). + * @remark The routine is used in G.729 and AMR (all modes). */ int16_t ff_acelp_decode_gain_code( DSPContext *dsp, diff --git a/libavcodec/acelp_vectors.c b/libavcodec/acelp_vectors.c index a44ab8cfe6..3d6ef83964 100644 --- a/libavcodec/acelp_vectors.c +++ b/libavcodec/acelp_vectors.c @@ -48,26 +48,6 @@ const uint8_t ff_fc_2pulses_9bits_track1_gray[16] = 28, 26, }; -const uint8_t ff_fc_2pulses_9bits_track2_gray[32] = -{ - 0, 2, - 5, 4, - 12, 10, - 7, 9, - 25, 24, - 20, 22, - 14, 15, - 19, 17, - 36, 31, - 21, 26, - 1, 6, - 16, 11, - 27, 29, - 32, 30, - 39, 37, - 34, 35, -}; - const uint8_t ff_fc_4pulses_8bits_tracks_13[16] = { 0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, diff --git a/libavcodec/acelp_vectors.h b/libavcodec/acelp_vectors.h index f3bc781446..557c07b35e 100644 --- a/libavcodec/acelp_vectors.h +++ b/libavcodec/acelp_vectors.h @@ -82,37 +82,6 @@ extern const uint8_t ff_fc_2pulses_9bits_track1[16]; extern const uint8_t ff_fc_2pulses_9bits_track1_gray[16]; /** - * Track|Pulse| Positions - * ----------------------------------------- - * 2 | 1 | 0, 7, 14, 20, 27, 34, 1, 21 - * | | 2, 9, 15, 22, 29, 35, 6, 26 - * | | 4,10, 17, 24, 30, 37, 11, 31 - * | | 5,12, 19, 25, 32, 39, 16, 36 - * ----------------------------------------- - * - * @remark Track in the table should be read top-to-bottom, left-to-right. - * - * @note (EE.1) This table (from the reference code) does not comply with - * the specification. - * The specification contains the following table: - * - * Track|Pulse| Positions - * ----------------------------------------- - * 2 | 1 | 0, 5, 10, 15, 20, 25, 30, 35 - * | | 1, 6, 11, 16, 21, 26, 31, 36 - * | | 2, 7, 12, 17, 22, 27, 32, 37 - * | | 4, 9, 14, 19, 24, 29, 34, 39 - * - * ----------------------------------------- - * - * @note (EE.2) Reference G.729D code also uses gray decoding for each - * pulse index before looking up the value in the table. 
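(For context, the gray decoding mentioned in note EE.2 is the standard binary-reflected Gray-to-binary conversion; a minimal sketch:)

    /* Binary-reflected Gray code to plain binary, as referenced in note EE.2.
     * Illustrative only. */
    static unsigned gray_decode(unsigned g)
    {
        unsigned b = 0;
        while (g) {
            b ^= g;
            g >>= 1;
        }
        return b;
    }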
- * - * Used in G.729 @@6.4k (with gray coding) - */ -extern const uint8_t ff_fc_2pulses_9bits_track2_gray[32]; - -/** * b60 hamming windowed sinc function coefficients */ extern const float ff_b60_sinc[61]; diff --git a/libavcodec/adpcm.c b/libavcodec/adpcm.c index ba312558b0..4e1299379f 100644 --- a/libavcodec/adpcm.c +++ b/libavcodec/adpcm.c @@ -198,6 +198,7 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx) avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 / (4 * avctx->channels) + 1; /* each 16 bits sample gives one nibble */ /* and we have 4 bytes per channel overhead */ avctx->block_align = BLKSIZE; + avctx->bits_per_coded_sample = 4; /* seems frame_size isn't taken into account... have to buffer the samples :-( */ break; case CODEC_ID_ADPCM_IMA_QT: @@ -208,6 +209,7 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx) avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2; /* each 16 bits sample gives one nibble */ /* and we have 7 bytes per channel overhead */ avctx->block_align = BLKSIZE; + avctx->bits_per_coded_sample = 4; avctx->extradata_size = 32; extradata = avctx->extradata = av_malloc(avctx->extradata_size); if (!extradata) diff --git a/libavcodec/adxdec.c b/libavcodec/adxdec.c index 991528a7d2..df3e8dd355 100644 --- a/libavcodec/adxdec.c +++ b/libavcodec/adxdec.c @@ -167,14 +167,12 @@ static int adx_decode_frame(AVCodecContext *avctx, } AVCodec ff_adpcm_adx_decoder = { - "adpcm_adx", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_ADPCM_ADX, - sizeof(ADXContext), - adx_decode_init, - NULL, - NULL, - adx_decode_frame, + .name = "adpcm_adx", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_ADPCM_ADX, + .priv_data_size = sizeof(ADXContext), + .init = adx_decode_init, + .decode = adx_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("SEGA CRI ADX ADPCM"), }; diff --git a/libavcodec/adxenc.c b/libavcodec/adxenc.c index ca48f94b71..893afe0981 100644 --- a/libavcodec/adxenc.c +++ b/libavcodec/adxenc.c @@ -184,14 +184,13 @@ static int adx_encode_frame(AVCodecContext *avctx, } AVCodec ff_adpcm_adx_encoder = { - "adpcm_adx", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_ADPCM_ADX, - sizeof(ADXContext), - adx_encode_init, - adx_encode_frame, - adx_encode_close, - NULL, + .name = "adpcm_adx", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_ADPCM_ADX, + .priv_data_size = sizeof(ADXContext), + .init = adx_encode_init, + .encode = adx_encode_frame, + .close = adx_encode_close, .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("SEGA CRI ADX ADPCM"), }; diff --git a/libavcodec/alac.c b/libavcodec/alac.c index 96c15fffc9..06bf6f8c78 100644 --- a/libavcodec/alac.c +++ b/libavcodec/alac.c @@ -23,9 +23,7 @@ * @file * ALAC (Apple Lossless Audio Codec) decoder * @author 2005 David Hammerton - * - * For more information on the ALAC format, visit: - * http://crazney.net/programs/itunes/alac.html + * @see http://crazney.net/programs/itunes/alac.html * * Note: This decoder expects a 36- (0x24-)byte QuickTime atom to be * passed through the extradata[_size] fields. This atom is tacked onto @@ -286,20 +284,9 @@ static void predictor_decompress_fir_adapt(int32_t *error_buffer, buffer_out[i+1] = val; } -#if 0 /* 4 and 8 are very common cases (the only ones i've seen). 
these * should be unrolled and optimized */ - if (predictor_coef_num == 4) { - /* FIXME: optimized general case */ - return; - } - - if (predictor_coef_table == 8) { - /* FIXME: optimized general case */ - return; - } -#endif /* general case */ if (predictor_coef_num > 0) { @@ -692,13 +679,12 @@ static av_cold int alac_decode_close(AVCodecContext *avctx) } AVCodec ff_alac_decoder = { - "alac", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_ALAC, - sizeof(ALACContext), - alac_decode_init, - NULL, - alac_decode_close, - alac_decode_frame, + .name = "alac", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_ALAC, + .priv_data_size = sizeof(ALACContext), + .init = alac_decode_init, + .close = alac_decode_close, + .decode = alac_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("ALAC (Apple Lossless Audio Codec)"), }; diff --git a/libavcodec/alacenc.c b/libavcodec/alacenc.c index bb618e16f2..c399471c1d 100644 --- a/libavcodec/alacenc.c +++ b/libavcodec/alacenc.c @@ -529,13 +529,13 @@ static av_cold int alac_encode_close(AVCodecContext *avctx) } AVCodec ff_alac_encoder = { - "alac", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_ALAC, - sizeof(AlacEncodeContext), - alac_encode_init, - alac_encode_frame, - alac_encode_close, + .name = "alac", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_ALAC, + .priv_data_size = sizeof(AlacEncodeContext), + .init = alac_encode_init, + .encode = alac_encode_frame, + .close = alac_encode_close, .capabilities = CODEC_CAP_SMALL_LAST_FRAME, .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("ALAC (Apple Lossless Audio Codec)"), diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c index e6305cf7d2..5480c7e78b 100644 --- a/libavcodec/allcodecs.c +++ b/libavcodec/allcodecs.c @@ -109,7 +109,7 @@ void avcodec_register_all(void) REGISTER_ENCDEC (FFV1, ffv1); REGISTER_ENCDEC (FFVHUFF, ffvhuff); REGISTER_ENCDEC (FLASHSV, flashsv); - REGISTER_ENCODER (FLASHSV2, flashsv2); + REGISTER_ENCDEC (FLASHSV2, flashsv2); REGISTER_DECODER (FLIC, flic); REGISTER_ENCDEC (FLV, flv); REGISTER_DECODER (FOURXM, fourxm); diff --git a/libavcodec/alpha/dsputil_alpha.c b/libavcodec/alpha/dsputil_alpha.c index 6ce3f4bf15..d8f999dfdc 100644 --- a/libavcodec/alpha/dsputil_alpha.c +++ b/libavcodec/alpha/dsputil_alpha.c @@ -270,7 +270,7 @@ static void put_pixels16_axp_asm(uint8_t *block, const uint8_t *pixels, void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx) { - const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8; + const int high_bit_depth = avctx->bits_per_raw_sample > 8; if (!high_bit_depth) { c->put_pixels_tab[0][0] = put_pixels16_axp_asm; @@ -321,7 +321,8 @@ void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx) c->put_pixels_clamped = put_pixels_clamped_mvi_asm; c->add_pixels_clamped = add_pixels_clamped_mvi_asm; - c->get_pixels = get_pixels_mvi; + if (!high_bit_depth) + c->get_pixels = get_pixels_mvi; c->diff_pixels = diff_pixels_mvi; c->sad[0] = pix_abs16x16_mvi_asm; c->sad[1] = pix_abs8x8_mvi; @@ -335,7 +336,7 @@ void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx) put_pixels_clamped_axp_p = c->put_pixels_clamped; add_pixels_clamped_axp_p = c->add_pixels_clamped; - if (!avctx->lowres && + if (!avctx->lowres && avctx->bits_per_raw_sample <= 8 && (avctx->idct_algo == FF_IDCT_AUTO || avctx->idct_algo == FF_IDCT_SIMPLEALPHA)) { c->idct_put = ff_simple_idct_put_axp; diff --git a/libavcodec/alsdec.c b/libavcodec/alsdec.c index 509d49c694..9a79180d82 100644 --- a/libavcodec/alsdec.c +++ 
b/libavcodec/alsdec.c @@ -1739,14 +1739,13 @@ static av_cold void flush(AVCodecContext *avctx) AVCodec ff_als_decoder = { - "als", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_MP4ALS, - sizeof(ALSDecContext), - decode_init, - NULL, - decode_end, - decode_frame, + .name = "als", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_MP4ALS, + .priv_data_size = sizeof(ALSDecContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, .flush = flush, .capabilities = CODEC_CAP_SUBFRAMES, .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 Audio Lossless Coding (ALS)"), diff --git a/libavcodec/amrnbdec.c b/libavcodec/amrnbdec.c index 55dc8b05f5..8a9e77709a 100644 --- a/libavcodec/amrnbdec.c +++ b/libavcodec/amrnbdec.c @@ -83,7 +83,7 @@ /** Maximum sharpening factor * * The specification says 0.8, which should be 13107, but the reference C code - * uses 13017 instead. (Amusingly the same applies to SHARP_MAX in g729dec.c.) + * uses 13017 instead. (Amusingly the same applies to SHARP_MAX in bitexact G.729.) */ #define SHARP_MAX 0.79449462890625 @@ -204,7 +204,7 @@ static enum Mode unpack_bitstream(AMRContext *p, const uint8_t *buf, } -/// @defgroup amr_lpc_decoding AMR pitch LPC coefficient decoding functions +/// @name AMR pitch LPC coefficient decoding functions /// @{ /** @@ -341,7 +341,7 @@ static void lsf2lsp_3(AMRContext *p) /// @} -/// @defgroup amr_pitch_vector_decoding AMR pitch vector decoding functions +/// @name AMR pitch vector decoding functions /// @{ /** @@ -403,7 +403,7 @@ static void decode_pitch_vector(AMRContext *p, /// @} -/// @defgroup amr_algebraic_code_book AMR algebraic code book (fixed) vector decoding functions +/// @name AMR algebraic code book (fixed) vector decoding functions /// @{ /** @@ -547,7 +547,7 @@ static void pitch_sharpening(AMRContext *p, int subframe, enum Mode mode, /// @} -/// @defgroup amr_gain_decoding AMR gain decoding functions +/// @name AMR gain decoding functions /// @{ /** @@ -633,7 +633,7 @@ static void decode_gains(AMRContext *p, const AMRNBSubframe *amr_subframe, /// @} -/// @defgroup amr_pre_processing AMR pre-processing functions +/// @name AMR preprocessing functions /// @{ /** @@ -751,7 +751,7 @@ static const float *anti_sparseness(AMRContext *p, AMRFixed *fixed_sparse, /// @} -/// @defgroup amr_synthesis AMR synthesis functions +/// @name AMR synthesis functions /// @{ /** @@ -812,7 +812,7 @@ static int synthesis(AMRContext *p, float *lpc, /// @} -/// @defgroup amr_update AMR update functions +/// @name AMR update functions /// @{ /** @@ -837,7 +837,7 @@ static void update_state(AMRContext *p) /// @} -/// @defgroup amr_postproc AMR Post processing functions +/// @name AMR Postprocessing functions /// @{ /** @@ -936,7 +936,8 @@ static int amrnb_decode_frame(AVCodecContext *avctx, void *data, int *data_size, p->cur_frame_mode = unpack_bitstream(p, buf, buf_size); if (p->cur_frame_mode == MODE_DTX) { - av_log_missing_feature(avctx, "dtx mode", 1); + av_log_missing_feature(avctx, "dtx mode", 0); + av_log(avctx, AV_LOG_INFO, "Note: libopencore_amrnb supports dtx\n"); return -1; } diff --git a/libavcodec/anm.c b/libavcodec/anm.c index 02244f70e1..b84aef1c1b 100644 --- a/libavcodec/anm.c +++ b/libavcodec/anm.c @@ -184,14 +184,13 @@ static av_cold int decode_end(AVCodecContext *avctx) } AVCodec ff_anm_decoder = { - "anm", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_ANM, - sizeof(AnmContext), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, + .name = "anm", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_ANM, + .priv_data_size = 
sizeof(AnmContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Deluxe Paint Animation"), }; diff --git a/libavcodec/ansi.c b/libavcodec/ansi.c index 7043b7c9d9..ebcc288539 100644 --- a/libavcodec/ansi.c +++ b/libavcodec/ansi.c @@ -154,7 +154,7 @@ static void draw_char(AVCodecContext *avctx, int c) /** * Execute ANSI escape code - * @param <0 error + * @return 0 on success, negative on error */ static int execute_code(AVCodecContext * avctx, int c) { diff --git a/libavcodec/apedec.c b/libavcodec/apedec.c index 05498777b4..300a0097d8 100644 --- a/libavcodec/apedec.c +++ b/libavcodec/apedec.c @@ -217,7 +217,7 @@ static av_cold int ape_decode_close(AVCodecContext * avctx) } /** - * @defgroup rangecoder APE range decoder + * @name APE range decoding functions * @{ */ @@ -885,14 +885,13 @@ static void ape_flush(AVCodecContext *avctx) } AVCodec ff_ape_decoder = { - "ape", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_APE, - sizeof(APEContext), - ape_decode_init, - NULL, - ape_decode_close, - ape_decode_frame, + .name = "ape", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_APE, + .priv_data_size = sizeof(APEContext), + .init = ape_decode_init, + .close = ape_decode_close, + .decode = ape_decode_frame, .capabilities = CODEC_CAP_SUBFRAMES, .flush = ape_flush, .long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"), diff --git a/libavcodec/arm/aac.h b/libavcodec/arm/aac.h index 3b14c094c6..bd4d293f02 100644 --- a/libavcodec/arm/aac.h +++ b/libavcodec/arm/aac.h @@ -114,12 +114,15 @@ static inline float *VMUL4S(float *dst, const float *v, unsigned idx, "vmov d1, %2, %3 \n\t" "lsls %6, %6, #1 \n\t" "and %0, %5, #1<<31 \n\t" + "it cs \n\t" "lslcs %5, %5, #1 \n\t" "lsls %6, %6, #1 \n\t" "and %1, %5, #1<<31 \n\t" + "it cs \n\t" "lslcs %5, %5, #1 \n\t" "lsls %6, %6, #1 \n\t" "and %2, %5, #1<<31 \n\t" + "it cs \n\t" "lslcs %5, %5, #1 \n\t" "vmov d4, %0, %1 \n\t" "and %3, %5, #1<<31 \n\t" diff --git a/libavcodec/arm/ac3dsp_arm.S b/libavcodec/arm/ac3dsp_arm.S index 545714cff1..9a7d20eb7b 100644 --- a/libavcodec/arm/ac3dsp_arm.S +++ b/libavcodec/arm/ac3dsp_arm.S @@ -27,6 +27,7 @@ function ff_ac3_update_bap_counts_arm, export=1 lsl r3, lr, #1 ldrh r12, [r0, r3] subs r2, r2, #1 + it gt ldrbgt lr, [r1], #1 add r12, r12, #1 strh r12, [r0, r3] diff --git a/libavcodec/arm/ac3dsp_armv6.S b/libavcodec/arm/ac3dsp_armv6.S index 2b2f2acf22..b6aee867b3 100644 --- a/libavcodec/arm/ac3dsp_armv6.S +++ b/libavcodec/arm/ac3dsp_armv6.S @@ -37,14 +37,16 @@ function ff_ac3_bit_alloc_calc_bap_armv6, export=1 ldrb r10, [r4], #1 1: ldrsh r9, [r0], #2 @ mask[band] - ldr r8, =0x1fe0 + mov r8, #0xff0 sub r9, r9, r12 @ - snr_offset mov r11, r10 ldrb r10, [r4], #1 @ band_start_tab[band++] subs r9, r9, r5 @ - floor + it lt movlt r9, #0 cmp r10, r3 @ - end - and r9, r9, r8 @ & 0x1fe0 + and r9, r9, r8, lsl #1 @ & 0x1fe0 + ite gt subgt r8, r3, r11 suble r8, r10, r11 add r9, r9, r5 @ + floor => m diff --git a/libavcodec/arm/ac3dsp_neon.S b/libavcodec/arm/ac3dsp_neon.S index 946b39f25b..e97197c27a 100644 --- a/libavcodec/arm/ac3dsp_neon.S +++ b/libavcodec/arm/ac3dsp_neon.S @@ -41,6 +41,7 @@ endfunc function ff_ac3_exponent_min_neon, export=1 cmp r1, #0 + it eq bxeq lr push {lr} mov r12, #256 @@ -94,19 +95,14 @@ function ff_float_to_fixed24_neon, export=1 endfunc function ff_ac3_extract_exponents_neon, export=1 - vmov.i32 q14, #24 vmov.i32 q15, #8 1: - vld1.32 {q0}, [r1,:128] + vld1.32 {q0}, [r1,:128]! 
vabs.s32 q1, q0 vclz.i32 q3, q1 vsub.i32 q3, q3, q15 - vcge.s32 q2, q3, q14 - vbit q3, q14, q2 - vbic q0, q0, q2 vmovn.i32 d6, q3 vmovn.i16 d6, q3 - vst1.32 {q0}, [r1,:128]! vst1.32 {d6[0]}, [r0,:32]! subs r2, r2, #4 bgt 1b diff --git a/libavcodec/arm/asm-offsets.h b/libavcodec/arm/asm-offsets.h index fe124ba702..5cfc5cb10c 100644 --- a/libavcodec/arm/asm-offsets.h +++ b/libavcodec/arm/asm-offsets.h @@ -29,11 +29,11 @@ #endif /* MpegEncContext */ -#define Y_DC_SCALE 0xb4 -#define C_DC_SCALE 0xb8 -#define AC_PRED 0xbc -#define BLOCK_LAST_INDEX 0xc0 -#define H263_AIC 0xf0 -#define INTER_SCANTAB_RASTER_END 0x138 +#define Y_DC_SCALE 0xa8 +#define C_DC_SCALE 0xac +#define AC_PRED 0xb0 +#define BLOCK_LAST_INDEX 0xb4 +#define H263_AIC 0xe4 +#define INTER_SCANTAB_RASTER_END 0x12c #endif /* AVCODEC_ARM_ASM_OFFSETS_H */ diff --git a/libavcodec/arm/asm.S b/libavcodec/arm/asm.S index bb999fd61a..fc7ee60357 100644 --- a/libavcodec/arm/asm.S +++ b/libavcodec/arm/asm.S @@ -26,7 +26,16 @@ # define ELF @ #endif +#if CONFIG_THUMB +# define A @ +# define T +#else +# define A +# define T @ +#endif + .syntax unified +T .thumb .macro require8 val=1 ELF .eabi_attribute 24, \val @@ -82,6 +91,90 @@ ELF .size \name, . - \name #endif .endm +.macro ldr_pre rt, rn, rm:vararg +A ldr \rt, [\rn, \rm]! +T add \rn, \rn, \rm +T ldr \rt, [\rn] +.endm + +.macro ldr_post rt, rn, rm:vararg +A ldr \rt, [\rn], \rm +T ldr \rt, [\rn] +T add \rn, \rn, \rm +.endm + +.macro ldrd_reg rt, rt2, rn, rm +A ldrd \rt, \rt2, [\rn, \rm] +T add \rt, \rn, \rm +T ldrd \rt, \rt2, [\rt] +.endm + +.macro ldrd_post rt, rt2, rn, rm +A ldrd \rt, \rt2, [\rn], \rm +T ldrd \rt, \rt2, [\rn] +T add \rn, \rn, \rm +.endm + +.macro ldrh_pre rt, rn, rm +A ldrh \rt, [\rn, \rm]! +T add \rn, \rn, \rm +T ldrh \rt, [\rn] +.endm + +.macro ldrh_dpre rt, rn, rm +A ldrh \rt, [\rn, -\rm]! +T sub \rn, \rn, \rm +T ldrh \rt, [\rn] +.endm + +.macro ldrh_post rt, rn, rm +A ldrh \rt, [\rn], \rm +T ldrh \rt, [\rn] +T add \rn, \rn, \rm +.endm + +.macro str_post rt, rn, rm:vararg +A str \rt, [\rn], \rm +T str \rt, [\rn] +T add \rn, \rn, \rm +.endm + +.macro strb_post rt, rn, rm:vararg +A strb \rt, [\rn], \rm +T strb \rt, [\rn] +T add \rn, \rn, \rm +.endm + +.macro strd_post rt, rt2, rn, rm +A strd \rt, \rt2, [\rn], \rm +T strd \rt, \rt2, [\rn] +T add \rn, \rn, \rm +.endm + +.macro strh_pre rt, rn, rm +A strh \rt, [\rn, \rm]! +T add \rn, \rn, \rm +T strh \rt, [\rn] +.endm + +.macro strh_dpre rt, rn, rm +A strh \rt, [\rn, -\rm]! 
+T sub \rn, \rn, \rm +T strh \rt, [\rn] +.endm + +.macro strh_post rt, rn, rm +A strh \rt, [\rn], \rm +T strh \rt, [\rn] +T add \rn, \rn, \rm +.endm + +.macro strh_dpost rt, rn, rm +A strh \rt, [\rn], -\rm +T strh \rt, [\rn] +T sub \rn, \rn, \rm +.endm + #if HAVE_VFP_ARGS .eabi_attribute 28, 1 # define VFP diff --git a/libavcodec/arm/dcadsp_neon.S b/libavcodec/arm/dcadsp_neon.S index c3bddd3e41..852527a59e 100644 --- a/libavcodec/arm/dcadsp_neon.S +++ b/libavcodec/arm/dcadsp_neon.S @@ -27,6 +27,7 @@ function ff_dca_lfe_fir_neon, export=1 add r5, r2, #256*4-16 @ cf1 sub r1, r1, #12 cmp r3, #32 + ite eq moveq r6, #256/32 movne r6, #256/64 NOVFP vldr s0, [sp, #16] @ scale diff --git a/libavcodec/arm/dsputil_arm.S b/libavcodec/arm/dsputil_arm.S index 7ee85e808b..1247b0fa84 100644 --- a/libavcodec/arm/dsputil_arm.S +++ b/libavcodec/arm/dsputil_arm.S @@ -24,11 +24,6 @@ preserve8 -#if !HAVE_PLD -.macro pld reg -.endm -#endif - #if HAVE_ARMV5TE function ff_prefetch_arm, export=1 subs r2, r2, #1 @@ -37,6 +32,8 @@ function ff_prefetch_arm, export=1 bne ff_prefetch_arm bx lr endfunc +#else +#define pld @ #endif .macro ALIGN_QWORD_D shift, Rd0, Rd1, Rd2, Rd3, Rn0, Rn1, Rn2, Rn3, Rn4 @@ -554,10 +551,12 @@ endfunc and r9, r5, r14 and r10, r6, r14 and r11, r7, r14 + it eq andeq r14, r14, r14, \rnd #1 add r8, r8, r10 add r9, r9, r11 ldr r12, =0xfcfcfcfc >> 2 + itt eq addeq r8, r8, r14 addeq r9, r9, r14 and r4, r12, r4, lsr #2 @@ -638,8 +637,10 @@ function ff_add_pixels_clamped_arm, export=1 mvn r5, r5 mvn r7, r7 tst r6, #0x100 + it ne movne r6, r5, lsr #24 tst r8, #0x100 + it ne movne r8, r7, lsr #24 mov r9, r6 ldrsh r5, [r0, #4] /* moved form [A] */ @@ -654,8 +655,10 @@ function ff_add_pixels_clamped_arm, export=1 mvn r5, r5 mvn r7, r7 tst r6, #0x100 + it ne movne r6, r5, lsr #24 tst r8, #0x100 + it ne movne r8, r7, lsr #24 orr r9, r9, r6, lsl #16 ldr r4, [r1, #4] /* moved form [B] */ @@ -676,8 +679,10 @@ function ff_add_pixels_clamped_arm, export=1 mvn r5, r5 mvn r7, r7 tst r6, #0x100 + it ne movne r6, r5, lsr #24 tst r8, #0x100 + it ne movne r8, r7, lsr #24 mov r9, r6 ldrsh r5, [r0, #12] /* moved from [D] */ @@ -692,8 +697,10 @@ function ff_add_pixels_clamped_arm, export=1 mvn r5, r5 mvn r7, r7 tst r6, #0x100 + it ne movne r6, r5, lsr #24 tst r8, #0x100 + it ne movne r8, r7, lsr #24 orr r9, r9, r6, lsl #16 add r0, r0, #16 /* moved from [E] */ diff --git a/libavcodec/arm/dsputil_armv6.S b/libavcodec/arm/dsputil_armv6.S index 214d947da3..a2c8588fad 100644 --- a/libavcodec/arm/dsputil_armv6.S +++ b/libavcodec/arm/dsputil_armv6.S @@ -47,16 +47,16 @@ function ff_put_pixels16_armv6, export=1 ldr r5, [r1, #4] ldr r6, [r1, #8] ldr r7, [r1, #12] - ldr r4, [r1], r2 + ldr_post r4, r1, r2 strd r6, r7, [r0, #8] ldr r9, [r1, #4] - strd r4, r5, [r0], r2 + strd_post r4, r5, r0, r2 ldr r10, [r1, #8] ldr r11, [r1, #12] - ldr r8, [r1], r2 + ldr_post r8, r1, r2 strd r10, r11, [r0, #8] subs r3, r3, #2 - strd r8, r9, [r0], r2 + strd_post r8, r9, r0, r2 bne 1b pop {r4-r11} @@ -67,12 +67,12 @@ function ff_put_pixels8_armv6, export=1 push {r4-r7} 1: ldr r5, [r1, #4] - ldr r4, [r1], r2 + ldr_post r4, r1, r2 ldr r7, [r1, #4] - strd r4, r5, [r0], r2 - ldr r6, [r1], r2 + strd_post r4, r5, r0, r2 + ldr_post r6, r1, r2 subs r3, r3, #2 - strd r6, r7, [r0], r2 + strd_post r6, r7, r0, r2 bne 1b pop {r4-r7} @@ -90,7 +90,7 @@ function ff_put_pixels8_x2_armv6, export=1 ldr r5, [r1, #4] ldr r7, [r1, #5] lsr r6, r4, #8 - ldr r8, [r1, r2]! 
+ ldr_pre r8, r1, r2 orr r6, r6, r5, lsl #24 ldr r9, [r1, #4] ldr r11, [r1, #5] @@ -112,9 +112,9 @@ function ff_put_pixels8_x2_armv6, export=1 uhadd8 r9, r9, r11 and r6, r6, r12 uadd8 r8, r8, r14 - strd r4, r5, [r0], r2 + strd_post r4, r5, r0, r2 uadd8 r9, r9, r6 - strd r8, r9, [r0], r2 + strd_post r8, r9, r0, r2 bne 1b pop {r4-r11, pc} @@ -127,7 +127,7 @@ function ff_put_pixels8_y2_armv6, export=1 orr r12, r12, r12, lsl #16 ldr r4, [r1] ldr r5, [r1, #4] - ldr r6, [r1, r2]! + ldr_pre r6, r1, r2 ldr r7, [r1, #4] 1: subs r3, r3, #2 @@ -136,7 +136,7 @@ function ff_put_pixels8_y2_armv6, export=1 uhadd8 r9, r5, r7 eor r11, r5, r7 and r10, r10, r12 - ldr r4, [r1, r2]! + ldr_pre r4, r1, r2 uadd8 r8, r8, r10 and r11, r11, r12 uadd8 r9, r9, r11 @@ -148,11 +148,11 @@ function ff_put_pixels8_y2_armv6, export=1 eor r7, r5, r7 uadd8 r10, r10, r6 and r7, r7, r12 - ldr r6, [r1, r2]! + ldr_pre r6, r1, r2 uadd8 r11, r11, r7 - strd r8, r9, [r0], r2 + strd_post r8, r9, r0, r2 ldr r7, [r1, #4] - strd r10, r11, [r0], r2 + strd_post r10, r11, r0, r2 bne 1b pop {r4-r11} @@ -166,7 +166,7 @@ function ff_put_pixels8_x2_no_rnd_armv6, export=1 ldr r4, [r1] ldr r5, [r1, #4] ldr r7, [r1, #5] - ldr r8, [r1, r2]! + ldr_pre r8, r1, r2 ldr r9, [r1, #4] ldr r14, [r1, #5] add r1, r1, r2 @@ -191,16 +191,16 @@ function ff_put_pixels8_y2_no_rnd_armv6, export=1 push {r4-r9, lr} ldr r4, [r1] ldr r5, [r1, #4] - ldr r6, [r1, r2]! + ldr_pre r6, r1, r2 ldr r7, [r1, #4] 1: subs r3, r3, #2 uhadd8 r8, r4, r6 - ldr r4, [r1, r2]! + ldr_pre r4, r1, r2 uhadd8 r9, r5, r7 ldr r5, [r1, #4] uhadd8 r12, r4, r6 - ldr r6, [r1, r2]! + ldr_pre r6, r1, r2 uhadd8 r14, r5, r7 ldr r7, [r1, #4] stm r0, {r8,r9} @@ -220,44 +220,44 @@ function ff_avg_pixels8_armv6, export=1 orr lr, lr, lr, lsl #16 ldrd r4, r5, [r0] ldr r10, [r1, #4] - ldr r9, [r1], r2 + ldr_post r9, r1, r2 subs r3, r3, #2 1: pld [r1, r2] eor r8, r4, r9 uhadd8 r4, r4, r9 eor r12, r5, r10 - ldrd r6, r7, [r0, r2] + ldrd_reg r6, r7, r0, r2 uhadd8 r5, r5, r10 and r8, r8, lr ldr r10, [r1, #4] and r12, r12, lr uadd8 r4, r4, r8 - ldr r9, [r1], r2 + ldr_post r9, r1, r2 eor r8, r6, r9 uadd8 r5, r5, r12 pld [r1, r2, lsl #1] eor r12, r7, r10 uhadd8 r6, r6, r9 - strd r4, r5, [r0], r2 + strd_post r4, r5, r0, r2 uhadd8 r7, r7, r10 beq 2f and r8, r8, lr - ldrd r4, r5, [r0, r2] + ldrd_reg r4, r5, r0, r2 uadd8 r6, r6, r8 ldr r10, [r1, #4] and r12, r12, lr subs r3, r3, #2 uadd8 r7, r7, r12 - ldr r9, [r1], r2 - strd r6, r7, [r0], r2 + ldr_post r9, r1, r2 + strd_post r6, r7, r0, r2 b 1b 2: and r8, r8, lr and r12, r12, lr uadd8 r6, r6, r8 uadd8 r7, r7, r12 - strd r6, r7, [r0], r2 + strd_post r6, r7, r0, r2 pop {r4-r10, pc} endfunc @@ -284,7 +284,7 @@ function ff_add_pixels_clamped_armv6, export=1 orr r6, r8, r5, lsl #8 orr r7, r4, lr, lsl #8 subs r3, r3, #1 - strd r6, r7, [r1], r2 + strd_post r6, r7, r1, r2 bgt 1b pop {r4-r8,pc} endfunc @@ -294,7 +294,7 @@ function ff_get_pixels_armv6, export=1 push {r4-r8, lr} mov lr, #8 1: - ldrd r4, r5, [r1], r2 + ldrd_post r4, r5, r1, r2 subs lr, lr, #1 uxtb16 r6, r4 uxtb16 r4, r4, ror #8 @@ -317,8 +317,8 @@ function ff_diff_pixels_armv6, export=1 push {r4-r9, lr} mov lr, #8 1: - ldrd r4, r5, [r1], r3 - ldrd r6, r7, [r2], r3 + ldrd_post r4, r5, r1, r3 + ldrd_post r6, r7, r2, r3 uxtb16 r8, r4 uxtb16 r4, r4, ror #8 uxtb16 r9, r6 @@ -492,19 +492,19 @@ function ff_pix_abs8_armv6, export=1 push {r4-r9, lr} mov r0, #0 mov lr, #0 - ldrd r4, r5, [r1], r3 + ldrd_post r4, r5, r1, r3 1: subs r12, r12, #2 ldr r7, [r2, #4] - ldr r6, [r2], r3 - ldrd r8, r9, [r1], r3 + ldr_post r6, r2, r3 
+ ldrd_post r8, r9, r1, r3 usada8 r0, r4, r6, r0 pld [r2, r3] usada8 lr, r5, r7, lr ldr r7, [r2, #4] - ldr r6, [r2], r3 + ldr_post r6, r2, r3 beq 2f - ldrd r4, r5, [r1], r3 + ldrd_post r4, r5, r1, r3 usada8 r0, r8, r6, r0 pld [r2, r3] usada8 lr, r9, r7, lr @@ -613,7 +613,7 @@ function ff_pix_sum_armv6, export=1 ldr r7, [r0, #12] usada8 r2, r6, lr, r2 beq 2f - ldr r4, [r0, r1]! + ldr_pre r4, r0, r1 usada8 r3, r7, lr, r3 bgt 1b 2: diff --git a/libavcodec/arm/dsputil_init_arm.c b/libavcodec/arm/dsputil_init_arm.c index 0351412761..ccbe1ed296 100644 --- a/libavcodec/arm/dsputil_init_arm.c +++ b/libavcodec/arm/dsputil_init_arm.c @@ -75,12 +75,12 @@ static void simple_idct_arm_add(uint8_t *dest, int line_size, DCTELEM *block) void dsputil_init_arm(DSPContext* c, AVCodecContext *avctx) { - const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8; + const int high_bit_depth = avctx->bits_per_raw_sample > 8; ff_put_pixels_clamped = c->put_pixels_clamped; ff_add_pixels_clamped = c->add_pixels_clamped; - if (!avctx->lowres) { + if (!avctx->lowres && avctx->bits_per_raw_sample <= 8) { if(avctx->idct_algo == FF_IDCT_AUTO || avctx->idct_algo == FF_IDCT_ARM){ c->idct_put = j_rev_dct_arm_put; diff --git a/libavcodec/arm/dsputil_init_armv5te.c b/libavcodec/arm/dsputil_init_armv5te.c index 750e5147b4..2390aabb62 100644 --- a/libavcodec/arm/dsputil_init_armv5te.c +++ b/libavcodec/arm/dsputil_init_armv5te.c @@ -29,8 +29,9 @@ void ff_prefetch_arm(void *mem, int stride, int h); void av_cold ff_dsputil_init_armv5te(DSPContext* c, AVCodecContext *avctx) { - if (!avctx->lowres && (avctx->idct_algo == FF_IDCT_AUTO || - avctx->idct_algo == FF_IDCT_SIMPLEARMV5TE)) { + if (!avctx->lowres && avctx->bits_per_raw_sample <= 8 && + (avctx->idct_algo == FF_IDCT_AUTO || + avctx->idct_algo == FF_IDCT_SIMPLEARMV5TE)) { c->idct_put = ff_simple_idct_put_armv5te; c->idct_add = ff_simple_idct_add_armv5te; c->idct = ff_simple_idct_armv5te; diff --git a/libavcodec/arm/dsputil_init_armv6.c b/libavcodec/arm/dsputil_init_armv6.c index 9acea4a1d6..fb0d00973e 100644 --- a/libavcodec/arm/dsputil_init_armv6.c +++ b/libavcodec/arm/dsputil_init_armv6.c @@ -72,10 +72,11 @@ int ff_pix_sum_armv6(uint8_t *pix, int line_size); void av_cold ff_dsputil_init_armv6(DSPContext* c, AVCodecContext *avctx) { - const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8; + const int high_bit_depth = avctx->bits_per_raw_sample > 8; - if (!avctx->lowres && (avctx->idct_algo == FF_IDCT_AUTO || - avctx->idct_algo == FF_IDCT_SIMPLEARMV6)) { + if (!avctx->lowres && avctx->bits_per_raw_sample <= 8 && + (avctx->idct_algo == FF_IDCT_AUTO || + avctx->idct_algo == FF_IDCT_SIMPLEARMV6)) { c->idct_put = ff_simple_idct_put_armv6; c->idct_add = ff_simple_idct_add_armv6; c->idct = ff_simple_idct_armv6; @@ -105,8 +106,9 @@ void av_cold ff_dsputil_init_armv6(DSPContext* c, AVCodecContext *avctx) c->avg_pixels_tab[1][0] = ff_avg_pixels8_armv6; } + if (!high_bit_depth) + c->get_pixels = ff_get_pixels_armv6; c->add_pixels_clamped = ff_add_pixels_clamped_armv6; - c->get_pixels = ff_get_pixels_armv6; c->diff_pixels = ff_diff_pixels_armv6; c->pix_abs[0][0] = ff_pix_abs16_armv6; diff --git a/libavcodec/arm/dsputil_init_neon.c b/libavcodec/arm/dsputil_init_neon.c index 6faf3dc8d0..15536d0bd2 100644 --- a/libavcodec/arm/dsputil_init_neon.c +++ b/libavcodec/arm/dsputil_init_neon.c @@ -143,14 +143,6 @@ void ff_vector_fmul_window_neon(float *dst, const float *src0, const float *src1, const float *win, int len); 
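(The recurring dsputil_init_* change in these hunks keys the 8-bit-only SIMD paths off bits_per_raw_sample alone instead of an H.264-specific check, and additionally keeps the optimized IDCT and pixel helpers off the high-bit-depth path, since those routines assume 8-bit pixels and 16-bit coefficients. Sketched in isolation, with placeholder comments where the real hooks are installed:)

    #include "avcodec.h"
    #include "dsputil.h"

    /* Sketch of the selection pattern used across these init functions;
     * the hook names in the comments are placeholders, not exact symbols. */
    static void init_sketch(DSPContext *c, AVCodecContext *avctx)
    {
        const int high_bit_depth = avctx->bits_per_raw_sample > 8;

        if (!avctx->lowres && avctx->bits_per_raw_sample <= 8) {
            /* install the optimized 8-bit IDCT variants here */
        }
        if (!high_bit_depth) {
            /* install 8-bit pixel helpers (get_pixels, put_pixels, ...) here */
        }
    }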
void ff_vector_fmul_scalar_neon(float *dst, const float *src, float mul, int len); -void ff_vector_fmul_sv_scalar_2_neon(float *dst, const float *src, - const float **vp, float mul, int len); -void ff_vector_fmul_sv_scalar_4_neon(float *dst, const float *src, - const float **vp, float mul, int len); -void ff_sv_fmul_scalar_2_neon(float *dst, const float **vp, float mul, - int len); -void ff_sv_fmul_scalar_4_neon(float *dst, const float **vp, float mul, - int len); void ff_butterflies_float_neon(float *v1, float *v2, int len); float ff_scalarproduct_float_neon(const float *v1, const float *v2, int len); void ff_vector_fmul_reverse_neon(float *dst, const float *src0, @@ -160,6 +152,8 @@ void ff_vector_fmul_add_neon(float *dst, const float *src0, const float *src1, void ff_vector_clipf_neon(float *dst, const float *src, float min, float max, int len); +void ff_vector_clip_int32_neon(int32_t *dst, const int32_t *src, int32_t min, + int32_t max, unsigned int len); void ff_vorbis_inverse_coupling_neon(float *mag, float *ang, int blocksize); @@ -173,9 +167,9 @@ void ff_apply_window_int16_neon(int16_t *dst, const int16_t *src, void ff_dsputil_init_neon(DSPContext *c, AVCodecContext *avctx) { - const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8; + const int high_bit_depth = avctx->bits_per_raw_sample > 8; - if (!avctx->lowres) { + if (!avctx->lowres && avctx->bits_per_raw_sample <= 8) { if (avctx->idct_algo == FF_IDCT_AUTO || avctx->idct_algo == FF_IDCT_SIMPLENEON) { c->idct_put = ff_simple_idct_put_neon; @@ -316,12 +310,7 @@ void ff_dsputil_init_neon(DSPContext *c, AVCodecContext *avctx) c->vector_fmul_reverse = ff_vector_fmul_reverse_neon; c->vector_fmul_add = ff_vector_fmul_add_neon; c->vector_clipf = ff_vector_clipf_neon; - - c->vector_fmul_sv_scalar[0] = ff_vector_fmul_sv_scalar_2_neon; - c->vector_fmul_sv_scalar[1] = ff_vector_fmul_sv_scalar_4_neon; - - c->sv_fmul_scalar[0] = ff_sv_fmul_scalar_2_neon; - c->sv_fmul_scalar[1] = ff_sv_fmul_scalar_4_neon; + c->vector_clip_int32 = ff_vector_clip_int32_neon; if (CONFIG_VORBIS_DECODER) c->vorbis_inverse_coupling = ff_vorbis_inverse_coupling_neon; diff --git a/libavcodec/arm/dsputil_iwmmxt.c b/libavcodec/arm/dsputil_iwmmxt.c index 85be83148a..2837af119f 100644 --- a/libavcodec/arm/dsputil_iwmmxt.c +++ b/libavcodec/arm/dsputil_iwmmxt.c @@ -155,7 +155,7 @@ static void nop(uint8_t *block, const uint8_t *pixels, int line_size, int h) void ff_dsputil_init_iwmmxt(DSPContext* c, AVCodecContext *avctx) { int mm_flags = AV_CPU_FLAG_IWMMXT; /* multimedia extension flags */ - const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8; + const int high_bit_depth = avctx->bits_per_raw_sample > 8; if (avctx->dsp_mask) { if (avctx->dsp_mask & AV_CPU_FLAG_FORCE) diff --git a/libavcodec/arm/dsputil_neon.S b/libavcodec/arm/dsputil_neon.S index 0dbf5ca48a..94a7a8cb75 100644 --- a/libavcodec/arm/dsputil_neon.S +++ b/libavcodec/arm/dsputil_neon.S @@ -531,6 +531,7 @@ function ff_vorbis_inverse_coupling_neon, export=1 2: vst1.32 {d2-d3}, [r3, :128]! vst1.32 {d0-d1}, [r12,:128]! + it lt bxlt lr 3: vld1.32 {d2-d3}, [r1,:128] @@ -575,6 +576,7 @@ NOVFP vdup.32 q8, r2 2: vst1.32 {q2},[r0,:128]! vst1.32 {q3},[r0,:128]! ands len, len, #15 + it eq bxeq lr 3: vld1.32 {q0},[r1,:128]! vmul.f32 q0, q0, q8 @@ -585,109 +587,6 @@ NOVFP vdup.32 q8, r2 .unreq len endfunc -function ff_vector_fmul_sv_scalar_2_neon, export=1 -VFP vdup.32 d16, d0[0] -NOVFP vdup.32 d16, r3 -NOVFP ldr r3, [sp] - vld1.32 {d0},[r1,:64]! 
- vld1.32 {d1},[r1,:64]! -1: subs r3, r3, #4 - vmul.f32 d4, d0, d16 - vmul.f32 d5, d1, d16 - ldr r12, [r2], #4 - vld1.32 {d2},[r12,:64] - ldr r12, [r2], #4 - vld1.32 {d3},[r12,:64] - vmul.f32 d4, d4, d2 - vmul.f32 d5, d5, d3 - beq 2f - vld1.32 {d0},[r1,:64]! - vld1.32 {d1},[r1,:64]! - vst1.32 {d4},[r0,:64]! - vst1.32 {d5},[r0,:64]! - b 1b -2: vst1.32 {d4},[r0,:64]! - vst1.32 {d5},[r0,:64]! - bx lr -endfunc - -function ff_vector_fmul_sv_scalar_4_neon, export=1 -VFP vdup.32 q10, d0[0] -NOVFP vdup.32 q10, r3 -NOVFP ldr r3, [sp] - push {lr} - bics lr, r3, #7 - beq 3f - vld1.32 {q0},[r1,:128]! - vld1.32 {q2},[r1,:128]! -1: ldr r12, [r2], #4 - vld1.32 {q1},[r12,:128] - ldr r12, [r2], #4 - vld1.32 {q3},[r12,:128] - vmul.f32 q8, q0, q10 - vmul.f32 q8, q8, q1 - vmul.f32 q9, q2, q10 - vmul.f32 q9, q9, q3 - subs lr, lr, #8 - beq 2f - vld1.32 {q0},[r1,:128]! - vld1.32 {q2},[r1,:128]! - vst1.32 {q8},[r0,:128]! - vst1.32 {q9},[r0,:128]! - b 1b -2: vst1.32 {q8},[r0,:128]! - vst1.32 {q9},[r0,:128]! - ands r3, r3, #7 - popeq {pc} -3: vld1.32 {q0},[r1,:128]! - ldr r12, [r2], #4 - vld1.32 {q1},[r12,:128] - vmul.f32 q0, q0, q10 - vmul.f32 q0, q0, q1 - vst1.32 {q0},[r0,:128]! - subs r3, r3, #4 - bgt 3b - pop {pc} -endfunc - -function ff_sv_fmul_scalar_2_neon, export=1 -VFP len .req r2 -NOVFP len .req r3 -VFP vdup.32 q8, d0[0] -NOVFP vdup.32 q8, r2 - ldr r12, [r1], #4 - vld1.32 {d0},[r12,:64] - ldr r12, [r1], #4 - vld1.32 {d1},[r12,:64] -1: vmul.f32 q1, q0, q8 - subs len, len, #4 - beq 2f - ldr r12, [r1], #4 - vld1.32 {d0},[r12,:64] - ldr r12, [r1], #4 - vld1.32 {d1},[r12,:64] - vst1.32 {q1},[r0,:128]! - b 1b -2: vst1.32 {q1},[r0,:128]! - bx lr - .unreq len -endfunc - -function ff_sv_fmul_scalar_4_neon, export=1 -VFP len .req r2 -NOVFP len .req r3 -VFP vdup.32 q8, d0[0] -NOVFP vdup.32 q8, r2 -1: ldr r12, [r1], #4 - vld1.32 {q0},[r12,:128] - vmul.f32 q0, q0, q8 - vst1.32 {q0},[r0,:128]! - subs len, len, #4 - bgt 1b - bx lr - .unreq len -endfunc - function ff_butterflies_float_neon, export=1 1: vld1.32 {q0},[r0,:128] vld1.32 {q1},[r1,:128] @@ -812,3 +711,19 @@ function ff_apply_window_int16_neon, export=1 pop {r4,pc} endfunc + +function ff_vector_clip_int32_neon, export=1 + vdup.32 q0, r2 + vdup.32 q1, r3 + ldr r2, [sp] +1: + vld1.32 {q2-q3}, [r1,:128]! + vmin.s32 q2, q2, q1 + vmin.s32 q3, q3, q1 + vmax.s32 q2, q2, q0 + vmax.s32 q3, q3, q0 + vst1.32 {q2-q3}, [r0,:128]! 
+ subs r2, r2, #8 + bgt 1b + bx lr +endfunc diff --git a/libavcodec/arm/dsputil_vfp.S b/libavcodec/arm/dsputil_vfp.S index 497c02be92..108208174d 100644 --- a/libavcodec/arm/dsputil_vfp.S +++ b/libavcodec/arm/dsputil_vfp.S @@ -55,18 +55,23 @@ function ff_vector_fmul_vfp, export=1 1: subs r3, r3, #16 vmul.f32 s12, s4, s12 + itttt ge vldmiage r1!, {s16-s19} vldmiage r2!, {s24-s27} vldmiage r1!, {s20-s23} vldmiage r2!, {s28-s31} + it ge vmulge.f32 s24, s16, s24 vstmia r0!, {s8-s11} vstmia r0!, {s12-s15} + it ge vmulge.f32 s28, s20, s28 + itttt gt vldmiagt r1!, {s0-s3} vldmiagt r2!, {s8-s11} vldmiagt r1!, {s4-s7} vldmiagt r2!, {s12-s15} + ittt ge vmulge.f32 s8, s0, s8 vstmiage r0!, {s24-s27} vstmiage r0!, {s28-s31} @@ -97,33 +102,49 @@ function ff_vector_fmul_reverse_vfp, export=1 vmul.f32 s11, s0, s11 1: subs r3, r3, #16 + it ge vldmdbge r2!, {s16-s19} vmul.f32 s12, s7, s12 + it ge vldmiage r1!, {s24-s27} vmul.f32 s13, s6, s13 + it ge vldmdbge r2!, {s20-s23} vmul.f32 s14, s5, s14 + it ge vldmiage r1!, {s28-s31} vmul.f32 s15, s4, s15 + it ge vmulge.f32 s24, s19, s24 + it gt vldmdbgt r2!, {s0-s3} + it ge vmulge.f32 s25, s18, s25 vstmia r0!, {s8-s13} + it ge vmulge.f32 s26, s17, s26 + it gt vldmiagt r1!, {s8-s11} + itt ge vmulge.f32 s27, s16, s27 vmulge.f32 s28, s23, s28 + it gt vldmdbgt r2!, {s4-s7} + it ge vmulge.f32 s29, s22, s29 vstmia r0!, {s14-s15} + ittt ge vmulge.f32 s30, s21, s30 vmulge.f32 s31, s20, s31 vmulge.f32 s8, s3, s8 + it gt vldmiagt r1!, {s12-s15} + itttt ge vmulge.f32 s9, s2, s9 vmulge.f32 s10, s1, s10 vstmiage r0!, {s24-s27} vmulge.f32 s11, s0, s11 + it ge vstmiage r0!, {s28-s31} bgt 1b diff --git a/libavcodec/arm/fft_fixed_neon.S b/libavcodec/arm/fft_fixed_neon.S index bd6c853ec8..0316b80bce 100644 --- a/libavcodec/arm/fft_fixed_neon.S +++ b/libavcodec/arm/fft_fixed_neon.S @@ -56,7 +56,7 @@ vhsub.s16 \r0, \d0, \d1 @ t3, t4, t8, t7 vhsub.s16 \r1, \d1, \d0 vhadd.s16 \d0, \d0, \d1 @ t1, t2, t6, t5 - vmov.i64 \d1, #0xffff<<32 + vmov.i64 \d1, #0xffff00000000 vbit \r0, \r1, \d1 vrev64.16 \r1, \r0 @ t7, t8, t4, t3 vtrn.32 \r0, \r1 @ t3, t4, t7, t8 @@ -75,9 +75,9 @@ .endm function fft4_neon - vld1.16 {d0-d1}, [r0,:128] + vld1.16 {d0-d1}, [r0] fft4 d0, d1, d2, d3 - vst1.16 {d0-d1}, [r0,:128] + vst1.16 {d0-d1}, [r0] bx lr endfunc diff --git a/libavcodec/arm/fft_neon.S b/libavcodec/arm/fft_neon.S index 1db7abd146..fd76edcd15 100644 --- a/libavcodec/arm/fft_neon.S +++ b/libavcodec/arm/fft_neon.S @@ -349,9 +349,7 @@ function ff_fft_permute_neon, export=1 pop {r4,pc} endfunc - .section .rodata - .align 4 -fft_tab_neon: +const fft_tab_neon .word fft4_neon .word fft8_neon .word fft16_neon @@ -367,8 +365,12 @@ fft_tab_neon: .word fft16384_neon .word fft32768_neon .word fft65536_neon -ELF .size fft_tab_neon, . 
- fft_tab_neon +endconst - .align 4 -pmmp: .float +1.0, -1.0, -1.0, +1.0 -mppm: .float -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, -M_SQRT1_2 +const pmmp, align=4 + .float +1.0, -1.0, -1.0, +1.0 +endconst + +const mppm, align=4 + .float -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, -M_SQRT1_2 +endconst diff --git a/libavcodec/arm/fmtconvert_neon.S b/libavcodec/arm/fmtconvert_neon.S index 359e57e40b..d1ad32ed27 100644 --- a/libavcodec/arm/fmtconvert_neon.S +++ b/libavcodec/arm/fmtconvert_neon.S @@ -71,6 +71,7 @@ endfunc function ff_float_to_int16_interleave_neon, export=1 cmp r3, #2 + itt lt ldrlt r1, [r1] blt ff_float_to_int16_neon bne 4f @@ -196,6 +197,7 @@ function ff_float_to_int16_interleave_neon, export=1 vst1.64 {d3}, [r8], ip vst1.64 {d7}, [r8], ip subs r3, r3, #4 + it eq popeq {r4-r8,pc} cmp r3, #4 add r0, r0, #8 @@ -305,6 +307,7 @@ function ff_float_to_int16_interleave_neon, export=1 vst1.32 {d23[1]}, [r8], ip 8: subs r3, r3, #2 add r0, r0, #4 + it eq popeq {r4-r8,pc} @ 1 channel @@ -354,6 +357,7 @@ function ff_float_to_int16_interleave_neon, export=1 vst1.16 {d2[3]}, [r5,:16], ip vst1.16 {d3[1]}, [r5,:16], ip vst1.16 {d3[3]}, [r5,:16], ip + it eq popeq {r4-r8,pc} vld1.64 {d0-d1}, [r4,:128]! vcvt.s32.f32 q0, q0, #16 diff --git a/libavcodec/arm/fmtconvert_vfp.S b/libavcodec/arm/fmtconvert_vfp.S index da2ef8c158..7e2eb83620 100644 --- a/libavcodec/arm/fmtconvert_vfp.S +++ b/libavcodec/arm/fmtconvert_vfp.S @@ -46,6 +46,7 @@ function ff_float_to_int16_vfp, export=1 vmov r5, r6, s2, s3 vmov r7, r8, s4, s5 vmov ip, lr, s6, s7 + it gt vldmiagt r1!, {s16-s23} ssat r4, #16, r4 ssat r3, #16, r3 @@ -53,10 +54,12 @@ function ff_float_to_int16_vfp, export=1 ssat r5, #16, r5 pkhbt r3, r3, r4, lsl #16 pkhbt r4, r5, r6, lsl #16 + itttt gt vcvtgt.s32.f32 s0, s16 vcvtgt.s32.f32 s1, s17 vcvtgt.s32.f32 s2, s18 vcvtgt.s32.f32 s3, s19 + itttt gt vcvtgt.s32.f32 s4, s20 vcvtgt.s32.f32 s5, s21 vcvtgt.s32.f32 s6, s22 diff --git a/libavcodec/arm/h264dsp_neon.S b/libavcodec/arm/h264dsp_neon.S index bd15ced736..338de6f643 100644 --- a/libavcodec/arm/h264dsp_neon.S +++ b/libavcodec/arm/h264dsp_neon.S @@ -71,7 +71,9 @@ function ff_\type\()_h264_chroma_mc8_neon, export=1 pld [r1] pld [r1, r2] - muls r7, r4, r5 +A muls r7, r4, r5 +T mul r7, r4, r5 +T cmp r7, #0 rsb r6, r7, r5, lsl #3 rsb ip, r7, r4, lsl #3 sub r4, r7, r4, lsl #3 @@ -197,7 +199,9 @@ function ff_\type\()_h264_chroma_mc4_neon, export=1 pld [r1] pld [r1, r2] - muls r7, r4, r5 +A muls r7, r4, r5 +T mul r7, r4, r5 +T cmp r7, #0 rsb r6, r7, r5, lsl #3 rsb ip, r7, r4, lsl #3 sub r4, r7, r4, lsl #3 @@ -368,10 +372,10 @@ function ff_\type\()_h264_chroma_mc2_neon, export=1 pop {r4-r6, pc} 2: .ifc \type,put - ldrh r5, [r1], r2 - strh r5, [r0], r2 - ldrh r6, [r1], r2 - strh r6, [r0], r2 + ldrh_post r5, r1, r2 + strh_post r5, r0, r2 + ldrh_post r6, r1, r2 + strh_post r6, r0, r2 .else vld1.16 {d16[0]}, [r1], r2 vld1.16 {d16[1]}, [r1], r2 @@ -404,28 +408,17 @@ endfunc ldr ip, [sp] tst r2, r2 ldr ip, [ip] + it ne tstne r3, r3 vmov.32 d24[0], ip and ip, ip, ip, lsl #16 + it eq bxeq lr ands ip, ip, ip, lsl #8 + it lt bxlt lr .endm - .macro align_push_regs - and ip, sp, #15 - add ip, ip, #32 - sub sp, sp, ip - vst1.64 {d12-d15}, [sp,:128] - sub sp, sp, #32 - vst1.64 {d8-d11}, [sp,:128] - .endm - - .macro align_pop_regs - vld1.64 {d8-d11}, [sp,:128]! 
- vld1.64 {d12-d15}, [sp,:128], ip - .endm - .macro h264_loop_filter_luma vdup.8 q11, r2 @ alpha vmovl.u8 q12, d24 @@ -506,7 +499,7 @@ function ff_h264_v_loop_filter_luma_neon, export=1 vld1.64 {d18,d19}, [r0,:128], r1 vld1.64 {d16,d17}, [r0,:128], r1 - align_push_regs + vpush {d8-d15} h264_loop_filter_luma @@ -516,7 +509,7 @@ function ff_h264_v_loop_filter_luma_neon, export=1 vst1.64 {d0, d1}, [r0,:128], r1 vst1.64 {d10,d11}, [r0,:128] - align_pop_regs + vpop {d8-d15} bx lr endfunc @@ -543,7 +536,7 @@ function ff_h264_h_loop_filter_luma_neon, export=1 transpose_8x8 q3, q10, q9, q8, q0, q1, q2, q13 - align_push_regs + vpush {d8-d15} h264_loop_filter_luma @@ -568,7 +561,7 @@ function ff_h264_h_loop_filter_luma_neon, export=1 vst1.32 {d1[1]}, [r0], r1 vst1.32 {d11[1]}, [r0], r1 - align_pop_regs + vpop {d8-d15} bx lr endfunc @@ -1116,6 +1109,7 @@ function \type\()_h264_qpel8_hv_lowpass_neon vrhadd.u8 d11, d11, d7 sub r0, r0, r2, lsl #3 .endif + vst1.64 {d12}, [r0,:64], r2 vst1.64 {d13}, [r0,:64], r2 vst1.64 {d14}, [r0,:64], r2 @@ -1263,7 +1257,9 @@ function ff_\type\()_h264_qpel8_mc11_neon, export=1 \type\()_h264_qpel8_mc11: lowpass_const r3 mov r11, sp - bic sp, sp, #15 +A bic sp, sp, #15 +T bic r0, r11, #15 +T mov sp, r0 sub sp, sp, #64 mov r0, sp sub r1, r1, #2 @@ -1271,14 +1267,14 @@ function ff_\type\()_h264_qpel8_mc11_neon, export=1 mov ip, #8 vpush {d8-d15} bl put_h264_qpel8_h_lowpass_neon - ldrd r0, [r11] + ldrd r0, [r11], #8 mov r3, r2 add ip, sp, #64 sub r1, r1, r2, lsl #1 mov r2, #8 bl \type\()_h264_qpel8_v_lowpass_l2_neon vpop {d8-d15} - add sp, r11, #8 + mov sp, r11 pop {r11, pc} endfunc @@ -1287,7 +1283,9 @@ function ff_\type\()_h264_qpel8_mc21_neon, export=1 \type\()_h264_qpel8_mc21: lowpass_const r3 mov r11, sp - bic sp, sp, #15 +A bic sp, sp, #15 +T bic r0, r11, #15 +T mov sp, r0 sub sp, sp, #(8*8+16*12) sub r1, r1, #2 mov r3, #8 @@ -1296,14 +1294,14 @@ function ff_\type\()_h264_qpel8_mc21_neon, export=1 vpush {d8-d15} bl put_h264_qpel8_h_lowpass_neon mov r4, r0 - ldrd r0, [r11] + ldrd r0, [r11], #8 sub r1, r1, r2, lsl #1 sub r1, r1, #2 mov r3, r2 sub r2, r4, #64 bl \type\()_h264_qpel8_hv_lowpass_l2_neon vpop {d8-d15} - add sp, r11, #8 + mov sp, r11 pop {r4, r10, r11, pc} endfunc @@ -1330,7 +1328,9 @@ function ff_\type\()_h264_qpel8_mc12_neon, export=1 \type\()_h264_qpel8_mc12: lowpass_const r3 mov r11, sp - bic sp, sp, #15 +A bic sp, sp, #15 +T bic r0, r11, #15 +T mov sp, r0 sub sp, sp, #(8*8+16*12) sub r1, r1, r2, lsl #1 mov r3, r2 @@ -1339,20 +1339,22 @@ function ff_\type\()_h264_qpel8_mc12_neon, export=1 vpush {d8-d15} bl put_h264_qpel8_v_lowpass_neon mov r4, r0 - ldrd r0, [r11] + ldrd r0, [r11], #8 sub r1, r1, r3, lsl #1 sub r1, r1, #2 sub r2, r4, #64 bl \type\()_h264_qpel8_hv_lowpass_l2_neon vpop {d8-d15} - add sp, r11, #8 + mov sp, r11 pop {r4, r10, r11, pc} endfunc function ff_\type\()_h264_qpel8_mc22_neon, export=1 push {r4, r10, r11, lr} mov r11, sp - bic sp, sp, #15 +A bic sp, sp, #15 +T bic r4, r11, #15 +T mov sp, r4 sub r1, r1, r2, lsl #1 sub r1, r1, #2 mov r3, r2 @@ -1441,21 +1443,23 @@ function ff_\type\()_h264_qpel16_mc11_neon, export=1 \type\()_h264_qpel16_mc11: lowpass_const r3 mov r11, sp - bic sp, sp, #15 +A bic sp, sp, #15 +T bic r0, r11, #15 +T mov sp, r0 sub sp, sp, #256 mov r0, sp sub r1, r1, #2 mov r3, #16 vpush {d8-d15} bl put_h264_qpel16_h_lowpass_neon - ldrd r0, [r11] + ldrd r0, [r11], #8 mov r3, r2 add ip, sp, #64 sub r1, r1, r2, lsl #1 mov r2, #16 bl \type\()_h264_qpel16_v_lowpass_l2_neon vpop {d8-d15} - add sp, r11, #8 + mov sp, r11 pop {r4, 
r11, pc} endfunc @@ -1464,20 +1468,22 @@ function ff_\type\()_h264_qpel16_mc21_neon, export=1 \type\()_h264_qpel16_mc21: lowpass_const r3 mov r11, sp - bic sp, sp, #15 +A bic sp, sp, #15 +T bic r0, r11, #15 +T mov sp, r0 sub sp, sp, #(16*16+16*12) sub r1, r1, #2 mov r0, sp vpush {d8-d15} bl put_h264_qpel16_h_lowpass_neon_packed mov r4, r0 - ldrd r0, [r11] + ldrd r0, [r11], #8 sub r1, r1, r2, lsl #1 sub r1, r1, #2 mov r3, r2 bl \type\()_h264_qpel16_hv_lowpass_l2_neon vpop {d8-d15} - add sp, r11, #8 + mov sp, r11 pop {r4-r5, r9-r11, pc} endfunc @@ -1504,7 +1510,9 @@ function ff_\type\()_h264_qpel16_mc12_neon, export=1 \type\()_h264_qpel16_mc12: lowpass_const r3 mov r11, sp - bic sp, sp, #15 +A bic sp, sp, #15 +T bic r0, r11, #15 +T mov sp, r0 sub sp, sp, #(16*16+16*12) sub r1, r1, r2, lsl #1 mov r0, sp @@ -1512,13 +1520,13 @@ function ff_\type\()_h264_qpel16_mc12_neon, export=1 vpush {d8-d15} bl put_h264_qpel16_v_lowpass_neon_packed mov r4, r0 - ldrd r0, [r11] + ldrd r0, [r11], #8 sub r1, r1, r3, lsl #1 sub r1, r1, #2 mov r2, r3 bl \type\()_h264_qpel16_hv_lowpass_l2_neon vpop {d8-d15} - add sp, r11, #8 + mov sp, r11 pop {r4-r5, r9-r11, pc} endfunc @@ -1526,7 +1534,9 @@ function ff_\type\()_h264_qpel16_mc22_neon, export=1 push {r4, r9-r11, lr} lowpass_const r3 mov r11, sp - bic sp, sp, #15 +A bic sp, sp, #15 +T bic r4, r11, #15 +T mov sp, r4 sub r1, r1, r2, lsl #1 sub r1, r1, #2 mov r3, r2 diff --git a/libavcodec/arm/h264idct_neon.S b/libavcodec/arm/h264idct_neon.S index afd3718518..8cf9bd8b66 100644 --- a/libavcodec/arm/h264idct_neon.S +++ b/libavcodec/arm/h264idct_neon.S @@ -106,10 +106,12 @@ function ff_h264_idct_add16_neon, export=1 blt 2f ldrsh lr, [r1] add r0, r0, r4 + it ne movne lr, #0 cmp lr, #0 - adrne lr, ff_h264_idct_dc_add_neon - adreq lr, ff_h264_idct_add_neon + ite ne + adrne lr, ff_h264_idct_dc_add_neon + CONFIG_THUMB + adreq lr, ff_h264_idct_add_neon + CONFIG_THUMB blx lr 2: subs ip, ip, #1 add r1, r1, #32 @@ -132,8 +134,9 @@ function ff_h264_idct_add16intra_neon, export=1 add r0, r0, r4 cmp r8, #0 ldrsh r8, [r1] - adrne lr, ff_h264_idct_add_neon - adreq lr, ff_h264_idct_dc_add_neon + iteet ne + adrne lr, ff_h264_idct_add_neon + CONFIG_THUMB + adreq lr, ff_h264_idct_dc_add_neon + CONFIG_THUMB cmpeq r8, #0 blxne lr subs ip, ip, #1 @@ -159,12 +162,14 @@ function ff_h264_idct_add8_neon, export=1 add r1, r3, r12, lsl #5 cmp r8, #0 ldrsh r8, [r1] - adrne lr, ff_h264_idct_add_neon - adreq lr, ff_h264_idct_dc_add_neon + iteet ne + adrne lr, ff_h264_idct_add_neon + CONFIG_THUMB + adreq lr, ff_h264_idct_dc_add_neon + CONFIG_THUMB cmpeq r8, #0 blxne lr add r12, r12, #1 cmp r12, #4 + itt eq moveq r12, #16 moveq r4, r9 cmp r12, #20 @@ -365,10 +370,12 @@ function ff_h264_idct8_add4_neon, export=1 blt 2f ldrsh lr, [r1] add r0, r0, r4 + it ne movne lr, #0 cmp lr, #0 - adrne lr, ff_h264_idct8_dc_add_neon - adreq lr, ff_h264_idct8_add_neon + ite ne + adrne lr, ff_h264_idct8_dc_add_neon + CONFIG_THUMB + adreq lr, ff_h264_idct8_add_neon + CONFIG_THUMB blx lr 2: subs r12, r12, #4 add r1, r1, #128 @@ -376,8 +383,8 @@ function ff_h264_idct8_add4_neon, export=1 pop {r4-r8,pc} endfunc - .section .rodata -scan8: .byte 4+ 1*8, 5+ 1*8, 4+ 2*8, 5+ 2*8 +const scan8 + .byte 4+ 1*8, 5+ 1*8, 4+ 2*8, 5+ 2*8 .byte 6+ 1*8, 7+ 1*8, 6+ 2*8, 7+ 2*8 .byte 4+ 3*8, 5+ 3*8, 4+ 4*8, 5+ 4*8 .byte 6+ 3*8, 7+ 3*8, 6+ 4*8, 7+ 4*8 @@ -389,3 +396,4 @@ scan8: .byte 4+ 1*8, 5+ 1*8, 4+ 2*8, 5+ 2*8 .byte 6+11*8, 7+11*8, 6+12*8, 7+12*8 .byte 4+13*8, 5+13*8, 4+14*8, 5+14*8 .byte 6+13*8, 7+13*8, 6+14*8, 7+14*8 +endconst diff --git 
a/libavcodec/arm/h264pred_neon.S b/libavcodec/arm/h264pred_neon.S index 63c96ee725..0dac20b4a0 100644 --- a/libavcodec/arm/h264pred_neon.S +++ b/libavcodec/arm/h264pred_neon.S @@ -166,12 +166,9 @@ function ff_pred16x16_plane_neon, export=1 bx lr endfunc - .section .rodata - .align 4 -p16weight: +const p16weight, align=4 .short 1,2,3,4,5,6,7,8 - - .text +endconst function ff_pred8x8_hor_neon, export=1 sub r2, r0, #1 diff --git a/libavcodec/arm/mathops.h b/libavcodec/arm/mathops.h index 299a973cb6..d67714c496 100644 --- a/libavcodec/arm/mathops.h +++ b/libavcodec/arm/mathops.h @@ -64,11 +64,14 @@ static inline av_const int mid_pred(int a, int b, int c) __asm__ ( "mov %0, %2 \n\t" "cmp %1, %2 \n\t" + "itt gt \n\t" "movgt %0, %1 \n\t" "movgt %1, %2 \n\t" "cmp %1, %3 \n\t" + "it le \n\t" "movle %1, %3 \n\t" "cmp %0, %1 \n\t" + "it gt \n\t" "movgt %0, %1 \n\t" : "=&r"(m), "+r"(a) : "r"(b), "r"(c) diff --git a/libavcodec/arm/mdct_neon.S b/libavcodec/arm/mdct_neon.S index fcf802275f..2def704497 100644 --- a/libavcodec/arm/mdct_neon.S +++ b/libavcodec/arm/mdct_neon.S @@ -191,7 +191,9 @@ function ff_mdct_calc_neon, export=1 vadd.f32 d17, d17, d3 @ in2u+in1d -I 1: vmul.f32 d7, d0, d21 @ I*s - ldr r10, [r3, lr, lsr #1] +A ldr r10, [r3, lr, lsr #1] +T lsr r10, lr, #1 +T ldr r10, [r3, r10] vmul.f32 d6, d1, d20 @ -R*c ldr r6, [r3, #4]! vmul.f32 d4, d1, d21 @ -R*s diff --git a/libavcodec/arm/mpegaudiodsp_fixed_armv6.S b/libavcodec/arm/mpegaudiodsp_fixed_armv6.S index 9ec731480b..b517b973e7 100644 --- a/libavcodec/arm/mpegaudiodsp_fixed_armv6.S +++ b/libavcodec/arm/mpegaudiodsp_fixed_armv6.S @@ -75,7 +75,7 @@ function ff_mpadsp_apply_window_fixed_armv6, export=1 sum8 r8, r9, r1, r0, r10, r11, r12, lr sum8 r8, r9, r1, r2, r10, r11, r12, lr, rsb, 32 round r10, r8, r9 - strh r10, [r3], r4 + strh_post r10, r3, r4 mov lr, #15 1: @@ -127,10 +127,10 @@ function ff_mpadsp_apply_window_fixed_armv6, export=1 round r10, r8, r9 adds r8, r8, r4 adc r9, r9, r7 - strh r10, [r3], r12 + strh_post r10, r3, r12 round r11, r8, r9 subs lr, lr, #1 - strh r11, [r5], -r12 + strh_dpost r11, r5, r12 bgt 1b sum8 r8, r9, r1, r0, r10, r11, r12, lr, rsb, 33 diff --git a/libavcodec/arm/mpegvideo_armv5te_s.S b/libavcodec/arm/mpegvideo_armv5te_s.S index 82095ab15d..3db9c734e9 100644 --- a/libavcodec/arm/mpegvideo_armv5te_s.S +++ b/libavcodec/arm/mpegvideo_armv5te_s.S @@ -38,15 +38,21 @@ .macro dequant_t dst, src, mul, add, tmp rsbs \tmp, ip, \src, asr #16 + it gt addgt \tmp, \add, #0 + it lt rsblt \tmp, \add, #0 + it ne smlatbne \dst, \src, \mul, \tmp .endm .macro dequant_b dst, src, mul, add, tmp rsbs \tmp, ip, \src, lsl #16 + it gt addgt \tmp, \add, #0 + it lt rsblt \tmp, \add, #0 + it ne smlabbne \dst, \src, \mul, \tmp .endm @@ -80,21 +86,27 @@ function ff_dct_unquantize_h263_armv5te, export=1 strh lr, [r0], #2 subs r3, r3, #8 + it gt ldrdgt r4, [r0, #0] /* load data early to avoid load/use pipeline stall */ bgt 1b adds r3, r3, #2 + it le pople {r4-r9,pc} 2: ldrsh r9, [r0, #0] ldrsh lr, [r0, #2] mov r8, r2 cmp r9, #0 + it lt rsblt r8, r2, #0 + it ne smlabbne r9, r9, r1, r8 mov r8, r2 cmp lr, #0 + it lt rsblt r8, r2, #0 + it ne smlabbne lr, lr, r1, r8 strh r9, [r0], #2 strh lr, [r0], #2 diff --git a/libavcodec/arm/mpegvideo_iwmmxt.c b/libavcodec/arm/mpegvideo_iwmmxt.c index 0a288630ab..bb47c0d321 100644 --- a/libavcodec/arm/mpegvideo_iwmmxt.c +++ b/libavcodec/arm/mpegvideo_iwmmxt.c @@ -93,29 +93,9 @@ static void dct_unquantize_h263_intra_iwmmxt(MpegEncContext *s, block_orig[0] = level; } -#if 0 -static void 
dct_unquantize_h263_inter_iwmmxt(MpegEncContext *s, - DCTELEM *block, int n, int qscale) -{ - int nCoeffs; - - assert(s->block_last_index[n]>=0); - - if(s->ac_pred) - nCoeffs=63; - else - nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ]; - - ippiQuantInvInter_Compact_H263_16s_I(block, nCoeffs+1, qscale); -} -#endif - void MPV_common_init_iwmmxt(MpegEncContext *s) { if (!(mm_flags & AV_CPU_FLAG_IWMMXT)) return; s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_iwmmxt; -#if 0 - s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_iwmmxt; -#endif } diff --git a/libavcodec/arm/mpegvideo_neon.S b/libavcodec/arm/mpegvideo_neon.S index b695fb7c22..849047e13c 100644 --- a/libavcodec/arm/mpegvideo_neon.S +++ b/libavcodec/arm/mpegvideo_neon.S @@ -57,6 +57,7 @@ function ff_dct_unquantize_h263_neon, export=1 subs r3, r3, #16 vst1.16 {q0}, [r1,:128]! vst1.16 {q8}, [r1,:128]! + it le bxle lr cmp r3, #8 bgt 1b @@ -78,6 +79,7 @@ function ff_dct_unquantize_h263_intra_neon, export=1 ldr r6, [r0, #AC_PRED] add lr, r0, #INTER_SCANTAB_RASTER_END cmp r6, #0 + it ne movne r12, #63 bne 1f ldr r12, [r12, r2, lsl #2] @@ -86,9 +88,11 @@ function ff_dct_unquantize_h263_intra_neon, export=1 ldrsh r4, [r1] cmp r5, #0 mov r5, r1 + it ne movne r2, #0 bne 2f cmp r2, #4 + it ge addge r0, r0, #4 sub r2, r3, #1 ldr r6, [r0, #Y_DC_SCALE] diff --git a/libavcodec/arm/rdft_neon.S b/libavcodec/arm/rdft_neon.S index 4f8a1032cc..19886e6d0b 100644 --- a/libavcodec/arm/rdft_neon.S +++ b/libavcodec/arm/rdft_neon.S @@ -137,6 +137,7 @@ function ff_rdft_calc_neon, export=1 vst1.32 {d22}, [r5,:64] cmp r6, #0 + it eq popeq {r4-r8,pc} vmul.f32 d22, d22, d18 diff --git a/libavcodec/arm/simple_idct_arm.S b/libavcodec/arm/simple_idct_arm.S index ecb83d23ad..990dde6ff7 100644 --- a/libavcodec/arm/simple_idct_arm.S +++ b/libavcodec/arm/simple_idct_arm.S @@ -121,11 +121,13 @@ __b_evaluation: ldr r11, [r12, #offW7] @ R11=W7 mul r5, r10, r7 @ R5=W5*ROWr16[1]=b2 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) mul r7, r11, r7 @ R7=W7*ROWr16[1]=b3 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) - teq r2, #0 @ if null avoid muls - mlane r0, r9, r2, r0 @ R0+=W3*ROWr16[3]=b0 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle) + teq r2, #0 @ if null avoid muls + itttt ne + mlane r0, r9, r2, r0 @ R0+=W3*ROWr16[3]=b0 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle) rsbne r2, r2, #0 @ R2=-ROWr16[3] mlane r1, r11, r2, r1 @ R1-=W7*ROWr16[3]=b1 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle) mlane r5, r8, r2, r5 @ R5-=W1*ROWr16[3]=b2 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle) + it ne mlane r7, r10, r2, r7 @ R7-=W5*ROWr16[3]=b3 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle) @@ at this point, R0=b0, R1=b1, R2 (free), R3=ROWr32[2], R4=ROWr32[3], @@ -148,19 +150,23 @@ __b_evaluation: @@ MAC16(b3, -W1, row[7]); @@ MAC16(b1, -W5, row[7]); mov r3, r3, asr #16 @ R3=ROWr16[5] - teq r3, #0 @ if null avoid muls + teq r3, #0 @ if null avoid muls + it ne mlane r0, r10, r3, r0 @ R0+=W5*ROWr16[5]=b0 mov r4, r4, asr #16 @ R4=ROWr16[7] + itttt ne mlane r5, r11, r3, r5 @ R5+=W7*ROWr16[5]=b2 mlane r7, r9, r3, r7 @ R7+=W3*ROWr16[5]=b3 rsbne r3, r3, #0 @ R3=-ROWr16[5] mlane r1, r8, r3, r1 @ R7-=W1*ROWr16[5]=b1 @@ R3 is free now - teq r4, #0 @ if null avoid muls + teq r4, #0 @ if null avoid muls + itttt ne mlane r0, r11, r4, r0 @ R0+=W7*ROWr16[7]=b0 mlane r5, r9, 
r4, r5 @ R5+=W3*ROWr16[7]=b2 rsbne r4, r4, #0 @ R4=-ROWr16[7] mlane r7, r8, r4, r7 @ R7-=W1*ROWr16[7]=b3 + it ne mlane r1, r10, r4, r1 @ R1-=W5*ROWr16[7]=b1 @@ R4 is free now __end_b_evaluation: @@ -204,16 +210,19 @@ __a_evaluation: @@ a2 -= W4*row[4] @@ a3 += W4*row[4] ldrsh r11, [r14, #8] @ R11=ROWr16[4] - teq r11, #0 @ if null avoid muls + teq r11, #0 @ if null avoid muls + it ne mulne r11, r9, r11 @ R11=W4*ROWr16[4] @@ R9 is free now ldrsh r9, [r14, #12] @ R9=ROWr16[6] + itttt ne addne r6, r6, r11 @ R6+=W4*ROWr16[4] (a0) subne r2, r2, r11 @ R2-=W4*ROWr16[4] (a1) subne r3, r3, r11 @ R3-=W4*ROWr16[4] (a2) addne r4, r4, r11 @ R4+=W4*ROWr16[4] (a3) @@ W6 alone is no more useful, save W2*ROWr16[6] in it instead - teq r9, #0 @ if null avoid muls + teq r9, #0 @ if null avoid muls + itttt ne mulne r11, r10, r9 @ R11=W6*ROWr16[6] addne r6, r6, r11 @ R6+=W6*ROWr16[6] (a0) mulne r10, r8, r9 @ R10=W2*ROWr16[6] @@ -222,6 +231,7 @@ __a_evaluation: @@ a1 -= W2*row[6]; @@ a2 += W2*row[6]; subne r4, r4, r11 @ R4-=W6*ROWr16[6] (a3) + itt ne subne r2, r2, r10 @ R2-=W2*ROWr16[6] (a1) addne r3, r3, r10 @ R3+=W2*ROWr16[6] (a2) @@ -323,10 +333,12 @@ __b_evaluation2: ldrsh r2, [r14, #48] mul r7, r11, r7 @ R7=W7*ROWr16[1]=b3 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) teq r2, #0 @ if 0, then avoid muls + itttt ne mlane r0, r9, r2, r0 @ R0+=W3*ROWr16[3]=b0 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle) rsbne r2, r2, #0 @ R2=-ROWr16[3] mlane r1, r11, r2, r1 @ R1-=W7*ROWr16[3]=b1 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle) mlane r5, r8, r2, r5 @ R5-=W1*ROWr16[3]=b2 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle) + it ne mlane r7, r10, r2, r7 @ R7-=W5*ROWr16[3]=b3 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle) @@ at this point, R0=b0, R1=b1, R2 (free), R3 (free), R4 (free), @@ -342,18 +354,22 @@ __b_evaluation2: @@ MAC16(b1, -W5, col[7x8]); ldrsh r3, [r14, #80] @ R3=COLr16[5x8] teq r3, #0 @ if 0 then avoid muls + itttt ne mlane r0, r10, r3, r0 @ R0+=W5*ROWr16[5x8]=b0 mlane r5, r11, r3, r5 @ R5+=W7*ROWr16[5x8]=b2 mlane r7, r9, r3, r7 @ R7+=W3*ROWr16[5x8]=b3 rsbne r3, r3, #0 @ R3=-ROWr16[5x8] ldrsh r4, [r14, #112] @ R4=COLr16[7x8] + it ne mlane r1, r8, r3, r1 @ R7-=W1*ROWr16[5x8]=b1 @@ R3 is free now teq r4, #0 @ if 0 then avoid muls + itttt ne mlane r0, r11, r4, r0 @ R0+=W7*ROWr16[7x8]=b0 mlane r5, r9, r4, r5 @ R5+=W3*ROWr16[7x8]=b2 rsbne r4, r4, #0 @ R4=-ROWr16[7x8] mlane r7, r8, r4, r7 @ R7-=W1*ROWr16[7x8]=b3 + it ne mlane r1, r10, r4, r1 @ R1-=W5*ROWr16[7x8]=b1 @@ R4 is free now __end_b_evaluation2: @@ -390,15 +406,18 @@ __a_evaluation2: @@ a3 += W4*row[4] ldrsh r11, [r14, #64] @ R11=ROWr16[4] teq r11, #0 @ if null avoid muls + itttt ne mulne r11, r9, r11 @ R11=W4*ROWr16[4] @@ R9 is free now addne r6, r6, r11 @ R6+=W4*ROWr16[4] (a0) subne r2, r2, r11 @ R2-=W4*ROWr16[4] (a1) subne r3, r3, r11 @ R3-=W4*ROWr16[4] (a2) ldrsh r9, [r14, #96] @ R9=ROWr16[6] + it ne addne r4, r4, r11 @ R4+=W4*ROWr16[4] (a3) @@ W6 alone is no more useful, save W2*ROWr16[6] in it instead teq r9, #0 @ if null avoid muls + itttt ne mulne r11, r10, r9 @ R11=W6*ROWr16[6] addne r6, r6, r11 @ R6+=W6*ROWr16[6] (a0) mulne r10, r8, r9 @ R10=W2*ROWr16[6] @@ -407,6 +426,7 @@ __a_evaluation2: @@ a1 -= W2*row[6]; @@ a2 += W2*row[6]; subne r4, r4, r11 @ R4-=W6*ROWr16[6] (a3) + itt ne subne r2, r2, r10 @ R2-=W2*ROWr16[6] (a1) addne r3, r3, r10 @ R3+=W2*ROWr16[6] (a2) __end_a_evaluation2: diff --git 
a/libavcodec/arm/simple_idct_armv5te.S b/libavcodec/arm/simple_idct_armv5te.S index 3c4b5c06d1..71727ceccc 100644 --- a/libavcodec/arm/simple_idct_armv5te.S +++ b/libavcodec/arm/simple_idct_armv5te.S @@ -49,6 +49,7 @@ function idct_row_armv5te ldrd v1, [a1, #8] ldrd a3, [a1] /* a3 = row[1:0], a4 = row[3:2] */ orrs v1, v1, v2 + itt eq cmpeq v1, a4 cmpeq v1, a3, lsr #16 beq row_dc_only @@ -269,6 +270,7 @@ function idct_col_armv5te ldmfd sp!, {a3, a4} adds a2, a3, v1 mov a2, a2, lsr #20 + it mi orrmi a2, a2, #0xf000 add ip, a4, v2 mov ip, ip, asr #20 @@ -276,6 +278,7 @@ function idct_col_armv5te str a2, [a1] subs a3, a3, v1 mov a2, a3, lsr #20 + it mi orrmi a2, a2, #0xf000 sub a4, a4, v2 mov a4, a4, asr #20 @@ -285,6 +288,7 @@ function idct_col_armv5te subs a2, a3, v3 mov a2, a2, lsr #20 + it mi orrmi a2, a2, #0xf000 sub ip, a4, v4 mov ip, ip, asr #20 @@ -292,6 +296,7 @@ function idct_col_armv5te str a2, [a1, #(16*1)] adds a3, a3, v3 mov a2, a3, lsr #20 + it mi orrmi a2, a2, #0xf000 add a4, a4, v4 mov a4, a4, asr #20 @@ -301,6 +306,7 @@ function idct_col_armv5te adds a2, a3, v5 mov a2, a2, lsr #20 + it mi orrmi a2, a2, #0xf000 add ip, a4, v6 mov ip, ip, asr #20 @@ -308,6 +314,7 @@ function idct_col_armv5te str a2, [a1, #(16*2)] subs a3, a3, v5 mov a2, a3, lsr #20 + it mi orrmi a2, a2, #0xf000 sub a4, a4, v6 mov a4, a4, asr #20 @@ -317,6 +324,7 @@ function idct_col_armv5te adds a2, a3, v7 mov a2, a2, lsr #20 + it mi orrmi a2, a2, #0xf000 add ip, a4, fp mov ip, ip, asr #20 @@ -324,6 +332,7 @@ function idct_col_armv5te str a2, [a1, #(16*3)] subs a3, a3, v7 mov a2, a3, lsr #20 + it mi orrmi a2, a2, #0xf000 sub a4, a4, fp mov a4, a4, asr #20 @@ -335,15 +344,19 @@ endfunc .macro clip dst, src:vararg movs \dst, \src + it mi movmi \dst, #0 cmp \dst, #255 + it gt movgt \dst, #255 .endm .macro aclip dst, src:vararg adds \dst, \src + it mi movmi \dst, #0 cmp \dst, #255 + it gt movgt \dst, #255 .endm @@ -370,35 +383,35 @@ function idct_col_put_armv5te orr a2, a3, a4, lsl #8 rsb v2, lr, lr, lsl #3 ldmfd sp!, {a3, a4} - strh a2, [v2, v1]! + strh_pre a2, v2, v1 sub a2, a3, v3 clip a2, a2, asr #20 sub ip, a4, v4 clip ip, ip, asr #20 orr a2, a2, ip, lsl #8 - strh a2, [v1, lr]! + strh_pre a2, v1, lr add a3, a3, v3 clip a2, a3, asr #20 add a4, a4, v4 clip a4, a4, asr #20 orr a2, a2, a4, lsl #8 ldmfd sp!, {a3, a4} - strh a2, [v2, -lr]! + strh_dpre a2, v2, lr add a2, a3, v5 clip a2, a2, asr #20 add ip, a4, v6 clip ip, ip, asr #20 orr a2, a2, ip, lsl #8 - strh a2, [v1, lr]! + strh_pre a2, v1, lr sub a3, a3, v5 clip a2, a3, asr #20 sub a4, a4, v6 clip a4, a4, asr #20 orr a2, a2, a4, lsl #8 ldmfd sp!, {a3, a4} - strh a2, [v2, -lr]! + strh_dpre a2, v2, lr add a2, a3, v7 clip a2, a2, asr #20 @@ -411,7 +424,7 @@ function idct_col_put_armv5te sub a4, a4, fp clip a4, a4, asr #20 orr a2, a2, a4, lsl #8 - strh a2, [v2, -lr] + strh_dpre a2, v2, lr ldr pc, [sp], #4 endfunc @@ -436,7 +449,7 @@ function idct_col_add_armv5te ldr v1, [sp, #32] sub a4, a4, v2 rsb v2, v1, v1, lsl #3 - ldrh ip, [v2, lr]! + ldrh_pre ip, v2, lr strh a2, [lr] and a2, ip, #255 aclip a3, a2, a3, asr #20 @@ -448,7 +461,7 @@ function idct_col_add_armv5te strh a2, [v2] ldmfd sp!, {a3, a4} - ldrh ip, [lr, v1]! + ldrh_pre ip, lr, v1 sub a2, a3, v3 add a3, a3, v3 and v3, ip, #255 @@ -458,7 +471,7 @@ function idct_col_add_armv5te aclip v3, v3, ip, lsr #8 orr a2, a2, v3, lsl #8 add a4, a4, v4 - ldrh ip, [v2, -v1]! 
+ ldrh_dpre ip, v2, v1 strh a2, [lr] and a2, ip, #255 aclip a3, a2, a3, asr #20 @@ -468,7 +481,7 @@ function idct_col_add_armv5te strh a2, [v2] ldmfd sp!, {a3, a4} - ldrh ip, [lr, v1]! + ldrh_pre ip, lr, v1 add a2, a3, v5 sub a3, a3, v5 and v3, ip, #255 @@ -478,7 +491,7 @@ function idct_col_add_armv5te aclip v3, v3, ip, lsr #8 orr a2, a2, v3, lsl #8 sub a4, a4, v6 - ldrh ip, [v2, -v1]! + ldrh_dpre ip, v2, v1 strh a2, [lr] and a2, ip, #255 aclip a3, a2, a3, asr #20 @@ -488,7 +501,7 @@ function idct_col_add_armv5te strh a2, [v2] ldmfd sp!, {a3, a4} - ldrh ip, [lr, v1]! + ldrh_pre ip, lr, v1 add a2, a3, v7 sub a3, a3, v7 and v3, ip, #255 @@ -498,7 +511,7 @@ function idct_col_add_armv5te aclip v3, v3, ip, lsr #8 orr a2, a2, v3, lsl #8 sub a4, a4, fp - ldrh ip, [v2, -v1]! + ldrh_dpre ip, v2, v1 strh a2, [lr] and a2, ip, #255 aclip a3, a2, a3, asr #20 diff --git a/libavcodec/arm/simple_idct_armv6.S b/libavcodec/arm/simple_idct_armv6.S index d61c1fd3ea..a176b3a7b4 100644 --- a/libavcodec/arm/simple_idct_armv6.S +++ b/libavcodec/arm/simple_idct_armv6.S @@ -200,6 +200,7 @@ function idct_row_armv6 ldr r3, [r0, #8] /* r3 = row[3,1] */ ldr r2, [r0] /* r2 = row[2,0] */ orrs lr, lr, ip + itt eq cmpeq lr, r3 cmpeq lr, r2, lsr #16 beq 1f @@ -282,14 +283,14 @@ function idct_col_put_armv6 pop {r1, r2} idct_finish_shift_sat COL_SHIFT - strb r4, [r1], r2 - strb r5, [r1], r2 - strb r6, [r1], r2 - strb r7, [r1], r2 - strb r11,[r1], r2 - strb r10,[r1], r2 - strb r9, [r1], r2 - strb r8, [r1], r2 + strb_post r4, r1, r2 + strb_post r5, r1, r2 + strb_post r6, r1, r2 + strb_post r7, r1, r2 + strb_post r11,r1, r2 + strb_post r10,r1, r2 + strb_post r9, r1, r2 + strb_post r8, r1, r2 sub r1, r1, r2, lsl #3 @@ -318,16 +319,16 @@ function idct_col_add_armv6 add ip, r3, ip, asr #COL_SHIFT usat ip, #8, ip add r4, r7, r4, asr #COL_SHIFT - strb ip, [r1], r2 + strb_post ip, r1, r2 ldrb ip, [r1, r2] usat r4, #8, r4 ldrb r11,[r1, r2, lsl #2] add r5, ip, r5, asr #COL_SHIFT usat r5, #8, r5 - strb r4, [r1], r2 + strb_post r4, r1, r2 ldrb r3, [r1, r2] ldrb ip, [r1, r2, lsl #2] - strb r5, [r1], r2 + strb_post r5, r1, r2 ldrb r7, [r1, r2] ldrb r4, [r1, r2, lsl #2] add r6, r3, r6, asr #COL_SHIFT @@ -340,11 +341,11 @@ function idct_col_add_armv6 usat r8, #8, r8 add lr, r4, lr, asr #COL_SHIFT usat lr, #8, lr - strb r6, [r1], r2 - strb r10,[r1], r2 - strb r9, [r1], r2 - strb r8, [r1], r2 - strb lr, [r1], r2 + strb_post r6, r1, r2 + strb_post r10,r1, r2 + strb_post r9, r1, r2 + strb_post r8, r1, r2 + strb_post lr, r1, r2 sub r1, r1, r2, lsl #3 diff --git a/libavcodec/arm/simple_idct_neon.S b/libavcodec/arm/simple_idct_neon.S index 17cde5835a..5df8f6e4fc 100644 --- a/libavcodec/arm/simple_idct_neon.S +++ b/libavcodec/arm/simple_idct_neon.S @@ -71,7 +71,7 @@ function idct_row4_pld_neon add r3, r0, r1, lsl #2 pld [r0, r1] pld [r0, r1, lsl #1] - pld [r3, -r1] +A pld [r3, -r1] pld [r3] pld [r3, r1] add r3, r3, r1, lsl #1 @@ -164,6 +164,7 @@ function idct_col4_neon orrs r4, r4, r5 idct_col4_top + it eq addeq r2, r2, #16 beq 1f @@ -176,6 +177,7 @@ function idct_col4_neon 1: orrs r6, r6, r7 ldrd r4, [r2, #16] + it eq addeq r2, r2, #16 beq 2f @@ -187,6 +189,7 @@ function idct_col4_neon 2: orrs r4, r4, r5 ldrd r4, [r2, #16] + it eq addeq r2, r2, #16 beq 3f @@ -199,6 +202,7 @@ function idct_col4_neon vadd.i32 q13, q13, q8 3: orrs r4, r4, r5 + it eq addeq r2, r2, #16 beq 4f @@ -239,10 +243,9 @@ function idct_col4_st8_neon bx lr endfunc - .section .rodata - .align 4 -idct_coeff_neon: +const idct_coeff_neon, align=4 .short W1, W2, W3, W4, W5, W6, W7, W4c 
+endconst .macro idct_start data push {r4-r7, lr} diff --git a/libavcodec/arm/synth_filter_neon.S b/libavcodec/arm/synth_filter_neon.S index 1464abe562..3f91d67506 100644 --- a/libavcodec/arm/synth_filter_neon.S +++ b/libavcodec/arm/synth_filter_neon.S @@ -100,9 +100,11 @@ NOVFP vldr s0, [sp, #12*4] @ scale vst1.32 {q9}, [r2,:128] subs r1, r1, #1 + it eq popeq {r4-r11,pc} cmp r4, #0 + itt eq subeq r8, r8, #512*4 subeq r9, r9, #512*4 sub r5, r5, #512*4 diff --git a/libavcodec/arm/vp3dsp_neon.S b/libavcodec/arm/vp3dsp_neon.S index d97ed3d21d..ae3e40201a 100644 --- a/libavcodec/arm/vp3dsp_neon.S +++ b/libavcodec/arm/vp3dsp_neon.S @@ -20,11 +20,9 @@ #include "asm.S" -.section .rodata -.align 4 - -vp3_idct_constants: +const vp3_idct_constants, align=4 .short 64277, 60547, 54491, 46341, 36410, 25080, 12785 +endconst #define xC1S7 d0[0] #define xC2S6 d0[1] @@ -34,8 +32,6 @@ vp3_idct_constants: #define xC6S2 d1[1] #define xC7S1 d1[2] -.text - .macro vp3_loop_filter vsubl.u8 q3, d18, d17 vsubl.u8 q2, d16, d19 diff --git a/libavcodec/arm/vp56_arith.h b/libavcodec/arm/vp56_arith.h index cd02579e5b..ece9ac2a6c 100644 --- a/libavcodec/arm/vp56_arith.h +++ b/libavcodec/arm/vp56_arith.h @@ -21,6 +21,14 @@ #ifndef AVCODEC_ARM_VP56_ARITH_H #define AVCODEC_ARM_VP56_ARITH_H +#if CONFIG_THUMB +# define A(x) +# define T(x) x +#else +# define A(x) x +# define T(x) +#endif + #if HAVE_ARMV6 && HAVE_INLINE_ASM #define vp56_rac_get_prob vp56_rac_get_prob_armv6 @@ -32,15 +40,21 @@ static inline int vp56_rac_get_prob_armv6(VP56RangeCoder *c, int pr) unsigned bit; __asm__ ("adds %3, %3, %0 \n" + "itt cs \n" "cmpcs %7, %4 \n" - "ldrcsh %2, [%4], #2 \n" + A("ldrcsh %2, [%4], #2 \n") + T("ldrhcs %2, [%4], #2 \n") "rsb %0, %6, #256 \n" "smlabb %0, %5, %6, %0 \n" + T("itttt cs \n") "rev16cs %2, %2 \n" - "orrcs %1, %1, %2, lsl %3 \n" + T("lslcs %2, %2, %3 \n") + T("orrcs %1, %1, %2 \n") + A("orrcs %1, %1, %2, lsl %3 \n") "subcs %3, %3, #16 \n" "lsr %0, %0, #8 \n" "cmp %1, %0, lsl #16 \n" + "ittte ge \n" "subge %1, %1, %0, lsl #16 \n" "subge %0, %5, %0 \n" "movge %2, #1 \n" @@ -64,12 +78,17 @@ static inline int vp56_rac_get_prob_branchy_armv6(VP56RangeCoder *c, int pr) unsigned tmp; __asm__ ("adds %3, %3, %0 \n" + "itt cs \n" "cmpcs %7, %4 \n" - "ldrcsh %2, [%4], #2 \n" + A("ldrcsh %2, [%4], #2 \n") + T("ldrhcs %2, [%4], #2 \n") "rsb %0, %6, #256 \n" "smlabb %0, %5, %6, %0 \n" + T("itttt cs \n") "rev16cs %2, %2 \n" - "orrcs %1, %1, %2, lsl %3 \n" + T("lslcs %2, %2, %3 \n") + T("orrcs %1, %1, %2 \n") + A("orrcs %1, %1, %2, lsl %3 \n") "subcs %3, %3, #16 \n" "lsr %0, %0, #8 \n" "lsl %2, %0, #16 \n" diff --git a/libavcodec/arm/vp8_armv6.S b/libavcodec/arm/vp8_armv6.S index 93f4dd664b..8a3beb9fbb 100644 --- a/libavcodec/arm/vp8_armv6.S +++ b/libavcodec/arm/vp8_armv6.S @@ -25,13 +25,18 @@ lsl \cw, \cw, \t0 lsl \t0, \h, \t0 rsb \h, \pr, #256 + it cs ldrhcs \t1, [\buf], #2 smlabb \h, \t0, \pr, \h +T itttt cs rev16cs \t1, \t1 - orrcs \cw, \cw, \t1, lsl \bs +A orrcs \cw, \cw, \t1, lsl \bs +T lslcs \t1, \t1, \bs +T orrcs \cw, \cw, \t1 subcs \bs, \bs, #16 lsr \h, \h, #8 cmp \cw, \h, lsl #16 + itt ge subge \cw, \cw, \h, lsl #16 subge \h, \t0, \h .endm @@ -40,14 +45,20 @@ adds \bs, \bs, \t0 lsl \cw, \cw, \t0 lsl \t0, \h, \t0 + it cs ldrhcs \t1, [\buf], #2 mov \h, #128 + it cs rev16cs \t1, \t1 add \h, \h, \t0, lsl #7 - orrcs \cw, \cw, \t1, lsl \bs +A orrcs \cw, \cw, \t1, lsl \bs +T ittt cs +T lslcs \t1, \t1, \bs +T orrcs \cw, \cw, \t1 subcs \bs, \bs, #16 lsr \h, \h, #8 cmp \cw, \h, lsl #16 + itt ge subge \cw, \cw, \h, lsl #16 subge \h, 
\t0, \h .endm @@ -59,6 +70,7 @@ function ff_decode_block_coeffs_armv6, export=1 cmp r3, #0 ldr r11, [r5] ldm r0, {r5-r7} @ high, bits, buf + it ne pkhtbne r11, r11, r11, asr #16 ldr r8, [r0, #16] @ code_word 0: @@ -80,19 +92,26 @@ function ff_decode_block_coeffs_armv6, export=1 adds r6, r6, r9 add r4, r4, #11 lsl r8, r8, r9 + it cs ldrhcs r10, [r7], #2 lsl r9, r5, r9 mov r5, #128 + it cs rev16cs r10, r10 add r5, r5, r9, lsl #7 - orrcs r8, r8, r10, lsl r6 +T ittt cs +T lslcs r10, r10, r6 +T orrcs r8, r8, r10 +A orrcs r8, r8, r10, lsl r6 subcs r6, r6, #16 lsr r5, r5, #8 cmp r8, r5, lsl #16 movrel r10, zigzag_scan-1 + itt ge subge r8, r8, r5, lsl #16 subge r5, r9, r5 ldrb r10, [r10, r3] + it ge rsbge r12, r12, #0 cmp r3, #16 strh r12, [r1, r10] @@ -108,6 +127,7 @@ function ff_decode_block_coeffs_armv6, export=1 ldr r0, [sp] ldr r9, [r0, #12] cmp r7, r9 + it hi movhi r7, r9 stm r0, {r5-r7} @ high, bits, buf str r8, [r0, #16] @ code_word @@ -131,11 +151,13 @@ function ff_decode_block_coeffs_armv6, export=1 mov r12, #2 ldrb r0, [r4, #4] rac_get_prob r5, r6, r7, r8, r0, r9, r10 + it ge addge r12, #1 ldrb r9, [lr, r5] blt 4f ldrb r0, [r4, #5] rac_get_prob r5, r6, r7, r8, r0, r9, r10 + it ge addge r12, #1 ldrb r9, [lr, r5] b 4f @@ -153,6 +175,7 @@ function ff_decode_block_coeffs_armv6, export=1 mov r12, #5 mov r0, #159 rac_get_prob r5, r6, r7, r8, r0, r9, r10 + it ge addge r12, r12, #1 ldrb r9, [lr, r5] b 4f @@ -160,23 +183,28 @@ function ff_decode_block_coeffs_armv6, export=1 mov r12, #7 mov r0, #165 rac_get_prob r5, r6, r7, r8, r0, r9, r10 + it ge addge r12, r12, #2 ldrb r9, [lr, r5] mov r0, #145 rac_get_prob r5, r6, r7, r8, r0, r9, r10 + it ge addge r12, r12, #1 ldrb r9, [lr, r5] b 4f 3: ldrb r0, [r4, #8] rac_get_prob r5, r6, r7, r8, r0, r9, r10 + it ge addge r4, r4, #1 ldrb r9, [lr, r5] + ite ge movge r12, #2 movlt r12, #0 ldrb r0, [r4, #9] rac_get_prob r5, r6, r7, r8, r0, r9, r10 mov r9, #8 + it ge addge r12, r12, #1 movrel r4, X(ff_vp8_dct_cat_prob) lsl r9, r9, r12 @@ -189,6 +217,7 @@ function ff_decode_block_coeffs_armv6, export=1 lsl r1, r1, #1 rac_get_prob r5, r6, r7, r8, r0, r9, r10 ldrb r0, [r4], #1 + it ge addge r1, r1, #1 cmp r0, #0 bne 1b @@ -200,6 +229,7 @@ function ff_decode_block_coeffs_armv6, export=1 add r4, r2, r4 add r4, r4, #22 rac_get_128 r5, r6, r7, r8, r9, r10 + it ge rsbge r12, r12, #0 smulbb r12, r12, r11 movrel r9, zigzag_scan-1 @@ -210,9 +240,9 @@ function ff_decode_block_coeffs_armv6, export=1 b 5b endfunc - .section .rodata -zigzag_scan: +const zigzag_scan .byte 0, 2, 8, 16 .byte 10, 4, 6, 12 .byte 18, 24, 26, 20 .byte 14, 22, 28, 30 +endconst diff --git a/libavcodec/arm/vp8dsp_neon.S b/libavcodec/arm/vp8dsp_neon.S index 23330900f7..28487e7a60 100644 --- a/libavcodec/arm/vp8dsp_neon.S +++ b/libavcodec/arm/vp8dsp_neon.S @@ -746,14 +746,14 @@ function ff_put_vp8_pixels4_neon, export=1 push {r4-r6,lr} 1: subs r12, r12, #4 - ldr r4, [r2], r3 - ldr r5, [r2], r3 - ldr r6, [r2], r3 - ldr lr, [r2], r3 - str r4, [r0], r1 - str r5, [r0], r1 - str r6, [r0], r1 - str lr, [r0], r1 + ldr_post r4, r2, r3 + ldr_post r5, r2, r3 + ldr_post r6, r2, r3 + ldr_post lr, r2, r3 + str_post r4, r0, r1 + str_post r5, r0, r1 + str_post r6, r0, r1 + str_post lr, r0, r1 bgt 1b pop {r4-r6,pc} endfunc diff --git a/libavcodec/ass.h b/libavcodec/ass.h index 74ef61b776..efff44d6ed 100644 --- a/libavcodec/ass.h +++ b/libavcodec/ass.h @@ -25,8 +25,7 @@ #include "avcodec.h" /** - * Default values for ASS style. 
- * @defgroup ass_default + * @name Default values for ASS style * @{ */ #define ASS_DEFAULT_FONT "Arial" diff --git a/libavcodec/asv1.c b/libavcodec/asv1.c index ff0d9eff01..a53d238fdd 100644 --- a/libavcodec/asv1.c +++ b/libavcodec/asv1.c @@ -603,39 +603,37 @@ static av_cold int decode_end(AVCodecContext *avctx){ } AVCodec ff_asv1_decoder = { - "asv1", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_ASV1, - sizeof(ASV1Context), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, + .name = "asv1", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_ASV1, + .priv_data_size = sizeof(ASV1Context), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name= NULL_IF_CONFIG_SMALL("ASUS V1"), }; AVCodec ff_asv2_decoder = { - "asv2", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_ASV2, - sizeof(ASV1Context), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, + .name = "asv2", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_ASV2, + .priv_data_size = sizeof(ASV1Context), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name= NULL_IF_CONFIG_SMALL("ASUS V2"), }; #if CONFIG_ASV1_ENCODER AVCodec ff_asv1_encoder = { - "asv1", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_ASV1, - sizeof(ASV1Context), - encode_init, - encode_frame, + .name = "asv1", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_ASV1, + .priv_data_size = sizeof(ASV1Context), + .init = encode_init, + .encode = encode_frame, //encode_end, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("ASUS V1"), @@ -644,12 +642,12 @@ AVCodec ff_asv1_encoder = { #if CONFIG_ASV2_ENCODER AVCodec ff_asv2_encoder = { - "asv2", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_ASV2, - sizeof(ASV1Context), - encode_init, - encode_frame, + .name = "asv2", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_ASV2, + .priv_data_size = sizeof(ASV1Context), + .init = encode_init, + .encode = encode_frame, //encode_end, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("ASUS V2"), diff --git a/libavcodec/aura.c b/libavcodec/aura.c index 18024f1c08..e2c90b45f7 100644 --- a/libavcodec/aura.c +++ b/libavcodec/aura.c @@ -124,16 +124,14 @@ static av_cold int aura_decode_end(AVCodecContext *avctx) } AVCodec ff_aura2_decoder = { - "aura2", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_AURA2, - sizeof(AuraDecodeContext), - aura_decode_init, - NULL, - aura_decode_end, - aura_decode_frame, - CODEC_CAP_DR1, - NULL, + .name = "aura2", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_AURA2, + .priv_data_size = sizeof(AuraDecodeContext), + .init = aura_decode_init, + .close = aura_decode_end, + .decode = aura_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Auravision Aura 2"), }; diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h index ec8206b2e6..61fed2385d 100644 --- a/libavcodec/avcodec.h +++ b/libavcodec/avcodec.h @@ -30,6 +30,10 @@ #include "libavutil/samplefmt.h" #include "libavutil/avutil.h" #include "libavutil/cpu.h" +#include "libavutil/dict.h" +#include "libavutil/log.h" +#include "libavutil/pixfmt.h" +#include "libavutil/rational.h" #include "libavcodec/version.h" @@ -450,7 +454,7 @@ enum CodecID { * Note: If the first 23 bits of the additional bytes are not 0, then damaged * MPEG bitstreams could cause overread and segfault. 
*/ -#define FF_INPUT_BUFFER_PADDING_SIZE 8 +#define FF_INPUT_BUFFER_PADDING_SIZE 16 /** * minimum encoding buffer size @@ -767,304 +771,6 @@ typedef struct AVPanScan{ int16_t position[3][2]; }AVPanScan; -#define FF_COMMON_FRAME \ - /**\ - * pointer to the picture planes.\ - * This might be different from the first allocated byte\ - * - encoding: \ - * - decoding: \ - */\ - uint8_t *data[4];\ - int linesize[4];\ - /**\ - * pointer to the first allocated byte of the picture. Can be used in get_buffer/release_buffer.\ - * This isn't used by libavcodec unless the default get/release_buffer() is used.\ - * - encoding: \ - * - decoding: \ - */\ - uint8_t *base[4];\ - /**\ - * 1 -> keyframe, 0-> not\ - * - encoding: Set by libavcodec.\ - * - decoding: Set by libavcodec.\ - */\ - int key_frame;\ -\ - /**\ - * Picture type of the frame, see ?_TYPE below.\ - * - encoding: Set by libavcodec. for coded_picture (and set by user for input).\ - * - decoding: Set by libavcodec.\ - */\ - enum AVPictureType pict_type;\ -\ - /**\ - * presentation timestamp in time_base units (time when frame should be shown to user)\ - * If AV_NOPTS_VALUE then frame_rate = 1/time_base will be assumed.\ - * - encoding: MUST be set by user.\ - * - decoding: Set by libavcodec.\ - */\ - int64_t pts;\ -\ - /**\ - * picture number in bitstream order\ - * - encoding: set by\ - * - decoding: Set by libavcodec.\ - */\ - int coded_picture_number;\ - /**\ - * picture number in display order\ - * - encoding: set by\ - * - decoding: Set by libavcodec.\ - */\ - int display_picture_number;\ -\ - /**\ - * quality (between 1 (good) and FF_LAMBDA_MAX (bad)) \ - * - encoding: Set by libavcodec. for coded_picture (and set by user for input).\ - * - decoding: Set by libavcodec.\ - */\ - int quality; \ -\ - /**\ - * buffer age (1->was last buffer and dint change, 2->..., ...).\ - * Set to INT_MAX if the buffer has not been used yet.\ - * - encoding: unused\ - * - decoding: MUST be set by get_buffer().\ - */\ - int age;\ -\ - /**\ - * is this picture used as reference\ - * The values for this are the same as the MpegEncContext.picture_structure\ - * variable, that is 1->top field, 2->bottom field, 3->frame/both fields.\ - * Set to 4 for delayed, non-reference frames.\ - * - encoding: unused\ - * - decoding: Set by libavcodec. 
(before get_buffer() call)).\ - */\ - int reference;\ -\ - /**\ - * QP table\ - * - encoding: unused\ - * - decoding: Set by libavcodec.\ - */\ - int8_t *qscale_table;\ - /**\ - * QP store stride\ - * - encoding: unused\ - * - decoding: Set by libavcodec.\ - */\ - int qstride;\ -\ - /**\ - * mbskip_table[mb]>=1 if MB didn't change\ - * stride= mb_width = (width+15)>>4\ - * - encoding: unused\ - * - decoding: Set by libavcodec.\ - */\ - uint8_t *mbskip_table;\ -\ - /**\ - * motion vector table\ - * @code\ - * example:\ - * int mv_sample_log2= 4 - motion_subsample_log2;\ - * int mb_width= (width+15)>>4;\ - * int mv_stride= (mb_width << mv_sample_log2) + 1;\ - * motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y];\ - * @endcode\ - * - encoding: Set by user.\ - * - decoding: Set by libavcodec.\ - */\ - int16_t (*motion_val[2])[2];\ -\ - /**\ - * macroblock type table\ - * mb_type_base + mb_width + 2\ - * - encoding: Set by user.\ - * - decoding: Set by libavcodec.\ - */\ - uint32_t *mb_type;\ -\ - /**\ - * log2 of the size of the block which a single vector in motion_val represents: \ - * (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2)\ - * - encoding: unused\ - * - decoding: Set by libavcodec.\ - */\ - uint8_t motion_subsample_log2;\ -\ - /**\ - * for some private data of the user\ - * - encoding: unused\ - * - decoding: Set by user.\ - */\ - void *opaque;\ -\ - /**\ - * error\ - * - encoding: Set by libavcodec. if flags&CODEC_FLAG_PSNR.\ - * - decoding: unused\ - */\ - uint64_t error[4];\ -\ - /**\ - * type of the buffer (to keep track of who has to deallocate data[*])\ - * - encoding: Set by the one who allocates it.\ - * - decoding: Set by the one who allocates it.\ - * Note: User allocated (direct rendering) & internal buffers cannot coexist currently.\ - */\ - int type;\ - \ - /**\ - * When decoding, this signals how much the picture must be delayed.\ - * extra_delay = repeat_pict / (2*fps)\ - * - encoding: unused\ - * - decoding: Set by libavcodec.\ - */\ - int repeat_pict;\ - \ - /**\ - * \ - */\ - int qscale_type;\ - \ - /**\ - * The content of the picture is interlaced.\ - * - encoding: Set by user.\ - * - decoding: Set by libavcodec. (default 0)\ - */\ - int interlaced_frame;\ - \ - /**\ - * If the content is interlaced, is top field displayed first.\ - * - encoding: Set by user.\ - * - decoding: Set by libavcodec.\ - */\ - int top_field_first;\ - \ - /**\ - * Pan scan.\ - * - encoding: Set by user.\ - * - decoding: Set by libavcodec.\ - */\ - AVPanScan *pan_scan;\ - \ - /**\ - * Tell user application that palette has changed from previous frame.\ - * - encoding: ??? (no palette-enabled encoder yet)\ - * - decoding: Set by libavcodec. (default 0).\ - */\ - int palette_has_changed;\ - \ - /**\ - * codec suggestion on buffer type if != 0\ - * - encoding: unused\ - * - decoding: Set by libavcodec. (before get_buffer() call)).\ - */\ - int buffer_hints;\ -\ - /**\ - * DCT coefficients\ - * - encoding: unused\ - * - decoding: Set by libavcodec.\ - */\ - short *dct_coeff;\ -\ - /**\ - * motion reference frame index\ - * the order in which these are stored can depend on the codec.\ - * - encoding: Set by user.\ - * - decoding: Set by libavcodec.\ - */\ - int8_t *ref_index[2];\ -\ - /**\ - * reordered opaque 64bit (generally an integer or a double precision float\ - * PTS but can be anything). 
\ - * The user sets AVCodecContext.reordered_opaque to represent the input at\ - * that time,\ - * the decoder reorders values as needed and sets AVFrame.reordered_opaque\ - * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque \ - * @deprecated in favor of pkt_pts\ - * - encoding: unused\ - * - decoding: Read by user.\ - */\ - int64_t reordered_opaque;\ -\ - /**\ - * hardware accelerator private data (FFmpeg allocated)\ - * - encoding: unused\ - * - decoding: Set by libavcodec\ - */\ - void *hwaccel_picture_private;\ -\ - /**\ - * reordered pts from the last AVPacket that has been input into the decoder\ - * - encoding: unused\ - * - decoding: Read by user.\ - */\ - int64_t pkt_pts;\ -\ - /**\ - * dts from the last AVPacket that has been input into the decoder\ - * - encoding: unused\ - * - decoding: Read by user.\ - */\ - int64_t pkt_dts;\ -\ - /**\ - * the AVCodecContext which ff_thread_get_buffer() was last called on\ - * - encoding: Set by libavcodec.\ - * - decoding: Set by libavcodec.\ - */\ - struct AVCodecContext *owner;\ -\ - /**\ - * used by multithreading to store frame-specific info\ - * - encoding: Set by libavcodec.\ - * - decoding: Set by libavcodec.\ - */\ - void *thread_opaque;\ -\ - /**\ - * frame timestamp estimated using various heuristics, in stream time base\ - * - encoding: unused\ - * - decoding: set by libavcodec, read by user.\ - */\ - int64_t best_effort_timestamp;\ -\ - /**\ - * reordered pos from the last AVPacket that has been input into the decoder\ - * - encoding: unused\ - * - decoding: Read by user.\ - */\ - int64_t pkt_pos;\ -\ - /**\ - * reordered sample aspect ratio for the video frame, 0/1 if unknown\unspecified - * - encoding: unused\ - * - decoding: Read by user.\ - */\ - AVRational sample_aspect_ratio;\ -\ - /**\ - * width and height of the video frame\ - * - encoding: unused\ - * - decoding: Read by user.\ - */\ - int width, height;\ -\ - /**\ - * format of the frame, -1 if unknown or unset\ - * It should be cast to the corresponding enum (enum PixelFormat\ - * for video, enum AVSampleFormat for audio)\ - * - encoding: unused\ - * - decoding: Read by user.\ - */\ - int format;\ - - #define FF_QSCALE_TYPE_MPEG1 0 #define FF_QSCALE_TYPE_MPEG2 1 #define FF_QSCALE_TYPE_H264 2 @@ -1111,6 +817,9 @@ typedef struct AVPacket { uint8_t *data; int size; int stream_index; + /** + * A combination of AV_PKT_FLAG values + */ int flags; /** * Duration of this packet in AVStream->time_base units, 0 if unknown. @@ -1140,22 +849,316 @@ typedef struct AVPacket { */ int64_t convergence_duration; } AVPacket; -#define AV_PKT_FLAG_KEY 0x0001 +#define AV_PKT_FLAG_KEY 0x0001 ///< The packet contains a keyframe #if LIBAVCODEC_VERSION_MAJOR < 53 #define PKT_FLAG_KEY AV_PKT_FLAG_KEY #endif +#define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted /** * Audio Video Frame. - * New fields can be added to the end of FF_COMMON_FRAME with minor version - * bumps. - * Removal, reordering and changes to existing fields require a major - * version bump. No fields should be added into AVFrame before or after - * FF_COMMON_FRAME! + * New fields can be added to the end of AVFRAME with minor version + * bumps. Removal, reordering and changes to existing fields require + * a major version bump. * sizeof(AVFrame) must not be used outside libav*. */ typedef struct AVFrame { - FF_COMMON_FRAME + /** + * pointer to the picture planes. 
+ * This might be different from the first allocated byte + * - encoding: + * - decoding: + */ + uint8_t *data[4]; + int linesize[4]; + /** + * pointer to the first allocated byte of the picture. Can be used in get_buffer/release_buffer. + * This isn't used by libavcodec unless the default get/release_buffer() is used. + * - encoding: + * - decoding: + */ + uint8_t *base[4]; + /** + * 1 -> keyframe, 0-> not + * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + int key_frame; + + /** + * Picture type of the frame, see ?_TYPE below. + * - encoding: Set by libavcodec. for coded_picture (and set by user for input). + * - decoding: Set by libavcodec. + */ + enum AVPictureType pict_type; + + /** + * presentation timestamp in time_base units (time when frame should be shown to user) + * If AV_NOPTS_VALUE then frame_rate = 1/time_base will be assumed. + * - encoding: MUST be set by user. + * - decoding: Set by libavcodec. + */ + int64_t pts; + + /** + * picture number in bitstream order + * - encoding: set by + * - decoding: Set by libavcodec. + */ + int coded_picture_number; + /** + * picture number in display order + * - encoding: set by + * - decoding: Set by libavcodec. + */ + int display_picture_number; + + /** + * quality (between 1 (good) and FF_LAMBDA_MAX (bad)) + * - encoding: Set by libavcodec. for coded_picture (and set by user for input). + * - decoding: Set by libavcodec. + */ + int quality; + + /** + * buffer age (1->was last buffer and dint change, 2->..., ...). + * Set to INT_MAX if the buffer has not been used yet. + * - encoding: unused + * - decoding: MUST be set by get_buffer(). + */ + int age; + + /** + * is this picture used as reference + * The values for this are the same as the MpegEncContext.picture_structure + * variable, that is 1->top field, 2->bottom field, 3->frame/both fields. + * Set to 4 for delayed, non-reference frames. + * - encoding: unused + * - decoding: Set by libavcodec. (before get_buffer() call)). + */ + int reference; + + /** + * QP table + * - encoding: unused + * - decoding: Set by libavcodec. + */ + int8_t *qscale_table; + /** + * QP store stride + * - encoding: unused + * - decoding: Set by libavcodec. + */ + int qstride; + + /** + * mbskip_table[mb]>=1 if MB didn't change + * stride= mb_width = (width+15)>>4 + * - encoding: unused + * - decoding: Set by libavcodec. + */ + uint8_t *mbskip_table; + + /** + * motion vector table + * @code + * example: + * int mv_sample_log2= 4 - motion_subsample_log2; + * int mb_width= (width+15)>>4; + * int mv_stride= (mb_width << mv_sample_log2) + 1; + * motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y]; + * @endcode + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int16_t (*motion_val[2])[2]; + + /** + * macroblock type table + * mb_type_base + mb_width + 2 + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + uint32_t *mb_type; + + /** + * log2 of the size of the block which a single vector in motion_val represents: + * (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2) + * - encoding: unused + * - decoding: Set by libavcodec. + */ + uint8_t motion_subsample_log2; + + /** + * for some private data of the user + * - encoding: unused + * - decoding: Set by user. + */ + void *opaque; + + /** + * error + * - encoding: Set by libavcodec. if flags&CODEC_FLAG_PSNR. + * - decoding: unused + */ + uint64_t error[4]; + + /** + * type of the buffer (to keep track of who has to deallocate data[*]) + * - encoding: Set by the one who allocates it. 
+ * - decoding: Set by the one who allocates it. + * Note: User allocated (direct rendering) & internal buffers cannot coexist currently. + */ + int type; + + /** + * When decoding, this signals how much the picture must be delayed. + * extra_delay = repeat_pict / (2*fps) + * - encoding: unused + * - decoding: Set by libavcodec. + */ + int repeat_pict; + + /** + * + */ + int qscale_type; + + /** + * The content of the picture is interlaced. + * - encoding: Set by user. + * - decoding: Set by libavcodec. (default 0) + */ + int interlaced_frame; + + /** + * If the content is interlaced, is top field displayed first. + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int top_field_first; + + /** + * Pan scan. + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + AVPanScan *pan_scan; + + /** + * Tell user application that palette has changed from previous frame. + * - encoding: ??? (no palette-enabled encoder yet) + * - decoding: Set by libavcodec. (default 0). + */ + int palette_has_changed; + + /** + * codec suggestion on buffer type if != 0 + * - encoding: unused + * - decoding: Set by libavcodec. (before get_buffer() call)). + */ + int buffer_hints; + + /** + * DCT coefficients + * - encoding: unused + * - decoding: Set by libavcodec. + */ + short *dct_coeff; + + /** + * motion reference frame index + * the order in which these are stored can depend on the codec. + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int8_t *ref_index[2]; + + /** + * reordered opaque 64bit (generally an integer or a double precision float + * PTS but can be anything). + * The user sets AVCodecContext.reordered_opaque to represent the input at + * that time, + * the decoder reorders values as needed and sets AVFrame.reordered_opaque + * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque + * @deprecated in favor of pkt_pts + * - encoding: unused + * - decoding: Read by user. + */ + int64_t reordered_opaque; + + /** + * hardware accelerator private data (FFmpeg-allocated) + * - encoding: unused + * - decoding: Set by libavcodec + */ + void *hwaccel_picture_private; + + /** + * reordered pts from the last AVPacket that has been input into the decoder + * - encoding: unused + * - decoding: Read by user. + */ + int64_t pkt_pts; + + /** + * dts from the last AVPacket that has been input into the decoder + * - encoding: unused + * - decoding: Read by user. + */ + int64_t pkt_dts; + + /** + * the AVCodecContext which ff_thread_get_buffer() was last called on + * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + struct AVCodecContext *owner; + + /** + * used by multithreading to store frame-specific info + * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + void *thread_opaque; + + /** + * frame timestamp estimated using various heuristics, in stream time base + * - encoding: unused + * - decoding: set by libavcodec, read by user. + */ + int64_t best_effort_timestamp; + + /** + * reordered pos from the last AVPacket that has been input into the decoder + * - encoding: unused + * - decoding: Read by user. + */ + int64_t pkt_pos; + + /** + * reordered sample aspect ratio for the video frame, 0/1 if unknown\unspecified + * - encoding: unused + * - decoding: Read by user. + */ + AVRational sample_aspect_ratio; + + /** + * width and height of the video frame + * - encoding: unused + * - decoding: Read by user. 
+ */ + int width, height; + + /** + * format of the frame, -1 if unknown or unset + * It should be cast to the corresponding enum (enum PixelFormat + * for video, enum AVSampleFormat for audio) + * - encoding: unused + * - decoding: Read by user. + */ + int format; + } AVFrame; /** @@ -1442,7 +1445,7 @@ typedef struct AVCodecContext { * A demuxer should set this to what is stored in the field used to identify the codec. * If there are multiple such fields in a container then the demuxer should choose the one * which maximizes the information about the used codec. - * If the codec tag field in a container is larger then 32 bits then the demuxer should + * If the codec tag field in a container is larger than 32 bits then the demuxer should * remap the longer ID to 32 bits with a table or other structure. Alternatively a new * extra_codec_tag + size could be added but for this a clear advantage must be demonstrated * first. @@ -1528,7 +1531,12 @@ typedef struct AVCodecContext { #define FF_ER_CAREFUL 1 #define FF_ER_COMPLIANT 2 #define FF_ER_AGGRESSIVE 3 +#if FF_API_VERY_AGGRESSIVE #define FF_ER_VERY_AGGRESSIVE 4 +#define FF_ER_EXPLODE 5 +#else +#define FF_ER_EXPLODE 4 +#endif /* FF_API_VERY_AGGRESSIVE */ /** * Called at the beginning of each frame to get a buffer for it. @@ -2349,6 +2357,23 @@ typedef struct AVCodecContext { #define FF_PROFILE_VC1_COMPLEX 2 #define FF_PROFILE_VC1_ADVANCED 3 +#define FF_PROFILE_MPEG4_SIMPLE 0 +#define FF_PROFILE_MPEG4_SIMPLE_SCALABLE 1 +#define FF_PROFILE_MPEG4_CORE 2 +#define FF_PROFILE_MPEG4_MAIN 3 +#define FF_PROFILE_MPEG4_N_BIT 4 +#define FF_PROFILE_MPEG4_SCALABLE_TEXTURE 5 +#define FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION 6 +#define FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE 7 +#define FF_PROFILE_MPEG4_HYBRID 8 +#define FF_PROFILE_MPEG4_ADVANCED_REAL_TIME 9 +#define FF_PROFILE_MPEG4_CORE_SCALABLE 10 +#define FF_PROFILE_MPEG4_ADVANCED_CODING 11 +#define FF_PROFILE_MPEG4_ADVANCED_CORE 12 +#define FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13 +#define FF_PROFILE_MPEG4_SIMPLE_STUDIO 14 +#define FF_PROFILE_MPEG4_ADVANCED_SIMPLE 15 + /** * level * - encoding: Set by user. @@ -2622,7 +2647,7 @@ typedef struct AVCodecContext { #if FF_API_FLAC_GLOBAL_OPTS /** - * @defgroup flac_opts FLAC options + * @name FLAC options * @deprecated Use FLAC encoder private options instead. * @{ */ @@ -2667,13 +2692,16 @@ typedef struct AVCodecContext { int request_channels; #endif +#if FF_API_DRC_SCALE /** * Percentage of dynamic range compression to be applied by the decoder. * The default value is 1.0, corresponding to full compression. * - encoding: unused * - decoding: Set by user. + * @deprecated use AC3 decoder private option instead. */ - float drc_scale; + attribute_deprecated float drc_scale; +#endif /** * opaque 64bit number (generally a PTS) that will be reordered and @@ -2993,6 +3021,8 @@ typedef struct AVProfile { const char *name; ///< short name for the profile } AVProfile; +typedef struct AVCodecDefault AVCodecDefault; + /** * AVCodec. */ @@ -3037,7 +3067,7 @@ typedef struct AVCodec { const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN} /** - * @defgroup framethreading Frame-level threading support functions. + * @name Frame-level threading support functions * @{ */ /** @@ -3055,6 +3085,11 @@ typedef struct AVCodec { */ int (*update_thread_context)(AVCodecContext *dst, const AVCodecContext *src); /** @} */ + + /** + * Private codec-specific defaults. 
+ */ + const AVCodecDefault *defaults; } AVCodec; /** @@ -3381,7 +3416,7 @@ void av_resample_close(struct AVResampleContext *c); /** * Allocate memory for a picture. Call avpicture_free() to free it. * - * \see avpicture_fill() + * @see avpicture_fill() * * @param picture the picture to be filled in * @param pix_fmt the format of the picture @@ -3428,7 +3463,7 @@ int avpicture_fill(AVPicture *picture, uint8_t *ptr, * The data is stored compactly, without any gaps for alignment or padding * which may be applied by avpicture_fill(). * - * \see avpicture_get_size() + * @see avpicture_get_size() * * @param[in] src AVPicture containing image data * @param[in] pix_fmt The format in which the picture data is stored. @@ -3683,21 +3718,38 @@ void avcodec_get_context_defaults2(AVCodecContext *s, enum AVMediaType); * we WILL change its arguments and name a few times! */ int avcodec_get_context_defaults3(AVCodecContext *s, AVCodec *codec); +#if FF_API_ALLOC_CONTEXT /** * Allocate an AVCodecContext and set its fields to default values. The * resulting struct can be deallocated by simply calling av_free(). * * @return An AVCodecContext filled with default values or NULL on failure. * @see avcodec_get_context_defaults + * + * @deprecated use avcodec_alloc_context3() */ +attribute_deprecated AVCodecContext *avcodec_alloc_context(void); /** THIS FUNCTION IS NOT YET PART OF THE PUBLIC API! * we WILL change its arguments and name a few times! */ +attribute_deprecated AVCodecContext *avcodec_alloc_context2(enum AVMediaType); +#endif -/** THIS FUNCTION IS NOT YET PART OF THE PUBLIC API! - * we WILL change its arguments and name a few times! */ +/** + * Allocate an AVCodecContext and set its fields to default values. The + * resulting struct can be deallocated by simply calling av_free(). + * + * @param codec if non-NULL, allocate private data and initialize defaults + * for the given codec. It is illegal to then call avcodec_open() + * with a different codec. + * + * @return An AVCodecContext filled with default values or NULL on failure. + * @see avcodec_get_context_defaults + * + * @deprecated use avcodec_alloc_context3() + */ AVCodecContext *avcodec_alloc_context3(AVCodec *codec); /** @@ -3707,7 +3759,7 @@ AVCodecContext *avcodec_alloc_context3(AVCodec *codec); * can use this AVCodecContext to decode/encode video/audio data. * * @param dest target codec context, should be initialized with - * avcodec_alloc_context(), but otherwise uninitialized + * avcodec_alloc_context3(), but otherwise uninitialized * @param src source codec context * @return AVERROR() on error (e.g. memory allocation error), 0 on success */ @@ -3785,6 +3837,7 @@ int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, v int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count); //FIXME func typedef +#if FF_API_AVCODEC_OPEN /** * Initialize the AVCodecContext to use the given AVCodec. Prior to using this * function the context has to be allocated. @@ -3801,7 +3854,7 @@ int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, * if (!codec) * exit(1); * - * context = avcodec_alloc_context(); + * context = avcodec_alloc_context3(codec); * * if (avcodec_open(context, codec) < 0) * exit(1); @@ -3810,9 +3863,46 @@ int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, * @param avctx The context which will be set up to use the given codec. * @param codec The codec to use within the context. 
* @return zero on success, a negative value on error - * @see avcodec_alloc_context, avcodec_find_decoder, avcodec_find_encoder, avcodec_close + * @see avcodec_alloc_context3, avcodec_find_decoder, avcodec_find_encoder, avcodec_close + * + * @deprecated use avcodec_open2 */ +attribute_deprecated int avcodec_open(AVCodecContext *avctx, AVCodec *codec); +#endif + +/** + * Initialize the AVCodecContext to use the given AVCodec. Prior to using this + * function the context has to be allocated with avcodec_alloc_context(). + * + * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(), + * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for + * retrieving a codec. + * + * @warning This function is not thread safe! + * + * @code + * avcodec_register_all(); + * av_dict_set(&opts, "b", "2.5M", 0); + * codec = avcodec_find_decoder(CODEC_ID_H264); + * if (!codec) + * exit(1); + * + * context = avcodec_alloc_context(); + * + * if (avcodec_open(context, codec, opts) < 0) + * exit(1); + * @endcode + * + * @param avctx The context to initialize. + * @param options A dictionary filled with AVCodecContext and codec-private options. + * On return this object will be filled with options that were not found. + * + * @return zero on success, a negative value on error + * @see avcodec_alloc_context3(), avcodec_find_decoder(), avcodec_find_encoder(), + * av_dict_set(), av_opt_find(). + */ +int avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options); #if FF_API_AUDIO_OLD /** @@ -4063,7 +4153,7 @@ int av_get_bits_per_sample(enum CodecID codec_id); #if FF_API_OLD_SAMPLE_FMT /** - * @deprecated Use av_get_bits_per_sample_fmt() instead. + * @deprecated Use av_get_bytes_per_sample() instead. */ attribute_deprecated int av_get_bits_per_sample_format(enum AVSampleFormat sample_fmt); @@ -4112,7 +4202,7 @@ typedef struct AVCodecParserContext { int64_t offset; ///< byte offset from starting packet start int64_t cur_frame_end[AV_PARSER_PTS_NB]; - /*! + /** * Set by parser to 1 for key frames and 0 for non-key frames. * It is initialized to -1, so if the parser doesn't set this flag, * old-style fallback using AV_PICTURE_TYPE_I picture type as key frames @@ -4394,7 +4484,7 @@ void av_log_missing_feature(void *avc, const char *feature, int want_sample); * a pointer to an AVClass struct * @param[in] msg string containing an optional message, or NULL if no message */ -void av_log_ask_for_sample(void *avc, const char *msg, ...); +void av_log_ask_for_sample(void *avc, const char *msg, ...) av_printf_format(2, 3); /** * Register the hardware accelerator hwaccel. 
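The avcodec.h hunks above deprecate avcodec_alloc_context()/avcodec_open() in favour of avcodec_alloc_context3() and avcodec_open2(), which take the codec up front and accept an AVDictionary of options; note that the doxygen example added for avcodec_open2() still mixes the old and new names. A minimal decoder-setup sketch against the new API, using only functions referenced in this patch (the H.264 codec ID and the "b" = "2.5M" option come from that doxygen example and are purely illustrative):

    #include "libavcodec/avcodec.h"
    #include "libavutil/dict.h"
    #include "libavutil/mem.h"

    static AVCodecContext *open_h264_decoder(void)
    {
        AVDictionary   *opts  = NULL;
        AVCodec        *codec;
        AVCodecContext *ctx;

        avcodec_register_all();

        codec = avcodec_find_decoder(CODEC_ID_H264);
        if (!codec)
            return NULL;

        /* allocate the context with the codec's private defaults
         * (AVCodec.defaults, introduced by this patch) */
        ctx = avcodec_alloc_context3(codec);
        if (!ctx)
            return NULL;

        /* options that avcodec_open2() does not consume are left in opts */
        av_dict_set(&opts, "b", "2.5M", 0);

        if (avcodec_open2(ctx, codec, &opts) < 0) {
            av_free(ctx);
            ctx = NULL;
        }

        av_dict_free(&opts);
        return ctx;
    }

The context is freed with av_free() as described in the allocation documentation; checking the leftover entries in opts after avcodec_open2() returns is how a caller can detect options that were not recognized.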
diff --git a/libavcodec/avs.c b/libavcodec/avs.c index 06fcd5cd0e..14f7cf0507 100644 --- a/libavcodec/avs.c +++ b/libavcodec/avs.c @@ -153,14 +153,12 @@ static av_cold int avs_decode_init(AVCodecContext * avctx) } AVCodec ff_avs_decoder = { - "avs", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_AVS, - sizeof(AvsContext), - avs_decode_init, - NULL, - NULL, - avs_decode_frame, - CODEC_CAP_DR1, + .name = "avs", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_AVS, + .priv_data_size = sizeof(AvsContext), + .init = avs_decode_init, + .decode = avs_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("AVS (Audio Video Standard) video"), }; diff --git a/libavcodec/bethsoftvideo.c b/libavcodec/bethsoftvideo.c index 3869b87aa2..fa2db05765 100644 --- a/libavcodec/bethsoftvideo.c +++ b/libavcodec/bethsoftvideo.c @@ -23,8 +23,8 @@ * @file * @brief Bethesda Softworks VID Video Decoder * @author Nicholas Tung [ntung (at. ntung com] (2007-03) - * @sa http://wiki.multimedia.cx/index.php?title=Bethsoft_VID - * @sa http://www.svatopluk.com/andux/docs/dfvid.html + * @see http://wiki.multimedia.cx/index.php?title=Bethsoft_VID + * @see http://www.svatopluk.com/andux/docs/dfvid.html */ #include "libavutil/common.h" diff --git a/libavcodec/bfi.c b/libavcodec/bfi.c index 9e40b86e50..1f31d7fb61 100644 --- a/libavcodec/bfi.c +++ b/libavcodec/bfi.c @@ -23,7 +23,7 @@ * @file * @brief Brute Force & Ignorance (.bfi) video decoder * @author Sisir Koppaka ( sisir.koppaka at gmail dot com ) - * @sa http://wiki.multimedia.cx/index.php?title=BFI + * @see http://wiki.multimedia.cx/index.php?title=BFI */ #include "libavutil/common.h" diff --git a/libavcodec/bfin/dsputil_bfin.c b/libavcodec/bfin/dsputil_bfin.c index 5b94472326..bfcc337388 100644 --- a/libavcodec/bfin/dsputil_bfin.c +++ b/libavcodec/bfin/dsputil_bfin.c @@ -197,14 +197,14 @@ static int bfin_pix_abs8_xy2 (void *c, uint8_t *blk1, uint8_t *blk2, int line_si void dsputil_init_bfin( DSPContext* c, AVCodecContext *avctx ) { - const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8; + const int high_bit_depth = avctx->bits_per_raw_sample > 8; - c->get_pixels = ff_bfin_get_pixels; c->diff_pixels = ff_bfin_diff_pixels; c->put_pixels_clamped = ff_bfin_put_pixels_clamped; c->add_pixels_clamped = ff_bfin_add_pixels_clamped; if (!high_bit_depth) + c->get_pixels = ff_bfin_get_pixels; c->clear_blocks = bfin_clear_blocks; c->pix_sum = ff_bfin_pix_sum; c->pix_norm1 = ff_bfin_pix_norm1; @@ -253,19 +253,21 @@ void dsputil_init_bfin( DSPContext* c, AVCodecContext *avctx ) /* c->put_no_rnd_pixels_tab[0][3] = ff_bfin_put_pixels16_xy2_nornd; */ } - if (avctx->dct_algo == FF_DCT_AUTO) - c->fdct = ff_bfin_fdct; - - if (avctx->idct_algo==FF_IDCT_VP3) { - c->idct_permutation_type = FF_NO_IDCT_PERM; - c->idct = ff_bfin_vp3_idct; - c->idct_add = ff_bfin_vp3_idct_add; - c->idct_put = ff_bfin_vp3_idct_put; - } else if (avctx->idct_algo == FF_IDCT_AUTO) { - c->idct_permutation_type = FF_NO_IDCT_PERM; - c->idct = ff_bfin_idct; - c->idct_add = bfin_idct_add; - c->idct_put = bfin_idct_put; + if (avctx->bits_per_raw_sample <= 8) { + if (avctx->dct_algo == FF_DCT_AUTO) + c->fdct = ff_bfin_fdct; + + if (avctx->idct_algo == FF_IDCT_VP3) { + c->idct_permutation_type = FF_NO_IDCT_PERM; + c->idct = ff_bfin_vp3_idct; + c->idct_add = ff_bfin_vp3_idct_add; + c->idct_put = ff_bfin_vp3_idct_put; + } else if (avctx->idct_algo == FF_IDCT_AUTO) { + c->idct_permutation_type = FF_NO_IDCT_PERM; + c->idct = ff_bfin_idct; + c->idct_add = bfin_idct_add; + c->idct_put 
= bfin_idct_put; + } } } diff --git a/libavcodec/bink.c b/libavcodec/bink.c index ef07747dbc..e137312693 100644 --- a/libavcodec/bink.c +++ b/libavcodec/bink.c @@ -24,6 +24,7 @@ #include "avcodec.h" #include "dsputil.h" #include "binkdata.h" +#include "binkdsp.h" #include "mathops.h" #define ALT_BITSTREAM_READER_LE @@ -60,8 +61,8 @@ static const int binkb_bundle_signed[BINKB_NB_SRC] = { 0, 0, 0, 1, 1, 0, 1, 0, 0, 0 }; -static uint32_t binkb_intra_quant[16][64]; -static uint32_t binkb_inter_quant[16][64]; +static int32_t binkb_intra_quant[16][64]; +static int32_t binkb_inter_quant[16][64]; /** * IDs for different data types used in Bink video codec @@ -109,11 +110,11 @@ typedef struct Bundle { typedef struct BinkContext { AVCodecContext *avctx; DSPContext dsp; + BinkDSPContext bdsp; AVFrame pic, last; int version; ///< internal Bink file version int has_alpha; int swap_planes; - ScanTable scantable; ///< permutated scantable for DCT coeffs decoding Bundle bundle[BINKB_NB_SRC]; ///< bundles for decoding all data types Tree col_high[16]; ///< trees for decoding high nibble in "colours" data type @@ -580,8 +581,8 @@ static inline int binkb_get_value(BinkContext *c, int bundle_num) * @param quant_matrices quantization matrices * @return 0 for success, negative value in other cases */ -static int read_dct_coeffs(GetBitContext *gb, DCTELEM block[64], const uint8_t *scan, - const uint32_t quant_matrices[16][64], int q) +static int read_dct_coeffs(GetBitContext *gb, int32_t block[64], const uint8_t *scan, + const int32_t quant_matrices[16][64], int q) { int coef_list[128]; int mode_list[128]; @@ -590,7 +591,7 @@ static int read_dct_coeffs(GetBitContext *gb, DCTELEM block[64], const uint8_t * int coef_count = 0; int coef_idx[64]; int quant_idx; - const uint32_t *quant; + const int32_t *quant; coef_list[list_end] = 4; mode_list[list_end++] = 0; coef_list[list_end] = 24; mode_list[list_end++] = 0; @@ -623,7 +624,6 @@ static int read_dct_coeffs(GetBitContext *gb, DCTELEM block[64], const uint8_t * coef_list[--list_start] = ccoef; mode_list[ list_start] = 3; } else { - int t; if (!bits) { t = 1 - (get_bits1(gb) << 1); } else { @@ -791,6 +791,7 @@ static int binkb_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx, const uint8_t *scan; int xoff, yoff; LOCAL_ALIGNED_16(DCTELEM, block, [64]); + LOCAL_ALIGNED_16(int32_t, dctblock, [64]); int coordmap[64]; int ybias = is_key ? 
-15 : 0; int qp; @@ -845,11 +846,11 @@ static int binkb_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx, dst[coordmap[*scan++]] = binkb_get_value(c, BINKB_SRC_COLORS); break; case 2: - c->dsp.clear_block(block); - block[0] = binkb_get_value(c, BINKB_SRC_INTRA_DC); + memset(dctblock, 0, sizeof(*dctblock) * 64); + dctblock[0] = binkb_get_value(c, BINKB_SRC_INTRA_DC); qp = binkb_get_value(c, BINKB_SRC_INTRA_Q); - read_dct_coeffs(gb, block, c->scantable.permutated, binkb_intra_quant, qp); - c->dsp.idct_put(dst, stride, block); + read_dct_coeffs(gb, dctblock, bink_scan, binkb_intra_quant, qp); + c->bdsp.idct_put(dst, stride, dctblock); break; case 3: xoff = binkb_get_value(c, BINKB_SRC_X_OFF); @@ -878,11 +879,11 @@ static int binkb_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx, } else { put_pixels8x8_overlapped(dst, ref, stride); } - c->dsp.clear_block(block); - block[0] = binkb_get_value(c, BINKB_SRC_INTER_DC); + memset(dctblock, 0, sizeof(*dctblock) * 64); + dctblock[0] = binkb_get_value(c, BINKB_SRC_INTER_DC); qp = binkb_get_value(c, BINKB_SRC_INTER_Q); - read_dct_coeffs(gb, block, c->scantable.permutated, binkb_inter_quant, qp); - c->dsp.idct_add(dst, stride, block); + read_dct_coeffs(gb, dctblock, bink_scan, binkb_inter_quant, qp); + c->bdsp.idct_add(dst, stride, dctblock); break; case 5: v = binkb_get_value(c, BINKB_SRC_COLORS); @@ -937,6 +938,7 @@ static int bink_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx, int xoff, yoff; LOCAL_ALIGNED_16(DCTELEM, block, [64]); LOCAL_ALIGNED_16(uint8_t, ublock, [64]); + LOCAL_ALIGNED_16(int32_t, dctblock, [64]); int coordmap[64]; const int stride = c->pic.linesize[plane_idx]; @@ -1019,11 +1021,10 @@ static int bink_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx, ublock[*scan++] = get_value(c, BINK_SRC_COLORS); break; case INTRA_BLOCK: - c->dsp.clear_block(block); - block[0] = get_value(c, BINK_SRC_INTRA_DC); - read_dct_coeffs(gb, block, c->scantable.permutated, bink_intra_quant, -1); - c->dsp.idct(block); - c->dsp.put_pixels_nonclamped(block, ublock, 8); + memset(dctblock, 0, sizeof(*dctblock) * 64); + dctblock[0] = get_value(c, BINK_SRC_INTRA_DC); + read_dct_coeffs(gb, dctblock, bink_scan, bink_intra_quant, -1); + c->bdsp.idct_put(ublock, 8, dctblock); break; case FILL_BLOCK: v = get_value(c, BINK_SRC_COLORS); @@ -1048,7 +1049,7 @@ static int bink_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx, return -1; } if (blk != FILL_BLOCK) - c->dsp.scale_block(ublock, dst, stride); + c->bdsp.scale_block(ublock, dst, stride); bx++; dst += 8; prev += 8; @@ -1103,10 +1104,10 @@ static int bink_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx, c->dsp.add_pixels8(dst, block, stride); break; case INTRA_BLOCK: - c->dsp.clear_block(block); - block[0] = get_value(c, BINK_SRC_INTRA_DC); - read_dct_coeffs(gb, block, c->scantable.permutated, bink_intra_quant, -1); - c->dsp.idct_put(dst, stride, block); + memset(dctblock, 0, sizeof(*dctblock) * 64); + dctblock[0] = get_value(c, BINK_SRC_INTRA_DC); + read_dct_coeffs(gb, dctblock, bink_scan, bink_intra_quant, -1); + c->bdsp.idct_put(dst, stride, dctblock); break; case FILL_BLOCK: v = get_value(c, BINK_SRC_COLORS); @@ -1117,10 +1118,10 @@ static int bink_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx, yoff = get_value(c, BINK_SRC_Y_OFF); ref = prev + xoff + yoff * stride; c->dsp.put_pixels_tab[1][0](dst, ref, stride, 8); - c->dsp.clear_block(block); - block[0] = get_value(c, BINK_SRC_INTER_DC); - read_dct_coeffs(gb, block, 
c->scantable.permutated, bink_inter_quant, -1); - c->dsp.idct_add(dst, stride, block); + memset(dctblock, 0, sizeof(*dctblock) * 64); + dctblock[0] = get_value(c, BINK_SRC_INTER_DC); + read_dct_coeffs(gb, dctblock, bink_scan, bink_inter_quant, -1); + c->bdsp.idct_add(dst, stride, dctblock); break; case PATTERN_BLOCK: for (i = 0; i < 2; i++) @@ -1288,7 +1289,7 @@ static av_cold int decode_init(AVCodecContext *avctx) avctx->idct_algo = FF_IDCT_BINK; dsputil_init(&c->dsp, avctx); - ff_init_scantable(c->dsp.idct_permutation, &c->scantable, bink_scan); + ff_binkdsp_init(&c->bdsp); init_bundles(c); @@ -1316,13 +1317,12 @@ static av_cold int decode_end(AVCodecContext *avctx) } AVCodec ff_bink_decoder = { - "binkvideo", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_BINKVIDEO, - sizeof(BinkContext), - decode_init, - NULL, - decode_end, - decode_frame, + .name = "binkvideo", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_BINKVIDEO, + .priv_data_size = sizeof(BinkContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, .long_name = NULL_IF_CONFIG_SMALL("Bink video"), }; diff --git a/libavcodec/binkaudio.c b/libavcodec/binkaudio.c index ff36458c7e..2d06aaa9e9 100644 --- a/libavcodec/binkaudio.c +++ b/libavcodec/binkaudio.c @@ -90,8 +90,7 @@ static av_cold int decode_init(AVCodecContext *avctx) return -1; } - if (avctx->extradata && avctx->extradata_size > 0) - s->version_b = avctx->extradata[0]; + s->version_b = avctx->extradata && avctx->extradata[3] == 'b'; if (avctx->codec->id == CODEC_ID_BINKAUDIO_RDFT) { // audio is already interleaved for the RDFT format variant @@ -293,25 +292,23 @@ static int decode_frame(AVCodecContext *avctx, } AVCodec ff_binkaudio_rdft_decoder = { - "binkaudio_rdft", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_BINKAUDIO_RDFT, - sizeof(BinkAudioContext), - decode_init, - NULL, - decode_end, - decode_frame, + .name = "binkaudio_rdft", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_BINKAUDIO_RDFT, + .priv_data_size = sizeof(BinkAudioContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, .long_name = NULL_IF_CONFIG_SMALL("Bink Audio (RDFT)") }; AVCodec ff_binkaudio_dct_decoder = { - "binkaudio_dct", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_BINKAUDIO_DCT, - sizeof(BinkAudioContext), - decode_init, - NULL, - decode_end, - decode_frame, + .name = "binkaudio_dct", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_BINKAUDIO_DCT, + .priv_data_size = sizeof(BinkAudioContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, .long_name = NULL_IF_CONFIG_SMALL("Bink Audio (DCT)") }; diff --git a/libavcodec/binkdata.h b/libavcodec/binkdata.h index 2c20b4a4c1..b9dc1f2639 100644 --- a/libavcodec/binkdata.h +++ b/libavcodec/binkdata.h @@ -285,7 +285,7 @@ static const uint8_t bink_patterns[16][64] = { } }; -static const uint32_t bink_intra_quant[16][64] = { +static const int32_t bink_intra_quant[16][64] = { { 0x010000, 0x016315, 0x01E83D, 0x02A535, 0x014E7B, 0x016577, 0x02F1E6, 0x02724C, 0x010000, 0x00EEDA, 0x024102, 0x017F9B, 0x00BE80, 0x00611E, 0x01083C, 0x00A552, @@ -448,7 +448,7 @@ static const uint32_t bink_intra_quant[16][64] = { }, }; -static const uint32_t bink_inter_quant[16][64] = { +static const int32_t bink_inter_quant[16][64] = { { 0x010000, 0x017946, 0x01A5A9, 0x0248DC, 0x016363, 0x0152A7, 0x0243EC, 0x0209EA, 0x012000, 0x00E248, 0x01BBDA, 0x015CBC, 0x00A486, 0x0053E0, 0x00F036, 0x008095, diff --git a/libavcodec/binkidct.c b/libavcodec/binkdsp.c index ddb6cc1af3..c751743aa8 100644 --- a/libavcodec/binkidct.c +++ b/libavcodec/binkdsp.c @@ 
-1,5 +1,5 @@ /* - * Bink IDCT algorithm + * Bink DSP routines * Copyright (c) 2009 Kostya Shishkov * * This file is part of FFmpeg. @@ -21,10 +21,11 @@ /** * @file - * Bink IDCT algorithm + * Bink DSP routines */ #include "dsputil.h" +#include "binkdsp.h" #define A1 2896 /* (1/sqrt(2))<<12 */ #define A2 2217 @@ -62,7 +63,7 @@ #define MUNGE_ROW(x) (((x) + 0x7F)>>8) #define IDCT_ROW(dest,src) IDCT_TRANSFORM(dest,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,MUNGE_ROW,src) -static inline void bink_idct_col(int *dest, const DCTELEM *src) +static inline void bink_idct_col(int *dest, const int32_t *src) { if ((src[8]|src[16]|src[24]|src[32]|src[40]|src[48]|src[56])==0) { dest[0] = @@ -78,7 +79,7 @@ static inline void bink_idct_col(int *dest, const DCTELEM *src) } } -void ff_bink_idct_c(DCTELEM *block) +static void bink_idct_c(int32_t *block) { int i; int temp[64]; @@ -90,17 +91,17 @@ void ff_bink_idct_c(DCTELEM *block) } } -void ff_bink_idct_add_c(uint8_t *dest, int linesize, DCTELEM *block) +static void bink_idct_add_c(uint8_t *dest, int linesize, int32_t *block) { int i, j; - ff_bink_idct_c(block); + bink_idct_c(block); for (i = 0; i < 8; i++, dest += linesize, block += 8) for (j = 0; j < 8; j++) dest[j] += block[j]; } -void ff_bink_idct_put_c(uint8_t *dest, int linesize, DCTELEM *block) +static void bink_idct_put_c(uint8_t *dest, int linesize, int32_t *block) { int i; int temp[64]; @@ -110,3 +111,26 @@ void ff_bink_idct_put_c(uint8_t *dest, int linesize, DCTELEM *block) IDCT_ROW( (&dest[i*linesize]), (&temp[8*i]) ); } } + +static void scale_block_c(const uint8_t src[64]/*align 8*/, uint8_t *dst/*align 8*/, int linesize) +{ + int i, j; + uint16_t *dst1 = (uint16_t *) dst; + uint16_t *dst2 = (uint16_t *)(dst + linesize); + + for (j = 0; j < 8; j++) { + for (i = 0; i < 8; i++) { + dst1[i] = dst2[i] = src[i] * 0x0101; + } + src += 8; + dst1 += linesize; + dst2 += linesize; + } +} + +void ff_binkdsp_init(BinkDSPContext *c) +{ + c->idct_add = bink_idct_add_c; + c->idct_put = bink_idct_put_c; + c->scale_block = scale_block_c; +} diff --git a/libavcodec/binkdsp.h b/libavcodec/binkdsp.h new file mode 100644 index 0000000000..d105f717e9 --- /dev/null +++ b/libavcodec/binkdsp.h @@ -0,0 +1,40 @@ +/* + * Bink DSP routines + * Copyright (c) 2009 Kostya Shishkov + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Bink DSP routines + */ + +#ifndef AVCODEC_BINKDSP_H +#define AVCODEC_BINKDSP_H + +#include "dsputil.h" + +typedef struct BinkDSPContext { + void (*idct_put)(uint8_t *dest/*align 8*/, int line_size, int32_t *block/*align 16*/); + void (*idct_add)(uint8_t *dest/*align 8*/, int line_size, int32_t *block/*align 16*/); + void (*scale_block)(const uint8_t src[64]/*align 8*/, uint8_t *dst/*align 8*/, int linesize); +} BinkDSPContext; + +void ff_binkdsp_init(BinkDSPContext *c); + +#endif /* AVCODEC_BINKDSP_H */ diff --git a/libavcodec/high_bit_depth.h b/libavcodec/bit_depth_template.c index 511cd00f3a..9071ec2a35 100644 --- a/libavcodec/high_bit_depth.h +++ b/libavcodec/bit_depth_template.c @@ -1,3 +1,21 @@ +/* + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + #include "dsputil.h" #ifndef BIT_DEPTH @@ -25,15 +43,6 @@ # undef PIXEL_SPLAT_X4 #else # define AVCODEC_H264_HIGH_DEPTH_H -# define CLIP_PIXEL(depth)\ - static inline uint16_t av_clip_pixel_ ## depth (int p)\ - {\ - const int pixel_max = (1 << depth)-1;\ - return (p & ~pixel_max) ? 
(-p)>>31 & pixel_max : p;\ - } - -CLIP_PIXEL( 9) -CLIP_PIXEL(10) #endif #if BIT_DEPTH > 8 @@ -52,6 +61,9 @@ CLIP_PIXEL(10) # define AV_WN4P AV_WN64 # define AV_WN4PA AV_WN64A # define PIXEL_SPLAT_X4(x) ((x)*0x0001000100010001ULL) + +# define av_clip_pixel(a) av_clip_uintp2(a, BIT_DEPTH) +# define CLIP(a) av_clip_uintp2(a, BIT_DEPTH) #else # define pixel uint8_t # define pixel2 uint16_t @@ -68,21 +80,12 @@ CLIP_PIXEL(10) # define AV_WN4P AV_WN32 # define AV_WN4PA AV_WN32A # define PIXEL_SPLAT_X4(x) ((x)*0x01010101U) -#endif -#if BIT_DEPTH == 8 # define av_clip_pixel(a) av_clip_uint8(a) # define CLIP(a) cm[a] -# define FUNC(a) a ## _8 -# define FUNCC(a) a ## _8_c -#elif BIT_DEPTH == 9 -# define av_clip_pixel(a) av_clip_pixel_9(a) -# define CLIP(a) av_clip_pixel_9(a) -# define FUNC(a) a ## _9 -# define FUNCC(a) a ## _9_c -#elif BIT_DEPTH == 10 -# define av_clip_pixel(a) av_clip_pixel_10(a) -# define CLIP(a) av_clip_pixel_10(a) -# define FUNC(a) a ## _10 -# define FUNCC(a) a ## _10_c #endif + +#define FUNC3(a, b, c) a ## _ ## b ## c +#define FUNC2(a, b, c) FUNC3(a, b, c) +#define FUNC(a) FUNC2(a, BIT_DEPTH,) +#define FUNCC(a) FUNC2(a, BIT_DEPTH, _c) diff --git a/libavcodec/bitstream.c b/libavcodec/bitstream.c index f0fa9652c6..b164ecdd11 100644 --- a/libavcodec/bitstream.c +++ b/libavcodec/bitstream.c @@ -43,11 +43,7 @@ const uint8_t ff_log2_run[41]={ void align_put_bits(PutBitContext *s) { -#ifdef ALT_BITSTREAM_WRITER - put_bits(s,( - s->index) & 7,0); -#else put_bits(s,s->bit_left & 7,0); -#endif } void ff_put_string(PutBitContext *pb, const char *string, int terminate_string) diff --git a/libavcodec/bmp.c b/libavcodec/bmp.c index 4c5166404b..0b387249e6 100644 --- a/libavcodec/bmp.c +++ b/libavcodec/bmp.c @@ -336,14 +336,13 @@ static av_cold int bmp_decode_end(AVCodecContext *avctx) } AVCodec ff_bmp_decoder = { - "bmp", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_BMP, - sizeof(BMPContext), - bmp_decode_init, - NULL, - bmp_decode_end, - bmp_decode_frame, - CODEC_CAP_DR1, + .name = "bmp", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_BMP, + .priv_data_size = sizeof(BMPContext), + .init = bmp_decode_init, + .close = bmp_decode_end, + .decode = bmp_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("BMP image"), }; diff --git a/libavcodec/bmpenc.c b/libavcodec/bmpenc.c index 3719a539f5..63c3b729a9 100644 --- a/libavcodec/bmpenc.c +++ b/libavcodec/bmpenc.c @@ -150,13 +150,12 @@ static int bmp_encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_s } AVCodec ff_bmp_encoder = { - "bmp", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_BMP, - sizeof(BMPContext), - bmp_encode_init, - bmp_encode_frame, - NULL, //encode_end, + .name = "bmp", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_BMP, + .priv_data_size = sizeof(BMPContext), + .init = bmp_encode_init, + .encode = bmp_encode_frame, .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_BGR24, PIX_FMT_RGB555, PIX_FMT_RGB565, diff --git a/libavcodec/c93.c b/libavcodec/c93.c index 31296395f7..1f4ed1fdf0 100644 --- a/libavcodec/c93.c +++ b/libavcodec/c93.c @@ -247,14 +247,13 @@ static int decode_frame(AVCodecContext *avctx, void *data, } AVCodec ff_c93_decoder = { - "c93", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_C93, - sizeof(C93DecoderContext), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, + .name = "c93", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_C93, + .priv_data_size = sizeof(C93DecoderContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = 
NULL_IF_CONFIG_SMALL("Interplay C93"), }; diff --git a/libavcodec/cabac.c b/libavcodec/cabac.c index 76253afe3e..5632bf811e 100644 --- a/libavcodec/cabac.c +++ b/libavcodec/cabac.c @@ -75,18 +75,7 @@ static const uint8_t lps_state[64]= { 33,33,34,34,35,35,35,36, 36,36,37,37,37,38,38,63, }; -#if 0 -const uint8_t ff_h264_norm_shift_old[128]= { - 7,6,5,5,4,4,4,4,3,3,3,3,3,3,3,3, - 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -}; -#endif + const uint8_t ff_h264_norm_shift[512]= { 9,8,7,7,6,6,6,6,5,5,5,5,5,5,5,5, 4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4, @@ -177,6 +166,140 @@ void ff_init_cabac_states(CABACContext *c){ #include "avcodec.h" #include "cabac.h" +static void put_cabac(CABACContext *c, uint8_t * const state, int bit){ + int RangeLPS= ff_h264_lps_range[2*(c->range&0xC0) + *state]; + + if(bit == ((*state)&1)){ + c->range -= RangeLPS; + *state= ff_h264_mps_state[*state]; + }else{ + c->low += c->range - RangeLPS; + c->range = RangeLPS; + *state= ff_h264_lps_state[*state]; + } + + renorm_cabac_encoder(c); + +#ifdef STRICT_LIMITS + c->symCount++; +#endif +} + +/** + * @param bit 0 -> write zero bit, !=0 write one bit + */ +static void put_cabac_bypass(CABACContext *c, int bit){ + c->low += c->low; + + if(bit){ + c->low += c->range; + } +//FIXME optimize + if(c->low<0x200){ + put_cabac_bit(c, 0); + }else if(c->low<0x400){ + c->outstanding_count++; + c->low -= 0x200; + }else{ + put_cabac_bit(c, 1); + c->low -= 0x400; + } + +#ifdef STRICT_LIMITS + c->symCount++; +#endif +} + +/** + * + * @return the number of bytes written + */ +static int put_cabac_terminate(CABACContext *c, int bit){ + c->range -= 2; + + if(!bit){ + renorm_cabac_encoder(c); + }else{ + c->low += c->range; + c->range= 2; + + renorm_cabac_encoder(c); + + assert(c->low <= 0x1FF); + put_cabac_bit(c, c->low>>9); + put_bits(&c->pb, 2, ((c->low>>7)&3)|1); + + flush_put_bits(&c->pb); //FIXME FIXME FIXME XXX wrong + } + +#ifdef STRICT_LIMITS + c->symCount++; +#endif + + return (put_bits_count(&c->pb)+7)>>3; +} + +/** + * put (truncated) unary binarization. + */ +static void put_cabac_u(CABACContext *c, uint8_t * state, int v, int max, int max_index, int truncated){ + int i; + + assert(v <= max); + + for(i=0; i<v; i++){ + put_cabac(c, state, 1); + if(i < max_index) state++; + } + if(truncated==0 || v<max) + put_cabac(c, state, 0); +} + +/** + * put unary exp golomb k-th order binarization. 
+ */ +static void put_cabac_ueg(CABACContext *c, uint8_t * state, int v, int max, int is_signed, int k, int max_index){ + int i; + + if(v==0) + put_cabac(c, state, 0); + else{ + const int sign= v < 0; + + if(is_signed) v= FFABS(v); + + if(v<max){ + for(i=0; i<v; i++){ + put_cabac(c, state, 1); + if(i < max_index) state++; + } + + put_cabac(c, state, 0); + }else{ + int m= 1<<k; + + for(i=0; i<max; i++){ + put_cabac(c, state, 1); + if(i < max_index) state++; + } + + v -= max; + while(v >= m){ //FIXME optimize + put_cabac_bypass(c, 1); + v-= m; + m+= m; + } + put_cabac_bypass(c, 0); + while(m>>=1){ + put_cabac_bypass(c, v&m); + } + } + + if(is_signed) + put_cabac_bypass(c, sign); + } +} + int main(void){ CABACContext c; uint8_t b[9*SIZE]; diff --git a/libavcodec/cabac.h b/libavcodec/cabac.h index 1b2d53f3d5..ed156e6fca 100644 --- a/libavcodec/cabac.h +++ b/libavcodec/cabac.h @@ -90,178 +90,6 @@ static inline void renorm_cabac_encoder(CABACContext *c){ } } -#ifdef TEST -static void put_cabac(CABACContext *c, uint8_t * const state, int bit){ - int RangeLPS= ff_h264_lps_range[2*(c->range&0xC0) + *state]; - - if(bit == ((*state)&1)){ - c->range -= RangeLPS; - *state= ff_h264_mps_state[*state]; - }else{ - c->low += c->range - RangeLPS; - c->range = RangeLPS; - *state= ff_h264_lps_state[*state]; - } - - renorm_cabac_encoder(c); - -#ifdef STRICT_LIMITS - c->symCount++; -#endif -} - -static void put_cabac_static(CABACContext *c, int RangeLPS, int bit){ - assert(c->range > RangeLPS); - - if(!bit){ - c->range -= RangeLPS; - }else{ - c->low += c->range - RangeLPS; - c->range = RangeLPS; - } - - renorm_cabac_encoder(c); - -#ifdef STRICT_LIMITS - c->symCount++; -#endif -} - -/** - * @param bit 0 -> write zero bit, !=0 write one bit - */ -static void put_cabac_bypass(CABACContext *c, int bit){ - c->low += c->low; - - if(bit){ - c->low += c->range; - } -//FIXME optimize - if(c->low<0x200){ - put_cabac_bit(c, 0); - }else if(c->low<0x400){ - c->outstanding_count++; - c->low -= 0x200; - }else{ - put_cabac_bit(c, 1); - c->low -= 0x400; - } - -#ifdef STRICT_LIMITS - c->symCount++; -#endif -} - -/** - * - * @return the number of bytes written - */ -static int put_cabac_terminate(CABACContext *c, int bit){ - c->range -= 2; - - if(!bit){ - renorm_cabac_encoder(c); - }else{ - c->low += c->range; - c->range= 2; - - renorm_cabac_encoder(c); - - assert(c->low <= 0x1FF); - put_cabac_bit(c, c->low>>9); - put_bits(&c->pb, 2, ((c->low>>7)&3)|1); - - flush_put_bits(&c->pb); //FIXME FIXME FIXME XXX wrong - } - -#ifdef STRICT_LIMITS - c->symCount++; -#endif - - return (put_bits_count(&c->pb)+7)>>3; -} - -/** - * put (truncated) unary binarization. - */ -static void put_cabac_u(CABACContext *c, uint8_t * state, int v, int max, int max_index, int truncated){ - int i; - - assert(v <= max); - -#if 1 - for(i=0; i<v; i++){ - put_cabac(c, state, 1); - if(i < max_index) state++; - } - if(truncated==0 || v<max) - put_cabac(c, state, 0); -#else - if(v <= max_index){ - for(i=0; i<v; i++){ - put_cabac(c, state+i, 1); - } - if(truncated==0 || v<max) - put_cabac(c, state+i, 0); - }else{ - for(i=0; i<=max_index; i++){ - put_cabac(c, state+i, 1); - } - for(; i<v; i++){ - put_cabac(c, state+max_index, 1); - } - if(truncated==0 || v<max) - put_cabac(c, state+max_index, 0); - } -#endif -} - -/** - * put unary exp golomb k-th order binarization. 
- */ -static void put_cabac_ueg(CABACContext *c, uint8_t * state, int v, int max, int is_signed, int k, int max_index){ - int i; - - if(v==0) - put_cabac(c, state, 0); - else{ - const int sign= v < 0; - - if(is_signed) v= FFABS(v); - - if(v<max){ - for(i=0; i<v; i++){ - put_cabac(c, state, 1); - if(i < max_index) state++; - } - - put_cabac(c, state, 0); - }else{ - int m= 1<<k; - - for(i=0; i<max; i++){ - put_cabac(c, state, 1); - if(i < max_index) state++; - } - - v -= max; - while(v >= m){ //FIXME optimize - put_cabac_bypass(c, 1); - v-= m; - m+= m; - } - put_cabac_bypass(c, 0); - while(m>>=1){ - put_cabac_bypass(c, v&m); - } - } - - if(is_signed) - put_cabac_bypass(c, sign); - } -} -#endif /* TEST */ - static void refill(CABACContext *c){ #if CABAC_BITS == 16 c->low+= (c->bytestream[0]<<9) + (c->bytestream[1]<<1); diff --git a/libavcodec/cavs.c b/libavcodec/cavs.c index db49ef3e67..47fc5a5da8 100644 --- a/libavcodec/cavs.c +++ b/libavcodec/cavs.c @@ -333,9 +333,9 @@ static inline void mc_dir_part(AVSContext *h,Picture *pic,int square, const int mx= mv->x + src_x_offset*8; const int my= mv->y + src_y_offset*8; const int luma_xy= (mx&3) + ((my&3)<<2); - uint8_t * src_y = pic->data[0] + (mx>>2) + (my>>2)*h->l_stride; - uint8_t * src_cb= pic->data[1] + (mx>>3) + (my>>3)*h->c_stride; - uint8_t * src_cr= pic->data[2] + (mx>>3) + (my>>3)*h->c_stride; + uint8_t * src_y = pic->f.data[0] + (mx >> 2) + (my >> 2) * h->l_stride; + uint8_t * src_cb = pic->f.data[1] + (mx >> 3) + (my >> 3) * h->c_stride; + uint8_t * src_cr = pic->f.data[2] + (mx >> 3) + (my >> 3) * h->c_stride; int extra_width= 0; //(s->flags&CODEC_FLAG_EMU_EDGE) ? 0 : 16; int extra_height= extra_width; int emu=0; @@ -344,7 +344,7 @@ static inline void mc_dir_part(AVSContext *h,Picture *pic,int square, const int pic_width = 16*h->mb_width; const int pic_height = 16*h->mb_height; - if(!pic->data[0]) + if(!pic->f.data[0]) return; if(mx&7) extra_width -= 3; if(my&7) extra_height -= 3; @@ -602,9 +602,9 @@ int ff_cavs_next_mb(AVSContext *h) { h->mbx = 0; h->mby++; /* re-calculate sample pointers */ - h->cy = h->picture.data[0] + h->mby*16*h->l_stride; - h->cu = h->picture.data[1] + h->mby*8*h->c_stride; - h->cv = h->picture.data[2] + h->mby*8*h->c_stride; + h->cy = h->picture.f.data[0] + h->mby * 16 * h->l_stride; + h->cu = h->picture.f.data[1] + h->mby * 8 * h->c_stride; + h->cv = h->picture.f.data[2] + h->mby * 8 * h->c_stride; if(h->mby == h->mb_height) { //frame end return 0; } @@ -629,11 +629,11 @@ void ff_cavs_init_pic(AVSContext *h) { h->mv[MV_FWD_X0] = ff_cavs_dir_mv; set_mvs(&h->mv[MV_FWD_X0], BLK_16X16); h->pred_mode_Y[3] = h->pred_mode_Y[6] = NOT_AVAIL; - h->cy = h->picture.data[0]; - h->cu = h->picture.data[1]; - h->cv = h->picture.data[2]; - h->l_stride = h->picture.linesize[0]; - h->c_stride = h->picture.linesize[1]; + h->cy = h->picture.f.data[0]; + h->cu = h->picture.f.data[1]; + h->cv = h->picture.f.data[2]; + h->l_stride = h->picture.f.linesize[0]; + h->c_stride = h->picture.f.linesize[1]; h->luma_scan[2] = 8*h->l_stride; h->luma_scan[3] = 8*h->l_stride+8; h->mbx = h->mby = h->mbidx = 0; diff --git a/libavcodec/cavsdec.c b/libavcodec/cavsdec.c index c6ccb06524..6f4c83b850 100644 --- a/libavcodec/cavsdec.c +++ b/libavcodec/cavsdec.c @@ -476,8 +476,8 @@ static int decode_pic(AVSContext *h) { return -1; } /* make sure we have the reference frames we need */ - if(!h->DPB[0].data[0] || - (!h->DPB[1].data[0] && h->pic_type == AV_PICTURE_TYPE_B)) + if(!h->DPB[0].f.data[0] || + (!h->DPB[1].f.data[0] && h->pic_type == 
AV_PICTURE_TYPE_B)) return -1; } else { h->pic_type = AV_PICTURE_TYPE_I; @@ -494,7 +494,7 @@ static int decode_pic(AVSContext *h) { skip_bits(&s->gb,1); //marker_bit } /* release last B frame */ - if(h->picture.data[0]) + if(h->picture.f.data[0]) s->avctx->release_buffer(s->avctx, (AVFrame *)&h->picture); s->avctx->get_buffer(s->avctx, (AVFrame *)&h->picture); @@ -585,7 +585,7 @@ static int decode_pic(AVSContext *h) { } while(ff_cavs_next_mb(h)); } if(h->pic_type != AV_PICTURE_TYPE_B) { - if(h->DPB[1].data[0]) + if(h->DPB[1].f.data[0]) s->avctx->release_buffer(s->avctx, (AVFrame *)&h->DPB[1]); h->DPB[1] = h->DPB[0]; h->DPB[0] = h->picture; @@ -648,7 +648,7 @@ static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size, s->avctx = avctx; if (buf_size == 0) { - if(!s->low_delay && h->DPB[0].data[0]) { + if (!s->low_delay && h->DPB[0].f.data[0]) { *data_size = sizeof(AVPicture); *picture = *(AVFrame *) &h->DPB[0]; } @@ -669,9 +669,9 @@ static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size, break; case PIC_I_START_CODE: if(!h->got_keyframe) { - if(h->DPB[0].data[0]) + if(h->DPB[0].f.data[0]) avctx->release_buffer(avctx, (AVFrame *)&h->DPB[0]); - if(h->DPB[1].data[0]) + if(h->DPB[1].f.data[0]) avctx->release_buffer(avctx, (AVFrame *)&h->DPB[1]); h->got_keyframe = 1; } @@ -685,7 +685,7 @@ static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size, break; *data_size = sizeof(AVPicture); if(h->pic_type != AV_PICTURE_TYPE_B) { - if(h->DPB[1].data[0]) { + if(h->DPB[1].f.data[0]) { *picture = *(AVFrame *) &h->DPB[1]; } else { *data_size = 0; @@ -710,15 +710,14 @@ static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size, } AVCodec ff_cavs_decoder = { - "cavs", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_CAVS, - sizeof(AVSContext), - ff_cavs_init, - NULL, - ff_cavs_end, - cavs_decode_frame, - CODEC_CAP_DR1 | CODEC_CAP_DELAY, + .name = "cavs", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_CAVS, + .priv_data_size = sizeof(AVSContext), + .init = ff_cavs_init, + .close = ff_cavs_end, + .decode = cavs_decode_frame, + .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY, .flush= cavs_flush, .long_name= NULL_IF_CONFIG_SMALL("Chinese AVS video (AVS1-P2, JiZhun profile)"), }; diff --git a/libavcodec/cdgraphics.c b/libavcodec/cdgraphics.c index aae7bbbb1b..f7d9e5f7e3 100644 --- a/libavcodec/cdgraphics.c +++ b/libavcodec/cdgraphics.c @@ -26,8 +26,8 @@ * @file * @brief CD Graphics Video Decoder * @author Michael Tison - * @sa http://wiki.multimedia.cx/index.php?title=CD_Graphics - * @sa http://www.ccs.neu.edu/home/bchafy/cdb/info/cdg + * @see http://wiki.multimedia.cx/index.php?title=CD_Graphics + * @see http://www.ccs.neu.edu/home/bchafy/cdb/info/cdg */ /// default screen sizes @@ -368,14 +368,13 @@ static av_cold int cdg_decode_end(AVCodecContext *avctx) } AVCodec ff_cdgraphics_decoder = { - "cdgraphics", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_CDGRAPHICS, - sizeof(CDGraphicsContext), - cdg_decode_init, - NULL, - cdg_decode_end, - cdg_decode_frame, - CODEC_CAP_DR1, + .name = "cdgraphics", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_CDGRAPHICS, + .priv_data_size = sizeof(CDGraphicsContext), + .init = cdg_decode_init, + .close = cdg_decode_end, + .decode = cdg_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("CD Graphics video"), }; diff --git a/libavcodec/celp_filters.h b/libavcodec/celp_filters.h index 145e3d3346..2fb2b03aaa 100644 --- a/libavcodec/celp_filters.h +++ b/libavcodec/celp_filters.h @@ -34,7 +34,7 @@ * 
* fc_out[n] = sum(i,0,len-1){ fc_in[i] * filter[(len + n - i)%len] } * - * \note fc_in and fc_out should not overlap! + * @note fc_in and fc_out should not overlap! */ void ff_celp_convolve_circ(int16_t *fc_out, const int16_t *fc_in, const int16_t *filter, int len); diff --git a/libavcodec/celp_math.c b/libavcodec/celp_math.c index 09111da819..de33109b44 100644 --- a/libavcodec/celp_math.c +++ b/libavcodec/celp_math.c @@ -27,82 +27,6 @@ #include "avcodec.h" #include "celp_math.h" -#ifdef G729_BITEXACT -/** - * Cosine table: base_cos[i] = (1<<15) * cos(i*PI/64) - */ -static const int16_t base_cos[64] = -{ - 32767, 32729, 32610, 32413, 32138, 31786, 31357, 30853, - 30274, 29622, 28899, 28106, 27246, 26320, 25330, 24279, - 23170, 22006, 20788, 19520, 18205, 16846, 15447, 14010, - 12540, 11039, 9512, 7962, 6393, 4808, 3212, 1608, - 0, -1608, -3212, -4808, -6393, -7962, -9512, -11039, - -12540, -14010, -15447, -16846, -18205, -19520, -20788, -22006, - -23170, -24279, -25330, -26320, -27246, -28106, -28899, -29622, - -30274, -30853, -31357, -31786, -32138, -32413, -32610, -32729 -}; - -/** - * Slope used to compute cos(x) - * - * cos(ind*64+offset) = base_cos[ind]+offset*slope_cos[ind] - * values multiplied by 1<<19 - */ -static const int16_t slope_cos[64] = -{ - -632, -1893, -3150, -4399, -5638, -6863, -8072, -9261, - -10428, -11570, -12684, -13767, -14817, -15832, -16808, -17744, - -18637, -19486, -20287, -21039, -21741, -22390, -22986, -23526, - -24009, -24435, -24801, -25108, -25354, -25540, -25664, -25726, - -25726, -25664, -25540, -25354, -25108, -24801, -24435, -24009, - -23526, -22986, -22390, -21741, -21039, -20287, -19486, -18637, - -17744, -16808, -15832, -14817, -13767, -12684, -11570, -10428, - -9261, -8072, -6863, -5638, -4399, -3150, -1893, -632 -}; - -/** - * Table used to compute exp2(x) - * - * tab_exp2[i] = (1<<14) * exp2(i/32) = 2^(i/32) i=0..32 - */ -static const uint16_t tab_exp2[33] = -{ - 16384, 16743, 17109, 17484, 17867, 18258, 18658, 19066, 19484, 19911, - 20347, 20792, 21247, 21713, 22188, 22674, 23170, 23678, 24196, 24726, - 25268, 25821, 26386, 26964, 27554, 28158, 28774, 29405, 30048, 30706, - 31379, 32066, 32767 -}; - -int16_t ff_cos(uint16_t arg) -{ - uint8_t offset= arg; - uint8_t ind = arg >> 8; - - assert(arg < 0x4000); - - return FFMAX(base_cos[ind] + ((slope_cos[ind] * offset) >> 12), -0x8000); -} - -int ff_exp2(uint16_t power) -{ - uint16_t frac_x0; - uint16_t frac_dx; - int result; - - assert(power <= 0x7fff); - - frac_x0 = power >> 10; - frac_dx = (power & 0x03ff) << 5; - - result = tab_exp2[frac_x0] << 15; - result += frac_dx * (tab_exp2[frac_x0+1] - tab_exp2[frac_x0]); - - return result >> 10; -} - -#else // G729_BITEXACT - /** * Cosine table: base_cos[i] = (1<<15) * cos(i*PI/64) */ @@ -154,8 +78,6 @@ int ff_exp2(uint16_t power) return result + ((result*(power&31)*89)>>22); } -#endif // else G729_BITEXACT - /** * Table used to compute log2(x) * @@ -163,17 +85,10 @@ int ff_exp2(uint16_t power) */ static const uint16_t tab_log2[33] = { -#ifdef G729_BITEXACT - 0, 1455, 2866, 4236, 5568, 6863, 8124, 9352, - 10549, 11716, 12855, 13967, 15054, 16117, 17156, 18172, - 19167, 20142, 21097, 22033, 22951, 23852, 24735, 25603, - 26455, 27291, 28113, 28922, 29716, 30497, 31266, 32023, 32767, -#else 4, 1459, 2870, 4240, 5572, 6867, 8127, 9355, 10552, 11719, 12858, 13971, 15057, 16120, 17158, 18175, 19170, 20145, 21100, 22036, 22954, 23854, 24738, 25605, 26457, 27294, 28116, 28924, 29719, 30500, 31269, 32025, 32769, -#endif }; int ff_log2(uint32_t value) diff --git 
a/libavcodec/cinepak.c b/libavcodec/cinepak.c index f2cbdc406e..6bdb29b9eb 100644 --- a/libavcodec/cinepak.c +++ b/libavcodec/cinepak.c @@ -457,14 +457,13 @@ static av_cold int cinepak_decode_end(AVCodecContext *avctx) } AVCodec ff_cinepak_decoder = { - "cinepak", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_CINEPAK, - sizeof(CinepakContext), - cinepak_decode_init, - NULL, - cinepak_decode_end, - cinepak_decode_frame, - CODEC_CAP_DR1, + .name = "cinepak", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_CINEPAK, + .priv_data_size = sizeof(CinepakContext), + .init = cinepak_decode_init, + .close = cinepak_decode_end, + .decode = cinepak_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Cinepak"), }; diff --git a/libavcodec/cljr.c b/libavcodec/cljr.c index c9b0911674..2e7fd51f1f 100644 --- a/libavcodec/cljr.c +++ b/libavcodec/cljr.c @@ -142,27 +142,24 @@ static av_cold int encode_init(AVCodecContext *avctx){ #endif AVCodec ff_cljr_decoder = { - "cljr", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_CLJR, - sizeof(CLJRContext), - decode_init, - NULL, - NULL, - decode_frame, - CODEC_CAP_DR1, + .name = "cljr", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_CLJR, + .priv_data_size = sizeof(CLJRContext), + .init = decode_init, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Cirrus Logic AccuPak"), }; #if CONFIG_CLJR_ENCODER AVCodec ff_cljr_encoder = { - "cljr", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_CLJR, - sizeof(CLJRContext), - encode_init, - encode_frame, - //encode_end, + .name = "cljr", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_CLJR, + .priv_data_size = sizeof(CLJRContext), + .init = encode_init, + .encode = encode_frame, .long_name = NULL_IF_CONFIG_SMALL("Cirrus Logic AccuPak"), }; #endif diff --git a/libavcodec/cook.c b/libavcodec/cook.c index f0dee7905a..6def1ac2f7 100644 --- a/libavcodec/cook.c +++ b/libavcodec/cook.c @@ -335,7 +335,7 @@ static av_cold int cook_decode_close(AVCodecContext *avctx) * Fill the gain array for the timedomain quantization. 
* * @param gb pointer to the GetBitContext - * @param gaininfo[9] array of gain indexes + * @param gaininfo array[9] of gain indexes */ static void decode_gain_info(GetBitContext *gb, int *gaininfo) @@ -1156,7 +1156,6 @@ static av_cold int cook_decode_init(AVCodecContext *avctx) default: av_log_ask_for_sample(avctx, "Unknown Cook version.\n"); return -1; - break; } if(s > 1 && q->subpacket[s].samples_per_channel != q->samples_per_channel) { diff --git a/libavcodec/crystalhd.c b/libavcodec/crystalhd.c index ce1cd55228..173f35607d 100644 --- a/libavcodec/crystalhd.c +++ b/libavcodec/crystalhd.c @@ -402,36 +402,17 @@ static av_cold int init(AVCodecContext *avctx) uint8_t *dummy_p; int dummy_int; - uint32_t orig_data_size = avctx->extradata_size; - uint8_t *orig_data = av_malloc(orig_data_size); - if (!orig_data) { - av_log(avctx, AV_LOG_ERROR, - "Failed to allocate copy of extradata\n"); - return AVERROR(ENOMEM); - } - memcpy(orig_data, avctx->extradata, orig_data_size); - - priv->bsfc = av_bitstream_filter_init("h264_mp4toannexb"); if (!priv->bsfc) { av_log(avctx, AV_LOG_ERROR, "Cannot open the h264_mp4toannexb BSF!\n"); - av_free(orig_data); return AVERROR_BSF_NOT_FOUND; } av_bitstream_filter_filter(priv->bsfc, avctx, NULL, &dummy_p, &dummy_int, NULL, 0, 0); - - priv->sps_pps_buf = avctx->extradata; - priv->sps_pps_size = avctx->extradata_size; - avctx->extradata = orig_data; - avctx->extradata_size = orig_data_size; - - format.pMetaData = priv->sps_pps_buf; - format.metaDataSz = priv->sps_pps_size; - format.startCodeSz = (avctx->extradata[4] & 0x03) + 1; } - break; + subtype = BC_MSUBTYPE_H264; + // Fall-through case BC_MSUBTYPE_H264: format.startCodeSz = 4; // Fall-through @@ -809,7 +790,9 @@ static int decode(AVCodecContext *avctx, void *data, int *data_size, AVPacket *a CopyRet rec_ret; CHDContext *priv = avctx->priv_data; HANDLE dev = priv->dev; + uint8_t *in_data = avpkt->data; int len = avpkt->size; + int free_data = 0; uint8_t pic_type = 0; av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: decode_frame\n"); @@ -834,15 +817,14 @@ static int decode(AVCodecContext *avctx, void *data, int *data_size, AVPacket *a int32_t tx_free = (int32_t)DtsTxFreeSize(dev); if (priv->parser) { - uint8_t *in_data = avpkt->data; - int in_len = len; int ret = 0; if (priv->bsfc) { ret = av_bitstream_filter_filter(priv->bsfc, avctx, NULL, - &in_data, &in_len, + &in_data, &len, avpkt->data, len, 0); } + free_data = ret > 0; if (ret >= 0) { uint8_t *pout; @@ -851,13 +833,13 @@ static int decode(AVCodecContext *avctx, void *data, int *data_size, AVPacket *a H264Context *h = priv->parser->priv_data; index = av_parser_parse2(priv->parser, avctx, &pout, &psize, - in_data, in_len, avctx->pkt->pts, + in_data, len, avctx->pkt->pts, avctx->pkt->dts, 0); if (index < 0) { av_log(avctx, AV_LOG_WARNING, "CrystalHD: Failed to parse h.264 packet to " "detect interlacing.\n"); - } else if (index != in_len) { + } else if (index != len) { av_log(avctx, AV_LOG_WARNING, "CrystalHD: Failed to parse h.264 packet " "completely. Interlaced frames may be " @@ -874,9 +856,6 @@ static int decode(AVCodecContext *avctx, void *data, int *data_size, AVPacket *a "packet. 
Interlaced frames may be incorrectly " "detected.\n"); } - if (ret > 0) { - av_freep(&in_data); - } } if (len < tx_free - 1024) { @@ -891,11 +870,17 @@ static int decode(AVCodecContext *avctx, void *data, int *data_size, AVPacket *a */ uint64_t pts = opaque_list_push(priv, avctx->pkt->pts, pic_type); if (!pts) { + if (free_data) { + av_freep(&in_data); + } return AVERROR(ENOMEM); } av_log(priv->avctx, AV_LOG_VERBOSE, "input \"pts\": %"PRIu64"\n", pts); - ret = DtsProcInput(dev, avpkt->data, len, pts, 0); + ret = DtsProcInput(dev, in_data, len, pts, 0); + if (free_data) { + av_freep(&in_data); + } if (ret == BC_STS_BUSY) { av_log(avctx, AV_LOG_WARNING, "CrystalHD: ProcInput returned busy\n"); diff --git a/libavcodec/cscd.c b/libavcodec/cscd.c index 9255503e05..8f7f132271 100644 --- a/libavcodec/cscd.c +++ b/libavcodec/cscd.c @@ -256,15 +256,14 @@ static av_cold int decode_end(AVCodecContext *avctx) { } AVCodec ff_cscd_decoder = { - "camstudio", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_CSCD, - sizeof(CamStudioContext), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, + .name = "camstudio", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_CSCD, + .priv_data_size = sizeof(CamStudioContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("CamStudio"), }; diff --git a/libavcodec/cyuv.c b/libavcodec/cyuv.c index 1c665aefc8..2df6087ae0 100644 --- a/libavcodec/cyuv.c +++ b/libavcodec/cyuv.c @@ -180,32 +180,28 @@ static av_cold int cyuv_decode_end(AVCodecContext *avctx) #if CONFIG_AURA_DECODER AVCodec ff_aura_decoder = { - "aura", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_AURA, - sizeof(CyuvDecodeContext), - cyuv_decode_init, - NULL, - cyuv_decode_end, - cyuv_decode_frame, - CODEC_CAP_DR1, - NULL, + .name = "aura", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_AURA, + .priv_data_size = sizeof(CyuvDecodeContext), + .init = cyuv_decode_init, + .close = cyuv_decode_end, + .decode = cyuv_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Auravision AURA"), }; #endif #if CONFIG_CYUV_DECODER AVCodec ff_cyuv_decoder = { - "cyuv", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_CYUV, - sizeof(CyuvDecodeContext), - cyuv_decode_init, - NULL, - cyuv_decode_end, - cyuv_decode_frame, - CODEC_CAP_DR1, - NULL, + .name = "cyuv", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_CYUV, + .priv_data_size = sizeof(CyuvDecodeContext), + .init = cyuv_decode_init, + .close = cyuv_decode_end, + .decode = cyuv_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Creative YUV (CYUV)"), }; #endif diff --git a/libavcodec/dca.c b/libavcodec/dca.c index 4b445bc743..e11439f939 100644 --- a/libavcodec/dca.c +++ b/libavcodec/dca.c @@ -1316,7 +1316,7 @@ static int dca_convert_bitstream(const uint8_t * src, int src_size, uint8_t * ds PutBitContext pb; if ((unsigned)src_size > (unsigned)max_size) { -// av_log(NULL, AV_LOG_ERROR, "Input frame size larger then DCA_MAX_FRAME_SIZE!\n"); +// av_log(NULL, AV_LOG_ERROR, "Input frame size larger than DCA_MAX_FRAME_SIZE!\n"); // return -1; src_size = max_size; } @@ -1650,6 +1650,7 @@ static int dca_decode_frame(AVCodecContext * avctx, //set AVCodec values with parsed data avctx->sample_rate = s->sample_rate; avctx->bit_rate = s->bit_rate; + avctx->frame_size = s->sample_blocks * 32; s->profile = FF_PROFILE_DTS; diff --git a/libavcodec/dct-test.c b/libavcodec/dct-test.c index 2abb05bd3b..9e1e99672b 100644 --- a/libavcodec/dct-test.c +++ b/libavcodec/dct-test.c 
@@ -68,12 +68,12 @@ void ff_simple_idct_neon(DCTELEM *data); void ff_simple_idct_axp(DCTELEM *data); struct algo { - const char *name; - enum { FDCT, IDCT } is_idct; - void (* func) (DCTELEM *block); - void (* ref) (DCTELEM *block); - enum formattag { NO_PERM,MMX_PERM, MMX_SIMPLE_PERM, SCALE_PERM, SSE2_PERM, PARTTRANS_PERM } format; - int mm_support; + const char *name; + void (*func)(DCTELEM *block); + enum formattag { NO_PERM, MMX_PERM, MMX_SIMPLE_PERM, SCALE_PERM, + SSE2_PERM, PARTTRANS_PERM } format; + int mm_support; + int nonspec; }; #ifndef FAAN_POSTSCALE @@ -84,61 +84,69 @@ struct algo { static int cpu_flags; -struct algo algos[] = { - {"REF-DBL", 0, ff_ref_fdct, ff_ref_fdct, NO_PERM}, - {"FAAN", 0, ff_faandct, ff_ref_fdct, FAAN_SCALE}, - {"FAANI", 1, ff_faanidct, ff_ref_idct, NO_PERM}, - {"IJG-AAN-INT", 0, fdct_ifast, ff_ref_fdct, SCALE_PERM}, - {"IJG-LLM-INT", 0, ff_jpeg_fdct_islow, ff_ref_fdct, NO_PERM}, - {"REF-DBL", 1, ff_ref_idct, ff_ref_idct, NO_PERM}, - {"INT", 1, j_rev_dct, ff_ref_idct, MMX_PERM}, - {"SIMPLE-C", 1, ff_simple_idct, ff_ref_idct, NO_PERM}, +static const struct algo fdct_tab[] = { + { "REF-DBL", ff_ref_fdct, NO_PERM }, + { "FAAN", ff_faandct, FAAN_SCALE }, + { "IJG-AAN-INT", fdct_ifast, SCALE_PERM }, + { "IJG-LLM-INT", ff_jpeg_fdct_islow_8, NO_PERM }, #if HAVE_MMX - {"MMX", 0, ff_fdct_mmx, ff_ref_fdct, NO_PERM, AV_CPU_FLAG_MMX}, -#if HAVE_MMX2 - {"MMX2", 0, ff_fdct_mmx2, ff_ref_fdct, NO_PERM, AV_CPU_FLAG_MMX2}, - {"SSE2", 0, ff_fdct_sse2, ff_ref_fdct, NO_PERM, AV_CPU_FLAG_SSE2}, + { "MMX", ff_fdct_mmx, NO_PERM, AV_CPU_FLAG_MMX }, + { "MMX2", ff_fdct_mmx2, NO_PERM, AV_CPU_FLAG_MMX2 }, + { "SSE2", ff_fdct_sse2, NO_PERM, AV_CPU_FLAG_SSE2 }, #endif -#if CONFIG_GPL - {"LIBMPEG2-MMX", 1, ff_mmx_idct, ff_ref_idct, MMX_PERM, AV_CPU_FLAG_MMX}, - {"LIBMPEG2-MMX2", 1, ff_mmxext_idct, ff_ref_idct, MMX_PERM, AV_CPU_FLAG_MMX2}, +#if HAVE_ALTIVEC + { "altivecfdct", fdct_altivec, NO_PERM, AV_CPU_FLAG_ALTIVEC }, #endif - {"SIMPLE-MMX", 1, ff_simple_idct_mmx, ff_ref_idct, MMX_SIMPLE_PERM, AV_CPU_FLAG_MMX}, - {"XVID-MMX", 1, ff_idct_xvid_mmx, ff_ref_idct, NO_PERM, AV_CPU_FLAG_MMX}, - {"XVID-MMX2", 1, ff_idct_xvid_mmx2, ff_ref_idct, NO_PERM, AV_CPU_FLAG_MMX2}, - {"XVID-SSE2", 1, ff_idct_xvid_sse2, ff_ref_idct, SSE2_PERM, AV_CPU_FLAG_SSE2}, + +#if ARCH_BFIN + { "BFINfdct", ff_bfin_fdct, NO_PERM }, #endif -#if HAVE_ALTIVEC - {"altivecfdct", 0, fdct_altivec, ff_ref_fdct, NO_PERM, AV_CPU_FLAG_ALTIVEC}, + { 0 } +}; + +static const struct algo idct_tab[] = { + { "FAANI", ff_faanidct, NO_PERM }, + { "REF-DBL", ff_ref_idct, NO_PERM }, + { "INT", j_rev_dct, MMX_PERM }, + { "SIMPLE-C", ff_simple_idct_8, NO_PERM }, + +#if HAVE_MMX +#if CONFIG_GPL + { "LIBMPEG2-MMX", ff_mmx_idct, MMX_PERM, AV_CPU_FLAG_MMX, 1 }, + { "LIBMPEG2-MMX2", ff_mmxext_idct, MMX_PERM, AV_CPU_FLAG_MMX2, 1 }, +#endif + { "SIMPLE-MMX", ff_simple_idct_mmx, MMX_SIMPLE_PERM, AV_CPU_FLAG_MMX }, + { "XVID-MMX", ff_idct_xvid_mmx, NO_PERM, AV_CPU_FLAG_MMX, 1 }, + { "XVID-MMX2", ff_idct_xvid_mmx2, NO_PERM, AV_CPU_FLAG_MMX2, 1 }, + { "XVID-SSE2", ff_idct_xvid_sse2, SSE2_PERM, AV_CPU_FLAG_SSE2, 1 }, #endif #if ARCH_BFIN - {"BFINfdct", 0, ff_bfin_fdct, ff_ref_fdct, NO_PERM}, - {"BFINidct", 1, ff_bfin_idct, ff_ref_idct, NO_PERM}, + { "BFINidct", ff_bfin_idct, NO_PERM }, #endif #if ARCH_ARM - {"SIMPLE-ARM", 1, ff_simple_idct_arm, ff_ref_idct, NO_PERM }, - {"INT-ARM", 1, ff_j_rev_dct_arm, ff_ref_idct, MMX_PERM }, + { "SIMPLE-ARM", ff_simple_idct_arm, NO_PERM }, + { "INT-ARM", ff_j_rev_dct_arm, MMX_PERM }, +#endif #if HAVE_ARMV5TE - 
{"SIMPLE-ARMV5TE", 1, ff_simple_idct_armv5te, ff_ref_idct, NO_PERM }, + { "SIMPLE-ARMV5TE", ff_simple_idct_armv5te,NO_PERM }, #endif #if HAVE_ARMV6 - {"SIMPLE-ARMV6", 1, ff_simple_idct_armv6, ff_ref_idct, MMX_PERM }, + { "SIMPLE-ARMV6", ff_simple_idct_armv6, MMX_PERM }, #endif #if HAVE_NEON - {"SIMPLE-NEON", 1, ff_simple_idct_neon, ff_ref_idct, PARTTRANS_PERM }, + { "SIMPLE-NEON", ff_simple_idct_neon, PARTTRANS_PERM }, #endif -#endif /* ARCH_ARM */ #if ARCH_ALPHA - {"SIMPLE-ALPHA", 1, ff_simple_idct_axp, ff_ref_idct, NO_PERM }, + { "SIMPLE-ALPHA", ff_simple_idct_axp, NO_PERM }, #endif - { 0 } + { 0 } }; #define AANSCALE_BITS 12 @@ -148,7 +156,7 @@ uint8_t cropTbl[256 + 2 * MAX_NEG_CROP]; static int64_t gettime(void) { struct timeval tv; - gettimeofday(&tv,NULL); + gettimeofday(&tv, NULL); return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec; } @@ -157,18 +165,18 @@ static int64_t gettime(void) static short idct_mmx_perm[64]; -static short idct_simple_mmx_perm[64]={ - 0x00, 0x08, 0x04, 0x09, 0x01, 0x0C, 0x05, 0x0D, - 0x10, 0x18, 0x14, 0x19, 0x11, 0x1C, 0x15, 0x1D, - 0x20, 0x28, 0x24, 0x29, 0x21, 0x2C, 0x25, 0x2D, - 0x12, 0x1A, 0x16, 0x1B, 0x13, 0x1E, 0x17, 0x1F, - 0x02, 0x0A, 0x06, 0x0B, 0x03, 0x0E, 0x07, 0x0F, - 0x30, 0x38, 0x34, 0x39, 0x31, 0x3C, 0x35, 0x3D, - 0x22, 0x2A, 0x26, 0x2B, 0x23, 0x2E, 0x27, 0x2F, - 0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F, +static short idct_simple_mmx_perm[64] = { + 0x00, 0x08, 0x04, 0x09, 0x01, 0x0C, 0x05, 0x0D, + 0x10, 0x18, 0x14, 0x19, 0x11, 0x1C, 0x15, 0x1D, + 0x20, 0x28, 0x24, 0x29, 0x21, 0x2C, 0x25, 0x2D, + 0x12, 0x1A, 0x16, 0x1B, 0x13, 0x1E, 0x17, 0x1F, + 0x02, 0x0A, 0x06, 0x0B, 0x03, 0x0E, 0x07, 0x0F, + 0x30, 0x38, 0x34, 0x39, 0x31, 0x3C, 0x35, 0x3D, + 0x22, 0x2A, 0x26, 0x2B, 0x23, 0x2E, 0x27, 0x2F, + 0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F, }; -static const uint8_t idct_sse2_row_perm[8] = {0, 4, 1, 5, 2, 6, 3, 7}; +static const uint8_t idct_sse2_row_perm[8] = { 0, 4, 1, 5, 2, 6, 3, 7 }; static void idct_mmx_init(void) { @@ -177,13 +185,11 @@ static void idct_mmx_init(void) /* the mmx/mmxext idct uses a reordered input, so we patch scan tables */ for (i = 0; i < 64; i++) { idct_mmx_perm[i] = (i & 0x38) | ((i & 6) >> 1) | ((i & 1) << 2); -// idct_simple_mmx_perm[i] = simple_block_permute_op(i); } } DECLARE_ALIGNED(16, static DCTELEM, block)[64]; -DECLARE_ALIGNED(8, static DCTELEM, block1)[64]; -DECLARE_ALIGNED(8, static DCTELEM, block_org)[64]; +DECLARE_ALIGNED(8, static DCTELEM, block1)[64]; static inline void mmx_emms(void) { @@ -193,188 +199,174 @@ static inline void mmx_emms(void) #endif } -static void dct_error(const char *name, int is_idct, - void (*fdct_func)(DCTELEM *block), - void (*fdct_ref)(DCTELEM *block), int form, int test, const int bits) + +static int dct_error(const struct algo *dct, int test, int is_idct, int speed, const int bits) { + void (*ref)(DCTELEM *block) = is_idct ? 
ff_ref_idct : ff_ref_fdct; int it, i, scale; int err_inf, v; - int64_t err2, ti, ti1, it1; - int64_t sysErr[64], sysErrMax=0; - int maxout=0; - int blockSumErrMax=0, blockSumErr; + int64_t err2, ti, ti1, it1, err_sum = 0; + int64_t sysErr[64], sysErrMax = 0; + int maxout = 0; + int blockSumErrMax = 0, blockSumErr; AVLFG prng; const int vals=1<<bits; + double omse, ome; + int spec_err; av_lfg_init(&prng, 1); err_inf = 0; err2 = 0; - for(i=0; i<64; i++) sysErr[i]=0; - for(it=0;it<NB_ITS;it++) { - for(i=0;i<64;i++) + for (i = 0; i < 64; i++) + sysErr[i] = 0; + for (it = 0; it < NB_ITS; it++) { + for (i = 0; i < 64; i++) block1[i] = 0; - switch(test){ + switch (test) { case 0: - for(i=0;i<64;i++) + for (i = 0; i < 64; i++) block1[i] = (av_lfg_get(&prng) % (2*vals)) -vals; - if (is_idct){ + if (is_idct) { ff_ref_fdct(block1); - - for(i=0;i<64;i++) - block1[i]>>=3; + for (i = 0; i < 64; i++) + block1[i] >>= 3; } - break; - case 1:{ - int num = av_lfg_get(&prng) % 10 + 1; - for(i=0;i<num;i++) - block1[av_lfg_get(&prng) % 64] = av_lfg_get(&prng) % (2*vals) -vals; - }break; + break; + case 1: { + int num = av_lfg_get(&prng) % 10 + 1; + for (i = 0; i < num; i++) + block1[av_lfg_get(&prng) % 64] = av_lfg_get(&prng) % (2*vals) -vals; + } + break; case 2: block1[0] = av_lfg_get(&prng) % (16*vals) - (8*vals); - block1[63]= (block1[0]&1)^1; - break; + block1[63] = (block1[0] & 1) ^ 1; + break; } -#if 0 // simulate mismatch control -{ int sum=0; - for(i=0;i<64;i++) - sum+=block1[i]; - - if((sum&1)==0) block1[63]^=1; -} -#endif - - for(i=0; i<64; i++) - block_org[i]= block1[i]; - - if (form == MMX_PERM) { - for(i=0;i<64;i++) + if (dct->format == MMX_PERM) { + for (i = 0; i < 64; i++) block[idct_mmx_perm[i]] = block1[i]; - } else if (form == MMX_SIMPLE_PERM) { - for(i=0;i<64;i++) + } else if (dct->format == MMX_SIMPLE_PERM) { + for (i = 0; i < 64; i++) block[idct_simple_mmx_perm[i]] = block1[i]; - - } else if (form == SSE2_PERM) { - for(i=0; i<64; i++) - block[(i&0x38) | idct_sse2_row_perm[i&7]] = block1[i]; - } else if (form == PARTTRANS_PERM) { - for(i=0; i<64; i++) - block[(i&0x24) | ((i&3)<<3) | ((i>>3)&3)] = block1[i]; + } else if (dct->format == SSE2_PERM) { + for (i = 0; i < 64; i++) + block[(i & 0x38) | idct_sse2_row_perm[i & 7]] = block1[i]; + } else if (dct->format == PARTTRANS_PERM) { + for (i = 0; i < 64; i++) + block[(i & 0x24) | ((i & 3) << 3) | ((i >> 3) & 3)] = block1[i]; } else { - for(i=0; i<64; i++) - block[i]= block1[i]; + for (i = 0; i < 64; i++) + block[i] = block1[i]; } -#if 0 // simulate mismatch control for tested IDCT but not the ref -{ int sum=0; - for(i=0;i<64;i++) - sum+=block[i]; - - if((sum&1)==0) block[63]^=1; -} -#endif - fdct_func(block); + dct->func(block); mmx_emms(); - if (form == SCALE_PERM) { - for(i=0; i<64; i++) { - scale = 8*(1 << (AANSCALE_BITS + 11)) / ff_aanscales[i]; - block[i] = (block[i] * scale /*+ (1<<(AANSCALE_BITS-1))*/) >> AANSCALE_BITS; + if (dct->format == SCALE_PERM) { + for (i = 0; i < 64; i++) { + scale = 8 * (1 << (AANSCALE_BITS + 11)) / ff_aanscales[i]; + block[i] = (block[i] * scale) >> AANSCALE_BITS; } } - fdct_ref(block1); + ref(block1); - blockSumErr=0; - for(i=0;i<64;i++) { - v = abs(block[i] - block1[i]); + blockSumErr = 0; + for (i = 0; i < 64; i++) { + int err = block[i] - block1[i]; + err_sum += err; + v = abs(err); if (v > err_inf) err_inf = v; err2 += v * v; sysErr[i] += block[i] - block1[i]; blockSumErr += v; - if( abs(block[i])>maxout) maxout=abs(block[i]); + if (abs(block[i]) > maxout) + maxout = abs(block[i]); } - if(blockSumErrMax < 
blockSumErr) blockSumErrMax= blockSumErr; -#if 0 // print different matrix pairs - if(blockSumErr){ - printf("\n"); - for(i=0; i<64; i++){ - if((i&7)==0) printf("\n"); - printf("%4d ", block_org[i]); - } - for(i=0; i<64; i++){ - if((i&7)==0) printf("\n"); - printf("%4d ", block[i] - block1[i]); - } - } -#endif + if (blockSumErrMax < blockSumErr) + blockSumErrMax = blockSumErr; } - for(i=0; i<64; i++) sysErrMax= FFMAX(sysErrMax, FFABS(sysErr[i])); + for (i = 0; i < 64; i++) + sysErrMax = FFMAX(sysErrMax, FFABS(sysErr[i])); - for(i=0; i<64; i++){ - if(i%8==0) printf("\n"); - printf("%7d ", (int)sysErr[i]); + for (i = 0; i < 64; i++) { + if (i % 8 == 0) + printf("\n"); + printf("%7d ", (int) sysErr[i]); } printf("\n"); - printf("%s %s: err_inf=%d err2=%0.8f syserr=%0.8f maxout=%d blockSumErr=%d\n", - is_idct ? "IDCT" : "DCT", - name, err_inf, (double)err2 / NB_ITS / 64.0, (double)sysErrMax / NB_ITS, maxout, blockSumErrMax); + omse = (double) err2 / NB_ITS / 64; + ome = (double) err_sum / NB_ITS / 64; + + spec_err = is_idct && (err_inf > 1 || omse > 0.02 || fabs(ome) > 0.0015); + + printf("%s %s: ppe=%d omse=%0.8f ome=%0.8f syserr=%0.8f maxout=%d blockSumErr=%d\n", + is_idct ? "IDCT" : "DCT", dct->name, err_inf, + omse, ome, (double) sysErrMax / NB_ITS, + maxout, blockSumErrMax); + + if (spec_err && !dct->nonspec) + return 1; + + if (!speed) + return 0; /* speed test */ - for(i=0;i<64;i++) + for (i = 0; i < 64; i++) block1[i] = 0; - switch(test){ + + switch (test) { case 0: - for(i=0;i<64;i++) + for (i = 0; i < 64; i++) block1[i] = av_lfg_get(&prng) % (2*vals) -vals; - if (is_idct){ + if (is_idct) { ff_ref_fdct(block1); - - for(i=0;i<64;i++) - block1[i]>>=3; + for (i = 0; i < 64; i++) + block1[i] >>= 3; } - break; - case 1:{ + break; + case 1: case 2: block1[0] = av_lfg_get(&prng) % (2*vals) -vals; block1[1] = av_lfg_get(&prng) % (2*vals) -vals; block1[2] = av_lfg_get(&prng) % (2*vals) -vals; block1[3] = av_lfg_get(&prng) % (2*vals) -vals; - }break; + break; } - if (form == MMX_PERM) { - for(i=0;i<64;i++) + if (dct->format == MMX_PERM) { + for (i = 0; i < 64; i++) block[idct_mmx_perm[i]] = block1[i]; - } else if(form == MMX_SIMPLE_PERM) { - for(i=0;i<64;i++) + } else if (dct->format == MMX_SIMPLE_PERM) { + for (i = 0; i < 64; i++) block[idct_simple_mmx_perm[i]] = block1[i]; } else { - for(i=0; i<64; i++) - block[i]= block1[i]; + for (i = 0; i < 64; i++) + block[i] = block1[i]; } ti = gettime(); it1 = 0; do { - for(it=0;it<NB_ITS_SPEED;it++) { - for(i=0; i<64; i++) - block[i]= block1[i]; -// memcpy(block, block1, sizeof(DCTELEM) * 64); -// do not memcpy especially not fastmemcpy because it does movntq !!! - fdct_func(block); + for (it = 0; it < NB_ITS_SPEED; it++) { + for (i = 0; i < 64; i++) + block[i] = block1[i]; + dct->func(block); } it1 += NB_ITS_SPEED; ti1 = gettime() - ti; } while (ti1 < 1000000); mmx_emms(); - printf("%s %s: %0.1f kdct/s\n", - is_idct ? "IDCT" : "DCT", - name, (double)it1 * 1000.0 / (double)ti1); + printf("%s %s: %0.1f kdct/s\n", is_idct ? "IDCT" : "DCT", dct->name, + (double) it1 * 1000.0 / (double) ti1); + + return 0; } DECLARE_ALIGNED(8, static uint8_t, img_dest)[64]; @@ -392,19 +384,19 @@ static void idct248_ref(uint8_t *dest, int linesize, int16_t *block) if (!init) { init = 1; - for(i=0;i<8;i++) { + for (i = 0; i < 8; i++) { sum = 0; - for(j=0;j<8;j++) { - s = (i==0) ? sqrt(1.0/8.0) : sqrt(1.0/4.0); + for (j = 0; j < 8; j++) { + s = (i == 0) ? 
sqrt(1.0 / 8.0) : sqrt(1.0 / 4.0); c8[i][j] = s * cos(M_PI * i * (j + 0.5) / 8.0); sum += c8[i][j] * c8[i][j]; } } - for(i=0;i<4;i++) { + for (i = 0; i < 4; i++) { sum = 0; - for(j=0;j<4;j++) { - s = (i==0) ? sqrt(1.0/4.0) : sqrt(1.0/2.0); + for (j = 0; j < 4; j++) { + s = (i == 0) ? sqrt(1.0 / 4.0) : sqrt(1.0 / 2.0); c4[i][j] = s * cos(M_PI * i * (j + 0.5) / 4.0); sum += c4[i][j] * c4[i][j]; } @@ -413,58 +405,59 @@ static void idct248_ref(uint8_t *dest, int linesize, int16_t *block) /* butterfly */ s = 0.5 * sqrt(2.0); - for(i=0;i<4;i++) { - for(j=0;j<8;j++) { - block1[8*(2*i)+j] = (block[8*(2*i)+j] + block[8*(2*i+1)+j]) * s; - block1[8*(2*i+1)+j] = (block[8*(2*i)+j] - block[8*(2*i+1)+j]) * s; + for (i = 0; i < 4; i++) { + for (j = 0; j < 8; j++) { + block1[8 * (2 * i) + j] = + (block[8 * (2 * i) + j] + block[8 * (2 * i + 1) + j]) * s; + block1[8 * (2 * i + 1) + j] = + (block[8 * (2 * i) + j] - block[8 * (2 * i + 1) + j]) * s; } } /* idct8 on lines */ - for(i=0;i<8;i++) { - for(j=0;j<8;j++) { + for (i = 0; i < 8; i++) { + for (j = 0; j < 8; j++) { sum = 0; - for(k=0;k<8;k++) - sum += c8[k][j] * block1[8*i+k]; - block2[8*i+j] = sum; + for (k = 0; k < 8; k++) + sum += c8[k][j] * block1[8 * i + k]; + block2[8 * i + j] = sum; } } /* idct4 */ - for(i=0;i<8;i++) { - for(j=0;j<4;j++) { + for (i = 0; i < 8; i++) { + for (j = 0; j < 4; j++) { /* top */ sum = 0; - for(k=0;k<4;k++) - sum += c4[k][j] * block2[8*(2*k)+i]; - block3[8*(2*j)+i] = sum; + for (k = 0; k < 4; k++) + sum += c4[k][j] * block2[8 * (2 * k) + i]; + block3[8 * (2 * j) + i] = sum; /* bottom */ sum = 0; - for(k=0;k<4;k++) - sum += c4[k][j] * block2[8*(2*k+1)+i]; - block3[8*(2*j+1)+i] = sum; + for (k = 0; k < 4; k++) + sum += c4[k][j] * block2[8 * (2 * k + 1) + i]; + block3[8 * (2 * j + 1) + i] = sum; } } /* clamp and store the result */ - for(i=0;i<8;i++) { - for(j=0;j<8;j++) { - v = block3[8*i+j]; - if (v < 0) - v = 0; - else if (v > 255) - v = 255; - dest[i * linesize + j] = (int)rint(v); + for (i = 0; i < 8; i++) { + for (j = 0; j < 8; j++) { + v = block3[8 * i + j]; + if (v < 0) v = 0; + else if (v > 255) v = 255; + dest[i * linesize + j] = (int) rint(v); } } } static void idct248_error(const char *name, - void (*idct248_put)(uint8_t *dest, int line_size, int16_t *block)) + void (*idct248_put)(uint8_t *dest, int line_size, + int16_t *block), + int speed) { int it, i, it1, ti, ti1, err_max, v; - AVLFG prng; av_lfg_init(&prng, 1); @@ -472,23 +465,22 @@ static void idct248_error(const char *name, /* just one test to see if code is correct (precision is less important here) */ err_max = 0; - for(it=0;it<NB_ITS;it++) { - + for (it = 0; it < NB_ITS; it++) { /* XXX: use forward transform to generate values */ - for(i=0;i<64;i++) + for (i = 0; i < 64; i++) block1[i] = av_lfg_get(&prng) % 256 - 128; block1[0] += 1024; - for(i=0; i<64; i++) - block[i]= block1[i]; + for (i = 0; i < 64; i++) + block[i] = block1[i]; idct248_ref(img_dest1, 8, block); - for(i=0; i<64; i++) - block[i]= block1[i]; + for (i = 0; i < 64; i++) + block[i] = block1[i]; idct248_put(img_dest, 8, block); - for(i=0;i<64;i++) { - v = abs((int)img_dest[i] - (int)img_dest1[i]); + for (i = 0; i < 64; i++) { + v = abs((int) img_dest[i] - (int) img_dest1[i]); if (v == 255) printf("%d %d\n", img_dest[i], img_dest1[i]); if (v > err_max) @@ -514,18 +506,17 @@ static void idct248_error(const char *name, } #endif } - printf("%s %s: err_inf=%d\n", - 1 ? "IDCT248" : "DCT248", - name, err_max); + printf("%s %s: err_inf=%d\n", 1 ? 
"IDCT248" : "DCT248", name, err_max); + + if (!speed) + return; ti = gettime(); it1 = 0; do { - for(it=0;it<NB_ITS_SPEED;it++) { - for(i=0; i<64; i++) - block[i]= block1[i]; -// memcpy(block, block1, sizeof(DCTELEM) * 64); -// do not memcpy especially not fastmemcpy because it does movntq !!! + for (it = 0; it < NB_ITS_SPEED; it++) { + for (i = 0; i < 64; i++) + block[i] = block1[i]; idct248_put(img_dest, 8, block); } it1 += NB_ITS_SPEED; @@ -533,9 +524,8 @@ static void idct248_error(const char *name, } while (ti1 < 1000000); mmx_emms(); - printf("%s %s: %0.1f kdct/s\n", - 1 ? "IDCT248" : "DCT248", - name, (double)it1 * 1000.0 / (double)ti1); + printf("%s %s: %0.1f kdct/s\n", 1 ? "IDCT248" : "DCT248", name, + (double) it1 * 1000.0 / (double) ti1); } static void help(void) @@ -545,56 +535,67 @@ static void help(void) " 1 -> test with random sparse matrixes\n" " 2 -> do 3. test from mpeg4 std\n" "-i test IDCT implementations\n" - "-4 test IDCT248 implementations\n"); + "-4 test IDCT248 implementations\n" + "-t speed test\n"); } int main(int argc, char **argv) { int test_idct = 0, test_248_dct = 0; - int c,i; - int test=1; + int c, i; + int test = 1; + int speed = 0; + int err = 0; int bits=8; + cpu_flags = av_get_cpu_flags(); ff_ref_dct_init(); idct_mmx_init(); - for(i=0;i<256;i++) cropTbl[i + MAX_NEG_CROP] = i; - for(i=0;i<MAX_NEG_CROP;i++) { + for (i = 0; i < 256; i++) + cropTbl[i + MAX_NEG_CROP] = i; + for (i = 0; i < MAX_NEG_CROP; i++) { cropTbl[i] = 0; cropTbl[i + MAX_NEG_CROP + 256] = 255; } - for(;;) { - c = getopt(argc, argv, "ih4"); + for (;;) { + c = getopt(argc, argv, "ih4t"); if (c == -1) break; - switch(c) { + switch (c) { case 'i': test_idct = 1; break; case '4': test_248_dct = 1; break; - default : + case 't': + speed = 1; + break; + default: case 'h': help(); return 0; } } - if(optind <argc) test= atoi(argv[optind]); + if (optind < argc) + test = atoi(argv[optind]); if(optind+1 < argc) bits= atoi(argv[optind+1]); printf("ffmpeg DCT/IDCT test\n"); if (test_248_dct) { - idct248_error("SIMPLE-C", ff_simple_idct248_put); + idct248_error("SIMPLE-C", ff_simple_idct248_put, speed); } else { - for (i=0;algos[i].name;i++) - if (algos[i].is_idct == test_idct && !(~cpu_flags & algos[i].mm_support)) { - dct_error (algos[i].name, algos[i].is_idct, algos[i].func, algos[i].ref, algos[i].format, test, bits); - } + const struct algo *algos = test_idct ? 
idct_tab : fdct_tab; + for (i = 0; algos[i].name; i++) + if (!(~cpu_flags & algos[i].mm_support)) { + err |= dct_error(&algos[i], test, test_idct, speed, bits); + } } - return 0; + + return err; } diff --git a/libavcodec/dctref.h b/libavcodec/dctref.h index be481f07b0..f6fde8863a 100644 --- a/libavcodec/dctref.h +++ b/libavcodec/dctref.h @@ -22,10 +22,8 @@ #ifndef AVCODEC_DCTREF_H #define AVCODEC_DCTREF_H -#include "dsputil.h" - -void ff_ref_fdct(DCTELEM *block); -void ff_ref_idct(DCTELEM *block); +void ff_ref_fdct(short *block); +void ff_ref_idct(short *block); void ff_ref_dct_init(void); #endif /* AVCODEC_DCTREF_H */ diff --git a/libavcodec/dfa.c b/libavcodec/dfa.c index 598fedc980..f222a59459 100644 --- a/libavcodec/dfa.c +++ b/libavcodec/dfa.c @@ -384,14 +384,13 @@ static av_cold int dfa_decode_end(AVCodecContext *avctx) } AVCodec ff_dfa_decoder = { - "dfa", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_DFA, - sizeof(DfaContext), - dfa_decode_init, - NULL, - dfa_decode_end, - dfa_decode_frame, - CODEC_CAP_DR1, + .name = "dfa", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_DFA, + .priv_data_size = sizeof(DfaContext), + .init = dfa_decode_init, + .close = dfa_decode_end, + .decode = dfa_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Chronomaster DFA"), }; diff --git a/libavcodec/dnxhddata.c b/libavcodec/dnxhddata.c index f628a642a5..536636df94 100644 --- a/libavcodec/dnxhddata.c +++ b/libavcodec/dnxhddata.c @@ -22,6 +22,28 @@ #include "avcodec.h" #include "dnxhddata.h" +static const uint8_t dnxhd_1235_luma_weight[] = { + 0, 32, 32, 32, 33, 35, 38, 39, + 32, 33, 32, 33, 36, 36, 39, 42, + 32, 32, 33, 36, 35, 37, 41, 43, + 31, 33, 34, 36, 36, 40, 42, 48, + 32, 34, 36, 37, 39, 42, 46, 51, + 36, 37, 37, 39, 41, 46, 51, 55, + 37, 39, 41, 41, 47, 50, 55, 56, + 41, 42, 41, 44, 50, 53, 60, 60 +}; + +static const uint8_t dnxhd_1235_chroma_weight[] = { + 0, 32, 33, 34, 39, 41, 54, 59, + 33, 34, 35, 38, 43, 49, 58, 84, + 34, 37, 39, 44, 46, 55, 74, 87, + 40, 42, 47, 48, 58, 70, 87, 86, + 43, 50, 56, 63, 72, 94, 91, 82, + 55, 63, 65, 75, 93, 89, 85, 73, + 61, 67, 82, 81, 83, 90, 79, 73, + 74, 84, 75, 78, 90, 85, 73, 73 +}; + static const uint8_t dnxhd_1237_luma_weight[] = { 0, 32, 33, 34, 34, 36, 37, 36, 36, 37, 38, 38, 38, 39, 41, 44, @@ -108,7 +130,7 @@ static const uint8_t dnxhd_1242_chroma_weight[] = { 48, 49, 51, 51, 52, 52, 54, 54, 49, 49, 52, 53, 54, 54, 53, 53, 55, 59, 63, 62, 60, 60, 60, 60, - }; +}; static const uint8_t dnxhd_1243_luma_weight[] = { 0, 32, 32, 33, 33, 35, 35, 35, @@ -132,6 +154,28 @@ static const uint8_t dnxhd_1243_chroma_weight[] = { 46, 45, 46, 47, 47, 48, 47, 47, }; +static const uint8_t dnxhd_1250_luma_weight[] = { + 0, 32, 35, 35, 36, 36, 41, 43, + 32, 34, 35, 36, 37, 39, 43, 47, + 33, 34, 36, 38, 38, 42, 42, 50, + 34, 36, 38, 38, 41, 40, 47, 54, + 35, 38, 39, 40, 39, 45, 49, 58, + 38, 39, 40, 39, 46, 47, 54, 60, + 38, 39, 41, 46, 46, 48, 57, 62, + 40, 41, 44, 45, 49, 54, 63, 63 +}; + +static const uint8_t dnxhd_1250_chroma_weight[] = { + 0, 32, 35, 36, 40, 42, 51, 51, + 35, 36, 39, 39, 43, 51, 52, 55, + 36, 41, 41, 43, 51, 53, 54, 56, + 43, 44, 45, 50, 54, 54, 55, 57, + 45, 48, 50, 51, 55, 58, 59, 58, + 49, 52, 49, 57, 58, 62, 58, 60, + 51, 51, 56, 58, 62, 61, 59, 62, + 52, 52, 60, 61, 59, 59, 63, 63 +}; + static const uint8_t dnxhd_1251_luma_weight[] = { 0, 32, 32, 34, 34, 34, 34, 35, 35, 35, 36, 37, 36, 36, 35, 36, @@ -184,35 +228,144 @@ static const uint8_t dnxhd_1237_dc_bits[12] = { }; static const uint16_t dnxhd_1237_ac_codes[257] = { - 0, 
1, 4, 5, 12, 26, 27, 56, 57, 58, 59, 120, 121, 244, 245, 246, 247, 248, 498, 499, 500, 501, 502, 1006, 1007, 1008, 1009, 1010, 1011, 2024, 2025, 2026, 2027, 2028, 2029, 2030, 2031, 4064, 4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 8148, 8149, 8150, 8151, 8152, 8153, 8154, 8155, 8156, 8157, 8158, 16318, 16319, 16320, 16321, 16322, 16323, 16324, 16325, 16326, 16327, 16328, 16329, 16330, 16331, 16332, 16333, 32668, 32669, 32670, 32671, 32672, 32673, 32674, 32675, 32676, 32677, 32678, 32679, 32680, 32681, 32682, 32683, 32684, 65370, 65371, 65372, 65373, 65374, 65375, 65376, 65377, 65378, 65379, 65380, 65381, 65382, 65383, 65384, 65385, 65386, 65387, 65388, 65389, 65390, 65391, 65392, 65393, 65394, 65395, 65396, 65397, 65398, 65399, 65400, 65401, 65402, 65403, 65404, 65405, 65406, 65407, 65408, 65409, 65410, 65411, 65412, 65413, 65414, 65415, 65416, 65417, 65418, 65419, 65420, 65421, 65422, 65423, 65424, 65425, 65426, 65427, 65428, 65429, 65430, 65431, 65432, 65433, 65434, 65435, 65436, 65437, 65438, 65439, 65440, 65441, 65442, 65443, 65444, 65445, 65446, 65447, 65448, 65449, 65450, 65451, 65452, 65453, 65454, 65455, 65456, 65457, 65458, 65459, 65460, 65461, 65462, 65463, 65464, 65465, 65466, 65467, 65468, 65469, 65470, 65471, 65472, 65473, 65474, 65475, 65476, 65477, 65478, 65479, 65480, 65481, 65482, 65483, 65484, 65485, 65486, 65487, 65488, 65489, 65490, 65491, 65492, 65493, 65494, 65495, 65496, 65497, 65498, 65499, 65500, 65501, 65502, 65503, 65504, 65505, 65506, 65507, 65508, 65509, 65510, 65511, 65512, 65513, 65514, 65515, 65516, 65517, 65518, 65519, 65520, 65521, 65522, 65523, 65524, 65525, 65526, 65527, 65528, 65529, 65530, 65531, 65532, 65533, 65534, 65535, + 0, 1, 4, 5, 12, 26, 27, 56, + 57, 58, 59, 120, 121, 244, 245, 246, + 247, 248, 498, 499, 500, 501, 502, 1006, + 1007, 1008, 1009, 1010, 1011, 2024, 2025, 2026, + 2027, 2028, 2029, 2030, 2031, 4064, 4065, 4066, + 4067, 4068, 4069, 4070, 4071, 4072, 4073, 8148, + 8149, 8150, 8151, 8152, 8153, 8154, 8155, 8156, + 8157, 8158, 16318, 16319, 16320, 16321, 16322, 16323, + 16324, 16325, 16326, 16327, 16328, 16329, 16330, 16331, + 16332, 16333, 32668, 32669, 32670, 32671, 32672, 32673, + 32674, 32675, 32676, 32677, 32678, 32679, 32680, 32681, + 32682, 32683, 32684, 65370, 65371, 65372, 65373, 65374, + 65375, 65376, 65377, 65378, 65379, 65380, 65381, 65382, + 65383, 65384, 65385, 65386, 65387, 65388, 65389, 65390, + 65391, 65392, 65393, 65394, 65395, 65396, 65397, 65398, + 65399, 65400, 65401, 65402, 65403, 65404, 65405, 65406, + 65407, 65408, 65409, 65410, 65411, 65412, 65413, 65414, + 65415, 65416, 65417, 65418, 65419, 65420, 65421, 65422, + 65423, 65424, 65425, 65426, 65427, 65428, 65429, 65430, + 65431, 65432, 65433, 65434, 65435, 65436, 65437, 65438, + 65439, 65440, 65441, 65442, 65443, 65444, 65445, 65446, + 65447, 65448, 65449, 65450, 65451, 65452, 65453, 65454, + 65455, 65456, 65457, 65458, 65459, 65460, 65461, 65462, + 65463, 65464, 65465, 65466, 65467, 65468, 65469, 65470, + 65471, 65472, 65473, 65474, 65475, 65476, 65477, 65478, + 65479, 65480, 65481, 65482, 65483, 65484, 65485, 65486, + 65487, 65488, 65489, 65490, 65491, 65492, 65493, 65494, + 65495, 65496, 65497, 65498, 65499, 65500, 65501, 65502, + 65503, 65504, 65505, 65506, 65507, 65508, 65509, 65510, + 65511, 65512, 65513, 65514, 65515, 65516, 65517, 65518, + 65519, 65520, 65521, 65522, 65523, 65524, 65525, 65526, + 65527, 65528, 65529, 65530, 65531, 65532, 65533, 65534, + 65535, }; static const uint8_t dnxhd_1237_ac_bits[257] = { - 2, 2, 3, 3, 4, 5, 5, 6, 6, 6, 
6, 7, 7, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 2, 2, 3, 3, 4, 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, + 8, 8, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 11, 11, 11, + 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, + 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, }; static const uint8_t dnxhd_1237_ac_level[257] = { - 1, 1, 2, 0, 3, 4, 2, 5, 6, 7, 3, 8, 9, 10, 11, 12, 4, 5, 13, 14, 15, 16, 6, 17, 18, 19, 20, 21, 7, 22, 23, 24, 25, 26, 27, 8, 9, 28, 29, 30, 31, 32, 33, 34, 10, 11, 12, 35, 36, 37, 38, 39, 40, 41, 13, 14, 15, 16, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 17, 18, 19, 20, 21, 53, 54, 55, 56, 57, 58, 59, 60, 61, 64, 1, 22, 23, 24, 25, 26, 27, 62, 63, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 1, 1, 2, 0, 3, 4, 2, 5, 6, 7, 3, 8, 9, 10, 11, 12, + 4, 5, 13, 14, 15, 16, 6, 17, 18, 19, 20, 21, 7, 22, 23, 24, + 25, 26, 27, 8, 9, 28, 29, 30, 31, 32, 33, 34, 10, 11, 12, 35, + 36, 37, 38, 39, 40, 41, 13, 14, 15, 16, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 17, 18, 19, 20, 21, 53, 54, 55, 56, 57, 58, + 59, 60, 61, 64, 1, 22, 23, 24, 25, 26, 27, 62, 63, 2, 3, 4, + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 
35, 36, + 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, }; static const uint8_t dnxhd_1237_ac_run_flag[257] = { - 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, + 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, + 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, }; static const uint8_t dnxhd_1237_ac_index_flag[257] = { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, }; static const uint16_t dnxhd_1237_run_codes[62] = { - 0, 4, 10, 11, 24, 25, 26, 54, 55, 56, 57, 58, 118, 119, 240, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023, + 0, 4, 10, 11, 24, 25, 26, 54, + 55, 56, 57, 58, 118, 119, 240, 482, + 483, 484, 485, 486, 487, 488, 489, 490, + 491, 492, 493, 494, 990, 991, 992, 993, + 994, 995, 996, 997, 998, 999, 1000, 1001, + 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, + 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, + 1018, 1019, 1020, 1021, 1022, 1023, }; static const uint8_t dnxhd_1237_run_bits[62] = { - 1, 3, 4, 4, 5, 5, 5, 6, 6, 6, 6, 6, 7, 7, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 1, 3, 4, 4, 5, 5, 5, 6, 6, 6, 6, 6, 7, 7, 8, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, }; static const uint8_t dnxhd_1237_run[62] = { - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 53, 57, 58, 59, 60, 61, 62, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 54, 55, 56, + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 53, 57, 58, 59, 60, 61, 62, 22, 23, 24, 25, + 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 54, 55, 56, }; static const uint8_t dnxhd_1238_dc_codes[12] = { @@ -224,35 +377,144 @@ static const uint8_t dnxhd_1238_dc_bits[12] = { }; static const uint16_t dnxhd_1238_ac_codes[257] = { - 0, 1, 4, 10, 11, 24, 25, 26, 54, 55, 56, 57, 116, 117, 118, 119, 240, 241, 242, 243, 244, 245, 492, 493, 494, 495, 496, 497, 498, 499, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 2018, 2019, 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 4056, 4057, 4058, 4059, 4060, 4061, 4062, 4063, 4064, 4065, 4066, 4067, 4068, 4069, 8140, 8141, 8142, 8143, 8144, 8145, 8146, 8147, 8148, 8149, 8150, 8151, 8152, 8153, 8154, 8155, 8156, 16314, 16315, 16316, 16317, 16318, 16319, 16320, 16321, 16322, 16323, 16324, 16325, 16326, 16327, 16328, 16329, 16330, 16331, 16332, 16333, 16334, 16335, 16336, 16337, 16338, 32678, 32679, 32680, 32681, 32682, 32683, 32684, 32685, 32686, 32687, 32688, 32689, 32690, 32691, 32692, 32693, 32694, 32695, 32696, 32697, 32698, 32699, 32700, 32701, 32702, 32703, 32704, 32705, 65412, 65413, 65414, 65415, 65416, 65417, 65418, 65419, 65420, 65421, 65422, 65423, 65424, 65425, 65426, 65427, 65428, 65429, 65430, 65431, 65432, 65433, 65434, 65435, 65436, 65437, 65438, 65439, 65440, 65441, 65442, 65443, 65444, 65445, 65446, 65447, 65448, 65449, 65450, 65451, 65452, 65453, 65454, 65455, 65456, 65457, 65458, 65459, 65460, 65461, 65462, 65463, 65464, 65465, 65466, 65467, 65468, 65469, 65470, 65471, 65472, 65473, 65474, 65475, 65476, 65477, 65478, 65479, 65480, 65481, 65482, 65483, 65484, 65485, 65486, 65487, 65488, 65489, 65490, 65491, 65492, 
65493, 65494, 65495, 65496, 65497, 65498, 65499, 65500, 65501, 65502, 65503, 65504, 65505, 65506, 65507, 65508, 65509, 65510, 65511, 65512, 65513, 65514, 65515, 65516, 65517, 65518, 65519, 65520, 65521, 65522, 65523, 65524, 65525, 65526, 65527, 65528, 65529, 65530, 65531, 65532, 65533, 65534, 65535, + 0, 1, 4, 10, 11, 24, 25, 26, + 54, 55, 56, 57, 116, 117, 118, 119, + 240, 241, 242, 243, 244, 245, 492, 493, + 494, 495, 496, 497, 498, 499, 1000, 1001, + 1002, 1003, 1004, 1005, 1006, 1007, 1008, 2018, + 2019, 2020, 2021, 2022, 2023, 2024, 2025, 2026, + 2027, 4056, 4057, 4058, 4059, 4060, 4061, 4062, + 4063, 4064, 4065, 4066, 4067, 4068, 4069, 8140, + 8141, 8142, 8143, 8144, 8145, 8146, 8147, 8148, + 8149, 8150, 8151, 8152, 8153, 8154, 8155, 8156, + 16314, 16315, 16316, 16317, 16318, 16319, 16320, 16321, + 16322, 16323, 16324, 16325, 16326, 16327, 16328, 16329, + 16330, 16331, 16332, 16333, 16334, 16335, 16336, 16337, + 16338, 32678, 32679, 32680, 32681, 32682, 32683, 32684, + 32685, 32686, 32687, 32688, 32689, 32690, 32691, 32692, + 32693, 32694, 32695, 32696, 32697, 32698, 32699, 32700, + 32701, 32702, 32703, 32704, 32705, 65412, 65413, 65414, + 65415, 65416, 65417, 65418, 65419, 65420, 65421, 65422, + 65423, 65424, 65425, 65426, 65427, 65428, 65429, 65430, + 65431, 65432, 65433, 65434, 65435, 65436, 65437, 65438, + 65439, 65440, 65441, 65442, 65443, 65444, 65445, 65446, + 65447, 65448, 65449, 65450, 65451, 65452, 65453, 65454, + 65455, 65456, 65457, 65458, 65459, 65460, 65461, 65462, + 65463, 65464, 65465, 65466, 65467, 65468, 65469, 65470, + 65471, 65472, 65473, 65474, 65475, 65476, 65477, 65478, + 65479, 65480, 65481, 65482, 65483, 65484, 65485, 65486, + 65487, 65488, 65489, 65490, 65491, 65492, 65493, 65494, + 65495, 65496, 65497, 65498, 65499, 65500, 65501, 65502, + 65503, 65504, 65505, 65506, 65507, 65508, 65509, 65510, + 65511, 65512, 65513, 65514, 65515, 65516, 65517, 65518, + 65519, 65520, 65521, 65522, 65523, 65524, 65525, 65526, + 65527, 65528, 65529, 65530, 65531, 65532, 65533, 65534, + 65535, }; static const uint8_t dnxhd_1238_ac_bits[257] = { - 2, 2, 3, 4, 4, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 2, 2, 3, 4, 4, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, + 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, + 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, + 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, + 15, 15, 15, 15, 15, 15, 15, 
15, 15, 15, 15, 15, 15, 15, 15, 15, + 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, }; static const uint8_t dnxhd_1238_ac_level[257] = { - 1, 1, 2, 3, 0, 4, 5, 2, 6, 7, 8, 3, 9, 10, 11, 4, 12, 13, 14, 15, 16, 5, 17, 18, 19, 20, 21, 22, 6, 7, 23, 24, 25, 26, 27, 28, 29, 8, 9, 30, 31, 32, 33, 34, 35, 36, 37, 10, 11, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 12, 13, 14, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 15, 16, 17, 18, 62, 63, 64, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19, 20, 21, 22, 23, 24, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 40, 25, 26, 27, 28, 29, 30, 38, 39, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 1, 1, 2, 3, 0, 4, 5, 2, 6, 7, 8, 3, 9, 10, 11, 4, + 12, 13, 14, 15, 16, 5, 17, 18, 19, 20, 21, 22, 6, 7, 23, 24, + 25, 26, 27, 28, 29, 8, 9, 30, 31, 32, 33, 34, 35, 36, 37, 10, + 11, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 12, 13, 14, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 15, 16, 17, 18, + 62, 63, 64, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 19, 20, 21, 22, 23, 24, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 40, 25, + 26, 27, 28, 29, 30, 38, 39, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, }; /* 0 is EOB */ static const uint8_t dnxhd_1238_ac_run_flag[257] = { - 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 1, 
0, 0, 0, 0, 0, 0, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, }; static const uint8_t dnxhd_1238_ac_index_flag[257] = { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, + 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, }; static const uint16_t dnxhd_1235_1238_1241_run_codes[62] = { - 0, 4, 10, 11, 24, 25, 26, 27, 56, 57, 58, 59, 120, 242, 486, 487, 488, 489, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023, + 0, 4, 10, 11, 24, 25, 26, 27, + 56, 57, 58, 59, 120, 242, 486, 487, + 488, 489, 980, 981, 982, 983, 984, 985, + 986, 987, 988, 989, 990, 991, 992, 993, + 994, 995, 996, 997, 998, 999, 1000, 1001, + 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, + 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, + 1018, 1019, 1020, 1021, 1022, 1023, }; static const uint8_t dnxhd_1235_1238_1241_run_bits[62] = { - 1, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 1, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 8, 9, 9, + 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 
10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, }; static const uint8_t dnxhd_1238_run[62] = { - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 21, 17, 18, 19, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 20, 21, 17, 18, 19, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, }; static const uint8_t dnxhd_1235_1241_dc_codes[14] = { @@ -262,84 +524,551 @@ static const uint8_t dnxhd_1235_1241_dc_codes[14] = { static const uint8_t dnxhd_1235_1241_dc_bits[14] = { 4, 6, 4, 4, 4, 3, 3, 3, 3, 3, 4, 5, 7, 7, }; + static const uint16_t dnxhd_1235_1241_ac_codes[257] = { - 0, 1, 4, 10, 11, 24, 25, 26, 54, 55, 56, 57, 116, 117, 118, 119, 240, 241, 242, 243, 244, 245, 492, 493, 494, 495, 496, 497, 498, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023, 2024, 2025, 2026, 4054, 4055, 4056, 4057, 4058, 4059, 4060, 4061, 4062, 4063, 4064, 4065, 4066, 4067, 4068, 4069, 8140, 8141, 8142, 8143, 8144, 8145, 8146, 8147, 8148, 8149, 8150, 8151, 8152, 8153, 8154, 8155, 8156, 8157, 16316, 16317, 16318, 16319, 16320, 16321, 16322, 16323, 16324, 16325, 16326, 16327, 16328, 16329, 16330, 16331, 16332, 16333, 16334, 16335, 16336, 16337, 32676, 32677, 32678, 32679, 32680, 32681, 32682, 32683, 32684, 32685, 32686, 32687, 32688, 32689, 32690, 32691, 32692, 32693, 32694, 32695, 32696, 32697, 32698, 32699, 32700, 32701, 32702, 32703, 32704, 32705, 32706, 32707, 32708, 65418, 65419, 65420, 65421, 65422, 65423, 65424, 65425, 65426, 65427, 65428, 65429, 65430, 65431, 65432, 65433, 65434, 65435, 65436, 65437, 65438, 65439, 65440, 65441, 65442, 65443, 65444, 65445, 65446, 65447, 65448, 65449, 65450, 65451, 65452, 65453, 65454, 65455, 65456, 65457, 65458, 65459, 65460, 65461, 65462, 65463, 65464, 65465, 65466, 65467, 65468, 65469, 65470, 65471, 65472, 65473, 65474, 65475, 65476, 65477, 65478, 65479, 65480, 65481, 65482, 65483, 65484, 65485, 65486, 65487, 65488, 65489, 65490, 65491, 65492, 65493, 65494, 65495, 65496, 65497, 65498, 65499, 65500, 65501, 65502, 65503, 65504, 65505, 65506, 65507, 65508, 65509, 65510, 65511, 65512, 65513, 65514, 65515, 65516, 65517, 65518, 65519, 65520, 65521, 65522, 65523, 65524, 65525, 65526, 65527, 65528, 65529, 65530, 65531, 65532, 65533, 65534, 65535, + 0, 1, 4, 10, 11, 24, 25, 26, + 54, 55, 56, 57, 116, 117, 118, 119, + 240, 241, 242, 243, 244, 245, 492, 493, + 494, 495, 496, 497, 498, 998, 999, 1000, + 1001, 1002, 1003, 1004, 1005, 1006, 1007, 2016, + 2017, 2018, 2019, 2020, 2021, 2022, 2023, 2024, + 2025, 2026, 4054, 4055, 4056, 4057, 4058, 4059, + 4060, 4061, 4062, 4063, 4064, 4065, 4066, 4067, + 4068, 4069, 8140, 8141, 8142, 8143, 8144, 8145, + 8146, 8147, 8148, 8149, 8150, 8151, 8152, 8153, + 8154, 8155, 8156, 8157, 16316, 16317, 16318, 16319, + 16320, 16321, 16322, 16323, 16324, 16325, 16326, 16327, + 16328, 16329, 16330, 16331, 16332, 16333, 16334, 16335, + 16336, 16337, 32676, 32677, 32678, 32679, 32680, 32681, + 32682, 32683, 32684, 32685, 32686, 32687, 32688, 32689, + 32690, 32691, 32692, 32693, 32694, 32695, 32696, 32697, + 32698, 32699, 32700, 32701, 32702, 32703, 32704, 32705, + 32706, 32707, 32708, 65418, 65419, 65420, 65421, 65422, + 65423, 65424, 65425, 65426, 65427, 65428, 65429, 65430, + 65431, 65432, 
65433, 65434, 65435, 65436, 65437, 65438, + 65439, 65440, 65441, 65442, 65443, 65444, 65445, 65446, + 65447, 65448, 65449, 65450, 65451, 65452, 65453, 65454, + 65455, 65456, 65457, 65458, 65459, 65460, 65461, 65462, + 65463, 65464, 65465, 65466, 65467, 65468, 65469, 65470, + 65471, 65472, 65473, 65474, 65475, 65476, 65477, 65478, + 65479, 65480, 65481, 65482, 65483, 65484, 65485, 65486, + 65487, 65488, 65489, 65490, 65491, 65492, 65493, 65494, + 65495, 65496, 65497, 65498, 65499, 65500, 65501, 65502, + 65503, 65504, 65505, 65506, 65507, 65508, 65509, 65510, + 65511, 65512, 65513, 65514, 65515, 65516, 65517, 65518, + 65519, 65520, 65521, 65522, 65523, 65524, 65525, 65526, + 65527, 65528, 65529, 65530, 65531, 65532, 65533, 65534, + 65535, }; static const uint8_t dnxhd_1235_1241_ac_bits[257] = { - 2, 2, 3, 4, 4, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 2, 2, 3, 4, 4, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, + 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, + 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, + 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, }; static const uint8_t dnxhd_1235_1241_ac_level[257] = { - 1, 1, 2, 3, 0, 4, 5, 2, 6, 7, 8, 3, 9, 10, 11, 4, 12, 13, 14, 15, 16, 5, 17, 18, 19, 20, 21, 6, 7, 22, 23, 24, 25, 26, 27, 28, 29, 8, 9, 30, 31, 32, 33, 34, 35, 36, 37, 38, 10, 11, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 12, 13, 14, 15, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 1, 16, 17, 18, 19, 64, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 20, 21, 22, 23, 24, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 25, 26, 27, 28, 29, 30, 31, 32, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 
43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 1, 1, 2, 3, 0, 4, 5, 2, 6, 7, 8, 3, 9, 10, 11, 4, + 12, 13, 14, 15, 16, 5, 17, 18, 19, 20, 21, 6, 7, 22, 23, 24, + 25, 26, 27, 28, 29, 8, 9, 30, 31, 32, 33, 34, 35, 36, 37, 38, + 10, 11, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 12, 13, + 14, 15, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 1, + 16, 17, 18, 19, 64, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 20, 21, 22, 23, 24, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 25, 26, 27, 28, 29, 30, 31, 32, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, }; static const uint8_t dnxhd_1235_1241_ac_run_flag[257] = { - 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, }; static const uint8_t dnxhd_1235_1241_ac_index_flag[257] = { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, }; static const uint8_t dnxhd_1235_1241_run[62] = { - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, 20, 17, 19, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 18, 20, 17, 19, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, +}; + +static const uint8_t dnxhd_1250_dc_codes[14] = { + 10, 62, 11, 12, 13, 0, 1, 2, 3, 4, 14, 30, 126, 127 +}; +static const uint8_t dnxhd_1250_dc_bits[14] = { + 4, 6, 4, 4, 4, 3, 3, 3, 3, 3, 4, 5, 7, 7 +}; +static const uint16_t dnxhd_1250_ac_codes[257] = { + 0, 1, 4, 10, 11, 24, 25, 26, + 54, 55, 56, 57, 116, 117, 118, 119, + 240, 241, 242, 243, 244, 245, 492, 493, + 494, 495, 496, 497, 498, 998, 999, 1000, + 1001, 1002, 1003, 1004, 1005, 1006, 2014, 2015, + 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023, + 2024, 2025, 4052, 4053, 4054, 4055, 4056, 4057, + 4058, 4059, 4060, 4061, 4062, 4063, 4064, 4065, + 4066, 4067, 8136, 8137, 8138, 8139, 8140, 8141, + 8142, 8143, 8144, 8145, 8146, 8147, 8148, 8149, + 8150, 8151, 8152, 8153, 8154, 8155, 8156, 16314, + 16315, 16316, 16317, 16318, 16319, 16320, 16321, 16322, + 16323, 16324, 16325, 16326, 16327, 16328, 16329, 16330, + 16331, 16332, 16333, 16334, 16335, 16336, 16337, 16338, + 32678, 32679, 32680, 32681, 32682, 32683, 32684, 32685, + 32686, 32687, 32688, 32689, 32690, 32691, 32692, 32693, + 32694, 32695, 32696, 32697, 32698, 32699, 32700, 32701, + 32702, 32703, 32704, 32705, 32706, 32707, 32708, 32709, + 32710, 32711, 32712, 65426, 65427, 65428, 65429, 65430, + 65431, 65432, 65433, 65434, 65435, 65436, 65437, 65438, + 65439, 65440, 65441, 65442, 65443, 65444, 65445, 65446, + 65447, 65448, 65449, 65450, 65451, 65452, 65453, 65454, + 65455, 65456, 65457, 65458, 65459, 65460, 65461, 65462, + 65463, 65464, 65465, 65466, 65467, 65468, 65469, 65470, + 65471, 65472, 65473, 65474, 65475, 65476, 65477, 65478, + 65479, 65480, 65481, 65482, 65483, 65484, 65485, 65486, + 65487, 65488, 65489, 65490, 65491, 65492, 65493, 65494, + 65495, 65496, 65497, 65498, 65499, 65500, 65501, 65502, + 65503, 65504, 65505, 65506, 65507, 65508, 65509, 65510, + 65511, 65512, 65513, 65514, 65515, 65516, 65517, 65518, + 65519, 65520, 65521, 65522, 65523, 65524, 65525, 65526, 
+ 65527, 65528, 65529, 65530, 65531, 65532, 65533, 65534, + 65535 +}; +static const uint8_t dnxhd_1250_ac_bits[257] = { + 2, 2, 3, 4, 4, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, + 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, + 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, + 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, + 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16 +}; +static const uint8_t dnxhd_1250_ac_level[257] = { + 1, 1, 2, 3, 0, 4, 5, 2, 6, 7, 8, 3, 9, 10, 11, 4, + 12, 13, 14, 15, 16, 5, 17, 18, 19, 20, 21, 22, 6, 23, 24, 25, + 26, 27, 28, 29, 7, 8, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 9, 10, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 11, + 12, 13, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 1, 2, + 3, 4, 5, 14, 15, 16, 17, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 18, 19, 20, 21, + 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, + 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 55, 56, 22, 23, 24, + 25, 26, 27, 54, 57, 58, 59, 60, 61, 62, 63, 64, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64 +}; +static const uint8_t dnxhd_1250_ac_run_flag[257] = { + 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, + 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, + 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1 +}; +static const uint8_t dnxhd_1250_ac_index_flag[257] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, + 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1 +}; +static const uint16_t dnxhd_1250_run_codes[62] = { + 0, 4, 5, 12, 26, 27, 28, 58, + 118, 119, 120, 242, 486, 487, 976, 977, + 978, 979, 980, 981, 982, 983, 984, 985, + 986, 987, 988, 989, 990, 991, 992, 993, + 994, 995, 996, 997, 998, 999, 1000, 1001, + 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, + 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, + 1018, 1019, 1020, 1021, 1022, 1023 +}; +static const uint8_t dnxhd_1250_run_bits[62] = { + 1, 3, 3, 4, 5, 5, 5, 6, 7, 7, 7, 8, 9, 9, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10 +}; +static const uint8_t dnxhd_1250_run[62] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62 }; static const uint8_t dnxhd_1251_dc_codes[12] = { 0, 12, 13, 1, 2, 3, 4, 5, 14, 30, 62, 63, }; + static const uint8_t dnxhd_1251_dc_bits[12] = { 3, 4, 4, 3, 3, 3, 3, 3, 4, 5, 6, 6, }; + static const uint16_t dnxhd_1251_ac_codes[257] = { - 0, 1, 4, 10, 11, 24, 25, 26, 54, 55, 56, 57, 116, 117, 118, 119, 240, 241, 242, 243, 244, 245, 492, 493, 494, 495, 496, 497, 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023, 2024, 2025, 4052, 4053, 4054, 4055, 4056, 4057, 4058, 4059, 4060, 4061, 4062, 4063, 4064, 4065, 4066, 8134, 8135, 8136, 8137, 8138, 8139, 8140, 8141, 8142, 8143, 8144, 8145, 8146, 8147, 8148, 8149, 8150, 8151, 8152, 8153, 8154, 8155, 8156, 16314, 16315, 16316, 16317, 16318, 16319, 16320, 16321, 16322, 16323, 16324, 16325, 16326, 16327, 16328, 16329, 16330, 16331, 16332, 16333, 16334, 16335, 16336, 16337, 16338, 16339, 32680, 32681, 32682, 32683, 32684, 32685, 32686, 32687, 32688, 32689, 32690, 32691, 32692, 32693, 32694, 32695, 32696, 32697, 32698, 32699, 32700, 32701, 32702, 32703, 32704, 32705, 32706, 32707, 32708, 32709, 32710, 32711, 32712, 32713, 32714, 65430, 65431, 65432, 65433, 65434, 65435, 65436, 65437, 65438, 65439, 65440, 65441, 65442, 65443, 65444, 65445, 65446, 65447, 65448, 65449, 65450, 65451, 65452, 65453, 65454, 65455, 65456, 65457, 65458, 65459, 65460, 65461, 65462, 65463, 65464, 65465, 65466, 65467, 65468, 65469, 65470, 65471, 65472, 65473, 65474, 65475, 65476, 65477, 65478, 65479, 65480, 65481, 65482, 65483, 65484, 65485, 65486, 65487, 65488, 65489, 65490, 65491, 65492, 65493, 65494, 65495, 65496, 65497, 65498, 65499, 65500, 65501, 65502, 65503, 65504, 65505, 65506, 65507, 65508, 65509, 65510, 65511, 65512, 65513, 65514, 65515, 65516, 65517, 65518, 65519, 65520, 65521, 65522, 65523, 65524, 65525, 65526, 65527, 65528, 65529, 65530, 65531, 65532, 65533, 65534, 65535, + 0, 1, 4, 10, 11, 24, 25, 26, + 54, 55, 56, 57, 116, 117, 118, 119, + 240, 241, 242, 243, 244, 245, 492, 493, + 494, 495, 496, 497, 996, 997, 998, 999, + 1000, 1001, 1002, 1003, 1004, 1005, 2012, 2013, + 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, + 2022, 
2023, 2024, 2025, 4052, 4053, 4054, 4055, + 4056, 4057, 4058, 4059, 4060, 4061, 4062, 4063, + 4064, 4065, 4066, 8134, 8135, 8136, 8137, 8138, + 8139, 8140, 8141, 8142, 8143, 8144, 8145, 8146, + 8147, 8148, 8149, 8150, 8151, 8152, 8153, 8154, + 8155, 8156, 16314, 16315, 16316, 16317, 16318, 16319, + 16320, 16321, 16322, 16323, 16324, 16325, 16326, 16327, + 16328, 16329, 16330, 16331, 16332, 16333, 16334, 16335, + 16336, 16337, 16338, 16339, 32680, 32681, 32682, 32683, + 32684, 32685, 32686, 32687, 32688, 32689, 32690, 32691, + 32692, 32693, 32694, 32695, 32696, 32697, 32698, 32699, + 32700, 32701, 32702, 32703, 32704, 32705, 32706, 32707, + 32708, 32709, 32710, 32711, 32712, 32713, 32714, 65430, + 65431, 65432, 65433, 65434, 65435, 65436, 65437, 65438, + 65439, 65440, 65441, 65442, 65443, 65444, 65445, 65446, + 65447, 65448, 65449, 65450, 65451, 65452, 65453, 65454, + 65455, 65456, 65457, 65458, 65459, 65460, 65461, 65462, + 65463, 65464, 65465, 65466, 65467, 65468, 65469, 65470, + 65471, 65472, 65473, 65474, 65475, 65476, 65477, 65478, + 65479, 65480, 65481, 65482, 65483, 65484, 65485, 65486, + 65487, 65488, 65489, 65490, 65491, 65492, 65493, 65494, + 65495, 65496, 65497, 65498, 65499, 65500, 65501, 65502, + 65503, 65504, 65505, 65506, 65507, 65508, 65509, 65510, + 65511, 65512, 65513, 65514, 65515, 65516, 65517, 65518, + 65519, 65520, 65521, 65522, 65523, 65524, 65525, 65526, + 65527, 65528, 65529, 65530, 65531, 65532, 65533, 65534, + 65535, }; + static const uint8_t dnxhd_1251_ac_bits[257] = { - 2, 2, 3, 4, 4, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 2, 2, 3, 4, 4, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, + 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, + 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, + 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, + 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 
16, }; + static const uint8_t dnxhd_1251_ac_level[257] = { - 1, 1, 2, 3, 0, 4, 5, 2, 6, 7, 8, 3, 9, 10, 11, 4, 12, 13, 14, 15, 16, 5, 17, 18, 19, 20, 21, 6, 22, 23, 24, 25, 26, 27, 28, 29, 7, 8, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 9, 10, 11, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 12, 13, 14, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 1, 2, 3, 4, 5, 6, 7, 8, 15, 16, 17, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 18, 19, 20, 21, 22, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 23, 24, 25, 26, 27, 28, 59, 60, 61, 62, 63, 64, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 1, 1, 2, 3, 0, 4, 5, 2, 6, 7, 8, 3, 9, 10, 11, 4, + 12, 13, 14, 15, 16, 5, 17, 18, 19, 20, 21, 6, 22, 23, 24, 25, + 26, 27, 28, 29, 7, 8, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 9, 10, 11, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + 12, 13, 14, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 1, + 2, 3, 4, 5, 6, 7, 8, 15, 16, 17, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 18, + 19, 20, 21, 22, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + 58, 23, 24, 25, 26, 27, 28, 59, 60, 61, 62, 63, 64, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, }; + static const uint8_t dnxhd_1251_ac_run_flag[257] = { - 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, }; + static const uint8_t dnxhd_1251_ac_index_flag[257] = { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, + 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, }; + static const uint16_t dnxhd_1251_run_codes[62] = { - 0, 4, 5, 12, 26, 27, 28, 58, 118, 119, 120, 242, 486, 487, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023, + 0, 4, 5, 12, 26, 27, 28, 58, + 118, 119, 120, 242, 486, 487, 976, 977, + 978, 979, 980, 981, 982, 983, 984, 985, + 986, 987, 988, 989, 990, 991, 992, 993, + 994, 995, 996, 997, 998, 999, 1000, 1001, + 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, + 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, + 1018, 1019, 1020, 1021, 1022, 1023, }; + static const uint8_t dnxhd_1251_run_bits[62] = { - 1, 3, 3, 4, 5, 5, 5, 6, 7, 7, 7, 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 1, 3, 3, 4, 5, 5, 5, 6, 7, 7, 7, 8, 9, 9, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, }; + static const uint8_t dnxhd_1251_run[62] = { - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 
55, 56, 57, 58, 59, 60, 61, 62, }; static const uint8_t dnxhd_1252_dc_codes[12] = { 0, 12, 13, 1, 2, 3, 4, 5, 14, 30, 62, 63, }; + static const uint8_t dnxhd_1252_dc_bits[12] = { 3, 4, 4, 3, 3, 3, 3, 3, 4, 5, 6, 6, }; + static const uint16_t dnxhd_1252_ac_codes[257] = { - 0, 1, 4, 10, 11, 12, 26, 27, 56, 57, 58, 118, 119, 120, 242, 243, 244, 245, 246, 247, 496, 497, 498, 499, 500, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, 4060, 4061, 4062, 4063, 4064, 4065, 4066, 4067, 4068, 4069, 4070, 4071, 8144, 8145, 8146, 8147, 8148, 8149, 8150, 8151, 8152, 8153, 8154, 8155, 8156, 8157, 8158, 16318, 16319, 16320, 16321, 16322, 16323, 16324, 16325, 16326, 16327, 16328, 16329, 16330, 16331, 16332, 16333, 16334, 16335, 32672, 32673, 32674, 32675, 32676, 32677, 32678, 32679, 32680, 32681, 32682, 32683, 32684, 32685, 32686, 32687, 32688, 32689, 32690, 32691, 32692, 32693, 32694, 65390, 65391, 65392, 65393, 65394, 65395, 65396, 65397, 65398, 65399, 65400, 65401, 65402, 65403, 65404, 65405, 65406, 65407, 65408, 65409, 65410, 65411, 65412, 65413, 65414, 65415, 65416, 65417, 65418, 65419, 65420, 65421, 65422, 65423, 65424, 65425, 65426, 65427, 65428, 65429, 65430, 65431, 65432, 65433, 65434, 65435, 65436, 65437, 65438, 65439, 65440, 65441, 65442, 65443, 65444, 65445, 65446, 65447, 65448, 65449, 65450, 65451, 65452, 65453, 65454, 65455, 65456, 65457, 65458, 65459, 65460, 65461, 65462, 65463, 65464, 65465, 65466, 65467, 65468, 65469, 65470, 65471, 65472, 65473, 65474, 65475, 65476, 65477, 65478, 65479, 65480, 65481, 65482, 65483, 65484, 65485, 65486, 65487, 65488, 65489, 65490, 65491, 65492, 65493, 65494, 65495, 65496, 65497, 65498, 65499, 65500, 65501, 65502, 65503, 65504, 65505, 65506, 65507, 65508, 65509, 65510, 65511, 65512, 65513, 65514, 65515, 65516, 65517, 65518, 65519, 65520, 65521, 65522, 65523, 65524, 65525, 65526, 65527, 65528, 65529, 65530, 65531, 65532, 65533, 65534, 65535, + 0, 1, 4, 10, 11, 12, 26, 27, + 56, 57, 58, 118, 119, 120, 242, 243, + 244, 245, 246, 247, 496, 497, 498, 499, + 500, 1002, 1003, 1004, 1005, 1006, 1007, 1008, + 1009, 2020, 2021, 2022, 2023, 2024, 2025, 2026, + 2027, 2028, 2029, 4060, 4061, 4062, 4063, 4064, + 4065, 4066, 4067, 4068, 4069, 4070, 4071, 8144, + 8145, 8146, 8147, 8148, 8149, 8150, 8151, 8152, + 8153, 8154, 8155, 8156, 8157, 8158, 16318, 16319, + 16320, 16321, 16322, 16323, 16324, 16325, 16326, 16327, + 16328, 16329, 16330, 16331, 16332, 16333, 16334, 16335, + 32672, 32673, 32674, 32675, 32676, 32677, 32678, 32679, + 32680, 32681, 32682, 32683, 32684, 32685, 32686, 32687, + 32688, 32689, 32690, 32691, 32692, 32693, 32694, 65390, + 65391, 65392, 65393, 65394, 65395, 65396, 65397, 65398, + 65399, 65400, 65401, 65402, 65403, 65404, 65405, 65406, + 65407, 65408, 65409, 65410, 65411, 65412, 65413, 65414, + 65415, 65416, 65417, 65418, 65419, 65420, 65421, 65422, + 65423, 65424, 65425, 65426, 65427, 65428, 65429, 65430, + 65431, 65432, 65433, 65434, 65435, 65436, 65437, 65438, + 65439, 65440, 65441, 65442, 65443, 65444, 65445, 65446, + 65447, 65448, 65449, 65450, 65451, 65452, 65453, 65454, + 65455, 65456, 65457, 65458, 65459, 65460, 65461, 65462, + 65463, 65464, 65465, 65466, 65467, 65468, 65469, 65470, + 65471, 65472, 65473, 65474, 65475, 65476, 65477, 65478, + 65479, 65480, 65481, 65482, 65483, 65484, 65485, 65486, + 65487, 65488, 65489, 65490, 65491, 65492, 65493, 65494, + 65495, 65496, 65497, 65498, 65499, 65500, 65501, 65502, + 65503, 65504, 65505, 65506, 65507, 65508, 65509, 65510, + 65511, 65512, 
65513, 65514, 65515, 65516, 65517, 65518, + 65519, 65520, 65521, 65522, 65523, 65524, 65525, 65526, + 65527, 65528, 65529, 65530, 65531, 65532, 65533, 65534, + 65535, }; + static const uint8_t dnxhd_1252_ac_bits[257] = { - 2, 2, 3, 4, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 2, 2, 3, 4, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, + 8, 8, 8, 8, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, + 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, + 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, + 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, }; + static const uint8_t dnxhd_1252_ac_level[257] = { - 1, 1, 2, 3, 2, 0, 4, 5, 6, 7, 3, 8, 9, 10, 11, 12, 13, 14, 4, 5, 15, 16, 17, 18, 6, 19, 20, 21, 22, 23, 24, 7, 8, 25, 26, 27, 28, 29, 30, 31, 32, 9, 10, 33, 34, 35, 36, 37, 38, 39, 40, 41, 11, 12, 13, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 14, 15, 16, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 1, 2, 3, 17, 18, 19, 20, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 21, 22, 23, 24, 25, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 1, 1, 2, 3, 2, 0, 4, 5, 6, 7, 3, 8, 9, 10, 11, 12, + 13, 14, 4, 5, 15, 16, 17, 18, 6, 19, 20, 21, 22, 23, 24, 7, + 8, 25, 26, 27, 28, 29, 30, 31, 32, 9, 10, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 11, 12, 13, 42, 43, 44, 45, 46, 47, 48, 49, 
50, + 51, 52, 53, 14, 15, 16, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 1, 2, 3, 17, 18, 19, 20, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 21, 22, 23, 24, 25, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, + 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, }; + static const uint8_t dnxhd_1252_ac_run_flag[257] = { - 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, + 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, + 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, }; + static const uint8_t dnxhd_1252_ac_index_flag[257] = { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, + 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, }; const CIDEntry ff_dnxhd_cid_table[] = { + { 1235, 1920, 1080, 0, 917504, 917504, 6, 10, + dnxhd_1235_luma_weight, dnxhd_1235_chroma_weight, + dnxhd_1235_1241_dc_codes, dnxhd_1235_1241_dc_bits, + dnxhd_1235_1241_ac_codes, dnxhd_1235_1241_ac_bits, dnxhd_1235_1241_ac_level, + dnxhd_1235_1241_ac_run_flag, dnxhd_1235_1241_ac_index_flag, + dnxhd_1235_1238_1241_run_codes, dnxhd_1235_1238_1241_run_bits, dnxhd_1235_1241_run, + { 175, 185, 365, 440 } }, { 1237, 1920, 1080, 0, 606208, 606208, 4, 8, dnxhd_1237_luma_weight, dnxhd_1237_chroma_weight, dnxhd_1237_dc_codes, dnxhd_1237_dc_bits, @@ -375,6 +1104,13 @@ const CIDEntry ff_dnxhd_cid_table[] = { dnxhd_1238_ac_run_flag, dnxhd_1238_ac_index_flag, dnxhd_1235_1238_1241_run_codes, dnxhd_1235_1238_1241_run_bits, dnxhd_1238_run, { 185, 220 } }, + { 1250, 1280, 720, 0, 458752, 458752, 6, 10, + dnxhd_1250_luma_weight, dnxhd_1250_chroma_weight, + dnxhd_1250_dc_codes, dnxhd_1250_dc_bits, + dnxhd_1250_ac_codes, dnxhd_1250_ac_bits, dnxhd_1250_ac_level, + dnxhd_1250_ac_run_flag, dnxhd_1250_ac_index_flag, + dnxhd_1250_run_codes, dnxhd_1250_run_bits, dnxhd_1250_run, + { 90, 180, 220 } }, { 1251, 1280, 720, 0, 458752, 458752, 4, 8, dnxhd_1251_luma_weight, dnxhd_1251_chroma_weight, dnxhd_1251_dc_codes, dnxhd_1251_dc_bits, @@ -407,7 +1143,7 @@ int ff_dnxhd_get_cid_table(int cid) return -1; } -int ff_dnxhd_find_cid(AVCodecContext *avctx) +int ff_dnxhd_find_cid(AVCodecContext *avctx, int bit_depth) { int i, j; int mbs = avctx->bit_rate/1000000; @@ -417,7 +1153,7 @@ int ff_dnxhd_find_cid(AVCodecContext *avctx) const CIDEntry *cid = &ff_dnxhd_cid_table[i]; if (cid->width == avctx->width && cid->height == avctx->height && cid->interlaced == !!(avctx->flags & CODEC_FLAG_INTERLACED_DCT) && - cid->bit_depth == 8) { // until 10 bit is supported + cid->bit_depth == bit_depth) { for (j = 0; j < sizeof(cid->bit_rates); j++) { if (cid->bit_rates[j] == mbs) return cid->cid; diff --git a/libavcodec/dnxhddata.h b/libavcodec/dnxhddata.h index 32c77db0ef..4d03a600f4 100644 --- a/libavcodec/dnxhddata.h +++ b/libavcodec/dnxhddata.h @@ -46,6 +46,6 @@ typedef struct { extern const CIDEntry ff_dnxhd_cid_table[]; int ff_dnxhd_get_cid_table(int cid); -int ff_dnxhd_find_cid(AVCodecContext *avctx); +int ff_dnxhd_find_cid(AVCodecContext *avctx, int bit_depth); #endif /* AVCODEC_DNXHDDATA_H */ diff --git a/libavcodec/dnxhddec.c b/libavcodec/dnxhddec.c index 8cbe1a8111..04d42398e7 100644 --- a/libavcodec/dnxhddec.c +++ b/libavcodec/dnxhddec.c @@ -1,6 +1,9 @@ /* * VC3/DNxHD decoder. * Copyright (c) 2007 SmartJog S.A., Baptiste Coudurier <baptiste dot coudurier at smartjog dot com> + * Copyright (c) 2011 MirriAd Ltd + * + * 10 bit support added by MirriAd Ltd, Joseph Artsimovich <joseph@mirriad.com> * * This file is part of FFmpeg. 
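
With the extra bit_depth argument, ff_dnxhd_find_cid() now keys the compression-ID lookup on sample depth as well as on frame size, interlacing and bit rate, which is what lets an encoder reach the new 10-bit table entries (CID 1235 and 1250) added above. A minimal caller sketch, not part of the patch; the concrete values are illustrative only:

    /* hypothetical setup: 1920x1080 progressive, 10-bit, 185 Mb/s */
    int cid;
    avctx->width    = 1920;
    avctx->height   = 1080;
    avctx->bit_rate = 185000000;                 /* 185 is in the CID 1235 bit_rates list */
    avctx->flags   &= ~CODEC_FLAG_INTERLACED_DCT;
    cid = ff_dnxhd_find_cid(avctx, 10);          /* returns 0 when no profile matches */
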
* @@ -28,7 +31,7 @@ #include "dnxhddata.h" #include "dsputil.h" -typedef struct { +typedef struct DNXHDContext { AVCodecContext *avctx; AVFrame picture; GetBitContext gb; @@ -43,17 +46,22 @@ typedef struct { DECLARE_ALIGNED(16, DCTELEM, blocks)[8][64]; ScanTable scantable; const CIDEntry *cid_table; + int bit_depth; // 8, 10 or 0 if not initialized at all. + void (*decode_dct_block)(struct DNXHDContext *ctx, DCTELEM *block, + int n, int qscale); } DNXHDContext; #define DNXHD_VLC_BITS 9 #define DNXHD_DC_VLC_BITS 7 +static void dnxhd_decode_dct_block_8(DNXHDContext *ctx, DCTELEM *block, int n, int qscale); +static void dnxhd_decode_dct_block_10(DNXHDContext *ctx, DCTELEM *block, int n, int qscale); + static av_cold int dnxhd_decode_init(AVCodecContext *avctx) { DNXHDContext *ctx = avctx->priv_data; ctx->avctx = avctx; - dsputil_init(&ctx->dsp, avctx); avctx->coded_frame = &ctx->picture; avcodec_get_frame_defaults(&ctx->picture); ctx->picture.type = AV_PICTURE_TYPE_I; @@ -63,7 +71,7 @@ static av_cold int dnxhd_decode_init(AVCodecContext *avctx) static int dnxhd_init_vlc(DNXHDContext *ctx, int cid) { - if (!ctx->cid_table) { + if (cid != ctx->cid) { int index; if ((index = ff_dnxhd_get_cid_table(cid)) < 0) { @@ -71,10 +79,15 @@ static int dnxhd_init_vlc(DNXHDContext *ctx, int cid) return -1; } ctx->cid_table = &ff_dnxhd_cid_table[index]; + + free_vlc(&ctx->ac_vlc); + free_vlc(&ctx->dc_vlc); + free_vlc(&ctx->run_vlc); + init_vlc(&ctx->ac_vlc, DNXHD_VLC_BITS, 257, ctx->cid_table->ac_bits, 1, 1, ctx->cid_table->ac_codes, 2, 2, 0); - init_vlc(&ctx->dc_vlc, DNXHD_DC_VLC_BITS, ctx->cid_table->bit_depth+4, + init_vlc(&ctx->dc_vlc, DNXHD_DC_VLC_BITS, ctx->bit_depth + 4, ctx->cid_table->dc_bits, 1, 1, ctx->cid_table->dc_codes, 1, 1, 0); init_vlc(&ctx->run_vlc, DNXHD_VLC_BITS, 62, @@ -82,6 +95,7 @@ static int dnxhd_init_vlc(DNXHDContext *ctx, int cid) ctx->cid_table->run_codes, 2, 2, 0); ff_init_scantable(ctx->dsp.idct_permutation, &ctx->scantable, ff_zigzag_direct); + ctx->cid = cid; } return 0; } @@ -89,7 +103,7 @@ static int dnxhd_init_vlc(DNXHDContext *ctx, int cid) static int dnxhd_decode_header(DNXHDContext *ctx, const uint8_t *buf, int buf_size, int first_field) { static const uint8_t header_prefix[] = { 0x00, 0x00, 0x02, 0x80, 0x01 }; - int i; + int i, cid; if (buf_size < 0x280) return -1; @@ -111,14 +125,27 @@ static int dnxhd_decode_header(DNXHDContext *ctx, const uint8_t *buf, int buf_si av_dlog(ctx->avctx, "width %d, heigth %d\n", ctx->width, ctx->height); if (buf[0x21] & 0x40) { - av_log(ctx->avctx, AV_LOG_ERROR, "10 bit per component\n"); - return -1; + ctx->avctx->pix_fmt = PIX_FMT_YUV422P10; + ctx->avctx->bits_per_raw_sample = 10; + if (ctx->bit_depth != 10) { + dsputil_init(&ctx->dsp, ctx->avctx); + ctx->bit_depth = 10; + ctx->decode_dct_block = dnxhd_decode_dct_block_10; + } + } else { + ctx->avctx->pix_fmt = PIX_FMT_YUV422P; + ctx->avctx->bits_per_raw_sample = 8; + if (ctx->bit_depth != 8) { + dsputil_init(&ctx->dsp, ctx->avctx); + ctx->bit_depth = 8; + ctx->decode_dct_block = dnxhd_decode_dct_block_8; + } } - ctx->cid = AV_RB32(buf + 0x28); - av_dlog(ctx->avctx, "compression id %d\n", ctx->cid); + cid = AV_RB32(buf + 0x28); + av_dlog(ctx->avctx, "compression id %d\n", cid); - if (dnxhd_init_vlc(ctx, ctx->cid) < 0) + if (dnxhd_init_vlc(ctx, cid) < 0) return -1; if (buf_size < ctx->cid_table->coding_unit_size) { @@ -152,79 +179,103 @@ static int dnxhd_decode_header(DNXHDContext *ctx, const uint8_t *buf, int buf_si return 0; } -static int dnxhd_decode_dc(DNXHDContext *ctx) -{ - int len; 
- - len = get_vlc2(&ctx->gb, ctx->dc_vlc.table, DNXHD_DC_VLC_BITS, 1); - return len ? get_xbits(&ctx->gb, len) : 0; -} - -static void dnxhd_decode_dct_block(DNXHDContext *ctx, DCTELEM *block, int n, int qscale) +static av_always_inline void dnxhd_decode_dct_block(DNXHDContext *ctx, + DCTELEM *block, int n, + int qscale, + int index_bits, + int level_bias, + int level_shift) { - int i, j, index, index2; + int i, j, index1, index2, len; int level, component, sign; - const uint8_t *weigth_matrix; + const uint8_t *weight_matrix; + OPEN_READER(bs, &ctx->gb); if (n&2) { component = 1 + (n&1); - weigth_matrix = ctx->cid_table->chroma_weight; + weight_matrix = ctx->cid_table->chroma_weight; } else { component = 0; - weigth_matrix = ctx->cid_table->luma_weight; + weight_matrix = ctx->cid_table->luma_weight; } - ctx->last_dc[component] += dnxhd_decode_dc(ctx); + UPDATE_CACHE(bs, &ctx->gb); + GET_VLC(len, bs, &ctx->gb, ctx->dc_vlc.table, DNXHD_DC_VLC_BITS, 1); + if (len) { + level = GET_CACHE(bs, &ctx->gb); + LAST_SKIP_BITS(bs, &ctx->gb, len); + sign = ~level >> 31; + level = (NEG_USR32(sign ^ level, len) ^ sign) - sign; + ctx->last_dc[component] += level; + } block[0] = ctx->last_dc[component]; //av_log(ctx->avctx, AV_LOG_DEBUG, "dc %d\n", block[0]); + for (i = 1; ; i++) { - index = get_vlc2(&ctx->gb, ctx->ac_vlc.table, DNXHD_VLC_BITS, 2); - //av_log(ctx->avctx, AV_LOG_DEBUG, "index %d\n", index); - level = ctx->cid_table->ac_level[index]; + UPDATE_CACHE(bs, &ctx->gb); + GET_VLC(index1, bs, &ctx->gb, ctx->ac_vlc.table, + DNXHD_VLC_BITS, 2); + //av_log(ctx->avctx, AV_LOG_DEBUG, "index %d\n", index1); + level = ctx->cid_table->ac_level[index1]; if (!level) { /* EOB */ //av_log(ctx->avctx, AV_LOG_DEBUG, "EOB\n"); - return; + break; } - sign = get_sbits(&ctx->gb, 1); - if (ctx->cid_table->ac_index_flag[index]) { - level += get_bits(&ctx->gb, ctx->cid_table->index_bits)<<6; + sign = SHOW_SBITS(bs, &ctx->gb, 1); + SKIP_BITS(bs, &ctx->gb, 1); + + if (ctx->cid_table->ac_index_flag[index1]) { + level += SHOW_UBITS(bs, &ctx->gb, index_bits) << 6; + SKIP_BITS(bs, &ctx->gb, index_bits); } - if (ctx->cid_table->ac_run_flag[index]) { - index2 = get_vlc2(&ctx->gb, ctx->run_vlc.table, DNXHD_VLC_BITS, 2); + if (ctx->cid_table->ac_run_flag[index1]) { + UPDATE_CACHE(bs, &ctx->gb); + GET_VLC(index2, bs, &ctx->gb, ctx->run_vlc.table, + DNXHD_VLC_BITS, 2); i += ctx->cid_table->run[index2]; } if (i > 63) { av_log(ctx->avctx, AV_LOG_ERROR, "ac tex damaged %d, %d\n", n, i); - return; + break; } j = ctx->scantable.permutated[i]; //av_log(ctx->avctx, AV_LOG_DEBUG, "j %d\n", j); - //av_log(ctx->avctx, AV_LOG_DEBUG, "level %d, weigth %d\n", level, weigth_matrix[i]); - level = (2*level+1) * qscale * weigth_matrix[i]; - if (ctx->cid_table->bit_depth == 10) { - if (weigth_matrix[i] != 8) - level += 8; - level >>= 4; - } else { - if (weigth_matrix[i] != 32) - level += 32; - level >>= 6; - } + //av_log(ctx->avctx, AV_LOG_DEBUG, "level %d, weight %d\n", level, weight_matrix[i]); + level = (2*level+1) * qscale * weight_matrix[i]; + if (level_bias < 32 || weight_matrix[i] != level_bias) + level += level_bias; + level >>= level_shift; + //av_log(NULL, AV_LOG_DEBUG, "i %d, j %d, end level %d\n", i, j, level); block[j] = (level^sign) - sign; } + + CLOSE_READER(bs, &ctx->gb); +} + +static void dnxhd_decode_dct_block_8(DNXHDContext *ctx, DCTELEM *block, + int n, int qscale) +{ + dnxhd_decode_dct_block(ctx, block, n, qscale, 4, 32, 6); +} + +static void dnxhd_decode_dct_block_10(DNXHDContext *ctx, DCTELEM *block, + int n, int qscale) +{ + 
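
For reference, not part of the patch: the (index_bits, level_bias, level_shift) arguments baked into the two wrappers, (4, 32, 6) for 8-bit and (6, 8, 4) for 10-bit, correspond to the constants the removed code fetched from the CID table or selected with its "bit_depth == 10" branch; making them parameters of the always-inlined template gives each depth a branch-free specialization. A worked 8-bit dequantization step, assuming qscale = 4, weight_matrix[i] = 36 and a decoded AC level of 3:

    level = (2*3 + 1) * 4 * 36;   /* = 1008                            */
    level += 32;                  /* weight_matrix[i] != 32: add bias  */
    level >>= 6;                  /* 1040 >> 6 = 16                    */
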
dnxhd_decode_dct_block(ctx, block, n, qscale, 6, 8, 4); } static int dnxhd_decode_macroblock(DNXHDContext *ctx, int x, int y) { + int shift1 = ctx->bit_depth == 10; int dct_linesize_luma = ctx->picture.linesize[0]; int dct_linesize_chroma = ctx->picture.linesize[1]; uint8_t *dest_y, *dest_u, *dest_v; - int dct_offset; + int dct_y_offset, dct_x_offset; int qscale, i; qscale = get_bits(&ctx->gb, 11); @@ -233,7 +284,7 @@ static int dnxhd_decode_macroblock(DNXHDContext *ctx, int x, int y) for (i = 0; i < 8; i++) { ctx->dsp.clear_block(ctx->blocks[i]); - dnxhd_decode_dct_block(ctx, ctx->blocks[i], i, qscale); + ctx->decode_dct_block(ctx, ctx->blocks[i], i, qscale); } if (ctx->picture.interlaced_frame) { @@ -241,9 +292,9 @@ static int dnxhd_decode_macroblock(DNXHDContext *ctx, int x, int y) dct_linesize_chroma <<= 1; } - dest_y = ctx->picture.data[0] + ((y * dct_linesize_luma) << 4) + (x << 4); - dest_u = ctx->picture.data[1] + ((y * dct_linesize_chroma) << 4) + (x << 3); - dest_v = ctx->picture.data[2] + ((y * dct_linesize_chroma) << 4) + (x << 3); + dest_y = ctx->picture.data[0] + ((y * dct_linesize_luma) << 4) + (x << (4 + shift1)); + dest_u = ctx->picture.data[1] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1)); + dest_v = ctx->picture.data[2] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1)); if (ctx->cur_field) { dest_y += ctx->picture.linesize[0]; @@ -251,18 +302,19 @@ static int dnxhd_decode_macroblock(DNXHDContext *ctx, int x, int y) dest_v += ctx->picture.linesize[2]; } - dct_offset = dct_linesize_luma << 3; - ctx->dsp.idct_put(dest_y, dct_linesize_luma, ctx->blocks[0]); - ctx->dsp.idct_put(dest_y + 8, dct_linesize_luma, ctx->blocks[1]); - ctx->dsp.idct_put(dest_y + dct_offset, dct_linesize_luma, ctx->blocks[4]); - ctx->dsp.idct_put(dest_y + dct_offset + 8, dct_linesize_luma, ctx->blocks[5]); + dct_y_offset = dct_linesize_luma << 3; + dct_x_offset = 8 << shift1; + ctx->dsp.idct_put(dest_y, dct_linesize_luma, ctx->blocks[0]); + ctx->dsp.idct_put(dest_y + dct_x_offset, dct_linesize_luma, ctx->blocks[1]); + ctx->dsp.idct_put(dest_y + dct_y_offset, dct_linesize_luma, ctx->blocks[4]); + ctx->dsp.idct_put(dest_y + dct_y_offset + dct_x_offset, dct_linesize_luma, ctx->blocks[5]); if (!(ctx->avctx->flags & CODEC_FLAG_GRAY)) { - dct_offset = dct_linesize_chroma << 3; - ctx->dsp.idct_put(dest_u, dct_linesize_chroma, ctx->blocks[2]); - ctx->dsp.idct_put(dest_v, dct_linesize_chroma, ctx->blocks[3]); - ctx->dsp.idct_put(dest_u + dct_offset, dct_linesize_chroma, ctx->blocks[6]); - ctx->dsp.idct_put(dest_v + dct_offset, dct_linesize_chroma, ctx->blocks[7]); + dct_y_offset = dct_linesize_chroma << 3; + ctx->dsp.idct_put(dest_u, dct_linesize_chroma, ctx->blocks[2]); + ctx->dsp.idct_put(dest_v, dct_linesize_chroma, ctx->blocks[3]); + ctx->dsp.idct_put(dest_u + dct_y_offset, dct_linesize_chroma, ctx->blocks[6]); + ctx->dsp.idct_put(dest_v + dct_y_offset, dct_linesize_chroma, ctx->blocks[7]); } return 0; @@ -274,7 +326,7 @@ static int dnxhd_decode_macroblocks(DNXHDContext *ctx, const uint8_t *buf, int b for (y = 0; y < ctx->mb_height; y++) { ctx->last_dc[0] = ctx->last_dc[1] = - ctx->last_dc[2] = 1<<(ctx->cid_table->bit_depth+2); // for levels +2^(bitdepth-1) + ctx->last_dc[2] = 1 << (ctx->bit_depth + 2); // for levels +2^(bitdepth-1) init_get_bits(&ctx->gb, buf + ctx->mb_scan_index[y], (buf_size - ctx->mb_scan_index[y]) << 3); for (x = 0; x < ctx->mb_width; x++) { //START_TIMER; @@ -307,7 +359,6 @@ static int dnxhd_decode_frame(AVCodecContext *avctx, void *data, int *data_size, first_field 
= 1; } - avctx->pix_fmt = PIX_FMT_YUV422P; if (av_image_check_size(ctx->width, ctx->height, 0, avctx)) return -1; avcodec_set_dimensions(avctx, ctx->width, ctx->height); @@ -348,14 +399,13 @@ static av_cold int dnxhd_decode_close(AVCodecContext *avctx) } AVCodec ff_dnxhd_decoder = { - "dnxhd", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_DNXHD, - sizeof(DNXHDContext), - dnxhd_decode_init, - NULL, - dnxhd_decode_close, - dnxhd_decode_frame, - CODEC_CAP_DR1, + .name = "dnxhd", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_DNXHD, + .priv_data_size = sizeof(DNXHDContext), + .init = dnxhd_decode_init, + .close = dnxhd_decode_close, + .decode = dnxhd_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("VC3/DNxHD"), }; diff --git a/libavcodec/dnxhdenc.c b/libavcodec/dnxhdenc.c index 62bc9f0dd5..d6506a2c7f 100644 --- a/libavcodec/dnxhdenc.c +++ b/libavcodec/dnxhdenc.c @@ -1,8 +1,10 @@ /* * VC3/DNxHD encoder * Copyright (c) 2007 Baptiste Coudurier <baptiste dot coudurier at smartjog dot com> + * Copyright (c) 2011 MirriAd Ltd * * VC-3 encoder funded by the British Broadcasting Corporation + * 10 bit support added by MirriAd Ltd, Joseph Artsimovich <joseph@mirriad.com> * * This file is part of FFmpeg. * @@ -28,9 +30,11 @@ #include "avcodec.h" #include "dsputil.h" #include "mpegvideo.h" +#include "mpegvideo_common.h" #include "dnxhdenc.h" #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM +#define DNX10BIT_QMAT_SHIFT 18 // The largest value that will not lead to overflow for 10bit samples. static const AVOption options[]={ {"nitris_compat", "encode with Avid Nitris compatibility", offsetof(DNXHDEncContext, nitris_compat), FF_OPT_TYPE_INT, {.dbl = 0}, 0, 1, VE}, @@ -38,11 +42,9 @@ static const AVOption options[]={ }; static const AVClass class = { "dnxhd", av_default_item_name, options, LIBAVUTIL_VERSION_INT }; -int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow); - #define LAMBDA_FRAC_BITS 10 -static av_always_inline void dnxhd_get_pixels_8x4(DCTELEM *restrict block, const uint8_t *pixels, int line_size) +static void dnxhd_8bit_get_pixels_8x4_sym(DCTELEM *restrict block, const uint8_t *pixels, int line_size) { int i; for (i = 0; i < 4; i++) { @@ -53,10 +55,48 @@ static av_always_inline void dnxhd_get_pixels_8x4(DCTELEM *restrict block, const pixels += line_size; block += 8; } - memcpy(block , block- 8, sizeof(*block)*8); - memcpy(block+ 8, block-16, sizeof(*block)*8); - memcpy(block+16, block-24, sizeof(*block)*8); - memcpy(block+24, block-32, sizeof(*block)*8); + memcpy(block, block - 8, sizeof(*block) * 8); + memcpy(block + 8, block - 16, sizeof(*block) * 8); + memcpy(block + 16, block - 24, sizeof(*block) * 8); + memcpy(block + 24, block - 32, sizeof(*block) * 8); +} + +static av_always_inline void dnxhd_10bit_get_pixels_8x4_sym(DCTELEM *restrict block, const uint8_t *pixels, int line_size) +{ + int i; + + block += 32; + + for (i = 0; i < 4; i++) { + memcpy(block + i * 8, pixels + i * line_size, 8 * sizeof(*block)); + memcpy(block - (i+1) * 8, pixels + i * line_size, 8 * sizeof(*block)); + } +} + +static int dnxhd_10bit_dct_quantize(MpegEncContext *ctx, DCTELEM *block, + int n, int qscale, int *overflow) +{ + const uint8_t *scantable= ctx->intra_scantable.scantable; + const int *qmat = ctx->q_intra_matrix[qscale]; + int last_non_zero = 0; + int i; + + ctx->dsp.fdct(block); + + // Divide by 4 with rounding, to compensate scaling of DCT coefficients + block[0] = (block[0] + 2) >> 2; + + for (i = 1; i < 64; ++i) { + int j = 
scantable[i]; + int sign = block[j] >> 31; + int level = (block[j] ^ sign) - sign; + level = level * qmat[j] >> DNX10BIT_QMAT_SHIFT; + block[j] = (level ^ sign) - sign; + if (level) + last_non_zero = i; + } + + return last_non_zero; } static int dnxhd_init_vlc(DNXHDEncContext *ctx) @@ -65,9 +105,9 @@ static int dnxhd_init_vlc(DNXHDEncContext *ctx) int max_level = 1<<(ctx->cid_table->bit_depth+2); FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->vlc_codes, max_level*4*sizeof(*ctx->vlc_codes), fail); - FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->vlc_bits , max_level*4*sizeof(*ctx->vlc_bits ), fail); - FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->run_codes, 63*2 , fail); - FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->run_bits , 63 , fail); + FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->vlc_bits, max_level*4*sizeof(*ctx->vlc_bits) , fail); + FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->run_codes, 63*2, fail); + FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->run_bits, 63, fail); ctx->vlc_codes += max_level*2; ctx->vlc_bits += max_level*2; @@ -119,31 +159,55 @@ static int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias) // init first elem to 1 to avoid div by 0 in convert_matrix uint16_t weight_matrix[64] = {1,}; // convert_matrix needs uint16_t* int qscale, i; + const uint8_t *luma_weight_table = ctx->cid_table->luma_weight; + const uint8_t *chroma_weight_table = ctx->cid_table->chroma_weight; - FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->qmatrix_l, (ctx->m.avctx->qmax+1) * 64 * sizeof(int) , fail); - FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->qmatrix_c, (ctx->m.avctx->qmax+1) * 64 * sizeof(int) , fail); + FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->qmatrix_l, (ctx->m.avctx->qmax+1) * 64 * sizeof(int), fail); + FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->qmatrix_c, (ctx->m.avctx->qmax+1) * 64 * sizeof(int), fail); FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->qmatrix_l16, (ctx->m.avctx->qmax+1) * 64 * 2 * sizeof(uint16_t), fail); FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->qmatrix_c16, (ctx->m.avctx->qmax+1) * 64 * 2 * sizeof(uint16_t), fail); - for (i = 1; i < 64; i++) { - int j = ctx->m.dsp.idct_permutation[ff_zigzag_direct[i]]; - weight_matrix[j] = ctx->cid_table->luma_weight[i]; - } - ff_convert_matrix(&ctx->m.dsp, ctx->qmatrix_l, ctx->qmatrix_l16, weight_matrix, - ctx->m.intra_quant_bias, 1, ctx->m.avctx->qmax, 1); - for (i = 1; i < 64; i++) { - int j = ctx->m.dsp.idct_permutation[ff_zigzag_direct[i]]; - weight_matrix[j] = ctx->cid_table->chroma_weight[i]; - } - ff_convert_matrix(&ctx->m.dsp, ctx->qmatrix_c, ctx->qmatrix_c16, weight_matrix, - ctx->m.intra_quant_bias, 1, ctx->m.avctx->qmax, 1); - for (qscale = 1; qscale <= ctx->m.avctx->qmax; qscale++) { - for (i = 0; i < 64; i++) { - ctx->qmatrix_l [qscale] [i] <<= 2; ctx->qmatrix_c [qscale] [i] <<= 2; - ctx->qmatrix_l16[qscale][0][i] <<= 2; ctx->qmatrix_l16[qscale][1][i] <<= 2; - ctx->qmatrix_c16[qscale][0][i] <<= 2; ctx->qmatrix_c16[qscale][1][i] <<= 2; + if (ctx->cid_table->bit_depth == 8) { + for (i = 1; i < 64; i++) { + int j = ctx->m.dsp.idct_permutation[ff_zigzag_direct[i]]; + weight_matrix[j] = ctx->cid_table->luma_weight[i]; + } + ff_convert_matrix(&ctx->m.dsp, ctx->qmatrix_l, ctx->qmatrix_l16, weight_matrix, + ctx->m.intra_quant_bias, 1, ctx->m.avctx->qmax, 1); + for (i = 1; i < 64; i++) { + int j = ctx->m.dsp.idct_permutation[ff_zigzag_direct[i]]; + weight_matrix[j] = ctx->cid_table->chroma_weight[i]; + } + ff_convert_matrix(&ctx->m.dsp, ctx->qmatrix_c, ctx->qmatrix_c16, weight_matrix, + ctx->m.intra_quant_bias, 1, ctx->m.avctx->qmax, 1); + + for (qscale = 1; qscale <= ctx->m.avctx->qmax; qscale++) { + for (i = 0; i < 
64; i++) { + ctx->qmatrix_l [qscale] [i] <<= 2; ctx->qmatrix_c [qscale] [i] <<= 2; + ctx->qmatrix_l16[qscale][0][i] <<= 2; ctx->qmatrix_l16[qscale][1][i] <<= 2; + ctx->qmatrix_c16[qscale][0][i] <<= 2; ctx->qmatrix_c16[qscale][1][i] <<= 2; + } + } + } else { + // 10-bit + for (qscale = 1; qscale <= ctx->m.avctx->qmax; qscale++) { + for (i = 1; i < 64; i++) { + int j = ctx->m.dsp.idct_permutation[ff_zigzag_direct[i]]; + + // The quantization formula from the VC-3 standard is: + // quantized = sign(block[i]) * floor(abs(block[i]/s) * p / (qscale * weight_table[i])) + // Where p is 32 for 8-bit samples and 8 for 10-bit ones. + // The s factor compensates scaling of DCT coefficients done by the DCT routines, + // and therefore is not present in standard. It's 8 for 8-bit samples and 4 for 10-bit ones. + // We want values of ctx->qtmatrix_l and ctx->qtmatrix_r to be: + // ((1 << DNX10BIT_QMAT_SHIFT) * (p / s)) / (qscale * weight_table[i]) + // For 10-bit samples, p / s == 2 + ctx->qmatrix_l[qscale][j] = (1 << (DNX10BIT_QMAT_SHIFT + 1)) / (qscale * luma_weight_table[i]); + ctx->qmatrix_c[qscale][j] = (1 << (DNX10BIT_QMAT_SHIFT + 1)) / (qscale * chroma_weight_table[i]); + } } } + return 0; fail: return -1; @@ -166,10 +230,22 @@ static int dnxhd_init_rc(DNXHDEncContext *ctx) static int dnxhd_encode_init(AVCodecContext *avctx) { DNXHDEncContext *ctx = avctx->priv_data; - int i, index; + int i, index, bit_depth; + + switch (avctx->pix_fmt) { + case PIX_FMT_YUV422P: + bit_depth = 8; + break; + case PIX_FMT_YUV422P10: + bit_depth = 10; + break; + default: + av_log(avctx, AV_LOG_ERROR, "pixel format is incompatible with DNxHD\n"); + return -1; + } - ctx->cid = ff_dnxhd_find_cid(avctx); - if (!ctx->cid || avctx->pix_fmt != PIX_FMT_YUV422P) { + ctx->cid = ff_dnxhd_find_cid(avctx, bit_depth); + if (!ctx->cid) { av_log(avctx, AV_LOG_ERROR, "video parameters incompatible with DNxHD\n"); return -1; } @@ -182,15 +258,25 @@ static int dnxhd_encode_init(AVCodecContext *avctx) ctx->m.mb_intra = 1; ctx->m.h263_aic = 1; - ctx->get_pixels_8x4_sym = dnxhd_get_pixels_8x4; + avctx->bits_per_raw_sample = ctx->cid_table->bit_depth; dsputil_init(&ctx->m.dsp, avctx); ff_dct_common_init(&ctx->m); + if (!ctx->m.dct_quantize) + ctx->m.dct_quantize = dct_quantize_c; + + if (ctx->cid_table->bit_depth == 10) { + ctx->m.dct_quantize = dnxhd_10bit_dct_quantize; + ctx->get_pixels_8x4_sym = dnxhd_10bit_get_pixels_8x4_sym; + ctx->block_width_l2 = 4; + } else { + ctx->get_pixels_8x4_sym = dnxhd_8bit_get_pixels_8x4_sym; + ctx->block_width_l2 = 3; + } + #if HAVE_MMX ff_dnxhd_init_mmx(ctx); #endif - if (!ctx->m.dct_quantize) - ctx->m.dct_quantize = dct_quantize_c; ctx->m.mb_height = (avctx->height + 15) / 16; ctx->m.mb_width = (avctx->width + 15) / 16; @@ -219,7 +305,7 @@ static int dnxhd_encode_init(AVCodecContext *avctx) FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->slice_size, ctx->m.mb_height*sizeof(uint32_t), fail); FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->slice_offs, ctx->m.mb_height*sizeof(uint32_t), fail); FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_bits, ctx->m.mb_num *sizeof(uint16_t), fail); - FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_qscale, ctx->m.mb_num *sizeof(uint8_t) , fail); + FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_qscale, ctx->m.mb_num *sizeof(uint8_t), fail); ctx->frame.key_frame = 1; ctx->frame.pict_type = AV_PICTURE_TYPE_I; @@ -256,7 +342,7 @@ static int dnxhd_write_header(AVCodecContext *avctx, uint8_t *buf) AV_WB16(buf + 0x1a, avctx->width); // SPL AV_WB16(buf + 0x1d, avctx->height>>ctx->interlaced); // NAL - buf[0x21] = 0x38; // 
FIXME 8 bit per comp + buf[0x21] = ctx->cid_table->bit_depth == 10 ? 0x58 : 0x38; buf[0x22] = 0x88 + (ctx->interlaced<<2); AV_WB32(buf + 0x28, ctx->cid); // CID buf[0x2c] = ctx->interlaced ? 0 : 0x80; @@ -322,15 +408,27 @@ static av_always_inline void dnxhd_unquantize_c(DNXHDEncContext *ctx, DCTELEM *b if (level) { if (level < 0) { level = (1-2*level) * qscale * weight_matrix[i]; - if (weight_matrix[i] != 32) - level += 32; - level >>= 6; + if (ctx->cid_table->bit_depth == 10) { + if (weight_matrix[i] != 8) + level += 8; + level >>= 4; + } else { + if (weight_matrix[i] != 32) + level += 32; + level >>= 6; + } level = -level; } else { level = (2*level+1) * qscale * weight_matrix[i]; - if (weight_matrix[i] != 32) - level += 32; - level >>= 6; + if (ctx->cid_table->bit_depth == 10) { + if (weight_matrix[i] != 8) + level += 8; + level >>= 4; + } else { + if (weight_matrix[i] != 32) + level += 32; + level >>= 6; + } } block[j] = level; } @@ -342,7 +440,7 @@ static av_always_inline int dnxhd_ssd_block(DCTELEM *qblock, DCTELEM *block) int score = 0; int i; for (i = 0; i < 64; i++) - score += (block[i]-qblock[i])*(block[i]-qblock[i]); + score += (block[i] - qblock[i]) * (block[i] - qblock[i]); return score; } @@ -365,31 +463,35 @@ static av_always_inline int dnxhd_calc_ac_bits(DNXHDEncContext *ctx, DCTELEM *bl static av_always_inline void dnxhd_get_blocks(DNXHDEncContext *ctx, int mb_x, int mb_y) { - const uint8_t *ptr_y = ctx->thread[0]->src[0] + ((mb_y << 4) * ctx->m.linesize) + (mb_x << 4); - const uint8_t *ptr_u = ctx->thread[0]->src[1] + ((mb_y << 4) * ctx->m.uvlinesize) + (mb_x << 3); - const uint8_t *ptr_v = ctx->thread[0]->src[2] + ((mb_y << 4) * ctx->m.uvlinesize) + (mb_x << 3); + const int bs = ctx->block_width_l2; + const int bw = 1 << bs; + const uint8_t *ptr_y = ctx->thread[0]->src[0] + ((mb_y << 4) * ctx->m.linesize) + (mb_x << bs+1); + const uint8_t *ptr_u = ctx->thread[0]->src[1] + ((mb_y << 4) * ctx->m.uvlinesize) + (mb_x << bs); + const uint8_t *ptr_v = ctx->thread[0]->src[2] + ((mb_y << 4) * ctx->m.uvlinesize) + (mb_x << bs); DSPContext *dsp = &ctx->m.dsp; - dsp->get_pixels(ctx->blocks[0], ptr_y , ctx->m.linesize); - dsp->get_pixels(ctx->blocks[1], ptr_y + 8, ctx->m.linesize); - dsp->get_pixels(ctx->blocks[2], ptr_u , ctx->m.uvlinesize); - dsp->get_pixels(ctx->blocks[3], ptr_v , ctx->m.uvlinesize); + dsp->get_pixels(ctx->blocks[0], ptr_y, ctx->m.linesize); + dsp->get_pixels(ctx->blocks[1], ptr_y + bw, ctx->m.linesize); + dsp->get_pixels(ctx->blocks[2], ptr_u, ctx->m.uvlinesize); + dsp->get_pixels(ctx->blocks[3], ptr_v, ctx->m.uvlinesize); if (mb_y+1 == ctx->m.mb_height && ctx->m.avctx->height == 1080) { if (ctx->interlaced) { - ctx->get_pixels_8x4_sym(ctx->blocks[4], ptr_y + ctx->dct_y_offset , ctx->m.linesize); - ctx->get_pixels_8x4_sym(ctx->blocks[5], ptr_y + ctx->dct_y_offset + 8, ctx->m.linesize); - ctx->get_pixels_8x4_sym(ctx->blocks[6], ptr_u + ctx->dct_uv_offset , ctx->m.uvlinesize); - ctx->get_pixels_8x4_sym(ctx->blocks[7], ptr_v + ctx->dct_uv_offset , ctx->m.uvlinesize); + ctx->get_pixels_8x4_sym(ctx->blocks[4], ptr_y + ctx->dct_y_offset, ctx->m.linesize); + ctx->get_pixels_8x4_sym(ctx->blocks[5], ptr_y + ctx->dct_y_offset + bw, ctx->m.linesize); + ctx->get_pixels_8x4_sym(ctx->blocks[6], ptr_u + ctx->dct_uv_offset, ctx->m.uvlinesize); + ctx->get_pixels_8x4_sym(ctx->blocks[7], ptr_v + ctx->dct_uv_offset, ctx->m.uvlinesize); } else { - dsp->clear_block(ctx->blocks[4]); dsp->clear_block(ctx->blocks[5]); - dsp->clear_block(ctx->blocks[6]); dsp->clear_block(ctx->blocks[7]); 
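
A quick numeric check of the quantizer comment in dnxhd_init_qmat() above, with illustrative values only: for 10-bit samples p/s == 2 and DNX10BIT_QMAT_SHIFT is 18, so with qscale = 1 and a weight of 32 the table entry and its use in dnxhd_10bit_dct_quantize() come out as

    qmat  = (1 << 19) / (1 * 32);   /* = 16384                             */
    level = 100 * qmat >> 18;       /* = 6, i.e. floor(100 * 2 / (1 * 32)) */

which agrees with the direct formula up to the rounding of the precomputed division.
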
+ dsp->clear_block(ctx->blocks[4]); + dsp->clear_block(ctx->blocks[5]); + dsp->clear_block(ctx->blocks[6]); + dsp->clear_block(ctx->blocks[7]); } } else { - dsp->get_pixels(ctx->blocks[4], ptr_y + ctx->dct_y_offset , ctx->m.linesize); - dsp->get_pixels(ctx->blocks[5], ptr_y + ctx->dct_y_offset + 8, ctx->m.linesize); - dsp->get_pixels(ctx->blocks[6], ptr_u + ctx->dct_uv_offset , ctx->m.uvlinesize); - dsp->get_pixels(ctx->blocks[7], ptr_v + ctx->dct_uv_offset , ctx->m.uvlinesize); + dsp->get_pixels(ctx->blocks[4], ptr_y + ctx->dct_y_offset, ctx->m.linesize); + dsp->get_pixels(ctx->blocks[5], ptr_y + ctx->dct_y_offset + bw, ctx->m.linesize); + dsp->get_pixels(ctx->blocks[6], ptr_u + ctx->dct_uv_offset, ctx->m.uvlinesize); + dsp->get_pixels(ctx->blocks[7], ptr_v + ctx->dct_uv_offset, ctx->m.uvlinesize); } } @@ -416,7 +518,7 @@ static int dnxhd_calc_bits_thread(AVCodecContext *avctx, void *arg, int jobnr, i ctx->m.last_dc[0] = ctx->m.last_dc[1] = - ctx->m.last_dc[2] = 1024; + ctx->m.last_dc[2] = 1 << (ctx->cid_table->bit_depth + 2); for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) { unsigned mb = mb_y * ctx->m.mb_width + mb_x; @@ -439,6 +541,8 @@ static int dnxhd_calc_bits_thread(AVCodecContext *avctx, void *arg, int jobnr, i diff = block[0] - ctx->m.last_dc[n]; if (diff < 0) nbits = av_log2_16bit(-2*diff); else nbits = av_log2_16bit( 2*diff); + + assert(nbits < ctx->cid_table->bit_depth + 4); dc_bits += ctx->cid_table->dc_bits[nbits] + nbits; ctx->m.last_dc[n] = block[0]; @@ -464,7 +568,7 @@ static int dnxhd_encode_thread(AVCodecContext *avctx, void *arg, int jobnr, int ctx->m.last_dc[0] = ctx->m.last_dc[1] = - ctx->m.last_dc[2] = 1024; + ctx->m.last_dc[2] = 1 << (ctx->cid_table->bit_depth + 2); for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) { unsigned mb = mb_y * ctx->m.mb_width + mb_x; int qscale = ctx->mb_qscale[mb]; @@ -497,14 +601,14 @@ static void dnxhd_setup_threads_slices(DNXHDEncContext *ctx) for (mb_y = 0; mb_y < ctx->m.mb_height; mb_y++) { int thread_size; ctx->slice_offs[mb_y] = offset; - ctx->slice_size[mb_y] = 0; - for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) { - unsigned mb = mb_y * ctx->m.mb_width + mb_x; - ctx->slice_size[mb_y] += ctx->mb_bits[mb]; - } - ctx->slice_size[mb_y] = (ctx->slice_size[mb_y]+31)&~31; - ctx->slice_size[mb_y] >>= 3; - thread_size = ctx->slice_size[mb_y]; + ctx->slice_size[mb_y] = 0; + for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) { + unsigned mb = mb_y * ctx->m.mb_width + mb_x; + ctx->slice_size[mb_y] += ctx->mb_bits[mb]; + } + ctx->slice_size[mb_y] = (ctx->slice_size[mb_y]+31)&~31; + ctx->slice_size[mb_y] >>= 3; + thread_size = ctx->slice_size[mb_y]; offset += thread_size; } } @@ -514,13 +618,40 @@ static int dnxhd_mb_var_thread(AVCodecContext *avctx, void *arg, int jobnr, int DNXHDEncContext *ctx = avctx->priv_data; int mb_y = jobnr, mb_x; ctx = ctx->thread[threadnr]; - for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) { - unsigned mb = mb_y * ctx->m.mb_width + mb_x; - uint8_t *pix = ctx->thread[0]->src[0] + ((mb_y<<4) * ctx->m.linesize) + (mb_x<<4); - int sum = ctx->m.dsp.pix_sum(pix, ctx->m.linesize); - int varc = (ctx->m.dsp.pix_norm1(pix, ctx->m.linesize) - (((unsigned)(sum*sum))>>8)+128)>>8; - ctx->mb_cmp[mb].value = varc; - ctx->mb_cmp[mb].mb = mb; + if (ctx->cid_table->bit_depth == 8) { + uint8_t *pix = ctx->thread[0]->src[0] + ((mb_y<<4) * ctx->m.linesize); + for (mb_x = 0; mb_x < ctx->m.mb_width; ++mb_x, pix += 16) { + unsigned mb = mb_y * ctx->m.mb_width + mb_x; + int sum = ctx->m.dsp.pix_sum(pix, ctx->m.linesize); + int varc = 
(ctx->m.dsp.pix_norm1(pix, ctx->m.linesize) - (((unsigned)(sum*sum))>>8)+128)>>8; + ctx->mb_cmp[mb].value = varc; + ctx->mb_cmp[mb].mb = mb; + } + } else { // 10-bit + int const linesize = ctx->m.linesize >> 1; + for (mb_x = 0; mb_x < ctx->m.mb_width; ++mb_x) { + uint16_t *pix = (uint16_t*)ctx->thread[0]->src[0] + ((mb_y << 4) * linesize) + (mb_x << 4); + unsigned mb = mb_y * ctx->m.mb_width + mb_x; + int sum = 0; + int sqsum = 0; + int mean, sqmean; + int i, j; + // Macroblocks are 16x16 pixels, unlike DCT blocks which are 8x8. + for (i = 0; i < 16; ++i) { + for (j = 0; j < 16; ++j) { + // Turn 16-bit pixels into 10-bit ones. + int const sample = (unsigned)pix[j] >> 6; + sum += sample; + sqsum += sample * sample; + // 2^10 * 2^10 * 16 * 16 = 2^28, which is less than INT_MAX + } + pix += linesize; + } + mean = sum >> 8; // 16*16 == 2^8 + sqmean = sqsum >> 8; + ctx->mb_cmp[mb].value = sqmean - mean * mean; + ctx->mb_cmp[mb].mb = mb; + } } return 0; } @@ -862,15 +993,15 @@ static int dnxhd_encode_end(AVCodecContext *avctx) } AVCodec ff_dnxhd_encoder = { - "dnxhd", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_DNXHD, - sizeof(DNXHDEncContext), - dnxhd_encode_init, - dnxhd_encode_picture, - dnxhd_encode_end, + .name = "dnxhd", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_DNXHD, + .priv_data_size = sizeof(DNXHDEncContext), + .init = dnxhd_encode_init, + .encode = dnxhd_encode_picture, + .close = dnxhd_encode_end, .capabilities = CODEC_CAP_SLICE_THREADS, - .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_NONE}, + .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_YUV422P10, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("VC3/DNxHD"), .priv_class = &class, }; diff --git a/libavcodec/dnxhdenc.h b/libavcodec/dnxhdenc.h index a968ae0223..80b6f85c89 100644 --- a/libavcodec/dnxhdenc.h +++ b/libavcodec/dnxhdenc.h @@ -52,8 +52,12 @@ typedef struct DNXHDEncContext { struct DNXHDEncContext *thread[MAX_THREADS]; + // Because our samples are either 8 or 16 bits for 8-bit and 10-bit + // encoding respectively, these refer either to bytes or to two-byte words. 
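
For the 10-bit case the block-complexity measure is computed directly instead of through pix_sum()/pix_norm1(), but it is the same quantity: over the 256 samples of a 16x16 macroblock the loop accumulates sum and sqsum and then applies the usual variance identity (sketch mirroring the code above, names as in the patch):

    mean   = sum   >> 8;               /* sum / 256            */
    sqmean = sqsum >> 8;               /* sum of squares / 256 */
    value  = sqmean - mean * mean;     /* E[x^2] - (E[x])^2    */

which is what the 8-bit branch obtains from (pix_norm1 - sum*sum/256), scaled by the same 1/256.
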
unsigned dct_y_offset; unsigned dct_uv_offset; + unsigned block_width_l2; + int interlaced; int cur_field; diff --git a/libavcodec/dpx.c b/libavcodec/dpx.c index afd71cc173..e8dae300e5 100644 --- a/libavcodec/dpx.c +++ b/libavcodec/dpx.c @@ -234,15 +234,12 @@ static av_cold int decode_end(AVCodecContext *avctx) } AVCodec ff_dpx_decoder = { - "dpx", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_DPX, - sizeof(DPXContext), - decode_init, - NULL, - decode_end, - decode_frame, - 0, - NULL, + .name = "dpx", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_DPX, + .priv_data_size = sizeof(DPXContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, .long_name = NULL_IF_CONFIG_SMALL("DPX image"), }; diff --git a/libavcodec/dsicinav.c b/libavcodec/dsicinav.c index f12560714a..27f42041ba 100644 --- a/libavcodec/dsicinav.c +++ b/libavcodec/dsicinav.c @@ -345,26 +345,23 @@ static int cinaudio_decode_frame(AVCodecContext *avctx, AVCodec ff_dsicinvideo_decoder = { - "dsicinvideo", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_DSICINVIDEO, - sizeof(CinVideoContext), - cinvideo_decode_init, - NULL, - cinvideo_decode_end, - cinvideo_decode_frame, - CODEC_CAP_DR1, + .name = "dsicinvideo", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_DSICINVIDEO, + .priv_data_size = sizeof(CinVideoContext), + .init = cinvideo_decode_init, + .close = cinvideo_decode_end, + .decode = cinvideo_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Delphine Software International CIN video"), }; AVCodec ff_dsicinaudio_decoder = { - "dsicinaudio", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_DSICINAUDIO, - sizeof(CinAudioContext), - cinaudio_decode_init, - NULL, - NULL, - cinaudio_decode_frame, + .name = "dsicinaudio", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_DSICINAUDIO, + .priv_data_size = sizeof(CinAudioContext), + .init = cinaudio_decode_init, + .decode = cinaudio_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("Delphine Software International CIN audio"), }; diff --git a/libavcodec/dsputil.c b/libavcodec/dsputil.c index 0e596b1b01..6f8bbe8801 100644 --- a/libavcodec/dsputil.c +++ b/libavcodec/dsputil.c @@ -184,7 +184,7 @@ static int pix_norm1_c(uint8_t * pix, int line_size) s += sq[pix[6]]; s += sq[pix[7]]; #else -#if LONG_MAX > 2147483647 +#if HAVE_FAST_64BIT register uint64_t x=*(uint64_t*)pix; s += sq[x&0xff]; s += sq[(x>>8)&0xff]; @@ -306,25 +306,6 @@ static int sse16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) return s; } -static void get_pixels_c(DCTELEM *restrict block, const uint8_t *pixels, int line_size) -{ - int i; - - /* read the pixels */ - for(i=0;i<8;i++) { - block[0] = pixels[0]; - block[1] = pixels[1]; - block[2] = pixels[2]; - block[3] = pixels[3]; - block[4] = pixels[4]; - block[5] = pixels[5]; - block[6] = pixels[6]; - block[7] = pixels[7]; - pixels += line_size; - block += 8; - } -} - static void diff_pixels_c(DCTELEM *restrict block, const uint8_t *s1, const uint8_t *s2, int stride){ int i; @@ -423,27 +404,6 @@ void ff_put_signed_pixels_clamped_c(const DCTELEM *block, } } -static void put_pixels_nonclamped_c(const DCTELEM *block, uint8_t *restrict pixels, - int line_size) -{ - int i; - - /* read the pixels */ - for(i=0;i<8;i++) { - pixels[0] = block[0]; - pixels[1] = block[1]; - pixels[2] = block[2]; - pixels[3] = block[3]; - pixels[4] = block[4]; - pixels[5] = block[5]; - pixels[6] = block[6]; - pixels[7] = block[7]; - - pixels += line_size; - block += 8; - } -} - void ff_add_pixels_clamped_c(const DCTELEM *block, uint8_t *restrict pixels, int line_size) { @@ 
-525,22 +485,6 @@ static void fill_block8_c(uint8_t *block, uint8_t value, int line_size, int h) } } -static void scale_block_c(const uint8_t src[64]/*align 8*/, uint8_t *dst/*align 8*/, int linesize) -{ - int i, j; - uint16_t *dst1 = (uint16_t *) dst; - uint16_t *dst2 = (uint16_t *)(dst + linesize); - - for (j = 0; j < 8; j++) { - for (i = 0; i < 8; i++) { - dst1[i] = dst2[i] = src[i] * 0x0101; - } - src += 8; - dst1 += linesize; - dst2 += linesize; - } -} - #define avg2(a,b) ((a+b+1)>>1) #define avg4(a,b,c,d) ((a+b+c+d+2)>>2) @@ -818,27 +762,6 @@ static inline void avg_tpel_pixels_mc22_c(uint8_t *dst, const uint8_t *src, int dst += stride; } } -#if 0 -#define TPEL_WIDTH(width)\ -static void put_tpel_pixels ## width ## _mc00_c(uint8_t *dst, const uint8_t *src, int stride, int height){\ - void put_tpel_pixels_mc00_c(dst, src, stride, width, height);}\ -static void put_tpel_pixels ## width ## _mc10_c(uint8_t *dst, const uint8_t *src, int stride, int height){\ - void put_tpel_pixels_mc10_c(dst, src, stride, width, height);}\ -static void put_tpel_pixels ## width ## _mc20_c(uint8_t *dst, const uint8_t *src, int stride, int height){\ - void put_tpel_pixels_mc20_c(dst, src, stride, width, height);}\ -static void put_tpel_pixels ## width ## _mc01_c(uint8_t *dst, const uint8_t *src, int stride, int height){\ - void put_tpel_pixels_mc01_c(dst, src, stride, width, height);}\ -static void put_tpel_pixels ## width ## _mc11_c(uint8_t *dst, const uint8_t *src, int stride, int height){\ - void put_tpel_pixels_mc11_c(dst, src, stride, width, height);}\ -static void put_tpel_pixels ## width ## _mc21_c(uint8_t *dst, const uint8_t *src, int stride, int height){\ - void put_tpel_pixels_mc21_c(dst, src, stride, width, height);}\ -static void put_tpel_pixels ## width ## _mc02_c(uint8_t *dst, const uint8_t *src, int stride, int height){\ - void put_tpel_pixels_mc02_c(dst, src, stride, width, height);}\ -static void put_tpel_pixels ## width ## _mc12_c(uint8_t *dst, const uint8_t *src, int stride, int height){\ - void put_tpel_pixels_mc12_c(dst, src, stride, width, height);}\ -static void put_tpel_pixels ## width ## _mc22_c(uint8_t *dst, const uint8_t *src, int stride, int height){\ - void put_tpel_pixels_mc22_c(dst, src, stride, width, height);} -#endif #define QPEL_MC(r, OPNAME, RND, OP) \ static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ @@ -2246,7 +2169,7 @@ static int quant_psnr8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *s s->block_last_index[0/*FIXME*/]= s->fast_dct_quantize(s, temp, 0/*FIXME*/, s->qscale, &i); s->dct_unquantize_inter(s, temp, 0, s->qscale); - ff_simple_idct(temp); //FIXME + ff_simple_idct_8(temp); //FIXME for(i=0; i<64; i++) sum+= (temp[i]-bak[i])*(temp[i]-bak[i]); @@ -2520,50 +2443,6 @@ static void vector_fmul_scalar_c(float *dst, const float *src, float mul, dst[i] = src[i] * mul; } -static void vector_fmul_sv_scalar_2_c(float *dst, const float *src, - const float **sv, float mul, int len) -{ - int i; - for (i = 0; i < len; i += 2, sv++) { - dst[i ] = src[i ] * sv[0][0] * mul; - dst[i+1] = src[i+1] * sv[0][1] * mul; - } -} - -static void vector_fmul_sv_scalar_4_c(float *dst, const float *src, - const float **sv, float mul, int len) -{ - int i; - for (i = 0; i < len; i += 4, sv++) { - dst[i ] = src[i ] * sv[0][0] * mul; - dst[i+1] = src[i+1] * sv[0][1] * mul; - dst[i+2] = src[i+2] * sv[0][2] * mul; - dst[i+3] = src[i+3] * sv[0][3] * mul; - } -} - -static void sv_fmul_scalar_2_c(float *dst, const float **sv, float 
mul, - int len) -{ - int i; - for (i = 0; i < len; i += 2, sv++) { - dst[i ] = sv[0][0] * mul; - dst[i+1] = sv[0][1] * mul; - } -} - -static void sv_fmul_scalar_4_c(float *dst, const float **sv, float mul, - int len) -{ - int i; - for (i = 0; i < len; i += 4, sv++) { - dst[i ] = sv[0][0] * mul; - dst[i+1] = sv[0][1] * mul; - dst[i+2] = sv[0][2] * mul; - dst[i+3] = sv[0][3] * mul; - } -} - static void butterflies_float_c(float *restrict v1, float *restrict v2, int len) { @@ -2664,6 +2543,22 @@ static void apply_window_int16_c(int16_t *output, const int16_t *input, } } +static void vector_clip_int32_c(int32_t *dst, const int32_t *src, int32_t min, + int32_t max, unsigned int len) +{ + do { + *dst++ = av_clip(*src++, min, max); + *dst++ = av_clip(*src++, min, max); + *dst++ = av_clip(*src++, min, max); + *dst++ = av_clip(*src++, min, max); + *dst++ = av_clip(*src++, min, max); + *dst++ = av_clip(*src++, min, max); + *dst++ = av_clip(*src++, min, max); + *dst++ = av_clip(*src++, min, max); + len -= 8; + } while (len > 0); +} + #define W0 2048 #define W1 2841 /* 2048*sqrt (2)*cos (1*pi/16) */ #define W2 2676 /* 2048*sqrt (2)*cos (2*pi/16) */ @@ -2816,9 +2711,9 @@ av_cold void dsputil_static_init(void) int ff_check_alignment(void){ static int did_fail=0; - DECLARE_ALIGNED(16, int, aligned); + LOCAL_ALIGNED_16(int, aligned, [4]); - if((intptr_t)&aligned & 15){ + if((intptr_t)aligned & 15){ if(!did_fail){ #if HAVE_MMX || HAVE_ALTIVEC av_log(NULL, AV_LOG_ERROR, @@ -2841,44 +2736,28 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx) ff_check_alignment(); #if CONFIG_ENCODERS - if(avctx->dct_algo==FF_DCT_FASTINT) { - c->fdct = fdct_ifast; - c->fdct248 = fdct_ifast248; - } - else if(avctx->dct_algo==FF_DCT_FAAN) { - c->fdct = ff_faandct; - c->fdct248 = ff_faandct248; - } - else { - c->fdct = ff_jpeg_fdct_islow; //slow/accurate/default - c->fdct248 = ff_fdct248_islow; + if (avctx->bits_per_raw_sample == 10) { + c->fdct = ff_jpeg_fdct_islow_10; + c->fdct248 = ff_fdct248_islow_10; + } else { + if(avctx->dct_algo==FF_DCT_FASTINT) { + c->fdct = fdct_ifast; + c->fdct248 = fdct_ifast248; + } + else if(avctx->dct_algo==FF_DCT_FAAN) { + c->fdct = ff_faandct; + c->fdct248 = ff_faandct248; + } + else { + c->fdct = ff_jpeg_fdct_islow_8; //slow/accurate/default + c->fdct248 = ff_fdct248_islow_8; + } } #endif //CONFIG_ENCODERS if(avctx->lowres==1){ - if(avctx->idct_algo==FF_IDCT_INT || avctx->idct_algo==FF_IDCT_AUTO || !CONFIG_H264_DECODER){ - c->idct_put= ff_jref_idct4_put; - c->idct_add= ff_jref_idct4_add; - }else{ - if (avctx->codec_id != CODEC_ID_H264) { - c->idct_put= ff_h264_lowres_idct_put_8_c; - c->idct_add= ff_h264_lowres_idct_add_8_c; - } else { - switch (avctx->bits_per_raw_sample) { - case 9: - c->idct_put= ff_h264_lowres_idct_put_9_c; - c->idct_add= ff_h264_lowres_idct_add_9_c; - break; - case 10: - c->idct_put= ff_h264_lowres_idct_put_10_c; - c->idct_add= ff_h264_lowres_idct_add_10_c; - break; - default: - c->idct_put= ff_h264_lowres_idct_put_8_c; - c->idct_add= ff_h264_lowres_idct_add_8_c; - } - } - } + c->idct_put= ff_jref_idct4_put; + c->idct_add= ff_jref_idct4_add; c->idct = j_rev_dct4; c->idct_permutation_type= FF_NO_IDCT_PERM; }else if(avctx->lowres==2){ @@ -2892,6 +2771,12 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx) c->idct = j_rev_dct1; c->idct_permutation_type= FF_NO_IDCT_PERM; }else{ + if (avctx->bits_per_raw_sample == 10) { + c->idct_put = ff_simple_idct_put_10; + c->idct_add = ff_simple_idct_add_10; + c->idct = ff_simple_idct_10; + 
c->idct_permutation_type = FF_NO_IDCT_PERM; + } else { if(avctx->idct_algo==FF_IDCT_INT){ c->idct_put= ff_jref_idct_put; c->idct_add= ff_jref_idct_add; @@ -2916,24 +2801,18 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx) }else if(CONFIG_EATGQ_DECODER && avctx->idct_algo==FF_IDCT_EA) { c->idct_put= ff_ea_idct_put_c; c->idct_permutation_type= FF_NO_IDCT_PERM; - }else if(CONFIG_BINK_DECODER && avctx->idct_algo==FF_IDCT_BINK) { - c->idct = ff_bink_idct_c; - c->idct_add = ff_bink_idct_add_c; - c->idct_put = ff_bink_idct_put_c; - c->idct_permutation_type = FF_NO_IDCT_PERM; }else{ //accurate/default - c->idct_put= ff_simple_idct_put; - c->idct_add= ff_simple_idct_add; - c->idct = ff_simple_idct; + c->idct_put = ff_simple_idct_put_8; + c->idct_add = ff_simple_idct_add_8; + c->idct = ff_simple_idct_8; c->idct_permutation_type= FF_NO_IDCT_PERM; } + } } - c->get_pixels = get_pixels_c; c->diff_pixels = diff_pixels_c; c->put_pixels_clamped = ff_put_pixels_clamped_c; c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_c; - c->put_pixels_nonclamped = put_pixels_nonclamped_c; c->add_pixels_clamped = ff_add_pixels_clamped_c; c->sum_abs_dctelem = sum_abs_dctelem_c; c->gmc1 = gmc1_c; @@ -2943,7 +2822,6 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx) c->fill_block_tab[0] = fill_block16_c; c->fill_block_tab[1] = fill_block8_c; - c->scale_block = scale_block_c; /* TODO [0] 16 [1] 8 */ c->pix_abs[0][0] = pix_abs16_c; @@ -3106,16 +2984,11 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx) c->scalarproduct_int16 = scalarproduct_int16_c; c->scalarproduct_and_madd_int16 = scalarproduct_and_madd_int16_c; c->apply_window_int16 = apply_window_int16_c; + c->vector_clip_int32 = vector_clip_int32_c; c->scalarproduct_float = scalarproduct_float_c; c->butterflies_float = butterflies_float_c; c->vector_fmul_scalar = vector_fmul_scalar_c; - c->vector_fmul_sv_scalar[0] = vector_fmul_sv_scalar_2_c; - c->vector_fmul_sv_scalar[1] = vector_fmul_sv_scalar_4_c; - - c->sv_fmul_scalar[0] = sv_fmul_scalar_2_c; - c->sv_fmul_scalar[1] = sv_fmul_scalar_4_c; - c->shrink[0]= av_image_copy_plane; c->shrink[1]= ff_shrink22; c->shrink[2]= ff_shrink44; @@ -3156,13 +3029,14 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx) c->PFX ## _pixels_tab[IDX][15] = FUNCC(PFX ## NUM ## _mc33, depth) -#define BIT_DEPTH_FUNCS(depth)\ +#define BIT_DEPTH_FUNCS(depth, dct)\ + c->get_pixels = FUNCC(get_pixels ## dct , depth);\ c->draw_edges = FUNCC(draw_edges , depth);\ c->emulated_edge_mc = FUNC (ff_emulated_edge_mc , depth);\ - c->clear_block = FUNCC(clear_block , depth);\ - c->clear_blocks = FUNCC(clear_blocks , depth);\ - c->add_pixels8 = FUNCC(add_pixels8 , depth);\ - c->add_pixels4 = FUNCC(add_pixels4 , depth);\ + c->clear_block = FUNCC(clear_block ## dct , depth);\ + c->clear_blocks = FUNCC(clear_blocks ## dct , depth);\ + c->add_pixels8 = FUNCC(add_pixels8 ## dct , depth);\ + c->add_pixels4 = FUNCC(add_pixels4 ## dct , depth);\ c->put_no_rnd_pixels_l2[0] = FUNCC(put_no_rnd_pixels16_l2, depth);\ c->put_no_rnd_pixels_l2[1] = FUNCC(put_no_rnd_pixels8_l2 , depth);\ \ @@ -3194,21 +3068,26 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx) dspfunc2(avg_h264_qpel, 1, 8, depth);\ dspfunc2(avg_h264_qpel, 2, 4, depth); - if (avctx->codec_id != CODEC_ID_H264 || avctx->bits_per_raw_sample == 8) { - BIT_DEPTH_FUNCS(8) - } else { - switch (avctx->bits_per_raw_sample) { - case 9: - BIT_DEPTH_FUNCS(9) - break; - case 10: - BIT_DEPTH_FUNCS(10) - break; - default: - 
av_log(avctx, AV_LOG_DEBUG, "Unsupported bit depth: %d\n", avctx->bits_per_raw_sample); - BIT_DEPTH_FUNCS(8) - break; + switch (avctx->bits_per_raw_sample) { + case 9: + if (c->dct_bits == 32) { + BIT_DEPTH_FUNCS(9, _32); + } else { + BIT_DEPTH_FUNCS(9, _16); + } + break; + case 10: + if (c->dct_bits == 32) { + BIT_DEPTH_FUNCS(10, _32); + } else { + BIT_DEPTH_FUNCS(10, _16); } + break; + default: + av_log(avctx, AV_LOG_DEBUG, "Unsupported bit depth: %d\n", avctx->bits_per_raw_sample); + case 8: + BIT_DEPTH_FUNCS(8, _16); + break; } @@ -3268,4 +3147,3 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx) av_log(avctx, AV_LOG_ERROR, "Internal error, IDCT permutation not set\n"); } } - diff --git a/libavcodec/dsputil.h b/libavcodec/dsputil.h index 8dd26849e9..216c3b9557 100644 --- a/libavcodec/dsputil.h +++ b/libavcodec/dsputil.h @@ -40,8 +40,10 @@ typedef short DCTELEM; void fdct_ifast (DCTELEM *data); void fdct_ifast248 (DCTELEM *data); -void ff_jpeg_fdct_islow (DCTELEM *data); -void ff_fdct248_islow (DCTELEM *data); +void ff_jpeg_fdct_islow_8(DCTELEM *data); +void ff_jpeg_fdct_islow_10(DCTELEM *data); +void ff_fdct248_islow_8(DCTELEM *data); +void ff_fdct248_islow_10(DCTELEM *data); void j_rev_dct (DCTELEM *data); void j_rev_dct4 (DCTELEM *data); @@ -58,8 +60,6 @@ void ff_h264_idct8_add_ ## depth ## _c(uint8_t *dst, DCTELEM *block, int stride) void ff_h264_idct_add_ ## depth ## _c(uint8_t *dst, DCTELEM *block, int stride);\ void ff_h264_idct8_dc_add_ ## depth ## _c(uint8_t *dst, DCTELEM *block, int stride);\ void ff_h264_idct_dc_add_ ## depth ## _c(uint8_t *dst, DCTELEM *block, int stride);\ -void ff_h264_lowres_idct_add_ ## depth ## _c(uint8_t *dst, int stride, DCTELEM *block);\ -void ff_h264_lowres_idct_put_ ## depth ## _c(uint8_t *dst, int stride, DCTELEM *block);\ void ff_h264_idct_add16_ ## depth ## _c(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);\ void ff_h264_idct_add16intra_ ## depth ## _c(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);\ void ff_h264_idct8_add4_ ## depth ## _c(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);\ @@ -111,11 +111,6 @@ void ff_vp3_idct_dc_add_c(uint8_t *dest/*align 8*/, int line_size, const DCTELEM void ff_vp3_v_loop_filter_c(uint8_t *src, int stride, int *bounding_values); void ff_vp3_h_loop_filter_c(uint8_t *src, int stride, int *bounding_values); -/* Bink functions */ -void ff_bink_idct_c (DCTELEM *block); -void ff_bink_idct_add_c(uint8_t *dest, int linesize, DCTELEM *block); -void ff_bink_idct_put_c(uint8_t *dest, int linesize, DCTELEM *block); - /* EA functions */ void ff_ea_idct_put_c(uint8_t *dest, int linesize, DCTELEM *block); @@ -158,7 +153,7 @@ void clear_blocks_c(DCTELEM *blocks); /* add and put pixel (decoding) */ // blocksizes for op_pixels_func are 8x4,8x8 16x8 16x16 -//h for op_pixels_func is limited to {width/2, width} but never larger than 16 and never smaller then 4 +//h for op_pixels_func is limited to {width/2, width} but never larger than 16 and never smaller than 4 typedef void (*op_pixels_func)(uint8_t *block/*align width (8 or 16)*/, const uint8_t *pixels/*align 1*/, int line_size, int h); typedef void (*tpel_mc_func)(uint8_t *block/*align width (8 or 16)*/, const uint8_t *pixels/*align 1*/, int line_size, int w, int h); typedef void (*qpel_mc_func)(uint8_t *dst/*align width (8 or 16)*/, uint8_t *src/*align 1*/, int stride); @@ -191,7 +186,7 @@ static void a(uint8_t *block, const 
uint8_t *pixels, int line_size, int h){\ } /* motion estimation */ -// h is limited to {width/2, width, 2*width} but never larger than 16 and never smaller then 2 +// h is limited to {width/2, width, 2*width} but never larger than 16 and never smaller than 2 // although currently h<4 is not used as functions with width <8 are neither used nor implemented typedef int (*me_cmp_func)(void /*MpegEncContext*/ *s, uint8_t *blk1/*align width (8 or 16)*/, uint8_t *blk2/*align 1*/, int line_size, int h)/* __attribute__ ((const))*/; @@ -219,8 +214,6 @@ EMULATED_EDGE(8) EMULATED_EDGE(9) EMULATED_EDGE(10) -#define ff_emulated_edge_mc ff_emulated_edge_mc_8 - void ff_add_pixels_clamped_c(const DCTELEM *block, uint8_t *dest, int linesize); void ff_put_pixels_clamped_c(const DCTELEM *block, uint8_t *dest, int linesize); void ff_put_signed_pixels_clamped_c(const DCTELEM *block, uint8_t *dest, int linesize); @@ -229,12 +222,16 @@ void ff_put_signed_pixels_clamped_c(const DCTELEM *block, uint8_t *dest, int lin * DSPContext. */ typedef struct DSPContext { + /** + * Size of DCT coefficients. + */ + int dct_bits; + /* pixel ops : interface with DCT */ void (*get_pixels)(DCTELEM *block/*align 16*/, const uint8_t *pixels/*align 8*/, int line_size); void (*diff_pixels)(DCTELEM *block/*align 16*/, const uint8_t *s1/*align 8*/, const uint8_t *s2/*align 8*/, int stride); void (*put_pixels_clamped)(const DCTELEM *block/*align 16*/, uint8_t *pixels/*align 8*/, int line_size); void (*put_signed_pixels_clamped)(const DCTELEM *block/*align 16*/, uint8_t *pixels/*align 8*/, int line_size); - void (*put_pixels_nonclamped)(const DCTELEM *block/*align 16*/, uint8_t *pixels/*align 8*/, int line_size); void (*add_pixels_clamped)(const DCTELEM *block/*align 16*/, uint8_t *pixels/*align 8*/, int line_size); void (*add_pixels8)(uint8_t *pixels, DCTELEM *block, int line_size); void (*add_pixels4)(uint8_t *pixels, DCTELEM *block, int line_size); @@ -427,32 +424,6 @@ typedef struct DSPContext { void (*vector_fmul_scalar)(float *dst, const float *src, float mul, int len); /** - * Multiply a vector of floats by concatenated short vectors of - * floats and by a scalar float. Source and destination vectors - * must overlap exactly or not at all. - * [0]: short vectors of length 2, 8-byte aligned - * [1]: short vectors of length 4, 16-byte aligned - * @param dst output vector, 16-byte aligned - * @param src input vector, 16-byte aligned - * @param sv array of pointers to short vectors - * @param mul scalar value - * @param len number of elements in src and dst, multiple of 4 - */ - void (*vector_fmul_sv_scalar[2])(float *dst, const float *src, - const float **sv, float mul, int len); - /** - * Multiply short vectors of floats by a scalar float, store - * concatenated result. - * [0]: short vectors of length 2, 8-byte aligned - * [1]: short vectors of length 4, 16-byte aligned - * @param dst output vector, 16-byte aligned - * @param sv array of pointers to short vectors - * @param mul scalar value - * @param len number of output elements, multiple of 4 - */ - void (*sv_fmul_scalar[2])(float *dst, const float **sv, - float mul, int len); - /** * Calculate the scalar product of two vectors of floats. * @param v1 first vector, 16-byte aligned * @param v2 second vector, 16-byte aligned @@ -561,6 +532,22 @@ typedef struct DSPContext { void (*apply_window_int16)(int16_t *output, const int16_t *input, const int16_t *window, unsigned int len); + /** + * Clip each element in an array of int32_t to a given minimum and maximum value. 
+ * @param dst destination array + * constraints: 16-byte aligned + * @param src source array + * constraints: 16-byte aligned + * @param min minimum value + * constraints: must in the the range [-(1<<24), 1<<24] + * @param max maximum value + * constraints: must in the the range [-(1<<24), 1<<24] + * @param len number of elements in the array + * constraints: multiple of 32 greater than zero + */ + void (*vector_clip_int32)(int32_t *dst, const int32_t *src, int32_t min, + int32_t max, unsigned int len); + /* rv30 functions */ qpel_mc_func put_rv30_tpel_pixels_tab[4][16]; qpel_mc_func avg_rv30_tpel_pixels_tab[4][16]; @@ -571,9 +558,7 @@ typedef struct DSPContext { h264_chroma_mc_func put_rv40_chroma_pixels_tab[3]; h264_chroma_mc_func avg_rv40_chroma_pixels_tab[3]; - /* bink functions */ op_fill_func fill_block_tab[2]; - void (*scale_block)(const uint8_t src[64]/*align 8*/, uint8_t *dst/*align 8*/, int linesize); } DSPContext; void dsputil_static_init(void); diff --git a/libavcodec/dsputil_template.c b/libavcodec/dsputil_template.c index 58533d2ce9..85d4fec7dc 100644 --- a/libavcodec/dsputil_template.c +++ b/libavcodec/dsputil_template.c @@ -27,7 +27,7 @@ * DSP utils */ -#include "high_bit_depth.h" +#include "bit_depth_template.c" static inline void FUNC(copy_block2)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h) { @@ -192,187 +192,89 @@ void FUNC(ff_emulated_edge_mc)(uint8_t *buf, const uint8_t *src, int linesize, i } } -static void FUNCC(add_pixels8)(uint8_t *restrict p_pixels, DCTELEM *p_block, int line_size) -{ - int i; - pixel *restrict pixels = (pixel *restrict)p_pixels; - dctcoef *block = (dctcoef*)p_block; - line_size >>= sizeof(pixel)-1; - - for(i=0;i<8;i++) { - pixels[0] += block[0]; - pixels[1] += block[1]; - pixels[2] += block[2]; - pixels[3] += block[3]; - pixels[4] += block[4]; - pixels[5] += block[5]; - pixels[6] += block[6]; - pixels[7] += block[7]; - pixels += line_size; - block += 8; - } +#define DCTELEM_FUNCS(dctcoef, suffix) \ +static void FUNCC(get_pixels ## suffix)(DCTELEM *restrict _block, \ + const uint8_t *_pixels, \ + int line_size) \ +{ \ + const pixel *pixels = (const pixel *) _pixels; \ + dctcoef *restrict block = (dctcoef *) _block; \ + int i; \ + \ + /* read the pixels */ \ + for(i=0;i<8;i++) { \ + block[0] = pixels[0]; \ + block[1] = pixels[1]; \ + block[2] = pixels[2]; \ + block[3] = pixels[3]; \ + block[4] = pixels[4]; \ + block[5] = pixels[5]; \ + block[6] = pixels[6]; \ + block[7] = pixels[7]; \ + pixels += line_size / sizeof(pixel); \ + block += 8; \ + } \ +} \ + \ +static void FUNCC(add_pixels8 ## suffix)(uint8_t *restrict _pixels, \ + DCTELEM *_block, \ + int line_size) \ +{ \ + int i; \ + pixel *restrict pixels = (pixel *restrict)_pixels; \ + dctcoef *block = (dctcoef*)_block; \ + line_size /= sizeof(pixel); \ + \ + for(i=0;i<8;i++) { \ + pixels[0] += block[0]; \ + pixels[1] += block[1]; \ + pixels[2] += block[2]; \ + pixels[3] += block[3]; \ + pixels[4] += block[4]; \ + pixels[5] += block[5]; \ + pixels[6] += block[6]; \ + pixels[7] += block[7]; \ + pixels += line_size; \ + block += 8; \ + } \ +} \ + \ +static void FUNCC(add_pixels4 ## suffix)(uint8_t *restrict _pixels, \ + DCTELEM *_block, \ + int line_size) \ +{ \ + int i; \ + pixel *restrict pixels = (pixel *restrict)_pixels; \ + dctcoef *block = (dctcoef*)_block; \ + line_size /= sizeof(pixel); \ + \ + for(i=0;i<4;i++) { \ + pixels[0] += block[0]; \ + pixels[1] += block[1]; \ + pixels[2] += block[2]; \ + pixels[3] += block[3]; \ + pixels += line_size; \ + block += 4; \ + } 
\ +} \ + \ +static void FUNCC(clear_block ## suffix)(DCTELEM *block) \ +{ \ + memset(block, 0, sizeof(dctcoef)*64); \ +} \ + \ +/** \ + * memset(blocks, 0, sizeof(DCTELEM)*6*64) \ + */ \ +static void FUNCC(clear_blocks ## suffix)(DCTELEM *blocks) \ +{ \ + memset(blocks, 0, sizeof(dctcoef)*6*64); \ } -static void FUNCC(add_pixels4)(uint8_t *restrict p_pixels, DCTELEM *p_block, int line_size) -{ - int i; - pixel *restrict pixels = (pixel *restrict)p_pixels; - dctcoef *block = (dctcoef*)p_block; - line_size >>= sizeof(pixel)-1; - - for(i=0;i<4;i++) { - pixels[0] += block[0]; - pixels[1] += block[1]; - pixels[2] += block[2]; - pixels[3] += block[3]; - pixels += line_size; - block += 4; - } -} - -#if 0 - -#define PIXOP2(OPNAME, OP) \ -static void OPNAME ## _pixels(uint8_t *block, const uint8_t *pixels, int line_size, int h)\ -{\ - int i;\ - for(i=0; i<h; i++){\ - OP(*((uint64_t*)block), AV_RN64(pixels));\ - pixels+=line_size;\ - block +=line_size;\ - }\ -}\ -\ -static void OPNAME ## _no_rnd_pixels_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\ -{\ - int i;\ - for(i=0; i<h; i++){\ - const uint64_t a= AV_RN64(pixels );\ - const uint64_t b= AV_RN64(pixels+1);\ - OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\ - pixels+=line_size;\ - block +=line_size;\ - }\ -}\ -\ -static void OPNAME ## _pixels_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\ -{\ - int i;\ - for(i=0; i<h; i++){\ - const uint64_t a= AV_RN64(pixels );\ - const uint64_t b= AV_RN64(pixels+1);\ - OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\ - pixels+=line_size;\ - block +=line_size;\ - }\ -}\ -\ -static void OPNAME ## _no_rnd_pixels_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\ -{\ - int i;\ - for(i=0; i<h; i++){\ - const uint64_t a= AV_RN64(pixels );\ - const uint64_t b= AV_RN64(pixels+line_size);\ - OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\ - pixels+=line_size;\ - block +=line_size;\ - }\ -}\ -\ -static void OPNAME ## _pixels_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\ -{\ - int i;\ - for(i=0; i<h; i++){\ - const uint64_t a= AV_RN64(pixels );\ - const uint64_t b= AV_RN64(pixels+line_size);\ - OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\ - pixels+=line_size;\ - block +=line_size;\ - }\ -}\ -\ -static void OPNAME ## _pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\ -{\ - int i;\ - const uint64_t a= AV_RN64(pixels );\ - const uint64_t b= AV_RN64(pixels+1);\ - uint64_t l0= (a&0x0303030303030303ULL)\ - + (b&0x0303030303030303ULL)\ - + 0x0202020202020202ULL;\ - uint64_t h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\ - + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\ - uint64_t l1,h1;\ -\ - pixels+=line_size;\ - for(i=0; i<h; i+=2){\ - uint64_t a= AV_RN64(pixels );\ - uint64_t b= AV_RN64(pixels+1);\ - l1= (a&0x0303030303030303ULL)\ - + (b&0x0303030303030303ULL);\ - h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\ - + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\ - OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\ - pixels+=line_size;\ - block +=line_size;\ - a= AV_RN64(pixels );\ - b= AV_RN64(pixels+1);\ - l0= (a&0x0303030303030303ULL)\ - + (b&0x0303030303030303ULL)\ - + 0x0202020202020202ULL;\ - h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\ - + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\ - OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\ - pixels+=line_size;\ - block +=line_size;\ - }\ -}\ -\ -static void OPNAME ## _no_rnd_pixels_xy2_c(uint8_t *block, const uint8_t 
*pixels, int line_size, int h)\ -{\ - int i;\ - const uint64_t a= AV_RN64(pixels );\ - const uint64_t b= AV_RN64(pixels+1);\ - uint64_t l0= (a&0x0303030303030303ULL)\ - + (b&0x0303030303030303ULL)\ - + 0x0101010101010101ULL;\ - uint64_t h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\ - + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\ - uint64_t l1,h1;\ -\ - pixels+=line_size;\ - for(i=0; i<h; i+=2){\ - uint64_t a= AV_RN64(pixels );\ - uint64_t b= AV_RN64(pixels+1);\ - l1= (a&0x0303030303030303ULL)\ - + (b&0x0303030303030303ULL);\ - h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\ - + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\ - OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\ - pixels+=line_size;\ - block +=line_size;\ - a= AV_RN64(pixels );\ - b= AV_RN64(pixels+1);\ - l0= (a&0x0303030303030303ULL)\ - + (b&0x0303030303030303ULL)\ - + 0x0101010101010101ULL;\ - h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\ - + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\ - OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\ - pixels+=line_size;\ - block +=line_size;\ - }\ -}\ -\ -CALL_2X_PIXELS(OPNAME ## _pixels16_c , OPNAME ## _pixels_c , 8*sizeof(pixel))\ -CALL_2X_PIXELS(OPNAME ## _pixels16_x2_c , OPNAME ## _pixels_x2_c , 8*sizeof(pixel))\ -CALL_2X_PIXELS(OPNAME ## _pixels16_y2_c , OPNAME ## _pixels_y2_c , 8*sizeof(pixel))\ -CALL_2X_PIXELS(OPNAME ## _pixels16_xy2_c, OPNAME ## _pixels_xy2_c, 8*sizeof(pixel))\ -CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_x2_c , OPNAME ## _no_rnd_pixels_x2_c , 8*sizeof(pixel))\ -CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_y2_c , OPNAME ## _no_rnd_pixels_y2_c , 8*sizeof(pixel))\ -CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_xy2_c, OPNAME ## _no_rnd_pixels_xy2_c, 8*sizeof(pixel)) - -#define op_avg(a, b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEFEFEFEFEULL)>>1) ) -#else // 64 bit variant +DCTELEM_FUNCS(DCTELEM, _16) +#if BIT_DEPTH > 8 +DCTELEM_FUNCS(dctcoef, _32) +#endif #define PIXOP2(OPNAME, OP) \ static void FUNCC(OPNAME ## _pixels2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\ @@ -749,7 +651,6 @@ CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_y2) , FUNCC(OPNAME ## _no_rnd_pi CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_xy2), FUNCC(OPNAME ## _no_rnd_pixels8_xy2), 8*sizeof(pixel))\ #define op_avg(a, b) a = rnd_avg_pixel4(a, b) -#endif #define op_put(a, b) a = b PIXOP2(avg, op_avg) @@ -1377,16 +1278,3 @@ void FUNCC(ff_avg_pixels16x16)(uint8_t *dst, uint8_t *src, int stride) { FUNCC(avg_pixels16)(dst, src, stride, 16); } -static void FUNCC(clear_block)(DCTELEM *block) -{ - memset(block, 0, sizeof(dctcoef)*64); -} - -/** - * memset(blocks, 0, sizeof(DCTELEM)*6*64) - */ -static void FUNCC(clear_blocks)(DCTELEM *blocks) -{ - memset(blocks, 0, sizeof(dctcoef)*6*64); -} - diff --git a/libavcodec/dv.c b/libavcodec/dv.c index d6c49c86cb..c616a60d87 100644 --- a/libavcodec/dv.c +++ b/libavcodec/dv.c @@ -370,7 +370,6 @@ typedef struct BlockInfo { /* bit budget for AC only in 5 MBs */ static const int vs_total_ac_bits = (100 * 4 + 68*2) * 5; -/* see dv_88_areas and dv_248_areas for details */ static const int mb_area_start[5] = { 1, 6, 21, 43, 64 }; static inline int put_bits_left(PutBitContext* s) @@ -378,7 +377,7 @@ static inline int put_bits_left(PutBitContext* s) return (s->buf_end - s->buf) * 8 - put_bits_count(s); } -/* decode ac coefficients */ +/* decode AC coefficients */ static void dv_decode_ac(GetBitContext *gb, BlockInfo *mb, DCTELEM *block) { int last_index = gb->size_in_bits; @@ -391,7 +390,7 @@ static void dv_decode_ac(GetBitContext *gb, BlockInfo *mb, DCTELEM *block) OPEN_READER(re, gb); 
UPDATE_CACHE(re, gb); - /* if we must parse a partial vlc, we do it here */ + /* if we must parse a partial VLC, we do it here */ if (partial_bit_count > 0) { re_cache = ((unsigned)re_cache >> partial_bit_count) | (mb->partial_bit_buffer << (sizeof(re_cache) * 8 - partial_bit_count)); @@ -476,8 +475,8 @@ static int dv_decode_video_segment(AVCodecContext *avctx, void *arg) GetBitContext gb; BlockInfo mb_data[5 * DV_MAX_BPM], *mb, *mb1; LOCAL_ALIGNED_16(DCTELEM, sblock, [5*DV_MAX_BPM], [64]); - LOCAL_ALIGNED_16(uint8_t, mb_bit_buffer, [80 + 4]); /* allow some slack */ - LOCAL_ALIGNED_16(uint8_t, vs_bit_buffer, [5 * 80 + 4]); /* allow some slack */ + LOCAL_ALIGNED_16(uint8_t, mb_bit_buffer, [ 80 + FF_INPUT_BUFFER_PADDING_SIZE]); /* allow some slack */ + LOCAL_ALIGNED_16(uint8_t, vs_bit_buffer, [5*80 + FF_INPUT_BUFFER_PADDING_SIZE]); /* allow some slack */ const int log2_blocksize = 3-s->avctx->lowres; int is_field_mode[5]; @@ -486,7 +485,7 @@ static int dv_decode_video_segment(AVCodecContext *avctx, void *arg) memset(sblock, 0, 5*DV_MAX_BPM*sizeof(*sblock)); - /* pass 1 : read DC and AC coefficients in blocks */ + /* pass 1: read DC and AC coefficients in blocks */ buf_ptr = &s->buf[work_chunk->buf_offset*80]; block1 = &sblock[0][0]; mb1 = mb_data; @@ -503,7 +502,7 @@ static int dv_decode_video_segment(AVCodecContext *avctx, void *arg) last_index = s->sys->block_sizes[j]; init_get_bits(&gb, buf_ptr, last_index); - /* get the dc */ + /* get the DC */ dc = get_sbits(&gb, 9); dct_mode = get_bits1(&gb); class1 = get_bits(&gb, 2); @@ -530,7 +529,7 @@ static int dv_decode_video_segment(AVCodecContext *avctx, void *arg) av_dlog(avctx, "MB block: %d, %d ", mb_index, j); dv_decode_ac(&gb, mb, block); - /* write the remaining bits in a new buffer only if the + /* write the remaining bits in a new buffer only if the block is finished */ if (mb->pos >= 64) bit_copy(&pb, &gb); @@ -539,11 +538,12 @@ static int dv_decode_video_segment(AVCodecContext *avctx, void *arg) mb++; } - /* pass 2 : we can do it just after */ + /* pass 2: we can do it just after */ av_dlog(avctx, "***pass 2 size=%d MB#=%d\n", put_bits_count(&pb), mb_index); block = block1; mb = mb1; init_get_bits(&gb, mb_bit_buffer, put_bits_count(&pb)); + put_bits32(&pb, 0); // padding must be zeroed flush_put_bits(&pb); for (j = 0; j < s->sys->bpm; j++, block += 64, mb++) { if (mb->pos < 64 && get_bits_left(&gb) > 0) { @@ -559,11 +559,12 @@ static int dv_decode_video_segment(AVCodecContext *avctx, void *arg) bit_copy(&vs_pb, &gb); } - /* we need a pass other the whole video segment */ + /* we need a pass over the whole video segment */ av_dlog(avctx, "***pass 3 size=%d\n", put_bits_count(&vs_pb)); block = &sblock[0][0]; mb = mb_data; init_get_bits(&gb, vs_bit_buffer, put_bits_count(&vs_pb)); + put_bits32(&vs_pb, 0); // padding must be zeroed flush_put_bits(&vs_pb); for (mb_index = 0; mb_index < 5; mb_index++) { for (j = 0; j < s->sys->bpm; j++) { @@ -640,7 +641,7 @@ static int dv_decode_video_segment(AVCodecContext *avctx, void *arg) } #if CONFIG_SMALL -/* Converts run and level (where level != 0) pair into vlc, returning bit size */ +/* Converts run and level (where level != 0) pair into VLC, returning bit size */ static av_always_inline int dv_rl2vlc(int run, int level, int sign, uint32_t* vlc) { int size; @@ -817,7 +818,7 @@ static av_always_inline int dv_init_enc_block(EncBlockInfo* bi, uint8_t *data, i if (level + 15 > 30U) { bi->sign[i] = (level >> 31) & 1; - /* weigh it and and shift down into range, adding for rounding */ + /* weight it and 
and shift down into range, adding for rounding */ /* the extra division by a factor of 2^4 reverses the 8x expansion of the DCT AND the 2x doubling of the weights */ level = (FFABS(level) * weight[i] + (1 << (dv_weight_bits+3))) >> (dv_weight_bits+4); @@ -1108,7 +1109,7 @@ static int dvvideo_decode_frame(AVCodecContext *avctx, vsc_pack = buf + 80*5 + 48 + 5; if ( *vsc_pack == dv_video_control ) { apt = buf[4] & 0x07; - is16_9 = (vsc_pack && ((vsc_pack[2] & 0x07) == 0x02 || (!apt && (vsc_pack[2] & 0x07) == 0x07))); + is16_9 = (vsc_pack[2] & 0x07) == 0x02 || (!apt && (vsc_pack[2] & 0x07) == 0x07); avctx->sample_aspect_ratio = s->sys->sar[is16_9]; } @@ -1279,12 +1280,12 @@ static int dvvideo_close(AVCodecContext *c) #if CONFIG_DVVIDEO_ENCODER AVCodec ff_dvvideo_encoder = { - "dvvideo", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_DVVIDEO, - sizeof(DVVideoContext), - dvvideo_init_encoder, - dvvideo_encode_frame, + .name = "dvvideo", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_DVVIDEO, + .priv_data_size = sizeof(DVVideoContext), + .init = dvvideo_init_encoder, + .encode = dvvideo_encode_frame, .capabilities = CODEC_CAP_SLICE_THREADS, .pix_fmts = (const enum PixelFormat[]) {PIX_FMT_YUV411P, PIX_FMT_YUV422P, PIX_FMT_YUV420P, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"), @@ -1293,16 +1294,14 @@ AVCodec ff_dvvideo_encoder = { #if CONFIG_DVVIDEO_DECODER AVCodec ff_dvvideo_decoder = { - "dvvideo", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_DVVIDEO, - sizeof(DVVideoContext), - dvvideo_init, - NULL, - dvvideo_close, - dvvideo_decode_frame, - CODEC_CAP_DR1 | CODEC_CAP_SLICE_THREADS, - NULL, + .name = "dvvideo", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_DVVIDEO, + .priv_data_size = sizeof(DVVideoContext), + .init = dvvideo_init, + .close = dvvideo_close, + .decode = dvvideo_decode_frame, + .capabilities = CODEC_CAP_DR1 | CODEC_CAP_SLICE_THREADS, .max_lowres = 3, .long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"), }; diff --git a/libavcodec/dvbsub.c b/libavcodec/dvbsub.c index ed128234e8..2df8b58021 100644 --- a/libavcodec/dvbsub.c +++ b/libavcodec/dvbsub.c @@ -403,11 +403,10 @@ static int dvbsub_encode(AVCodecContext *avctx, } AVCodec ff_dvbsub_encoder = { - "dvbsub", - AVMEDIA_TYPE_SUBTITLE, - CODEC_ID_DVB_SUBTITLE, - sizeof(DVBSubtitleContext), - NULL, - dvbsub_encode, + .name = "dvbsub", + .type = AVMEDIA_TYPE_SUBTITLE, + .id = CODEC_ID_DVB_SUBTITLE, + .priv_data_size = sizeof(DVBSubtitleContext), + .encode = dvbsub_encode, .long_name = NULL_IF_CONFIG_SMALL("DVB subtitles"), }; diff --git a/libavcodec/dvbsubdec.c b/libavcodec/dvbsubdec.c index 5b09eb14d6..1ee7aad5d1 100644 --- a/libavcodec/dvbsubdec.c +++ b/libavcodec/dvbsubdec.c @@ -1463,6 +1463,7 @@ static int dvbsub_decode(AVCodecContext *avctx, break; case DVBSUB_DISPLAYDEFINITION_SEGMENT: dvbsub_parse_display_definition_segment(avctx, p, segment_length); + break; case DVBSUB_DISPLAY_SEGMENT: *data_size = dvbsub_display_end_segment(avctx, p, segment_length, sub); break; @@ -1481,13 +1482,12 @@ static int dvbsub_decode(AVCodecContext *avctx, AVCodec ff_dvbsub_decoder = { - "dvbsub", - AVMEDIA_TYPE_SUBTITLE, - CODEC_ID_DVB_SUBTITLE, - sizeof(DVBSubContext), - dvbsub_init_decoder, - NULL, - dvbsub_close_decoder, - dvbsub_decode, + .name = "dvbsub", + .type = AVMEDIA_TYPE_SUBTITLE, + .id = CODEC_ID_DVB_SUBTITLE, + .priv_data_size = sizeof(DVBSubContext), + .init = dvbsub_init_decoder, + .close = dvbsub_close_decoder, + .decode = dvbsub_decode, .long_name = NULL_IF_CONFIG_SMALL("DVB subtitles"), }; diff --git a/libavcodec/dvdsubdec.c 
b/libavcodec/dvdsubdec.c index 6d5973c59b..1c3d75e2e3 100644 --- a/libavcodec/dvdsubdec.c +++ b/libavcodec/dvdsubdec.c @@ -344,6 +344,10 @@ static int decode_dvd_subtitles(AVSubtitle *sub_header, sub_header->rects[0]->pict.linesize[0] = w; } } + if (next_cmd_pos < cmd_pos) { + av_log(NULL, AV_LOG_ERROR, "Invalid command offset\n"); + break; + } if (next_cmd_pos == cmd_pos) break; cmd_pos = next_cmd_pos; @@ -494,13 +498,9 @@ static int dvdsub_decode(AVCodecContext *avctx, } AVCodec ff_dvdsub_decoder = { - "dvdsub", - AVMEDIA_TYPE_SUBTITLE, - CODEC_ID_DVD_SUBTITLE, - 0, - NULL, - NULL, - NULL, - dvdsub_decode, + .name = "dvdsub", + .type = AVMEDIA_TYPE_SUBTITLE, + .id = CODEC_ID_DVD_SUBTITLE, + .decode = dvdsub_decode, .long_name = NULL_IF_CONFIG_SMALL("DVD subtitles"), }; diff --git a/libavcodec/dvdsubenc.c b/libavcodec/dvdsubenc.c index d09ac269ed..1aa6ce7b78 100644 --- a/libavcodec/dvdsubenc.c +++ b/libavcodec/dvdsubenc.c @@ -216,11 +216,9 @@ static int dvdsub_encode(AVCodecContext *avctx, } AVCodec ff_dvdsub_encoder = { - "dvdsub", - AVMEDIA_TYPE_SUBTITLE, - CODEC_ID_DVD_SUBTITLE, - 0, - NULL, - dvdsub_encode, + .name = "dvdsub", + .type = AVMEDIA_TYPE_SUBTITLE, + .id = CODEC_ID_DVD_SUBTITLE, + .encode = dvdsub_encode, .long_name = NULL_IF_CONFIG_SMALL("DVD subtitles"), }; diff --git a/libavcodec/dxa.c b/libavcodec/dxa.c index 807ecd85ee..b3bdae5c55 100644 --- a/libavcodec/dxa.c +++ b/libavcodec/dxa.c @@ -321,15 +321,14 @@ static av_cold int decode_end(AVCodecContext *avctx) } AVCodec ff_dxa_decoder = { - "dxa", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_DXA, - sizeof(DxaDecContext), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, + .name = "dxa", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_DXA, + .priv_data_size = sizeof(DxaDecContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Feeble Files/ScummVM DXA"), }; diff --git a/libavcodec/dxva2.c b/libavcodec/dxva2.c index 3f14311c9a..b6f8aea429 100644 --- a/libavcodec/dxva2.c +++ b/libavcodec/dxva2.c @@ -24,7 +24,7 @@ void *ff_dxva2_get_surface(const Picture *picture) { - return picture->data[3]; + return picture->f.data[3]; } unsigned ff_dxva2_get_surface_index(const struct dxva_context *ctx, diff --git a/libavcodec/dxva2.h b/libavcodec/dxva2.h index 5c5fe21e2f..6eb494b9fe 100644 --- a/libavcodec/dxva2.h +++ b/libavcodec/dxva2.h @@ -27,6 +27,8 @@ #include <dxva2api.h> +#define FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG 1 ///< Work around for DXVA2 and old UVD/UVD+ ATI video cards + /** * This structure is used to provides the necessary configurations and data * to the DXVA2 FFmpeg HWAccel implementation. diff --git a/libavcodec/dxva2_h264.c b/libavcodec/dxva2_h264.c index bc80e982fb..b5ffe02f44 100644 --- a/libavcodec/dxva2_h264.c +++ b/libavcodec/dxva2_h264.c @@ -70,15 +70,15 @@ static void fill_picture_parameters(struct dxva_context *ctx, const H264Context ff_dxva2_get_surface_index(ctx, r), r->long_ref != 0); - if ((r->reference & PICT_TOP_FIELD) && r->field_poc[0] != INT_MAX) + if ((r->f.reference & PICT_TOP_FIELD) && r->field_poc[0] != INT_MAX) pp->FieldOrderCntList[i][0] = r->field_poc[0]; - if ((r->reference & PICT_BOTTOM_FIELD) && r->field_poc[1] != INT_MAX) + if ((r->f.reference & PICT_BOTTOM_FIELD) && r->field_poc[1] != INT_MAX) pp->FieldOrderCntList[i][1] = r->field_poc[1]; pp->FrameNumList[i] = r->long_ref ? 
r->pic_id : r->frame_num; - if (r->reference & PICT_TOP_FIELD) + if (r->f.reference & PICT_TOP_FIELD) pp->UsedForReferenceFlags |= 1 << (2*i + 0); - if (r->reference & PICT_BOTTOM_FIELD) + if (r->f.reference & PICT_BOTTOM_FIELD) pp->UsedForReferenceFlags |= 1 << (2*i + 1); } else { pp->RefFrameList[i].bPicEntry = 0xff; @@ -113,7 +113,10 @@ static void fill_picture_parameters(struct dxva_context *ctx, const H264Context pp->bit_depth_luma_minus8 = h->sps.bit_depth_luma - 8; pp->bit_depth_chroma_minus8 = h->sps.bit_depth_chroma - 8; - pp->Reserved16Bits = 3; /* FIXME is there a way to detect the right mode ? */ + if (ctx->workaround & FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG) + pp->Reserved16Bits = 0; + else + pp->Reserved16Bits = 3; /* FIXME is there a way to detect the right mode ? */ pp->StatusReportFeedbackNumber = 1 + ctx->report_id++; pp->CurrFieldOrderCnt[0] = 0; if ((s->picture_structure & PICT_TOP_FIELD) && @@ -150,17 +153,27 @@ static void fill_picture_parameters(struct dxva_context *ctx, const H264Context //pp->SliceGroupMap[810]; /* XXX not implemented by FFmpeg */ } -static void fill_scaling_lists(const H264Context *h, DXVA_Qmatrix_H264 *qm) +static void fill_scaling_lists(struct dxva_context *ctx, const H264Context *h, DXVA_Qmatrix_H264 *qm) { unsigned i, j; memset(qm, 0, sizeof(*qm)); - for (i = 0; i < 6; i++) - for (j = 0; j < 16; j++) - qm->bScalingLists4x4[i][j] = h->pps.scaling_matrix4[i][zigzag_scan[j]]; + if (ctx->workaround & FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG) { + for (i = 0; i < 6; i++) + for (j = 0; j < 16; j++) + qm->bScalingLists4x4[i][j] = h->pps.scaling_matrix4[i][j]; + + for (i = 0; i < 2; i++) + for (j = 0; j < 64; j++) + qm->bScalingLists8x8[i][j] = h->pps.scaling_matrix8[i][j]; + } else { + for (i = 0; i < 6; i++) + for (j = 0; j < 16; j++) + qm->bScalingLists4x4[i][j] = h->pps.scaling_matrix4[i][zigzag_scan[j]]; - for (i = 0; i < 2; i++) - for (j = 0; j < 64; j++) - qm->bScalingLists8x8[i][j] = h->pps.scaling_matrix8[i][ff_zigzag_direct[j]]; + for (i = 0; i < 2; i++) + for (j = 0; j < 64; j++) + qm->bScalingLists8x8[i][j] = h->pps.scaling_matrix8[i][ff_zigzag_direct[j]]; + } } static int is_slice_short(struct dxva_context *ctx) @@ -216,7 +229,7 @@ static void fill_slice_long(AVCodecContext *avctx, DXVA_Slice_H264_Long *slice, unsigned plane; fill_picture_entry(&slice->RefPicList[list][i], ff_dxva2_get_surface_index(ctx, r), - r->reference == PICT_BOTTOM_FIELD); + r->f.reference == PICT_BOTTOM_FIELD); for (plane = 0; plane < 3; plane++) { int w, o; if (plane == 0 && h->luma_weight_flag[list]) { @@ -265,7 +278,7 @@ static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx, const unsigned mb_count = s->mb_width * s->mb_height; struct dxva_context *ctx = avctx->hwaccel_context; const Picture *current_picture = h->s.current_picture_ptr; - struct dxva2_picture_context *ctx_pic = current_picture->hwaccel_picture_private; + struct dxva2_picture_context *ctx_pic = current_picture->f.hwaccel_picture_private; DXVA_Slice_H264_Short *slice = NULL; uint8_t *dxva_data, *current, *end; unsigned dxva_size; @@ -360,7 +373,7 @@ static int start_frame(AVCodecContext *avctx, { const H264Context *h = avctx->priv_data; struct dxva_context *ctx = avctx->hwaccel_context; - struct dxva2_picture_context *ctx_pic = h->s.current_picture_ptr->hwaccel_picture_private; + struct dxva2_picture_context *ctx_pic = h->s.current_picture_ptr->f.hwaccel_picture_private; if (!ctx->decoder || !ctx->cfg || ctx->surface_count <= 0) return -1; @@ -370,7 +383,7 @@ static int 
start_frame(AVCodecContext *avctx, fill_picture_parameters(ctx, h, &ctx_pic->pp); /* Fill up DXVA_Qmatrix_H264 */ - fill_scaling_lists(h, &ctx_pic->qm); + fill_scaling_lists(ctx, h, &ctx_pic->qm); ctx_pic->slice_count = 0; ctx_pic->bitstream_size = 0; @@ -384,7 +397,7 @@ static int decode_slice(AVCodecContext *avctx, const H264Context *h = avctx->priv_data; struct dxva_context *ctx = avctx->hwaccel_context; const Picture *current_picture = h->s.current_picture_ptr; - struct dxva2_picture_context *ctx_pic = current_picture->hwaccel_picture_private; + struct dxva2_picture_context *ctx_pic = current_picture->f.hwaccel_picture_private; unsigned position; if (ctx_pic->slice_count >= MAX_SLICES) @@ -413,7 +426,7 @@ static int end_frame(AVCodecContext *avctx) H264Context *h = avctx->priv_data; MpegEncContext *s = &h->s; struct dxva2_picture_context *ctx_pic = - h->s.current_picture_ptr->hwaccel_picture_private; + h->s.current_picture_ptr->f.hwaccel_picture_private; if (ctx_pic->slice_count <= 0 || ctx_pic->bitstream_size <= 0) return -1; diff --git a/libavcodec/dxva2_mpeg2.c b/libavcodec/dxva2_mpeg2.c index 62e6ec1cfa..02065744ce 100644 --- a/libavcodec/dxva2_mpeg2.c +++ b/libavcodec/dxva2_mpeg2.c @@ -151,7 +151,7 @@ static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx, const struct MpegEncContext *s = avctx->priv_data; struct dxva_context *ctx = avctx->hwaccel_context; struct dxva2_picture_context *ctx_pic = - s->current_picture_ptr->hwaccel_picture_private; + s->current_picture_ptr->f.hwaccel_picture_private; const int is_field = s->picture_structure != PICT_FRAME; const unsigned mb_count = s->mb_width * (s->mb_height >> is_field); uint8_t *dxva_data, *current, *end; @@ -210,7 +210,7 @@ static int start_frame(AVCodecContext *avctx, const struct MpegEncContext *s = avctx->priv_data; struct dxva_context *ctx = avctx->hwaccel_context; struct dxva2_picture_context *ctx_pic = - s->current_picture_ptr->hwaccel_picture_private; + s->current_picture_ptr->f.hwaccel_picture_private; if (!ctx->decoder || !ctx->cfg || ctx->surface_count <= 0) return -1; @@ -230,7 +230,7 @@ static int decode_slice(AVCodecContext *avctx, { const struct MpegEncContext *s = avctx->priv_data; struct dxva2_picture_context *ctx_pic = - s->current_picture_ptr->hwaccel_picture_private; + s->current_picture_ptr->f.hwaccel_picture_private; unsigned position; if (ctx_pic->slice_count >= MAX_SLICES) @@ -250,7 +250,7 @@ static int end_frame(AVCodecContext *avctx) { struct MpegEncContext *s = avctx->priv_data; struct dxva2_picture_context *ctx_pic = - s->current_picture_ptr->hwaccel_picture_private; + s->current_picture_ptr->f.hwaccel_picture_private; if (ctx_pic->slice_count <= 0 || ctx_pic->bitstream_size <= 0) return -1; diff --git a/libavcodec/dxva2_vc1.c b/libavcodec/dxva2_vc1.c index 5b9bb692e0..2bdd0cc6c7 100644 --- a/libavcodec/dxva2_vc1.c +++ b/libavcodec/dxva2_vc1.c @@ -161,7 +161,7 @@ static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx, const VC1Context *v = avctx->priv_data; struct dxva_context *ctx = avctx->hwaccel_context; const MpegEncContext *s = &v->s; - struct dxva2_picture_context *ctx_pic = s->current_picture_ptr->hwaccel_picture_private; + struct dxva2_picture_context *ctx_pic = s->current_picture_ptr->f.hwaccel_picture_private; DXVA_SliceInfo *slice = &ctx_pic->si; @@ -213,7 +213,7 @@ static int start_frame(AVCodecContext *avctx, { const VC1Context *v = avctx->priv_data; struct dxva_context *ctx = avctx->hwaccel_context; - struct dxva2_picture_context *ctx_pic = 
v->s.current_picture_ptr->hwaccel_picture_private; + struct dxva2_picture_context *ctx_pic = v->s.current_picture_ptr->f.hwaccel_picture_private; if (!ctx->decoder || !ctx->cfg || ctx->surface_count <= 0) return -1; @@ -231,7 +231,7 @@ static int decode_slice(AVCodecContext *avctx, { const VC1Context *v = avctx->priv_data; const Picture *current_picture = v->s.current_picture_ptr; - struct dxva2_picture_context *ctx_pic = current_picture->hwaccel_picture_private; + struct dxva2_picture_context *ctx_pic = current_picture->f.hwaccel_picture_private; if (ctx_pic->bitstream_size > 0) return -1; @@ -252,7 +252,7 @@ static int decode_slice(AVCodecContext *avctx, static int end_frame(AVCodecContext *avctx) { VC1Context *v = avctx->priv_data; - struct dxva2_picture_context *ctx_pic = v->s.current_picture_ptr->hwaccel_picture_private; + struct dxva2_picture_context *ctx_pic = v->s.current_picture_ptr->f.hwaccel_picture_private; if (ctx_pic->bitstream_size <= 0) return -1; diff --git a/libavcodec/eac3dec_data.c b/libavcodec/eac3_data.c index 031702e9e9..b159e1682f 100644 --- a/libavcodec/eac3dec_data.c +++ b/libavcodec/eac3_data.c @@ -1,5 +1,5 @@ /* - * E-AC-3 decoder tables + * E-AC-3 tables * Copyright (c) 2007 Bartlomiej Wolowiec <bartek.wolowiec@gmail.com> * * This file is part of FFmpeg. @@ -24,7 +24,7 @@ * Tables taken directly from the E-AC-3 spec. */ -#include "eac3dec_data.h" +#include "eac3_data.h" #include "ac3.h" const uint8_t ff_eac3_bits_vs_hebap[20] = { diff --git a/libavcodec/eac3dec_data.h b/libavcodec/eac3_data.h index 133183398f..10a67f16d2 100644 --- a/libavcodec/eac3dec_data.h +++ b/libavcodec/eac3_data.h @@ -1,5 +1,5 @@ /* - * E-AC-3 decoder tables + * E-AC-3 tables * Copyright (c) 2007 Bartlomiej Wolowiec <bartek.wolowiec@gmail.com> * * This file is part of FFmpeg. @@ -19,8 +19,8 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ -#ifndef AVCODEC_EAC3DEC_DATA_H -#define AVCODEC_EAC3DEC_DATA_H +#ifndef AVCODEC_EAC3_DATA_H +#define AVCODEC_EAC3_DATA_H #include <stdint.h> @@ -33,4 +33,4 @@ extern const int16_t (* const ff_eac3_mantissa_vq[8])[6]; extern const uint8_t ff_eac3_frm_expstr[32][6]; extern const float ff_eac3_spx_atten_tab[32][3]; -#endif /* AVCODEC_EAC3DEC_DATA_H */ +#endif /* AVCODEC_EAC3_DATA_H */ diff --git a/libavcodec/eac3dec.c b/libavcodec/eac3dec.c index 40f571ffc7..ad240e4341 100644 --- a/libavcodec/eac3dec.c +++ b/libavcodec/eac3dec.c @@ -51,7 +51,7 @@ #include "ac3_parser.h" #include "ac3dec.h" #include "ac3dec_data.h" -#include "eac3dec_data.h" +#include "eac3_data.h" /** gain adaptive quantization mode */ typedef enum { diff --git a/libavcodec/eac3enc.c b/libavcodec/eac3enc.c index d37acaf20b..75113b272f 100644 --- a/libavcodec/eac3enc.c +++ b/libavcodec/eac3enc.c @@ -27,12 +27,63 @@ #define CONFIG_AC3ENC_FLOAT 1 #include "ac3enc.h" #include "eac3enc.h" +#include "eac3_data.h" #define AC3ENC_TYPE AC3ENC_TYPE_EAC3 #include "ac3enc_opts_template.c" -static AVClass eac3enc_class = { "E-AC-3 Encoder", av_default_item_name, - eac3_options, LIBAVUTIL_VERSION_INT }; +static const AVClass eac3enc_class = { "E-AC-3 Encoder", av_default_item_name, + eac3_options, LIBAVUTIL_VERSION_INT }; + + +/** + * LUT for finding a matching frame exponent strategy index from a set of + * exponent strategies for a single channel across all 6 blocks. 
+ */ +static int8_t eac3_frame_expstr_index_tab[3][4][4][4][4][4]; + + +void ff_eac3_exponent_init(void) +{ + int i; + + memset(eac3_frame_expstr_index_tab, -1, sizeof(eac3_frame_expstr_index_tab)); + for (i = 0; i < 32; i++) { + eac3_frame_expstr_index_tab[ff_eac3_frm_expstr[i][0]-1] + [ff_eac3_frm_expstr[i][1]] + [ff_eac3_frm_expstr[i][2]] + [ff_eac3_frm_expstr[i][3]] + [ff_eac3_frm_expstr[i][4]] + [ff_eac3_frm_expstr[i][5]] = i; + } +} + + +void ff_eac3_get_frame_exp_strategy(AC3EncodeContext *s) +{ + int ch; + + if (s->num_blocks < 6) { + s->use_frame_exp_strategy = 0; + return; + } + + s->use_frame_exp_strategy = 1; + for (ch = !s->cpl_on; ch <= s->fbw_channels; ch++) { + int expstr = eac3_frame_expstr_index_tab[s->exp_strategy[ch][0]-1] + [s->exp_strategy[ch][1]] + [s->exp_strategy[ch][2]] + [s->exp_strategy[ch][3]] + [s->exp_strategy[ch][4]] + [s->exp_strategy[ch][5]]; + if (expstr < 0) { + s->use_frame_exp_strategy = 0; + break; + } + s->frame_exp_strategy[ch] = expstr; + } +} + void ff_eac3_set_cpl_states(AC3EncodeContext *s) @@ -43,7 +94,7 @@ void ff_eac3_set_cpl_states(AC3EncodeContext *s) /* set first cpl coords */ for (ch = 1; ch <= s->fbw_channels; ch++) first_cpl_coords[ch] = 1; - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { + for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; for (ch = 1; ch <= s->fbw_channels; ch++) { if (block->channel_in_cpl[ch]) { @@ -58,7 +109,7 @@ void ff_eac3_set_cpl_states(AC3EncodeContext *s) } /* set first cpl leak */ - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { + for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; if (block->cpl_in_use) { block->new_cpl_leak = 2; @@ -84,22 +135,64 @@ void ff_eac3_output_frame_header(AC3EncodeContext *s) put_bits(&s->pb, 2, s->bit_alloc.sr_code); /* sample rate code */ } else { put_bits(&s->pb, 2, s->bit_alloc.sr_code); /* sample rate code */ - put_bits(&s->pb, 2, 0x3); /* number of blocks = 6 */ + put_bits(&s->pb, 2, s->num_blks_code); /* number of blocks */ } put_bits(&s->pb, 3, s->channel_mode); /* audio coding mode */ put_bits(&s->pb, 1, s->lfe_on); /* LFE channel indicator */ put_bits(&s->pb, 5, s->bitstream_id); /* bitstream id (EAC3=16) */ put_bits(&s->pb, 5, -opt->dialogue_level); /* dialogue normalization level */ put_bits(&s->pb, 1, 0); /* no compression gain */ - put_bits(&s->pb, 1, 0); /* no mixing metadata */ - /* TODO: mixing metadata */ - put_bits(&s->pb, 1, 0); /* no info metadata */ - /* TODO: info metadata */ + /* mixing metadata*/ + put_bits(&s->pb, 1, opt->eac3_mixing_metadata); + if (opt->eac3_mixing_metadata) { + if (s->channel_mode > AC3_CHMODE_STEREO) + put_bits(&s->pb, 2, opt->preferred_stereo_downmix); + if (s->has_center) { + put_bits(&s->pb, 3, s->ltrt_center_mix_level); + put_bits(&s->pb, 3, s->loro_center_mix_level); + } + if (s->has_surround) { + put_bits(&s->pb, 3, s->ltrt_surround_mix_level); + put_bits(&s->pb, 3, s->loro_surround_mix_level); + } + if (s->lfe_on) + put_bits(&s->pb, 1, 0); + put_bits(&s->pb, 1, 0); /* no program scale */ + put_bits(&s->pb, 1, 0); /* no ext program scale */ + put_bits(&s->pb, 2, 0); /* no mixing parameters */ + if (s->channel_mode < AC3_CHMODE_STEREO) + put_bits(&s->pb, 1, 0); /* no pan info */ + put_bits(&s->pb, 1, 0); /* no frame mix config info */ + } + /* info metadata*/ + put_bits(&s->pb, 1, opt->eac3_info_metadata); + if (opt->eac3_info_metadata) { + put_bits(&s->pb, 3, s->bitstream_mode); + put_bits(&s->pb, 1, opt->copyright); + put_bits(&s->pb, 1, opt->original); + if (s->channel_mode 
== AC3_CHMODE_STEREO) { + put_bits(&s->pb, 2, opt->dolby_surround_mode); + put_bits(&s->pb, 2, opt->dolby_headphone_mode); + } + if (s->channel_mode >= AC3_CHMODE_2F2R) + put_bits(&s->pb, 2, opt->dolby_surround_ex_mode); + put_bits(&s->pb, 1, opt->audio_production_info); + if (opt->audio_production_info) { + put_bits(&s->pb, 5, opt->mixing_level - 80); + put_bits(&s->pb, 2, opt->room_type); + put_bits(&s->pb, 1, opt->ad_converter_type); + } + put_bits(&s->pb, 1, 0); + } + if (s->num_blocks != 6) + put_bits(&s->pb, 1, !(s->avctx->frame_number % 6)); /* converter sync flag */ put_bits(&s->pb, 1, 0); /* no additional bit stream info */ /* frame header */ - put_bits(&s->pb, 1, 1); /* exponent strategy syntax = each block */ + if (s->num_blocks == 6) { + put_bits(&s->pb, 1, !s->use_frame_exp_strategy);/* exponent strategy syntax */ put_bits(&s->pb, 1, 0); /* aht enabled = no */ + } put_bits(&s->pb, 2, 0); /* snr offset strategy = 1 */ put_bits(&s->pb, 1, 0); /* transient pre-noise processing enabled = no */ put_bits(&s->pb, 1, 0); /* block switch syntax enabled = no */ @@ -112,7 +205,7 @@ void ff_eac3_output_frame_header(AC3EncodeContext *s) /* coupling strategy use flags */ if (s->channel_mode > AC3_CHMODE_MONO) { put_bits(&s->pb, 1, s->blocks[0].cpl_in_use); - for (blk = 1; blk < AC3_MAX_BLOCKS; blk++) { + for (blk = 1; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; put_bits(&s->pb, 1, block->new_cpl_strategy); if (block->new_cpl_strategy) @@ -120,21 +213,35 @@ void ff_eac3_output_frame_header(AC3EncodeContext *s) } } /* exponent strategy */ - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) - for (ch = !s->blocks[blk].cpl_in_use; ch <= s->fbw_channels; ch++) - put_bits(&s->pb, 2, s->exp_strategy[ch][blk]); + if (s->use_frame_exp_strategy) { + for (ch = !s->cpl_on; ch <= s->fbw_channels; ch++) + put_bits(&s->pb, 5, s->frame_exp_strategy[ch]); + } else { + for (blk = 0; blk < s->num_blocks; blk++) + for (ch = !s->blocks[blk].cpl_in_use; ch <= s->fbw_channels; ch++) + put_bits(&s->pb, 2, s->exp_strategy[ch][blk]); + } if (s->lfe_on) { - for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) + for (blk = 0; blk < s->num_blocks; blk++) put_bits(&s->pb, 1, s->exp_strategy[s->lfe_channel][blk]); } - /* E-AC-3 to AC-3 converter exponent strategy (unfortunately not optional...) */ - for (ch = 1; ch <= s->fbw_channels; ch++) - put_bits(&s->pb, 5, 0); + /* E-AC-3 to AC-3 converter exponent strategy (not optional when num blocks == 6) */ + if (s->num_blocks != 6) { + put_bits(&s->pb, 1, 0); + } else { + for (ch = 1; ch <= s->fbw_channels; ch++) { + if (s->use_frame_exp_strategy) + put_bits(&s->pb, 5, s->frame_exp_strategy[ch]); + else + put_bits(&s->pb, 5, 0); + } + } /* snr offsets */ put_bits(&s->pb, 6, s->coarse_snr_offset); put_bits(&s->pb, 4, s->fine_snr_offset[1]); /* block start info */ - put_bits(&s->pb, 1, 0); + if (s->num_blocks > 1) + put_bits(&s->pb, 1, 0); } @@ -145,7 +252,7 @@ AVCodec ff_eac3_encoder = { .id = CODEC_ID_EAC3, .priv_data_size = sizeof(AC3EncodeContext), .init = ff_ac3_encode_init, - .encode = ff_ac3_encode_frame, + .encode = ff_ac3_float_encode_frame, .close = ff_ac3_encode_close, .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52 E-AC-3"), diff --git a/libavcodec/eac3enc.h b/libavcodec/eac3enc.h index eacb8cf164..a92a24c2fa 100644 --- a/libavcodec/eac3enc.h +++ b/libavcodec/eac3enc.h @@ -30,6 +30,16 @@ #include "ac3enc.h" /** + * Initialize E-AC-3 exponent tables. 
+ */ +void ff_eac3_exponent_init(void); + +/** + * Determine frame exponent strategy use and indices. + */ +void ff_eac3_get_frame_exp_strategy(AC3EncodeContext *s); + +/** * Set coupling states. * This determines whether certain flags must be written to the bitstream or * whether they will be implicitly already known by the decoder. diff --git a/libavcodec/eacmv.c b/libavcodec/eacmv.c index 408d948812..76de9c9810 100644 --- a/libavcodec/eacmv.c +++ b/libavcodec/eacmv.c @@ -210,14 +210,13 @@ static av_cold int cmv_decode_end(AVCodecContext *avctx){ } AVCodec ff_eacmv_decoder = { - "eacmv", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_CMV, - sizeof(CmvContext), - cmv_decode_init, - NULL, - cmv_decode_end, - cmv_decode_frame, - CODEC_CAP_DR1, + .name = "eacmv", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_CMV, + .priv_data_size = sizeof(CmvContext), + .init = cmv_decode_init, + .close = cmv_decode_end, + .decode = cmv_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Electronic Arts CMV video"), }; diff --git a/libavcodec/eamad.c b/libavcodec/eamad.c index d4881ab843..74fb4c121b 100644 --- a/libavcodec/eamad.c +++ b/libavcodec/eamad.c @@ -307,14 +307,13 @@ static av_cold int decode_end(AVCodecContext *avctx) } AVCodec ff_eamad_decoder = { - "eamad", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MAD, - sizeof(MadContext), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, + .name = "eamad", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MAD, + .priv_data_size = sizeof(MadContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Electronic Arts Madcow Video") }; diff --git a/libavcodec/eatgq.c b/libavcodec/eatgq.c index a353580a15..95692a471b 100644 --- a/libavcodec/eatgq.c +++ b/libavcodec/eatgq.c @@ -244,14 +244,13 @@ static av_cold int tgq_decode_end(AVCodecContext *avctx){ } AVCodec ff_eatgq_decoder = { - "eatgq", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_TGQ, - sizeof(TgqContext), - tgq_decode_init, - NULL, - tgq_decode_end, - tgq_decode_frame, - CODEC_CAP_DR1, + .name = "eatgq", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_TGQ, + .priv_data_size = sizeof(TgqContext), + .init = tgq_decode_init, + .close = tgq_decode_end, + .decode = tgq_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Electronic Arts TGQ video"), }; diff --git a/libavcodec/eatgv.c b/libavcodec/eatgv.c index 0855f10417..991c5d12b8 100644 --- a/libavcodec/eatgv.c +++ b/libavcodec/eatgv.c @@ -337,13 +337,12 @@ static av_cold int tgv_decode_end(AVCodecContext *avctx) } AVCodec ff_eatgv_decoder = { - "eatgv", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_TGV, - sizeof(TgvContext), - tgv_decode_init, - NULL, - tgv_decode_end, - tgv_decode_frame, + .name = "eatgv", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_TGV, + .priv_data_size = sizeof(TgvContext), + .init = tgv_decode_init, + .close = tgv_decode_end, + .decode = tgv_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("Electronic Arts TGV video"), }; diff --git a/libavcodec/eatqi.c b/libavcodec/eatqi.c index 44792f0483..a1ad8147dc 100644 --- a/libavcodec/eatqi.c +++ b/libavcodec/eatqi.c @@ -22,10 +22,8 @@ /** * @file * Electronic Arts TQI Video Decoder - * by Peter Ross <pross@xvid.org> - * - * Technical details here: - * http://wiki.multimedia.cx/index.php?title=Electronic_Arts_TQI + * @author Peter Ross <pross@xvid.org> + * @see http://wiki.multimedia.cx/index.php?title=Electronic_Arts_TQI */ #include "avcodec.h" @@ -155,14 +153,13 @@ static 
av_cold int tqi_decode_end(AVCodecContext *avctx) } AVCodec ff_eatqi_decoder = { - "eatqi", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_TQI, - sizeof(TqiContext), - tqi_decode_init, - NULL, - tqi_decode_end, - tqi_decode_frame, - CODEC_CAP_DR1, + .name = "eatqi", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_TQI, + .priv_data_size = sizeof(TqiContext), + .init = tqi_decode_init, + .close = tqi_decode_end, + .decode = tqi_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Electronic Arts TQI Video"), }; diff --git a/libavcodec/error_resilience.c b/libavcodec/error_resilience.c index d6ac81f009..3fb95ead88 100644 --- a/libavcodec/error_resilience.c +++ b/libavcodec/error_resilience.c @@ -41,9 +41,9 @@ #undef mb_intra static void decode_mb(MpegEncContext *s, int ref){ - s->dest[0] = s->current_picture.data[0] + (s->mb_y * 16* s->linesize ) + s->mb_x * 16; - s->dest[1] = s->current_picture.data[1] + (s->mb_y * (16>>s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16>>s->chroma_x_shift); - s->dest[2] = s->current_picture.data[2] + (s->mb_y * (16>>s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16>>s->chroma_x_shift); + s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16; + s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift); + s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift); if(CONFIG_H264_DECODER && s->codec_id == CODEC_ID_H264){ H264Context *h= (void*)s; @@ -52,7 +52,7 @@ static void decode_mb(MpegEncContext *s, int ref){ assert(ref>=0); if(ref >= h->ref_count[0]) //FIXME it is posible albeit uncommon that slice references differ between slices, we take the easy approuch and ignore it for now. 
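The dest[] arithmetic in the decode_mb() hunk above addresses one 16x16 macroblock per plane; as a rough standalone illustration of that addressing (assuming 4:2:0, so both chroma shifts are 1; the function name and layout here are hypothetical, not part of this patch):

#include <stddef.h>

/* Byte offsets of macroblock (mb_x, mb_y) in each plane, mirroring the
 * s->dest[] computation above: 16x16 luma, chroma scaled down by the
 * chroma shifts (8x8 for 4:2:0).  Cb and Cr share the same geometry,
 * only their base pointers differ. */
static void mb_plane_offsets(int mb_x, int mb_y,
                             ptrdiff_t linesize, ptrdiff_t uvlinesize,
                             int chroma_x_shift, int chroma_y_shift,
                             ptrdiff_t off[3])
{
    off[0] = mb_y * 16 * linesize + mb_x * 16;
    off[1] = mb_y * (16 >> chroma_y_shift) * uvlinesize +
             mb_x * (16 >> chroma_x_shift);
    off[2] = off[1];
}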
If this turns out to have any relevance in practice then correct remapping should be added ref=0; - fill_rectangle(&s->current_picture.ref_index[0][4*h->mb_xy], 2, 2, 2, ref, 1); + fill_rectangle(&s->current_picture.f.ref_index[0][4*h->mb_xy], 2, 2, 2, ref, 1); fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1); fill_rectangle(h->mv_cache[0][ scan8[0] ], 4, 4, 8, pack16to32(s->mv[0][0][0],s->mv[0][0][1]), 4); assert(!FRAME_MBAFF); @@ -166,14 +166,14 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, i error= s->error_status_table[mb_index]; - if(IS_INTER(s->current_picture.mb_type[mb_index])) continue; //inter + if(IS_INTER(s->current_picture.f.mb_type[mb_index])) continue; //inter if(!(error&DC_ERROR)) continue; //dc-ok /* right block */ for(j=b_x+1; j<w; j++){ int mb_index_j= (j>>is_luma) + (b_y>>is_luma)*s->mb_stride; int error_j= s->error_status_table[mb_index_j]; - int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]); + int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]); if(intra_j==0 || !(error_j&DC_ERROR)){ color[0]= dc[j + b_y*stride]; distance[0]= j-b_x; @@ -185,7 +185,7 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, i for(j=b_x-1; j>=0; j--){ int mb_index_j= (j>>is_luma) + (b_y>>is_luma)*s->mb_stride; int error_j= s->error_status_table[mb_index_j]; - int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]); + int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]); if(intra_j==0 || !(error_j&DC_ERROR)){ color[1]= dc[j + b_y*stride]; distance[1]= b_x-j; @@ -197,7 +197,7 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, i for(j=b_y+1; j<h; j++){ int mb_index_j= (b_x>>is_luma) + (j>>is_luma)*s->mb_stride; int error_j= s->error_status_table[mb_index_j]; - int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]); + int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]); if(intra_j==0 || !(error_j&DC_ERROR)){ color[2]= dc[b_x + j*stride]; distance[2]= j-b_y; @@ -209,7 +209,7 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, i for(j=b_y-1; j>=0; j--){ int mb_index_j= (b_x>>is_luma) + (j>>is_luma)*s->mb_stride; int error_j= s->error_status_table[mb_index_j]; - int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]); + int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]); if(intra_j==0 || !(error_j&DC_ERROR)){ color[3]= dc[b_x + j*stride]; distance[3]= b_y-j; @@ -248,13 +248,13 @@ static void h_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int st int y; int left_status = s->error_status_table[( b_x >>is_luma) + (b_y>>is_luma)*s->mb_stride]; int right_status= s->error_status_table[((b_x+1)>>is_luma) + (b_y>>is_luma)*s->mb_stride]; - int left_intra= IS_INTRA(s->current_picture.mb_type [( b_x >>is_luma) + (b_y>>is_luma)*s->mb_stride]); - int right_intra= IS_INTRA(s->current_picture.mb_type [((b_x+1)>>is_luma) + (b_y>>is_luma)*s->mb_stride]); + int left_intra = IS_INTRA(s->current_picture.f.mb_type[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]); + int right_intra = IS_INTRA(s->current_picture.f.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]); int left_damage = left_status&(DC_ERROR|AC_ERROR|MV_ERROR); int right_damage= right_status&(DC_ERROR|AC_ERROR|MV_ERROR); int offset= b_x*8 + b_y*stride*8; - int16_t *left_mv= s->current_picture.motion_val[0][mvy_stride*b_y + mvx_stride* b_x ]; - int16_t *right_mv= 
s->current_picture.motion_val[0][mvy_stride*b_y + mvx_stride*(b_x+1)]; + int16_t *left_mv= s->current_picture.f.motion_val[0][mvy_stride*b_y + mvx_stride* b_x ]; + int16_t *right_mv= s->current_picture.f.motion_val[0][mvy_stride*b_y + mvx_stride*(b_x+1)]; if(!(left_damage||right_damage)) continue; // both undamaged @@ -311,13 +311,13 @@ static void v_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int st int x; int top_status = s->error_status_table[(b_x>>is_luma) + ( b_y >>is_luma)*s->mb_stride]; int bottom_status= s->error_status_table[(b_x>>is_luma) + ((b_y+1)>>is_luma)*s->mb_stride]; - int top_intra= IS_INTRA(s->current_picture.mb_type [(b_x>>is_luma) + ( b_y >>is_luma)*s->mb_stride]); - int bottom_intra= IS_INTRA(s->current_picture.mb_type [(b_x>>is_luma) + ((b_y+1)>>is_luma)*s->mb_stride]); + int top_intra = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ( b_y >> is_luma) * s->mb_stride]); + int bottom_intra = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]); int top_damage = top_status&(DC_ERROR|AC_ERROR|MV_ERROR); int bottom_damage= bottom_status&(DC_ERROR|AC_ERROR|MV_ERROR); int offset= b_x*8 + b_y*stride*8; - int16_t *top_mv= s->current_picture.motion_val[0][mvy_stride* b_y + mvx_stride*b_x]; - int16_t *bottom_mv= s->current_picture.motion_val[0][mvy_stride*(b_y+1) + mvx_stride*b_x]; + int16_t *top_mv = s->current_picture.f.motion_val[0][mvy_stride * b_y + mvx_stride * b_x]; + int16_t *bottom_mv = s->current_picture.f.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x]; if(!(top_damage||bottom_damage)) continue; // both undamaged @@ -376,19 +376,19 @@ static void guess_mv(MpegEncContext *s){ int f=0; int error= s->error_status_table[mb_xy]; - if(IS_INTRA(s->current_picture.mb_type[mb_xy])) f=MV_FROZEN; //intra //FIXME check + if(IS_INTRA(s->current_picture.f.mb_type[mb_xy])) f=MV_FROZEN; //intra //FIXME check if(!(error&MV_ERROR)) f=MV_FROZEN; //inter with undamaged MV fixed[mb_xy]= f; if(f==MV_FROZEN) num_avail++; - else if(s->last_picture.data[0] && s->last_picture.motion_val[0]){ + else if(s->last_picture.f.data[0] && s->last_picture.f.motion_val[0]){ const int mb_y= mb_xy / s->mb_stride; const int mb_x= mb_xy % s->mb_stride; const int mot_index= (mb_x + mb_y*mot_stride) * mot_step; - s->current_picture.motion_val[0][mot_index][0]= s->last_picture.motion_val[0][mot_index][0]; - s->current_picture.motion_val[0][mot_index][1]= s->last_picture.motion_val[0][mot_index][1]; - s->current_picture.ref_index[0][4*mb_xy] = s->last_picture.ref_index[0][4*mb_xy]; + s->current_picture.f.motion_val[0][mot_index][0]= s->last_picture.f.motion_val[0][mot_index][0]; + s->current_picture.f.motion_val[0][mot_index][1]= s->last_picture.f.motion_val[0][mot_index][1]; + s->current_picture.f.ref_index[0][4*mb_xy] = s->last_picture.f.ref_index[0][4*mb_xy]; } } @@ -397,10 +397,10 @@ static void guess_mv(MpegEncContext *s){ for(mb_x=0; mb_x<s->mb_width; mb_x++){ const int mb_xy= mb_x + mb_y*s->mb_stride; - if(IS_INTRA(s->current_picture.mb_type[mb_xy])) continue; + if(IS_INTRA(s->current_picture.f.mb_type[mb_xy])) continue; if(!(s->error_status_table[mb_xy]&MV_ERROR)) continue; - s->mv_dir = s->last_picture.data[0] ? MV_DIR_FORWARD : MV_DIR_BACKWARD; + s->mv_dir = s->last_picture.f.data[0] ? 
MV_DIR_FORWARD : MV_DIR_BACKWARD; s->mb_intra=0; s->mv_type = MV_TYPE_16X16; s->mb_skipped=0; @@ -442,8 +442,8 @@ int score_sum=0; if((mb_x^mb_y^pass)&1) continue; if(fixed[mb_xy]==MV_FROZEN) continue; - assert(!IS_INTRA(s->current_picture.mb_type[mb_xy])); - assert(s->last_picture_ptr && s->last_picture_ptr->data[0]); + assert(!IS_INTRA(s->current_picture.f.mb_type[mb_xy])); + assert(s->last_picture_ptr && s->last_picture_ptr->f.data[0]); j=0; if(mb_x>0 && fixed[mb_xy-1 ]==MV_FROZEN) j=1; @@ -462,27 +462,27 @@ int score_sum=0; none_left=0; if(mb_x>0 && fixed[mb_xy-1]){ - mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index - mot_step][0]; - mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index - mot_step][1]; - ref [pred_count] = s->current_picture.ref_index[0][4*(mb_xy-1)]; + mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index - mot_step][0]; + mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index - mot_step][1]; + ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy-1)]; pred_count++; } if(mb_x+1<mb_width && fixed[mb_xy+1]){ - mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index + mot_step][0]; - mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index + mot_step][1]; - ref [pred_count] = s->current_picture.ref_index[0][4*(mb_xy+1)]; + mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index + mot_step][0]; + mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index + mot_step][1]; + ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy+1)]; pred_count++; } if(mb_y>0 && fixed[mb_xy-mb_stride]){ - mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index - mot_stride*mot_step][0]; - mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index - mot_stride*mot_step][1]; - ref [pred_count] = s->current_picture.ref_index[0][4*(mb_xy-s->mb_stride)]; + mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index - mot_stride*mot_step][0]; + mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index - mot_stride*mot_step][1]; + ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy-s->mb_stride)]; pred_count++; } if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]){ - mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index + mot_stride*mot_step][0]; - mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index + mot_stride*mot_step][1]; - ref [pred_count] = s->current_picture.ref_index[0][4*(mb_xy+s->mb_stride)]; + mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index + mot_stride*mot_step][0]; + mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index + mot_stride*mot_step][1]; + ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy+s->mb_stride)]; pred_count++; } if(pred_count==0) continue; @@ -542,16 +542,16 @@ skip_mean_and_median: ff_thread_await_progress((AVFrame *) s->last_picture_ptr, mb_y, 0); } - if (!s->last_picture.motion_val[0] || - !s->last_picture.ref_index[0]) + if (!s->last_picture.f.motion_val[0] || + !s->last_picture.f.ref_index[0]) goto skip_last_mv; - prev_x = s->last_picture.motion_val[0][mot_index][0]; - prev_y = s->last_picture.motion_val[0][mot_index][1]; - prev_ref = s->last_picture.ref_index[0][4*mb_xy]; + prev_x = s->last_picture.f.motion_val[0][mot_index][0]; + prev_y = s->last_picture.f.motion_val[0][mot_index][1]; + prev_ref = s->last_picture.f.ref_index[0][4*mb_xy]; } else { - 
prev_x = s->current_picture.motion_val[0][mot_index][0]; - prev_y = s->current_picture.motion_val[0][mot_index][1]; - prev_ref = s->current_picture.ref_index[0][4*mb_xy]; + prev_x = s->current_picture.f.motion_val[0][mot_index][0]; + prev_y = s->current_picture.f.motion_val[0][mot_index][1]; + prev_ref = s->current_picture.f.ref_index[0][4*mb_xy]; } /* last MV */ @@ -573,10 +573,10 @@ skip_mean_and_median: for(j=0; j<pred_count; j++){ int score=0; - uint8_t *src= s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize; + uint8_t *src = s->current_picture.f.data[0] + mb_x*16 + mb_y*16*s->linesize; - s->current_picture.motion_val[0][mot_index][0]= s->mv[0][0][0]= mv_predictor[j][0]; - s->current_picture.motion_val[0][mot_index][1]= s->mv[0][0][1]= mv_predictor[j][1]; + s->current_picture.f.motion_val[0][mot_index][0] = s->mv[0][0][0] = mv_predictor[j][0]; + s->current_picture.f.motion_val[0][mot_index][1] = s->mv[0][0][1] = mv_predictor[j][1]; if(ref[j]<0) //predictor intra or otherwise not available continue; @@ -615,8 +615,8 @@ score_sum+= best_score; for(i=0; i<mot_step; i++) for(j=0; j<mot_step; j++){ - s->current_picture.motion_val[0][mot_index+i+j*mot_stride][0]= s->mv[0][0][0]; - s->current_picture.motion_val[0][mot_index+i+j*mot_stride][1]= s->mv[0][0][1]; + s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0]; + s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1]; } decode_mb(s, ref[best_pred]); @@ -648,7 +648,7 @@ score_sum+= best_score; static int is_intra_more_likely(MpegEncContext *s){ int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y; - if(!s->last_picture_ptr || !s->last_picture_ptr->data[0]) return 1; //no previous frame available -> use spatial prediction + if (!s->last_picture_ptr || !s->last_picture_ptr->f.data[0]) return 1; //no previous frame available -> use spatial prediction undamaged_count=0; for(i=0; i<s->mb_num; i++){ @@ -660,7 +660,7 @@ static int is_intra_more_likely(MpegEncContext *s){ if(s->codec_id == CODEC_ID_H264){ H264Context *h= (void*)s; - if(h->ref_count[0] <= 0 || !h->ref_list[0][0].data[0]) + if (h->ref_count[0] <= 0 || !h->ref_list[0][0].f.data[0]) return 1; } @@ -687,8 +687,8 @@ static int is_intra_more_likely(MpegEncContext *s){ if((j%skip_amount) != 0) continue; //skip a few to speed things up if(s->pict_type==AV_PICTURE_TYPE_I){ - uint8_t *mb_ptr = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize; - uint8_t *last_mb_ptr= s->last_picture.data [0] + mb_x*16 + mb_y*16*s->linesize; + uint8_t *mb_ptr = s->current_picture.f.data[0] + mb_x*16 + mb_y*16*s->linesize; + uint8_t *last_mb_ptr= s->last_picture.f.data [0] + mb_x*16 + mb_y*16*s->linesize; if (s->avctx->codec_id == CODEC_ID_H264) { // FIXME @@ -700,7 +700,7 @@ static int is_intra_more_likely(MpegEncContext *s){ // FIXME need await_progress() here is_intra_likely -= s->dsp.sad[0](NULL, last_mb_ptr, last_mb_ptr+s->linesize*16, s->linesize, 16); }else{ - if(IS_INTRA(s->current_picture.mb_type[mb_xy])) + if (IS_INTRA(s->current_picture.f.mb_type[mb_xy])) is_intra_likely++; else is_intra_likely--; @@ -802,15 +802,15 @@ void ff_er_frame_end(MpegEncContext *s){ s->picture_structure != PICT_FRAME || // we dont support ER of field pictures yet, though it should not crash if enabled s->error_count==3*s->mb_width*(s->avctx->skip_top + s->avctx->skip_bottom)) return; - if(s->current_picture.motion_val[0] == NULL){ + if (s->current_picture.f.motion_val[0] == NULL) { av_log(s->avctx, AV_LOG_ERROR, "Warning MVs 
not available\n"); for(i=0; i<2; i++){ - pic->ref_index[i]= av_mallocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t)); + pic->f.ref_index[i] = av_mallocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t)); pic->motion_val_base[i]= av_mallocz((size+4) * 2 * sizeof(uint16_t)); - pic->motion_val[i]= pic->motion_val_base[i]+4; + pic->f.motion_val[i] = pic->motion_val_base[i] + 4; } - pic->motion_subsample_log2= 3; + pic->f.motion_subsample_log2 = 3; s->current_picture= *s->current_picture_ptr; } @@ -965,25 +965,25 @@ void ff_er_frame_end(MpegEncContext *s){ continue; if(is_intra_likely) - s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA4x4; + s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4; else - s->current_picture.mb_type[mb_xy]= MB_TYPE_16x16 | MB_TYPE_L0; + s->current_picture.f.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0; } // change inter to intra blocks if no reference frames are available - if (!s->last_picture.data[0] && !s->next_picture.data[0]) + if (!s->last_picture.f.data[0] && !s->next_picture.f.data[0]) for(i=0; i<s->mb_num; i++){ const int mb_xy= s->mb_index2xy[i]; - if(!IS_INTRA(s->current_picture.mb_type[mb_xy])) - s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA4x4; + if (!IS_INTRA(s->current_picture.f.mb_type[mb_xy])) + s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4; } /* handle inter blocks with damaged AC */ for(mb_y=0; mb_y<s->mb_height; mb_y++){ for(mb_x=0; mb_x<s->mb_width; mb_x++){ const int mb_xy= mb_x + mb_y * s->mb_stride; - const int mb_type= s->current_picture.mb_type[mb_xy]; - int dir = !s->last_picture.data[0]; + const int mb_type= s->current_picture.f.mb_type[mb_xy]; + int dir = !s->last_picture.f.data[0]; error= s->error_status_table[mb_xy]; if(IS_INTRA(mb_type)) continue; //intra @@ -998,13 +998,13 @@ void ff_er_frame_end(MpegEncContext *s){ int j; s->mv_type = MV_TYPE_8X8; for(j=0; j<4; j++){ - s->mv[0][j][0] = s->current_picture.motion_val[dir][ mb_index + (j&1) + (j>>1)*s->b8_stride ][0]; - s->mv[0][j][1] = s->current_picture.motion_val[dir][ mb_index + (j&1) + (j>>1)*s->b8_stride ][1]; + s->mv[0][j][0] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0]; + s->mv[0][j][1] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1]; } }else{ s->mv_type = MV_TYPE_16X16; - s->mv[0][0][0] = s->current_picture.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][0]; - s->mv[0][0][1] = s->current_picture.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][1]; + s->mv[0][0][0] = s->current_picture.f.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][0]; + s->mv[0][0][1] = s->current_picture.f.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][1]; } s->dsp.clear_blocks(s->block[0]); @@ -1021,7 +1021,7 @@ void ff_er_frame_end(MpegEncContext *s){ for(mb_x=0; mb_x<s->mb_width; mb_x++){ int xy= mb_x*2 + mb_y*2*s->b8_stride; const int mb_xy= mb_x + mb_y * s->mb_stride; - const int mb_type= s->current_picture.mb_type[mb_xy]; + const int mb_type= s->current_picture.f.mb_type[mb_xy]; error= s->error_status_table[mb_xy]; if(IS_INTRA(mb_type)) continue; @@ -1029,8 +1029,8 @@ void ff_er_frame_end(MpegEncContext *s){ if(!(error&AC_ERROR)) continue; //undamaged inter s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD; - if(!s->last_picture.data[0]) s->mv_dir &= ~MV_DIR_FORWARD; - if(!s->next_picture.data[0]) s->mv_dir &= ~MV_DIR_BACKWARD; + if(!s->last_picture.f.data[0]) s->mv_dir &= ~MV_DIR_FORWARD; + if(!s->next_picture.f.data[0]) s->mv_dir &= ~MV_DIR_BACKWARD; s->mb_intra=0; s->mv_type = MV_TYPE_16X16; 
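The mechanical current_picture.X -> current_picture.f.X changes throughout this file suggest that the internal Picture type now embeds an AVFrame (apparently as a member named f), so frame-level data is reached through that member; a rough sketch of the assumed layout (the type and every field other than f are illustrative only):

#include <stdint.h>
#include "avcodec.h"

/* Assumed shape of the refactor: anything that is really AVFrame data
 * (data[], linesize[], mb_type, motion_val, ref_index, ...) lives in the
 * embedded frame, while codec-private bookkeeping stays alongside it. */
typedef struct ExamplePicture {
    AVFrame f;                        /* access as pic.f.data[0], pic.f.mb_type, ... */
    int16_t (*motion_val_base[2])[2]; /* allocation base kept outside the AVFrame    */
    int mb_var_sum;                   /* further codec-private fields                */
} ExamplePicture;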
s->mb_skipped=0; @@ -1045,10 +1045,10 @@ void ff_er_frame_end(MpegEncContext *s){ ff_thread_await_progress((AVFrame *) s->next_picture_ptr, mb_y, 0); } - s->mv[0][0][0] = s->next_picture.motion_val[0][xy][0]*time_pb/time_pp; - s->mv[0][0][1] = s->next_picture.motion_val[0][xy][1]*time_pb/time_pp; - s->mv[1][0][0] = s->next_picture.motion_val[0][xy][0]*(time_pb - time_pp)/time_pp; - s->mv[1][0][1] = s->next_picture.motion_val[0][xy][1]*(time_pb - time_pp)/time_pp; + s->mv[0][0][0] = s->next_picture.f.motion_val[0][xy][0] * time_pb / time_pp; + s->mv[0][0][1] = s->next_picture.f.motion_val[0][xy][1] * time_pb / time_pp; + s->mv[1][0][0] = s->next_picture.f.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp; + s->mv[1][0][1] = s->next_picture.f.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp; }else{ s->mv[0][0][0]= 0; s->mv[0][0][1]= 0; @@ -1075,16 +1075,16 @@ void ff_er_frame_end(MpegEncContext *s){ int16_t *dc_ptr; uint8_t *dest_y, *dest_cb, *dest_cr; const int mb_xy= mb_x + mb_y * s->mb_stride; - const int mb_type= s->current_picture.mb_type[mb_xy]; + const int mb_type = s->current_picture.f.mb_type[mb_xy]; error= s->error_status_table[mb_xy]; if(IS_INTRA(mb_type) && s->partitioned_frame) continue; // if(error&MV_ERROR) continue; //inter data damaged FIXME is this good? - dest_y = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize; - dest_cb= s->current_picture.data[1] + mb_x*8 + mb_y*8 *s->uvlinesize; - dest_cr= s->current_picture.data[2] + mb_x*8 + mb_y*8 *s->uvlinesize; + dest_y = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize; + dest_cb = s->current_picture.f.data[1] + mb_x * 8 + mb_y * 8 * s->uvlinesize; + dest_cr = s->current_picture.f.data[2] + mb_x * 8 + mb_y * 8 * s->uvlinesize; dc_ptr= &s->dc_val[0][mb_x*2 + mb_y*2*s->b8_stride]; for(n=0; n<4; n++){ @@ -1125,16 +1125,16 @@ void ff_er_frame_end(MpegEncContext *s){ for(mb_x=0; mb_x<s->mb_width; mb_x++){ uint8_t *dest_y, *dest_cb, *dest_cr; const int mb_xy= mb_x + mb_y * s->mb_stride; - const int mb_type= s->current_picture.mb_type[mb_xy]; + const int mb_type = s->current_picture.f.mb_type[mb_xy]; error= s->error_status_table[mb_xy]; if(IS_INTER(mb_type)) continue; if(!(error&AC_ERROR)) continue; //undamaged - dest_y = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize; - dest_cb= s->current_picture.data[1] + mb_x*8 + mb_y*8 *s->uvlinesize; - dest_cr= s->current_picture.data[2] + mb_x*8 + mb_y*8 *s->uvlinesize; + dest_y = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize; + dest_cb = s->current_picture.f.data[1] + mb_x * 8 + mb_y * 8 * s->uvlinesize; + dest_cr = s->current_picture.f.data[2] + mb_x * 8 + mb_y * 8 * s->uvlinesize; put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y); } @@ -1143,14 +1143,14 @@ void ff_er_frame_end(MpegEncContext *s){ if(s->avctx->error_concealment&FF_EC_DEBLOCK){ /* filter horizontal block boundaries */ - h_block_filter(s, s->current_picture.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1); - h_block_filter(s, s->current_picture.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0); - h_block_filter(s, s->current_picture.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0); + h_block_filter(s, s->current_picture.f.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1); + h_block_filter(s, s->current_picture.f.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0); + h_block_filter(s, s->current_picture.f.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0); /* filter vertical block boundaries */ - v_block_filter(s, 
s->current_picture.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1); - v_block_filter(s, s->current_picture.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0); - v_block_filter(s, s->current_picture.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0); + v_block_filter(s, s->current_picture.f.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1); + v_block_filter(s, s->current_picture.f.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0); + v_block_filter(s, s->current_picture.f.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0); } ec_clean: diff --git a/libavcodec/escape124.c b/libavcodec/escape124.c index 12e478fe19..2c86d5f34f 100644 --- a/libavcodec/escape124.c +++ b/libavcodec/escape124.c @@ -366,15 +366,14 @@ static int escape124_decode_frame(AVCodecContext *avctx, AVCodec ff_escape124_decoder = { - "escape124", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_ESCAPE124, - sizeof(Escape124Context), - escape124_decode_init, - NULL, - escape124_decode_close, - escape124_decode_frame, - CODEC_CAP_DR1, + .name = "escape124", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_ESCAPE124, + .priv_data_size = sizeof(Escape124Context), + .init = escape124_decode_init, + .close = escape124_decode_close, + .decode = escape124_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Escape 124"), }; diff --git a/libavcodec/faxcompr.c b/libavcodec/faxcompr.c index 34aa576660..c157b984d3 100644 --- a/libavcodec/faxcompr.c +++ b/libavcodec/faxcompr.c @@ -20,8 +20,8 @@ */ /** - * CCITT Fax Group 3 and 4 decompression * @file + * CCITT Fax Group 3 and 4 decompression * @author Konstantin Shishkov */ #include "avcodec.h" diff --git a/libavcodec/faxcompr.h b/libavcodec/faxcompr.h index 62f591ceee..53d11681b2 100644 --- a/libavcodec/faxcompr.h +++ b/libavcodec/faxcompr.h @@ -20,8 +20,8 @@ */ /** - * CCITT Fax Group 3 and 4 decompression * @file + * CCITT Fax Group 3 and 4 decompression * @author Konstantin Shishkov */ #ifndef AVCODEC_FAXCOMPR_H diff --git a/libavcodec/fft-test.c b/libavcodec/fft-test.c index a676627de2..be105fe834 100644 --- a/libavcodec/fft-test.c +++ b/libavcodec/fft-test.c @@ -252,8 +252,9 @@ int main(int argc, char **argv) #if CONFIG_FFT_FLOAT RDFTContext r1, *r = &r1; DCTContext d1, *d = &d1; + int fft_size_2; #endif - int fft_nbits, fft_size, fft_size_2; + int fft_nbits, fft_size; double scale = 1.0; AVLFG prng; av_lfg_init(&prng, 1); @@ -292,7 +293,6 @@ int main(int argc, char **argv) } fft_size = 1 << fft_nbits; - fft_size_2 = fft_size >> 1; tab = av_malloc(fft_size * sizeof(FFTComplex)); tab1 = av_malloc(fft_size * sizeof(FFTComplex)); tab_ref = av_malloc(fft_size * sizeof(FFTComplex)); @@ -372,6 +372,7 @@ int main(int argc, char **argv) break; #if CONFIG_FFT_FLOAT case TRANSFORM_RDFT: + fft_size_2 = fft_size >> 1; if (do_inverse) { tab1[ 0].im = 0; tab1[fft_size_2].im = 0; diff --git a/libavcodec/fft.h b/libavcodec/fft.h index 24db7e3d24..0e19e947b1 100644 --- a/libavcodec/fft.h +++ b/libavcodec/fft.h @@ -119,7 +119,7 @@ extern COSTABLE_CONST FFTSample* const FFT_NAME(ff_cos_tabs)[17]; /** * Initialize the cosine table in ff_cos_tabs[index] - * \param index index in ff_cos_tabs array of the table to initialize + * @param index index in ff_cos_tabs array of the table to initialize */ void ff_init_ff_cos_tabs(int index); diff --git a/libavcodec/ffv1.c b/libavcodec/ffv1.c index a0a4a1d009..8d9dc72018 100644 --- a/libavcodec/ffv1.c +++ b/libavcodec/ffv1.c @@ -42,25 +42,6 @@ extern const uint8_t ff_log2_run[41]; -static const int8_t quant3[256]={ - 
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, --1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, --1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, --1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, --1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, --1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, --1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, --1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, --1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 0, -}; - static const int8_t quant5_10bit[256]={ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, @@ -98,42 +79,7 @@ static const int8_t quant5[256]={ -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2, -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,-1,-1, }; -static const int8_t quant7[256]={ - 0, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, --3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3, --3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3, --3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3, --3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3, --3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3, --3,-3,-3,-3,-3,-3,-3,-3,-3,-2,-2,-2,-2,-2,-2,-2, --2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2, --2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,-1, -}; -static const int8_t quant9[256]={ - 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, --4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, --4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, --4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, --4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, --4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, --4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, --4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-3,-3,-3,-3, --3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-2,-2,-2,-2,-1,-1, -}; + static const int8_t quant9_10bit[256]={ 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, @@ -171,24 +117,6 @@ static const int8_t quant11[256]={ -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, -4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3,-2,-2,-2,-1, }; -static const int8_t quant13[256]={ - 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, --6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6, 
--6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6, --6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6, --6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6, --6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-5, --5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5, --5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5, --4,-4,-4,-4,-4,-4,-4,-4,-4,-3,-3,-3,-3,-2,-2,-1, -}; static const uint8_t ver2_state[256]= { 0, 10, 10, 10, 10, 16, 16, 16, 28, 16, 16, 29, 42, 49, 20, 49, @@ -1836,28 +1764,26 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac } AVCodec ff_ffv1_decoder = { - "ffv1", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_FFV1, - sizeof(FFV1Context), - decode_init, - NULL, - common_end, - decode_frame, - CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/ | CODEC_CAP_SLICE_THREADS, - NULL, + .name = "ffv1", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_FFV1, + .priv_data_size = sizeof(FFV1Context), + .init = decode_init, + .close = common_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/ | CODEC_CAP_SLICE_THREADS, .long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"), }; #if CONFIG_FFV1_ENCODER AVCodec ff_ffv1_encoder = { - "ffv1", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_FFV1, - sizeof(FFV1Context), - encode_init, - encode_frame, - common_end, + .name = "ffv1", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_FFV1, + .priv_data_size = sizeof(FFV1Context), + .init = encode_init, + .encode = encode_frame, + .close = common_end, .capabilities = CODEC_CAP_SLICE_THREADS, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV444P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_RGB32, PIX_FMT_YUV420P16, PIX_FMT_YUV422P16, PIX_FMT_YUV444P16, PIX_FMT_YUV420P9, PIX_FMT_YUV420P10, PIX_FMT_YUV422P10, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"), diff --git a/libavcodec/flacdec.c b/libavcodec/flacdec.c index ece095cf09..d5662736b5 100644 --- a/libavcodec/flacdec.c +++ b/libavcodec/flacdec.c @@ -23,9 +23,7 @@ * @file * FLAC (Free Lossless Audio Codec) decoder * @author Alex Beregszaszi - * - * For more information on the FLAC format, visit: - * http://flac.sourceforge.net/ + * @see http://flac.sourceforge.net/ * * This decoder can be used in 1 of 2 ways: Either raw FLAC data can be fed * through, starting from the initial 'fLaC' signature; or by passing the @@ -654,13 +652,12 @@ static av_cold int flac_decode_close(AVCodecContext *avctx) } AVCodec ff_flac_decoder = { - "flac", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_FLAC, - sizeof(FLACContext), - flac_decode_init, - NULL, - flac_decode_close, - flac_decode_frame, + .name = "flac", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_FLAC, + .priv_data_size = sizeof(FLACContext), + .init = flac_decode_init, + .close = flac_decode_close, + .decode = flac_decode_frame, .long_name= NULL_IF_CONFIG_SMALL("FLAC (Free Lossless Audio Codec)"), }; diff --git a/libavcodec/flacenc.c b/libavcodec/flacenc.c index 838017b411..6b19ba072d 100644 --- a/libavcodec/flacenc.c +++ b/libavcodec/flacenc.c @@ -1390,14 +1390,13 @@ static const AVClass flac_encoder_class = { }; AVCodec ff_flac_encoder = { - "flac", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_FLAC, - sizeof(FlacEncodeContext), - flac_encode_init, - flac_encode_frame, - flac_encode_close, - NULL, + .name = "flac", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_FLAC, + .priv_data_size = sizeof(FlacEncodeContext), + .init = flac_encode_init, + .encode = flac_encode_frame, + .close = flac_encode_close, .capabilities = CODEC_CAP_SMALL_LAST_FRAME | 
CODEC_CAP_DELAY | CODEC_CAP_LOSSLESS, .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("FLAC (Free Lossless Audio Codec)"), diff --git a/libavcodec/flashsv.c b/libavcodec/flashsv.c index a57e851078..6b649bb874 100644 --- a/libavcodec/flashsv.c +++ b/libavcodec/flashsv.c @@ -25,35 +25,29 @@ * Flash Screen Video decoder * @author Alex Beregszaszi * @author Benjamin Larsson - */ - -/* Bitstream description - * The picture is divided into blocks that are zlib compressed. - * - * The decoder is fed complete frames, the frameheader contains: - * 4bits of block width - * 12bits of frame width - * 4bits of block height - * 12bits of frame height + * @author Daniel Verkamp + * @author Konstantin Shishkov * - * Directly after the header are the compressed blocks. The blocks - * have their compressed size represented with 16bits in the beginnig. - * If the size = 0 then the block is unchanged from the previous frame. - * All blocks are decompressed until the buffer is consumed. - * - * Encoding ideas, a basic encoder would just use a fixed block size. - * Block sizes can be multipels of 16, from 16 to 256. The blocks don't - * have to be quadratic. A brute force search with a set of diffrent - * block sizes should give a better result then to just use a fixed size. + * A description of the bitstream format for Flash Screen Video version 1/2 + * is part of the SWF File Format Specification (version 10), which can be + * downloaded from http://www.adobe.com/devnet/swf.html. */ #include <stdio.h> #include <stdlib.h> #include <zlib.h> +#include "libavutil/intreadwrite.h" #include "avcodec.h" +#include "bytestream.h" #include "get_bits.h" +typedef struct BlockInfo { + uint8_t *pos; + int size; + int unp_size; +} BlockInfo; + typedef struct FlashSVContext { AVCodecContext *avctx; AVFrame frame; @@ -62,21 +56,50 @@ typedef struct FlashSVContext { uint8_t *tmpblock; int block_size; z_stream zstream; + int ver; + const uint32_t *pal; + int is_keyframe; + uint8_t *keyframedata; + uint8_t *keyframe; + BlockInfo *blocks; + uint8_t *deflate_block; + int deflate_block_size; + int color_depth; + int zlibprime_curr, zlibprime_prev; + int diff_start, diff_height; } FlashSVContext; -static void copy_region(uint8_t *sptr, uint8_t *dptr, - int dx, int dy, int h, int w, int stride) +static int decode_hybrid(const uint8_t *sptr, uint8_t *dptr, int dx, int dy, + int h, int w, int stride, const uint32_t *pal) { - int i; - - for (i = dx + h; i > dx; i--) { - memcpy(dptr + (i * stride) + dy * 3, sptr, w * 3); - sptr += w * 3; + int x, y; + const uint8_t *orig_src = sptr; + + for (y = dx+h; y > dx; y--) { + uint8_t *dst = dptr + (y * stride) + dy * 3; + for (x = 0; x < w; x++) { + if (*sptr & 0x80) { + /* 15-bit color */ + unsigned c = AV_RB16(sptr) & ~0x8000; + unsigned b = c & 0x1F; + unsigned g = (c >> 5) & 0x1F; + unsigned r = c >> 10; + /* 000aaabb -> aaabbaaa */ + *dst++ = (b << 3) | (b >> 2); + *dst++ = (g << 3) | (g >> 2); + *dst++ = (r << 3) | (r >> 2); + sptr += 2; + } else { + /* palette index */ + uint32_t c = pal[*sptr++]; + bytestream_put_le24(&dst, c); + } + } } + return sptr - orig_src; } - static av_cold int flashsv_decode_init(AVCodecContext *avctx) { FlashSVContext *s = avctx->priv_data; @@ -86,7 +109,7 @@ static av_cold int flashsv_decode_init(AVCodecContext *avctx) s->zstream.zalloc = Z_NULL; s->zstream.zfree = Z_NULL; s->zstream.opaque = Z_NULL; - zret = inflateInit(&(s->zstream)); + zret = inflateInit(&s->zstream); if (zret != Z_OK) { 
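decode_hybrid() above widens each 5-bit colour channel with (c << 3) | (c >> 2); a tiny self-contained check of that bit-replication step (the "000aaabb -> aaabbaaa" comment), independent of the decoder:

#include <assert.h>
#include <stdint.h>

/* Expand a 5-bit channel value (0..31) to 8 bits by replicating the top
 * bits into the low bits, so 0 maps to 0 and 31 maps to 255 exactly. */
static uint8_t expand5(uint8_t c5)
{
    return (uint8_t)((c5 << 3) | (c5 >> 2));
}

int main(void)
{
    assert(expand5( 0) ==   0);
    assert(expand5(16) == 132);   /* 0b10000 -> 0b10000100 */
    assert(expand5(31) == 255);
    return 0;
}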
av_log(avctx, AV_LOG_ERROR, "Inflate init error: %d\n", zret); return 1; @@ -99,10 +122,114 @@ static av_cold int flashsv_decode_init(AVCodecContext *avctx) } +static void flashsv2_prime(FlashSVContext *s, uint8_t *src, + int size, int unp_size) +{ + z_stream zs; + + zs.zalloc = NULL; + zs.zfree = NULL; + zs.opaque = NULL; + + s->zstream.next_in = src; + s->zstream.avail_in = size; + s->zstream.next_out = s->tmpblock; + s->zstream.avail_out = s->block_size * 3; + inflate(&s->zstream, Z_SYNC_FLUSH); + + deflateInit(&zs, 0); + zs.next_in = s->tmpblock; + zs.avail_in = s->block_size * 3 - s->zstream.avail_out; + zs.next_out = s->deflate_block; + zs.avail_out = s->deflate_block_size; + deflate(&zs, Z_SYNC_FLUSH); + deflateEnd(&zs); + + inflateReset(&s->zstream); + + s->zstream.next_in = s->deflate_block; + s->zstream.avail_in = s->deflate_block_size - zs.avail_out; + s->zstream.next_out = s->tmpblock; + s->zstream.avail_out = s->block_size * 3; + inflate(&s->zstream, Z_SYNC_FLUSH); +} + +static int flashsv_decode_block(AVCodecContext *avctx, AVPacket *avpkt, + GetBitContext *gb, int block_size, + int width, int height, int x_pos, int y_pos, + int blk_idx) +{ + struct FlashSVContext *s = avctx->priv_data; + uint8_t *line = s->tmpblock; + int k; + int ret = inflateReset(&s->zstream); + if (ret != Z_OK) { + //return -1; + } + if (s->zlibprime_curr || s->zlibprime_prev) { + flashsv2_prime(s, s->blocks[blk_idx].pos, s->blocks[blk_idx].size, + s->blocks[blk_idx].unp_size); + } + s->zstream.next_in = avpkt->data + get_bits_count(gb) / 8; + s->zstream.avail_in = block_size; + s->zstream.next_out = s->tmpblock; + s->zstream.avail_out = s->block_size * 3; + ret = inflate(&s->zstream, Z_FINISH); + if (ret == Z_DATA_ERROR) { + av_log(avctx, AV_LOG_ERROR, "Zlib resync occurred\n"); + inflateSync(&s->zstream); + ret = inflate(&s->zstream, Z_FINISH); + } + + if (ret != Z_OK && ret != Z_STREAM_END) { + //return -1; + } + + if (s->is_keyframe) { + s->blocks[blk_idx].pos = s->keyframedata + (get_bits_count(gb) / 8); + s->blocks[blk_idx].size = block_size; + s->blocks[blk_idx].unp_size = s->block_size * 3 - s->zstream.avail_out; + } + if (!s->color_depth) { + /* Flash Screen Video stores the image upside down, so copy + * lines to destination in reverse order. 
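flashsv2_prime() above restores an inflater's state by wrapping already-known plaintext in a level-0 (stored) deflate stream and running it back through inflate(); a simplified sketch of just that priming step, with buffer handling reduced to bare essentials (not the exact code used above):

#include <stdint.h>
#include <string.h>
#include <zlib.h>

/* Fill the 32 KB sliding window of an already-initialized inflater with
 * `data` by re-wrapping it as a stored deflate stream: level 0 keeps the
 * bytes verbatim, and Z_SYNC_FLUSH lets both ends emit/accept a partial
 * stream.  The inflate output itself is discarded; only the window state
 * that future blocks will reference matters. */
static int prime_inflater(z_stream *inf, const uint8_t *data, unsigned size,
                          uint8_t *scratch, unsigned scratch_size,
                          uint8_t *sink, unsigned sink_size)
{
    z_stream def;
    unsigned wrapped;

    memset(&def, 0, sizeof(def));
    if (deflateInit(&def, 0) != Z_OK)   /* level 0: stored blocks only */
        return -1;
    def.next_in   = (uint8_t *)data;
    def.avail_in  = size;
    def.next_out  = scratch;
    def.avail_out = scratch_size;
    deflate(&def, Z_SYNC_FLUSH);
    wrapped = scratch_size - def.avail_out;
    deflateEnd(&def);

    inf->next_in   = scratch;
    inf->avail_in  = wrapped;
    inf->next_out  = sink;
    inf->avail_out = sink_size;
    return inflate(inf, Z_SYNC_FLUSH) == Z_OK ? 0 : -1;
}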
*/ + for (k = 1; k <= s->diff_height; k++) { + memcpy(s->frame.data[0] + x_pos * 3 + + (s->image_height - y_pos - s->diff_start - k) * s->frame.linesize[0], + line, width * 3); + /* advance source pointer to next line */ + line += width * 3; + } + } else { + /* hybrid 15-bit/palette mode */ + decode_hybrid(s->tmpblock, s->frame.data[0], + s->image_height - (y_pos + 1 + s->diff_start + s->diff_height), + x_pos, s->diff_height, width, + s->frame.linesize[0], s->pal); + } + skip_bits_long(gb, 8 * block_size); /* skip the consumed bits */ + return 0; +} + +static int calc_deflate_block_size(int tmpblock_size) +{ + z_stream zstream; + int size; + + zstream.zalloc = Z_NULL; + zstream.zfree = Z_NULL; + zstream.opaque = Z_NULL; + if (deflateInit(&zstream, 0) != Z_OK) + return -1; + size = deflateBound(&zstream, tmpblock_size); + deflateEnd(&zstream); + + return size; +} + static int flashsv_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { - const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; FlashSVContext *s = avctx->priv_data; int h_blocks, v_blocks, h_part, v_part, i, j; @@ -114,7 +241,7 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data, if (buf_size < 4) return -1; - init_get_bits(&gb, buf, buf_size * 8); + init_get_bits(&gb, avpkt->data, buf_size * 8); /* start to parse the bitstream */ s->block_width = 16 * (get_bits(&gb, 4) + 1); @@ -122,7 +249,19 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data, s->block_height = 16 * (get_bits(&gb, 4) + 1); s->image_height = get_bits(&gb, 12); - /* calculate amount of blocks and the size of the border blocks */ + if (s->ver == 2) { + skip_bits(&gb, 6); + if (get_bits1(&gb)) { + av_log_missing_feature(avctx, "iframe", 1); + return AVERROR_PATCHWELCOME; + } + if (get_bits1(&gb)) { + av_log_missing_feature(avctx, "custom palette", 1); + return AVERROR_PATCHWELCOME; + } + } + + /* calculate number of blocks and size of border (partial) blocks */ h_blocks = s->image_width / s->block_width; h_part = s->image_width % s->block_width; v_blocks = s->image_height / s->block_height; @@ -131,34 +270,61 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data, /* the block size could change between frames, make sure the buffer * is large enough, if not, get a larger one */ if (s->block_size < s->block_width * s->block_height) { - av_free(s->tmpblock); - if ((s->tmpblock = av_malloc(3 * s->block_width * s->block_height)) == NULL) { + int tmpblock_size = 3 * s->block_width * s->block_height; + + s->tmpblock = av_realloc(s->tmpblock, tmpblock_size); + if (!s->tmpblock) { av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n"); return AVERROR(ENOMEM); } + if (s->ver == 2) { + s->deflate_block_size = calc_deflate_block_size(tmpblock_size); + if (s->deflate_block_size <= 0) { + av_log(avctx, AV_LOG_ERROR, "Can't determine deflate buffer size.\n"); + return -1; + } + s->deflate_block = av_realloc(s->deflate_block, s->deflate_block_size); + if (!s->deflate_block) { + av_log(avctx, AV_LOG_ERROR, "Can't allocate deflate buffer.\n"); + return AVERROR(ENOMEM); + } + } } s->block_size = s->block_width * s->block_height; - /* init the image size once */ - if ((avctx->width == 0) && (avctx->height == 0)) { + /* initialize the image size once */ + if (avctx->width == 0 && avctx->height == 0) { avctx->width = s->image_width; avctx->height = s->image_height; } /* check for changes of image width and image height */ - if ((avctx->width != s->image_width) || (avctx->height != 
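The frame header parsed at the top of flashsv_decode_frame() packs two 4-bit block-size fields and two 12-bit dimensions into 32 bits; a small worked example of that layout using plain shifts (the header bytes are chosen arbitrarily to describe a 640x480 frame with 64x64 blocks):

#include <stdint.h>
#include <stdio.h>

/* Parse the 4-byte Flash Screen Video frame header laid out as
 * 4 bits (block width / 16 - 1), 12 bits frame width,
 * 4 bits (block height / 16 - 1), 12 bits frame height,
 * read MSB-first as in the GetBitContext code above. */
int main(void)
{
    const uint8_t hdr[4] = { 0x32, 0x80, 0x31, 0xE0 };
    unsigned v = (hdr[0] << 24) | (hdr[1] << 16) | (hdr[2] << 8) | hdr[3];

    unsigned block_width  = 16 * (((v >> 28) & 0xF) + 1);
    unsigned image_width  =        (v >> 16) & 0xFFF;
    unsigned block_height = 16 * (((v >> 12) & 0xF) + 1);
    unsigned image_height =         v        & 0xFFF;

    /* prints: 64x64 blocks, 640x480 frame */
    printf("%ux%u blocks, %ux%u frame\n",
           block_width, block_height, image_width, image_height);
    return 0;
}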
s->image_height)) { - av_log(avctx, AV_LOG_ERROR, "Frame width or height differs from first frames!\n"); - av_log(avctx, AV_LOG_ERROR, "fh = %d, fv %d vs ch = %d, cv = %d\n", avctx->height, - avctx->width, s->image_height, s->image_width); - return -1; + if (avctx->width != s->image_width || avctx->height != s->image_height) { + av_log(avctx, AV_LOG_ERROR, + "Frame width or height differs from first frames!\n"); + av_log(avctx, AV_LOG_ERROR, "fh = %d, fv %d vs ch = %d, cv = %d\n", + avctx->height, avctx->width, s->image_height, s->image_width); + return AVERROR_INVALIDDATA; } - av_log(avctx, AV_LOG_DEBUG, "image: %dx%d block: %dx%d num: %dx%d part: %dx%d\n", - s->image_width, s->image_height, s->block_width, s->block_height, - h_blocks, v_blocks, h_part, v_part); + /* we care for keyframes only in Screen Video v2 */ + s->is_keyframe = (avpkt->flags & AV_PKT_FLAG_KEY) && (s->ver == 2); + if (s->is_keyframe) { + s->keyframedata = av_realloc(s->keyframedata, avpkt->size); + memcpy(s->keyframedata, avpkt->data, avpkt->size); + s->blocks = av_realloc(s->blocks, + (v_blocks + !!v_part) * (h_blocks + !!h_part) + * sizeof(s->blocks[0])); + } - s->frame.reference = 1; - s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; + av_dlog(avctx, "image: %dx%d block: %dx%d num: %dx%d part: %dx%d\n", + s->image_width, s->image_height, s->block_width, s->block_height, + h_blocks, v_blocks, h_part, v_part); + + s->frame.reference = 3; + s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | + FF_BUFFER_HINTS_PRESERVE | + FF_BUFFER_HINTS_REUSABLE; if (avctx->reget_buffer(avctx, &s->frame) < 0) { av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); return -1; @@ -167,53 +333,97 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data, /* loop over all block columns */ for (j = 0; j < v_blocks + (v_part ? 1 : 0); j++) { - int hp = j * s->block_height; // horiz position in frame - int hs = (j < v_blocks) ? s->block_height : v_part; // size of block - + int y_pos = j * s->block_height; // vertical position in frame + int cur_blk_height = (j < v_blocks) ? s->block_height : v_part; /* loop over all block rows */ for (i = 0; i < h_blocks + (h_part ? 1 : 0); i++) { - int wp = i * s->block_width; // vert position in frame - int ws = (i < h_blocks) ? s->block_width : h_part; // size of block + int x_pos = i * s->block_width; // horizontal position in frame + int cur_blk_width = (i < h_blocks) ? 
s->block_width : h_part; + int has_diff = 0; /* get the size of the compressed zlib chunk */ int size = get_bits(&gb, 16); + + s->color_depth = 0; + s->zlibprime_curr = 0; + s->zlibprime_prev = 0; + s->diff_start = 0; + s->diff_height = cur_blk_height; + if (8 * size > get_bits_left(&gb)) { avctx->release_buffer(avctx, &s->frame); s->frame.data[0] = NULL; - return -1; + return AVERROR_INVALIDDATA; } - if (size == 0) { - /* no change, don't do anything */ - } else { - /* decompress block */ - int ret = inflateReset(&(s->zstream)); - if (ret != Z_OK) { - av_log(avctx, AV_LOG_ERROR, "error in decompression (reset) of block %dx%d\n", i, j); - /* return -1; */ + if (s->ver == 2 && size) { + skip_bits(&gb, 3); + s->color_depth = get_bits(&gb, 2); + has_diff = get_bits1(&gb); + s->zlibprime_curr = get_bits1(&gb); + s->zlibprime_prev = get_bits1(&gb); + + if (s->color_depth != 0 && s->color_depth != 2) { + av_log(avctx, AV_LOG_ERROR, + "%dx%d invalid color depth %d\n", i, j, s->color_depth); + return -1; } - s->zstream.next_in = buf + (get_bits_count(&gb) / 8); - s->zstream.avail_in = size; - s->zstream.next_out = s->tmpblock; - s->zstream.avail_out = s->block_size * 3; - ret = inflate(&(s->zstream), Z_FINISH); - if (ret == Z_DATA_ERROR) { - av_log(avctx, AV_LOG_ERROR, "Zlib resync occurred\n"); - inflateSync(&(s->zstream)); - ret = inflate(&(s->zstream), Z_FINISH); + + if (has_diff) { + s->diff_start = get_bits(&gb, 8); + s->diff_height = get_bits(&gb, 8); + av_log(avctx, AV_LOG_DEBUG, + "%dx%d diff start %d height %d\n", + i, j, s->diff_start, s->diff_height); + size -= 2; } - if ((ret != Z_OK) && (ret != Z_STREAM_END)) { - av_log(avctx, AV_LOG_ERROR, "error in decompression of block %dx%d: %d\n", i, j, ret); - /* return -1; */ + if (s->zlibprime_prev) + av_log(avctx, AV_LOG_DEBUG, "%dx%d zlibprime_prev\n", i, j); + + if (s->zlibprime_curr) { + int col = get_bits(&gb, 8); + int row = get_bits(&gb, 8); + av_log(avctx, AV_LOG_DEBUG, "%dx%d zlibprime_curr %dx%d\n", i, j, col, row); + size -= 2; + av_log_missing_feature(avctx, "zlibprime_curr", 1); + return AVERROR_PATCHWELCOME; } - copy_region(s->tmpblock, s->frame.data[0], s->image_height - (hp + hs + 1), - wp, hs, ws, s->frame.linesize[0]); - skip_bits_long(&gb, 8 * size); /* skip the consumed bits */ + size--; // account for flags byte + } + + if (has_diff) { + int k; + int off = (s->image_height - y_pos - 1) * s->frame.linesize[0]; + + for (k = 0; k < cur_blk_height; k++) + memcpy(s->frame.data[0] + off - k*s->frame.linesize[0] + x_pos*3, + s->keyframe + off - k*s->frame.linesize[0] + x_pos*3, + cur_blk_width * 3); + } + + /* skip unchanged blocks, which have size 0 */ + if (size) { + if (flashsv_decode_block(avctx, avpkt, &gb, size, + cur_blk_width, cur_blk_height, + x_pos, y_pos, + i + j * (h_blocks + !!h_part))) + av_log(avctx, AV_LOG_ERROR, + "error in decompression of block %dx%d\n", i, j); } } } + if (s->is_keyframe && s->ver == 2) { + if (!s->keyframe) { + s->keyframe = av_malloc(s->frame.linesize[0] * avctx->height); + if (!s->keyframe) { + av_log(avctx, AV_LOG_ERROR, "Cannot allocate image data\n"); + return AVERROR(ENOMEM); + } + } + memcpy(s->keyframe, s->frame.data[0], s->frame.linesize[0] * avctx->height); + } *data_size = sizeof(AVFrame); *(AVFrame*)data = s->frame; @@ -230,7 +440,7 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data, static av_cold int flashsv_decode_end(AVCodecContext *avctx) { FlashSVContext *s = avctx->priv_data; - inflateEnd(&(s->zstream)); + inflateEnd(&s->zstream); /* release the frame if 
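For a non-empty block, the version-2 path above reads one flag byte bit by bit (3 reserved bits, a 2-bit colour depth, then has_diff and the two zlib-priming flags); the same byte decoded with plain shifts, as a standalone illustration (the example value is arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Decode the per-block flag byte that the Screen Video v2 path above reads
 * MSB-first: bits 7..5 reserved, bits 4..3 colour depth, bit 2 has_diff,
 * bit 1 zlibprime_curr, bit 0 zlibprime_prev. */
int main(void)
{
    uint8_t flags = 0x04;                 /* 000 00 1 0 0 */
    unsigned color_depth    = (flags >> 3) & 0x3;
    unsigned has_diff       = (flags >> 2) & 0x1;
    unsigned zlibprime_curr = (flags >> 1) & 0x1;
    unsigned zlibprime_prev =  flags       & 0x1;

    printf("depth=%u diff=%u prime_curr=%u prime_prev=%u\n",
           color_depth, has_diff, zlibprime_curr, zlibprime_prev);
    return 0;
}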
needed */ if (s->frame.data[0]) avctx->release_buffer(avctx, &s->frame); @@ -242,6 +452,7 @@ static av_cold int flashsv_decode_end(AVCodecContext *avctx) } +#if CONFIG_FLASHSV_DECODER AVCodec ff_flashsv_decoder = { .name = "flashsv", .type = AVMEDIA_TYPE_VIDEO, @@ -254,3 +465,67 @@ AVCodec ff_flashsv_decoder = { .pix_fmts = (const enum PixelFormat[]){PIX_FMT_BGR24, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("Flash Screen Video v1"), }; +#endif /* CONFIG_FLASHSV_DECODER */ + +#if CONFIG_FLASHSV2_DECODER +static const uint32_t ff_flashsv2_default_palette[128] = { + 0x000000, 0x333333, 0x666666, 0x999999, 0xCCCCCC, 0xFFFFFF, + 0x330000, 0x660000, 0x990000, 0xCC0000, 0xFF0000, 0x003300, + 0x006600, 0x009900, 0x00CC00, 0x00FF00, 0x000033, 0x000066, + 0x000099, 0x0000CC, 0x0000FF, 0x333300, 0x666600, 0x999900, + 0xCCCC00, 0xFFFF00, 0x003333, 0x006666, 0x009999, 0x00CCCC, + 0x00FFFF, 0x330033, 0x660066, 0x990099, 0xCC00CC, 0xFF00FF, + 0xFFFF33, 0xFFFF66, 0xFFFF99, 0xFFFFCC, 0xFF33FF, 0xFF66FF, + 0xFF99FF, 0xFFCCFF, 0x33FFFF, 0x66FFFF, 0x99FFFF, 0xCCFFFF, + 0xCCCC33, 0xCCCC66, 0xCCCC99, 0xCCCCFF, 0xCC33CC, 0xCC66CC, + 0xCC99CC, 0xCCFFCC, 0x33CCCC, 0x66CCCC, 0x99CCCC, 0xFFCCCC, + 0x999933, 0x999966, 0x9999CC, 0x9999FF, 0x993399, 0x996699, + 0x99CC99, 0x99FF99, 0x339999, 0x669999, 0xCC9999, 0xFF9999, + 0x666633, 0x666699, 0x6666CC, 0x6666FF, 0x663366, 0x669966, + 0x66CC66, 0x66FF66, 0x336666, 0x996666, 0xCC6666, 0xFF6666, + 0x333366, 0x333399, 0x3333CC, 0x3333FF, 0x336633, 0x339933, + 0x33CC33, 0x33FF33, 0x663333, 0x993333, 0xCC3333, 0xFF3333, + 0x003366, 0x336600, 0x660033, 0x006633, 0x330066, 0x663300, + 0x336699, 0x669933, 0x993366, 0x339966, 0x663399, 0x996633, + 0x6699CC, 0x99CC66, 0xCC6699, 0x66CC99, 0x9966CC, 0xCC9966, + 0x99CCFF, 0xCCFF99, 0xFF99CC, 0x99FFCC, 0xCC99FF, 0xFFCC99, + 0x111111, 0x222222, 0x444444, 0x555555, 0xAAAAAA, 0xBBBBBB, + 0xDDDDDD, 0xEEEEEE +}; + +static av_cold int flashsv2_decode_init(AVCodecContext *avctx) +{ + FlashSVContext *s = avctx->priv_data; + flashsv_decode_init(avctx); + s->pal = ff_flashsv2_default_palette; + s->ver = 2; + + return 0; +} + +static av_cold int flashsv2_decode_end(AVCodecContext *avctx) +{ + FlashSVContext *s = avctx->priv_data; + + av_freep(&s->keyframedata); + av_freep(&s->blocks); + av_freep(&s->keyframe); + av_freep(&s->deflate_block); + flashsv_decode_end(avctx); + + return 0; +} + +AVCodec ff_flashsv2_decoder = { + .name = "flashsv2", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_FLASHSV2, + .priv_data_size = sizeof(FlashSVContext), + .init = flashsv2_decode_init, + .close = flashsv2_decode_end, + .decode = flashsv_decode_frame, + .capabilities = CODEC_CAP_DR1, + .pix_fmts = (const enum PixelFormat[]){PIX_FMT_BGR24, PIX_FMT_NONE}, + .long_name = NULL_IF_CONFIG_SMALL("Flash Screen Video v2"), +}; +#endif /* CONFIG_FLASHSV2_DECODER */ diff --git a/libavcodec/flashsvenc.c b/libavcodec/flashsvenc.c index c0327a911b..7e21e7d534 100644 --- a/libavcodec/flashsvenc.c +++ b/libavcodec/flashsvenc.c @@ -27,31 +27,21 @@ * Flash Screen Video encoder * @author Alex Beregszaszi * @author Benjamin Larsson + * + * A description of the bitstream format for Flash Screen Video version 1/2 + * is part of the SWF File Format Specification (version 10), which can be + * downloaded from http://www.adobe.com/devnet/swf.html. */ -/* Bitstream description - * The picture is divided into blocks that are zlib-compressed. 
- * - * The decoder is fed complete frames, the frameheader contains: - * 4bits of block width - * 12bits of frame width - * 4bits of block height - * 12bits of frame height - * - * Directly after the header are the compressed blocks. The blocks - * have their compressed size represented with 16bits in the beginig. - * If the size = 0 then the block is unchanged from the previous frame. - * All blocks are decompressed until the buffer is consumed. - * - * Encoding ideas, a basic encoder would just use a fixed block size. - * Block sizes can be multipels of 16, from 16 to 256. The blocks don't +/* + * Encoding ideas: A basic encoder would just use a fixed block size. + * Block sizes can be multiples of 16, from 16 to 256. The blocks don't * have to be quadratic. A brute force search with a set of different * block sizes should give a better result than to just use a fixed size. - */ - -/* TODO: - * Don't reencode the frame in brute force mode if the frame is a dupe. Speed up. - * Make the difference check faster. + * + * TODO: + * Don't reencode the frame in brute force mode if the frame is a dupe. + * Speed up. Make the difference check faster. */ #include <stdio.h> @@ -85,8 +75,8 @@ static int copy_region_enc(uint8_t *sptr, uint8_t *dptr, int dx, int dy, int diff = 0; for (i = dx + h; i > dx; i--) { - nsptr = sptr + (i * stride) + dy * 3; - npfptr = pfptr + (i * stride) + dy * 3; + nsptr = sptr + i * stride + dy * 3; + npfptr = pfptr + i * stride + dy * 3; for (j = 0; j < w * 3; j++) { diff |= npfptr[j] ^ nsptr[j]; dptr[j] = nsptr[j]; @@ -104,13 +94,14 @@ static av_cold int flashsv_encode_init(AVCodecContext *avctx) s->avctx = avctx; - if ((avctx->width > 4095) || (avctx->height > 4095)) { - av_log(avctx, AV_LOG_ERROR, "Input dimensions too large, input must be max 4096x4096 !\n"); + if (avctx->width > 4095 || avctx->height > 4095) { + av_log(avctx, AV_LOG_ERROR, + "Input dimensions too large, input must be max 4096x4096 !\n"); return AVERROR_INVALIDDATA; } // Needed if zlib unused or init aborted before deflateInit - memset(&(s->zstream), 0, sizeof(z_stream)); + memset(&s->zstream, 0, sizeof(z_stream)); s->last_key_frame = 0; @@ -141,9 +132,9 @@ static int encode_bitstream(FlashSVContext *s, AVFrame *p, uint8_t *buf, init_put_bits(&pb, buf, buf_size * 8); - put_bits(&pb, 4, (block_width / 16) - 1); + put_bits(&pb, 4, block_width / 16 - 1); put_bits(&pb, 12, s->image_width); - put_bits(&pb, 4, (block_height / 16) - 1); + put_bits(&pb, 4, block_height / 16 - 1); put_bits(&pb, 12, s->image_height); flush_put_bits(&pb); buf_pos = 4; @@ -156,37 +147,36 @@ static int encode_bitstream(FlashSVContext *s, AVFrame *p, uint8_t *buf, /* loop over all block columns */ for (j = 0; j < v_blocks + (v_part ? 1 : 0); j++) { - int hp = j * block_height; // horiz position in frame - int hs = (j < v_blocks) ? block_height : v_part; // size of block + int y_pos = j * block_height; // vertical position in frame + int cur_blk_height = (j < v_blocks) ? block_height : v_part; /* loop over all block rows */ for (i = 0; i < h_blocks + (h_part ? 1 : 0); i++) { - int wp = i * block_width; // vert position in frame - int ws = (i < h_blocks) ? block_width : h_part; // size of block + int x_pos = i * block_width; // horizontal position in frame + int cur_blk_width = (i < h_blocks) ? 
block_width : h_part; int ret = Z_OK; - uint8_t *ptr; - - ptr = buf + buf_pos; + uint8_t *ptr = buf + buf_pos; /* copy the block to the temp buffer before compression * (if it differs from the previous frame's block) */ res = copy_region_enc(p->data[0], s->tmpblock, - s->image_height - (hp + hs + 1), - wp, hs, ws, p->linesize[0], previous_frame); + s->image_height - (y_pos + cur_blk_height + 1), + x_pos, cur_blk_height, cur_blk_width, + p->linesize[0], previous_frame); if (res || *I_frame) { - unsigned long zsize; - zsize = 3 * block_width * block_height; - ret = compress2(ptr + 2, &zsize, s->tmpblock, 3 * ws * hs, 9); - + unsigned long zsize = 3 * block_width * block_height; + ret = compress2(ptr + 2, &zsize, s->tmpblock, + 3 * cur_blk_width * cur_blk_height, 9); - //ret = deflateReset(&(s->zstream)); + //ret = deflateReset(&s->zstream); if (ret != Z_OK) - av_log(s->avctx, AV_LOG_ERROR, "error while compressing block %dx%d\n", i, j); + av_log(s->avctx, AV_LOG_ERROR, + "error while compressing block %dx%d\n", i, j); - bytestream_put_be16(&ptr, (unsigned int) zsize); + bytestream_put_be16(&ptr, zsize); buf_pos += zsize + 2; - //av_log(avctx, AV_LOG_ERROR, "buf_pos = %d\n", buf_pos); + av_dlog(s->avctx, "buf_pos = %d\n", buf_pos); } else { pred_blocks++; bytestream_put_be16(&ptr, 0); @@ -213,7 +203,7 @@ static int flashsv_encode_frame(AVCodecContext *avctx, uint8_t *buf, uint8_t *pfptr; int res; int I_frame = 0; - int opt_w, opt_h; + int opt_w = 4, opt_h = 4; *p = *pict; @@ -228,42 +218,40 @@ static int flashsv_encode_frame(AVCodecContext *avctx, uint8_t *buf, } if (p->linesize[0] < 0) - pfptr = s->previous_frame - ((s->image_height - 1) * p->linesize[0]); + pfptr = s->previous_frame - (s->image_height - 1) * p->linesize[0]; else pfptr = s->previous_frame; /* Check the placement of keyframes */ - if (avctx->gop_size > 0) { - if (avctx->frame_number >= s->last_key_frame + avctx->gop_size) { - I_frame = 1; - } + if (avctx->gop_size > 0 && + avctx->frame_number >= s->last_key_frame + avctx->gop_size) { + I_frame = 1; } - opt_w = 4; - opt_h = 4; - - if (buf_size < s->image_width*s->image_height*3) { + if (buf_size < s->image_width * s->image_height * 3) { //Conservative upper bound check for compressed data av_log(avctx, AV_LOG_ERROR, "buf_size %d < %d\n", buf_size, s->image_width * s->image_height * 3); return -1; } - res = encode_bitstream(s, p, buf, buf_size, opt_w * 16, opt_h * 16, pfptr, &I_frame); + res = encode_bitstream(s, p, buf, buf_size, opt_w * 16, opt_h * 16, + pfptr, &I_frame); //save the current frame if (p->linesize[0] > 0) memcpy(s->previous_frame, p->data[0], s->image_height * p->linesize[0]); else - memcpy(s->previous_frame, p->data[0] + p->linesize[0] * (s->image_height - 1), + memcpy(s->previous_frame, + p->data[0] + p->linesize[0] * (s->image_height - 1), s->image_height * FFABS(p->linesize[0])); //mark the frame type so the muxer can mux it correctly if (I_frame) { - p->pict_type = AV_PICTURE_TYPE_I; - p->key_frame = 1; + p->pict_type = AV_PICTURE_TYPE_I; + p->key_frame = 1; s->last_key_frame = avctx->frame_number; - av_log(avctx, AV_LOG_DEBUG, "Inserting key frame at frame %d\n", avctx->frame_number); + av_dlog(avctx, "Inserting keyframe at frame %d\n", avctx->frame_number); } else { p->pict_type = AV_PICTURE_TYPE_P; p->key_frame = 0; @@ -278,7 +266,7 @@ static av_cold int flashsv_encode_end(AVCodecContext *avctx) { FlashSVContext *s = avctx->priv_data; - deflateEnd(&(s->zstream)); + deflateEnd(&s->zstream); av_free(s->encbuffer); av_free(s->previous_frame); diff --git 
a/libavcodec/flicvideo.c b/libavcodec/flicvideo.c index 8cc72e241e..be1ea50f75 100644 --- a/libavcodec/flicvideo.c +++ b/libavcodec/flicvideo.c @@ -112,7 +112,6 @@ static av_cold int flic_decode_init(AVCodecContext *avctx) case 24 : avctx->pix_fmt = PIX_FMT_BGR24; /* Supposedly BGR, but havent any files to test with */ av_log(avctx, AV_LOG_ERROR, "24Bpp FLC/FLX is unsupported due to no test files.\n"); return -1; - break; default : av_log(avctx, AV_LOG_ERROR, "Unknown FLC/FLX depth of %d Bpp is unsupported.\n",depth); return -1; @@ -749,18 +748,13 @@ static av_cold int flic_decode_end(AVCodecContext *avctx) } AVCodec ff_flic_decoder = { - "flic", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_FLIC, - sizeof(FlicDecodeContext), - flic_decode_init, - NULL, - flic_decode_end, - flic_decode_frame, - CODEC_CAP_DR1, - NULL, - NULL, - NULL, - NULL, + .name = "flic", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_FLIC, + .priv_data_size = sizeof(FlicDecodeContext), + .init = flic_decode_init, + .close = flic_decode_end, + .decode = flic_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Autodesk Animator Flic video"), }; diff --git a/libavcodec/flvdec.c b/libavcodec/flvdec.c index 2a6694403b..7337107469 100644 --- a/libavcodec/flvdec.c +++ b/libavcodec/flvdec.c @@ -119,15 +119,14 @@ int ff_flv_decode_picture_header(MpegEncContext *s) } AVCodec ff_flv_decoder = { - "flv", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_FLV1, - sizeof(MpegEncContext), - ff_h263_decode_init, - NULL, - ff_h263_decode_end, - ff_h263_decode_frame, - CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1, + .name = "flv", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_FLV1, + .priv_data_size = sizeof(MpegEncContext), + .init = ff_h263_decode_init, + .close = ff_h263_decode_end, + .decode = ff_h263_decode_frame, + .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1, .max_lowres= 3, .long_name= NULL_IF_CONFIG_SMALL("Flash Video (FLV) / Sorenson Spark / Sorenson H.263"), .pix_fmts= ff_pixfmt_list_420, diff --git a/libavcodec/flvenc.c b/libavcodec/flvenc.c index fc8c2a474f..99caa18d2d 100644 --- a/libavcodec/flvenc.c +++ b/libavcodec/flvenc.c @@ -85,13 +85,13 @@ void ff_flv2_encode_ac_esc(PutBitContext *pb, int slevel, int level, int run, in } AVCodec ff_flv_encoder = { - "flv", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_FLV1, - sizeof(MpegEncContext), - MPV_encode_init, - MPV_encode_picture, - MPV_encode_end, + .name = "flv", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_FLV1, + .priv_data_size = sizeof(MpegEncContext), + .init = MPV_encode_init, + .encode = MPV_encode_picture, + .close = MPV_encode_end, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("Flash Video (FLV) / Sorenson Spark / Sorenson H.263"), }; diff --git a/libavcodec/fraps.c b/libavcodec/fraps.c index 7e96b0d312..aad8731028 100644 --- a/libavcodec/fraps.c +++ b/libavcodec/fraps.c @@ -361,14 +361,13 @@ static av_cold int decode_end(AVCodecContext *avctx) AVCodec ff_fraps_decoder = { - "fraps", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_FRAPS, - sizeof(FrapsContext), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, + .name = "fraps", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_FRAPS, + .priv_data_size = sizeof(FrapsContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Fraps"), }; diff --git a/libavcodec/frwu.c b/libavcodec/frwu.c index 08dfbf0c9b..2a2146d601 100644 --- a/libavcodec/frwu.c +++ b/libavcodec/frwu.c 
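The recurring change in the libavcodec hunks here is mechanical: each AVCodec definition is converted from positional aggregate initialization to C99 designated initializers, so the NULL placeholders for unused callbacks disappear and every field is set by name. A minimal sketch of the target form, using hypothetical example_* names and an ExampleContext struct that do not correspond to anything in the tree:

AVCodec ff_example_decoder = {
    .name           = "example",                /* codec short name */
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_NONE,            /* placeholder id, illustration only */
    .priv_data_size = sizeof(ExampleContext),   /* hypothetical private context */
    .init           = example_decode_init,
    .close          = example_decode_end,
    .decode         = example_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("Example"),
};

Fields a given codec does not use (for instance .encode in a pure decoder) are simply omitted, which is why the converted declarations end up shorter than the positional versions they replace.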
@@ -110,14 +110,12 @@ static av_cold int decode_close(AVCodecContext *avctx) } AVCodec ff_frwu_decoder = { - "frwu", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_FRWU, - 0, - decode_init, - NULL, - decode_close, - decode_frame, - CODEC_CAP_DR1, + .name = "FRWU", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_FRWU, + .init = decode_init, + .close = decode_close, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Forward Uncompressed"), }; diff --git a/libavcodec/g722.c b/libavcodec/g722.c index f00fa6a0e2..30c6f5313c 100644 --- a/libavcodec/g722.c +++ b/libavcodec/g722.c @@ -26,7 +26,6 @@ /** * @file - * * G.722 ADPCM audio codec * * This G.722 decoder is a bit-exact implementation of the ITU G.722 diff --git a/libavcodec/g726.c b/libavcodec/g726.c index 30bd96fcc9..2ce113b24b 100644 --- a/libavcodec/g726.c +++ b/libavcodec/g726.c @@ -392,14 +392,13 @@ static int g726_decode_frame(AVCodecContext *avctx, #if CONFIG_ADPCM_G726_ENCODER AVCodec ff_adpcm_g726_encoder = { - "g726", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_ADPCM_G726, - sizeof(G726Context), - g726_init, - g726_encode_frame, - g726_close, - NULL, + .name = "g726", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_ADPCM_G726, + .priv_data_size = sizeof(G726Context), + .init = g726_init, + .encode = g726_encode_frame, + .close = g726_close, .capabilities = CODEC_CAP_SMALL_LAST_FRAME, .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("G.726 ADPCM"), @@ -407,13 +406,12 @@ AVCodec ff_adpcm_g726_encoder = { #endif AVCodec ff_adpcm_g726_decoder = { - "g726", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_ADPCM_G726, - sizeof(G726Context), - g726_init, - NULL, - g726_close, - g726_decode_frame, + .name = "g726", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_ADPCM_G726, + .priv_data_size = sizeof(G726Context), + .init = g726_init, + .close = g726_close, + .decode = g726_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("G.726 ADPCM"), }; diff --git a/libavcodec/g729data.h b/libavcodec/g729data.h deleted file mode 100644 index 1d64553f20..0000000000 --- a/libavcodec/g729data.h +++ /dev/null @@ -1,278 +0,0 @@ -/* - * data for G.729 decoder - * Copyright (c) 2007 Vladimir Voroshilov - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_G729DATA_H -#define AVCODEC_G729DATA_H - -#include <stdint.h> - -#define MA_NP 4 ///< Moving Average (MA) prediction order - -#define VQ_1ST_BITS 7 ///< first stage vector of quantizer (size in bits) -#define VQ_2ND_BITS 5 ///< second stage vector of quantizer (size in bits) - -#define GC_1ST_IDX_BITS_8K 3 ///< gain codebook (first stage) index, 8k mode (size in bits) -#define GC_2ND_IDX_BITS_8K 4 ///< gain codebook (second stage) index, 8k mode (size in bits) - -#define GC_1ST_IDX_BITS_6K4 3 ///< gain codebook (first stage) index, 6.4k mode (size in bits) -#define GC_2ND_IDX_BITS_6K4 3 ///< gain codebook (second stage) index, 6.4k mode (size in bits) - -/** - * first stage LSP codebook - * (10-dimensional, with 128 entries (3.24 of G.729) - */ -static const int16_t cb_lsp_1st[1<<VQ_1ST_BITS][10] = { /* (2.13) */ - { 1486, 2168, 3751, 9074, 12134, 13944, 17983, 19173, 21190, 21820}, - { 1730, 2640, 3450, 4870, 6126, 7876, 15644, 17817, 20294, 21902}, - { 1568, 2256, 3088, 4874, 11063, 13393, 18307, 19293, 21109, 21741}, - { 1733, 2512, 3357, 4708, 6977, 10296, 17024, 17956, 19145, 20350}, - { 1744, 2436, 3308, 8731, 10432, 12007, 15614, 16639, 21359, 21913}, - { 1786, 2369, 3372, 4521, 6795, 12963, 17674, 18988, 20855, 21640}, - { 1631, 2433, 3361, 6328, 10709, 12013, 13277, 13904, 19441, 21088}, - { 1489, 2364, 3291, 6250, 9227, 10403, 13843, 15278, 17721, 21451}, - { 1869, 2533, 3475, 4365, 9152, 14513, 15908, 17022, 20611, 21411}, - { 2070, 3025, 4333, 5854, 7805, 9231, 10597, 16047, 20109, 21834}, - { 1910, 2673, 3419, 4261, 11168, 15111, 16577, 17591, 19310, 20265}, - { 1141, 1815, 2624, 4623, 6495, 9588, 13968, 16428, 19351, 21286}, - { 2192, 3171, 4707, 5808, 10904, 12500, 14162, 15664, 21124, 21789}, - { 1286, 1907, 2548, 3453, 9574, 11964, 15978, 17344, 19691, 22495}, - { 1921, 2720, 4604, 6684, 11503, 12992, 14350, 15262, 16997, 20791}, - { 2052, 2759, 3897, 5246, 6638, 10267, 15834, 16814, 18149, 21675}, - { 1798, 2497, 5617, 11449, 13189, 14711, 17050, 18195, 20307, 21182}, - { 1009, 1647, 2889, 5709, 9541, 12354, 15231, 18494, 20966, 22033}, - { 3016, 3794, 5406, 7469, 12488, 13984, 15328, 16334, 19952, 20791}, - { 2203, 3040, 3796, 5442, 11987, 13512, 14931, 16370, 17856, 18803}, - { 2912, 4292, 7988, 9572, 11562, 13244, 14556, 16529, 20004, 21073}, - { 2861, 3607, 5923, 7034, 9234, 12054, 13729, 18056, 20262, 20974}, - { 3069, 4311, 5967, 7367, 11482, 12699, 14309, 16233, 18333, 19172}, - { 2434, 3661, 4866, 5798, 10383, 11722, 13049, 15668, 18862, 19831}, - { 2020, 2605, 3860, 9241, 13275, 14644, 16010, 17099, 19268, 20251}, - { 1877, 2809, 3590, 4707, 11056, 12441, 15622, 17168, 18761, 19907}, - { 2107, 2873, 3673, 5799, 13579, 14687, 15938, 17077, 18890, 19831}, - { 1612, 2284, 2944, 3572, 8219, 13959, 15924, 17239, 18592, 20117}, - { 2420, 3156, 6542, 10215, 12061, 13534, 15305, 16452, 18717, 19880}, - { 1667, 2612, 3534, 5237, 10513, 11696, 12940, 16798, 18058, 19378}, - { 2388, 3017, 4839, 9333, 11413, 12730, 15024, 16248, 17449, 18677}, - { 1875, 2786, 4231, 6320, 8694, 10149, 11785, 17013, 18608, 19960}, - { 679, 1411, 4654, 8006, 11446, 13249, 15763, 18127, 20361, 21567}, - { 1838, 2596, 3578, 4608, 5650, 11274, 14355, 15886, 20579, 21754}, - { 1303, 1955, 2395, 3322, 12023, 13764, 15883, 18077, 20180, 21232}, - { 1438, 2102, 
2663, 3462, 8328, 10362, 13763, 17248, 19732, 22344}, - { 860, 1904, 6098, 7775, 9815, 12007, 14821, 16709, 19787, 21132}, - { 1673, 2723, 3704, 6125, 7668, 9447, 13683, 14443, 20538, 21731}, - { 1246, 1849, 2902, 4508, 7221, 12710, 14835, 16314, 19335, 22720}, - { 1525, 2260, 3862, 5659, 7342, 11748, 13370, 14442, 18044, 21334}, - { 1196, 1846, 3104, 7063, 10972, 12905, 14814, 17037, 19922, 22636}, - { 2147, 3106, 4475, 6511, 8227, 9765, 10984, 12161, 18971, 21300}, - { 1585, 2405, 2994, 4036, 11481, 13177, 14519, 15431, 19967, 21275}, - { 1778, 2688, 3614, 4680, 9465, 11064, 12473, 16320, 19742, 20800}, - { 1862, 2586, 3492, 6719, 11708, 13012, 14364, 16128, 19610, 20425}, - { 1395, 2156, 2669, 3386, 10607, 12125, 13614, 16705, 18976, 21367}, - { 1444, 2117, 3286, 6233, 9423, 12981, 14998, 15853, 17188, 21857}, - { 2004, 2895, 3783, 4897, 6168, 7297, 12609, 16445, 19297, 21465}, - { 1495, 2863, 6360, 8100, 11399, 14271, 15902, 17711, 20479, 22061}, - { 2484, 3114, 5718, 7097, 8400, 12616, 14073, 14847, 20535, 21396}, - { 2424, 3277, 5296, 6284, 11290, 12903, 16022, 17508, 19333, 20283}, - { 2565, 3778, 5360, 6989, 8782, 10428, 14390, 15742, 17770, 21734}, - { 2727, 3384, 6613, 9254, 10542, 12236, 14651, 15687, 20074, 21102}, - { 1916, 2953, 6274, 8088, 9710, 10925, 12392, 16434, 20010, 21183}, - { 3384, 4366, 5349, 7667, 11180, 12605, 13921, 15324, 19901, 20754}, - { 3075, 4283, 5951, 7619, 9604, 11010, 12384, 14006, 20658, 21497}, - { 1751, 2455, 5147, 9966, 11621, 13176, 14739, 16470, 20788, 21756}, - { 1442, 2188, 3330, 6813, 8929, 12135, 14476, 15306, 19635, 20544}, - { 2294, 2895, 4070, 8035, 12233, 13416, 14762, 17367, 18952, 19688}, - { 1937, 2659, 4602, 6697, 9071, 12863, 14197, 15230, 16047, 18877}, - { 2071, 2663, 4216, 9445, 10887, 12292, 13949, 14909, 19236, 20341}, - { 1740, 2491, 3488, 8138, 9656, 11153, 13206, 14688, 20896, 21907}, - { 2199, 2881, 4675, 8527, 10051, 11408, 14435, 15463, 17190, 20597}, - { 1943, 2988, 4177, 6039, 7478, 8536, 14181, 15551, 17622, 21579}, - { 1825, 3175, 7062, 9818, 12824, 15450, 18330, 19856, 21830, 22412}, - { 2464, 3046, 4822, 5977, 7696, 15398, 16730, 17646, 20588, 21320}, - { 2550, 3393, 5305, 6920, 10235, 14083, 18143, 19195, 20681, 21336}, - { 3003, 3799, 5321, 6437, 7919, 11643, 15810, 16846, 18119, 18980}, - { 3455, 4157, 6838, 8199, 9877, 12314, 15905, 16826, 19949, 20892}, - { 3052, 3769, 4891, 5810, 6977, 10126, 14788, 15990, 19773, 20904}, - { 3671, 4356, 5827, 6997, 8460, 12084, 14154, 14939, 19247, 20423}, - { 2716, 3684, 5246, 6686, 8463, 10001, 12394, 14131, 16150, 19776}, - { 1945, 2638, 4130, 7995, 14338, 15576, 17057, 18206, 20225, 20997}, - { 2304, 2928, 4122, 4824, 5640, 13139, 15825, 16938, 20108, 21054}, - { 1800, 2516, 3350, 5219, 13406, 15948, 17618, 18540, 20531, 21252}, - { 1436, 2224, 2753, 4546, 9657, 11245, 15177, 16317, 17489, 19135}, - { 2319, 2899, 4980, 6936, 8404, 13489, 15554, 16281, 20270, 20911}, - { 2187, 2919, 4610, 5875, 7390, 12556, 14033, 16794, 20998, 21769}, - { 2235, 2923, 5121, 6259, 8099, 13589, 15340, 16340, 17927, 20159}, - { 1765, 2638, 3751, 5730, 7883, 10108, 13633, 15419, 16808, 18574}, - { 3460, 5741, 9596, 11742, 14413, 16080, 18173, 19090, 20845, 21601}, - { 3735, 4426, 6199, 7363, 9250, 14489, 16035, 17026, 19873, 20876}, - { 3521, 4778, 6887, 8680, 12717, 14322, 15950, 18050, 20166, 21145}, - { 2141, 2968, 6865, 8051, 10010, 13159, 14813, 15861, 17528, 18655}, - { 4148, 6128, 9028, 10871, 12686, 14005, 15976, 17208, 19587, 20595}, - { 4403, 5367, 6634, 8371, 10163, 11599, 14963, 
16331, 17982, 18768}, - { 4091, 5386, 6852, 8770, 11563, 13290, 15728, 16930, 19056, 20102}, - { 2746, 3625, 5299, 7504, 10262, 11432, 13172, 15490, 16875, 17514}, - { 2248, 3556, 8539, 10590, 12665, 14696, 16515, 17824, 20268, 21247}, - { 1279, 1960, 3920, 7793, 10153, 14753, 16646, 18139, 20679, 21466}, - { 2440, 3475, 6737, 8654, 12190, 14588, 17119, 17925, 19110, 19979}, - { 1879, 2514, 4497, 7572, 10017, 14948, 16141, 16897, 18397, 19376}, - { 2804, 3688, 7490, 10086, 11218, 12711, 16307, 17470, 20077, 21126}, - { 2023, 2682, 3873, 8268, 10255, 11645, 15187, 17102, 18965, 19788}, - { 2823, 3605, 5815, 8595, 10085, 11469, 16568, 17462, 18754, 19876}, - { 2851, 3681, 5280, 7648, 9173, 10338, 14961, 16148, 17559, 18474}, - { 1348, 2645, 5826, 8785, 10620, 12831, 16255, 18319, 21133, 22586}, - { 2141, 3036, 4293, 6082, 7593, 10629, 17158, 18033, 21466, 22084}, - { 1608, 2375, 3384, 6878, 9970, 11227, 16928, 17650, 20185, 21120}, - { 2774, 3616, 5014, 6557, 7788, 8959, 17068, 18302, 19537, 20542}, - { 1934, 4813, 6204, 7212, 8979, 11665, 15989, 17811, 20426, 21703}, - { 2288, 3507, 5037, 6841, 8278, 9638, 15066, 16481, 21653, 22214}, - { 2951, 3771, 4878, 7578, 9016, 10298, 14490, 15242, 20223, 20990}, - { 3256, 4791, 6601, 7521, 8644, 9707, 13398, 16078, 19102, 20249}, - { 1827, 2614, 3486, 6039, 12149, 13823, 16191, 17282, 21423, 22041}, - { 1000, 1704, 3002, 6335, 8471, 10500, 14878, 16979, 20026, 22427}, - { 1646, 2286, 3109, 7245, 11493, 12791, 16824, 17667, 18981, 20222}, - { 1708, 2501, 3315, 6737, 8729, 9924, 16089, 17097, 18374, 19917}, - { 2623, 3510, 4478, 5645, 9862, 11115, 15219, 18067, 19583, 20382}, - { 2518, 3434, 4728, 6388, 8082, 9285, 13162, 18383, 19819, 20552}, - { 1726, 2383, 4090, 6303, 7805, 12845, 14612, 17608, 19269, 20181}, - { 2860, 3735, 4838, 6044, 7254, 8402, 14031, 16381, 18037, 19410}, - { 4247, 5993, 7952, 9792, 12342, 14653, 17527, 18774, 20831, 21699}, - { 3502, 4051, 5680, 6805, 8146, 11945, 16649, 17444, 20390, 21564}, - { 3151, 4893, 5899, 7198, 11418, 13073, 15124, 17673, 20520, 21861}, - { 3960, 4848, 5926, 7259, 8811, 10529, 15661, 16560, 18196, 20183}, - { 4499, 6604, 8036, 9251, 10804, 12627, 15880, 17512, 20020, 21046}, - { 4251, 5541, 6654, 8318, 9900, 11686, 15100, 17093, 20572, 21687}, - { 3769, 5327, 7865, 9360, 10684, 11818, 13660, 15366, 18733, 19882}, - { 3083, 3969, 6248, 8121, 9798, 10994, 12393, 13686, 17888, 19105}, - { 2731, 4670, 7063, 9201, 11346, 13735, 16875, 18797, 20787, 22360}, - { 1187, 2227, 4737, 7214, 9622, 12633, 15404, 17968, 20262, 23533}, - { 1911, 2477, 3915, 10098, 11616, 12955, 16223, 17138, 19270, 20729}, - { 1764, 2519, 3887, 6944, 9150, 12590, 16258, 16984, 17924, 18435}, - { 1400, 3674, 7131, 8718, 10688, 12508, 15708, 17711, 19720, 21068}, - { 2322, 3073, 4287, 8108, 9407, 10628, 15862, 16693, 19714, 21474}, - { 2630, 3339, 4758, 8360, 10274, 11333, 12880, 17374, 19221, 19936}, - { 1721, 2577, 5553, 7195, 8651, 10686, 15069, 16953, 18703, 19929} -}; - -/** - * second stage LSP codebook, high and low parts - (both 5-dimensional, with 32 entries (3.2.4 of G.729) - */ -static const int16_t cb_lsp_2nd[1<<VQ_2ND_BITS][10] = { /* (2.13) */ - { -435, -815, -742, 1033, -518, 582, -1201, 829, 86, 385}, - { -833, -891, 463, -8, -1251, 1450, 72, -231, 864, 661}, - {-1021, 231, -306, 321, -220, -163, -526, -754, -1633, 267}, - { 57, -198, -339, -33, -1468, 573, 796, -169, -631, 816}, - { 171, -350, 294, 1660, 453, 519, 291, 159, -640, -1296}, - { -701, -842, -58, 950, 892, 1549, 715, 527, -714, -193}, - { 584, 31, -289, 
356, -333, -457, 612, -283, -1381, -741}, - { -109, -808, 231, 77, -87, -344, 1341, 1087, -654, -569}, - { -859, 1236, 550, 854, 714, -543, -1752, -195, -98, -276}, - { -877, -954, -1248, -299, 212, -235, -728, 949, 1517, 895}, - { -77, 344, -620, 763, 413, 502, -362, -960, -483, 1386}, - { -314, -307, -256, -1260, -429, 450, -466, -108, 1010, 2223}, - { 711, 693, 521, 650, 1305, -28, -378, 744, -1005, 240}, - { -112, -271, -500, 946, 1733, 271, -15, 909, -259, 1688}, - { 575, -10, -468, -199, 1101, -1011, 581, -53, -747, 878}, - { 145, -285, -1280, -398, 36, -498, -1377, 18, -444, 1483}, - {-1133, -835, 1350, 1284, -95, 1015, -222, 443, 372, -354}, - {-1459, -1237, 416, -213, 466, 669, 659, 1640, 932, 534}, - { -15, 66, 468, 1019, -748, 1385, -182, -907, -721, -262}, - { -338, 148, 1445, 75, -760, 569, 1247, 337, 416, -121}, - { 389, 239, 1568, 981, 113, 369, -1003, -507, -587, -904}, - { -312, -98, 949, 31, 1104, 72, -141, 1465, 63, -785}, - { 1127, 584, 835, 277, -1159, 208, 301, -882, 117, -404}, - { 539, -114, 856, -493, 223, -912, 623, -76, 276, -440}, - { 2197, 2337, 1268, 670, 304, -267, -525, 140, 882, -139}, - {-1596, 550, 801, -456, -56, -697, 865, 1060, 413, 446}, - { 1154, 593, -77, 1237, -31, 581, -1037, -895, 669, 297}, - { 397, 558, 203, -797, -919, 3, 692, -292, 1050, 782}, - { 334, 1475, 632, -80, 48, -1061, -484, 362, -597, -852}, - { -545, -330, -429, -680, 1133, -1182, -744, 1340, 262, 63}, - { 1320, 827, -398, -576, 341, -774, -483, -1247, -70, 98}, - { -163, 674, -11, -886, 531, -1125, -265, -242, 724, 934} -}; - -/** - * gain codebook (first stage), 8k mode (3.9.2 of G.729) - */ -static const int16_t cb_gain_1st_8k[1<<GC_1ST_IDX_BITS_8K][2] = { /*(0.14) (2.13) */ - { 3242 , 9949 }, - { 1551 , 2425 }, - { 2678 , 27162 }, - { 1921 , 9291 }, - { 1831 , 5022 }, - { 1 , 1516 }, - { 356 , 14756 }, - { 57 , 5404 }, -}; - -/** - * gain codebook (second stage), 8k mode (3.9.2 of G.729) - */ -static const int16_t cb_gain_2nd_8k[1<<GC_2ND_IDX_BITS_8K][2] = { /*(1.14) (1.13) */ - { 5142 , 592 }, - { 17299 , 1861 }, - { 6160 , 2395 }, - { 16112 , 3392 }, - { 826 , 2005 }, - { 18973 , 5935 }, - { 1994 , 0 }, - { 15434 , 237 }, - { 10573 , 2966 }, - { 15132 , 4914 }, - { 11569 , 1196 }, - { 14194 , 1630 }, - { 8091 , 4861 }, - { 15161 , 14276 }, - { 9120 , 525 }, - { 13260 , 3256 }, -}; - -/** - * 4th order Moving Average (MA) Predictor codebook (3.2.4 of G.729) - */ -static const int16_t cb_ma_predictor[2][MA_NP][10] = { /* (0.15) */ - { - { 8421, 9109, 9175, 8965, 9034, 9057, 8765, 8775, 9106, 8673}, - { 7018, 7189, 7638, 7307, 7444, 7379, 7038, 6956, 6930, 6868}, - { 5472, 4990, 5134, 5177, 5246, 5141, 5206, 5095, 4830, 5147}, - { 4056, 3031, 2614, 3024, 2916, 2713, 3309, 3237, 2857, 3473} - }, - { - { 7733, 7880, 8188, 8175, 8247, 8490, 8637, 8601, 8359, 7569}, - { 4210, 3031, 2552, 3473, 3876, 3853, 4184, 4154, 3909, 3968}, - { 3214, 1930, 1313, 2143, 2493, 2385, 2755, 2706, 2542, 2919}, - { 3024, 1592, 940, 1631, 1723, 1579, 2034, 2084, 1913, 2601} - } -}; - -static const int16_t cb_ma_predictor_sum[2][10] = { /* (0.15) */ - { 7798, 8447, 8205, 8293, 8126, 8477, 8447, 8703, 9043, 8604}, - {14585, 18333, 19772, 17344, 16426, 16459, 15155, 15220, 16043, 15708} -}; - -/** - * initial LSP coefficients belongs to virtual frame preceding the - * first frame of the stream - */ -static const int16_t lsp_init[10]= { /* (0.15) */ - 30000, 26000, 21000, 15000, 8000, 0, -8000,-15000,-21000,-26000 -}; -#endif /* AVCODEC_G729DATA_H */ diff --git a/libavcodec/g729dec.c 
b/libavcodec/g729dec.c deleted file mode 100644 index 32db0597e3..0000000000 --- a/libavcodec/g729dec.c +++ /dev/null @@ -1,331 +0,0 @@ -/* - * G.729 decoder - * Copyright (c) 2008 Vladimir Voroshilov - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include <stdlib.h> -#include <inttypes.h> -#include <limits.h> -#include <stdio.h> -#include <string.h> -#include <math.h> -#include <assert.h> - -#include "avcodec.h" -#include "libavutil/avutil.h" -#include "get_bits.h" - -#include "lsp.h" -#include "celp_math.h" -#include "acelp_filters.h" -#include "acelp_pitch_delay.h" -#include "acelp_vectors.h" -#include "g729data.h" - -/** - * minimum quantized LSF value (3.2.4) - * 0.005 in Q13 - */ -#define LSFQ_MIN 40 - -/** - * maximum quantized LSF value (3.2.4) - * 3.135 in Q13 - */ -#define LSFQ_MAX 25681 - -/** - * minimum LSF distance (3.2.4) - * 0.0391 in Q13 - */ -#define LSFQ_DIFF_MIN 321 - -/** - * minimum gain pitch value (3.8, Equation 47) - * 0.2 in (1.14) - */ -#define SHARP_MIN 3277 - -/** - * maximum gain pitch value (3.8, Equation 47) - * (EE) This does not comply with the specification. - * Specification says about 0.8, which should be - * 13107 in (1.14), but reference C code uses - * 13017 (equals to 0.7945) instead of it. 
- */ -#define SHARP_MAX 13017 - -/** - * subframe size - */ -#define SUBFRAME_SIZE 40 - - -typedef struct { - uint8_t ac_index_bits[2]; ///< adaptive codebook index for second subframe (size in bits) - uint8_t parity_bit; ///< parity bit for pitch delay - uint8_t gc_1st_index_bits; ///< gain codebook (first stage) index (size in bits) - uint8_t gc_2nd_index_bits; ///< gain codebook (second stage) index (size in bits) - uint8_t fc_signs_bits; ///< number of pulses in fixed-codebook vector - uint8_t fc_indexes_bits; ///< size (in bits) of fixed-codebook index entry -} G729FormatDescription; - -typedef struct { - int pitch_delay_int_prev; ///< integer part of previous subframe's pitch delay (4.1.3) - - /// (2.13) LSP quantizer outputs - int16_t past_quantizer_output_buf[MA_NP + 1][10]; - int16_t* past_quantizer_outputs[MA_NP + 1]; - - int16_t lsfq[10]; ///< (2.13) quantized LSF coefficients from previous frame - int16_t lsp_buf[2][10]; ///< (0.15) LSP coefficients (previous and current frames) (3.2.5) - int16_t *lsp[2]; ///< pointers to lsp_buf -} G729Context; - -static const G729FormatDescription format_g729_8k = { - .ac_index_bits = {8,5}, - .parity_bit = 1, - .gc_1st_index_bits = GC_1ST_IDX_BITS_8K, - .gc_2nd_index_bits = GC_2ND_IDX_BITS_8K, - .fc_signs_bits = 4, - .fc_indexes_bits = 13, -}; - -static const G729FormatDescription format_g729d_6k4 = { - .ac_index_bits = {8,4}, - .parity_bit = 0, - .gc_1st_index_bits = GC_1ST_IDX_BITS_6K4, - .gc_2nd_index_bits = GC_2ND_IDX_BITS_6K4, - .fc_signs_bits = 2, - .fc_indexes_bits = 9, -}; - -/** - * \brief pseudo random number generator - */ -static inline uint16_t g729_prng(uint16_t value) -{ - return 31821 * value + 13849; -} - -/** - * Get parity bit of bit 2..7 - */ -static inline int get_parity(uint8_t value) -{ - return (0x6996966996696996ULL >> (value >> 2)) & 1; -} - -static void lsf_decode(int16_t* lsfq, int16_t* past_quantizer_outputs[MA_NP + 1], - int16_t ma_predictor, - int16_t vq_1st, int16_t vq_2nd_low, int16_t vq_2nd_high) -{ - int i,j; - static const uint8_t min_distance[2]={10, 5}; //(2.13) - int16_t* quantizer_output = past_quantizer_outputs[MA_NP]; - - for (i = 0; i < 5; i++) { - quantizer_output[i] = cb_lsp_1st[vq_1st][i ] + cb_lsp_2nd[vq_2nd_low ][i ]; - quantizer_output[i + 5] = cb_lsp_1st[vq_1st][i + 5] + cb_lsp_2nd[vq_2nd_high][i + 5]; - } - - for (j = 0; j < 2; j++) { - for (i = 1; i < 10; i++) { - int diff = (quantizer_output[i - 1] - quantizer_output[i] + min_distance[j]) >> 1; - if (diff > 0) { - quantizer_output[i - 1] -= diff; - quantizer_output[i ] += diff; - } - } - } - - for (i = 0; i < 10; i++) { - int sum = quantizer_output[i] * cb_ma_predictor_sum[ma_predictor][i]; - for (j = 0; j < MA_NP; j++) - sum += past_quantizer_outputs[j][i] * cb_ma_predictor[ma_predictor][j][i]; - - lsfq[i] = sum >> 15; - } - - /* Rotate past_quantizer_outputs. */ - memmove(past_quantizer_outputs + 1, past_quantizer_outputs, MA_NP * sizeof(int16_t*)); - past_quantizer_outputs[0] = quantizer_output; - - ff_acelp_reorder_lsf(lsfq, LSFQ_DIFF_MIN, LSFQ_MIN, LSFQ_MAX, 10); -} - -static av_cold int decoder_init(AVCodecContext * avctx) -{ - G729Context* ctx = avctx->priv_data; - int i,k; - - if (avctx->channels != 1) { - av_log(avctx, AV_LOG_ERROR, "Only mono sound is supported (requested channels: %d).\n", avctx->channels); - return AVERROR(EINVAL); - } - - /* Both 8kbit/s and 6.4kbit/s modes uses two subframes per frame. 
*/ - avctx->frame_size = SUBFRAME_SIZE << 1; - - for (k = 0; k < MA_NP + 1; k++) { - ctx->past_quantizer_outputs[k] = ctx->past_quantizer_output_buf[k]; - for (i = 1; i < 11; i++) - ctx->past_quantizer_outputs[k][i - 1] = (18717 * i) >> 3; - } - - ctx->lsp[0] = ctx->lsp_buf[0]; - ctx->lsp[1] = ctx->lsp_buf[1]; - memcpy(ctx->lsp[0], lsp_init, 10 * sizeof(int16_t)); - - return 0; -} - -static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, - AVPacket *avpkt) -{ - const uint8_t *buf = avpkt->data; - int buf_size = avpkt->size; - int16_t *out_frame = data; - GetBitContext gb; - G729FormatDescription format; - int frame_erasure = 0; ///< frame erasure detected during decoding - int bad_pitch = 0; ///< parity check failed - int i; - G729Context *ctx = avctx->priv_data; - int16_t lp[2][11]; // (3.12) - uint8_t ma_predictor; ///< switched MA predictor of LSP quantizer - uint8_t quantizer_1st; ///< first stage vector of quantizer - uint8_t quantizer_2nd_lo; ///< second stage lower vector of quantizer (size in bits) - uint8_t quantizer_2nd_hi; ///< second stage higher vector of quantizer (size in bits) - - int pitch_delay_int; // pitch delay, integer part - int pitch_delay_3x; // pitch delay, multiplied by 3 - - if (*data_size < SUBFRAME_SIZE << 2) { - av_log(avctx, AV_LOG_ERROR, "Error processing packet: output buffer too small\n"); - return AVERROR(EIO); - } - - if (buf_size == 10) { - format = format_g729_8k; - av_log(avctx, AV_LOG_DEBUG, "Packet type: %s\n", "G.729 @ 8kbit/s"); - } else if (buf_size == 8) { - format = format_g729d_6k4; - av_log(avctx, AV_LOG_DEBUG, "Packet type: %s\n", "G.729D @ 6.4kbit/s"); - } else { - av_log(avctx, AV_LOG_ERROR, "Packet size %d is unknown.\n", buf_size); - return AVERROR_INVALIDDATA; - } - - for (i=0; i < buf_size; i++) - frame_erasure |= buf[i]; - frame_erasure = !frame_erasure; - - init_get_bits(&gb, buf, buf_size); - - ma_predictor = get_bits(&gb, 1); - quantizer_1st = get_bits(&gb, VQ_1ST_BITS); - quantizer_2nd_lo = get_bits(&gb, VQ_2ND_BITS); - quantizer_2nd_hi = get_bits(&gb, VQ_2ND_BITS); - - lsf_decode(ctx->lsfq, ctx->past_quantizer_outputs, - ma_predictor, - quantizer_1st, quantizer_2nd_lo, quantizer_2nd_hi); - - ff_acelp_lsf2lsp(ctx->lsp[1], ctx->lsfq, 10); - - ff_acelp_lp_decode(&lp[0][0], &lp[1][0], ctx->lsp[1], ctx->lsp[0], 10); - - FFSWAP(int16_t*, ctx->lsp[1], ctx->lsp[0]); - - for (i = 0; i < 2; i++) { - uint8_t ac_index; ///< adaptive codebook index - uint8_t pulses_signs; ///< fixed-codebook vector pulse signs - int fc_indexes; ///< fixed-codebook indexes - uint8_t gc_1st_index; ///< gain codebook (first stage) index - uint8_t gc_2nd_index; ///< gain codebook (second stage) index - - ac_index = get_bits(&gb, format.ac_index_bits[i]); - if(!i && format.parity_bit) - bad_pitch = get_parity(ac_index) == get_bits1(&gb); - fc_indexes = get_bits(&gb, format.fc_indexes_bits); - pulses_signs = get_bits(&gb, format.fc_signs_bits); - gc_1st_index = get_bits(&gb, format.gc_1st_index_bits); - gc_2nd_index = get_bits(&gb, format.gc_2nd_index_bits); - - if(!i) { - if (bad_pitch) - pitch_delay_3x = 3 * ctx->pitch_delay_int_prev; - else - pitch_delay_3x = ff_acelp_decode_8bit_to_1st_delay3(ac_index); - } else { - int pitch_delay_min = av_clip(ctx->pitch_delay_int_prev - 5, - PITCH_DELAY_MIN, PITCH_DELAY_MAX - 9); - - if(packet_type == FORMAT_G729D_6K4) - pitch_delay_3x = ff_acelp_decode_4bit_to_2nd_delay3(ac_index, pitch_delay_min); - else - pitch_delay_3x = ff_acelp_decode_5_6_bit_to_2nd_delay3(ac_index, pitch_delay_min); - } - - /* 
Round pitch delay to nearest (used everywhere except ff_acelp_interpolate). */ - pitch_delay_int = (pitch_delay_3x + 1) / 3; - - ff_acelp_weighted_vector_sum(fc + pitch_delay_int, - fc + pitch_delay_int, - fc, 1 << 14, - av_clip(ctx->gain_pitch, SHARP_MIN, SHARP_MAX), - 0, 14, - SUBFRAME_SIZE - pitch_delay_int); - - if (frame_erasure) { - ctx->gain_pitch = (29491 * ctx->gain_pitch) >> 15; // 0.90 (0.15) - ctx->gain_code = ( 2007 * ctx->gain_code ) >> 11; // 0.98 (0.11) - - gain_corr_factor = 0; - } else { - ctx->gain_pitch = cb_gain_1st_8k[gc_1st_index][0] + - cb_gain_2nd_8k[gc_2nd_index][0]; - gain_corr_factor = cb_gain_1st_8k[gc_1st_index][1] + - cb_gain_2nd_8k[gc_2nd_index][1]; - - ff_acelp_weighted_vector_sum(ctx->exc + i * SUBFRAME_SIZE, - ctx->exc + i * SUBFRAME_SIZE, fc, - (!voicing && frame_erasure) ? 0 : ctx->gain_pitch, - ( voicing && frame_erasure) ? 0 : ctx->gain_code, - 1 << 13, 14, SUBFRAME_SIZE); - - ctx->pitch_delay_int_prev = pitch_delay_int; - } - - *data_size = SUBFRAME_SIZE << 2; - return buf_size; -} - -AVCodec ff_g729_decoder = -{ - "g729", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_G729, - sizeof(G729Context), - decoder_init, - NULL, - NULL, - decode_frame, - .long_name = NULL_IF_CONFIG_SMALL("G.729"), -}; diff --git a/libavcodec/get_bits.h b/libavcodec/get_bits.h index 8579c87cd1..96d33b342e 100644 --- a/libavcodec/get_bits.h +++ b/libavcodec/get_bits.h @@ -201,19 +201,11 @@ static inline void skip_bits_long(GetBitContext *s, int n){ } \ } while (0) -#if ARCH_X86 -# define SKIP_CACHE(name, gb, num) \ - __asm__("shldl %2, %1, %0 \n\t" \ - "shll %2, %1 \n\t" \ - : "+r" (name##_cache0), "+r" (name##_cache1) \ - : "Ic" ((uint8_t)(num))) -#else # define SKIP_CACHE(name, gb, num) do { \ name##_cache0 <<= (num); \ name##_cache0 |= NEG_USR32(name##_cache1,num); \ name##_cache1 <<= (num); \ } while (0) -#endif # define SKIP_COUNTER(name, gb, num) name##_bit_count += (num) @@ -381,7 +373,7 @@ static inline int check_marker(GetBitContext *s, const char *msg) /** * init GetBitContext. 
- * @param buffer bitstream buffer, must be FF_INPUT_BUFFER_PADDING_SIZE bytes larger then the actual read bits + * @param buffer bitstream buffer, must be FF_INPUT_BUFFER_PADDING_SIZE bytes larger than the actual read bits * because some optimized bitstream readers read 32 or 64 bit at once and could read over the end * @param bit_size the size of the buffer in bits * @@ -504,7 +496,7 @@ void free_vlc(VLC *vlc); /** - * parses a vlc code, faster then get_vlc() + * parses a vlc code, faster than get_vlc() * @param bits is the number of bits which will be read at once, must be * identical to nb_bits in init_vlc() * @param max_depth is the number of times bits bits must be read to completely diff --git a/libavcodec/gif.c b/libavcodec/gif.c index 121b873888..8736f0f46c 100644 --- a/libavcodec/gif.c +++ b/libavcodec/gif.c @@ -167,13 +167,13 @@ static int gif_encode_close(AVCodecContext *avctx) } AVCodec ff_gif_encoder = { - "gif", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_GIF, - sizeof(GIFContext), - gif_encode_init, - gif_encode_frame, - gif_encode_close, + .name = "gif", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_GIF, + .priv_data_size = sizeof(GIFContext), + .init = gif_encode_init, + .encode = gif_encode_frame, + .close = gif_encode_close, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_RGB8, PIX_FMT_BGR8, PIX_FMT_RGB4_BYTE, PIX_FMT_BGR4_BYTE, PIX_FMT_GRAY8, PIX_FMT_PAL8, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("GIF (Graphics Interchange Format)"), }; diff --git a/libavcodec/gifdec.c b/libavcodec/gifdec.c index 39d0db9c24..7a22fa702f 100644 --- a/libavcodec/gifdec.c +++ b/libavcodec/gifdec.c @@ -326,14 +326,13 @@ static av_cold int gif_decode_close(AVCodecContext *avctx) } AVCodec ff_gif_decoder = { - "gif", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_GIF, - sizeof(GifState), - gif_decode_init, - NULL, - gif_decode_close, - gif_decode_frame, - CODEC_CAP_DR1, + .name = "gif", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_GIF, + .priv_data_size = sizeof(GifState), + .init = gif_decode_init, + .close = gif_decode_close, + .decode = gif_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("GIF (Graphics Interchange Format)"), }; diff --git a/libavcodec/gsmdec.c b/libavcodec/gsmdec.c index 4ce36b48bc..a3f67d3b52 100644 --- a/libavcodec/gsmdec.c +++ b/libavcodec/gsmdec.c @@ -85,25 +85,21 @@ static int gsm_decode_frame(AVCodecContext *avctx, void *data, } AVCodec ff_gsm_decoder = { - "gsm", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_GSM, - sizeof(GSMContext), - gsm_init, - NULL, - NULL, - gsm_decode_frame, + .name = "gsm", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_GSM, + .priv_data_size = sizeof(GSMContext), + .init = gsm_init, + .decode = gsm_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("GSM"), }; AVCodec ff_gsm_ms_decoder = { - "gsm_ms", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_GSM_MS, - sizeof(GSMContext), - gsm_init, - NULL, - NULL, - gsm_decode_frame, + .name = "gsm_ms", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_GSM_MS, + .priv_data_size = sizeof(GSMContext), + .init = gsm_init, + .decode = gsm_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("GSM Microsoft variant"), }; diff --git a/libavcodec/h261dec.c b/libavcodec/h261dec.c index 856fae3d9c..0ee5460fa7 100644 --- a/libavcodec/h261dec.c +++ b/libavcodec/h261dec.c @@ -215,7 +215,7 @@ static int h261_decode_mb_skipped(H261Context *h, int mba1, int mba2 ) s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; - s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; + s->current_picture.f.mb_type[xy] = 
MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; s->mb_skipped = 1; @@ -323,14 +323,14 @@ static int h261_decode_mb(H261Context *h){ } if(s->mb_intra){ - s->current_picture.mb_type[xy]= MB_TYPE_INTRA; + s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA; goto intra; } //set motion vectors s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; - s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0; + s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0; s->mv[0][0][0] = h->current_mv_x * 2;//gets divided by 2 in motion compensation s->mv[0][0][1] = h->current_mv_y * 2; @@ -464,7 +464,7 @@ static int h261_decode_picture_header(H261Context *h){ s->picture_number = (s->picture_number&~31) + i; s->avctx->time_base= (AVRational){1001, 30000}; - s->current_picture.pts= s->picture_number; + s->current_picture.f.pts = s->picture_number; /* PTYPE starts here */ @@ -570,7 +570,7 @@ retry: } //we need to set current_picture_ptr before reading the header, otherwise we cannot store anyting im there - if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){ + if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) { int i= ff_find_unused_picture(s, 0); s->current_picture_ptr= &s->picture[i]; } @@ -596,8 +596,8 @@ retry: } // for skipping the frame - s->current_picture.pict_type= s->pict_type; - s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I; + s->current_picture.f.pict_type = s->pict_type; + s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; #if FF_API_HURRY_UP /* skip everything if we are in a hurry>=5 */ @@ -644,15 +644,14 @@ static av_cold int h261_decode_end(AVCodecContext *avctx) } AVCodec ff_h261_decoder = { - "h261", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_H261, - sizeof(H261Context), - h261_decode_init, - NULL, - h261_decode_end, - h261_decode_frame, - CODEC_CAP_DR1, + .name = "h261", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_H261, + .priv_data_size = sizeof(H261Context), + .init = h261_decode_init, + .close = h261_decode_end, + .decode = h261_decode_frame, + .capabilities = CODEC_CAP_DR1, .max_lowres = 3, .long_name = NULL_IF_CONFIG_SMALL("H.261"), }; diff --git a/libavcodec/h261enc.c b/libavcodec/h261enc.c index cd282fba75..91d90d8989 100644 --- a/libavcodec/h261enc.c +++ b/libavcodec/h261enc.c @@ -322,13 +322,13 @@ static void h261_encode_block(H261Context * h, DCTELEM * block, int n){ } AVCodec ff_h261_encoder = { - "h261", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_H261, - sizeof(H261Context), - MPV_encode_init, - MPV_encode_picture, - MPV_encode_end, + .name = "h261", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_H261, + .priv_data_size = sizeof(H261Context), + .init = MPV_encode_init, + .encode = MPV_encode_picture, + .close = MPV_encode_end, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("H.261"), }; diff --git a/libavcodec/h263.c b/libavcodec/h263.c index 43d5b4b3e9..53f06bb2e6 100644 --- a/libavcodec/h263.c +++ b/libavcodec/h263.c @@ -52,7 +52,7 @@ void ff_h263_update_motion_val(MpegEncContext * s){ const int wrap = s->b8_stride; const int xy = s->block_index[0]; - s->current_picture.mbskip_table[mb_xy]= s->mb_skipped; + s->current_picture.f.mbskip_table[mb_xy] = s->mb_skipped; if(s->mv_type != MV_TYPE_8X8){ int motion_x, motion_y; @@ -71,30 +71,30 @@ void ff_h263_update_motion_val(MpegEncContext * s){ s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0]; s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1]; } - 
s->current_picture.ref_index[0][4*mb_xy ]= - s->current_picture.ref_index[0][4*mb_xy + 1]= s->field_select[0][0]; - s->current_picture.ref_index[0][4*mb_xy + 2]= - s->current_picture.ref_index[0][4*mb_xy + 3]= s->field_select[0][1]; + s->current_picture.f.ref_index[0][4*mb_xy ] = + s->current_picture.f.ref_index[0][4*mb_xy + 1] = s->field_select[0][0]; + s->current_picture.f.ref_index[0][4*mb_xy + 2] = + s->current_picture.f.ref_index[0][4*mb_xy + 3] = s->field_select[0][1]; } /* no update if 8X8 because it has been done during parsing */ - s->current_picture.motion_val[0][xy][0] = motion_x; - s->current_picture.motion_val[0][xy][1] = motion_y; - s->current_picture.motion_val[0][xy + 1][0] = motion_x; - s->current_picture.motion_val[0][xy + 1][1] = motion_y; - s->current_picture.motion_val[0][xy + wrap][0] = motion_x; - s->current_picture.motion_val[0][xy + wrap][1] = motion_y; - s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_x; - s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_y; + s->current_picture.f.motion_val[0][xy][0] = motion_x; + s->current_picture.f.motion_val[0][xy][1] = motion_y; + s->current_picture.f.motion_val[0][xy + 1][0] = motion_x; + s->current_picture.f.motion_val[0][xy + 1][1] = motion_y; + s->current_picture.f.motion_val[0][xy + wrap][0] = motion_x; + s->current_picture.f.motion_val[0][xy + wrap][1] = motion_y; + s->current_picture.f.motion_val[0][xy + 1 + wrap][0] = motion_x; + s->current_picture.f.motion_val[0][xy + 1 + wrap][1] = motion_y; } if(s->encoding){ //FIXME encoding MUST be cleaned up if (s->mv_type == MV_TYPE_8X8) - s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_8x8; + s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8; else if(s->mb_intra) - s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA; + s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA; else - s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_16x16; + s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16; } } @@ -154,7 +154,7 @@ void ff_h263_loop_filter(MpegEncContext * s){ Diag Top Left Center */ - if(!IS_SKIP(s->current_picture.mb_type[xy])){ + if (!IS_SKIP(s->current_picture.f.mb_type[xy])) { qp_c= s->qscale; s->dsp.h263_v_loop_filter(dest_y+8*linesize , linesize, qp_c); s->dsp.h263_v_loop_filter(dest_y+8*linesize+8, linesize, qp_c); @@ -164,10 +164,10 @@ void ff_h263_loop_filter(MpegEncContext * s){ if(s->mb_y){ int qp_dt, qp_tt, qp_tc; - if(IS_SKIP(s->current_picture.mb_type[xy-s->mb_stride])) + if (IS_SKIP(s->current_picture.f.mb_type[xy - s->mb_stride])) qp_tt=0; else - qp_tt= s->current_picture.qscale_table[xy-s->mb_stride]; + qp_tt = s->current_picture.f.qscale_table[xy - s->mb_stride]; if(qp_c) qp_tc= qp_c; @@ -187,10 +187,10 @@ void ff_h263_loop_filter(MpegEncContext * s){ s->dsp.h263_h_loop_filter(dest_y-8*linesize+8 , linesize, qp_tt); if(s->mb_x){ - if(qp_tt || IS_SKIP(s->current_picture.mb_type[xy-1-s->mb_stride])) + if (qp_tt || IS_SKIP(s->current_picture.f.mb_type[xy - 1 - s->mb_stride])) qp_dt= qp_tt; else - qp_dt= s->current_picture.qscale_table[xy-1-s->mb_stride]; + qp_dt = s->current_picture.f.qscale_table[xy - 1 - s->mb_stride]; if(qp_dt){ const int chroma_qp= s->chroma_qscale_table[qp_dt]; @@ -209,10 +209,10 @@ void ff_h263_loop_filter(MpegEncContext * s){ if(s->mb_x){ int qp_lc; - if(qp_c || IS_SKIP(s->current_picture.mb_type[xy-1])) + if (qp_c || IS_SKIP(s->current_picture.f.mb_type[xy - 1])) qp_lc= qp_c; else - qp_lc= s->current_picture.qscale_table[xy-1]; + qp_lc = s->current_picture.f.qscale_table[xy - 1]; 
if(qp_lc){ s->dsp.h263_h_loop_filter(dest_y, linesize, qp_lc); @@ -321,7 +321,7 @@ int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir, static const int off[4]= {2, 1, 1, -1}; wrap = s->b8_stride; - mot_val = s->current_picture.motion_val[dir] + s->block_index[block]; + mot_val = s->current_picture.f.motion_val[dir] + s->block_index[block]; A = mot_val[ - 1]; /* special case for first (slice) line */ diff --git a/libavcodec/h263dec.c b/libavcodec/h263dec.c index ff312b0935..3b8b5804b1 100644 --- a/libavcodec/h263dec.c +++ b/libavcodec/h263dec.c @@ -269,7 +269,7 @@ static int decode_slice(MpegEncContext *s){ if( s->codec_id==CODEC_ID_MPEG4 && (s->workaround_bugs&FF_BUG_AUTODETECT) && get_bits_left(&s->gb) >=0 - && get_bits_left(&s->gb) < 48 + && get_bits_left(&s->gb) < 137 // && !s->resync_marker && !s->data_partitioning){ @@ -406,7 +406,7 @@ retry: /* We need to set current_picture_ptr before reading the header, * otherwise we cannot store anyting in there */ - if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){ + if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) { int i= ff_find_unused_picture(s, 0); s->current_picture_ptr= &s->picture[i]; } @@ -593,8 +593,8 @@ retry: s->gob_index = ff_h263_get_gob_height(s); // for skipping the frame - s->current_picture.pict_type= s->pict_type; - s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I; + s->current_picture.f.pict_type = s->pict_type; + s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; /* skip B-frames if we don't have reference frames */ if(s->last_picture_ptr==NULL && (s->pict_type==AV_PICTURE_TYPE_B || s->dropable)) return get_consumed_bytes(s, buf_size); @@ -658,7 +658,7 @@ retry: s->mb_x=0; s->mb_y=0; - decode_slice(s); + ret = decode_slice(s); while(s->mb_y<s->mb_height){ if(s->msmpeg4_version){ if(s->slice_height==0 || s->mb_x!=0 || (s->mb_y%s->slice_height)!=0 || get_bits_count(&s->gb) > s->gb.size_in_bits) @@ -674,7 +674,7 @@ retry: if(s->msmpeg4_version<4 && s->h263_pred) ff_mpeg4_clean_buffers(s); - decode_slice(s); + if (decode_slice(s) < 0) ret = AVERROR_INVALIDDATA; } if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type==AV_PICTURE_TYPE_I) @@ -721,8 +721,8 @@ intrax8_decoded: MPV_frame_end(s); -assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type); -assert(s->current_picture.pict_type == s->pict_type); + assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type); + assert(s->current_picture.f.pict_type == s->pict_type); if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) { *pict= *(AVFrame*)s->current_picture_ptr; } else if (s->last_picture_ptr != NULL) { @@ -738,19 +738,18 @@ assert(s->current_picture.pict_type == s->pict_type); av_log(avctx, AV_LOG_DEBUG, "%"PRId64"\n", rdtsc()-time); #endif - return get_consumed_bytes(s, buf_size); + return (ret && avctx->error_recognition >= FF_ER_EXPLODE)?ret:get_consumed_bytes(s, buf_size); } AVCodec ff_h263_decoder = { - "h263", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_H263, - sizeof(MpegEncContext), - ff_h263_decode_init, - NULL, - ff_h263_decode_end, - ff_h263_decode_frame, - CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY, + .name = "h263", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_H263, + .priv_data_size = sizeof(MpegEncContext), + .init = ff_h263_decode_init, + .close = ff_h263_decode_end, + .decode = ff_h263_decode_frame, + .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | 
CODEC_CAP_DELAY, .flush= ff_mpeg_flush, .max_lowres= 3, .long_name= NULL_IF_CONFIG_SMALL("H.263 / H.263-1996, H.263+ / H.263-1998 / H.263 version 2"), diff --git a/libavcodec/h264.c b/libavcodec/h264.c index 419f7c5b84..39b2ab9d2c 100644 --- a/libavcodec/h264.c +++ b/libavcodec/h264.c @@ -60,15 +60,6 @@ static const enum PixelFormat hwaccel_pixfmt_list_h264_jpeg_420[] = { PIX_FMT_NONE }; -void ff_h264_write_back_intra_pred_mode(H264Context *h){ - int8_t *mode= h->intra4x4_pred_mode + h->mb2br_xy[h->mb_xy]; - - AV_COPY32(mode, h->intra4x4_pred_mode_cache + 4 + 8*4); - mode[4]= h->intra4x4_pred_mode_cache[7+8*3]; - mode[5]= h->intra4x4_pred_mode_cache[7+8*2]; - mode[6]= h->intra4x4_pred_mode_cache[7+8*1]; -} - /** * checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks. */ @@ -270,8 +261,8 @@ static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n, int // Error resilience puts the current picture in the ref list. // Don't try to wait on these as it will cause a deadlock. // Fields can wait on each other, though. - if(ref->thread_opaque != s->current_picture.thread_opaque || - (ref->reference&3) != s->picture_structure) { + if (ref->f.thread_opaque != s->current_picture.f.thread_opaque || + (ref->f.reference & 3) != s->picture_structure) { my = get_lowest_part_list_y(h, ref, n, height, y_offset, 0); if (refs[0][ref_n] < 0) nrefs[0] += 1; refs[0][ref_n] = FFMAX(refs[0][ref_n], my); @@ -282,8 +273,8 @@ static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n, int int ref_n = h->ref_cache[1][ scan8[n] ]; Picture *ref= &h->ref_list[1][ref_n]; - if(ref->thread_opaque != s->current_picture.thread_opaque || - (ref->reference&3) != s->picture_structure) { + if (ref->f.thread_opaque != s->current_picture.f.thread_opaque || + (ref->f.reference & 3) != s->picture_structure) { my = get_lowest_part_list_y(h, ref, n, height, y_offset, 1); if (refs[1][ref_n] < 0) nrefs[1] += 1; refs[1][ref_n] = FFMAX(refs[1][ref_n], my); @@ -299,7 +290,7 @@ static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n, int static void await_references(H264Context *h){ MpegEncContext * const s = &h->s; const int mb_xy= h->mb_xy; - const int mb_type= s->current_picture.mb_type[mb_xy]; + const int mb_type = s->current_picture.f.mb_type[mb_xy]; int refs[2][48]; int nrefs[2] = {0}; int ref, list; @@ -359,7 +350,7 @@ static void await_references(H264Context *h){ int row = refs[list][ref]; if(row >= 0){ Picture *ref_pic = &h->ref_list[list][ref]; - int ref_field = ref_pic->reference - 1; + int ref_field = ref_pic->f.reference - 1; int ref_field_picture = ref_pic->field_picture; int pic_height = 16*s->mb_height >> ref_field_picture; @@ -457,7 +448,7 @@ static inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square, int my= h->mv_cache[list][ scan8[n] ][1] + src_y_offset*8; const int luma_xy= (mx&3) + ((my&3)<<2); int offset = ((mx>>2) << pixel_shift) + (my>>2)*h->mb_linesize; - uint8_t * src_y = pic->data[0] + offset; + uint8_t * src_y = pic->f.data[0] + offset; uint8_t * src_cb, * src_cr; int extra_width= h->emu_edge_width; int extra_height= h->emu_edge_height; @@ -487,7 +478,7 @@ static inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square, if(CONFIG_GRAY && s->flags&CODEC_FLAG_GRAY) return; if(chroma444){ - src_cb = pic->data[1] + offset; + src_cb = pic->f.data[1] + offset; if(emu){ s->dsp.emulated_edge_mc(s->edge_emu_buffer, src_cb - (2 << pixel_shift) - 2*h->mb_linesize, 
h->mb_linesize, 16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, pic_width, pic_height); @@ -498,7 +489,7 @@ static inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square, qpix_op[luma_xy](dest_cb + delta, src_cb + delta, h->mb_linesize); } - src_cr = pic->data[2] + offset; + src_cr = pic->f.data[2] + offset; if(emu){ s->dsp.emulated_edge_mc(s->edge_emu_buffer, src_cr - (2 << pixel_shift) - 2*h->mb_linesize, h->mb_linesize, 16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, pic_width, pic_height); @@ -513,11 +504,11 @@ static inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square, if(MB_FIELD){ // chroma offset when predicting from a field of opposite parity - my += 2 * ((s->mb_y & 1) - (pic->reference - 1)); + my += 2 * ((s->mb_y & 1) - (pic->f.reference - 1)); emu |= (my>>3) < 0 || (my>>3) + 8 >= (pic_height>>1); } - src_cb= pic->data[1] + ((mx>>3) << pixel_shift) + (my>>3)*h->mb_uvlinesize; - src_cr= pic->data[2] + ((mx>>3) << pixel_shift) + (my>>3)*h->mb_uvlinesize; + src_cb = pic->f.data[1] + ((mx >> 3) << pixel_shift) + (my >> 3) * h->mb_uvlinesize; + src_cr = pic->f.data[2] + ((mx >> 3) << pixel_shift) + (my >> 3) * h->mb_uvlinesize; if(emu){ s->dsp.emulated_edge_mc(s->edge_emu_buffer, src_cb, h->mb_uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1); @@ -673,8 +664,8 @@ static inline void prefetch_motion(H264Context *h, int list, int pixel_shift, in if(refn >= 0){ const int mx= (h->mv_cache[list][scan8[0]][0]>>2) + 16*s->mb_x + 8; const int my= (h->mv_cache[list][scan8[0]][1]>>2) + 16*s->mb_y; - uint8_t **src= h->ref_list[list][refn].data; - int off= ((mx+64)<<h->pixel_shift) + (my + (s->mb_x&3)*4)*h->mb_linesize; + uint8_t **src = h->ref_list[list][refn].f.data; + int off= (mx << pixel_shift) + (my + (s->mb_x&3)*4)*h->mb_linesize + (64 << pixel_shift); s->dsp.prefetch(src[0]+off, s->linesize, 4); if(chroma444){ s->dsp.prefetch(src[1]+off, s->linesize, 4); @@ -693,7 +684,7 @@ static av_always_inline void hl_motion(H264Context *h, uint8_t *dest_y, uint8_t int pixel_shift, int chroma444){ MpegEncContext * const s = &h->s; const int mb_xy= h->mb_xy; - const int mb_type= s->current_picture.mb_type[mb_xy]; + const int mb_type = s->current_picture.f.mb_type[mb_xy]; assert(IS_INTER(mb_type)); @@ -787,24 +778,6 @@ static av_always_inline void hl_motion(H264Context *h, uint8_t *dest_y, uint8_t prefetch_motion(h, 1, pixel_shift, chroma444); } -#define hl_motion_fn(sh, bits) \ -static av_always_inline void hl_motion_ ## bits(H264Context *h, \ - uint8_t *dest_y, \ - uint8_t *dest_cb, uint8_t *dest_cr, \ - qpel_mc_func (*qpix_put)[16], \ - h264_chroma_mc_func (*chroma_put), \ - qpel_mc_func (*qpix_avg)[16], \ - h264_chroma_mc_func (*chroma_avg), \ - h264_weight_func *weight_op, \ - h264_biweight_func *weight_avg, \ - int chroma444) \ -{ \ - hl_motion(h, dest_y, dest_cb, dest_cr, qpix_put, chroma_put, \ - qpix_avg, chroma_avg, weight_op, weight_avg, sh, chroma444); \ -} -hl_motion_fn(0, 8); -hl_motion_fn(1, 16); - static void free_tables(H264Context *h, int free_rbsp){ int i; H264Context *hx; @@ -1247,7 +1220,7 @@ int ff_h264_frame_start(H264Context *h){ * Zero here; IDR markings per slice in frame or fields are ORed in later. * See decode_nal_units(). 
*/ - s->current_picture_ptr->key_frame= 0; + s->current_picture_ptr->f.key_frame = 0; s->current_picture_ptr->mmco_reset= 0; assert(s->linesize && s->uvlinesize); @@ -1272,7 +1245,7 @@ int ff_h264_frame_start(H264Context *h){ /* some macroblocks can be accessed before they're available in case of lost slices, mbaff or threading*/ memset(h->slice_table, -1, (s->mb_height*s->mb_stride-1) * sizeof(*h->slice_table)); -// s->decode= (s->flags&CODEC_FLAG_PSNR) || !s->encoding || s->current_picture.reference /*|| h->contains_intra*/ || 1; +// s->decode = (s->flags & CODEC_FLAG_PSNR) || !s->encoding || s->current_picture.f.reference /*|| h->contains_intra*/ || 1; // We mark the current picture as non-reference after allocating it, so // that if we break out due to an error it can be released automatically @@ -1281,7 +1254,7 @@ int ff_h264_frame_start(H264Context *h){ // get released even with set reference, besides SVQ3 and others do not // mark frames as reference later "naturally". if(s->codec_id != CODEC_ID_SVQ3) - s->current_picture_ptr->reference= 0; + s->current_picture_ptr->f.reference = 0; s->current_picture_ptr->field_poc[0]= s->current_picture_ptr->field_poc[1]= INT_MAX; @@ -1307,8 +1280,8 @@ static void decode_postinit(H264Context *h, int setup_finished){ Picture *cur = s->current_picture_ptr; int i, pics, out_of_order, out_idx; - s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_H264; - s->current_picture_ptr->pict_type= s->pict_type; + s->current_picture_ptr->f.qscale_type = FF_QSCALE_TYPE_H264; + s->current_picture_ptr->f.pict_type = s->pict_type; if (h->next_output_pic) return; @@ -1321,8 +1294,8 @@ static void decode_postinit(H264Context *h, int setup_finished){ return; } - cur->interlaced_frame = 0; - cur->repeat_pict = 0; + cur->f.interlaced_frame = 0; + cur->f.repeat_pict = 0; /* Signal interlacing information externally. */ /* Prioritize picture timing SEI information over used decoding process if it exists. */ @@ -1334,53 +1307,53 @@ static void decode_postinit(H264Context *h, int setup_finished){ break; case SEI_PIC_STRUCT_TOP_FIELD: case SEI_PIC_STRUCT_BOTTOM_FIELD: - cur->interlaced_frame = 1; + cur->f.interlaced_frame = 1; break; case SEI_PIC_STRUCT_TOP_BOTTOM: case SEI_PIC_STRUCT_BOTTOM_TOP: if (FIELD_OR_MBAFF_PICTURE) - cur->interlaced_frame = 1; + cur->f.interlaced_frame = 1; else // try to flag soft telecine progressive - cur->interlaced_frame = h->prev_interlaced_frame; + cur->f.interlaced_frame = h->prev_interlaced_frame; break; case SEI_PIC_STRUCT_TOP_BOTTOM_TOP: case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM: // Signal the possibility of telecined film externally (pic_struct 5,6) // From these hints, let the applications decide if they apply deinterlacing. - cur->repeat_pict = 1; + cur->f.repeat_pict = 1; break; case SEI_PIC_STRUCT_FRAME_DOUBLING: // Force progressive here, as doubling interlaced frame is a bad idea. - cur->repeat_pict = 2; + cur->f.repeat_pict = 2; break; case SEI_PIC_STRUCT_FRAME_TRIPLING: - cur->repeat_pict = 4; + cur->f.repeat_pict = 4; break; } if ((h->sei_ct_type & 3) && h->sei_pic_struct <= SEI_PIC_STRUCT_BOTTOM_TOP) - cur->interlaced_frame = (h->sei_ct_type & (1<<1)) != 0; + cur->f.interlaced_frame = (h->sei_ct_type & (1 << 1)) != 0; }else{ /* Derive interlacing flag from used decoding process. 
*/ - cur->interlaced_frame = FIELD_OR_MBAFF_PICTURE; + cur->f.interlaced_frame = FIELD_OR_MBAFF_PICTURE; } - h->prev_interlaced_frame = cur->interlaced_frame; + h->prev_interlaced_frame = cur->f.interlaced_frame; if (cur->field_poc[0] != cur->field_poc[1]){ /* Derive top_field_first from field pocs. */ - cur->top_field_first = cur->field_poc[0] < cur->field_poc[1]; + cur->f.top_field_first = cur->field_poc[0] < cur->field_poc[1]; }else{ - if(cur->interlaced_frame || h->sps.pic_struct_present_flag){ + if (cur->f.interlaced_frame || h->sps.pic_struct_present_flag) { /* Use picture timing SEI information. Even if it is a information of a past frame, better than nothing. */ if(h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM || h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM_TOP) - cur->top_field_first = 1; + cur->f.top_field_first = 1; else - cur->top_field_first = 0; + cur->f.top_field_first = 0; }else{ /* Most likely progressive */ - cur->top_field_first = 0; + cur->f.top_field_first = 0; } } @@ -1406,17 +1379,17 @@ static void decode_postinit(H264Context *h, int setup_finished){ assert(pics <= MAX_DELAYED_PIC_COUNT); h->delayed_pic[pics++] = cur; - if(cur->reference == 0) - cur->reference = DELAYED_PIC_REF; + if (cur->f.reference == 0) + cur->f.reference = DELAYED_PIC_REF; out = h->delayed_pic[0]; out_idx = 0; - for(i=1; h->delayed_pic[i] && !h->delayed_pic[i]->key_frame && !h->delayed_pic[i]->mmco_reset; i++) + for (i = 1; h->delayed_pic[i] && !h->delayed_pic[i]->f.key_frame && !h->delayed_pic[i]->mmco_reset; i++) if(h->delayed_pic[i]->poc < out->poc){ out = h->delayed_pic[i]; out_idx = i; } - if(s->avctx->has_b_frames == 0 && (h->delayed_pic[0]->key_frame || h->delayed_pic[0]->mmco_reset)) + if (s->avctx->has_b_frames == 0 && (h->delayed_pic[0]->f.key_frame || h->delayed_pic[0]->mmco_reset)) h->next_outputed_poc= INT_MIN; out_of_order = out->poc < h->next_outputed_poc; @@ -1425,14 +1398,14 @@ static void decode_postinit(H264Context *h, int setup_finished){ else if((out_of_order && pics-1 == s->avctx->has_b_frames && s->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT) || (s->low_delay && ((h->next_outputed_poc != INT_MIN && out->poc > h->next_outputed_poc + 2) - || cur->pict_type == AV_PICTURE_TYPE_B))) + || cur->f.pict_type == AV_PICTURE_TYPE_B))) { s->low_delay = 0; s->avctx->has_b_frames++; } if(out_of_order || pics > s->avctx->has_b_frames){ - out->reference &= ~DELAYED_PIC_REF; + out->f.reference &= ~DELAYED_PIC_REF; out->owner2 = s; // for frame threading, the owner must be the second field's thread // or else the first thread can release the picture and reuse it unsafely for(i=out_idx; h->delayed_pic[i]; i++) @@ -1440,7 +1413,7 @@ static void decode_postinit(H264Context *h, int setup_finished){ } if(!out_of_order && pics > s->avctx->has_b_frames){ h->next_output_pic = out; - if(out_idx==0 && h->delayed_pic[0] && (h->delayed_pic[0]->key_frame || h->delayed_pic[0]->mmco_reset)) { + if (out_idx == 0 && h->delayed_pic[0] && (h->delayed_pic[0]->f.key_frame || h->delayed_pic[0]->mmco_reset)) { h->next_outputed_poc = INT_MIN; } else h->next_outputed_poc = out->poc; @@ -1452,7 +1425,7 @@ static void decode_postinit(H264Context *h, int setup_finished){ ff_thread_finish_setup(s->avctx); } -static inline void backup_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int chroma444, int simple){ +static av_always_inline void backup_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int 
chroma444, int simple){ MpegEncContext * const s = &h->s; uint8_t *top_border; int top_idx = 1; @@ -1527,7 +1500,7 @@ static inline void backup_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src } } -static inline void xchg_mb_border(H264Context *h, uint8_t *src_y, +static av_always_inline void xchg_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int xchg, int chroma444, @@ -1682,7 +1655,7 @@ static av_always_inline void hl_decode_mb_predict_luma(H264Context *h, int mb_ty uint64_t tr_high; if(dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED){ const int topright_avail= (h->topright_samples_available<<i)&0x8000; - assert(mb_y || linesize <= block_offset[i]); + assert(s->mb_y || linesize <= block_offset[i]); if(!topright_avail){ if (pixel_shift) { tr_high= ((uint16_t*)ptr)[3 - linesize/2]*0x0001000100010001ULL; @@ -1784,7 +1757,7 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i const int mb_x= s->mb_x; const int mb_y= s->mb_y; const int mb_xy= h->mb_xy; - const int mb_type= s->current_picture.mb_type[mb_xy]; + const int mb_type = s->current_picture.f.mb_type[mb_xy]; uint8_t *dest_y, *dest_cb, *dest_cr; int linesize, uvlinesize /*dct_offset*/; int i, j; @@ -1794,9 +1767,9 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i const int is_h264 = !CONFIG_SVQ3_DECODER || simple || s->codec_id == CODEC_ID_H264; void (*idct_add)(uint8_t *dst, DCTELEM *block, int stride); - dest_y = s->current_picture.data[0] + ((mb_x << pixel_shift) + mb_y * s->linesize ) * 16; - dest_cb = s->current_picture.data[1] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * 8; - dest_cr = s->current_picture.data[2] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * 8; + dest_y = s->current_picture.f.data[0] + ((mb_x << pixel_shift) + mb_y * s->linesize ) * 16; + dest_cb = s->current_picture.f.data[1] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * 8; + dest_cr = s->current_picture.f.data[2] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * 8; s->dsp.prefetch(dest_y + (s->mb_x&3)*4*s->linesize + (64 << pixel_shift), s->linesize, 4); s->dsp.prefetch(dest_cb + (s->mb_x&7)*s->uvlinesize + (64 << pixel_shift), dest_cr - dest_cb, 2); @@ -1836,8 +1809,8 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i } if (!simple && IS_INTRA_PCM(mb_type)) { + const int bit_depth = h->sps.bit_depth_luma; if (pixel_shift) { - const int bit_depth = h->sps.bit_depth_luma; int j; GetBitContext gb; init_get_bits(&gb, (uint8_t*)h->mb, 384*bit_depth); @@ -1848,6 +1821,15 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i tmp_y[j] = get_bits(&gb, bit_depth); } if(simple || !CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){ + if (!h->sps.chroma_format_idc) { + for (i = 0; i < 8; i++) { + uint16_t *tmp_cb = (uint16_t*)(dest_cb + i*uvlinesize); + uint16_t *tmp_cr = (uint16_t*)(dest_cr + i*uvlinesize); + for (j = 0; j < 8; j++) { + tmp_cb[j] = tmp_cr[j] = 1 << (bit_depth - 1); + } + } + } else { for (i = 0; i < 8; i++) { uint16_t *tmp_cb = (uint16_t*)(dest_cb + i*uvlinesize); for (j = 0; j < 8; j++) @@ -1858,16 +1840,24 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i for (j = 0; j < 8; j++) tmp_cr[j] = get_bits(&gb, bit_depth); } + } } } else { for (i=0; i<16; i++) { memcpy(dest_y + i* linesize, h->mb + i*8, 16); } if(simple || !CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){ + if (!h->sps.chroma_format_idc) { + for (i=0; i<8; i++) 
{ + memset(dest_cb+ i*uvlinesize, 1 << (bit_depth - 1), 8); + memset(dest_cr+ i*uvlinesize, 1 << (bit_depth - 1), 8); + } + } else { for (i=0; i<8; i++) { memcpy(dest_cb+ i*uvlinesize, h->mb + 128 + i*4, 8); memcpy(dest_cr+ i*uvlinesize, h->mb + 160 + i*4, 8); } + } } } } else { @@ -1885,18 +1875,11 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i if(h->deblocking_filter) xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 0, 0, simple, pixel_shift); }else if(is_h264){ - if (pixel_shift) { - hl_motion_16(h, dest_y, dest_cb, dest_cr, - s->me.qpel_put, s->dsp.put_h264_chroma_pixels_tab, - s->me.qpel_avg, s->dsp.avg_h264_chroma_pixels_tab, - h->h264dsp.weight_h264_pixels_tab, - h->h264dsp.biweight_h264_pixels_tab, 0); - } else - hl_motion_8(h, dest_y, dest_cb, dest_cr, - s->me.qpel_put, s->dsp.put_h264_chroma_pixels_tab, - s->me.qpel_avg, s->dsp.avg_h264_chroma_pixels_tab, - h->h264dsp.weight_h264_pixels_tab, - h->h264dsp.biweight_h264_pixels_tab, 0); + hl_motion(h, dest_y, dest_cb, dest_cr, + s->me.qpel_put, s->dsp.put_h264_chroma_pixels_tab, + s->me.qpel_avg, s->dsp.avg_h264_chroma_pixels_tab, + h->h264dsp.weight_h264_pixels_tab, + h->h264dsp.biweight_h264_pixels_tab, pixel_shift, 0); } hl_decode_mb_idct_luma(h, mb_type, is_h264, simple, transform_bypass, pixel_shift, block_offset, linesize, dest_y, 0); @@ -1955,7 +1938,7 @@ static av_always_inline void hl_decode_mb_444_internal(H264Context *h, int simpl const int mb_x= s->mb_x; const int mb_y= s->mb_y; const int mb_xy= h->mb_xy; - const int mb_type= s->current_picture.mb_type[mb_xy]; + const int mb_type = s->current_picture.f.mb_type[mb_xy]; uint8_t *dest[3]; int linesize; int i, j, p; @@ -1965,7 +1948,7 @@ static av_always_inline void hl_decode_mb_444_internal(H264Context *h, int simpl for (p = 0; p < plane_count; p++) { - dest[p] = s->current_picture.data[p] + ((mb_x << pixel_shift) + mb_y * s->linesize) * 16; + dest[p] = s->current_picture.f.data[p] + ((mb_x << pixel_shift) + mb_y * s->linesize) * 16; s->dsp.prefetch(dest[p] + (s->mb_x&3)*4*s->linesize + (64 << pixel_shift), s->linesize, 4); } @@ -2029,18 +2012,11 @@ static av_always_inline void hl_decode_mb_444_internal(H264Context *h, int simpl if(h->deblocking_filter) xchg_mb_border(h, dest[0], dest[1], dest[2], linesize, linesize, 0, 1, simple, pixel_shift); }else{ - if (pixel_shift) { - hl_motion_16(h, dest[0], dest[1], dest[2], - s->me.qpel_put, s->dsp.put_h264_chroma_pixels_tab, - s->me.qpel_avg, s->dsp.avg_h264_chroma_pixels_tab, - h->h264dsp.weight_h264_pixels_tab, - h->h264dsp.biweight_h264_pixels_tab, 1); - } else - hl_motion_8(h, dest[0], dest[1], dest[2], - s->me.qpel_put, s->dsp.put_h264_chroma_pixels_tab, - s->me.qpel_avg, s->dsp.avg_h264_chroma_pixels_tab, - h->h264dsp.weight_h264_pixels_tab, - h->h264dsp.biweight_h264_pixels_tab, 1); + hl_motion(h, dest[0], dest[1], dest[2], + s->me.qpel_put, s->dsp.put_h264_chroma_pixels_tab, + s->me.qpel_avg, s->dsp.avg_h264_chroma_pixels_tab, + h->h264dsp.weight_h264_pixels_tab, + h->h264dsp.biweight_h264_pixels_tab, pixel_shift, 1); } for (p = 0; p < plane_count; p++) @@ -2081,7 +2057,7 @@ static void av_noinline hl_decode_mb_444_simple(H264Context *h){ void ff_h264_hl_decode_mb(H264Context *h){ MpegEncContext * const s = &h->s; const int mb_xy= h->mb_xy; - const int mb_type= s->current_picture.mb_type[mb_xy]; + const int mb_type = s->current_picture.f.mb_type[mb_xy]; int is_complex = CONFIG_SMALL || h->is_complex || IS_INTRA_PCM(mb_type) || s->qscale == 0; if (CHROMA444) { @@ -2198,9 
+2174,10 @@ static void implicit_weight_table(H264Context *h, int field){
     for(ref0=ref_start; ref0 < ref_count0; ref0++){
         int poc0 = h->ref_list[0][ref0].poc;
         for(ref1=ref_start; ref1 < ref_count1; ref1++){
+            int w= 32;
+            if (!h->ref_list[0][ref0].long_ref && !h->ref_list[1][ref1].long_ref){
             int poc1 = h->ref_list[1][ref1].poc;
             int td = av_clip(poc1 - poc0, -128, 127);
-            int w= 32;
             if(td){
                 int tb = av_clip(cur_poc - poc0, -128, 127);
                 int tx = (16384 + (FFABS(td) >> 1)) / td;
@@ -2208,6 +2185,7 @@ static void implicit_weight_table(H264Context *h, int field){
                 if(dist_scale_factor >= -64 && dist_scale_factor <= 128)
                     w = 64 - dist_scale_factor;
             }
+            }
             if(field<0){
                 h->implicit_weight[ref0][ref1][0]=
                 h->implicit_weight[ref0][ref1][1]= w;
@@ -2235,14 +2213,14 @@ static void flush_dpb(AVCodecContext *avctx){
     int i;
     for(i=0; i<MAX_DELAYED_PIC_COUNT; i++) {
         if(h->delayed_pic[i])
-            h->delayed_pic[i]->reference= 0;
+            h->delayed_pic[i]->f.reference = 0;
         h->delayed_pic[i]= NULL;
     }
     h->outputed_poc=h->next_outputed_poc= INT_MIN;
     h->prev_interlaced_frame = 1;
     idr(h);
     if(h->s.current_picture_ptr)
-        h->s.current_picture_ptr->reference= 0;
+        h->s.current_picture_ptr->f.reference = 0;
     h->s.first_field= 0;
     ff_h264_reset_sei(h);
     ff_mpeg_flush(avctx);
@@ -2488,7 +2466,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
     s->dropable= h->nal_ref_idc == 0;
-    if((s->avctx->flags2 & CODEC_FLAG2_FAST) && !h->nal_ref_idc){
+    /* FIXME: 2tap qpel isn't implemented for high bit depth. */
+    if((s->avctx->flags2 & CODEC_FLAG2_FAST) && !h->nal_ref_idc && !h->pixel_shift){
         s->me.qpel_put= s->dsp.put_2tap_qpel_pixels_tab;
         s->me.qpel_avg= s->dsp.avg_2tap_qpel_pixels_tab;
     }else{
@@ -2720,8 +2699,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
              * be fixed. */
             if (h->short_ref_count) {
                 if (prev) {
-                    av_image_copy(h->short_ref[0]->data, h->short_ref[0]->linesize,
-                                  (const uint8_t**)prev->data, prev->linesize,
+                    av_image_copy(h->short_ref[0]->f.data, h->short_ref[0]->f.linesize,
+                                  (const uint8_t**)prev->f.data, prev->f.linesize,
                                   s->avctx->pix_fmt, s->mb_width*16, s->mb_height*16);
                     h->short_ref[0]->poc = prev->poc+2;
                 }
@@ -2732,7 +2711,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
         /* See if we have a decoded first field looking for a pair...
*/ if (s0->first_field) { assert(s0->current_picture_ptr); - assert(s0->current_picture_ptr->data[0]); + assert(s0->current_picture_ptr->f.data[0]); assert(s0->current_picture_ptr->reference != DELAYED_PIC_REF); /* figure out if we have a complementary field pair */ @@ -2746,7 +2725,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){ } else { if (h->nal_ref_idc && - s0->current_picture_ptr->reference && + s0->current_picture_ptr->f.reference && s0->current_picture_ptr->frame_num != h->frame_num) { /* * This and previous field were reference, but had @@ -2974,7 +2953,9 @@ static int decode_slice_header(H264Context *h, H264Context *h0){ } } } - h->qp_thresh= 15 + 52 - FFMIN(h->slice_alpha_c0_offset, h->slice_beta_offset) - FFMAX3(0, h->pps.chroma_qp_index_offset[0], h->pps.chroma_qp_index_offset[1]); + h->qp_thresh = 15 + 52 - FFMIN(h->slice_alpha_c0_offset, h->slice_beta_offset) + - FFMAX3(0, h->pps.chroma_qp_index_offset[0], h->pps.chroma_qp_index_offset[1]) + + 6 * (h->sps.bit_depth_luma - 8); #if 0 //FMO if( h->pps.num_slice_groups > 1 && h->pps.mb_slice_group_map_type >= 3 && h->pps.mb_slice_group_map_type <= 5) @@ -2984,7 +2965,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){ h0->last_slice_type = slice_type; h->slice_num = ++h0->current_slice; if(h->slice_num >= MAX_SLICES){ - av_log(s->avctx, AV_LOG_ERROR, "Too many slices (%d >= %d), increase MAX_SLICES and recompile\n", h->slice_num, MAX_SLICES); + av_log(s->avctx, AV_LOG_WARNING, "Possibly too many slices (%d >= %d), increase MAX_SLICES and recompile if there are artifacts\n", h->slice_num, MAX_SLICES); } for(j=0; j<2; j++){ @@ -2992,16 +2973,16 @@ static int decode_slice_header(H264Context *h, H264Context *h0){ int *ref2frm= h->ref2frm[h->slice_num&(MAX_SLICES-1)][j]; for(i=0; i<16; i++){ id_list[i]= 60; - if(h->ref_list[j][i].data[0]){ + if (h->ref_list[j][i].f.data[0]) { int k; - uint8_t *base= h->ref_list[j][i].base[0]; + uint8_t *base = h->ref_list[j][i].f.base[0]; for(k=0; k<h->short_ref_count; k++) - if(h->short_ref[k]->base[0] == base){ + if (h->short_ref[k]->f.base[0] == base) { id_list[i]= k; break; } for(k=0; k<h->long_ref_count; k++) - if(h->long_ref[k] && h->long_ref[k]->base[0] == base){ + if (h->long_ref[k] && h->long_ref[k]->f.base[0] == base) { id_list[i]= h->short_ref_count + k; break; } @@ -3012,12 +2993,12 @@ static int decode_slice_header(H264Context *h, H264Context *h0){ ref2frm[1]= -1; for(i=0; i<16; i++) ref2frm[i+2]= 4*id_list[i] - +(h->ref_list[j][i].reference&3); + + (h->ref_list[j][i].f.reference & 3); ref2frm[18+0]= ref2frm[18+1]= -1; for(i=16; i<48; i++) ref2frm[i+4]= 4*id_list[(i-16)>>1] - +(h->ref_list[j][i].reference&3); + + (h->ref_list[j][i].f.reference & 3); } //FIXME: fix draw_edges+PAFF+frame threads @@ -3056,6 +3037,82 @@ int ff_h264_get_slice_type(const H264Context *h) } } +static av_always_inline void fill_filter_caches_inter(H264Context *h, MpegEncContext * const s, int mb_type, int top_xy, + int left_xy[LEFT_MBS], int top_type, int left_type[LEFT_MBS], int mb_xy, int list) +{ + int b_stride = h->b_stride; + int16_t (*mv_dst)[2] = &h->mv_cache[list][scan8[0]]; + int8_t *ref_cache = &h->ref_cache[list][scan8[0]]; + if(IS_INTER(mb_type) || IS_DIRECT(mb_type)){ + if(USES_LIST(top_type, list)){ + const int b_xy= h->mb2b_xy[top_xy] + 3*b_stride; + const int b8_xy= 4*top_xy + 2; + int (*ref2frm)[64] = h->ref2frm[ h->slice_table[top_xy]&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 
20 : 2); + AV_COPY128(mv_dst - 1*8, s->current_picture.f.motion_val[list][b_xy + 0]); + ref_cache[0 - 1*8]= + ref_cache[1 - 1*8]= ref2frm[list][s->current_picture.f.ref_index[list][b8_xy + 0]]; + ref_cache[2 - 1*8]= + ref_cache[3 - 1*8]= ref2frm[list][s->current_picture.f.ref_index[list][b8_xy + 1]]; + }else{ + AV_ZERO128(mv_dst - 1*8); + AV_WN32A(&ref_cache[0 - 1*8], ((LIST_NOT_USED)&0xFF)*0x01010101u); + } + + if(!IS_INTERLACED(mb_type^left_type[LTOP])){ + if(USES_LIST(left_type[LTOP], list)){ + const int b_xy= h->mb2b_xy[left_xy[LTOP]] + 3; + const int b8_xy= 4*left_xy[LTOP] + 1; + int (*ref2frm)[64] = h->ref2frm[ h->slice_table[left_xy[LTOP]]&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2); + AV_COPY32(mv_dst - 1 + 0, s->current_picture.f.motion_val[list][b_xy + b_stride*0]); + AV_COPY32(mv_dst - 1 + 8, s->current_picture.f.motion_val[list][b_xy + b_stride*1]); + AV_COPY32(mv_dst - 1 + 16, s->current_picture.f.motion_val[list][b_xy + b_stride*2]); + AV_COPY32(mv_dst - 1 + 24, s->current_picture.f.motion_val[list][b_xy + b_stride*3]); + ref_cache[-1 + 0]= + ref_cache[-1 + 8]= ref2frm[list][s->current_picture.f.ref_index[list][b8_xy + 2*0]]; + ref_cache[-1 + 16]= + ref_cache[-1 + 24]= ref2frm[list][s->current_picture.f.ref_index[list][b8_xy + 2*1]]; + }else{ + AV_ZERO32(mv_dst - 1 + 0); + AV_ZERO32(mv_dst - 1 + 8); + AV_ZERO32(mv_dst - 1 +16); + AV_ZERO32(mv_dst - 1 +24); + ref_cache[-1 + 0]= + ref_cache[-1 + 8]= + ref_cache[-1 + 16]= + ref_cache[-1 + 24]= LIST_NOT_USED; + } + } + } + + if(!USES_LIST(mb_type, list)){ + fill_rectangle(mv_dst, 4, 4, 8, pack16to32(0,0), 4); + AV_WN32A(&ref_cache[0*8], ((LIST_NOT_USED)&0xFF)*0x01010101u); + AV_WN32A(&ref_cache[1*8], ((LIST_NOT_USED)&0xFF)*0x01010101u); + AV_WN32A(&ref_cache[2*8], ((LIST_NOT_USED)&0xFF)*0x01010101u); + AV_WN32A(&ref_cache[3*8], ((LIST_NOT_USED)&0xFF)*0x01010101u); + return; + } + + { + int8_t *ref = &s->current_picture.f.ref_index[list][4*mb_xy]; + int (*ref2frm)[64] = h->ref2frm[ h->slice_num&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2); + uint32_t ref01 = (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101; + uint32_t ref23 = (pack16to32(ref2frm[list][ref[2]],ref2frm[list][ref[3]])&0x00FF00FF)*0x0101; + AV_WN32A(&ref_cache[0*8], ref01); + AV_WN32A(&ref_cache[1*8], ref01); + AV_WN32A(&ref_cache[2*8], ref23); + AV_WN32A(&ref_cache[3*8], ref23); + } + + { + int16_t (*mv_src)[2] = &s->current_picture.f.motion_val[list][4*s->mb_x + 4*s->mb_y*b_stride]; + AV_COPY128(mv_dst + 8*0, mv_src + 0*b_stride); + AV_COPY128(mv_dst + 8*1, mv_src + 1*b_stride); + AV_COPY128(mv_dst + 8*2, mv_src + 2*b_stride); + AV_COPY128(mv_dst + 8*3, mv_src + 3*b_stride); + } +} + /** * * @return non zero if the loop filter can be skiped @@ -3063,208 +3120,124 @@ int ff_h264_get_slice_type(const H264Context *h) static int fill_filter_caches(H264Context *h, int mb_type){ MpegEncContext * const s = &h->s; const int mb_xy= h->mb_xy; - int top_xy, left_xy[2]; - int top_type, left_type[2]; + int top_xy, left_xy[LEFT_MBS]; + int top_type, left_type[LEFT_MBS]; + uint8_t *nnz; + uint8_t *nnz_cache; top_xy = mb_xy - (s->mb_stride << MB_FIELD); - //FIXME deblocking could skip the intra and nnz parts. - /* Wow, what a mess, why didn't they simplify the interlacing & intra * stuff, I can't imagine that these complex rules are worth it. 
*/ - left_xy[1] = left_xy[0] = mb_xy-1; + left_xy[LBOT] = left_xy[LTOP] = mb_xy-1; if(FRAME_MBAFF){ - const int left_mb_field_flag = IS_INTERLACED(s->current_picture.mb_type[mb_xy-1]); + const int left_mb_field_flag = IS_INTERLACED(s->current_picture.f.mb_type[mb_xy - 1]); const int curr_mb_field_flag = IS_INTERLACED(mb_type); if(s->mb_y&1){ if (left_mb_field_flag != curr_mb_field_flag) { - left_xy[0] -= s->mb_stride; + left_xy[LTOP] -= s->mb_stride; } }else{ if(curr_mb_field_flag){ - top_xy += s->mb_stride & (((s->current_picture.mb_type[top_xy ]>>7)&1)-1); + top_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy] >> 7) & 1) - 1); } if (left_mb_field_flag != curr_mb_field_flag) { - left_xy[1] += s->mb_stride; + left_xy[LBOT] += s->mb_stride; } } } h->top_mb_xy = top_xy; - h->left_mb_xy[0] = left_xy[0]; - h->left_mb_xy[1] = left_xy[1]; + h->left_mb_xy[LTOP] = left_xy[LTOP]; + h->left_mb_xy[LBOT] = left_xy[LBOT]; { //for sufficiently low qp, filtering wouldn't do anything //this is a conservative estimate: could also check beta_offset and more accurate chroma_qp int qp_thresh = h->qp_thresh; //FIXME strictly we should store qp_thresh for each mb of a slice - int qp = s->current_picture.qscale_table[mb_xy]; + int qp = s->current_picture.f.qscale_table[mb_xy]; if(qp <= qp_thresh - && (left_xy[0]<0 || ((qp + s->current_picture.qscale_table[left_xy[0]] + 1)>>1) <= qp_thresh) - && (top_xy < 0 || ((qp + s->current_picture.qscale_table[top_xy ] + 1)>>1) <= qp_thresh)){ + && (left_xy[LTOP] < 0 || ((qp + s->current_picture.f.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) + && (top_xy < 0 || ((qp + s->current_picture.f.qscale_table[top_xy ] + 1) >> 1) <= qp_thresh)) { if(!FRAME_MBAFF) return 1; - if( (left_xy[0]< 0 || ((qp + s->current_picture.qscale_table[left_xy[1] ] + 1)>>1) <= qp_thresh) - && (top_xy < s->mb_stride || ((qp + s->current_picture.qscale_table[top_xy -s->mb_stride] + 1)>>1) <= qp_thresh)) + if ((left_xy[LTOP] < 0 || ((qp + s->current_picture.f.qscale_table[left_xy[LBOT] ] + 1) >> 1) <= qp_thresh) && + (top_xy < s->mb_stride || ((qp + s->current_picture.f.qscale_table[top_xy - s->mb_stride] + 1) >> 1) <= qp_thresh)) return 1; } } - top_type = s->current_picture.mb_type[top_xy] ; - left_type[0] = s->current_picture.mb_type[left_xy[0]]; - left_type[1] = s->current_picture.mb_type[left_xy[1]]; + top_type = s->current_picture.f.mb_type[top_xy]; + left_type[LTOP] = s->current_picture.f.mb_type[left_xy[LTOP]]; + left_type[LBOT] = s->current_picture.f.mb_type[left_xy[LBOT]]; if(h->deblocking_filter == 2){ - if(h->slice_table[top_xy ] != h->slice_num) top_type= 0; - if(h->slice_table[left_xy[0] ] != h->slice_num) left_type[0]= left_type[1]= 0; + if(h->slice_table[top_xy ] != h->slice_num) top_type= 0; + if(h->slice_table[left_xy[LBOT]] != h->slice_num) left_type[LTOP]= left_type[LBOT]= 0; }else{ - if(h->slice_table[top_xy ] == 0xFFFF) top_type= 0; - if(h->slice_table[left_xy[0] ] == 0xFFFF) left_type[0]= left_type[1] =0; + if(h->slice_table[top_xy ] == 0xFFFF) top_type= 0; + if(h->slice_table[left_xy[LBOT]] == 0xFFFF) left_type[LTOP]= left_type[LBOT] =0; } - h->top_type = top_type ; - h->left_type[0]= left_type[0]; - h->left_type[1]= left_type[1]; + h->top_type = top_type; + h->left_type[LTOP]= left_type[LTOP]; + h->left_type[LBOT]= left_type[LBOT]; if(IS_INTRA(mb_type)) return 0; - AV_COPY32(&h->non_zero_count_cache[4+8* 1], &h->non_zero_count[mb_xy][ 0]); - AV_COPY32(&h->non_zero_count_cache[4+8* 2], &h->non_zero_count[mb_xy][ 4]); - AV_COPY32(&h->non_zero_count_cache[4+8* 
3], &h->non_zero_count[mb_xy][ 8]); - AV_COPY32(&h->non_zero_count_cache[4+8* 4], &h->non_zero_count[mb_xy][12]); + fill_filter_caches_inter(h, s, mb_type, top_xy, left_xy, top_type, left_type, mb_xy, 0); + if(h->list_count == 2) + fill_filter_caches_inter(h, s, mb_type, top_xy, left_xy, top_type, left_type, mb_xy, 1); + nnz = h->non_zero_count[mb_xy]; + nnz_cache = h->non_zero_count_cache; + AV_COPY32(&nnz_cache[4+8*1], &nnz[ 0]); + AV_COPY32(&nnz_cache[4+8*2], &nnz[ 4]); + AV_COPY32(&nnz_cache[4+8*3], &nnz[ 8]); + AV_COPY32(&nnz_cache[4+8*4], &nnz[12]); h->cbp= h->cbp_table[mb_xy]; - { - int list; - for(list=0; list<h->list_count; list++){ - int8_t *ref; - int y, b_stride; - int16_t (*mv_dst)[2]; - int16_t (*mv_src)[2]; - - if(!USES_LIST(mb_type, list)){ - fill_rectangle( h->mv_cache[list][scan8[0]], 4, 4, 8, pack16to32(0,0), 4); - AV_WN32A(&h->ref_cache[list][scan8[ 0]], ((LIST_NOT_USED)&0xFF)*0x01010101u); - AV_WN32A(&h->ref_cache[list][scan8[ 2]], ((LIST_NOT_USED)&0xFF)*0x01010101u); - AV_WN32A(&h->ref_cache[list][scan8[ 8]], ((LIST_NOT_USED)&0xFF)*0x01010101u); - AV_WN32A(&h->ref_cache[list][scan8[10]], ((LIST_NOT_USED)&0xFF)*0x01010101u); - continue; - } - - ref = &s->current_picture.ref_index[list][4*mb_xy]; - { - int (*ref2frm)[64] = h->ref2frm[ h->slice_num&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2); - AV_WN32A(&h->ref_cache[list][scan8[ 0]], (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101); - AV_WN32A(&h->ref_cache[list][scan8[ 2]], (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101); - ref += 2; - AV_WN32A(&h->ref_cache[list][scan8[ 8]], (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101); - AV_WN32A(&h->ref_cache[list][scan8[10]], (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101); - } - - b_stride = h->b_stride; - mv_dst = &h->mv_cache[list][scan8[0]]; - mv_src = &s->current_picture.motion_val[list][4*s->mb_x + 4*s->mb_y*b_stride]; - for(y=0; y<4; y++){ - AV_COPY128(mv_dst + 8*y, mv_src + y*b_stride); - } - - } - } - - -/* -0 . T T. T T T T -1 L . .L . . . . -2 L . .L . . . . -3 . T TL . . . . -4 L . .L . . . . -5 L . .. . . . . 
-*/ -//FIXME constraint_intra_pred & partitioning & nnz (let us hope this is just a typo in the spec) if(top_type){ - AV_COPY32(&h->non_zero_count_cache[4+8*0], &h->non_zero_count[top_xy][3*4]); + nnz = h->non_zero_count[top_xy]; + AV_COPY32(&nnz_cache[4+8*0], &nnz[3*4]); } - if(left_type[0]){ - h->non_zero_count_cache[3+8*1]= h->non_zero_count[left_xy[0]][3+0*4]; - h->non_zero_count_cache[3+8*2]= h->non_zero_count[left_xy[0]][3+1*4]; - h->non_zero_count_cache[3+8*3]= h->non_zero_count[left_xy[0]][3+2*4]; - h->non_zero_count_cache[3+8*4]= h->non_zero_count[left_xy[0]][3+3*4]; + if(left_type[LTOP]){ + nnz = h->non_zero_count[left_xy[LTOP]]; + nnz_cache[3+8*1]= nnz[3+0*4]; + nnz_cache[3+8*2]= nnz[3+1*4]; + nnz_cache[3+8*3]= nnz[3+2*4]; + nnz_cache[3+8*4]= nnz[3+3*4]; } // CAVLC 8x8dct requires NNZ values for residual decoding that differ from what the loop filter needs if(!CABAC && h->pps.transform_8x8_mode){ if(IS_8x8DCT(top_type)){ - h->non_zero_count_cache[4+8*0]= - h->non_zero_count_cache[5+8*0]= (h->cbp_table[top_xy] & 0x4000) >> 12; - h->non_zero_count_cache[6+8*0]= - h->non_zero_count_cache[7+8*0]= (h->cbp_table[top_xy] & 0x8000) >> 12; + nnz_cache[4+8*0]= + nnz_cache[5+8*0]= (h->cbp_table[top_xy] & 0x4000) >> 12; + nnz_cache[6+8*0]= + nnz_cache[7+8*0]= (h->cbp_table[top_xy] & 0x8000) >> 12; } - if(IS_8x8DCT(left_type[0])){ - h->non_zero_count_cache[3+8*1]= - h->non_zero_count_cache[3+8*2]= (h->cbp_table[left_xy[0]]&0x2000) >> 12; //FIXME check MBAFF + if(IS_8x8DCT(left_type[LTOP])){ + nnz_cache[3+8*1]= + nnz_cache[3+8*2]= (h->cbp_table[left_xy[LTOP]]&0x2000) >> 12; //FIXME check MBAFF } - if(IS_8x8DCT(left_type[1])){ - h->non_zero_count_cache[3+8*3]= - h->non_zero_count_cache[3+8*4]= (h->cbp_table[left_xy[1]]&0x8000) >> 12; //FIXME check MBAFF + if(IS_8x8DCT(left_type[LBOT])){ + nnz_cache[3+8*3]= + nnz_cache[3+8*4]= (h->cbp_table[left_xy[LBOT]]&0x8000) >> 12; //FIXME check MBAFF } if(IS_8x8DCT(mb_type)){ - h->non_zero_count_cache[scan8[0 ]]= h->non_zero_count_cache[scan8[1 ]]= - h->non_zero_count_cache[scan8[2 ]]= h->non_zero_count_cache[scan8[3 ]]= (h->cbp & 0x1000) >> 12; + nnz_cache[scan8[0 ]]= nnz_cache[scan8[1 ]]= + nnz_cache[scan8[2 ]]= nnz_cache[scan8[3 ]]= (h->cbp & 0x1000) >> 12; - h->non_zero_count_cache[scan8[0+ 4]]= h->non_zero_count_cache[scan8[1+ 4]]= - h->non_zero_count_cache[scan8[2+ 4]]= h->non_zero_count_cache[scan8[3+ 4]]= (h->cbp & 0x2000) >> 12; + nnz_cache[scan8[0+ 4]]= nnz_cache[scan8[1+ 4]]= + nnz_cache[scan8[2+ 4]]= nnz_cache[scan8[3+ 4]]= (h->cbp & 0x2000) >> 12; - h->non_zero_count_cache[scan8[0+ 8]]= h->non_zero_count_cache[scan8[1+ 8]]= - h->non_zero_count_cache[scan8[2+ 8]]= h->non_zero_count_cache[scan8[3+ 8]]= (h->cbp & 0x4000) >> 12; + nnz_cache[scan8[0+ 8]]= nnz_cache[scan8[1+ 8]]= + nnz_cache[scan8[2+ 8]]= nnz_cache[scan8[3+ 8]]= (h->cbp & 0x4000) >> 12; - h->non_zero_count_cache[scan8[0+12]]= h->non_zero_count_cache[scan8[1+12]]= - h->non_zero_count_cache[scan8[2+12]]= h->non_zero_count_cache[scan8[3+12]]= (h->cbp & 0x8000) >> 12; - } - } - - if(IS_INTER(mb_type) || IS_DIRECT(mb_type)){ - int list; - for(list=0; list<h->list_count; list++){ - if(USES_LIST(top_type, list)){ - const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride; - const int b8_xy= 4*top_xy + 2; - int (*ref2frm)[64] = h->ref2frm[ h->slice_table[top_xy]&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 
20 : 2); - AV_COPY128(h->mv_cache[list][scan8[0] + 0 - 1*8], s->current_picture.motion_val[list][b_xy + 0]); - h->ref_cache[list][scan8[0] + 0 - 1*8]= - h->ref_cache[list][scan8[0] + 1 - 1*8]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + 0]]; - h->ref_cache[list][scan8[0] + 2 - 1*8]= - h->ref_cache[list][scan8[0] + 3 - 1*8]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + 1]]; - }else{ - AV_ZERO128(h->mv_cache[list][scan8[0] + 0 - 1*8]); - AV_WN32A(&h->ref_cache[list][scan8[0] + 0 - 1*8], ((LIST_NOT_USED)&0xFF)*0x01010101u); - } - - if(!IS_INTERLACED(mb_type^left_type[0])){ - if(USES_LIST(left_type[0], list)){ - const int b_xy= h->mb2b_xy[left_xy[0]] + 3; - const int b8_xy= 4*left_xy[0] + 1; - int (*ref2frm)[64] = h->ref2frm[ h->slice_table[left_xy[0]]&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2); - AV_COPY32(h->mv_cache[list][scan8[0] - 1 + 0 ], s->current_picture.motion_val[list][b_xy + h->b_stride*0]); - AV_COPY32(h->mv_cache[list][scan8[0] - 1 + 8 ], s->current_picture.motion_val[list][b_xy + h->b_stride*1]); - AV_COPY32(h->mv_cache[list][scan8[0] - 1 +16 ], s->current_picture.motion_val[list][b_xy + h->b_stride*2]); - AV_COPY32(h->mv_cache[list][scan8[0] - 1 +24 ], s->current_picture.motion_val[list][b_xy + h->b_stride*3]); - h->ref_cache[list][scan8[0] - 1 + 0 ]= - h->ref_cache[list][scan8[0] - 1 + 8 ]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + 2*0]]; - h->ref_cache[list][scan8[0] - 1 +16 ]= - h->ref_cache[list][scan8[0] - 1 +24 ]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + 2*1]]; - }else{ - AV_ZERO32(h->mv_cache [list][scan8[0] - 1 + 0 ]); - AV_ZERO32(h->mv_cache [list][scan8[0] - 1 + 8 ]); - AV_ZERO32(h->mv_cache [list][scan8[0] - 1 +16 ]); - AV_ZERO32(h->mv_cache [list][scan8[0] - 1 +24 ]); - h->ref_cache[list][scan8[0] - 1 + 0 ]= - h->ref_cache[list][scan8[0] - 1 + 8 ]= - h->ref_cache[list][scan8[0] - 1 + 16 ]= - h->ref_cache[list][scan8[0] - 1 + 24 ]= LIST_NOT_USED; - } - } + nnz_cache[scan8[0+12]]= nnz_cache[scan8[1+12]]= + nnz_cache[scan8[2+12]]= nnz_cache[scan8[3+12]]= (h->cbp & 0x8000) >> 12; } } @@ -3285,7 +3258,7 @@ static void loop_filter(H264Context *h, int start_x, int end_x){ int mb_xy, mb_type; mb_xy = h->mb_xy = mb_x + mb_y*s->mb_stride; h->slice_num= h->slice_table[mb_xy]; - mb_type= s->current_picture.mb_type[mb_xy]; + mb_type = s->current_picture.f.mb_type[mb_xy]; h->list_count= h->list_counts[mb_xy]; if(FRAME_MBAFF) @@ -3293,9 +3266,9 @@ static void loop_filter(H264Context *h, int start_x, int end_x){ s->mb_x= mb_x; s->mb_y= mb_y; - dest_y = s->current_picture.data[0] + ((mb_x << pixel_shift) + mb_y * s->linesize ) * 16; - dest_cb = s->current_picture.data[1] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * (8 << CHROMA444); - dest_cr = s->current_picture.data[2] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * (8 << CHROMA444); + dest_y = s->current_picture.f.data[0] + ((mb_x << pixel_shift) + mb_y * s->linesize ) * 16; + dest_cb = s->current_picture.f.data[1] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * (8 << CHROMA444); + dest_cr = s->current_picture.f.data[2] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * (8 << CHROMA444); //FIXME simplify above if (MB_FIELD) { @@ -3303,8 +3276,8 @@ static void loop_filter(H264Context *h, int start_x, int end_x){ uvlinesize = h->mb_uvlinesize = s->uvlinesize * 2; if(mb_y&1){ //FIXME move out of this function? 
dest_y -= s->linesize*15; - dest_cb-= s->uvlinesize*7; - dest_cr-= s->uvlinesize*7; + dest_cb-= s->uvlinesize*((8 << CHROMA444)-1); + dest_cr-= s->uvlinesize*((8 << CHROMA444)-1); } } else { linesize = h->mb_linesize = s->linesize; @@ -3313,8 +3286,8 @@ static void loop_filter(H264Context *h, int start_x, int end_x){ backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, CHROMA444, 0); if(fill_filter_caches(h, mb_type)) continue; - h->chroma_qp[0] = get_chroma_qp(h, 0, s->current_picture.qscale_table[mb_xy]); - h->chroma_qp[1] = get_chroma_qp(h, 1, s->current_picture.qscale_table[mb_xy]); + h->chroma_qp[0] = get_chroma_qp(h, 0, s->current_picture.f.qscale_table[mb_xy]); + h->chroma_qp[1] = get_chroma_qp(h, 1, s->current_picture.f.qscale_table[mb_xy]); if (FRAME_MBAFF) { ff_h264_filter_mb (h, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize); @@ -3335,9 +3308,9 @@ static void predict_field_decoding_flag(H264Context *h){ MpegEncContext * const s = &h->s; const int mb_xy= s->mb_x + s->mb_y*s->mb_stride; int mb_type = (h->slice_table[mb_xy-1] == h->slice_num) - ? s->current_picture.mb_type[mb_xy-1] + ? s->current_picture.f.mb_type[mb_xy - 1] : (h->slice_table[mb_xy-s->mb_stride] == h->slice_num) - ? s->current_picture.mb_type[mb_xy-s->mb_stride] + ? s->current_picture.f.mb_type[mb_xy - s->mb_stride] : 0; h->mb_mbaff = h->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0; } @@ -3509,53 +3482,6 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg){ } } } - -#if 0 - for(;s->mb_y < s->mb_height; s->mb_y++){ - for(;s->mb_x < s->mb_width; s->mb_x++){ - int ret= decode_mb(h); - - ff_h264_hl_decode_mb(h); - - if(ret<0){ - av_log(s->avctx, AV_LOG_ERROR, "error while decoding MB %d %d\n", s->mb_x, s->mb_y); - ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_ERROR|DC_ERROR|MV_ERROR)&part_mask); - - return -1; - } - - if(++s->mb_x >= s->mb_width){ - s->mb_x=0; - if(++s->mb_y >= s->mb_height){ - if(get_bits_count(s->gb) == s->gb.size_in_bits){ - ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask); - - return 0; - }else{ - ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END)&part_mask); - - return -1; - } - } - } - - if(get_bits_count(s->?gb) >= s->gb?.size_in_bits){ - if(get_bits_count(s->gb) == s->gb.size_in_bits){ - ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask); - - return 0; - }else{ - ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_ERROR|DC_ERROR|MV_ERROR)&part_mask); - - return -1; - } - } - } - s->mb_x=0; - ff_draw_horiz_band(s, 16*s->mb_y, 16); - } -#endif - return -1; //not reached } /** @@ -3723,7 +3649,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){ if((err = decode_slice_header(hx, h))) break; - s->current_picture_ptr->key_frame |= + s->current_picture_ptr->f.key_frame |= (hx->nal_unit_type == NAL_IDR_SLICE) || (h->sei_recovery_frame_cnt >= 0); @@ -3810,6 +3736,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){ ff_h264dsp_init(&h->h264dsp, h->sps.bit_depth_luma); ff_h264_pred_init(&h->hpc, s->codec_id, h->sps.bit_depth_luma); + s->dsp.dct_bits = h->sps.bit_depth_luma > 8 ? 
32 : 16; dsputil_init(&s->dsp, s->avctx); } else { av_log(avctx, AV_LOG_DEBUG, "Unsupported bit depth: %d\n", h->sps.bit_depth_luma); @@ -3893,7 +3820,7 @@ static int decode_frame(AVCodecContext *avctx, //FIXME factorize this with the output code below out = h->delayed_pic[0]; out_idx = 0; - for(i=1; h->delayed_pic[i] && !h->delayed_pic[i]->key_frame && !h->delayed_pic[i]->mmco_reset; i++) + for (i = 1; h->delayed_pic[i] && !h->delayed_pic[i]->f.key_frame && !h->delayed_pic[i]->mmco_reset; i++) if(h->delayed_pic[i]->poc < out->poc){ out = h->delayed_pic[i]; out_idx = i; @@ -3977,6 +3904,7 @@ static inline void fill_mb_avail(H264Context *h){ #undef random #define COUNT 8000 #define SIZE (COUNT*40) +extern AVCodec ff_h264_decoder; int main(void){ int i; uint8_t temp[SIZE]; @@ -3986,6 +3914,8 @@ int main(void){ DSPContext dsp; AVCodecContext avctx; + avcodec_get_context_defaults3(&avctx, &ff_h264_decoder); + dsputil_init(&dsp, &avctx); init_put_bits(&pb, temp, SIZE); @@ -4037,109 +3967,6 @@ int main(void){ STOP_TIMER("get_se_golomb"); } -#if 0 - printf("testing 4x4 (I)DCT\n"); - - DCTELEM block[16]; - uint8_t src[16], ref[16]; - uint64_t error= 0, max_error=0; - - for(i=0; i<COUNT; i++){ - int j; -// printf("%d %d %d\n", r1, r2, (r2-r1)*16); - for(j=0; j<16; j++){ - ref[j]= random()%255; - src[j]= random()%255; - } - - h264_diff_dct_c(block, src, ref, 4); - - //normalize - for(j=0; j<16; j++){ -// printf("%d ", block[j]); - block[j]= block[j]*4; - if(j&1) block[j]= (block[j]*4 + 2)/5; - if(j&4) block[j]= (block[j]*4 + 2)/5; - } -// printf("\n"); - - h->h264dsp.h264_idct_add(ref, block, 4); -/* for(j=0; j<16; j++){ - printf("%d ", ref[j]); - } - printf("\n");*/ - - for(j=0; j<16; j++){ - int diff= FFABS(src[j] - ref[j]); - - error+= diff*diff; - max_error= FFMAX(max_error, diff); - } - } - printf("error=%f max_error=%d\n", ((float)error)/COUNT/16, (int)max_error ); - printf("testing quantizer\n"); - for(qp=0; qp<52; qp++){ - for(i=0; i<16; i++) - src1_block[i]= src2_block[i]= random()%255; - - } - printf("Testing NAL layer\n"); - - uint8_t bitstream[COUNT]; - uint8_t nal[COUNT*2]; - H264Context h; - memset(&h, 0, sizeof(H264Context)); - - for(i=0; i<COUNT; i++){ - int zeros= i; - int nal_length; - int consumed; - int out_length; - uint8_t *out; - int j; - - for(j=0; j<COUNT; j++){ - bitstream[j]= (random() % 255) + 1; - } - - for(j=0; j<zeros; j++){ - int pos= random() % COUNT; - while(bitstream[pos] == 0){ - pos++; - pos %= COUNT; - } - bitstream[pos]=0; - } - - START_TIMER - - nal_length= encode_nal(&h, nal, bitstream, COUNT, COUNT*2); - if(nal_length<0){ - printf("encoding failed\n"); - return -1; - } - - out= ff_h264_decode_nal(&h, nal, &out_length, &consumed, nal_length); - - STOP_TIMER("NAL") - - if(out_length != COUNT){ - printf("incorrect length %d %d\n", out_length, COUNT); - return -1; - } - - if(consumed != nal_length){ - printf("incorrect consumed length %d %d\n", nal_length, consumed); - return -1; - } - - if(memcmp(bitstream, out, COUNT)){ - printf("mismatch\n"); - return -1; - } - } -#endif - printf("Testing RBSP\n"); @@ -4193,16 +4020,15 @@ static const AVProfile profiles[] = { }; AVCodec ff_h264_decoder = { - "h264", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_H264, - sizeof(H264Context), - ff_h264_decode_init, - NULL, - ff_h264_decode_end, - decode_frame, - /*CODEC_CAP_DRAW_HORIZ_BAND |*/ CODEC_CAP_DR1 | CODEC_CAP_DELAY | - CODEC_CAP_SLICE_THREADS | CODEC_CAP_FRAME_THREADS, + .name = "h264", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_H264, + .priv_data_size = sizeof(H264Context), + 
.init           = ff_h264_decode_init,
+    .close          = ff_h264_decode_end,
+    .decode         = decode_frame,
+    .capabilities   = /*CODEC_CAP_DRAW_HORIZ_BAND |*/ CODEC_CAP_DR1 | CODEC_CAP_DELAY |
+                      CODEC_CAP_SLICE_THREADS | CODEC_CAP_FRAME_THREADS,
     .flush= flush_dpb,
     .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
     .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
@@ -4212,15 +4038,14 @@ AVCodec ff_h264_decoder = {
 #if CONFIG_H264_VDPAU_DECODER
 AVCodec ff_h264_vdpau_decoder = {
-    "h264_vdpau",
-    AVMEDIA_TYPE_VIDEO,
-    CODEC_ID_H264,
-    sizeof(H264Context),
-    ff_h264_decode_init,
-    NULL,
-    ff_h264_decode_end,
-    decode_frame,
-    CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
+    .name           = "h264_vdpau",
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = CODEC_ID_H264,
+    .priv_data_size = sizeof(H264Context),
+    .init           = ff_h264_decode_init,
+    .close          = ff_h264_decode_end,
+    .decode         = decode_frame,
+    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
     .flush= flush_dpb,
     .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (VDPAU acceleration)"),
     .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_H264, PIX_FMT_NONE},
diff --git a/libavcodec/h264.h b/libavcodec/h264.h
index 7bad91b215..d448fc3330 100644
--- a/libavcodec/h264.h
+++ b/libavcodec/h264.h
@@ -70,6 +70,10 @@
 #define MB_FIELD    h->mb_field_decoding_flag
 #define FRAME_MBAFF h->mb_aff_frame
 #define FIELD_PICTURE (s->picture_structure != PICT_FRAME)
+#define LEFT_MBS 2
+#define LTOP 0
+#define LBOT 1
+#define LEFT(i) (i)
 #else
 #define MB_MBAFF 0
 #define MB_FIELD 0
@@ -77,6 +81,10 @@
 #define FIELD_PICTURE 0
 #undef  IS_INTERLACED
 #define IS_INTERLACED(mb_type) 0
+#define LEFT_MBS 1
+#define LTOP 0
+#define LBOT 0
+#define LEFT(i) 0
 #endif
 #define FIELD_OR_MBAFF_PICTURE (FRAME_MBAFF || FIELD_PICTURE)
@@ -272,12 +280,12 @@ typedef struct H264Context{
     int topleft_mb_xy;
     int top_mb_xy;
     int topright_mb_xy;
-    int left_mb_xy[2];
+    int left_mb_xy[LEFT_MBS];
     int topleft_type;
     int top_type;
     int topright_type;
-    int left_type[2];
+    int left_type[LEFT_MBS];
     const uint8_t * left_block;
     int topleft_partition;
@@ -308,11 +316,6 @@ typedef struct H264Context{
 #define PART_NOT_AVAILABLE -2
     /**
-     * is 1 if the specific list MV&references are set to 0,0,-2.
-     */
-    int mv_cache_clean[2];
-
-    /**
      * number of neighbors (top and/or left) that used 8x8 dct
      */
     int neighbor_transform_size;
@@ -507,7 +510,7 @@ typedef struct H264Context{
     int cabac_init_idc;
     /**
-     * @defgroup multithreading Members for slice based multithreading
+     * @name Members for slice based multithreading
      * @{
      */
     struct H264Context *thread_context[MAX_THREADS];
@@ -658,7 +661,6 @@ int ff_h264_check_intra4x4_pred_mode(H264Context *h);
  */
 int ff_h264_check_intra_pred_mode(H264Context *h, int mode);
-void ff_h264_write_back_intra_pred_mode(H264Context *h);
 void ff_h264_hl_decode_mb(H264Context *h);
 int ff_h264_frame_start(H264Context *h);
 int ff_h264_decode_extradata(H264Context *h);
@@ -764,428 +766,14 @@ static av_always_inline uint16_t pack8to16(int a, int b){
 /**
  * gets the chroma qp.
*/ -static inline int get_chroma_qp(H264Context *h, int t, int qscale){ +static av_always_inline int get_chroma_qp(H264Context *h, int t, int qscale){ return h->pps.chroma_qp_table[t][qscale]; } -static inline void pred_pskip_motion(H264Context * const h, int * const mx, int * const my); - -static void fill_decode_neighbors(H264Context *h, int mb_type){ - MpegEncContext * const s = &h->s; - const int mb_xy= h->mb_xy; - int topleft_xy, top_xy, topright_xy, left_xy[2]; - static const uint8_t left_block_options[4][32]={ - {0,1,2,3,7,10,8,11,3+0*4, 3+1*4, 3+2*4, 3+3*4, 1+4*4, 1+8*4, 1+5*4, 1+9*4}, - {2,2,3,3,8,11,8,11,3+2*4, 3+2*4, 3+3*4, 3+3*4, 1+5*4, 1+9*4, 1+5*4, 1+9*4}, - {0,0,1,1,7,10,7,10,3+0*4, 3+0*4, 3+1*4, 3+1*4, 1+4*4, 1+8*4, 1+4*4, 1+8*4}, - {0,2,0,2,7,10,7,10,3+0*4, 3+2*4, 3+0*4, 3+2*4, 1+4*4, 1+8*4, 1+4*4, 1+8*4} - }; - - h->topleft_partition= -1; - - top_xy = mb_xy - (s->mb_stride << MB_FIELD); - - /* Wow, what a mess, why didn't they simplify the interlacing & intra - * stuff, I can't imagine that these complex rules are worth it. */ - - topleft_xy = top_xy - 1; - topright_xy= top_xy + 1; - left_xy[1] = left_xy[0] = mb_xy-1; - h->left_block = left_block_options[0]; - if(FRAME_MBAFF){ - const int left_mb_field_flag = IS_INTERLACED(s->current_picture.mb_type[mb_xy-1]); - const int curr_mb_field_flag = IS_INTERLACED(mb_type); - if(s->mb_y&1){ - if (left_mb_field_flag != curr_mb_field_flag) { - left_xy[1] = left_xy[0] = mb_xy - s->mb_stride - 1; - if (curr_mb_field_flag) { - left_xy[1] += s->mb_stride; - h->left_block = left_block_options[3]; - } else { - topleft_xy += s->mb_stride; - // take top left mv from the middle of the mb, as opposed to all other modes which use the bottom right partition - h->topleft_partition = 0; - h->left_block = left_block_options[1]; - } - } - }else{ - if(curr_mb_field_flag){ - topleft_xy += s->mb_stride & (((s->current_picture.mb_type[top_xy - 1]>>7)&1)-1); - topright_xy += s->mb_stride & (((s->current_picture.mb_type[top_xy + 1]>>7)&1)-1); - top_xy += s->mb_stride & (((s->current_picture.mb_type[top_xy ]>>7)&1)-1); - } - if (left_mb_field_flag != curr_mb_field_flag) { - if (curr_mb_field_flag) { - left_xy[1] += s->mb_stride; - h->left_block = left_block_options[3]; - } else { - h->left_block = left_block_options[2]; - } - } - } - } - - h->topleft_mb_xy = topleft_xy; - h->top_mb_xy = top_xy; - h->topright_mb_xy= topright_xy; - h->left_mb_xy[0] = left_xy[0]; - h->left_mb_xy[1] = left_xy[1]; - //FIXME do we need all in the context? 
- - h->topleft_type = s->current_picture.mb_type[topleft_xy] ; - h->top_type = s->current_picture.mb_type[top_xy] ; - h->topright_type= s->current_picture.mb_type[topright_xy]; - h->left_type[0] = s->current_picture.mb_type[left_xy[0]] ; - h->left_type[1] = s->current_picture.mb_type[left_xy[1]] ; - - if(FMO){ - if(h->slice_table[topleft_xy ] != h->slice_num) h->topleft_type = 0; - if(h->slice_table[top_xy ] != h->slice_num) h->top_type = 0; - if(h->slice_table[left_xy[0] ] != h->slice_num) h->left_type[0] = h->left_type[1] = 0; - }else{ - if(h->slice_table[topleft_xy ] != h->slice_num){ - h->topleft_type = 0; - if(h->slice_table[top_xy ] != h->slice_num) h->top_type = 0; - if(h->slice_table[left_xy[0] ] != h->slice_num) h->left_type[0] = h->left_type[1] = 0; - } - } - if(h->slice_table[topright_xy] != h->slice_num) h->topright_type= 0; -} - -static void fill_decode_caches(H264Context *h, int mb_type){ - MpegEncContext * const s = &h->s; - int topleft_xy, top_xy, topright_xy, left_xy[2]; - int topleft_type, top_type, topright_type, left_type[2]; - const uint8_t * left_block= h->left_block; - int i; - - topleft_xy = h->topleft_mb_xy ; - top_xy = h->top_mb_xy ; - topright_xy = h->topright_mb_xy; - left_xy[0] = h->left_mb_xy[0] ; - left_xy[1] = h->left_mb_xy[1] ; - topleft_type = h->topleft_type ; - top_type = h->top_type ; - topright_type= h->topright_type ; - left_type[0] = h->left_type[0] ; - left_type[1] = h->left_type[1] ; - - if(!IS_SKIP(mb_type)){ - if(IS_INTRA(mb_type)){ - int type_mask= h->pps.constrained_intra_pred ? IS_INTRA(-1) : -1; - h->topleft_samples_available= - h->top_samples_available= - h->left_samples_available= 0xFFFF; - h->topright_samples_available= 0xEEEA; - - if(!(top_type & type_mask)){ - h->topleft_samples_available= 0xB3FF; - h->top_samples_available= 0x33FF; - h->topright_samples_available= 0x26EA; - } - if(IS_INTERLACED(mb_type) != IS_INTERLACED(left_type[0])){ - if(IS_INTERLACED(mb_type)){ - if(!(left_type[0] & type_mask)){ - h->topleft_samples_available&= 0xDFFF; - h->left_samples_available&= 0x5FFF; - } - if(!(left_type[1] & type_mask)){ - h->topleft_samples_available&= 0xFF5F; - h->left_samples_available&= 0xFF5F; - } - }else{ - int left_typei = s->current_picture.mb_type[left_xy[0] + s->mb_stride]; - - assert(left_xy[0] == left_xy[1]); - if(!((left_typei & type_mask) && (left_type[0] & type_mask))){ - h->topleft_samples_available&= 0xDF5F; - h->left_samples_available&= 0x5F5F; - } - } - }else{ - if(!(left_type[0] & type_mask)){ - h->topleft_samples_available&= 0xDF5F; - h->left_samples_available&= 0x5F5F; - } - } - - if(!(topleft_type & type_mask)) - h->topleft_samples_available&= 0x7FFF; - - if(!(topright_type & type_mask)) - h->topright_samples_available&= 0xFBFF; - - if(IS_INTRA4x4(mb_type)){ - if(IS_INTRA4x4(top_type)){ - AV_COPY32(h->intra4x4_pred_mode_cache+4+8*0, h->intra4x4_pred_mode + h->mb2br_xy[top_xy]); - }else{ - h->intra4x4_pred_mode_cache[4+8*0]= - h->intra4x4_pred_mode_cache[5+8*0]= - h->intra4x4_pred_mode_cache[6+8*0]= - h->intra4x4_pred_mode_cache[7+8*0]= 2 - 3*!(top_type & type_mask); - } - for(i=0; i<2; i++){ - if(IS_INTRA4x4(left_type[i])){ - int8_t *mode= h->intra4x4_pred_mode + h->mb2br_xy[left_xy[i]]; - h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]= mode[6-left_block[0+2*i]]; - h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= mode[6-left_block[1+2*i]]; - }else{ - h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]= - h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= 2 - 3*!(left_type[i] & type_mask); - } - } - } - } - - -/* -0 . T T. T T T T -1 L . .L . . 
. . -2 L . .L . . . . -3 . T TL . . . . -4 L . .L . . . . -5 L . .. . . . . -*/ -//FIXME constraint_intra_pred & partitioning & nnz (let us hope this is just a typo in the spec) - if(top_type){ - AV_COPY32(&h->non_zero_count_cache[4+8* 0], &h->non_zero_count[top_xy][4*3]); - if(CHROMA444){ - AV_COPY32(&h->non_zero_count_cache[4+8* 5], &h->non_zero_count[top_xy][4* 7]); - AV_COPY32(&h->non_zero_count_cache[4+8*10], &h->non_zero_count[top_xy][4*11]); - }else{ - AV_COPY32(&h->non_zero_count_cache[4+8* 5], &h->non_zero_count[top_xy][4* 5]); - AV_COPY32(&h->non_zero_count_cache[4+8*10], &h->non_zero_count[top_xy][4* 9]); - } - }else{ - uint32_t top_empty = CABAC && !IS_INTRA(mb_type) ? 0 : 0x40404040; - AV_WN32A(&h->non_zero_count_cache[4+8* 0], top_empty); - AV_WN32A(&h->non_zero_count_cache[4+8* 5], top_empty); - AV_WN32A(&h->non_zero_count_cache[4+8*10], top_empty); - } - - for (i=0; i<2; i++) { - if(left_type[i]){ - h->non_zero_count_cache[3+8* 1 + 2*8*i]= h->non_zero_count[left_xy[i]][left_block[8+0+2*i]]; - h->non_zero_count_cache[3+8* 2 + 2*8*i]= h->non_zero_count[left_xy[i]][left_block[8+1+2*i]]; - if(CHROMA444){ - h->non_zero_count_cache[3+8* 6 + 2*8*i]= h->non_zero_count[left_xy[i]][left_block[8+0+2*i]+4*4]; - h->non_zero_count_cache[3+8* 7 + 2*8*i]= h->non_zero_count[left_xy[i]][left_block[8+1+2*i]+4*4]; - h->non_zero_count_cache[3+8*11 + 2*8*i]= h->non_zero_count[left_xy[i]][left_block[8+0+2*i]+8*4]; - h->non_zero_count_cache[3+8*12 + 2*8*i]= h->non_zero_count[left_xy[i]][left_block[8+1+2*i]+8*4]; - }else{ - h->non_zero_count_cache[3+8* 6 + 8*i]= h->non_zero_count[left_xy[i]][left_block[8+4+2*i]]; - h->non_zero_count_cache[3+8*11 + 8*i]= h->non_zero_count[left_xy[i]][left_block[8+5+2*i]]; - } - }else{ - h->non_zero_count_cache[3+8* 1 + 2*8*i]= - h->non_zero_count_cache[3+8* 2 + 2*8*i]= - h->non_zero_count_cache[3+8* 6 + 2*8*i]= - h->non_zero_count_cache[3+8* 7 + 2*8*i]= - h->non_zero_count_cache[3+8*11 + 2*8*i]= - h->non_zero_count_cache[3+8*12 + 2*8*i]= CABAC && !IS_INTRA(mb_type) ? 0 : 64; - } - } - - if( CABAC ) { - // top_cbp - if(top_type) { - h->top_cbp = h->cbp_table[top_xy]; - } else { - h->top_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F; - } - // left_cbp - if (left_type[0]) { - h->left_cbp = (h->cbp_table[left_xy[0]] & 0x7F0) - | ((h->cbp_table[left_xy[0]]>>(left_block[0]&(~1)))&2) - | (((h->cbp_table[left_xy[1]]>>(left_block[2]&(~1)))&2) << 2); - } else { - h->left_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F; - } - } - } - - if(IS_INTER(mb_type) || (IS_DIRECT(mb_type) && h->direct_spatial_mv_pred)){ - int list; - for(list=0; list<h->list_count; list++){ - if(!USES_LIST(mb_type, list)){ - /*if(!h->mv_cache_clean[list]){ - memset(h->mv_cache [list], 0, 8*5*2*sizeof(int16_t)); //FIXME clean only input? clean at all? 
- memset(h->ref_cache[list], PART_NOT_AVAILABLE, 8*5*sizeof(int8_t)); - h->mv_cache_clean[list]= 1; - }*/ - continue; - } - assert(!(IS_DIRECT(mb_type) && !h->direct_spatial_mv_pred)); - - h->mv_cache_clean[list]= 0; - - if(USES_LIST(top_type, list)){ - const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride; - AV_COPY128(h->mv_cache[list][scan8[0] + 0 - 1*8], s->current_picture.motion_val[list][b_xy + 0]); - h->ref_cache[list][scan8[0] + 0 - 1*8]= - h->ref_cache[list][scan8[0] + 1 - 1*8]= s->current_picture.ref_index[list][4*top_xy + 2]; - h->ref_cache[list][scan8[0] + 2 - 1*8]= - h->ref_cache[list][scan8[0] + 3 - 1*8]= s->current_picture.ref_index[list][4*top_xy + 3]; - }else{ - AV_ZERO128(h->mv_cache[list][scan8[0] + 0 - 1*8]); - AV_WN32A(&h->ref_cache[list][scan8[0] + 0 - 1*8], ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE)&0xFF)*0x01010101); - } - - if(mb_type & (MB_TYPE_16x8|MB_TYPE_8x8)){ - for(i=0; i<2; i++){ - int cache_idx = scan8[0] - 1 + i*2*8; - if(USES_LIST(left_type[i], list)){ - const int b_xy= h->mb2b_xy[left_xy[i]] + 3; - const int b8_xy= 4*left_xy[i] + 1; - AV_COPY32(h->mv_cache[list][cache_idx ], s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[0+i*2]]); - AV_COPY32(h->mv_cache[list][cache_idx+8], s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[1+i*2]]); - h->ref_cache[list][cache_idx ]= s->current_picture.ref_index[list][b8_xy + (left_block[0+i*2]&~1)]; - h->ref_cache[list][cache_idx+8]= s->current_picture.ref_index[list][b8_xy + (left_block[1+i*2]&~1)]; - }else{ - AV_ZERO32(h->mv_cache [list][cache_idx ]); - AV_ZERO32(h->mv_cache [list][cache_idx+8]); - h->ref_cache[list][cache_idx ]= - h->ref_cache[list][cache_idx+8]= (left_type[i]) ? LIST_NOT_USED : PART_NOT_AVAILABLE; - } - } - }else{ - if(USES_LIST(left_type[0], list)){ - const int b_xy= h->mb2b_xy[left_xy[0]] + 3; - const int b8_xy= 4*left_xy[0] + 1; - AV_COPY32(h->mv_cache[list][scan8[0] - 1], s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[0]]); - h->ref_cache[list][scan8[0] - 1]= s->current_picture.ref_index[list][b8_xy + (left_block[0]&~1)]; - }else{ - AV_ZERO32(h->mv_cache [list][scan8[0] - 1]); - h->ref_cache[list][scan8[0] - 1]= left_type[0] ? LIST_NOT_USED : PART_NOT_AVAILABLE; - } - } - - if(USES_LIST(topright_type, list)){ - const int b_xy= h->mb2b_xy[topright_xy] + 3*h->b_stride; - AV_COPY32(h->mv_cache[list][scan8[0] + 4 - 1*8], s->current_picture.motion_val[list][b_xy]); - h->ref_cache[list][scan8[0] + 4 - 1*8]= s->current_picture.ref_index[list][4*topright_xy + 2]; - }else{ - AV_ZERO32(h->mv_cache [list][scan8[0] + 4 - 1*8]); - h->ref_cache[list][scan8[0] + 4 - 1*8]= topright_type ? LIST_NOT_USED : PART_NOT_AVAILABLE; - } - if(h->ref_cache[list][scan8[0] + 4 - 1*8] < 0){ - if(USES_LIST(topleft_type, list)){ - const int b_xy = h->mb2b_xy [topleft_xy] + 3 + h->b_stride + (h->topleft_partition & 2*h->b_stride); - const int b8_xy= 4*topleft_xy + 1 + (h->topleft_partition & 2); - AV_COPY32(h->mv_cache[list][scan8[0] - 1 - 1*8], s->current_picture.motion_val[list][b_xy]); - h->ref_cache[list][scan8[0] - 1 - 1*8]= s->current_picture.ref_index[list][b8_xy]; - }else{ - AV_ZERO32(h->mv_cache[list][scan8[0] - 1 - 1*8]); - h->ref_cache[list][scan8[0] - 1 - 1*8]= topleft_type ? 
LIST_NOT_USED : PART_NOT_AVAILABLE; - } - } - - if((mb_type&(MB_TYPE_SKIP|MB_TYPE_DIRECT2)) && !FRAME_MBAFF) - continue; - - if(!(mb_type&(MB_TYPE_SKIP|MB_TYPE_DIRECT2))) { - h->ref_cache[list][scan8[4 ]] = - h->ref_cache[list][scan8[12]] = PART_NOT_AVAILABLE; - AV_ZERO32(h->mv_cache [list][scan8[4 ]]); - AV_ZERO32(h->mv_cache [list][scan8[12]]); - - if( CABAC ) { - /* XXX beurk, Load mvd */ - if(USES_LIST(top_type, list)){ - const int b_xy= h->mb2br_xy[top_xy]; - AV_COPY64(h->mvd_cache[list][scan8[0] + 0 - 1*8], h->mvd_table[list][b_xy + 0]); - }else{ - AV_ZERO64(h->mvd_cache[list][scan8[0] + 0 - 1*8]); - } - if(USES_LIST(left_type[0], list)){ - const int b_xy= h->mb2br_xy[left_xy[0]] + 6; - AV_COPY16(h->mvd_cache[list][scan8[0] - 1 + 0*8], h->mvd_table[list][b_xy - left_block[0]]); - AV_COPY16(h->mvd_cache[list][scan8[0] - 1 + 1*8], h->mvd_table[list][b_xy - left_block[1]]); - }else{ - AV_ZERO16(h->mvd_cache [list][scan8[0] - 1 + 0*8]); - AV_ZERO16(h->mvd_cache [list][scan8[0] - 1 + 1*8]); - } - if(USES_LIST(left_type[1], list)){ - const int b_xy= h->mb2br_xy[left_xy[1]] + 6; - AV_COPY16(h->mvd_cache[list][scan8[0] - 1 + 2*8], h->mvd_table[list][b_xy - left_block[2]]); - AV_COPY16(h->mvd_cache[list][scan8[0] - 1 + 3*8], h->mvd_table[list][b_xy - left_block[3]]); - }else{ - AV_ZERO16(h->mvd_cache [list][scan8[0] - 1 + 2*8]); - AV_ZERO16(h->mvd_cache [list][scan8[0] - 1 + 3*8]); - } - AV_ZERO16(h->mvd_cache [list][scan8[4 ]]); - AV_ZERO16(h->mvd_cache [list][scan8[12]]); - if(h->slice_type_nos == AV_PICTURE_TYPE_B){ - fill_rectangle(&h->direct_cache[scan8[0]], 4, 4, 8, MB_TYPE_16x16>>1, 1); - - if(IS_DIRECT(top_type)){ - AV_WN32A(&h->direct_cache[scan8[0] - 1*8], 0x01010101u*(MB_TYPE_DIRECT2>>1)); - }else if(IS_8X8(top_type)){ - int b8_xy = 4*top_xy; - h->direct_cache[scan8[0] + 0 - 1*8]= h->direct_table[b8_xy + 2]; - h->direct_cache[scan8[0] + 2 - 1*8]= h->direct_table[b8_xy + 3]; - }else{ - AV_WN32A(&h->direct_cache[scan8[0] - 1*8], 0x01010101*(MB_TYPE_16x16>>1)); - } - - if(IS_DIRECT(left_type[0])) - h->direct_cache[scan8[0] - 1 + 0*8]= MB_TYPE_DIRECT2>>1; - else if(IS_8X8(left_type[0])) - h->direct_cache[scan8[0] - 1 + 0*8]= h->direct_table[4*left_xy[0] + 1 + (left_block[0]&~1)]; - else - h->direct_cache[scan8[0] - 1 + 0*8]= MB_TYPE_16x16>>1; - - if(IS_DIRECT(left_type[1])) - h->direct_cache[scan8[0] - 1 + 2*8]= MB_TYPE_DIRECT2>>1; - else if(IS_8X8(left_type[1])) - h->direct_cache[scan8[0] - 1 + 2*8]= h->direct_table[4*left_xy[1] + 1 + (left_block[2]&~1)]; - else - h->direct_cache[scan8[0] - 1 + 2*8]= MB_TYPE_16x16>>1; - } - } - } - if(FRAME_MBAFF){ -#define MAP_MVS\ - MAP_F2F(scan8[0] - 1 - 1*8, topleft_type)\ - MAP_F2F(scan8[0] + 0 - 1*8, top_type)\ - MAP_F2F(scan8[0] + 1 - 1*8, top_type)\ - MAP_F2F(scan8[0] + 2 - 1*8, top_type)\ - MAP_F2F(scan8[0] + 3 - 1*8, top_type)\ - MAP_F2F(scan8[0] + 4 - 1*8, topright_type)\ - MAP_F2F(scan8[0] - 1 + 0*8, left_type[0])\ - MAP_F2F(scan8[0] - 1 + 1*8, left_type[0])\ - MAP_F2F(scan8[0] - 1 + 2*8, left_type[1])\ - MAP_F2F(scan8[0] - 1 + 3*8, left_type[1]) - if(MB_FIELD){ -#define MAP_F2F(idx, mb_type)\ - if(!IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0){\ - h->ref_cache[list][idx] <<= 1;\ - h->mv_cache[list][idx][1] /= 2;\ - h->mvd_cache[list][idx][1] >>=1;\ - } - MAP_MVS -#undef MAP_F2F - }else{ -#define MAP_F2F(idx, mb_type)\ - if(IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0){\ - h->ref_cache[list][idx] >>= 1;\ - h->mv_cache[list][idx][1] <<= 1;\ - h->mvd_cache[list][idx][1] <<= 1;\ - } - MAP_MVS -#undef MAP_F2F - } - } - } - 
} - - h->neighbor_transform_size= !!IS_8x8DCT(top_type) + !!IS_8x8DCT(left_type[0]); -} - /** * gets the predicted intra4x4 prediction mode. */ -static inline int pred_intra_mode(H264Context *h, int n){ +static av_always_inline int pred_intra_mode(H264Context *h, int n){ const int index8= scan8[n]; const int left= h->intra4x4_pred_mode_cache[index8 - 1]; const int top = h->intra4x4_pred_mode_cache[index8 - 8]; @@ -1197,69 +785,84 @@ static inline int pred_intra_mode(H264Context *h, int n){ else return min; } -static inline void write_back_non_zero_count(H264Context *h){ - const int mb_xy= h->mb_xy; +static av_always_inline void write_back_intra_pred_mode(H264Context *h){ + int8_t *i4x4= h->intra4x4_pred_mode + h->mb2br_xy[h->mb_xy]; + int8_t *i4x4_cache= h->intra4x4_pred_mode_cache; - AV_COPY32(&h->non_zero_count[mb_xy][ 0], &h->non_zero_count_cache[4+8* 1]); - AV_COPY32(&h->non_zero_count[mb_xy][ 4], &h->non_zero_count_cache[4+8* 2]); - AV_COPY32(&h->non_zero_count[mb_xy][ 8], &h->non_zero_count_cache[4+8* 3]); - AV_COPY32(&h->non_zero_count[mb_xy][12], &h->non_zero_count_cache[4+8* 4]); - AV_COPY32(&h->non_zero_count[mb_xy][16], &h->non_zero_count_cache[4+8* 6]); - AV_COPY32(&h->non_zero_count[mb_xy][20], &h->non_zero_count_cache[4+8* 7]); - AV_COPY32(&h->non_zero_count[mb_xy][32], &h->non_zero_count_cache[4+8*11]); - AV_COPY32(&h->non_zero_count[mb_xy][36], &h->non_zero_count_cache[4+8*12]); + AV_COPY32(i4x4, i4x4_cache + 4 + 8*4); + i4x4[4]= i4x4_cache[7+8*3]; + i4x4[5]= i4x4_cache[7+8*2]; + i4x4[6]= i4x4_cache[7+8*1]; +} + +static av_always_inline void write_back_non_zero_count(H264Context *h){ + const int mb_xy= h->mb_xy; + uint8_t *nnz = h->non_zero_count[mb_xy]; + uint8_t *nnz_cache = h->non_zero_count_cache; + + AV_COPY32(&nnz[ 0], &nnz_cache[4+8* 1]); + AV_COPY32(&nnz[ 4], &nnz_cache[4+8* 2]); + AV_COPY32(&nnz[ 8], &nnz_cache[4+8* 3]); + AV_COPY32(&nnz[12], &nnz_cache[4+8* 4]); + AV_COPY32(&nnz[16], &nnz_cache[4+8* 6]); + AV_COPY32(&nnz[20], &nnz_cache[4+8* 7]); + AV_COPY32(&nnz[32], &nnz_cache[4+8*11]); + AV_COPY32(&nnz[36], &nnz_cache[4+8*12]); if(CHROMA444){ - AV_COPY32(&h->non_zero_count[mb_xy][24], &h->non_zero_count_cache[4+8* 8]); - AV_COPY32(&h->non_zero_count[mb_xy][28], &h->non_zero_count_cache[4+8* 9]); - AV_COPY32(&h->non_zero_count[mb_xy][40], &h->non_zero_count_cache[4+8*13]); - AV_COPY32(&h->non_zero_count[mb_xy][44], &h->non_zero_count_cache[4+8*14]); + AV_COPY32(&nnz[24], &nnz_cache[4+8* 8]); + AV_COPY32(&nnz[28], &nnz_cache[4+8* 9]); + AV_COPY32(&nnz[40], &nnz_cache[4+8*13]); + AV_COPY32(&nnz[44], &nnz_cache[4+8*14]); + } +} + +static av_always_inline void write_back_motion_list(H264Context *h, MpegEncContext * const s, int b_stride, + int b_xy, int b8_xy, int mb_type, int list ) +{ + int16_t (*mv_dst)[2] = &s->current_picture.f.motion_val[list][b_xy]; + int16_t (*mv_src)[2] = &h->mv_cache[list][scan8[0]]; + AV_COPY128(mv_dst + 0*b_stride, mv_src + 8*0); + AV_COPY128(mv_dst + 1*b_stride, mv_src + 8*1); + AV_COPY128(mv_dst + 2*b_stride, mv_src + 8*2); + AV_COPY128(mv_dst + 3*b_stride, mv_src + 8*3); + if( CABAC ) { + uint8_t (*mvd_dst)[2] = &h->mvd_table[list][FMO ? 
8*h->mb_xy : h->mb2br_xy[h->mb_xy]]; + uint8_t (*mvd_src)[2] = &h->mvd_cache[list][scan8[0]]; + if(IS_SKIP(mb_type)) + AV_ZERO128(mvd_dst); + else{ + AV_COPY64(mvd_dst, mvd_src + 8*3); + AV_COPY16(mvd_dst + 3 + 3, mvd_src + 3 + 8*0); + AV_COPY16(mvd_dst + 3 + 2, mvd_src + 3 + 8*1); + AV_COPY16(mvd_dst + 3 + 1, mvd_src + 3 + 8*2); + } + } + + { + int8_t *ref_index = &s->current_picture.f.ref_index[list][b8_xy]; + int8_t *ref_cache = h->ref_cache[list]; + ref_index[0+0*2]= ref_cache[scan8[0]]; + ref_index[1+0*2]= ref_cache[scan8[4]]; + ref_index[0+1*2]= ref_cache[scan8[8]]; + ref_index[1+1*2]= ref_cache[scan8[12]]; } } -static inline void write_back_motion(H264Context *h, int mb_type){ +static av_always_inline void write_back_motion(H264Context *h, int mb_type){ MpegEncContext * const s = &h->s; + const int b_stride = h->b_stride; const int b_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride; //try mb2b(8)_xy const int b8_xy= 4*h->mb_xy; - int list; - - if(!USES_LIST(mb_type, 0)) - fill_rectangle(&s->current_picture.ref_index[0][b8_xy], 2, 2, 2, (uint8_t)LIST_NOT_USED, 1); - - for(list=0; list<h->list_count; list++){ - int y, b_stride; - int16_t (*mv_dst)[2]; - int16_t (*mv_src)[2]; - - if(!USES_LIST(mb_type, list)) - continue; - b_stride = h->b_stride; - mv_dst = &s->current_picture.motion_val[list][b_xy]; - mv_src = &h->mv_cache[list][scan8[0]]; - for(y=0; y<4; y++){ - AV_COPY128(mv_dst + y*b_stride, mv_src + 8*y); - } - if( CABAC ) { - uint8_t (*mvd_dst)[2] = &h->mvd_table[list][FMO ? 8*h->mb_xy : h->mb2br_xy[h->mb_xy]]; - uint8_t (*mvd_src)[2] = &h->mvd_cache[list][scan8[0]]; - if(IS_SKIP(mb_type)) - AV_ZERO128(mvd_dst); - else{ - AV_COPY64(mvd_dst, mvd_src + 8*3); - AV_COPY16(mvd_dst + 3 + 3, mvd_src + 3 + 8*0); - AV_COPY16(mvd_dst + 3 + 2, mvd_src + 3 + 8*1); - AV_COPY16(mvd_dst + 3 + 1, mvd_src + 3 + 8*2); - } - } - - { - int8_t *ref_index = &s->current_picture.ref_index[list][b8_xy]; - ref_index[0+0*2]= h->ref_cache[list][scan8[0]]; - ref_index[1+0*2]= h->ref_cache[list][scan8[4]]; - ref_index[0+1*2]= h->ref_cache[list][scan8[8]]; - ref_index[1+1*2]= h->ref_cache[list][scan8[12]]; - } + if(USES_LIST(mb_type, 0)){ + write_back_motion_list(h, s, b_stride, b_xy, b8_xy, mb_type, 0); + }else{ + fill_rectangle(&s->current_picture.f.ref_index[0][b8_xy], + 2, 2, 2, (uint8_t)LIST_NOT_USED, 1); + } + if(USES_LIST(mb_type, 1)){ + write_back_motion_list(h, s, b_stride, b_xy, b8_xy, mb_type, 1); } if(h->slice_type_nos == AV_PICTURE_TYPE_B && CABAC){ @@ -1272,56 +875,11 @@ static inline void write_back_motion(H264Context *h, int mb_type){ } } -static inline int get_dct8x8_allowed(H264Context *h){ +static av_always_inline int get_dct8x8_allowed(H264Context *h){ if(h->sps.direct_8x8_inference_flag) return !(AV_RN64A(h->sub_mb_type) & ((MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_8x8 )*0x0001000100010001ULL)); else return !(AV_RN64A(h->sub_mb_type) & ((MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_8x8|MB_TYPE_DIRECT2)*0x0001000100010001ULL)); } -/** - * decodes a P_SKIP or B_SKIP macroblock - */ -static void av_unused decode_mb_skip(H264Context *h){ - MpegEncContext * const s = &h->s; - const int mb_xy= h->mb_xy; - int mb_type=0; - - memset(h->non_zero_count[mb_xy], 0, 48); - - if(MB_FIELD) - mb_type|= MB_TYPE_INTERLACED; - - if( h->slice_type_nos == AV_PICTURE_TYPE_B ) - { - // just for fill_caches. 
pred_direct_motion will set the real mb_type - mb_type|= MB_TYPE_L0L1|MB_TYPE_DIRECT2|MB_TYPE_SKIP; - if(h->direct_spatial_mv_pred){ - fill_decode_neighbors(h, mb_type); - fill_decode_caches(h, mb_type); //FIXME check what is needed and what not ... - } - ff_h264_pred_direct_motion(h, &mb_type); - mb_type|= MB_TYPE_SKIP; - } - else - { - int mx, my; - mb_type|= MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P1L0|MB_TYPE_SKIP; - - fill_decode_neighbors(h, mb_type); - fill_decode_caches(h, mb_type); //FIXME check what is needed and what not ... - pred_pskip_motion(h, &mx, &my); - fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, 0, 1); - fill_rectangle( h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mx,my), 4); - } - - write_back_motion(h, mb_type); - s->current_picture.mb_type[mb_xy]= mb_type; - s->current_picture.qscale_table[mb_xy]= s->qscale; - h->slice_table[ mb_xy ]= h->slice_num; - h->prev_mb_skipped= 1; -} - -#include "h264_mvpred.h" //For pred_pskip_motion() - #endif /* AVCODEC_H264_H */ diff --git a/libavcodec/h264_cabac.c b/libavcodec/h264_cabac.c index 3975a61699..6bbf87f0ba 100644 --- a/libavcodec/h264_cabac.c +++ b/libavcodec/h264_cabac.c @@ -1284,8 +1284,8 @@ static int decode_cabac_field_decoding_flag(H264Context *h) { unsigned long ctx = 0; - ctx += h->mb_field_decoding_flag & !!s->mb_x; //for FMO:(s->current_picture.mb_type[mba_xy]>>7)&(h->slice_table[mba_xy] == h->slice_num); - ctx += (s->current_picture.mb_type[mbb_xy]>>7)&(h->slice_table[mbb_xy] == h->slice_num); + ctx += h->mb_field_decoding_flag & !!s->mb_x; //for FMO:(s->current_picture.f.mb_type[mba_xy] >> 7) & (h->slice_table[mba_xy] == h->slice_num); + ctx += (s->current_picture.f.mb_type[mbb_xy] >> 7) & (h->slice_table[mbb_xy] == h->slice_num); return get_cabac_noinline( &h->cabac, &(h->cabac_state+70)[ctx] ); } @@ -1296,9 +1296,9 @@ static int decode_cabac_intra_mb_type(H264Context *h, int ctx_base, int intra_sl if(intra_slice){ int ctx=0; - if( h->left_type[0] & (MB_TYPE_INTRA16x16|MB_TYPE_INTRA_PCM)) + if( h->left_type[LTOP] & (MB_TYPE_INTRA16x16|MB_TYPE_INTRA_PCM)) ctx++; - if( h->top_type & (MB_TYPE_INTRA16x16|MB_TYPE_INTRA_PCM)) + if( h->top_type & (MB_TYPE_INTRA16x16|MB_TYPE_INTRA_PCM)) ctx++; if( get_cabac_noinline( &h->cabac, &state[ctx] ) == 0 ) return 0; /* I4x4 */ @@ -1330,13 +1330,13 @@ static int decode_cabac_mb_skip( H264Context *h, int mb_x, int mb_y ) { mba_xy = mb_xy - 1; if( (mb_y&1) && h->slice_table[mba_xy] == h->slice_num - && MB_FIELD == !!IS_INTERLACED( s->current_picture.mb_type[mba_xy] ) ) + && MB_FIELD == !!IS_INTERLACED( s->current_picture.f.mb_type[mba_xy] ) ) mba_xy += s->mb_stride; if( MB_FIELD ){ mbb_xy = mb_xy - s->mb_stride; if( !(mb_y&1) && h->slice_table[mbb_xy] == h->slice_num - && IS_INTERLACED( s->current_picture.mb_type[mbb_xy] ) ) + && IS_INTERLACED( s->current_picture.f.mb_type[mbb_xy] ) ) mbb_xy -= s->mb_stride; }else mbb_xy = mb_x + (mb_y-1)*s->mb_stride; @@ -1346,9 +1346,9 @@ static int decode_cabac_mb_skip( H264Context *h, int mb_x, int mb_y ) { mbb_xy = mb_xy - (s->mb_stride << FIELD_PICTURE); } - if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mba_xy] )) + if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP( s->current_picture.f.mb_type[mba_xy] )) ctx++; - if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mbb_xy] )) + if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP( s->current_picture.f.mb_type[mbb_xy] )) ctx++; if( h->slice_type_nos == AV_PICTURE_TYPE_B ) @@ -1376,10 +1376,10 @@ static int 
decode_cabac_mb_chroma_pre_mode( H264Context *h) { int ctx = 0; /* No need to test for IS_INTRA4x4 and IS_INTRA16x16, as we set chroma_pred_mode_table to 0 */ - if( h->left_type[0] && h->chroma_pred_mode_table[mba_xy] != 0 ) + if( h->left_type[LTOP] && h->chroma_pred_mode_table[mba_xy] != 0 ) ctx++; - if( h->top_type && h->chroma_pred_mode_table[mbb_xy] != 0 ) + if( h->top_type && h->chroma_pred_mode_table[mbb_xy] != 0 ) ctx++; if( get_cabac_noinline( &h->cabac, &h->cabac_state[64+ctx] ) == 0 ) @@ -1649,7 +1649,7 @@ static av_always_inline void decode_cabac_residual_internal( H264Context *h, DCT const uint8_t *sig_off = significant_coeff_flag_offset_8x8[MB_FIELD]; #if ARCH_X86 && HAVE_7REGS && HAVE_EBX_AVAILABLE && !defined(BROKEN_RELOCATIONS) coeff_count= decode_significance_8x8_x86(CC, significant_coeff_ctx_base, index, - last_coeff_ctx_base-significant_coeff_ctx_base, sig_off); + last_coeff_ctx_base, sig_off); } else { coeff_count= decode_significance_x86(CC, max_coeff, significant_coeff_ctx_base, index, last_coeff_ctx_base-significant_coeff_ctx_base); @@ -1819,8 +1819,7 @@ static av_always_inline void decode_cabac_luma_residual( H264Context *h, const u } } } else { - uint8_t * const nnz= &h->non_zero_count_cache[ scan8[4*i8x8+16*p] ]; - nnz[0] = nnz[1] = nnz[8] = nnz[9] = 0; + fill_rectangle(&h->non_zero_count_cache[scan8[4*i8x8+16*p]], 2, 2, 8, 0, 1); } } } @@ -1851,7 +1850,7 @@ int ff_h264_decode_mb_cabac(H264Context *h) { /* read skip flags */ if( skip ) { if( FRAME_MBAFF && (s->mb_y&1)==0 ){ - s->current_picture.mb_type[mb_xy] = MB_TYPE_SKIP; + s->current_picture.f.mb_type[mb_xy] = MB_TYPE_SKIP; h->next_mb_skipped = decode_cabac_mb_skip( h, s->mb_x, s->mb_y+1 ); if(!h->next_mb_skipped) h->mb_mbaff = h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h); @@ -1881,7 +1880,7 @@ int ff_h264_decode_mb_cabac(H264Context *h) { int ctx = 0; assert(h->slice_type_nos == AV_PICTURE_TYPE_B); - if( !IS_DIRECT( h->left_type[0]-1 ) ) + if( !IS_DIRECT( h->left_type[LTOP]-1 ) ) ctx++; if( !IS_DIRECT( h->top_type-1 ) ) ctx++; @@ -1967,10 +1966,10 @@ decode_intra_mb: h->cbp_table[mb_xy] = 0xf7ef; h->chroma_pred_mode_table[mb_xy] = 0; // In deblocking, the quantizer is 0 - s->current_picture.qscale_table[mb_xy]= 0; + s->current_picture.f.qscale_table[mb_xy] = 0; // All coeffs are present memset(h->non_zero_count[mb_xy], 16, 48); - s->current_picture.mb_type[mb_xy]= mb_type; + s->current_picture.f.mb_type[mb_xy] = mb_type; h->last_qscale_diff = 0; return 0; } @@ -2000,7 +1999,7 @@ decode_intra_mb: //av_log( s->avctx, AV_LOG_ERROR, "i4x4 pred=%d mode=%d\n", pred, h->intra4x4_pred_mode_cache[ scan8[i] ] ); } } - ff_h264_write_back_intra_pred_mode(h); + write_back_intra_pred_mode(h); if( ff_h264_check_intra4x4_pred_mode(h) < 0 ) return -1; } else { h->intra16x16_pred_mode= ff_h264_check_intra_pred_mode( h, h->intra16x16_pred_mode ); @@ -2249,24 +2248,25 @@ decode_intra_mb: * the transform mode of the current macroblock there. */ if (CHROMA444 && IS_8x8DCT(mb_type)){ int i; + uint8_t *nnz_cache = h->non_zero_count_cache; for (i = 0; i < 2; i++){ - if (h->left_type[i] && !IS_8x8DCT(h->left_type[i])){ - h->non_zero_count_cache[3+8* 1 + 2*8*i]= - h->non_zero_count_cache[3+8* 2 + 2*8*i]= - h->non_zero_count_cache[3+8* 6 + 2*8*i]= - h->non_zero_count_cache[3+8* 7 + 2*8*i]= - h->non_zero_count_cache[3+8*11 + 2*8*i]= - h->non_zero_count_cache[3+8*12 + 2*8*i]= IS_INTRA(mb_type) ? 
64 : 0; + if (h->left_type[LEFT(i)] && !IS_8x8DCT(h->left_type[LEFT(i)])){ + nnz_cache[3+8* 1 + 2*8*i]= + nnz_cache[3+8* 2 + 2*8*i]= + nnz_cache[3+8* 6 + 2*8*i]= + nnz_cache[3+8* 7 + 2*8*i]= + nnz_cache[3+8*11 + 2*8*i]= + nnz_cache[3+8*12 + 2*8*i]= IS_INTRA(mb_type) ? 64 : 0; } } if (h->top_type && !IS_8x8DCT(h->top_type)){ uint32_t top_empty = CABAC && !IS_INTRA(mb_type) ? 0 : 0x40404040; - AV_WN32A(&h->non_zero_count_cache[4+8* 0], top_empty); - AV_WN32A(&h->non_zero_count_cache[4+8* 5], top_empty); - AV_WN32A(&h->non_zero_count_cache[4+8*10], top_empty); + AV_WN32A(&nnz_cache[4+8* 0], top_empty); + AV_WN32A(&nnz_cache[4+8* 5], top_empty); + AV_WN32A(&nnz_cache[4+8*10], top_empty); } } - s->current_picture.mb_type[mb_xy]= mb_type; + s->current_picture.f.mb_type[mb_xy] = mb_type; if( cbp || IS_INTRA16x16( mb_type ) ) { const uint8_t *scan, *scan8x8; @@ -2345,7 +2345,7 @@ decode_intra_mb: h->last_qscale_diff = 0; } - s->current_picture.qscale_table[mb_xy]= s->qscale; + s->current_picture.f.qscale_table[mb_xy] = s->qscale; write_back_non_zero_count(h); if(MB_MBAFF){ diff --git a/libavcodec/h264_cavlc.c b/libavcodec/h264_cavlc.c index 0ddc430661..0cd147fd7a 100644 --- a/libavcodec/h264_cavlc.c +++ b/libavcodec/h264_cavlc.c @@ -689,11 +689,11 @@ decode_intra_mb: } // In deblocking, the quantizer is 0 - s->current_picture.qscale_table[mb_xy]= 0; + s->current_picture.f.qscale_table[mb_xy] = 0; // All coeffs are present memset(h->non_zero_count[mb_xy], 16, 48); - s->current_picture.mb_type[mb_xy]= mb_type; + s->current_picture.f.mb_type[mb_xy] = mb_type; return 0; } @@ -731,7 +731,7 @@ decode_intra_mb: else h->intra4x4_pred_mode_cache[ scan8[i] ] = mode; } - ff_h264_write_back_intra_pred_mode(h); + write_back_intra_pred_mode(h); if( ff_h264_check_intra4x4_pred_mode(h) < 0) return -1; }else{ @@ -990,7 +990,7 @@ decode_intra_mb: } h->cbp= h->cbp_table[mb_xy]= cbp; - s->current_picture.mb_type[mb_xy]= mb_type; + s->current_picture.f.mb_type[mb_xy] = mb_type; if(cbp || IS_INTRA16x16(mb_type)){ int i4x4, chroma_idx; @@ -1063,7 +1063,7 @@ decode_intra_mb: fill_rectangle(&h->non_zero_count_cache[scan8[16]], 4, 4, 8, 0, 1); fill_rectangle(&h->non_zero_count_cache[scan8[32]], 4, 4, 8, 0, 1); } - s->current_picture.qscale_table[mb_xy]= s->qscale; + s->current_picture.f.qscale_table[mb_xy] = s->qscale; write_back_non_zero_count(h); if(MB_MBAFF){ diff --git a/libavcodec/h264_direct.c b/libavcodec/h264_direct.c index 34a4ff727f..691dcf9d57 100644 --- a/libavcodec/h264_direct.c +++ b/libavcodec/h264_direct.c @@ -87,7 +87,7 @@ static void fill_colmap(H264Context *h, int map[2][16+32], int list, int field, poc= (poc&~3) + rfield + 1; for(j=start; j<end; j++){ - if(4*h->ref_list[0][j].frame_num + (h->ref_list[0][j].reference&3) == poc){ + if (4 * h->ref_list[0][j].frame_num + (h->ref_list[0][j].f.reference & 3) == poc) { int cur_ref= mbafi ? 
(j-16)^field : j; map[list][2*old_ref + (rfield^field) + 16] = cur_ref; if(rfield == field || !interl) @@ -105,12 +105,12 @@ void ff_h264_direct_ref_list_init(H264Context * const h){ Picture * const cur = s->current_picture_ptr; int list, j, field; int sidx= (s->picture_structure&1)^1; - int ref1sidx= (ref1->reference&1)^1; + int ref1sidx = (ref1->f.reference&1)^1; for(list=0; list<2; list++){ cur->ref_count[sidx][list] = h->ref_count[list]; for(j=0; j<h->ref_count[list]; j++) - cur->ref_poc[sidx][list][j] = 4*h->ref_list[list][j].frame_num + (h->ref_list[list][j].reference&3); + cur->ref_poc[sidx][list][j] = 4 * h->ref_list[list][j].frame_num + (h->ref_list[list][j].f.reference & 3); } if(s->picture_structure == PICT_FRAME){ @@ -126,11 +126,11 @@ void ff_h264_direct_ref_list_init(H264Context * const h){ int *col_poc = h->ref_list[1]->field_poc; h->col_parity= (FFABS(col_poc[0] - cur_poc) >= FFABS(col_poc[1] - cur_poc)); ref1sidx=sidx= h->col_parity; - }else if(!(s->picture_structure & h->ref_list[1][0].reference) && !h->ref_list[1][0].mbaff){ // FL -> FL & differ parity - h->col_fieldoff= 2*(h->ref_list[1][0].reference) - 3; + } else if (!(s->picture_structure & h->ref_list[1][0].f.reference) && !h->ref_list[1][0].mbaff) { // FL -> FL & differ parity + h->col_fieldoff = 2 * h->ref_list[1][0].f.reference - 3; } - if(cur->pict_type != AV_PICTURE_TYPE_B || h->direct_spatial_mv_pred) + if (cur->f.pict_type != AV_PICTURE_TYPE_B || h->direct_spatial_mv_pred) return; for(list=0; list<2; list++){ @@ -143,7 +143,7 @@ void ff_h264_direct_ref_list_init(H264Context * const h){ static void await_reference_mb_row(H264Context * const h, Picture *ref, int mb_y) { - int ref_field = ref->reference - 1; + int ref_field = ref->f.reference - 1; int ref_field_picture = ref->field_picture; int ref_height = 16*h->s.mb_height >> ref_field_picture; @@ -234,8 +234,8 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){ return; } - if(IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])){ // AFL/AFR/FR/FL -> AFL/FL - if(!IS_INTERLACED(*mb_type)){ // AFR/FR -> AFL/FL + if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL + if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL mb_y = (s->mb_y&~1) + h->col_parity; mb_xy= s->mb_x + ((s->mb_y&~1) + h->col_parity)*s->mb_stride; b8_stride = 0; @@ -248,8 +248,8 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){ if(IS_INTERLACED(*mb_type)){ // AFL /FL -> AFR/FR mb_y = s->mb_y&~1; mb_xy= s->mb_x + (s->mb_y&~1)*s->mb_stride; - mb_type_col[0] = h->ref_list[1][0].mb_type[mb_xy]; - mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + s->mb_stride]; + mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy]; + mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + s->mb_stride]; b8_stride = 2+4*s->mb_stride; b4_stride *= 6; @@ -264,7 +264,7 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){ }else{ // AFR/FR -> AFR/FR single_col: mb_type_col[0] = - mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy]; + mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy]; sub_mb_type |= MB_TYPE_16x16|MB_TYPE_DIRECT2; /* B_SUB_8x8 */ if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){ @@ -284,10 +284,10 @@ single_col: await_reference_mb_row(h, &h->ref_list[1][0], mb_y); - l1mv0 = &h->ref_list[1][0].motion_val[0][h->mb2b_xy [mb_xy]]; - l1mv1 = &h->ref_list[1][0].motion_val[1][h->mb2b_xy [mb_xy]]; - l1ref0 = &h->ref_list[1][0].ref_index [0][4*mb_xy]; - l1ref1 = &h->ref_list[1][0].ref_index 
[1][4*mb_xy]; + l1mv0 = &h->ref_list[1][0].f.motion_val[0][h->mb2b_xy [mb_xy]]; + l1mv1 = &h->ref_list[1][0].f.motion_val[1][h->mb2b_xy [mb_xy]]; + l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy]; + l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy]; if(!b8_stride){ if(s->mb_y&1){ l1ref0 += 2; @@ -420,8 +420,8 @@ static void pred_temp_direct_motion(H264Context * const h, int *mb_type){ await_reference_mb_row(h, &h->ref_list[1][0], s->mb_y + !!IS_INTERLACED(*mb_type)); - if(IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])){ // AFL/AFR/FR/FL -> AFL/FL - if(!IS_INTERLACED(*mb_type)){ // AFR/FR -> AFL/FL + if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL + if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL mb_y = (s->mb_y&~1) + h->col_parity; mb_xy= s->mb_x + ((s->mb_y&~1) + h->col_parity)*s->mb_stride; b8_stride = 0; @@ -434,8 +434,8 @@ static void pred_temp_direct_motion(H264Context * const h, int *mb_type){ if(IS_INTERLACED(*mb_type)){ // AFL /FL -> AFR/FR mb_y = s->mb_y&~1; mb_xy= s->mb_x + (s->mb_y&~1)*s->mb_stride; - mb_type_col[0] = h->ref_list[1][0].mb_type[mb_xy]; - mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + s->mb_stride]; + mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy]; + mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + s->mb_stride]; b8_stride = 2+4*s->mb_stride; b4_stride *= 6; @@ -451,7 +451,7 @@ static void pred_temp_direct_motion(H264Context * const h, int *mb_type){ }else{ // AFR/FR -> AFR/FR single_col: mb_type_col[0] = - mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy]; + mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy]; sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */ if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){ @@ -471,10 +471,10 @@ single_col: await_reference_mb_row(h, &h->ref_list[1][0], mb_y); - l1mv0 = &h->ref_list[1][0].motion_val[0][h->mb2b_xy [mb_xy]]; - l1mv1 = &h->ref_list[1][0].motion_val[1][h->mb2b_xy [mb_xy]]; - l1ref0 = &h->ref_list[1][0].ref_index [0][4*mb_xy]; - l1ref1 = &h->ref_list[1][0].ref_index [1][4*mb_xy]; + l1mv0 = &h->ref_list[1][0].f.motion_val[0][h->mb2b_xy [mb_xy]]; + l1mv1 = &h->ref_list[1][0].f.motion_val[1][h->mb2b_xy [mb_xy]]; + l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy]; + l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy]; if(!b8_stride){ if(s->mb_y&1){ l1ref0 += 2; diff --git a/libavcodec/h264_loopfilter.c b/libavcodec/h264_loopfilter.c index fd1fb5f662..0b7806994f 100644 --- a/libavcodec/h264_loopfilter.c +++ b/libavcodec/h264_loopfilter.c @@ -100,14 +100,14 @@ static const uint8_t tc0_table[52*3][4] = { {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, }; -static void av_always_inline filter_mb_edgev( uint8_t *pix, int stride, int16_t bS[4], unsigned int qp, H264Context *h) { - const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8); - const unsigned int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset; +/* intra: 0 if this loopfilter call is guaranteed to be inter (bS < 4), 1 if it might be intra (bS == 4) */ +static void av_always_inline filter_mb_edgev( uint8_t *pix, int stride, const int16_t bS[4], unsigned int qp, int a, int b, H264Context *h, int intra ) { + const unsigned int index_a = qp + a; const int alpha = alpha_table[index_a]; - const int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset]; + const int beta = beta_table[qp + b]; if (alpha ==0 || beta == 0) return; - if( bS[0] < 4 ) { + if( bS[0] < 4 || !intra ) { int8_t tc[4]; tc[0] = tc0_table[index_a][bS[0]]; 
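The signature changes above (and in the edgeh/edgech/mbaff variants that follow) share one pattern: the bit-depth dependent part of the alpha/beta table index is computed once per macroblock by the caller and passed in as a and b, and a new intra argument tells the always-inlined filter whether the bS == 4 (intra) branch can occur at all, so the compiler can drop it where it cannot. A hedged sketch of the hoisting, with hypothetical names standing in for the H264Context fields used in the diff:

/* Illustrative sketch only; the alpha/beta/tc0 table lookups and the actual
 * filtering are elided.  The point is that qp_bd_offset is loop-invariant. */
static void filter_all_edges(int bit_depth_luma,
                             int slice_alpha_c0_offset, int slice_beta_offset,
                             const int *edge_qp, int nb_edges)
{
    const int qp_bd_offset = 6 * (bit_depth_luma - 8);   /* 0 for 8-bit, 12 for 10-bit */
    const int a = slice_alpha_c0_offset - qp_bd_offset;  /* passed to every edge call */
    const int b = slice_beta_offset     - qp_bd_offset;
    int i;

    for (i = 0; i < nb_edges; i++) {
        const int index_a    = edge_qp[i] + a;  /* was: qp - qp_bd_offset + slice_alpha_c0_offset */
        const int index_beta = edge_qp[i] + b;  /* was: qp - qp_bd_offset + slice_beta_offset */
        /* ... look up alpha_table[index_a], beta_table[index_beta] and filter ... */
        (void)index_a; (void)index_beta;
    }
}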
tc[1] = tc0_table[index_a][bS[1]]; @@ -118,14 +118,13 @@ static void av_always_inline filter_mb_edgev( uint8_t *pix, int stride, int16_t h->h264dsp.h264_h_loop_filter_luma_intra(pix, stride, alpha, beta); } } -static void av_always_inline filter_mb_edgecv( uint8_t *pix, int stride, int16_t bS[4], unsigned int qp, H264Context *h ) { - const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8); - const unsigned int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset; +static void av_always_inline filter_mb_edgecv( uint8_t *pix, int stride, const int16_t bS[4], unsigned int qp, int a, int b, H264Context *h, int intra ) { + const unsigned int index_a = qp + a; const int alpha = alpha_table[index_a]; - const int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset]; + const int beta = beta_table[qp + b]; if (alpha ==0 || beta == 0) return; - if( bS[0] < 4 ) { + if( bS[0] < 4 || !intra ) { int8_t tc[4]; tc[0] = tc0_table[index_a][bS[0]]+1; tc[1] = tc0_table[index_a][bS[1]]+1; @@ -137,14 +136,13 @@ static void av_always_inline filter_mb_edgecv( uint8_t *pix, int stride, int16_t } } -static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int16_t bS[7], int bsi, int qp ) { - const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8); - int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset; - int alpha = alpha_table[index_a]; - int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset]; +static void av_always_inline filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, const int16_t bS[7], int bsi, int qp, int a, int b, int intra ) { + const unsigned int index_a = qp + a; + const int alpha = alpha_table[index_a]; + const int beta = beta_table[qp + b]; if (alpha ==0 || beta == 0) return; - if( bS[0] < 4 ) { + if( bS[0] < 4 || !intra ) { int8_t tc[4]; tc[0] = tc0_table[index_a][bS[0*bsi]]; tc[1] = tc0_table[index_a][bS[1*bsi]]; @@ -155,14 +153,13 @@ static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int h->h264dsp.h264_h_loop_filter_luma_mbaff_intra(pix, stride, alpha, beta); } } -static void filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, int16_t bS[7], int bsi, int qp ) { - const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8); - int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset; - int alpha = alpha_table[index_a]; - int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset]; +static void av_always_inline filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, const int16_t bS[7], int bsi, int qp, int a, int b, int intra ) { + const unsigned int index_a = qp + a; + const int alpha = alpha_table[index_a]; + const int beta = beta_table[qp + b]; if (alpha ==0 || beta == 0) return; - if( bS[0] < 4 ) { + if( bS[0] < 4 || !intra ) { int8_t tc[4]; tc[0] = tc0_table[index_a][bS[0*bsi]] + 1; tc[1] = tc0_table[index_a][bS[1*bsi]] + 1; @@ -174,14 +171,13 @@ static void filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, in } } -static void av_always_inline filter_mb_edgeh( uint8_t *pix, int stride, int16_t bS[4], unsigned int qp, H264Context *h ) { - const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8); - const unsigned int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset; +static void av_always_inline filter_mb_edgeh( uint8_t *pix, int stride, const int16_t bS[4], unsigned int qp, int a, int b, H264Context *h, int intra ) { + const unsigned int index_a = qp + a; const int alpha = alpha_table[index_a]; - const int beta = beta_table[qp - qp_bd_offset + 
h->slice_beta_offset]; + const int beta = beta_table[qp + b]; if (alpha ==0 || beta == 0) return; - if( bS[0] < 4 ) { + if( bS[0] < 4 || !intra ) { int8_t tc[4]; tc[0] = tc0_table[index_a][bS[0]]; tc[1] = tc0_table[index_a][bS[1]]; @@ -193,14 +189,13 @@ static void av_always_inline filter_mb_edgeh( uint8_t *pix, int stride, int16_t } } -static void av_always_inline filter_mb_edgech( uint8_t *pix, int stride, int16_t bS[4], unsigned int qp, H264Context *h ) { - const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8); - const unsigned int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset; +static void av_always_inline filter_mb_edgech( uint8_t *pix, int stride, const int16_t bS[4], unsigned int qp, int a, int b, H264Context *h, int intra ) { + const unsigned int index_a = qp + a; const int alpha = alpha_table[index_a]; - const int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset]; + const int beta = beta_table[qp + b]; if (alpha ==0 || beta == 0) return; - if( bS[0] < 4 ) { + if( bS[0] < 4 || !intra ) { int8_t tc[4]; tc[0] = tc0_table[index_a][bS[0]]+1; tc[1] = tc0_table[index_a][bS[1]]+1; @@ -212,74 +207,108 @@ static void av_always_inline filter_mb_edgech( uint8_t *pix, int stride, int16_t } } -void ff_h264_filter_mb_fast( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) { +static void av_always_inline h264_filter_mb_fast_internal( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, + unsigned int linesize, unsigned int uvlinesize, int pixel_shift) { MpegEncContext * const s = &h->s; - int mb_xy; - int mb_type, left_type; - int qp, qp0, qp1, qpc, qpc0, qpc1, qp_thresh; int chroma = !(CONFIG_GRAY && (s->flags&CODEC_FLAG_GRAY)); - - mb_xy = h->mb_xy; - - if(!h->top_type || !h->h264dsp.h264_loop_filter_strength || h->pps.chroma_qp_diff || CHROMA444) { - ff_h264_filter_mb(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize); - return; - } - assert(!FRAME_MBAFF); - left_type= h->left_type[0]; - - mb_type = s->current_picture.mb_type[mb_xy]; - qp = s->current_picture.qscale_table[mb_xy]; - qp0 = s->current_picture.qscale_table[mb_xy-1]; - qp1 = s->current_picture.qscale_table[h->top_mb_xy]; - qpc = get_chroma_qp( h, 0, qp ); - qpc0 = get_chroma_qp( h, 0, qp0 ); - qpc1 = get_chroma_qp( h, 0, qp1 ); + int chroma444 = CHROMA444; + + int mb_xy = h->mb_xy; + int left_type= h->left_type[LTOP]; + int top_type= h->top_type; + + int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8); + int a = h->slice_alpha_c0_offset - qp_bd_offset; + int b = h->slice_beta_offset - qp_bd_offset; + + int mb_type = s->current_picture.f.mb_type[mb_xy]; + int qp = s->current_picture.f.qscale_table[mb_xy]; + int qp0 = s->current_picture.f.qscale_table[mb_xy - 1]; + int qp1 = s->current_picture.f.qscale_table[h->top_mb_xy]; + int qpc = get_chroma_qp( h, 0, qp ); + int qpc0 = get_chroma_qp( h, 0, qp0 ); + int qpc1 = get_chroma_qp( h, 0, qp1 ); qp0 = (qp + qp0 + 1) >> 1; qp1 = (qp + qp1 + 1) >> 1; qpc0 = (qpc + qpc0 + 1) >> 1; qpc1 = (qpc + qpc1 + 1) >> 1; - qp_thresh = 15+52 - h->slice_alpha_c0_offset; - if(qp <= qp_thresh && qp0 <= qp_thresh && qp1 <= qp_thresh && - qpc <= qp_thresh && qpc0 <= qp_thresh && qpc1 <= qp_thresh) - return; if( IS_INTRA(mb_type) ) { - int16_t bS4[4] = {4,4,4,4}; - int16_t bS3[4] = {3,3,3,3}; - int16_t *bSH = FIELD_PICTURE ? bS3 : bS4; + static const int16_t bS4[4] = {4,4,4,4}; + static const int16_t bS3[4] = {3,3,3,3}; + const int16_t *bSH = FIELD_PICTURE ? 
bS3 : bS4; if(left_type) - filter_mb_edgev( &img_y[4*0], linesize, bS4, qp0, h); + filter_mb_edgev( &img_y[4*0<<pixel_shift], linesize, bS4, qp0, a, b, h, 1); if( IS_8x8DCT(mb_type) ) { - filter_mb_edgev( &img_y[4*2], linesize, bS3, qp, h); - filter_mb_edgeh( &img_y[4*0*linesize], linesize, bSH, qp1, h); - filter_mb_edgeh( &img_y[4*2*linesize], linesize, bS3, qp, h); + filter_mb_edgev( &img_y[4*2<<pixel_shift], linesize, bS3, qp, a, b, h, 0); + if(top_type){ + filter_mb_edgeh( &img_y[4*0*linesize], linesize, bSH, qp1, a, b, h, 1); + } + filter_mb_edgeh( &img_y[4*2*linesize], linesize, bS3, qp, a, b, h, 0); } else { - filter_mb_edgev( &img_y[4*1], linesize, bS3, qp, h); - filter_mb_edgev( &img_y[4*2], linesize, bS3, qp, h); - filter_mb_edgev( &img_y[4*3], linesize, bS3, qp, h); - filter_mb_edgeh( &img_y[4*0*linesize], linesize, bSH, qp1, h); - filter_mb_edgeh( &img_y[4*1*linesize], linesize, bS3, qp, h); - filter_mb_edgeh( &img_y[4*2*linesize], linesize, bS3, qp, h); - filter_mb_edgeh( &img_y[4*3*linesize], linesize, bS3, qp, h); + filter_mb_edgev( &img_y[4*1<<pixel_shift], linesize, bS3, qp, a, b, h, 0); + filter_mb_edgev( &img_y[4*2<<pixel_shift], linesize, bS3, qp, a, b, h, 0); + filter_mb_edgev( &img_y[4*3<<pixel_shift], linesize, bS3, qp, a, b, h, 0); + if(top_type){ + filter_mb_edgeh( &img_y[4*0*linesize], linesize, bSH, qp1, a, b, h, 1); + } + filter_mb_edgeh( &img_y[4*1*linesize], linesize, bS3, qp, a, b, h, 0); + filter_mb_edgeh( &img_y[4*2*linesize], linesize, bS3, qp, a, b, h, 0); + filter_mb_edgeh( &img_y[4*3*linesize], linesize, bS3, qp, a, b, h, 0); } if(chroma){ - if(left_type){ - filter_mb_edgecv( &img_cb[2*0], uvlinesize, bS4, qpc0, h); - filter_mb_edgecv( &img_cr[2*0], uvlinesize, bS4, qpc0, h); + if(chroma444){ + if(left_type){ + filter_mb_edgev( &img_cb[4*0<<pixel_shift], linesize, bS4, qpc0, a, b, h, 1); + filter_mb_edgev( &img_cr[4*0<<pixel_shift], linesize, bS4, qpc0, a, b, h, 1); + } + if( IS_8x8DCT(mb_type) ) { + filter_mb_edgev( &img_cb[4*2<<pixel_shift], linesize, bS3, qpc, a, b, h, 0); + filter_mb_edgev( &img_cr[4*2<<pixel_shift], linesize, bS3, qpc, a, b, h, 0); + if(top_type){ + filter_mb_edgeh( &img_cb[4*0*linesize], linesize, bSH, qpc1, a, b, h, 1 ); + filter_mb_edgeh( &img_cr[4*0*linesize], linesize, bSH, qpc1, a, b, h, 1 ); + } + filter_mb_edgeh( &img_cb[4*2*linesize], linesize, bS3, qpc, a, b, h, 0); + filter_mb_edgeh( &img_cr[4*2*linesize], linesize, bS3, qpc, a, b, h, 0); + } else { + filter_mb_edgev( &img_cb[4*1<<pixel_shift], linesize, bS3, qpc, a, b, h, 0); + filter_mb_edgev( &img_cr[4*1<<pixel_shift], linesize, bS3, qpc, a, b, h, 0); + filter_mb_edgev( &img_cb[4*2<<pixel_shift], linesize, bS3, qpc, a, b, h, 0); + filter_mb_edgev( &img_cr[4*2<<pixel_shift], linesize, bS3, qpc, a, b, h, 0); + filter_mb_edgev( &img_cb[4*3<<pixel_shift], linesize, bS3, qpc, a, b, h, 0); + filter_mb_edgev( &img_cr[4*3<<pixel_shift], linesize, bS3, qpc, a, b, h, 0); + if(top_type){ + filter_mb_edgeh( &img_cb[4*0*linesize], linesize, bSH, qpc1, a, b, h, 1); + filter_mb_edgeh( &img_cr[4*0*linesize], linesize, bSH, qpc1, a, b, h, 1); + } + filter_mb_edgeh( &img_cb[4*1*linesize], linesize, bS3, qpc, a, b, h, 0); + filter_mb_edgeh( &img_cr[4*1*linesize], linesize, bS3, qpc, a, b, h, 0); + filter_mb_edgeh( &img_cb[4*2*linesize], linesize, bS3, qpc, a, b, h, 0); + filter_mb_edgeh( &img_cr[4*2*linesize], linesize, bS3, qpc, a, b, h, 0); + filter_mb_edgeh( &img_cb[4*3*linesize], linesize, bS3, qpc, a, b, h, 0); + filter_mb_edgeh( &img_cr[4*3*linesize], linesize, bS3, qpc, a, b, h, 
0); + } + }else{ + if(left_type){ + filter_mb_edgecv( &img_cb[2*0<<pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1); + filter_mb_edgecv( &img_cr[2*0<<pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1); + } + filter_mb_edgecv( &img_cb[2*2<<pixel_shift], uvlinesize, bS3, qpc, a, b, h, 0); + filter_mb_edgecv( &img_cr[2*2<<pixel_shift], uvlinesize, bS3, qpc, a, b, h, 0); + if(top_type){ + filter_mb_edgech( &img_cb[2*0*uvlinesize], uvlinesize, bSH, qpc1, a, b, h, 1); + filter_mb_edgech( &img_cr[2*0*uvlinesize], uvlinesize, bSH, qpc1, a, b, h, 1); + } + filter_mb_edgech( &img_cb[2*2*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0); + filter_mb_edgech( &img_cr[2*2*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0); } - filter_mb_edgecv( &img_cb[2*2], uvlinesize, bS3, qpc, h); - filter_mb_edgecv( &img_cr[2*2], uvlinesize, bS3, qpc, h); - filter_mb_edgech( &img_cb[2*0*uvlinesize], uvlinesize, bSH, qpc1, h); - filter_mb_edgech( &img_cb[2*2*uvlinesize], uvlinesize, bS3, qpc, h); - filter_mb_edgech( &img_cr[2*0*uvlinesize], uvlinesize, bSH, qpc1, h); - filter_mb_edgech( &img_cr[2*2*uvlinesize], uvlinesize, bS3, qpc, h); } return; } else { LOCAL_ALIGNED_8(int16_t, bS, [2], [4][4]); int edges; - if( IS_8x8DCT(mb_type) && (h->cbp&7) == 7 ) { + if( IS_8x8DCT(mb_type) && (h->cbp&7) == 7 && !chroma444 ) { edges = 4; AV_WN64A(bS[0][0], 0x0002000200020002ULL); AV_WN64A(bS[0][2], 0x0002000200020002ULL); @@ -287,7 +316,7 @@ void ff_h264_filter_mb_fast( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, AV_WN64A(bS[1][2], 0x0002000200020002ULL); } else { int mask_edge1 = (3*(((5*mb_type)>>5)&1)) | (mb_type>>4); //(mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : (mb_type & MB_TYPE_16x8) ? 1 : 0; - int mask_edge0 = 3*((mask_edge1>>1) & ((5*left_type)>>5)&1); // (mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) && (h->left_type[0] & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : 0; + int mask_edge0 = 3*((mask_edge1>>1) & ((5*left_type)>>5)&1); // (mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) && (h->left_type[LTOP] & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : 0; int step = 1+(mb_type>>24); //IS_8x8DCT(mb_type) ? 2 : 1; edges = 4 - 3*((mb_type>>3) & !(h->cbp & 15)); //(mb_type & MB_TYPE_16x16) && !(h->cbp & 15) ? 1 : 4; h->h264dsp.h264_loop_filter_strength( bS, h->non_zero_count_cache, h->ref_cache, h->mv_cache, @@ -295,38 +324,64 @@ void ff_h264_filter_mb_fast( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, } if( IS_INTRA(left_type) ) AV_WN64A(bS[0][0], 0x0004000400040004ULL); - if( IS_INTRA(h->top_type) ) + if( IS_INTRA(top_type) ) AV_WN64A(bS[1][0], FIELD_PICTURE ? 0x0003000300030003ULL : 0x0004000400040004ULL); -#define FILTER(hv,dir,edge)\ +#define FILTER(hv,dir,edge,intra)\ if(AV_RN64A(bS[dir][edge])) { \ - filter_mb_edge##hv( &img_y[4*edge*(dir?linesize:1)], linesize, bS[dir][edge], edge ? qp : qp##dir, h );\ - if(chroma && !(edge&1)) {\ - filter_mb_edgec##hv( &img_cb[2*edge*(dir?uvlinesize:1)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, h );\ - filter_mb_edgec##hv( &img_cr[2*edge*(dir?uvlinesize:1)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, h );\ + filter_mb_edge##hv( &img_y[4*edge*(dir?linesize:1<<pixel_shift)], linesize, bS[dir][edge], edge ? qp : qp##dir, a, b, h, intra );\ + if(chroma){\ + if(chroma444){\ + filter_mb_edge##hv( &img_cb[4*edge*(dir?linesize:1<<pixel_shift)], linesize, bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra );\ + filter_mb_edge##hv( &img_cr[4*edge*(dir?linesize:1<<pixel_shift)], linesize, bS[dir][edge], edge ? 
qpc : qpc##dir, a, b, h, intra );\ + } else if(!(edge&1)) {\ + filter_mb_edgec##hv( &img_cb[2*edge*(dir?uvlinesize:1<<pixel_shift)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra );\ + filter_mb_edgec##hv( &img_cr[2*edge*(dir?uvlinesize:1<<pixel_shift)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra );\ + }\ }\ } if(left_type) - FILTER(v,0,0); + FILTER(v,0,0,1); if( edges == 1 ) { - FILTER(h,1,0); + if(top_type) + FILTER(h,1,0,1); } else if( IS_8x8DCT(mb_type) ) { - FILTER(v,0,2); - FILTER(h,1,0); - FILTER(h,1,2); + FILTER(v,0,2,0); + if(top_type) + FILTER(h,1,0,1); + FILTER(h,1,2,0); } else { - FILTER(v,0,1); - FILTER(v,0,2); - FILTER(v,0,3); - FILTER(h,1,0); - FILTER(h,1,1); - FILTER(h,1,2); - FILTER(h,1,3); + FILTER(v,0,1,0); + FILTER(v,0,2,0); + FILTER(v,0,3,0); + if(top_type) + FILTER(h,1,0,1); + FILTER(h,1,1,0); + FILTER(h,1,2,0); + FILTER(h,1,3,0); } #undef FILTER } } +void ff_h264_filter_mb_fast( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) { + assert(!FRAME_MBAFF); + if(!h->h264dsp.h264_loop_filter_strength || h->pps.chroma_qp_diff) { + ff_h264_filter_mb(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize); + return; + } + +#if CONFIG_SMALL + h264_filter_mb_fast_internal(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, h->pixel_shift); +#else + if(h->pixel_shift){ + h264_filter_mb_fast_internal(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, 1); + }else{ + h264_filter_mb_fast_internal(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, 0); + } +#endif +} + static int check_mv(H264Context *h, long b_idx, long bn_idx, int mvy_limit){ int v; @@ -356,12 +411,12 @@ static int check_mv(H264Context *h, long b_idx, long bn_idx, int mvy_limit){ return v; } -static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize, int mb_xy, int mb_type, int mvy_limit, int first_vertical_edge_done, int chroma, int chroma444, int dir) { +static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize, int mb_xy, int mb_type, int mvy_limit, int first_vertical_edge_done, int a, int b, int chroma, int chroma444, int dir) { MpegEncContext * const s = &h->s; int edge; int chroma_qp_avg[2]; const int mbm_xy = dir == 0 ? mb_xy -1 : h->top_mb_xy; - const int mbm_type = dir == 0 ? h->left_type[0] : h->top_type; + const int mbm_type = dir == 0 ? 
h->left_type[LTOP] : h->top_type; // how often to recheck mv-based bS when iterating between edges static const uint8_t mask_edge_tab[2][8]={{0,3,3,3,1,1,1,1}, @@ -389,14 +444,14 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u for(j=0; j<2; j++, mbn_xy += s->mb_stride){ DECLARE_ALIGNED(8, int16_t, bS)[4]; int qp; - if( IS_INTRA(mb_type|s->current_picture.mb_type[mbn_xy]) ) { + if (IS_INTRA(mb_type | s->current_picture.f.mb_type[mbn_xy])) { AV_WN64A(bS, 0x0003000300030003ULL); } else { - if(!CABAC && IS_8x8DCT(s->current_picture.mb_type[mbn_xy])){ - bS[0]= 1+((h->cbp_table[mbn_xy] & 4)||h->non_zero_count_cache[scan8[0]+0]); - bS[1]= 1+((h->cbp_table[mbn_xy] & 4)||h->non_zero_count_cache[scan8[0]+1]); - bS[2]= 1+((h->cbp_table[mbn_xy] & 8)||h->non_zero_count_cache[scan8[0]+2]); - bS[3]= 1+((h->cbp_table[mbn_xy] & 8)||h->non_zero_count_cache[scan8[0]+3]); + if (!CABAC && IS_8x8DCT(s->current_picture.f.mb_type[mbn_xy])) { + bS[0]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+0]); + bS[1]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+1]); + bS[2]= 1+((h->cbp_table[mbn_xy] & 0x8000)||h->non_zero_count_cache[scan8[0]+2]); + bS[3]= 1+((h->cbp_table[mbn_xy] & 0x8000)||h->non_zero_count_cache[scan8[0]+3]); }else{ const uint8_t *mbn_nnz = h->non_zero_count[mbn_xy] + 3*4; int i; @@ -407,19 +462,19 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u } // Do not use s->qscale as luma quantizer because it has not the same // value in IPCM macroblocks. - qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbn_xy] + 1 ) >> 1; + qp = (s->current_picture.f.qscale_table[mb_xy] + s->current_picture.f.qscale_table[mbn_xy] + 1) >> 1; tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize); { int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); } - filter_mb_edgeh( &img_y[j*linesize], tmp_linesize, bS, qp, h ); - chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp( h, 0, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1; - chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1; + filter_mb_edgeh( &img_y[j*linesize], tmp_linesize, bS, qp, a, b, h, 0 ); + chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, s->current_picture.f.qscale_table[mbn_xy]) + 1) >> 1; + chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, s->current_picture.f.qscale_table[mbn_xy]) + 1) >> 1; if (chroma) { if (chroma444) { - filter_mb_edgeh (&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], h); - filter_mb_edgeh (&img_cr[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[1], h); + filter_mb_edgeh (&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], a, b, h, 0); + filter_mb_edgeh (&img_cr[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[1], a, b, h, 0); } else { - filter_mb_edgech(&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], h); - filter_mb_edgech(&img_cr[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[1], h); + filter_mb_edgech(&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], a, b, h, 0); + filter_mb_edgech(&img_cr[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[1], a, b, h, 0); } } } @@ -472,32 +527,32 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u // Do not use s->qscale as luma quantizer because it has not the same // value 
in IPCM macroblocks. if(bS[0]+bS[1]+bS[2]+bS[3]){ - qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbm_xy] + 1 ) >> 1; + qp = (s->current_picture.f.qscale_table[mb_xy] + s->current_picture.f.qscale_table[mbm_xy] + 1) >> 1; //tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], s->current_picture.qscale_table[mbn_xy]); tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize); //{ int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); } - chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp( h, 0, s->current_picture.qscale_table[mbm_xy] ) + 1 ) >> 1; - chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbm_xy] ) + 1 ) >> 1; + chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, s->current_picture.f.qscale_table[mbm_xy]) + 1) >> 1; + chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, s->current_picture.f.qscale_table[mbm_xy]) + 1) >> 1; if( dir == 0 ) { - filter_mb_edgev( &img_y[0], linesize, bS, qp, h ); + filter_mb_edgev( &img_y[0], linesize, bS, qp, a, b, h, 1 ); if (chroma) { if (chroma444) { - filter_mb_edgev ( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], h); - filter_mb_edgev ( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], h); + filter_mb_edgev ( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], a, b, h, 1); + filter_mb_edgev ( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], a, b, h, 1); } else { - filter_mb_edgecv( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], h); - filter_mb_edgecv( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], h); + filter_mb_edgecv( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], a, b, h, 1); + filter_mb_edgecv( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], a, b, h, 1); } } } else { - filter_mb_edgeh( &img_y[0], linesize, bS, qp, h ); + filter_mb_edgeh( &img_y[0], linesize, bS, qp, a, b, h, 1 ); if (chroma) { if (chroma444) { - filter_mb_edgeh ( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], h); - filter_mb_edgeh ( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], h); + filter_mb_edgeh ( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], a, b, h, 1); + filter_mb_edgeh ( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], a, b, h, 1); } else { - filter_mb_edgech( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], h); - filter_mb_edgech( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], h); + filter_mb_edgech( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], a, b, h, 1); + filter_mb_edgech( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], a, b, h, 1); } } } @@ -556,30 +611,30 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u /* Filter edge */ // Do not use s->qscale as luma quantizer because it has not the same // value in IPCM macroblocks. 
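As the comments above note, the quantiser used for an edge between two macroblocks is not s->qscale but the rounded average of the two macroblocks' stored qscale_table values, and the chroma_qp_avg values are formed the same way from the per-macroblock chroma QPs. A one-line sketch of that average, purely illustrative:

/* Hedged sketch, not part of the patch: the edge QP as used in the qp and
 * chroma_qp_avg computations around this hunk. */
static inline int edge_qp(int qp_cur, int qp_neighbour)
{
    return (qp_cur + qp_neighbour + 1) >> 1;  /* average, rounding halves up */
}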
- qp = s->current_picture.qscale_table[mb_xy]; + qp = s->current_picture.f.qscale_table[mb_xy]; //tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], s->current_picture.qscale_table[mbn_xy]); tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize); //{ int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); } if( dir == 0 ) { - filter_mb_edgev( &img_y[4*edge << h->pixel_shift], linesize, bS, qp, h ); + filter_mb_edgev( &img_y[4*edge << h->pixel_shift], linesize, bS, qp, a, b, h, 0 ); if (chroma) { if (chroma444) { - filter_mb_edgev ( &img_cb[4*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[0], h); - filter_mb_edgev ( &img_cr[4*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[1], h); + filter_mb_edgev ( &img_cb[4*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[0], a, b, h, 0); + filter_mb_edgev ( &img_cr[4*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[1], a, b, h, 0); } else if( (edge&1) == 0 ) { - filter_mb_edgecv( &img_cb[2*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[0], h); - filter_mb_edgecv( &img_cr[2*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[1], h); + filter_mb_edgecv( &img_cb[2*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[0], a, b, h, 0); + filter_mb_edgecv( &img_cr[2*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[1], a, b, h, 0); } } } else { - filter_mb_edgeh( &img_y[4*edge*linesize], linesize, bS, qp, h ); + filter_mb_edgeh( &img_y[4*edge*linesize], linesize, bS, qp, a, b, h, 0 ); if (chroma) { if (chroma444) { - filter_mb_edgeh ( &img_cb[4*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[0], h); - filter_mb_edgeh ( &img_cr[4*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[1], h); + filter_mb_edgeh ( &img_cb[4*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[0], a, b, h, 0); + filter_mb_edgeh ( &img_cr[4*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[1], a, b, h, 0); } else if( (edge&1) == 0 ) { - filter_mb_edgech( &img_cb[2*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[0], h); - filter_mb_edgech( &img_cr[2*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[1], h); + filter_mb_edgech( &img_cb[2*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[0], a, b, h, 0); + filter_mb_edgech( &img_cr[2*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[1], a, b, h, 0); } } } @@ -589,17 +644,20 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) { MpegEncContext * const s = &h->s; const int mb_xy= mb_x + mb_y*s->mb_stride; - const int mb_type = s->current_picture.mb_type[mb_xy]; + const int mb_type = s->current_picture.f.mb_type[mb_xy]; const int mvy_limit = IS_INTERLACED(mb_type) ? 
2 : 4; int first_vertical_edge_done = 0; av_unused int dir; int chroma = !(CONFIG_GRAY && (s->flags&CODEC_FLAG_GRAY)); + int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8); + int a = h->slice_alpha_c0_offset - qp_bd_offset; + int b = h->slice_beta_offset - qp_bd_offset; if (FRAME_MBAFF // and current and left pair do not have the same interlaced type - && IS_INTERLACED(mb_type^h->left_type[0]) + && IS_INTERLACED(mb_type^h->left_type[LTOP]) // and left mb is in available to us - && h->left_type[0]) { + && h->left_type[LTOP]) { /* First vertical edge is different in MBAFF frames * There are 8 different bS to compute and 2 different Qp */ @@ -627,24 +685,24 @@ void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint const uint8_t *off= offset[MB_FIELD][mb_y&1]; for( i = 0; i < 8; i++ ) { int j= MB_FIELD ? i>>2 : i&1; - int mbn_xy = h->left_mb_xy[j]; - int mbn_type= h->left_type[j]; + int mbn_xy = h->left_mb_xy[LEFT(j)]; + int mbn_type= h->left_type[LEFT(j)]; if( IS_INTRA( mbn_type ) ) bS[i] = 4; else{ bS[i] = 1 + !!(h->non_zero_count_cache[12+8*(i>>1)] | ((!h->pps.cabac && IS_8x8DCT(mbn_type)) ? - (h->cbp_table[mbn_xy] & ((MB_FIELD ? (i&2) : (mb_y&1)) ? 8 : 2)) + (h->cbp_table[mbn_xy] & (((MB_FIELD ? (i&2) : (mb_y&1)) ? 8 : 2) << 12)) : h->non_zero_count[mbn_xy][ off[i] ])); } } } - mb_qp = s->current_picture.qscale_table[mb_xy]; - mbn0_qp = s->current_picture.qscale_table[h->left_mb_xy[0]]; - mbn1_qp = s->current_picture.qscale_table[h->left_mb_xy[1]]; + mb_qp = s->current_picture.f.qscale_table[mb_xy]; + mbn0_qp = s->current_picture.f.qscale_table[h->left_mb_xy[0]]; + mbn1_qp = s->current_picture.f.qscale_table[h->left_mb_xy[1]]; qp[0] = ( mb_qp + mbn0_qp + 1 ) >> 1; bqp[0] = ( get_chroma_qp( h, 0, mb_qp ) + get_chroma_qp( h, 0, mbn0_qp ) + 1 ) >> 1; @@ -660,31 +718,45 @@ void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint tprintf(s->avctx, "filter mb:%d/%d MBAFF, QPy:%d/%d, QPb:%d/%d QPr:%d/%d ls:%d uvls:%d", mb_x, mb_y, qp[0], qp[1], bqp[0], bqp[1], rqp[0], rqp[1], linesize, uvlinesize); { int i; for (i = 0; i < 8; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); } if(MB_FIELD){ - filter_mb_mbaff_edgev ( h, img_y , linesize, bS , 1, qp [0] ); - filter_mb_mbaff_edgev ( h, img_y + 8* linesize, linesize, bS+4, 1, qp [1] ); + filter_mb_mbaff_edgev ( h, img_y , linesize, bS , 1, qp [0], a, b, 1 ); + filter_mb_mbaff_edgev ( h, img_y + 8* linesize, linesize, bS+4, 1, qp [1], a, b, 1 ); if (chroma){ - filter_mb_mbaff_edgecv( h, img_cb, uvlinesize, bS , 1, bqp[0] ); - filter_mb_mbaff_edgecv( h, img_cb + 4*uvlinesize, uvlinesize, bS+4, 1, bqp[1] ); - filter_mb_mbaff_edgecv( h, img_cr, uvlinesize, bS , 1, rqp[0] ); - filter_mb_mbaff_edgecv( h, img_cr + 4*uvlinesize, uvlinesize, bS+4, 1, rqp[1] ); + if (CHROMA444) { + filter_mb_mbaff_edgev ( h, img_cb, uvlinesize, bS , 1, bqp[0], a, b, 1 ); + filter_mb_mbaff_edgev ( h, img_cb + 8*uvlinesize, uvlinesize, bS+4, 1, bqp[1], a, b, 1 ); + filter_mb_mbaff_edgev ( h, img_cr, uvlinesize, bS , 1, rqp[0], a, b, 1 ); + filter_mb_mbaff_edgev ( h, img_cr + 8*uvlinesize, uvlinesize, bS+4, 1, rqp[1], a, b, 1 ); + }else{ + filter_mb_mbaff_edgecv( h, img_cb, uvlinesize, bS , 1, bqp[0], a, b, 1 ); + filter_mb_mbaff_edgecv( h, img_cb + 4*uvlinesize, uvlinesize, bS+4, 1, bqp[1], a, b, 1 ); + filter_mb_mbaff_edgecv( h, img_cr, uvlinesize, bS , 1, rqp[0], a, b, 1 ); + filter_mb_mbaff_edgecv( h, img_cr + 4*uvlinesize, uvlinesize, bS+4, 1, rqp[1], a, b, 1 ); + } } }else{ - filter_mb_mbaff_edgev ( h, 
img_y , 2* linesize, bS , 2, qp [0] ); - filter_mb_mbaff_edgev ( h, img_y + linesize, 2* linesize, bS+1, 2, qp [1] ); + filter_mb_mbaff_edgev ( h, img_y , 2* linesize, bS , 2, qp [0], a, b, 1 ); + filter_mb_mbaff_edgev ( h, img_y + linesize, 2* linesize, bS+1, 2, qp [1], a, b, 1 ); if (chroma){ - filter_mb_mbaff_edgecv( h, img_cb, 2*uvlinesize, bS , 2, bqp[0] ); - filter_mb_mbaff_edgecv( h, img_cb + uvlinesize, 2*uvlinesize, bS+1, 2, bqp[1] ); - filter_mb_mbaff_edgecv( h, img_cr, 2*uvlinesize, bS , 2, rqp[0] ); - filter_mb_mbaff_edgecv( h, img_cr + uvlinesize, 2*uvlinesize, bS+1, 2, rqp[1] ); + if (CHROMA444) { + filter_mb_mbaff_edgev ( h, img_cb, 2*uvlinesize, bS , 2, bqp[0], a, b, 1 ); + filter_mb_mbaff_edgev ( h, img_cb + uvlinesize, 2*uvlinesize, bS+1, 2, bqp[1], a, b, 1 ); + filter_mb_mbaff_edgev ( h, img_cr, 2*uvlinesize, bS , 2, rqp[0], a, b, 1 ); + filter_mb_mbaff_edgev ( h, img_cr + uvlinesize, 2*uvlinesize, bS+1, 2, rqp[1], a, b, 1 ); + }else{ + filter_mb_mbaff_edgecv( h, img_cb, 2*uvlinesize, bS , 2, bqp[0], a, b, 1 ); + filter_mb_mbaff_edgecv( h, img_cb + uvlinesize, 2*uvlinesize, bS+1, 2, bqp[1], a, b, 1 ); + filter_mb_mbaff_edgecv( h, img_cr, 2*uvlinesize, bS , 2, rqp[0], a, b, 1 ); + filter_mb_mbaff_edgecv( h, img_cr + uvlinesize, 2*uvlinesize, bS+1, 2, rqp[1], a, b, 1 ); + } } } } #if CONFIG_SMALL for( dir = 0; dir < 2; dir++ ) - filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, dir ? 0 : first_vertical_edge_done, chroma, CHROMA444, dir); + filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, dir ? 0 : first_vertical_edge_done, a, b, chroma, CHROMA444, dir); #else - filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, first_vertical_edge_done, chroma, CHROMA444, 0); - filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, 0, chroma, CHROMA444, 1); + filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, first_vertical_edge_done, a, b, chroma, CHROMA444, 0); + filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, 0, a, b, chroma, CHROMA444, 1); #endif } diff --git a/libavcodec/h264_mvpred.h b/libavcodec/h264_mvpred.h index 661ef6c381..d2677eaa6c 100644 --- a/libavcodec/h264_mvpred.h +++ b/libavcodec/h264_mvpred.h @@ -35,7 +35,7 @@ //#undef NDEBUG #include <assert.h> -static inline int fetch_diagonal_mv(H264Context *h, const int16_t **C, int i, int list, int part_width){ +static av_always_inline int fetch_diagonal_mv(H264Context *h, const int16_t **C, int i, int list, int part_width){ const int topright_ref= h->ref_cache[list][ i - 8 + part_width ]; MpegEncContext *s = &h->s; @@ -48,15 +48,15 @@ static inline int fetch_diagonal_mv(H264Context *h, const int16_t **C, int i, in const int mb_type = mb_types[xy+(y4>>2)*s->mb_stride];\ if(!USES_LIST(mb_type,list))\ return LIST_NOT_USED;\ - mv = s->current_picture_ptr->motion_val[list][h->mb2b_xy[xy]+3 + y4*h->b_stride];\ + mv = s->current_picture_ptr->f.motion_val[list][h->mb2b_xy[xy] + 3 + y4*h->b_stride];\ h->mv_cache[list][scan8[0]-2][0] = mv[0];\ h->mv_cache[list][scan8[0]-2][1] = mv[1] MV_OP;\ - return s->current_picture_ptr->ref_index[list][4*xy+1 + (y4&~1)] REF_OP; + return s->current_picture_ptr->f.ref_index[list][4*xy + 1 + (y4 & ~1)] REF_OP; if(topright_ref == PART_NOT_AVAILABLE && i >= scan8[0]+8 && (i&7)==4 && 
h->ref_cache[list][scan8[0]-1] != PART_NOT_AVAILABLE){ - const uint32_t *mb_types = s->current_picture_ptr->mb_type; + const uint32_t *mb_types = s->current_picture_ptr->f.mb_type; const int16_t *mv; AV_ZERO32(h->mv_cache[list][scan8[0]-2]); *C = h->mv_cache[list][scan8[0]-2]; @@ -64,7 +64,6 @@ static inline int fetch_diagonal_mv(H264Context *h, const int16_t **C, int i, in if(!MB_FIELD && IS_INTERLACED(h->left_type[0])){ SET_DIAG_MV(*2, >>1, h->left_mb_xy[0]+s->mb_stride, (s->mb_y&1)*2+(i>>5)); - assert(h->left_mb_xy[0] == h->left_mb_xy[1]); } if(MB_FIELD && !IS_INTERLACED(h->left_type[0])){ @@ -93,7 +92,7 @@ static inline int fetch_diagonal_mv(H264Context *h, const int16_t **C, int i, in * @param mx the x component of the predicted motion vector * @param my the y component of the predicted motion vector */ -static inline void pred_motion(H264Context * const h, int n, int part_width, int list, int ref, int * const mx, int * const my){ +static av_always_inline void pred_motion(H264Context * const h, int n, int part_width, int list, int ref, int * const mx, int * const my){ const int index8= scan8[n]; const int top_ref= h->ref_cache[list][ index8 - 8 ]; const int left_ref= h->ref_cache[list][ index8 - 1 ]; @@ -148,7 +147,7 @@ static inline void pred_motion(H264Context * const h, int n, int part_width, int * @param mx the x component of the predicted motion vector * @param my the y component of the predicted motion vector */ -static inline void pred_16x8_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){ +static av_always_inline void pred_16x8_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){ if(n==0){ const int top_ref= h->ref_cache[list][ scan8[0] - 8 ]; const int16_t * const B= h->mv_cache[list][ scan8[0] - 8 ]; @@ -183,7 +182,7 @@ static inline void pred_16x8_motion(H264Context * const h, int n, int list, int * @param mx the x component of the predicted motion vector * @param my the y component of the predicted motion vector */ -static inline void pred_8x16_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){ +static av_always_inline void pred_8x16_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){ if(n==0){ const int left_ref= h->ref_cache[list][ scan8[0] - 1 ]; const int16_t * const A= h->mv_cache[list][ scan8[0] - 1 ]; @@ -214,23 +213,575 @@ static inline void pred_8x16_motion(H264Context * const h, int n, int list, int pred_motion(h, n, 2, list, ref, mx, my); } -static inline void pred_pskip_motion(H264Context * const h, int * const mx, int * const my){ - const int top_ref = h->ref_cache[0][ scan8[0] - 8 ]; - const int left_ref= h->ref_cache[0][ scan8[0] - 1 ]; +#define FIX_MV_MBAFF(type, refn, mvn, idx)\ + if(FRAME_MBAFF){\ + if(MB_FIELD){\ + if(!IS_INTERLACED(type)){\ + refn <<= 1;\ + AV_COPY32(mvbuf[idx], mvn);\ + mvbuf[idx][1] /= 2;\ + mvn = mvbuf[idx];\ + }\ + }else{\ + if(IS_INTERLACED(type)){\ + refn >>= 1;\ + AV_COPY32(mvbuf[idx], mvn);\ + mvbuf[idx][1] <<= 1;\ + mvn = mvbuf[idx];\ + }\ + }\ + } - tprintf(h->s.avctx, "pred_pskip: (%d) (%d) at %2d %2d\n", top_ref, left_ref, h->s.mb_x, h->s.mb_y); +static av_always_inline void pred_pskip_motion(H264Context * const h){ + DECLARE_ALIGNED(4, static const int16_t, zeromv)[2] = {0}; + DECLARE_ALIGNED(4, int16_t, mvbuf)[3][2]; + MpegEncContext * const s = &h->s; + int8_t *ref = s->current_picture.f.ref_index[0]; + int16_t (*mv)[2] = s->current_picture.f.motion_val[0]; + int top_ref, 
left_ref, diagonal_ref, match_count, mx, my; + const int16_t *A, *B, *C; + int b_stride = h->b_stride; + + fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, 0, 1); + + /* To avoid doing an entire fill_decode_caches, we inline the relevant parts here. + * FIXME: this is a partial duplicate of the logic in fill_decode_caches, but it's + * faster this way. Is there a way to avoid this duplication? + */ + if(USES_LIST(h->left_type[LTOP], 0)){ + left_ref = ref[4*h->left_mb_xy[LTOP] + 1 + (h->left_block[0]&~1)]; + A = mv[h->mb2b_xy[h->left_mb_xy[LTOP]] + 3 + b_stride*h->left_block[0]]; + FIX_MV_MBAFF(h->left_type[LTOP], left_ref, A, 0); + if(!(left_ref | AV_RN32A(A))){ + goto zeromv; + } + }else if(h->left_type[LTOP]){ + left_ref = LIST_NOT_USED; + A = zeromv; + }else{ + goto zeromv; + } - if(top_ref == PART_NOT_AVAILABLE || left_ref == PART_NOT_AVAILABLE - || !( top_ref | AV_RN32A(h->mv_cache[0][ scan8[0] - 8 ])) - || !(left_ref | AV_RN32A(h->mv_cache[0][ scan8[0] - 1 ]))){ + if(USES_LIST(h->top_type, 0)){ + top_ref = ref[4*h->top_mb_xy + 2]; + B = mv[h->mb2b_xy[h->top_mb_xy] + 3*b_stride]; + FIX_MV_MBAFF(h->top_type, top_ref, B, 1); + if(!(top_ref | AV_RN32A(B))){ + goto zeromv; + } + }else if(h->top_type){ + top_ref = LIST_NOT_USED; + B = zeromv; + }else{ + goto zeromv; + } - *mx = *my = 0; - return; + tprintf(h->s.avctx, "pred_pskip: (%d) (%d) at %2d %2d\n", top_ref, left_ref, h->s.mb_x, h->s.mb_y); + + if(USES_LIST(h->topright_type, 0)){ + diagonal_ref = ref[4*h->topright_mb_xy + 2]; + C = mv[h->mb2b_xy[h->topright_mb_xy] + 3*b_stride]; + FIX_MV_MBAFF(h->topright_type, diagonal_ref, C, 2); + }else if(h->topright_type){ + diagonal_ref = LIST_NOT_USED; + C = zeromv; + }else{ + if(USES_LIST(h->topleft_type, 0)){ + diagonal_ref = ref[4*h->topleft_mb_xy + 1 + (h->topleft_partition & 2)]; + C = mv[h->mb2b_xy[h->topleft_mb_xy] + 3 + b_stride + (h->topleft_partition & 2*b_stride)]; + FIX_MV_MBAFF(h->topleft_type, diagonal_ref, C, 2); + }else if(h->topleft_type){ + diagonal_ref = LIST_NOT_USED; + C = zeromv; + }else{ + diagonal_ref = PART_NOT_AVAILABLE; + C = zeromv; + } } - pred_motion(h, 0, 4, 0, 0, mx, my); + match_count= !diagonal_ref + !top_ref + !left_ref; + tprintf(h->s.avctx, "pred_pskip_motion match_count=%d\n", match_count); + if(match_count > 1){ + mx = mid_pred(A[0], B[0], C[0]); + my = mid_pred(A[1], B[1], C[1]); + }else if(match_count==1){ + if(!left_ref){ + mx = A[0]; + my = A[1]; + }else if(!top_ref){ + mx = B[0]; + my = B[1]; + }else{ + mx = C[0]; + my = C[1]; + } + }else{ + mx = mid_pred(A[0], B[0], C[0]); + my = mid_pred(A[1], B[1], C[1]); + } + fill_rectangle( h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mx,my), 4); + return; +zeromv: + fill_rectangle( h->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4); return; } +static void fill_decode_neighbors(H264Context *h, int mb_type){ + MpegEncContext * const s = &h->s; + const int mb_xy= h->mb_xy; + int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS]; + static const uint8_t left_block_options[4][32]={ + {0,1,2,3,7,10,8,11,3+0*4, 3+1*4, 3+2*4, 3+3*4, 1+4*4, 1+8*4, 1+5*4, 1+9*4}, + {2,2,3,3,8,11,8,11,3+2*4, 3+2*4, 3+3*4, 3+3*4, 1+5*4, 1+9*4, 1+5*4, 1+9*4}, + {0,0,1,1,7,10,7,10,3+0*4, 3+0*4, 3+1*4, 3+1*4, 1+4*4, 1+8*4, 1+4*4, 1+8*4}, + {0,2,0,2,7,10,7,10,3+0*4, 3+2*4, 3+0*4, 3+2*4, 1+4*4, 1+8*4, 1+4*4, 1+8*4} + }; + + h->topleft_partition= -1; + + top_xy = mb_xy - (s->mb_stride << MB_FIELD); + + /* Wow, what a mess, why didn't they simplify the interlacing & intra + * stuff, I can't imagine that these complex rules are worth it. 
*/ + + topleft_xy = top_xy - 1; + topright_xy= top_xy + 1; + left_xy[LBOT] = left_xy[LTOP] = mb_xy-1; + h->left_block = left_block_options[0]; + if(FRAME_MBAFF){ + const int left_mb_field_flag = IS_INTERLACED(s->current_picture.f.mb_type[mb_xy - 1]); + const int curr_mb_field_flag = IS_INTERLACED(mb_type); + if(s->mb_y&1){ + if (left_mb_field_flag != curr_mb_field_flag) { + left_xy[LBOT] = left_xy[LTOP] = mb_xy - s->mb_stride - 1; + if (curr_mb_field_flag) { + left_xy[LBOT] += s->mb_stride; + h->left_block = left_block_options[3]; + } else { + topleft_xy += s->mb_stride; + // take top left mv from the middle of the mb, as opposed to all other modes which use the bottom right partition + h->topleft_partition = 0; + h->left_block = left_block_options[1]; + } + } + }else{ + if(curr_mb_field_flag){ + topleft_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy - 1] >> 7) & 1) - 1); + topright_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy + 1] >> 7) & 1) - 1); + top_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy ] >> 7) & 1) - 1); + } + if (left_mb_field_flag != curr_mb_field_flag) { + if (curr_mb_field_flag) { + left_xy[LBOT] += s->mb_stride; + h->left_block = left_block_options[3]; + } else { + h->left_block = left_block_options[2]; + } + } + } + } + + h->topleft_mb_xy = topleft_xy; + h->top_mb_xy = top_xy; + h->topright_mb_xy= topright_xy; + h->left_mb_xy[LTOP] = left_xy[LTOP]; + h->left_mb_xy[LBOT] = left_xy[LBOT]; + //FIXME do we need all in the context? + + h->topleft_type = s->current_picture.f.mb_type[topleft_xy]; + h->top_type = s->current_picture.f.mb_type[top_xy]; + h->topright_type = s->current_picture.f.mb_type[topright_xy]; + h->left_type[LTOP] = s->current_picture.f.mb_type[left_xy[LTOP]]; + h->left_type[LBOT] = s->current_picture.f.mb_type[left_xy[LBOT]]; + + if(FMO){ + if(h->slice_table[topleft_xy ] != h->slice_num) h->topleft_type = 0; + if(h->slice_table[top_xy ] != h->slice_num) h->top_type = 0; + if(h->slice_table[left_xy[LTOP] ] != h->slice_num) h->left_type[LTOP] = h->left_type[LBOT] = 0; + }else{ + if(h->slice_table[topleft_xy ] != h->slice_num){ + h->topleft_type = 0; + if(h->slice_table[top_xy ] != h->slice_num) h->top_type = 0; + if(h->slice_table[left_xy[LTOP] ] != h->slice_num) h->left_type[LTOP] = h->left_type[LBOT] = 0; + } + } + if(h->slice_table[topright_xy] != h->slice_num) h->topright_type= 0; +} + +static void fill_decode_caches(H264Context *h, int mb_type){ + MpegEncContext * const s = &h->s; + int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS]; + int topleft_type, top_type, topright_type, left_type[LEFT_MBS]; + const uint8_t * left_block= h->left_block; + int i; + uint8_t *nnz; + uint8_t *nnz_cache; + + topleft_xy = h->topleft_mb_xy; + top_xy = h->top_mb_xy; + topright_xy = h->topright_mb_xy; + left_xy[LTOP] = h->left_mb_xy[LTOP]; + left_xy[LBOT] = h->left_mb_xy[LBOT]; + topleft_type = h->topleft_type; + top_type = h->top_type; + topright_type = h->topright_type; + left_type[LTOP]= h->left_type[LTOP]; + left_type[LBOT]= h->left_type[LBOT]; + + if(!IS_SKIP(mb_type)){ + if(IS_INTRA(mb_type)){ + int type_mask= h->pps.constrained_intra_pred ? 
IS_INTRA(-1) : -1; + h->topleft_samples_available= + h->top_samples_available= + h->left_samples_available= 0xFFFF; + h->topright_samples_available= 0xEEEA; + + if(!(top_type & type_mask)){ + h->topleft_samples_available= 0xB3FF; + h->top_samples_available= 0x33FF; + h->topright_samples_available= 0x26EA; + } + if(IS_INTERLACED(mb_type) != IS_INTERLACED(left_type[LTOP])){ + if(IS_INTERLACED(mb_type)){ + if(!(left_type[LTOP] & type_mask)){ + h->topleft_samples_available&= 0xDFFF; + h->left_samples_available&= 0x5FFF; + } + if(!(left_type[LBOT] & type_mask)){ + h->topleft_samples_available&= 0xFF5F; + h->left_samples_available&= 0xFF5F; + } + }else{ + int left_typei = s->current_picture.f.mb_type[left_xy[LTOP] + s->mb_stride]; + + assert(left_xy[LTOP] == left_xy[LBOT]); + if(!((left_typei & type_mask) && (left_type[LTOP] & type_mask))){ + h->topleft_samples_available&= 0xDF5F; + h->left_samples_available&= 0x5F5F; + } + } + }else{ + if(!(left_type[LTOP] & type_mask)){ + h->topleft_samples_available&= 0xDF5F; + h->left_samples_available&= 0x5F5F; + } + } + + if(!(topleft_type & type_mask)) + h->topleft_samples_available&= 0x7FFF; + + if(!(topright_type & type_mask)) + h->topright_samples_available&= 0xFBFF; + + if(IS_INTRA4x4(mb_type)){ + if(IS_INTRA4x4(top_type)){ + AV_COPY32(h->intra4x4_pred_mode_cache+4+8*0, h->intra4x4_pred_mode + h->mb2br_xy[top_xy]); + }else{ + h->intra4x4_pred_mode_cache[4+8*0]= + h->intra4x4_pred_mode_cache[5+8*0]= + h->intra4x4_pred_mode_cache[6+8*0]= + h->intra4x4_pred_mode_cache[7+8*0]= 2 - 3*!(top_type & type_mask); + } + for(i=0; i<2; i++){ + if(IS_INTRA4x4(left_type[LEFT(i)])){ + int8_t *mode= h->intra4x4_pred_mode + h->mb2br_xy[left_xy[LEFT(i)]]; + h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]= mode[6-left_block[0+2*i]]; + h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= mode[6-left_block[1+2*i]]; + }else{ + h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]= + h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= 2 - 3*!(left_type[LEFT(i)] & type_mask); + } + } + } + } + + +/* +0 . T T. T T T T +1 L . .L . . . . +2 L . .L . . . . +3 . T TL . . . . +4 L . .L . . . . +5 L . .. . . . . +*/ +//FIXME constraint_intra_pred & partitioning & nnz (let us hope this is just a typo in the spec) + nnz_cache = h->non_zero_count_cache; + if(top_type){ + nnz = h->non_zero_count[top_xy]; + AV_COPY32(&nnz_cache[4+8* 0], &nnz[4*3]); + if(CHROMA444){ + AV_COPY32(&nnz_cache[4+8* 5], &nnz[4* 7]); + AV_COPY32(&nnz_cache[4+8*10], &nnz[4*11]); + }else{ + AV_COPY32(&nnz_cache[4+8* 5], &nnz[4* 5]); + AV_COPY32(&nnz_cache[4+8*10], &nnz[4* 9]); + } + }else{ + uint32_t top_empty = CABAC && !IS_INTRA(mb_type) ? 
0 : 0x40404040; + AV_WN32A(&nnz_cache[4+8* 0], top_empty); + AV_WN32A(&nnz_cache[4+8* 5], top_empty); + AV_WN32A(&nnz_cache[4+8*10], top_empty); + } + + for (i=0; i<2; i++) { + if(left_type[LEFT(i)]){ + nnz = h->non_zero_count[left_xy[LEFT(i)]]; + nnz_cache[3+8* 1 + 2*8*i]= nnz[left_block[8+0+2*i]]; + nnz_cache[3+8* 2 + 2*8*i]= nnz[left_block[8+1+2*i]]; + if(CHROMA444){ + nnz_cache[3+8* 6 + 2*8*i]= nnz[left_block[8+0+2*i]+4*4]; + nnz_cache[3+8* 7 + 2*8*i]= nnz[left_block[8+1+2*i]+4*4]; + nnz_cache[3+8*11 + 2*8*i]= nnz[left_block[8+0+2*i]+8*4]; + nnz_cache[3+8*12 + 2*8*i]= nnz[left_block[8+1+2*i]+8*4]; + }else{ + nnz_cache[3+8* 6 + 8*i]= nnz[left_block[8+4+2*i]]; + nnz_cache[3+8*11 + 8*i]= nnz[left_block[8+5+2*i]]; + } + }else{ + nnz_cache[3+8* 1 + 2*8*i]= + nnz_cache[3+8* 2 + 2*8*i]= + nnz_cache[3+8* 6 + 2*8*i]= + nnz_cache[3+8* 7 + 2*8*i]= + nnz_cache[3+8*11 + 2*8*i]= + nnz_cache[3+8*12 + 2*8*i]= CABAC && !IS_INTRA(mb_type) ? 0 : 64; + } + } + + if( CABAC ) { + // top_cbp + if(top_type) { + h->top_cbp = h->cbp_table[top_xy]; + } else { + h->top_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F; + } + // left_cbp + if (left_type[LTOP]) { + h->left_cbp = (h->cbp_table[left_xy[LTOP]] & 0x7F0) + | ((h->cbp_table[left_xy[LTOP]]>>(left_block[0]&(~1)))&2) + | (((h->cbp_table[left_xy[LBOT]]>>(left_block[2]&(~1)))&2) << 2); + } else { + h->left_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F; + } + } + } + + if(IS_INTER(mb_type) || (IS_DIRECT(mb_type) && h->direct_spatial_mv_pred)){ + int list; + int b_stride = h->b_stride; + for(list=0; list<h->list_count; list++){ + int8_t *ref_cache = &h->ref_cache[list][scan8[0]]; + int8_t *ref = s->current_picture.f.ref_index[list]; + int16_t (*mv_cache)[2] = &h->mv_cache[list][scan8[0]]; + int16_t (*mv)[2] = s->current_picture.f.motion_val[list]; + if(!USES_LIST(mb_type, list)){ + continue; + } + assert(!(IS_DIRECT(mb_type) && !h->direct_spatial_mv_pred)); + + if(USES_LIST(top_type, list)){ + const int b_xy= h->mb2b_xy[top_xy] + 3*b_stride; + AV_COPY128(mv_cache[0 - 1*8], mv[b_xy + 0]); + ref_cache[0 - 1*8]= + ref_cache[1 - 1*8]= ref[4*top_xy + 2]; + ref_cache[2 - 1*8]= + ref_cache[3 - 1*8]= ref[4*top_xy + 3]; + }else{ + AV_ZERO128(mv_cache[0 - 1*8]); + AV_WN32A(&ref_cache[0 - 1*8], ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE)&0xFF)*0x01010101); + } + + if(mb_type & (MB_TYPE_16x8|MB_TYPE_8x8)){ + for(i=0; i<2; i++){ + int cache_idx = -1 + i*2*8; + if(USES_LIST(left_type[LEFT(i)], list)){ + const int b_xy= h->mb2b_xy[left_xy[LEFT(i)]] + 3; + const int b8_xy= 4*left_xy[LEFT(i)] + 1; + AV_COPY32(mv_cache[cache_idx ], mv[b_xy + b_stride*left_block[0+i*2]]); + AV_COPY32(mv_cache[cache_idx+8], mv[b_xy + b_stride*left_block[1+i*2]]); + ref_cache[cache_idx ]= ref[b8_xy + (left_block[0+i*2]&~1)]; + ref_cache[cache_idx+8]= ref[b8_xy + (left_block[1+i*2]&~1)]; + }else{ + AV_ZERO32(mv_cache[cache_idx ]); + AV_ZERO32(mv_cache[cache_idx+8]); + ref_cache[cache_idx ]= + ref_cache[cache_idx+8]= (left_type[LEFT(i)]) ? LIST_NOT_USED : PART_NOT_AVAILABLE; + } + } + }else{ + if(USES_LIST(left_type[LTOP], list)){ + const int b_xy= h->mb2b_xy[left_xy[LTOP]] + 3; + const int b8_xy= 4*left_xy[LTOP] + 1; + AV_COPY32(mv_cache[-1], mv[b_xy + b_stride*left_block[0]]); + ref_cache[-1]= ref[b8_xy + (left_block[0]&~1)]; + }else{ + AV_ZERO32(mv_cache[-1]); + ref_cache[-1]= left_type[LTOP] ? 
LIST_NOT_USED : PART_NOT_AVAILABLE; + } + } + + if(USES_LIST(topright_type, list)){ + const int b_xy= h->mb2b_xy[topright_xy] + 3*b_stride; + AV_COPY32(mv_cache[4 - 1*8], mv[b_xy]); + ref_cache[4 - 1*8]= ref[4*topright_xy + 2]; + }else{ + AV_ZERO32(mv_cache[4 - 1*8]); + ref_cache[4 - 1*8]= topright_type ? LIST_NOT_USED : PART_NOT_AVAILABLE; + } + if(ref_cache[4 - 1*8] < 0){ + if(USES_LIST(topleft_type, list)){ + const int b_xy = h->mb2b_xy[topleft_xy] + 3 + b_stride + (h->topleft_partition & 2*b_stride); + const int b8_xy= 4*topleft_xy + 1 + (h->topleft_partition & 2); + AV_COPY32(mv_cache[-1 - 1*8], mv[b_xy]); + ref_cache[-1 - 1*8]= ref[b8_xy]; + }else{ + AV_ZERO32(mv_cache[-1 - 1*8]); + ref_cache[-1 - 1*8]= topleft_type ? LIST_NOT_USED : PART_NOT_AVAILABLE; + } + } + + if((mb_type&(MB_TYPE_SKIP|MB_TYPE_DIRECT2)) && !FRAME_MBAFF) + continue; + + if(!(mb_type&(MB_TYPE_SKIP|MB_TYPE_DIRECT2))){ + uint8_t (*mvd_cache)[2] = &h->mvd_cache[list][scan8[0]]; + uint8_t (*mvd)[2] = h->mvd_table[list]; + ref_cache[2+8*0] = + ref_cache[2+8*2] = PART_NOT_AVAILABLE; + AV_ZERO32(mv_cache[2+8*0]); + AV_ZERO32(mv_cache[2+8*2]); + + if( CABAC ) { + if(USES_LIST(top_type, list)){ + const int b_xy= h->mb2br_xy[top_xy]; + AV_COPY64(mvd_cache[0 - 1*8], mvd[b_xy + 0]); + }else{ + AV_ZERO64(mvd_cache[0 - 1*8]); + } + if(USES_LIST(left_type[LTOP], list)){ + const int b_xy= h->mb2br_xy[left_xy[LTOP]] + 6; + AV_COPY16(mvd_cache[-1 + 0*8], mvd[b_xy - left_block[0]]); + AV_COPY16(mvd_cache[-1 + 1*8], mvd[b_xy - left_block[1]]); + }else{ + AV_ZERO16(mvd_cache[-1 + 0*8]); + AV_ZERO16(mvd_cache[-1 + 1*8]); + } + if(USES_LIST(left_type[LBOT], list)){ + const int b_xy= h->mb2br_xy[left_xy[LBOT]] + 6; + AV_COPY16(mvd_cache[-1 + 2*8], mvd[b_xy - left_block[2]]); + AV_COPY16(mvd_cache[-1 + 3*8], mvd[b_xy - left_block[3]]); + }else{ + AV_ZERO16(mvd_cache[-1 + 2*8]); + AV_ZERO16(mvd_cache[-1 + 3*8]); + } + AV_ZERO16(mvd_cache[2+8*0]); + AV_ZERO16(mvd_cache[2+8*2]); + if(h->slice_type_nos == AV_PICTURE_TYPE_B){ + uint8_t *direct_cache = &h->direct_cache[scan8[0]]; + uint8_t *direct_table = h->direct_table; + fill_rectangle(direct_cache, 4, 4, 8, MB_TYPE_16x16>>1, 1); + + if(IS_DIRECT(top_type)){ + AV_WN32A(&direct_cache[-1*8], 0x01010101u*(MB_TYPE_DIRECT2>>1)); + }else if(IS_8X8(top_type)){ + int b8_xy = 4*top_xy; + direct_cache[0 - 1*8]= direct_table[b8_xy + 2]; + direct_cache[2 - 1*8]= direct_table[b8_xy + 3]; + }else{ + AV_WN32A(&direct_cache[-1*8], 0x01010101*(MB_TYPE_16x16>>1)); + } + + if(IS_DIRECT(left_type[LTOP])) + direct_cache[-1 + 0*8]= MB_TYPE_DIRECT2>>1; + else if(IS_8X8(left_type[LTOP])) + direct_cache[-1 + 0*8]= direct_table[4*left_xy[LTOP] + 1 + (left_block[0]&~1)]; + else + direct_cache[-1 + 0*8]= MB_TYPE_16x16>>1; + + if(IS_DIRECT(left_type[LBOT])) + direct_cache[-1 + 2*8]= MB_TYPE_DIRECT2>>1; + else if(IS_8X8(left_type[LBOT])) + direct_cache[-1 + 2*8]= direct_table[4*left_xy[LBOT] + 1 + (left_block[2]&~1)]; + else + direct_cache[-1 + 2*8]= MB_TYPE_16x16>>1; + } + } + } + if(FRAME_MBAFF){ +#define MAP_MVS\ + MAP_F2F(scan8[0] - 1 - 1*8, topleft_type)\ + MAP_F2F(scan8[0] + 0 - 1*8, top_type)\ + MAP_F2F(scan8[0] + 1 - 1*8, top_type)\ + MAP_F2F(scan8[0] + 2 - 1*8, top_type)\ + MAP_F2F(scan8[0] + 3 - 1*8, top_type)\ + MAP_F2F(scan8[0] + 4 - 1*8, topright_type)\ + MAP_F2F(scan8[0] - 1 + 0*8, left_type[LTOP])\ + MAP_F2F(scan8[0] - 1 + 1*8, left_type[LTOP])\ + MAP_F2F(scan8[0] - 1 + 2*8, left_type[LBOT])\ + MAP_F2F(scan8[0] - 1 + 3*8, left_type[LBOT]) + if(MB_FIELD){ +#define MAP_F2F(idx, mb_type)\ + 
if(!IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0){\ + h->ref_cache[list][idx] <<= 1;\ + h->mv_cache[list][idx][1] /= 2;\ + h->mvd_cache[list][idx][1] >>=1;\ + } + MAP_MVS +#undef MAP_F2F + }else{ +#define MAP_F2F(idx, mb_type)\ + if(IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0){\ + h->ref_cache[list][idx] >>= 1;\ + h->mv_cache[list][idx][1] <<= 1;\ + h->mvd_cache[list][idx][1] <<= 1;\ + } + MAP_MVS +#undef MAP_F2F + } + } + } + } + + h->neighbor_transform_size= !!IS_8x8DCT(top_type) + !!IS_8x8DCT(left_type[LTOP]); +} + +/** + * decodes a P_SKIP or B_SKIP macroblock + */ +static void av_unused decode_mb_skip(H264Context *h){ + MpegEncContext * const s = &h->s; + const int mb_xy= h->mb_xy; + int mb_type=0; + + memset(h->non_zero_count[mb_xy], 0, 48); + + if(MB_FIELD) + mb_type|= MB_TYPE_INTERLACED; + + if( h->slice_type_nos == AV_PICTURE_TYPE_B ) + { + // just for fill_caches. pred_direct_motion will set the real mb_type + mb_type|= MB_TYPE_L0L1|MB_TYPE_DIRECT2|MB_TYPE_SKIP; + if(h->direct_spatial_mv_pred){ + fill_decode_neighbors(h, mb_type); + fill_decode_caches(h, mb_type); //FIXME check what is needed and what not ... + } + ff_h264_pred_direct_motion(h, &mb_type); + mb_type|= MB_TYPE_SKIP; + } + else + { + mb_type|= MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P1L0|MB_TYPE_SKIP; + + fill_decode_neighbors(h, mb_type); + pred_pskip_motion(h); + } + + write_back_motion(h, mb_type); + s->current_picture.f.mb_type[mb_xy] = mb_type; + s->current_picture.f.qscale_table[mb_xy] = s->qscale; + h->slice_table[ mb_xy ]= h->slice_num; + h->prev_mb_skipped= 1; +} + #endif /* AVCODEC_H264_MVPRED_H */ diff --git a/libavcodec/h264_parser.c b/libavcodec/h264_parser.c index 080b6a93b5..27fba4b628 100644 --- a/libavcodec/h264_parser.c +++ b/libavcodec/h264_parser.c @@ -97,7 +97,7 @@ found: return i-(state&5); } -/*! +/** * Parse NAL units of found picture and decode some basic information. * * @param s parser context. diff --git a/libavcodec/h264_ps.c b/libavcodec/h264_ps.c index ddfe1d2c64..61fb12ce0c 100644 --- a/libavcodec/h264_ps.c +++ b/libavcodec/h264_ps.c @@ -281,12 +281,12 @@ static void decode_scaling_matrices(H264Context *h, SPS *sps, PPS *pps, int is_s decode_scaling_list(h,scaling_matrix4[5],16,default_scaling4[1],scaling_matrix4[4]); // Inter, Cb if(is_sps || pps->transform_8x8_mode){ decode_scaling_list(h,scaling_matrix8[0],64,default_scaling8[0],fallback[2]); // Intra, Y - if(h->sps.chroma_format_idc == 3){ + if(sps->chroma_format_idc == 3){ decode_scaling_list(h,scaling_matrix8[1],64,default_scaling8[0],scaling_matrix8[0]); // Intra, Cr decode_scaling_list(h,scaling_matrix8[2],64,default_scaling8[0],scaling_matrix8[1]); // Intra, Cb } decode_scaling_list(h,scaling_matrix8[3],64,default_scaling8[1],fallback[3]); // Inter, Y - if(h->sps.chroma_format_idc == 3){ + if(sps->chroma_format_idc == 3){ decode_scaling_list(h,scaling_matrix8[4],64,default_scaling8[1],scaling_matrix8[3]); // Inter, Cr decode_scaling_list(h,scaling_matrix8[5],64,default_scaling8[1],scaling_matrix8[4]); // Inter, Cb } @@ -396,6 +396,7 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){ #endif sps->crop= get_bits1(&s->gb); if(sps->crop){ + int crop_limit = sps->chroma_format_idc == 3 ? 
16 : 8; sps->crop_left = get_ue_golomb(&s->gb); sps->crop_right = get_ue_golomb(&s->gb); sps->crop_top = get_ue_golomb(&s->gb); @@ -403,7 +404,7 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){ if(sps->crop_left || sps->crop_top){ av_log(h->s.avctx, AV_LOG_ERROR, "insane cropping not completely supported, this could look slightly wrong ...\n"); } - if(sps->crop_right >= (8<<CHROMA444) || sps->crop_bottom >= (8<<CHROMA444)){ + if(sps->crop_right >= crop_limit || sps->crop_bottom >= crop_limit){ av_log(h->s.avctx, AV_LOG_ERROR, "brainfart cropping not supported, this could look slightly wrong ...\n"); } }else{ diff --git a/libavcodec/h264_refs.c b/libavcodec/h264_refs.c index 9554201522..6794bf3c9f 100644 --- a/libavcodec/h264_refs.c +++ b/libavcodec/h264_refs.c @@ -40,16 +40,16 @@ static void pic_as_field(Picture *pic, const int parity){ int i; for (i = 0; i < 4; ++i) { if (parity == PICT_BOTTOM_FIELD) - pic->data[i] += pic->linesize[i]; - pic->reference = parity; - pic->linesize[i] *= 2; + pic->f.data[i] += pic->f.linesize[i]; + pic->f.reference = parity; + pic->f.linesize[i] *= 2; } pic->poc= pic->field_poc[parity == PICT_BOTTOM_FIELD]; } static int split_field_copy(Picture *dest, Picture *src, int parity, int id_add){ - int match = !!(src->reference & parity); + int match = !!(src->f.reference & parity); if (match) { *dest = *src; @@ -68,9 +68,9 @@ static int build_def_list(Picture *def, Picture **in, int len, int is_long, int int index=0; while(i[0]<len || i[1]<len){ - while(i[0]<len && !(in[ i[0] ] && (in[ i[0] ]->reference & sel))) + while (i[0] < len && !(in[ i[0] ] && (in[ i[0] ]->f.reference & sel))) i[0]++; - while(i[1]<len && !(in[ i[1] ] && (in[ i[1] ]->reference & (sel^3)))) + while (i[1] < len && !(in[ i[1] ] && (in[ i[1] ]->f.reference & (sel^3)))) i[1]++; if(i[0] < len){ in[ i[0] ]->pic_id= is_long ? 
i[0] : in[ i[0] ]->frame_num; @@ -134,7 +134,7 @@ int ff_h264_fill_default_ref_list(H264Context *h){ } if(lens[0] == lens[1] && lens[1] > 1){ - for(i=0; h->default_ref_list[0][i].data[0] == h->default_ref_list[1][i].data[0] && i<lens[0]; i++); + for (i = 0; h->default_ref_list[0][i].f.data[0] == h->default_ref_list[1][i].f.data[0] && i < lens[0]; i++); if(i == lens[0]) FFSWAP(Picture, h->default_ref_list[1][0], h->default_ref_list[1][1]); } @@ -230,11 +230,11 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){ for(i= h->short_ref_count-1; i>=0; i--){ ref = h->short_ref[i]; - assert(ref->reference); + assert(ref->f.reference); assert(!ref->long_ref); if( ref->frame_num == frame_num && - (ref->reference & pic_structure) + (ref->f.reference & pic_structure) ) break; } @@ -251,8 +251,8 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){ return -1; } ref = h->long_ref[long_idx]; - assert(!(ref && !ref->reference)); - if(ref && (ref->reference & pic_structure)){ + assert(!(ref && !ref->f.reference)); + if (ref && (ref->f.reference & pic_structure)) { ref->pic_id= pic_id; assert(ref->long_ref); i=0; @@ -286,9 +286,9 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){ } for(list=0; list<h->list_count; list++){ for(index= 0; index < h->ref_count[list]; index++){ - if(!h->ref_list[list][index].data[0]){ + if (!h->ref_list[list][index].f.data[0]) { av_log(h->s.avctx, AV_LOG_ERROR, "Missing reference picture\n"); - if(h->default_ref_list[list][0].data[0]) + if (h->default_ref_list[list][0].f.data[0]) h->ref_list[list][index]= h->default_ref_list[list][0]; else return -1; @@ -307,13 +307,13 @@ void ff_h264_fill_mbaff_ref_list(H264Context *h){ Picture *field = &h->ref_list[list][16+2*i]; field[0] = *frame; for(j=0; j<3; j++) - field[0].linesize[j] <<= 1; - field[0].reference = PICT_TOP_FIELD; + field[0].f.linesize[j] <<= 1; + field[0].f.reference = PICT_TOP_FIELD; field[0].poc= field[0].field_poc[0]; field[1] = field[0]; for(j=0; j<3; j++) - field[1].data[j] += frame->linesize[j]; - field[1].reference = PICT_BOTTOM_FIELD; + field[1].f.data[j] += frame->f.linesize[j]; + field[1].f.reference = PICT_BOTTOM_FIELD; field[1].poc= field[1].field_poc[1]; h->luma_weight[16+2*i][list][0] = h->luma_weight[16+2*i+1][list][0] = h->luma_weight[i][list][0]; @@ -339,12 +339,12 @@ void ff_h264_fill_mbaff_ref_list(H264Context *h){ */ static inline int unreference_pic(H264Context *h, Picture *pic, int refmask){ int i; - if (pic->reference &= refmask) { + if (pic->f.reference &= refmask) { return 0; } else { for(i = 0; h->delayed_pic[i]; i++) if(pic == h->delayed_pic[i]){ - pic->reference=DELAYED_PIC_REF; + pic->f.reference = DELAYED_PIC_REF; break; } return 1; @@ -454,7 +454,8 @@ static void print_short_term(H264Context *h) { av_log(h->s.avctx, AV_LOG_DEBUG, "short term list:\n"); for(i=0; i<h->short_ref_count; i++){ Picture *pic= h->short_ref[i]; - av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", i, pic->frame_num, pic->poc, pic->data[0]); + av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", + i, pic->frame_num, pic->poc, pic->f.data[0]); } } } @@ -469,7 +470,8 @@ static void print_long_term(H264Context *h) { for(i = 0; i < 16; i++){ Picture *pic= h->long_ref[i]; if (pic) { - av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", i, pic->frame_num, pic->poc, pic->data[0]); + av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", + i, pic->frame_num, pic->poc, pic->f.data[0]); } } } @@ -481,7 +483,7 @@ void ff_generate_sliding_window_mmcos(H264Context *h) { h->mmco_index= 
0; if(h->short_ref_count && h->long_ref_count + h->short_ref_count == h->sps.ref_frame_count && - !(FIELD_PICTURE && !s->first_field && s->current_picture_ptr->reference)) { + !(FIELD_PICTURE && !s->first_field && s->current_picture_ptr->f.reference)) { h->mmco[0].opcode= MMCO_SHORT2UNUSED; h->mmco[0].short_pic_num= h->short_ref[ h->short_ref_count - 1 ]->frame_num; h->mmco_index= 1; @@ -562,7 +564,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){ h->long_ref_count++; } - s->current_picture_ptr->reference |= s->picture_structure; + s->current_picture_ptr->f.reference |= s->picture_structure; current_ref_assigned=1; break; case MMCO_SET_MAX_LONG: @@ -601,7 +603,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){ */ if (h->short_ref_count && h->short_ref[0] == s->current_picture_ptr) { /* Just mark the second field valid */ - s->current_picture_ptr->reference = PICT_FRAME; + s->current_picture_ptr->f.reference = PICT_FRAME; } else if (s->current_picture_ptr->long_ref) { av_log(h->s.avctx, AV_LOG_ERROR, "illegal short term reference " "assignment for second field " @@ -618,7 +620,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){ h->short_ref[0]= s->current_picture_ptr; h->short_ref_count++; - s->current_picture_ptr->reference |= s->picture_structure; + s->current_picture_ptr->f.reference |= s->picture_structure; } } @@ -678,7 +680,7 @@ int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb){ } if(opcode==MMCO_SHORT2LONG || opcode==MMCO_LONG2UNUSED || opcode==MMCO_LONG || opcode==MMCO_SET_MAX_LONG){ unsigned int long_arg= get_ue_golomb_31(gb); - if(long_arg >= 32 || (long_arg >= 16 && !(opcode == MMCO_LONG2UNUSED && FIELD_PICTURE))){ + if(long_arg >= 32 || (long_arg >= 16 && !(opcode == MMCO_SET_MAX_LONG && long_arg == 16) && !(opcode == MMCO_LONG2UNUSED && FIELD_PICTURE))){ av_log(h->s.avctx, AV_LOG_ERROR, "illegal long ref in memory management control operation %d\n", opcode); return -1; } diff --git a/libavcodec/h264dsp_template.c b/libavcodec/h264dsp_template.c index eb336f7e62..906d99f739 100644 --- a/libavcodec/h264dsp_template.c +++ b/libavcodec/h264dsp_template.c @@ -25,7 +25,7 @@ * @author Michael Niedermayer <michaelni@gmx.at> */ -#include "high_bit_depth.h" +#include "bit_depth_template.c" #define op_scale1(x) block[x] = av_clip_pixel( (block[x]*weight + offset) >> log2_denom ) #define op_scale2(x) dst[x] = av_clip_pixel( (src[x]*weights + dst[x]*weightd + offset) >> (log2_denom+1)) diff --git a/libavcodec/h264idct_template.c b/libavcodec/h264idct_template.c index 83f6f38691..3b386695a2 100644 --- a/libavcodec/h264idct_template.c +++ b/libavcodec/h264idct_template.c @@ -25,7 +25,7 @@ * @author Michael Niedermayer <michaelni@gmx.at> */ -#include "high_bit_depth.h" +#include "bit_depth_template.c" #ifndef AVCODEC_H264IDCT_INTERNAL_H #define AVCODEC_H264IDCT_INTERNAL_H @@ -46,57 +46,46 @@ static const uint8_t scan8[16*3]={ }; #endif -static av_always_inline void FUNCC(idct_internal)(uint8_t *p_dst, DCTELEM *p_block, int stride, int block_stride, int shift, int add){ +void FUNCC(ff_h264_idct_add)(uint8_t *_dst, DCTELEM *_block, int stride) +{ int i; INIT_CLIP - pixel *dst = (pixel*)p_dst; - dctcoef *block = (dctcoef*)p_block; + pixel *dst = (pixel*)_dst; + dctcoef *block = (dctcoef*)_block; stride >>= sizeof(pixel)-1; - block[0] += 1<<(shift-1); + block[0] += 1 << 5; for(i=0; i<4; i++){ - const int z0= block[i + block_stride*0] + block[i + block_stride*2]; - const int 
z1= block[i + block_stride*0] - block[i + block_stride*2]; - const int z2= (block[i + block_stride*1]>>1) - block[i + block_stride*3]; - const int z3= block[i + block_stride*1] + (block[i + block_stride*3]>>1); - - block[i + block_stride*0]= z0 + z3; - block[i + block_stride*1]= z1 + z2; - block[i + block_stride*2]= z1 - z2; - block[i + block_stride*3]= z0 - z3; + const int z0= block[i + 4*0] + block[i + 4*2]; + const int z1= block[i + 4*0] - block[i + 4*2]; + const int z2= (block[i + 4*1]>>1) - block[i + 4*3]; + const int z3= block[i + 4*1] + (block[i + 4*3]>>1); + + block[i + 4*0]= z0 + z3; + block[i + 4*1]= z1 + z2; + block[i + 4*2]= z1 - z2; + block[i + 4*3]= z0 - z3; } for(i=0; i<4; i++){ - const int z0= block[0 + block_stride*i] + block[2 + block_stride*i]; - const int z1= block[0 + block_stride*i] - block[2 + block_stride*i]; - const int z2= (block[1 + block_stride*i]>>1) - block[3 + block_stride*i]; - const int z3= block[1 + block_stride*i] + (block[3 + block_stride*i]>>1); - - dst[i + 0*stride]= CLIP(add*dst[i + 0*stride] + ((z0 + z3) >> shift)); - dst[i + 1*stride]= CLIP(add*dst[i + 1*stride] + ((z1 + z2) >> shift)); - dst[i + 2*stride]= CLIP(add*dst[i + 2*stride] + ((z1 - z2) >> shift)); - dst[i + 3*stride]= CLIP(add*dst[i + 3*stride] + ((z0 - z3) >> shift)); + const int z0= block[0 + 4*i] + block[2 + 4*i]; + const int z1= block[0 + 4*i] - block[2 + 4*i]; + const int z2= (block[1 + 4*i]>>1) - block[3 + 4*i]; + const int z3= block[1 + 4*i] + (block[3 + 4*i]>>1); + + dst[i + 0*stride]= CLIP(dst[i + 0*stride] + ((z0 + z3) >> 6)); + dst[i + 1*stride]= CLIP(dst[i + 1*stride] + ((z1 + z2) >> 6)); + dst[i + 2*stride]= CLIP(dst[i + 2*stride] + ((z1 - z2) >> 6)); + dst[i + 3*stride]= CLIP(dst[i + 3*stride] + ((z0 - z3) >> 6)); } } -void FUNCC(ff_h264_idct_add)(uint8_t *dst, DCTELEM *block, int stride){ - FUNCC(idct_internal)(dst, block, stride, 4, 6, 1); -} - -void FUNCC(ff_h264_lowres_idct_add)(uint8_t *dst, int stride, DCTELEM *block){ - FUNCC(idct_internal)(dst, block, stride, 8, 3, 1); -} - -void FUNCC(ff_h264_lowres_idct_put)(uint8_t *dst, int stride, DCTELEM *block){ - FUNCC(idct_internal)(dst, block, stride, 8, 3, 0); -} - -void FUNCC(ff_h264_idct8_add)(uint8_t *p_dst, DCTELEM *p_block, int stride){ +void FUNCC(ff_h264_idct8_add)(uint8_t *_dst, DCTELEM *_block, int stride){ int i; INIT_CLIP - pixel *dst = (pixel*)p_dst; - dctcoef *block = (dctcoef*)p_block; + pixel *dst = (pixel*)_dst; + dctcoef *block = (dctcoef*)_block; stride >>= sizeof(pixel)-1; block[0] += 32; @@ -200,7 +189,7 @@ void FUNCC(ff_h264_idct_add16)(uint8_t *dst, const int *block_offset, DCTELEM *b int nnz = nnzc[ scan8[i] ]; if(nnz){ if(nnz==1 && ((dctcoef*)block)[i*16]) FUNCC(ff_h264_idct_dc_add)(dst + block_offset[i], block + i*16*sizeof(pixel), stride); - else FUNCC(idct_internal )(dst + block_offset[i], block + i*16*sizeof(pixel), stride, 4, 6, 1); + else FUNCC(ff_h264_idct_add )(dst + block_offset[i], block + i*16*sizeof(pixel), stride); } } } @@ -208,7 +197,7 @@ void FUNCC(ff_h264_idct_add16)(uint8_t *dst, const int *block_offset, DCTELEM *b void FUNCC(ff_h264_idct_add16intra)(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[15*8]){ int i; for(i=0; i<16; i++){ - if(nnzc[ scan8[i] ]) FUNCC(idct_internal )(dst + block_offset[i], block + i*16*sizeof(pixel), stride, 4, 6, 1); + if(nnzc[ scan8[i] ]) FUNCC(ff_h264_idct_add )(dst + block_offset[i], block + i*16*sizeof(pixel), stride); else if(((dctcoef*)block)[i*16]) FUNCC(ff_h264_idct_dc_add)(dst + block_offset[i], block + 
i*16*sizeof(pixel), stride); } } @@ -237,7 +226,7 @@ void FUNCC(ff_h264_idct_add8)(uint8_t **dest, const int *block_offset, DCTELEM * } /** * IDCT transforms the 16 dc values and dequantizes them. - * @param qp quantization parameter + * @param qmul quantization parameter */ void FUNCC(ff_h264_luma_dc_dequant_idct)(DCTELEM *p_output, DCTELEM *p_input, int qmul){ #define stride 16 diff --git a/libavcodec/h264pred_template.c b/libavcodec/h264pred_template.c index 3cd4463d76..36f6d4e12f 100644 --- a/libavcodec/h264pred_template.c +++ b/libavcodec/h264pred_template.c @@ -26,7 +26,8 @@ */ #include "mathops.h" -#include "high_bit_depth.h" + +#include "bit_depth_template.c" static void FUNCC(pred4x4_vertical)(uint8_t *_src, const uint8_t *topright, int _stride){ pixel *src = (pixel*)_src; diff --git a/libavcodec/huffman.c b/libavcodec/huffman.c index 853fa64a48..7b33bdd7f3 100644 --- a/libavcodec/huffman.c +++ b/libavcodec/huffman.c @@ -1,6 +1,4 @@ -/** - * @file - * huffman tree builder and VLC generator +/* * Copyright (c) 2006 Konstantin Shishkov * * This file is part of FFmpeg. @@ -20,6 +18,11 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * huffman tree builder and VLC generator + */ + #include "avcodec.h" #include "get_bits.h" #include "huffman.h" diff --git a/libavcodec/huffman.h b/libavcodec/huffman.h index 3c08e6fb1b..5e0787a5e8 100644 --- a/libavcodec/huffman.h +++ b/libavcodec/huffman.h @@ -1,6 +1,4 @@ -/** - * @file - * huffman tree builder and VLC generator +/* * Copyright (C) 2007 Aurelien Jacobs <aurel@gnuage.org> * * This file is part of FFmpeg. @@ -20,6 +18,11 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * huffman tree builder and VLC generator + */ + #ifndef AVCODEC_HUFFMAN_H #define AVCODEC_HUFFMAN_H diff --git a/libavcodec/huffyuv.c b/libavcodec/huffyuv.c index 0f59421bb7..b7a79a857b 100644 --- a/libavcodec/huffyuv.c +++ b/libavcodec/huffyuv.c @@ -1436,16 +1436,14 @@ static av_cold int encode_end(AVCodecContext *avctx) #if CONFIG_HUFFYUV_DECODER AVCodec ff_huffyuv_decoder = { - "huffyuv", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_HUFFYUV, - sizeof(HYuvContext), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS, - NULL, + .name = "huffyuv", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_HUFFYUV, + .priv_data_size = sizeof(HYuvContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS, .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy), .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"), }; @@ -1453,16 +1451,14 @@ AVCodec ff_huffyuv_decoder = { #if CONFIG_FFVHUFF_DECODER AVCodec ff_ffvhuff_decoder = { - "ffvhuff", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_FFVHUFF, - sizeof(HYuvContext), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS, - NULL, + .name = "ffvhuff", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_FFVHUFF, + .priv_data_size = sizeof(HYuvContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS, .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy), .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"), }; @@ -1470,13 +1466,13 @@ AVCodec ff_ffvhuff_decoder = { #if 
CONFIG_HUFFYUV_ENCODER AVCodec ff_huffyuv_encoder = { - "huffyuv", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_HUFFYUV, - sizeof(HYuvContext), - encode_init, - encode_frame, - encode_end, + .name = "huffyuv", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_HUFFYUV, + .priv_data_size = sizeof(HYuvContext), + .init = encode_init, + .encode = encode_frame, + .close = encode_end, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"), }; @@ -1484,13 +1480,13 @@ AVCodec ff_huffyuv_encoder = { #if CONFIG_FFVHUFF_ENCODER AVCodec ff_ffvhuff_encoder = { - "ffvhuff", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_FFVHUFF, - sizeof(HYuvContext), - encode_init, - encode_frame, - encode_end, + .name = "ffvhuff", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_FFVHUFF, + .priv_data_size = sizeof(HYuvContext), + .init = encode_init, + .encode = encode_frame, + .close = encode_end, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"), }; diff --git a/libavcodec/idcinvideo.c b/libavcodec/idcinvideo.c index 78ac77bf25..e441435e2c 100644 --- a/libavcodec/idcinvideo.c +++ b/libavcodec/idcinvideo.c @@ -255,15 +255,14 @@ static av_cold int idcin_decode_end(AVCodecContext *avctx) } AVCodec ff_idcin_decoder = { - "idcinvideo", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_IDCIN, - sizeof(IdcinContext), - idcin_decode_init, - NULL, - idcin_decode_end, - idcin_decode_frame, - CODEC_CAP_DR1, + .name = "idcinvideo", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_IDCIN, + .priv_data_size = sizeof(IdcinContext), + .init = idcin_decode_init, + .close = idcin_decode_end, + .decode = idcin_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("id Quake II CIN video"), }; diff --git a/libavcodec/iff.c b/libavcodec/iff.c index 195ef10ac7..31aa6f0947 100644 --- a/libavcodec/iff.c +++ b/libavcodec/iff.c @@ -576,27 +576,25 @@ static av_cold int decode_end(AVCodecContext *avctx) } AVCodec ff_iff_ilbm_decoder = { - "iff_ilbm", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_IFF_ILBM, - sizeof(IffContext), - decode_init, - NULL, - decode_end, - decode_frame_ilbm, - CODEC_CAP_DR1, + .name = "iff_ilbm", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_IFF_ILBM, + .priv_data_size = sizeof(IffContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame_ilbm, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("IFF ILBM"), }; AVCodec ff_iff_byterun1_decoder = { - "iff_byterun1", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_IFF_BYTERUN1, - sizeof(IffContext), - decode_init, - NULL, - decode_end, - decode_frame_byterun1, - CODEC_CAP_DR1, + .name = "iff_byterun1", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_IFF_BYTERUN1, + .priv_data_size = sizeof(IffContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame_byterun1, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("IFF ByteRun1"), }; diff --git a/libavcodec/iirfilter.c b/libavcodec/iirfilter.c index 4b9aae2a5d..4785a7c7c2 100644 --- a/libavcodec/iirfilter.c +++ b/libavcodec/iirfilter.c @@ -311,6 +311,9 @@ av_cold void ff_iir_filter_free_coeffs(struct FFIIRFilterCoeffs *coeffs) } #ifdef TEST +#undef printf +#include <stdio.h> + #define FILT_ORDER 4 #define SIZE 1024 int main(void) @@ -320,7 +323,6 @@ int main(void) float cutoff_coeff = 0.4; int16_t x[SIZE], y[SIZE]; int i; - FILE* fd; fcoeffs = ff_iir_filter_init_coeffs(NULL, FF_FILTER_TYPE_BUTTERWORTH, 
FF_FILTER_MODE_LOWPASS, FILT_ORDER, @@ -333,13 +335,8 @@ int main(void) ff_iir_filter(fcoeffs, fstate, SIZE, x, 1, y, 1); - fd = fopen("in.bin", "w"); - fwrite(x, sizeof(x[0]), SIZE, fd); - fclose(fd); - - fd = fopen("out.bin", "w"); - fwrite(y, sizeof(y[0]), SIZE, fd); - fclose(fd); + for (i = 0; i < SIZE; i++) + printf("%6d %6d\n", x[i], y[i]); ff_iir_filter_free_coeffs(fcoeffs); ff_iir_filter_free_state(fstate); diff --git a/libavcodec/imgconvert.c b/libavcodec/imgconvert.c index 78ac2d83bb..96246bd16a 100644 --- a/libavcodec/imgconvert.c +++ b/libavcodec/imgconvert.c @@ -42,18 +42,11 @@ #include "x86/dsputil_mmx.h" #endif -#define xglue(x, y) x ## y -#define glue(x, y) xglue(x, y) - #define FF_COLOR_RGB 0 /**< RGB color space */ #define FF_COLOR_GRAY 1 /**< gray color space */ #define FF_COLOR_YUV 2 /**< YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */ #define FF_COLOR_YUV_JPEG 3 /**< YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */ -#define FF_PIXEL_PLANAR 0 /**< each channel has one component in AVPicture */ -#define FF_PIXEL_PACKED 1 /**< only one components containing all the channels */ -#define FF_PIXEL_PALETTE 2 /**< one components containing indexes for a palette */ - #if HAVE_MMX && HAVE_YASM #define deinterlace_line_inplace ff_deinterlace_line_inplace_mmx #define deinterlace_line ff_deinterlace_line_mmx @@ -63,351 +56,203 @@ #endif typedef struct PixFmtInfo { - uint8_t nb_channels; /**< number of channels (including alpha) */ uint8_t color_type; /**< color type (see FF_COLOR_xxx constants) */ - uint8_t pixel_type; /**< pixel storage type (see FF_PIXEL_xxx constants) */ uint8_t is_alpha : 1; /**< true if alpha can be specified */ - uint8_t depth; /**< bit depth of the color components */ + uint8_t padded_size; /**< padded size in bits if different from the non-padded size */ } PixFmtInfo; /* this table gives more information about formats */ static const PixFmtInfo pix_fmt_info[PIX_FMT_NB] = { /* YUV formats */ [PIX_FMT_YUV420P] = { - .nb_channels = 3, .color_type = FF_COLOR_YUV, - .pixel_type = FF_PIXEL_PLANAR, - .depth = 8, }, [PIX_FMT_YUV422P] = { - .nb_channels = 3, .color_type = FF_COLOR_YUV, - .pixel_type = FF_PIXEL_PLANAR, - .depth = 8, }, [PIX_FMT_YUV444P] = { - .nb_channels = 3, .color_type = FF_COLOR_YUV, - .pixel_type = FF_PIXEL_PLANAR, - .depth = 8, }, [PIX_FMT_YUYV422] = { - .nb_channels = 1, .color_type = FF_COLOR_YUV, - .pixel_type = FF_PIXEL_PACKED, - .depth = 8, }, [PIX_FMT_UYVY422] = { - .nb_channels = 1, .color_type = FF_COLOR_YUV, - .pixel_type = FF_PIXEL_PACKED, - .depth = 8, }, [PIX_FMT_YUV410P] = { - .nb_channels = 3, .color_type = FF_COLOR_YUV, - .pixel_type = FF_PIXEL_PLANAR, - .depth = 8, }, [PIX_FMT_YUV411P] = { - .nb_channels = 3, .color_type = FF_COLOR_YUV, - .pixel_type = FF_PIXEL_PLANAR, - .depth = 8, }, [PIX_FMT_YUV440P] = { - .nb_channels = 3, .color_type = FF_COLOR_YUV, - .pixel_type = FF_PIXEL_PLANAR, - .depth = 8, }, [PIX_FMT_YUV420P16LE] = { - .nb_channels = 3, .color_type = FF_COLOR_YUV, - .pixel_type = FF_PIXEL_PLANAR, - .depth = 16, }, [PIX_FMT_YUV422P16LE] = { - .nb_channels = 3, .color_type = FF_COLOR_YUV, - .pixel_type = FF_PIXEL_PLANAR, - .depth = 16, }, [PIX_FMT_YUV444P16LE] = { - .nb_channels = 3, .color_type = FF_COLOR_YUV, - .pixel_type = FF_PIXEL_PLANAR, - .depth = 16, }, [PIX_FMT_YUV420P16BE] = { - .nb_channels = 3, .color_type = FF_COLOR_YUV, - .pixel_type = FF_PIXEL_PLANAR, - .depth = 16, }, [PIX_FMT_YUV422P16BE] = { - .nb_channels = 3, .color_type = FF_COLOR_YUV, - .pixel_type = FF_PIXEL_PLANAR, - .depth = 16, }, 
[PIX_FMT_YUV444P16BE] = { - .nb_channels = 3, .color_type = FF_COLOR_YUV, - .pixel_type = FF_PIXEL_PLANAR, - .depth = 16, }, - /* YUV formats with alpha plane */ [PIX_FMT_YUVA420P] = { - .nb_channels = 4, .color_type = FF_COLOR_YUV, - .pixel_type = FF_PIXEL_PLANAR, - .depth = 8, }, /* JPEG YUV */ [PIX_FMT_YUVJ420P] = { - .nb_channels = 3, .color_type = FF_COLOR_YUV_JPEG, - .pixel_type = FF_PIXEL_PLANAR, - .depth = 8, }, [PIX_FMT_YUVJ422P] = { - .nb_channels = 3, .color_type = FF_COLOR_YUV_JPEG, - .pixel_type = FF_PIXEL_PLANAR, - .depth = 8, }, [PIX_FMT_YUVJ444P] = { - .nb_channels = 3, .color_type = FF_COLOR_YUV_JPEG, - .pixel_type = FF_PIXEL_PLANAR, - .depth = 8, }, [PIX_FMT_YUVJ440P] = { - .nb_channels = 3, .color_type = FF_COLOR_YUV_JPEG, - .pixel_type = FF_PIXEL_PLANAR, - .depth = 8, }, /* RGB formats */ [PIX_FMT_RGB24] = { - .nb_channels = 3, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PACKED, - .depth = 8, }, [PIX_FMT_BGR24] = { - .nb_channels = 3, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PACKED, - .depth = 8, }, [PIX_FMT_ARGB] = { - .nb_channels = 4, .is_alpha = 1, + .is_alpha = 1, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PACKED, - .depth = 8, }, [PIX_FMT_RGB48BE] = { - .nb_channels = 3, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PACKED, - .depth = 16, }, [PIX_FMT_RGB48LE] = { - .nb_channels = 3, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PACKED, - .depth = 16, }, [PIX_FMT_RGB565BE] = { - .nb_channels = 3, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PACKED, - .depth = 5, }, [PIX_FMT_RGB565LE] = { - .nb_channels = 3, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PACKED, - .depth = 5, }, [PIX_FMT_RGB555BE] = { - .nb_channels = 3, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PACKED, - .depth = 5, + .padded_size = 16, }, [PIX_FMT_RGB555LE] = { - .nb_channels = 3, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PACKED, - .depth = 5, + .padded_size = 16, }, [PIX_FMT_RGB444BE] = { - .nb_channels = 3, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PACKED, - .depth = 4, + .padded_size = 16, }, [PIX_FMT_RGB444LE] = { - .nb_channels = 3, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PACKED, - .depth = 4, + .padded_size = 16, }, /* gray / mono formats */ [PIX_FMT_GRAY16BE] = { - .nb_channels = 1, .color_type = FF_COLOR_GRAY, - .pixel_type = FF_PIXEL_PLANAR, - .depth = 16, }, [PIX_FMT_GRAY16LE] = { - .nb_channels = 1, .color_type = FF_COLOR_GRAY, - .pixel_type = FF_PIXEL_PLANAR, - .depth = 16, }, [PIX_FMT_GRAY8] = { - .nb_channels = 1, .color_type = FF_COLOR_GRAY, - .pixel_type = FF_PIXEL_PLANAR, - .depth = 8, }, [PIX_FMT_MONOWHITE] = { - .nb_channels = 1, .color_type = FF_COLOR_GRAY, - .pixel_type = FF_PIXEL_PLANAR, - .depth = 1, }, [PIX_FMT_MONOBLACK] = { - .nb_channels = 1, .color_type = FF_COLOR_GRAY, - .pixel_type = FF_PIXEL_PLANAR, - .depth = 1, }, /* paletted formats */ [PIX_FMT_PAL8] = { - .nb_channels = 4, .is_alpha = 1, + .is_alpha = 1, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PALETTE, - .depth = 8, }, [PIX_FMT_UYYVYY411] = { - .nb_channels = 1, .color_type = FF_COLOR_YUV, - .pixel_type = FF_PIXEL_PACKED, - .depth = 8, }, [PIX_FMT_ABGR] = { - .nb_channels = 4, .is_alpha = 1, + .is_alpha = 1, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PACKED, - .depth = 8, }, [PIX_FMT_BGR565BE] = { - .nb_channels = 3, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PACKED, - .depth = 5, + .padded_size = 16, }, [PIX_FMT_BGR565LE] = { - .nb_channels = 3, .color_type = FF_COLOR_RGB, - 
.pixel_type = FF_PIXEL_PACKED, - .depth = 5, + .padded_size = 16, }, [PIX_FMT_BGR555BE] = { - .nb_channels = 3, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PACKED, - .depth = 5, + .padded_size = 16, }, [PIX_FMT_BGR555LE] = { - .nb_channels = 3, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PACKED, - .depth = 5, + .padded_size = 16, }, [PIX_FMT_BGR444BE] = { - .nb_channels = 3, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PACKED, - .depth = 4, + .padded_size = 16, }, [PIX_FMT_BGR444LE] = { - .nb_channels = 3, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PACKED, - .depth = 4, + .padded_size = 16, }, [PIX_FMT_RGB8] = { - .nb_channels = 1, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PACKED, - .depth = 8, }, [PIX_FMT_RGB4] = { - .nb_channels = 1, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PACKED, - .depth = 4, }, [PIX_FMT_RGB4_BYTE] = { - .nb_channels = 1, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PACKED, - .depth = 8, + .padded_size = 8, }, [PIX_FMT_BGR8] = { - .nb_channels = 1, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PACKED, - .depth = 8, }, [PIX_FMT_BGR4] = { - .nb_channels = 1, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PACKED, - .depth = 4, }, [PIX_FMT_BGR4_BYTE] = { - .nb_channels = 1, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PACKED, - .depth = 8, + .padded_size = 8, }, [PIX_FMT_NV12] = { - .nb_channels = 2, .color_type = FF_COLOR_YUV, - .pixel_type = FF_PIXEL_PLANAR, - .depth = 8, }, [PIX_FMT_NV21] = { - .nb_channels = 2, .color_type = FF_COLOR_YUV, - .pixel_type = FF_PIXEL_PLANAR, - .depth = 8, }, [PIX_FMT_BGRA] = { - .nb_channels = 4, .is_alpha = 1, + .is_alpha = 1, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PACKED, - .depth = 8, }, [PIX_FMT_RGBA] = { - .nb_channels = 4, .is_alpha = 1, + .is_alpha = 1, .color_type = FF_COLOR_RGB, - .pixel_type = FF_PIXEL_PACKED, - .depth = 8, }, }; @@ -522,28 +367,50 @@ int avpicture_get_size(enum PixelFormat pix_fmt, int width, int height) return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height); } +static int get_pix_fmt_depth(int *min, int *max, enum PixelFormat pix_fmt) +{ + const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[pix_fmt]; + int i; + + if (!desc->nb_components) { + *min = *max = 0; + return AVERROR(EINVAL); + } + + *min = INT_MAX, *max = -INT_MAX; + for (i = 0; i < desc->nb_components; i++) { + *min = FFMIN(desc->comp[i].depth_minus1+1, *min); + *max = FFMAX(desc->comp[i].depth_minus1+1, *max); + } + return 0; +} + int avcodec_get_pix_fmt_loss(enum PixelFormat dst_pix_fmt, enum PixelFormat src_pix_fmt, int has_alpha) { const PixFmtInfo *pf, *ps; const AVPixFmtDescriptor *src_desc = &av_pix_fmt_descriptors[src_pix_fmt]; const AVPixFmtDescriptor *dst_desc = &av_pix_fmt_descriptors[dst_pix_fmt]; - int loss; + int src_min_depth, src_max_depth, dst_min_depth, dst_max_depth; + int ret, loss; ps = &pix_fmt_info[src_pix_fmt]; /* compute loss */ loss = 0; - pf = &pix_fmt_info[dst_pix_fmt]; - if (pf->depth < ps->depth || - ((dst_pix_fmt == PIX_FMT_RGB555BE || dst_pix_fmt == PIX_FMT_RGB555LE || - dst_pix_fmt == PIX_FMT_BGR555BE || dst_pix_fmt == PIX_FMT_BGR555LE) && - (src_pix_fmt == PIX_FMT_RGB565BE || src_pix_fmt == PIX_FMT_RGB565LE || - src_pix_fmt == PIX_FMT_BGR565BE || src_pix_fmt == PIX_FMT_BGR565LE))) + + if ((ret = get_pix_fmt_depth(&src_min_depth, &src_max_depth, src_pix_fmt)) < 0) + return ret; + if ((ret = get_pix_fmt_depth(&dst_min_depth, &dst_max_depth, dst_pix_fmt)) < 0) + return ret; + if (dst_min_depth < 
src_min_depth || + dst_max_depth < src_max_depth) loss |= FF_LOSS_DEPTH; if (dst_desc->log2_chroma_w > src_desc->log2_chroma_w || dst_desc->log2_chroma_h > src_desc->log2_chroma_h) loss |= FF_LOSS_RESOLUTION; + + pf = &pix_fmt_info[dst_pix_fmt]; switch(pf->color_type) { case FF_COLOR_RGB: if (ps->color_type != FF_COLOR_RGB && @@ -575,62 +442,19 @@ int avcodec_get_pix_fmt_loss(enum PixelFormat dst_pix_fmt, enum PixelFormat src_ loss |= FF_LOSS_CHROMA; if (!pf->is_alpha && (ps->is_alpha && has_alpha)) loss |= FF_LOSS_ALPHA; - if (pf->pixel_type == FF_PIXEL_PALETTE && - (ps->pixel_type != FF_PIXEL_PALETTE && ps->color_type != FF_COLOR_GRAY)) + if (dst_pix_fmt == PIX_FMT_PAL8 && + (src_pix_fmt != PIX_FMT_PAL8 && ps->color_type != FF_COLOR_GRAY)) loss |= FF_LOSS_COLORQUANT; return loss; } static int avg_bits_per_pixel(enum PixelFormat pix_fmt) { - int bits; - const PixFmtInfo *pf; + const PixFmtInfo *info = &pix_fmt_info[pix_fmt]; const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[pix_fmt]; - pf = &pix_fmt_info[pix_fmt]; - switch(pf->pixel_type) { - case FF_PIXEL_PACKED: - switch(pix_fmt) { - case PIX_FMT_YUYV422: - case PIX_FMT_UYVY422: - case PIX_FMT_RGB565BE: - case PIX_FMT_RGB565LE: - case PIX_FMT_RGB555BE: - case PIX_FMT_RGB555LE: - case PIX_FMT_RGB444BE: - case PIX_FMT_RGB444LE: - case PIX_FMT_BGR565BE: - case PIX_FMT_BGR565LE: - case PIX_FMT_BGR555BE: - case PIX_FMT_BGR555LE: - case PIX_FMT_BGR444BE: - case PIX_FMT_BGR444LE: - bits = 16; - break; - case PIX_FMT_UYYVYY411: - bits = 12; - break; - default: - bits = pf->depth * pf->nb_channels; - break; - } - break; - case FF_PIXEL_PLANAR: - if (desc->log2_chroma_w == 0 && desc->log2_chroma_h == 0) { - bits = pf->depth * pf->nb_channels; - } else { - bits = pf->depth + ((2 * pf->depth) >> - (desc->log2_chroma_w + desc->log2_chroma_h)); - } - break; - case FF_PIXEL_PALETTE: - bits = 8; - break; - default: - bits = -1; - break; - } - return bits; + return info->padded_size ? 
+ info->padded_size : av_get_bits_per_pixel(desc); } static enum PixelFormat avcodec_find_best_pix_fmt1(int64_t pix_fmt_mask, @@ -827,11 +651,26 @@ void avpicture_free(AVPicture *picture) } /* return true if yuv planar */ -static inline int is_yuv_planar(const PixFmtInfo *ps) +static inline int is_yuv_planar(enum PixelFormat fmt) { - return (ps->color_type == FF_COLOR_YUV || - ps->color_type == FF_COLOR_YUV_JPEG) && - ps->pixel_type == FF_PIXEL_PLANAR; + const PixFmtInfo *info = &pix_fmt_info[fmt]; + const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[fmt]; + int i; + int planes[4] = { 0 }; + + if (info->color_type != FF_COLOR_YUV && + info->color_type != FF_COLOR_YUV_JPEG) + return 0; + + /* set the used planes */ + for (i = 0; i < desc->nb_components; i++) + planes[desc->comp[i].plane] = 1; + + /* if there is an unused plane, the format is not planar */ + for (i = 0; i < desc->nb_components; i++) + if (!planes[i]) + return 0; + return 1; } int av_picture_crop(AVPicture *dst, const AVPicture *src, @@ -846,7 +685,7 @@ int av_picture_crop(AVPicture *dst, const AVPicture *src, y_shift = av_pix_fmt_descriptors[pix_fmt].log2_chroma_h; x_shift = av_pix_fmt_descriptors[pix_fmt].log2_chroma_w; - if (is_yuv_planar(&pix_fmt_info[pix_fmt])) { + if (is_yuv_planar(pix_fmt)) { dst->data[0] = src->data[0] + (top_band * src->linesize[0]) + left_band; dst->data[1] = src->data[1] + ((top_band >> y_shift) * src->linesize[1]) + (left_band >> x_shift); dst->data[2] = src->data[2] + ((top_band >> y_shift) * src->linesize[2]) + (left_band >> x_shift); @@ -875,7 +714,7 @@ int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, int i, y; if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB || - !is_yuv_planar(&pix_fmt_info[pix_fmt])) return -1; + !is_yuv_planar(pix_fmt)) return -1; for (i = 0; i < 3; i++) { x_shift = i ? 
av_pix_fmt_descriptors[pix_fmt].log2_chroma_w : 0; diff --git a/libavcodec/indeo2.c b/libavcodec/indeo2.c index f58804bab3..30d4758064 100644 --- a/libavcodec/indeo2.c +++ b/libavcodec/indeo2.c @@ -223,14 +223,13 @@ static av_cold int ir2_decode_end(AVCodecContext *avctx){ } AVCodec ff_indeo2_decoder = { - "indeo2", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_INDEO2, - sizeof(Ir2Context), - ir2_decode_init, - NULL, - ir2_decode_end, - ir2_decode_frame, - CODEC_CAP_DR1, + .name = "indeo2", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_INDEO2, + .priv_data_size = sizeof(Ir2Context), + .init = ir2_decode_init, + .close = ir2_decode_end, + .decode = ir2_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Intel Indeo 2"), }; diff --git a/libavcodec/indeo3.c b/libavcodec/indeo3.c index 8e55fbe443..fbbffe230a 100644 --- a/libavcodec/indeo3.c +++ b/libavcodec/indeo3.c @@ -1147,15 +1147,13 @@ static av_cold int indeo3_decode_end(AVCodecContext *avctx) } AVCodec ff_indeo3_decoder = { - "indeo3", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_INDEO3, - sizeof(Indeo3DecodeContext), - indeo3_decode_init, - NULL, - indeo3_decode_end, - indeo3_decode_frame, - CODEC_CAP_DR1, - NULL, + .name = "indeo3", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_INDEO3, + .priv_data_size = sizeof(Indeo3DecodeContext), + .init = indeo3_decode_init, + .close = indeo3_decode_end, + .decode = indeo3_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Intel Indeo 3"), }; diff --git a/libavcodec/intelh263dec.c b/libavcodec/intelh263dec.c index a011a9f597..836e98ee88 100644 --- a/libavcodec/intelh263dec.c +++ b/libavcodec/intelh263dec.c @@ -125,15 +125,14 @@ int ff_intel_h263_decode_picture_header(MpegEncContext *s) } AVCodec ff_h263i_decoder = { - "h263i", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_H263I, - sizeof(MpegEncContext), - ff_h263_decode_init, - NULL, - ff_h263_decode_end, - ff_h263_decode_frame, - CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1, + .name = "h263i", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_H263I, + .priv_data_size = sizeof(MpegEncContext), + .init = ff_h263_decode_init, + .close = ff_h263_decode_end, + .decode = ff_h263_decode_frame, + .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Intel H.263"), .pix_fmts= ff_pixfmt_list_420, }; diff --git a/libavcodec/internal.h b/libavcodec/internal.h index 24aca6b28b..75d7f37716 100644 --- a/libavcodec/internal.h +++ b/libavcodec/internal.h @@ -27,6 +27,11 @@ #include <stdint.h> #include "avcodec.h" +struct AVCodecDefault { + const uint8_t *key; + const uint8_t *value; +}; + /** * Determine whether pix_fmt is a hardware accelerated format. 
*/ diff --git a/libavcodec/interplayvideo.c b/libavcodec/interplayvideo.c index 5c61af39e4..fdd4d8de11 100644 --- a/libavcodec/interplayvideo.c +++ b/libavcodec/interplayvideo.c @@ -1093,14 +1093,13 @@ static av_cold int ipvideo_decode_end(AVCodecContext *avctx) } AVCodec ff_interplay_video_decoder = { - "interplayvideo", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_INTERPLAY_VIDEO, - sizeof(IpvideoContext), - ipvideo_decode_init, - NULL, - ipvideo_decode_end, - ipvideo_decode_frame, - CODEC_CAP_DR1, + .name = "interplayvideo", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_INTERPLAY_VIDEO, + .priv_data_size = sizeof(IpvideoContext), + .init = ipvideo_decode_init, + .close = ipvideo_decode_end, + .decode = ipvideo_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Interplay MVE video"), }; diff --git a/libavcodec/intrax8.c b/libavcodec/intrax8.c index 75166e8ffd..4b0886280b 100644 --- a/libavcodec/intrax8.c +++ b/libavcodec/intrax8.c @@ -304,7 +304,7 @@ static int x8_setup_spatial_predictor(IntraX8Context * const w, const int chroma int quant; s->dsp.x8_setup_spatial_compensation(s->dest[chroma], s->edge_emu_buffer, - s->current_picture.linesize[chroma>0], + s->current_picture.f.linesize[chroma>0], &range, &sum, w->edges); if(chroma){ w->orient=w->chroma_orient; @@ -613,7 +613,7 @@ static int x8_decode_intra_mb(IntraX8Context* const w, const int chroma){ dc_level+= (w->predicted_dc*divide_quant + (1<<12) )>>13; dsp_x8_put_solidcolor( av_clip_uint8((dc_level*dc_quant+4)>>3), - s->dest[chroma], s->current_picture.linesize[!!chroma]); + s->dest[chroma], s->current_picture.f.linesize[!!chroma]); goto block_placed; } @@ -637,15 +637,15 @@ static int x8_decode_intra_mb(IntraX8Context* const w, const int chroma){ } if(w->flat_dc){ - dsp_x8_put_solidcolor(w->predicted_dc, s->dest[chroma], s->current_picture.linesize[!!chroma]); + dsp_x8_put_solidcolor(w->predicted_dc, s->dest[chroma], s->current_picture.f.linesize[!!chroma]); }else{ s->dsp.x8_spatial_compensation[w->orient]( s->edge_emu_buffer, s->dest[chroma], - s->current_picture.linesize[!!chroma] ); + s->current_picture.f.linesize[!!chroma] ); } if(!zeros_only) s->dsp.idct_add ( s->dest[chroma], - s->current_picture.linesize[!!chroma], + s->current_picture.f.linesize[!!chroma], s->block[0] ); block_placed: @@ -656,7 +656,7 @@ block_placed: if(s->loop_filter){ uint8_t* ptr = s->dest[chroma]; - int linesize = s->current_picture.linesize[!!chroma]; + int linesize = s->current_picture.f.linesize[!!chroma]; if(!( (w->edges&2) || ( zeros_only && (w->orient|4)==4 ) )){ s->dsp.x8_h_loop_filter(ptr, linesize, w->quant); @@ -671,12 +671,12 @@ block_placed: static void x8_init_block_index(MpegEncContext *s){ //FIXME maybe merge with ff_* //not s->linesize as this would be wrong for field pics //not that IntraX8 has interlacing support ;) - const int linesize = s->current_picture.linesize[0]; - const int uvlinesize= s->current_picture.linesize[1]; + const int linesize = s->current_picture.f.linesize[0]; + const int uvlinesize = s->current_picture.f.linesize[1]; - s->dest[0] = s->current_picture.data[0]; - s->dest[1] = s->current_picture.data[1]; - s->dest[2] = s->current_picture.data[2]; + s->dest[0] = s->current_picture.f.data[0]; + s->dest[1] = s->current_picture.f.data[1]; + s->dest[2] = s->current_picture.f.data[2]; s->dest[0] += s->mb_y * linesize << 3; s->dest[1] += ( s->mb_y&(~1) ) * uvlinesize << 2;//chroma blocks are on add rows @@ -771,7 +771,7 @@ int ff_intrax8_decode_picture(IntraX8Context * const w, int dquant, int quant_of 
/*emulate MB info in the relevant tables*/ s->mbskip_table [mb_xy]=0; s->mbintra_table[mb_xy]=1; - s->current_picture.qscale_table[mb_xy]=w->quant; + s->current_picture.f.qscale_table[mb_xy] = w->quant; mb_xy++; } s->dest[0]+= 8; diff --git a/libavcodec/ituh263dec.c b/libavcodec/ituh263dec.c index b1e67231fd..b0976801ed 100644 --- a/libavcodec/ituh263dec.c +++ b/libavcodec/ituh263dec.c @@ -30,6 +30,7 @@ //#define DEBUG #include <limits.h> +#include "libavutil/mathematics.h" #include "dsputil.h" #include "avcodec.h" #include "mpegvideo.h" @@ -352,20 +353,20 @@ static void preview_obmc(MpegEncContext *s){ do{ if (get_bits1(&s->gb)) { /* skip mb */ - mot_val = s->current_picture.motion_val[0][ s->block_index[0] ]; + mot_val = s->current_picture.f.motion_val[0][s->block_index[0]]; mot_val[0 ]= mot_val[2 ]= mot_val[0+stride]= mot_val[2+stride]= 0; mot_val[1 ]= mot_val[3 ]= mot_val[1+stride]= mot_val[3+stride]= 0; - s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; + s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; goto end; } cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2); }while(cbpc == 20); if(cbpc & 4){ - s->current_picture.mb_type[xy]= MB_TYPE_INTRA; + s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA; }else{ get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1); if (cbpc & 8) { @@ -377,7 +378,7 @@ static void preview_obmc(MpegEncContext *s){ } if ((cbpc & 16) == 0) { - s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0; + s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0; /* 16x16 motion prediction */ mot_val= h263_pred_motion(s, 0, 0, &pred_x, &pred_y); if (s->umvplus) @@ -395,7 +396,7 @@ static void preview_obmc(MpegEncContext *s){ mot_val[1 ]= mot_val[3 ]= mot_val[1+stride]= mot_val[3+stride]= my; } else { - s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0; + s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0; for(i=0;i<4;i++) { mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y); if (s->umvplus) @@ -617,7 +618,7 @@ int ff_h263_decode_mb(MpegEncContext *s, s->block_last_index[i] = -1; s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; - s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; + s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; s->mb_skipped = !(s->obmc | s->loop_filter); @@ -650,7 +651,7 @@ int ff_h263_decode_mb(MpegEncContext *s, s->mv_dir = MV_DIR_FORWARD; if ((cbpc & 16) == 0) { - s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0; + s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0; /* 16x16 motion prediction */ s->mv_type = MV_TYPE_16X16; h263_pred_motion(s, 0, 0, &pred_x, &pred_y); @@ -675,7 +676,7 @@ int ff_h263_decode_mb(MpegEncContext *s, if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1) skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */ } else { - s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0; + s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0; s->mv_type = MV_TYPE_8X8; for(i=0;i<4;i++) { mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y); @@ -703,8 +704,8 @@ int ff_h263_decode_mb(MpegEncContext *s, } else if(s->pict_type==AV_PICTURE_TYPE_B) { int mb_type; const int stride= s->b8_stride; - int16_t *mot_val0 = s->current_picture.motion_val[0][ 2*(s->mb_x + s->mb_y*stride) ]; - int16_t *mot_val1 = s->current_picture.motion_val[1][ 2*(s->mb_x + s->mb_y*stride) ]; + int16_t *mot_val0 = 
s->current_picture.f.motion_val[0][2 * (s->mb_x + s->mb_y * stride)]; + int16_t *mot_val1 = s->current_picture.f.motion_val[1][2 * (s->mb_x + s->mb_y * stride)]; // const int mv_xy= s->mb_x + 1 + s->mb_y * s->mb_stride; //FIXME ugly @@ -787,7 +788,7 @@ int ff_h263_decode_mb(MpegEncContext *s, } } - s->current_picture.mb_type[xy]= mb_type; + s->current_picture.f.mb_type[xy] = mb_type; } else { /* I-Frame */ do{ cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2); @@ -802,11 +803,11 @@ int ff_h263_decode_mb(MpegEncContext *s, dquant = cbpc & 4; s->mb_intra = 1; intra: - s->current_picture.mb_type[xy]= MB_TYPE_INTRA; + s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA; if (s->h263_aic) { s->ac_pred = get_bits1(&s->gb); if(s->ac_pred){ - s->current_picture.mb_type[xy]= MB_TYPE_INTRA | MB_TYPE_ACPRED; + s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED; s->h263_aic_dir = get_bits1(&s->gb); } @@ -888,7 +889,7 @@ int h263_decode_picture_header(MpegEncContext *s) i = get_bits(&s->gb, 8); /* picture timestamp */ if( (s->picture_number&~0xFF)+i < s->picture_number) i+= 256; - s->current_picture_ptr->pts= + s->current_picture_ptr->f.pts = s->picture_number= (s->picture_number&~0xFF) + i; /* PTYPE starts here */ diff --git a/libavcodec/ituh263enc.c b/libavcodec/ituh263enc.c index 320f82a83f..557ed48a56 100644 --- a/libavcodec/ituh263enc.c +++ b/libavcodec/ituh263enc.c @@ -275,7 +275,7 @@ void h263_encode_gob_header(MpegEncContext * s, int mb_line) */ void ff_clean_h263_qscales(MpegEncContext *s){ int i; - int8_t * const qscale_table= s->current_picture.qscale_table; + int8_t * const qscale_table = s->current_picture.f.qscale_table; ff_init_qscale_tab(s); @@ -529,8 +529,8 @@ void h263_encode_mb(MpegEncContext * s, /* motion vectors: 8x8 mode*/ h263_pred_motion(s, i, 0, &pred_x, &pred_y); - motion_x= s->current_picture.motion_val[0][ s->block_index[i] ][0]; - motion_y= s->current_picture.motion_val[0][ s->block_index[i] ][1]; + motion_x = s->current_picture.f.motion_val[0][s->block_index[i]][0]; + motion_y = s->current_picture.f.motion_val[0][s->block_index[i]][1]; if (!s->umvplus) { ff_h263_encode_motion_vector(s, motion_x - pred_x, motion_y - pred_y, 1); diff --git a/libavcodec/jfdctint.c b/libavcodec/jfdctint.c index 072c7440b5..0482bc5643 100644 --- a/libavcodec/jfdctint.c +++ b/libavcodec/jfdctint.c @@ -1,402 +1,25 @@ -/* - * jfdctint.c - * - * This file is part of the Independent JPEG Group's software. - * - * The authors make NO WARRANTY or representation, either express or implied, - * with respect to this software, its quality, accuracy, merchantability, or - * fitness for a particular purpose. This software is provided "AS IS", and - * you, its user, assume the entire risk as to its quality and accuracy. - * - * This software is copyright (C) 1991-1996, Thomas G. Lane. - * All Rights Reserved except as specified below. - * - * Permission is hereby granted to use, copy, modify, and distribute this - * software (or portions thereof) for any purpose, without fee, subject to - * these conditions: - * (1) If any part of the source code for this software is distributed, then - * this README file must be included, with this copyright and no-warranty - * notice unaltered; and any additions, deletions, or changes to the original - * files must be clearly indicated in accompanying documentation. 
- * (2) If only executable code is distributed, then the accompanying - * documentation must state that "this software is based in part on the work - * of the Independent JPEG Group". - * (3) Permission for use of this software is granted only if the user accepts - * full responsibility for any undesirable consequences; the authors accept - * NO LIABILITY for damages of any kind. - * - * These conditions apply to any software derived from or based on the IJG - * code, not just to the unmodified library. If you use our work, you ought - * to acknowledge us. - * - * Permission is NOT granted for the use of any IJG author's name or company - * name in advertising or publicity relating to this software or products - * derived from it. This software may be referred to only as "the Independent - * JPEG Group's software". - * - * We specifically permit and encourage the use of this software as the basis - * of commercial products, provided that all warranty or liability claims are - * assumed by the product vendor. - * - * This file contains a slow-but-accurate integer implementation of the - * forward DCT (Discrete Cosine Transform). - * - * A 2-D DCT can be done by 1-D DCT on each row followed by 1-D DCT - * on each column. Direct algorithms are also available, but they are - * much more complex and seem not to be any faster when reduced to code. - * - * This implementation is based on an algorithm described in - * C. Loeffler, A. Ligtenberg and G. Moschytz, "Practical Fast 1-D DCT - * Algorithms with 11 Multiplications", Proc. Int'l. Conf. on Acoustics, - * Speech, and Signal Processing 1989 (ICASSP '89), pp. 988-991. - * The primary algorithm described there uses 11 multiplies and 29 adds. - * We use their alternate method with 12 multiplies and 32 adds. - * The advantage of this method is that no data path contains more than one - * multiplication; this allows a very simple and accurate implementation in - * scaled fixed-point arithmetic, with a minimal number of shifts. - */ - /** - * @file - * Independent JPEG Group's slow & accurate dct. - */ - -#include <stdlib.h> -#include <stdio.h> -#include "libavutil/common.h" -#include "dsputil.h" - -#define DCTSIZE 8 -#define BITS_IN_JSAMPLE 8 -#define GLOBAL(x) x -#define RIGHT_SHIFT(x, n) ((x) >> (n)) -#define MULTIPLY16C16(var,const) ((var)*(const)) - -#if 1 //def USE_ACCURATE_ROUNDING -#define DESCALE(x,n) RIGHT_SHIFT((x) + (1 << ((n) - 1)), n) -#else -#define DESCALE(x,n) RIGHT_SHIFT(x, n) -#endif - - -/* - * This module is specialized to the case DCTSIZE = 8. - */ - -#if DCTSIZE != 8 - Sorry, this code only copes with 8x8 DCTs. /* deliberate syntax err */ -#endif - - -/* - * The poop on this scaling stuff is as follows: - * - * Each 1-D DCT step produces outputs which are a factor of sqrt(N) - * larger than the true DCT outputs. The final outputs are therefore - * a factor of N larger than desired; since N=8 this can be cured by - * a simple right shift at the end of the algorithm. The advantage of - * this arrangement is that we save two multiplications per 1-D DCT, - * because the y0 and y4 outputs need not be divided by sqrt(N). - * In the IJG code, this factor of 8 is removed by the quantization step - * (in jcdctmgr.c), NOT in this module. + * This file is part of Libav. * - * We have to do addition and subtraction of the integer inputs, which - * is no problem, and multiplication by fractional constants, which is - * a problem to do in integer arithmetic. 
We multiply all the constants - * by CONST_SCALE and convert them to integer constants (thus retaining - * CONST_BITS bits of precision in the constants). After doing a - * multiplication we have to divide the product by CONST_SCALE, with proper - * rounding, to produce the correct output. This division can be done - * cheaply as a right shift of CONST_BITS bits. We postpone shifting - * as long as possible so that partial sums can be added together with - * full fractional precision. + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. * - * The outputs of the first pass are scaled up by PASS1_BITS bits so that - * they are represented to better-than-integral precision. These outputs - * require BITS_IN_JSAMPLE + PASS1_BITS + 3 bits; this fits in a 16-bit word - * with the recommended scaling. (For 12-bit sample data, the intermediate - * array is int32_t anyway.) + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * To avoid overflow of the 32-bit intermediate results in pass 2, we must - * have BITS_IN_JSAMPLE + CONST_BITS + PASS1_BITS <= 26. Error analysis - * shows that the values given below are the most effective. - */ - -#if BITS_IN_JSAMPLE == 8 -#define CONST_BITS 13 -#define PASS1_BITS 4 /* set this to 2 if 16x16 multiplies are faster */ -#else -#define CONST_BITS 13 -#define PASS1_BITS 1 /* lose a little precision to avoid overflow */ -#endif - -/* Some C compilers fail to reduce "FIX(constant)" at compile time, thus - * causing a lot of useless floating-point operations at run time. - * To get around this we use the following pre-calculated constants. - * If you change CONST_BITS you may want to add appropriate values. - * (With a reasonable C compiler, you can just rely on the FIX() macro...) 
- */ - -#if CONST_BITS == 13 -#define FIX_0_298631336 ((int32_t) 2446) /* FIX(0.298631336) */ -#define FIX_0_390180644 ((int32_t) 3196) /* FIX(0.390180644) */ -#define FIX_0_541196100 ((int32_t) 4433) /* FIX(0.541196100) */ -#define FIX_0_765366865 ((int32_t) 6270) /* FIX(0.765366865) */ -#define FIX_0_899976223 ((int32_t) 7373) /* FIX(0.899976223) */ -#define FIX_1_175875602 ((int32_t) 9633) /* FIX(1.175875602) */ -#define FIX_1_501321110 ((int32_t) 12299) /* FIX(1.501321110) */ -#define FIX_1_847759065 ((int32_t) 15137) /* FIX(1.847759065) */ -#define FIX_1_961570560 ((int32_t) 16069) /* FIX(1.961570560) */ -#define FIX_2_053119869 ((int32_t) 16819) /* FIX(2.053119869) */ -#define FIX_2_562915447 ((int32_t) 20995) /* FIX(2.562915447) */ -#define FIX_3_072711026 ((int32_t) 25172) /* FIX(3.072711026) */ -#else -#define FIX_0_298631336 FIX(0.298631336) -#define FIX_0_390180644 FIX(0.390180644) -#define FIX_0_541196100 FIX(0.541196100) -#define FIX_0_765366865 FIX(0.765366865) -#define FIX_0_899976223 FIX(0.899976223) -#define FIX_1_175875602 FIX(1.175875602) -#define FIX_1_501321110 FIX(1.501321110) -#define FIX_1_847759065 FIX(1.847759065) -#define FIX_1_961570560 FIX(1.961570560) -#define FIX_2_053119869 FIX(2.053119869) -#define FIX_2_562915447 FIX(2.562915447) -#define FIX_3_072711026 FIX(3.072711026) -#endif - - -/* Multiply an int32_t variable by an int32_t constant to yield an int32_t result. - * For 8-bit samples with the recommended scaling, all the variable - * and constant values involved are no more than 16 bits wide, so a - * 16x16->32 bit multiply can be used instead of a full 32x32 multiply. - * For 12-bit samples, a full 32-bit multiplication will be needed. + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ -#if BITS_IN_JSAMPLE == 8 && CONST_BITS<=13 && PASS1_BITS<=2 -#define MULTIPLY(var,const) MULTIPLY16C16(var,const) -#else -#define MULTIPLY(var,const) ((var) * (const)) -#endif - - -static av_always_inline void row_fdct(DCTELEM * data){ - int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; - int tmp10, tmp11, tmp12, tmp13; - int z1, z2, z3, z4, z5; - DCTELEM *dataptr; - int ctr; - - /* Pass 1: process rows. */ - /* Note results are scaled up by sqrt(8) compared to a true DCT; */ - /* furthermore, we scale the results by 2**PASS1_BITS. */ - - dataptr = data; - for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { - tmp0 = dataptr[0] + dataptr[7]; - tmp7 = dataptr[0] - dataptr[7]; - tmp1 = dataptr[1] + dataptr[6]; - tmp6 = dataptr[1] - dataptr[6]; - tmp2 = dataptr[2] + dataptr[5]; - tmp5 = dataptr[2] - dataptr[5]; - tmp3 = dataptr[3] + dataptr[4]; - tmp4 = dataptr[3] - dataptr[4]; - - /* Even part per LL&M figure 1 --- note that published figure is faulty; - * rotator "sqrt(2)*c1" should be "sqrt(2)*c6". - */ - - tmp10 = tmp0 + tmp3; - tmp13 = tmp0 - tmp3; - tmp11 = tmp1 + tmp2; - tmp12 = tmp1 - tmp2; - - dataptr[0] = (DCTELEM) ((tmp10 + tmp11) << PASS1_BITS); - dataptr[4] = (DCTELEM) ((tmp10 - tmp11) << PASS1_BITS); - - z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); - dataptr[2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865), - CONST_BITS-PASS1_BITS); - dataptr[6] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065), - CONST_BITS-PASS1_BITS); - - /* Odd part per figure 8 --- note paper omits factor of sqrt(2). - * cK represents cos(K*pi/16). - * i0..i3 in the paper are tmp4..tmp7 here. 
- */ - - z1 = tmp4 + tmp7; - z2 = tmp5 + tmp6; - z3 = tmp4 + tmp6; - z4 = tmp5 + tmp7; - z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */ - - tmp4 = MULTIPLY(tmp4, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */ - tmp5 = MULTIPLY(tmp5, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */ - tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */ - tmp7 = MULTIPLY(tmp7, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */ - z1 = MULTIPLY(z1, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */ - z2 = MULTIPLY(z2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */ - z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */ - z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */ - - z3 += z5; - z4 += z5; - - dataptr[7] = (DCTELEM) DESCALE(tmp4 + z1 + z3, CONST_BITS-PASS1_BITS); - dataptr[5] = (DCTELEM) DESCALE(tmp5 + z2 + z4, CONST_BITS-PASS1_BITS); - dataptr[3] = (DCTELEM) DESCALE(tmp6 + z2 + z3, CONST_BITS-PASS1_BITS); - dataptr[1] = (DCTELEM) DESCALE(tmp7 + z1 + z4, CONST_BITS-PASS1_BITS); - - dataptr += DCTSIZE; /* advance pointer to next row */ - } -} - -/* - * Perform the forward DCT on one block of samples. - */ - -GLOBAL(void) -ff_jpeg_fdct_islow (DCTELEM * data) -{ - int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; - int tmp10, tmp11, tmp12, tmp13; - int z1, z2, z3, z4, z5; - DCTELEM *dataptr; - int ctr; - - row_fdct(data); - - /* Pass 2: process columns. - * We remove the PASS1_BITS scaling, but leave the results scaled up - * by an overall factor of 8. - */ - - dataptr = data; - for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { - tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*7]; - tmp7 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*7]; - tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*6]; - tmp6 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*6]; - tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*5]; - tmp5 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*5]; - tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*4]; - tmp4 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*4]; - - /* Even part per LL&M figure 1 --- note that published figure is faulty; - * rotator "sqrt(2)*c1" should be "sqrt(2)*c6". - */ - - tmp10 = tmp0 + tmp3; - tmp13 = tmp0 - tmp3; - tmp11 = tmp1 + tmp2; - tmp12 = tmp1 - tmp2; - - dataptr[DCTSIZE*0] = (DCTELEM) DESCALE(tmp10 + tmp11, PASS1_BITS); - dataptr[DCTSIZE*4] = (DCTELEM) DESCALE(tmp10 - tmp11, PASS1_BITS); - - z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); - dataptr[DCTSIZE*2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865), - CONST_BITS+PASS1_BITS); - dataptr[DCTSIZE*6] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065), - CONST_BITS+PASS1_BITS); - - /* Odd part per figure 8 --- note paper omits factor of sqrt(2). - * cK represents cos(K*pi/16). - * i0..i3 in the paper are tmp4..tmp7 here. 
- */ - - z1 = tmp4 + tmp7; - z2 = tmp5 + tmp6; - z3 = tmp4 + tmp6; - z4 = tmp5 + tmp7; - z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */ - - tmp4 = MULTIPLY(tmp4, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */ - tmp5 = MULTIPLY(tmp5, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */ - tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */ - tmp7 = MULTIPLY(tmp7, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */ - z1 = MULTIPLY(z1, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */ - z2 = MULTIPLY(z2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */ - z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */ - z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */ - - z3 += z5; - z4 += z5; - - dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp4 + z1 + z3, - CONST_BITS+PASS1_BITS); - dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp5 + z2 + z4, - CONST_BITS+PASS1_BITS); - dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp6 + z2 + z3, - CONST_BITS+PASS1_BITS); - dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp7 + z1 + z4, - CONST_BITS+PASS1_BITS); - - dataptr++; /* advance pointer to next column */ - } -} - -/* - * The secret of DCT2-4-8 is really simple -- you do the usual 1-DCT - * on the rows and then, instead of doing even and odd, part on the colums - * you do even part two times. - */ -GLOBAL(void) -ff_fdct248_islow (DCTELEM * data) -{ - int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; - int tmp10, tmp11, tmp12, tmp13; - int z1; - DCTELEM *dataptr; - int ctr; - - row_fdct(data); - - /* Pass 2: process columns. - * We remove the PASS1_BITS scaling, but leave the results scaled up - * by an overall factor of 8. - */ - - dataptr = data; - for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { - tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*1]; - tmp1 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*3]; - tmp2 = dataptr[DCTSIZE*4] + dataptr[DCTSIZE*5]; - tmp3 = dataptr[DCTSIZE*6] + dataptr[DCTSIZE*7]; - tmp4 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*1]; - tmp5 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*3]; - tmp6 = dataptr[DCTSIZE*4] - dataptr[DCTSIZE*5]; - tmp7 = dataptr[DCTSIZE*6] - dataptr[DCTSIZE*7]; - - tmp10 = tmp0 + tmp3; - tmp11 = tmp1 + tmp2; - tmp12 = tmp1 - tmp2; - tmp13 = tmp0 - tmp3; - - dataptr[DCTSIZE*0] = (DCTELEM) DESCALE(tmp10 + tmp11, PASS1_BITS); - dataptr[DCTSIZE*4] = (DCTELEM) DESCALE(tmp10 - tmp11, PASS1_BITS); - - z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); - dataptr[DCTSIZE*2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865), - CONST_BITS+PASS1_BITS); - dataptr[DCTSIZE*6] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065), - CONST_BITS+PASS1_BITS); - - tmp10 = tmp4 + tmp7; - tmp11 = tmp5 + tmp6; - tmp12 = tmp5 - tmp6; - tmp13 = tmp4 - tmp7; - - dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp10 + tmp11, PASS1_BITS); - dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp10 - tmp11, PASS1_BITS); - - z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); - dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865), - CONST_BITS+PASS1_BITS); - dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065), - CONST_BITS+PASS1_BITS); +#define BIT_DEPTH 8 +#include "jfdctint_template.c" +#undef BIT_DEPTH - dataptr++; /* advance pointer to next column */ - } -} +#define BIT_DEPTH 10 +#include "jfdctint_template.c" +#undef BIT_DEPTH diff --git a/libavcodec/jfdctint_template.c b/libavcodec/jfdctint_template.c new file mode 100644 index 0000000000..e60e72a412 --- /dev/null +++ b/libavcodec/jfdctint_template.c @@ -0,0 +1,405 @@ +/* + * jfdctint.c + * + * This file is part of 
the Independent JPEG Group's software. + * + * The authors make NO WARRANTY or representation, either express or implied, + * with respect to this software, its quality, accuracy, merchantability, or + * fitness for a particular purpose. This software is provided "AS IS", and + * you, its user, assume the entire risk as to its quality and accuracy. + * + * This software is copyright (C) 1991-1996, Thomas G. Lane. + * All Rights Reserved except as specified below. + * + * Permission is hereby granted to use, copy, modify, and distribute this + * software (or portions thereof) for any purpose, without fee, subject to + * these conditions: + * (1) If any part of the source code for this software is distributed, then + * this README file must be included, with this copyright and no-warranty + * notice unaltered; and any additions, deletions, or changes to the original + * files must be clearly indicated in accompanying documentation. + * (2) If only executable code is distributed, then the accompanying + * documentation must state that "this software is based in part on the work + * of the Independent JPEG Group". + * (3) Permission for use of this software is granted only if the user accepts + * full responsibility for any undesirable consequences; the authors accept + * NO LIABILITY for damages of any kind. + * + * These conditions apply to any software derived from or based on the IJG + * code, not just to the unmodified library. If you use our work, you ought + * to acknowledge us. + * + * Permission is NOT granted for the use of any IJG author's name or company + * name in advertising or publicity relating to this software or products + * derived from it. This software may be referred to only as "the Independent + * JPEG Group's software". + * + * We specifically permit and encourage the use of this software as the basis + * of commercial products, provided that all warranty or liability claims are + * assumed by the product vendor. + * + * This file contains a slow-but-accurate integer implementation of the + * forward DCT (Discrete Cosine Transform). + * + * A 2-D DCT can be done by 1-D DCT on each row followed by 1-D DCT + * on each column. Direct algorithms are also available, but they are + * much more complex and seem not to be any faster when reduced to code. + * + * This implementation is based on an algorithm described in + * C. Loeffler, A. Ligtenberg and G. Moschytz, "Practical Fast 1-D DCT + * Algorithms with 11 Multiplications", Proc. Int'l. Conf. on Acoustics, + * Speech, and Signal Processing 1989 (ICASSP '89), pp. 988-991. + * The primary algorithm described there uses 11 multiplies and 29 adds. + * We use their alternate method with 12 multiplies and 32 adds. + * The advantage of this method is that no data path contains more than one + * multiplication; this allows a very simple and accurate implementation in + * scaled fixed-point arithmetic, with a minimal number of shifts. + */ + +/** + * @file + * Independent JPEG Group's slow & accurate dct. + */ + +#include "libavutil/common.h" +#include "dsputil.h" + +#include "bit_depth_template.c" + +#define DCTSIZE 8 +#define BITS_IN_JSAMPLE BIT_DEPTH +#define GLOBAL(x) x +#define RIGHT_SHIFT(x, n) ((x) >> (n)) +#define MULTIPLY16C16(var,const) ((var)*(const)) + +#if 1 //def USE_ACCURATE_ROUNDING +#define DESCALE(x,n) RIGHT_SHIFT((x) + (1 << ((n) - 1)), n) +#else +#define DESCALE(x,n) RIGHT_SHIFT(x, n) +#endif + + +/* + * This module is specialized to the case DCTSIZE = 8. 
+ */ + +#if DCTSIZE != 8 +#error "Sorry, this code only copes with 8x8 DCTs." +#endif + + +/* + * The poop on this scaling stuff is as follows: + * + * Each 1-D DCT step produces outputs which are a factor of sqrt(N) + * larger than the true DCT outputs. The final outputs are therefore + * a factor of N larger than desired; since N=8 this can be cured by + * a simple right shift at the end of the algorithm. The advantage of + * this arrangement is that we save two multiplications per 1-D DCT, + * because the y0 and y4 outputs need not be divided by sqrt(N). + * In the IJG code, this factor of 8 is removed by the quantization step + * (in jcdctmgr.c), NOT in this module. + * + * We have to do addition and subtraction of the integer inputs, which + * is no problem, and multiplication by fractional constants, which is + * a problem to do in integer arithmetic. We multiply all the constants + * by CONST_SCALE and convert them to integer constants (thus retaining + * CONST_BITS bits of precision in the constants). After doing a + * multiplication we have to divide the product by CONST_SCALE, with proper + * rounding, to produce the correct output. This division can be done + * cheaply as a right shift of CONST_BITS bits. We postpone shifting + * as long as possible so that partial sums can be added together with + * full fractional precision. + * + * The outputs of the first pass are scaled up by PASS1_BITS bits so that + * they are represented to better-than-integral precision. These outputs + * require BITS_IN_JSAMPLE + PASS1_BITS + 3 bits; this fits in a 16-bit word + * with the recommended scaling. (For 12-bit sample data, the intermediate + * array is int32_t anyway.) + * + * To avoid overflow of the 32-bit intermediate results in pass 2, we must + * have BITS_IN_JSAMPLE + CONST_BITS + PASS1_BITS <= 26. Error analysis + * shows that the values given below are the most effective. + */ + +#undef CONST_BITS +#undef PASS1_BITS +#undef OUT_SHIFT + +#if BITS_IN_JSAMPLE == 8 +#define CONST_BITS 13 +#define PASS1_BITS 4 /* set this to 2 if 16x16 multiplies are faster */ +#define OUT_SHIFT PASS1_BITS +#else +#define CONST_BITS 13 +#define PASS1_BITS 1 /* lose a little precision to avoid overflow */ +#define OUT_SHIFT (PASS1_BITS + 1) +#endif + +/* Some C compilers fail to reduce "FIX(constant)" at compile time, thus + * causing a lot of useless floating-point operations at run time. + * To get around this we use the following pre-calculated constants. + * If you change CONST_BITS you may want to add appropriate values. + * (With a reasonable C compiler, you can just rely on the FIX() macro...) 
+ */ + +#if CONST_BITS == 13 +#define FIX_0_298631336 ((int32_t) 2446) /* FIX(0.298631336) */ +#define FIX_0_390180644 ((int32_t) 3196) /* FIX(0.390180644) */ +#define FIX_0_541196100 ((int32_t) 4433) /* FIX(0.541196100) */ +#define FIX_0_765366865 ((int32_t) 6270) /* FIX(0.765366865) */ +#define FIX_0_899976223 ((int32_t) 7373) /* FIX(0.899976223) */ +#define FIX_1_175875602 ((int32_t) 9633) /* FIX(1.175875602) */ +#define FIX_1_501321110 ((int32_t) 12299) /* FIX(1.501321110) */ +#define FIX_1_847759065 ((int32_t) 15137) /* FIX(1.847759065) */ +#define FIX_1_961570560 ((int32_t) 16069) /* FIX(1.961570560) */ +#define FIX_2_053119869 ((int32_t) 16819) /* FIX(2.053119869) */ +#define FIX_2_562915447 ((int32_t) 20995) /* FIX(2.562915447) */ +#define FIX_3_072711026 ((int32_t) 25172) /* FIX(3.072711026) */ +#else +#define FIX_0_298631336 FIX(0.298631336) +#define FIX_0_390180644 FIX(0.390180644) +#define FIX_0_541196100 FIX(0.541196100) +#define FIX_0_765366865 FIX(0.765366865) +#define FIX_0_899976223 FIX(0.899976223) +#define FIX_1_175875602 FIX(1.175875602) +#define FIX_1_501321110 FIX(1.501321110) +#define FIX_1_847759065 FIX(1.847759065) +#define FIX_1_961570560 FIX(1.961570560) +#define FIX_2_053119869 FIX(2.053119869) +#define FIX_2_562915447 FIX(2.562915447) +#define FIX_3_072711026 FIX(3.072711026) +#endif + + +/* Multiply an int32_t variable by an int32_t constant to yield an int32_t result. + * For 8-bit samples with the recommended scaling, all the variable + * and constant values involved are no more than 16 bits wide, so a + * 16x16->32 bit multiply can be used instead of a full 32x32 multiply. + * For 12-bit samples, a full 32-bit multiplication will be needed. + */ + +#if BITS_IN_JSAMPLE == 8 && CONST_BITS<=13 && PASS1_BITS<=2 +#define MULTIPLY(var,const) MULTIPLY16C16(var,const) +#else +#define MULTIPLY(var,const) ((var) * (const)) +#endif + + +static av_always_inline void FUNC(row_fdct)(DCTELEM *data) +{ + int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + int tmp10, tmp11, tmp12, tmp13; + int z1, z2, z3, z4, z5; + DCTELEM *dataptr; + int ctr; + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + + dataptr = data; + for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { + tmp0 = dataptr[0] + dataptr[7]; + tmp7 = dataptr[0] - dataptr[7]; + tmp1 = dataptr[1] + dataptr[6]; + tmp6 = dataptr[1] - dataptr[6]; + tmp2 = dataptr[2] + dataptr[5]; + tmp5 = dataptr[2] - dataptr[5]; + tmp3 = dataptr[3] + dataptr[4]; + tmp4 = dataptr[3] - dataptr[4]; + + /* Even part per LL&M figure 1 --- note that published figure is faulty; + * rotator "sqrt(2)*c1" should be "sqrt(2)*c6". + */ + + tmp10 = tmp0 + tmp3; + tmp13 = tmp0 - tmp3; + tmp11 = tmp1 + tmp2; + tmp12 = tmp1 - tmp2; + + dataptr[0] = (DCTELEM) ((tmp10 + tmp11) << PASS1_BITS); + dataptr[4] = (DCTELEM) ((tmp10 - tmp11) << PASS1_BITS); + + z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); + dataptr[2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865), + CONST_BITS-PASS1_BITS); + dataptr[6] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065), + CONST_BITS-PASS1_BITS); + + /* Odd part per figure 8 --- note paper omits factor of sqrt(2). + * cK represents cos(K*pi/16). + * i0..i3 in the paper are tmp4..tmp7 here. 
+ */ + + z1 = tmp4 + tmp7; + z2 = tmp5 + tmp6; + z3 = tmp4 + tmp6; + z4 = tmp5 + tmp7; + z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */ + + tmp4 = MULTIPLY(tmp4, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */ + tmp5 = MULTIPLY(tmp5, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */ + tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */ + tmp7 = MULTIPLY(tmp7, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */ + z1 = MULTIPLY(z1, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */ + z2 = MULTIPLY(z2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */ + z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */ + z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */ + + z3 += z5; + z4 += z5; + + dataptr[7] = (DCTELEM) DESCALE(tmp4 + z1 + z3, CONST_BITS-PASS1_BITS); + dataptr[5] = (DCTELEM) DESCALE(tmp5 + z2 + z4, CONST_BITS-PASS1_BITS); + dataptr[3] = (DCTELEM) DESCALE(tmp6 + z2 + z3, CONST_BITS-PASS1_BITS); + dataptr[1] = (DCTELEM) DESCALE(tmp7 + z1 + z4, CONST_BITS-PASS1_BITS); + + dataptr += DCTSIZE; /* advance pointer to next row */ + } +} + +/* + * Perform the forward DCT on one block of samples. + */ + +GLOBAL(void) +FUNC(ff_jpeg_fdct_islow)(DCTELEM *data) +{ + int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + int tmp10, tmp11, tmp12, tmp13; + int z1, z2, z3, z4, z5; + DCTELEM *dataptr; + int ctr; + + FUNC(row_fdct)(data); + + /* Pass 2: process columns. + * We remove the PASS1_BITS scaling, but leave the results scaled up + * by an overall factor of 8. + */ + + dataptr = data; + for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { + tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*7]; + tmp7 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*7]; + tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*6]; + tmp6 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*6]; + tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*5]; + tmp5 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*5]; + tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*4]; + tmp4 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*4]; + + /* Even part per LL&M figure 1 --- note that published figure is faulty; + * rotator "sqrt(2)*c1" should be "sqrt(2)*c6". + */ + + tmp10 = tmp0 + tmp3; + tmp13 = tmp0 - tmp3; + tmp11 = tmp1 + tmp2; + tmp12 = tmp1 - tmp2; + + dataptr[DCTSIZE*0] = DESCALE(tmp10 + tmp11, OUT_SHIFT); + dataptr[DCTSIZE*4] = DESCALE(tmp10 - tmp11, OUT_SHIFT); + + z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); + dataptr[DCTSIZE*2] = DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865), + CONST_BITS + OUT_SHIFT); + dataptr[DCTSIZE*6] = DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065), + CONST_BITS + OUT_SHIFT); + + /* Odd part per figure 8 --- note paper omits factor of sqrt(2). + * cK represents cos(K*pi/16). + * i0..i3 in the paper are tmp4..tmp7 here. 
+ */ + + z1 = tmp4 + tmp7; + z2 = tmp5 + tmp6; + z3 = tmp4 + tmp6; + z4 = tmp5 + tmp7; + z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */ + + tmp4 = MULTIPLY(tmp4, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */ + tmp5 = MULTIPLY(tmp5, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */ + tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */ + tmp7 = MULTIPLY(tmp7, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */ + z1 = MULTIPLY(z1, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */ + z2 = MULTIPLY(z2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */ + z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */ + z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */ + + z3 += z5; + z4 += z5; + + dataptr[DCTSIZE*7] = DESCALE(tmp4 + z1 + z3, CONST_BITS + OUT_SHIFT); + dataptr[DCTSIZE*5] = DESCALE(tmp5 + z2 + z4, CONST_BITS + OUT_SHIFT); + dataptr[DCTSIZE*3] = DESCALE(tmp6 + z2 + z3, CONST_BITS + OUT_SHIFT); + dataptr[DCTSIZE*1] = DESCALE(tmp7 + z1 + z4, CONST_BITS + OUT_SHIFT); + + dataptr++; /* advance pointer to next column */ + } +} + +/* + * The secret of DCT2-4-8 is really simple -- you do the usual 1-DCT + * on the rows and then, instead of doing even and odd, part on the colums + * you do even part two times. + */ +GLOBAL(void) +FUNC(ff_fdct248_islow)(DCTELEM *data) +{ + int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + int tmp10, tmp11, tmp12, tmp13; + int z1; + DCTELEM *dataptr; + int ctr; + + FUNC(row_fdct)(data); + + /* Pass 2: process columns. + * We remove the PASS1_BITS scaling, but leave the results scaled up + * by an overall factor of 8. + */ + + dataptr = data; + for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { + tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*1]; + tmp1 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*3]; + tmp2 = dataptr[DCTSIZE*4] + dataptr[DCTSIZE*5]; + tmp3 = dataptr[DCTSIZE*6] + dataptr[DCTSIZE*7]; + tmp4 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*1]; + tmp5 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*3]; + tmp6 = dataptr[DCTSIZE*4] - dataptr[DCTSIZE*5]; + tmp7 = dataptr[DCTSIZE*6] - dataptr[DCTSIZE*7]; + + tmp10 = tmp0 + tmp3; + tmp11 = tmp1 + tmp2; + tmp12 = tmp1 - tmp2; + tmp13 = tmp0 - tmp3; + + dataptr[DCTSIZE*0] = DESCALE(tmp10 + tmp11, OUT_SHIFT); + dataptr[DCTSIZE*4] = DESCALE(tmp10 - tmp11, OUT_SHIFT); + + z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); + dataptr[DCTSIZE*2] = DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865), + CONST_BITS+OUT_SHIFT); + dataptr[DCTSIZE*6] = DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065), + CONST_BITS+OUT_SHIFT); + + tmp10 = tmp4 + tmp7; + tmp11 = tmp5 + tmp6; + tmp12 = tmp5 - tmp6; + tmp13 = tmp4 - tmp7; + + dataptr[DCTSIZE*1] = DESCALE(tmp10 + tmp11, OUT_SHIFT); + dataptr[DCTSIZE*5] = DESCALE(tmp10 - tmp11, OUT_SHIFT); + + z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); + dataptr[DCTSIZE*3] = DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865), + CONST_BITS + OUT_SHIFT); + dataptr[DCTSIZE*7] = DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065), + CONST_BITS + OUT_SHIFT); + + dataptr++; /* advance pointer to next column */ + } +} diff --git a/libavcodec/jpeglsdec.c b/libavcodec/jpeglsdec.c index 7278e020da..8ea2cb5bdb 100644 --- a/libavcodec/jpeglsdec.c +++ b/libavcodec/jpeglsdec.c @@ -364,14 +364,13 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transfor AVCodec ff_jpegls_decoder = { - "jpegls", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_JPEGLS, - sizeof(MJpegDecodeContext), - ff_mjpeg_decode_init, - NULL, - ff_mjpeg_decode_end, - ff_mjpeg_decode_frame, - CODEC_CAP_DR1, + .name = "jpegls", + .type = 
AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_JPEGLS, + .priv_data_size = sizeof(MJpegDecodeContext), + .init = ff_mjpeg_decode_init, + .close = ff_mjpeg_decode_end, + .decode = ff_mjpeg_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("JPEG-LS"), }; diff --git a/libavcodec/kgv1dec.c b/libavcodec/kgv1dec.c index c364cfc593..398b8af0d5 100644 --- a/libavcodec/kgv1dec.c +++ b/libavcodec/kgv1dec.c @@ -166,14 +166,12 @@ static av_cold int decode_end(AVCodecContext *avctx) } AVCodec ff_kgv1_decoder = { - "kgv1", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_KGV1, - sizeof(KgvContext), - decode_init, - NULL, - decode_end, - decode_frame, - .max_lowres = 1, + .name = "kgv1", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_KGV1, + .priv_data_size = sizeof(KgvContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, .long_name = NULL_IF_CONFIG_SMALL("Kega Game Video"), }; diff --git a/libavcodec/kmvc.c b/libavcodec/kmvc.c index 9ea18e87d6..aa0b55bd4a 100644 --- a/libavcodec/kmvc.c +++ b/libavcodec/kmvc.c @@ -403,14 +403,13 @@ static av_cold int decode_end(AVCodecContext * avctx) } AVCodec ff_kmvc_decoder = { - "kmvc", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_KMVC, - sizeof(KmvcContext), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, + .name = "kmvc", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_KMVC, + .priv_data_size = sizeof(KmvcContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Karl Morton's video codec"), }; diff --git a/libavcodec/lagarith.c b/libavcodec/lagarith.c index 02d3533b0c..a632e52b0d 100644 --- a/libavcodec/lagarith.c +++ b/libavcodec/lagarith.c @@ -32,25 +32,25 @@ #include "lagarithrac.h" enum LagarithFrameType { - FRAME_RAW = 1, /*!< uncompressed */ - FRAME_U_RGB24 = 2, /*!< unaligned RGB24 */ - FRAME_ARITH_YUY2 = 3, /*!< arithmetic coded YUY2 */ - FRAME_ARITH_RGB24 = 4, /*!< arithmetic coded RGB24 */ - FRAME_SOLID_GRAY = 5, /*!< solid grayscale color frame */ - FRAME_SOLID_COLOR = 6, /*!< solid non-grayscale color frame */ - FRAME_OLD_ARITH_RGB = 7, /*!< obsolete arithmetic coded RGB (no longer encoded by upstream since version 1.1.0) */ - FRAME_ARITH_RGBA = 8, /*!< arithmetic coded RGBA */ - FRAME_SOLID_RGBA = 9, /*!< solid RGBA color frame */ - FRAME_ARITH_YV12 = 10, /*!< arithmetic coded YV12 */ - FRAME_REDUCED_RES = 11, /*!< reduced resolution YV12 frame */ + FRAME_RAW = 1, /**< uncompressed */ + FRAME_U_RGB24 = 2, /**< unaligned RGB24 */ + FRAME_ARITH_YUY2 = 3, /**< arithmetic coded YUY2 */ + FRAME_ARITH_RGB24 = 4, /**< arithmetic coded RGB24 */ + FRAME_SOLID_GRAY = 5, /**< solid grayscale color frame */ + FRAME_SOLID_COLOR = 6, /**< solid non-grayscale color frame */ + FRAME_OLD_ARITH_RGB = 7, /**< obsolete arithmetic coded RGB (no longer encoded by upstream since version 1.1.0) */ + FRAME_ARITH_RGBA = 8, /**< arithmetic coded RGBA */ + FRAME_SOLID_RGBA = 9, /**< solid RGBA color frame */ + FRAME_ARITH_YV12 = 10, /**< arithmetic coded YV12 */ + FRAME_REDUCED_RES = 11, /**< reduced resolution YV12 frame */ }; typedef struct LagarithContext { AVCodecContext *avctx; AVFrame picture; DSPContext dsp; - int zeros; /*!< number of consecutive zero bytes encountered */ - int zeros_rem; /*!< number of zero bytes remaining to output */ + int zeros; /**< number of consecutive zero bytes encountered */ + int zeros_rem; /**< number of zero bytes remaining to output */ } LagarithContext; /** @@ -509,14 +509,13 @@ static av_cold int 
lag_decode_end(AVCodecContext *avctx) } AVCodec ff_lagarith_decoder = { - "lagarith", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_LAGARITH, - sizeof(LagarithContext), - lag_decode_init, - NULL, - lag_decode_end, - lag_decode_frame, - CODEC_CAP_DR1, + .name = "lagarith", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_LAGARITH, + .priv_data_size = sizeof(LagarithContext), + .init = lag_decode_init, + .close = lag_decode_end, + .decode = lag_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Lagarith lossless"), }; diff --git a/libavcodec/lagarithrac.h b/libavcodec/lagarithrac.h index 2cb7323076..8c78538f21 100644 --- a/libavcodec/lagarithrac.h +++ b/libavcodec/lagarithrac.h @@ -40,15 +40,15 @@ typedef struct lag_rac { AVCodecContext *avctx; unsigned low; unsigned range; - unsigned scale; /*!< Number of bits of precision in range. */ - unsigned hash_shift; /*!< Number of bits to shift to calculate hash for radix search. */ + unsigned scale; /**< Number of bits of precision in range. */ + unsigned hash_shift; /**< Number of bits to shift to calculate hash for radix search. */ - const uint8_t *bytestream_start; /*!< Start of input bytestream. */ - const uint8_t *bytestream; /*!< Current position in input bytestream. */ - const uint8_t *bytestream_end; /*!< End position of input bytestream. */ + const uint8_t *bytestream_start; /**< Start of input bytestream. */ + const uint8_t *bytestream; /**< Current position in input bytestream. */ + const uint8_t *bytestream_end; /**< End position of input bytestream. */ - uint32_t prob[258]; /*!< Table of cumulative probability for each symbol. */ - uint8_t range_hash[256]; /*!< Hash table mapping upper byte to approximate symbol. */ + uint32_t prob[258]; /**< Table of cumulative probability for each symbol. */ + uint8_t range_hash[256]; /**< Hash table mapping upper byte to approximate symbol. 
*/ } lag_rac; void lag_rac_init(lag_rac *l, GetBitContext *gb, int length); diff --git a/libavcodec/lcldec.c b/libavcodec/lcldec.c index 57735ac6ff..e288fc3f63 100644 --- a/libavcodec/lcldec.c +++ b/libavcodec/lcldec.c @@ -73,8 +73,8 @@ typedef struct LclDecContext { /** - * \param srcptr compressed source buffer, must be padded with at least 5 extra bytes - * \param destptr must be padded sufficiently for av_memcpy_backptr + * @param srcptr compressed source buffer, must be padded with at least 5 extra bytes + * @param destptr must be padded sufficiently for av_memcpy_backptr */ static unsigned int mszh_decomp(const unsigned char * srcptr, int srclen, unsigned char * destptr, unsigned int destsize) { @@ -119,11 +119,11 @@ static unsigned int mszh_decomp(const unsigned char * srcptr, int srclen, unsign #if CONFIG_ZLIB_DECODER /** - * \brief decompress a zlib-compressed data block into decomp_buf - * \param src compressed input buffer - * \param src_len data length in input buffer - * \param offset offset in decomp_buf - * \param expected expected decompressed length + * @brief decompress a zlib-compressed data block into decomp_buf + * @param src compressed input buffer + * @param src_len data length in input buffer + * @param offset offset in decomp_buf + * @param expected expected decompressed length */ static int zlib_decomp(AVCodecContext *avctx, const uint8_t *src, int src_len, int offset, int expected) { @@ -611,30 +611,28 @@ static av_cold int decode_end(AVCodecContext *avctx) #if CONFIG_MSZH_DECODER AVCodec ff_mszh_decoder = { - "mszh", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MSZH, - sizeof(LclDecContext), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, + .name = "mszh", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MSZH, + .priv_data_size = sizeof(LclDecContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("LCL (LossLess Codec Library) MSZH"), }; #endif #if CONFIG_ZLIB_DECODER AVCodec ff_zlib_decoder = { - "zlib", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_ZLIB, - sizeof(LclDecContext), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, + .name = "zlib", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_ZLIB, + .priv_data_size = sizeof(LclDecContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("LCL (LossLess Codec Library) ZLIB"), }; #endif diff --git a/libavcodec/lclenc.c b/libavcodec/lclenc.c index 178fe0ae26..9f66960910 100644 --- a/libavcodec/lclenc.c +++ b/libavcodec/lclenc.c @@ -171,13 +171,13 @@ static av_cold int encode_end(AVCodecContext *avctx) } AVCodec ff_zlib_encoder = { - "zlib", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_ZLIB, - sizeof(LclEncContext), - encode_init, - encode_frame, - encode_end, + .name = "zlib", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_ZLIB, + .priv_data_size = sizeof(LclEncContext), + .init = encode_init, + .encode = encode_frame, + .close = encode_end, .pix_fmts = (const enum PixelFormat[]) { PIX_FMT_BGR24, PIX_FMT_NONE }, .long_name = NULL_IF_CONFIG_SMALL("LCL (LossLess Codec Library) ZLIB"), }; diff --git a/libavcodec/libdiracdec.c b/libavcodec/libdiracdec.c index fb6ff45f0a..24a4a06929 100644 --- a/libavcodec/libdiracdec.c +++ b/libavcodec/libdiracdec.c @@ -195,15 +195,14 @@ static void libdirac_flush(AVCodecContext *avccontext) AVCodec ff_libdirac_decoder = { - "libdirac", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_DIRAC, - 
sizeof(FfmpegDiracDecoderParams), - libdirac_decode_init, - NULL, - libdirac_decode_close, - libdirac_decode_frame, - CODEC_CAP_DELAY, + .name = "libdirac", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_DIRAC, + .priv_data_size = sizeof(FfmpegDiracDecoderParams), + .init = libdirac_decode_init, + .close = libdirac_decode_close, + .decode = libdirac_decode_frame, + .capabilities = CODEC_CAP_DELAY, .flush = libdirac_flush, .long_name = NULL_IF_CONFIG_SMALL("libdirac Dirac 2.2"), }; diff --git a/libavcodec/libdiracenc.c b/libavcodec/libdiracenc.c index a9cc6803b9..8bf0da4948 100644 --- a/libavcodec/libdiracenc.c +++ b/libavcodec/libdiracenc.c @@ -392,13 +392,13 @@ static av_cold int libdirac_encode_close(AVCodecContext *avccontext) AVCodec ff_libdirac_encoder = { - "libdirac", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_DIRAC, - sizeof(FfmpegDiracEncoderParams), - libdirac_encode_init, - libdirac_encode_frame, - libdirac_encode_close, + .name = "libdirac", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_DIRAC, + .priv_data_size = sizeof(FfmpegDiracEncoderParams), + .init = libdirac_encode_init, + .encode = libdirac_encode_frame, + .close = libdirac_encode_close, .capabilities = CODEC_CAP_DELAY, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("libdirac Dirac 2.2"), diff --git a/libavcodec/libfaac.c b/libavcodec/libfaac.c index 2acc682581..31dc1a41ed 100644 --- a/libavcodec/libfaac.c +++ b/libavcodec/libfaac.c @@ -165,13 +165,13 @@ static const AVProfile profiles[] = { }; AVCodec ff_libfaac_encoder = { - "libfaac", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_AAC, - sizeof(FaacAudioContext), - Faac_encode_init, - Faac_encode_frame, - Faac_encode_close, + .name = "libfaac", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_AAC, + .priv_data_size = sizeof(FaacAudioContext), + .init = Faac_encode_init, + .encode = Faac_encode_frame, + .close = Faac_encode_close, .capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY, .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("libfaac AAC (Advanced Audio Codec)"), diff --git a/libavcodec/libgsm.c b/libavcodec/libgsm.c index 1f76f82d55..92f1e98cdd 100644 --- a/libavcodec/libgsm.c +++ b/libavcodec/libgsm.c @@ -113,25 +113,23 @@ static int libgsm_encode_frame(AVCodecContext *avctx, AVCodec ff_libgsm_encoder = { - "libgsm", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_GSM, - 0, - libgsm_init, - libgsm_encode_frame, - libgsm_close, + .name = "libgsm", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_GSM, + .init = libgsm_init, + .encode = libgsm_encode_frame, + .close = libgsm_close, .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("libgsm GSM"), }; AVCodec ff_libgsm_ms_encoder = { - "libgsm_ms", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_GSM_MS, - 0, - libgsm_init, - libgsm_encode_frame, - libgsm_close, + .name = "libgsm_ms", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_GSM_MS, + .init = libgsm_init, + .encode = libgsm_encode_frame, + .close = libgsm_close, .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("libgsm GSM Microsoft variant"), }; @@ -139,7 +137,7 @@ AVCodec ff_libgsm_ms_encoder = { static int libgsm_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { - const uint8_t *buf = avpkt->data; + uint8_t *buf = avpkt->data; int buf_size = avpkt->size; *data_size = 0; /* In case 
of error */ if(buf_size < avctx->block_align) return -1; @@ -157,25 +155,21 @@ static int libgsm_decode_frame(AVCodecContext *avctx, } AVCodec ff_libgsm_decoder = { - "libgsm", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_GSM, - 0, - libgsm_init, - NULL, - libgsm_close, - libgsm_decode_frame, + .name = "libgsm", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_GSM, + .init = libgsm_init, + .close = libgsm_close, + .decode = libgsm_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("libgsm GSM"), }; AVCodec ff_libgsm_ms_decoder = { - "libgsm_ms", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_GSM_MS, - 0, - libgsm_init, - NULL, - libgsm_close, - libgsm_decode_frame, + .name = "libgsm_ms", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_GSM_MS, + .init = libgsm_init, + .close = libgsm_close, + .decode = libgsm_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("libgsm GSM Microsoft variant"), }; diff --git a/libavcodec/libmp3lame.c b/libavcodec/libmp3lame.c index 05893830c1..349363eda1 100644 --- a/libavcodec/libmp3lame.c +++ b/libavcodec/libmp3lame.c @@ -269,13 +269,13 @@ static av_cold int MP3lame_encode_close(AVCodecContext *avctx) AVCodec ff_libmp3lame_encoder = { - "libmp3lame", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_MP3, - sizeof(Mp3AudioContext), - MP3lame_encode_init, - MP3lame_encode_frame, - MP3lame_encode_close, + .name = "libmp3lame", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_MP3, + .priv_data_size = sizeof(Mp3AudioContext), + .init = MP3lame_encode_init, + .encode = MP3lame_encode_frame, + .close = MP3lame_encode_close, .capabilities= CODEC_CAP_DELAY, .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16, #if 2147483647 == INT_MAX diff --git a/libavcodec/libopencore-amr.c b/libavcodec/libopencore-amr.c index 73abd758a1..bef60799f1 100644 --- a/libavcodec/libopencore-amr.c +++ b/libavcodec/libopencore-amr.c @@ -88,7 +88,7 @@ typedef struct AMRContext { } AMRContext; static const AVOption options[] = { - { "dtx", "Allow DTX (generate comfort noise)", offsetof(AMRContext, enc_dtx), FF_OPT_TYPE_INT, 0, 0, 1, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM }, + { "dtx", "Allow DTX (generate comfort noise)", offsetof(AMRContext, enc_dtx), FF_OPT_TYPE_INT, { 0 }, 0, 1, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM }, { NULL } }; @@ -158,14 +158,13 @@ static int amr_nb_decode_frame(AVCodecContext *avctx, void *data, } AVCodec ff_libopencore_amrnb_decoder = { - "libopencore_amrnb", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_AMR_NB, - sizeof(AMRContext), - amr_nb_decode_init, - NULL, - amr_nb_decode_close, - amr_nb_decode_frame, + .name = "libopencore_amrnb", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_AMR_NB, + .priv_data_size = sizeof(AMRContext), + .init = amr_nb_decode_init, + .close = amr_nb_decode_close, + .decode = amr_nb_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("OpenCORE Adaptive Multi-Rate (AMR) Narrow-Band"), }; @@ -230,14 +229,13 @@ static int amr_nb_encode_frame(AVCodecContext *avctx, } AVCodec ff_libopencore_amrnb_encoder = { - "libopencore_amrnb", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_AMR_NB, - sizeof(AMRContext), - amr_nb_encode_init, - amr_nb_encode_frame, - amr_nb_encode_close, - NULL, + .name = "libopencore_amrnb", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_AMR_NB, + .priv_data_size = sizeof(AMRContext), + .init = amr_nb_encode_init, + .encode = amr_nb_encode_frame, + .close = amr_nb_encode_close, .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("OpenCORE Adaptive Multi-Rate (AMR) Narrow-Band"), .priv_class = &class, 
@@ -308,14 +306,13 @@ static int amr_wb_decode_close(AVCodecContext *avctx) } AVCodec ff_libopencore_amrwb_decoder = { - "libopencore_amrwb", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_AMR_WB, - sizeof(AMRWBContext), - amr_wb_decode_init, - NULL, - amr_wb_decode_close, - amr_wb_decode_frame, + .name = "libopencore_amrwb", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_AMR_WB, + .priv_data_size = sizeof(AMRWBContext), + .init = amr_wb_decode_init, + .close = amr_wb_decode_close, + .decode = amr_wb_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("OpenCORE Adaptive Multi-Rate (AMR) Wide-Band"), }; diff --git a/libavcodec/libopenjpeg.c b/libavcodec/libopenjpeg.c index 39747e78ea..67201a2594 100644 --- a/libavcodec/libopenjpeg.c +++ b/libavcodec/libopenjpeg.c @@ -62,7 +62,7 @@ static int libopenjpeg_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { - const uint8_t *buf = avpkt->data; + uint8_t *buf = avpkt->data; int buf_size = avpkt->size; LibOpenJPEGContext *ctx = avctx->priv_data; AVFrame *picture = &ctx->image, *output = data; diff --git a/libavcodec/libschroedingerdec.c b/libavcodec/libschroedingerdec.c index 7603f7e15f..2c019b2fa6 100644 --- a/libavcodec/libschroedingerdec.c +++ b/libavcodec/libschroedingerdec.c @@ -208,7 +208,6 @@ static int libschroedinger_decode_frame(AVCodecContext *avccontext, FfmpegSchroDecoderParams *p_schro_params = avccontext->priv_data; SchroDecoder *decoder = p_schro_params->decoder; - SchroVideoFormat *format; AVPicture *picture = data; SchroBuffer *enc_buf; SchroFrame* frame; @@ -240,7 +239,6 @@ static int libschroedinger_decode_frame(AVCodecContext *avccontext, go = 1; } else outer = 0; - format = p_schro_params->format; while (go) { /* Parse data and process result. */ @@ -347,15 +345,14 @@ static void libschroedinger_flush(AVCodecContext *avccontext) } AVCodec ff_libschroedinger_decoder = { - "libschroedinger", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_DIRAC, - sizeof(FfmpegSchroDecoderParams), - libschroedinger_decode_init, - NULL, - libschroedinger_decode_close, - libschroedinger_decode_frame, - CODEC_CAP_DELAY, + .name = "libschroedinger", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_DIRAC, + .priv_data_size = sizeof(FfmpegSchroDecoderParams), + .init = libschroedinger_decode_init, + .close = libschroedinger_decode_close, + .decode = libschroedinger_decode_frame, + .capabilities = CODEC_CAP_DELAY, .flush = libschroedinger_flush, .long_name = NULL_IF_CONFIG_SMALL("libschroedinger Dirac 2.2"), }; diff --git a/libavcodec/libschroedingerenc.c b/libavcodec/libschroedingerenc.c index 4f42d28b3b..f36b90e4eb 100644 --- a/libavcodec/libschroedingerenc.c +++ b/libavcodec/libschroedingerenc.c @@ -423,13 +423,13 @@ static int libschroedinger_encode_close(AVCodecContext *avccontext) AVCodec ff_libschroedinger_encoder = { - "libschroedinger", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_DIRAC, - sizeof(FfmpegSchroEncoderParams), - libschroedinger_encode_init, - libschroedinger_encode_frame, - libschroedinger_encode_close, + .name = "libschroedinger", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_DIRAC, + .priv_data_size = sizeof(FfmpegSchroEncoderParams), + .init = libschroedinger_encode_init, + .encode = libschroedinger_encode_frame, + .close = libschroedinger_encode_close, .capabilities = CODEC_CAP_DELAY, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("libschroedinger Dirac 2.2"), diff --git a/libavcodec/libspeexdec.c b/libavcodec/libspeexdec.c index 
1caf738536..7ee53b04e5 100644 --- a/libavcodec/libspeexdec.c +++ b/libavcodec/libspeexdec.c @@ -96,7 +96,7 @@ static int libspeex_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { - const uint8_t *buf = avpkt->data; + uint8_t *buf = avpkt->data; int buf_size = avpkt->size; LibSpeexContext *s = avctx->priv_data; int16_t *output = data, *end; @@ -139,13 +139,12 @@ static av_cold int libspeex_decode_close(AVCodecContext *avctx) } AVCodec ff_libspeex_decoder = { - "libspeex", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_SPEEX, - sizeof(LibSpeexContext), - libspeex_decode_init, - NULL, - libspeex_decode_close, - libspeex_decode_frame, + .name = "libspeex", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_SPEEX, + .priv_data_size = sizeof(LibSpeexContext), + .init = libspeex_decode_init, + .close = libspeex_decode_close, + .decode = libspeex_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("libspeex Speex"), }; diff --git a/libavcodec/libvo-aacenc.c b/libavcodec/libvo-aacenc.c index 0efb79b1c1..c99b53f225 100644 --- a/libavcodec/libvo-aacenc.c +++ b/libavcodec/libvo-aacenc.c @@ -116,14 +116,13 @@ static int aac_encode_frame(AVCodecContext *avctx, } AVCodec ff_libvo_aacenc_encoder = { - "libvo_aacenc", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_AAC, - sizeof(AACContext), - aac_encode_init, - aac_encode_frame, - aac_encode_close, - NULL, + .name = "libvo_aacenc", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_AAC, + .priv_data_size = sizeof(AACContext), + .init = aac_encode_init, + .encode = aac_encode_frame, + .close = aac_encode_close, .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("Android VisualOn AAC"), }; diff --git a/libavcodec/libvo-amrwbenc.c b/libavcodec/libvo-amrwbenc.c index ec23aeb7f8..c7f827fc19 100644 --- a/libavcodec/libvo-amrwbenc.c +++ b/libavcodec/libvo-amrwbenc.c @@ -34,7 +34,7 @@ typedef struct AMRWBContext { } AMRWBContext; static const AVOption options[] = { - { "dtx", "Allow DTX (generate comfort noise)", offsetof(AMRWBContext, allow_dtx), FF_OPT_TYPE_INT, 0, 0, 1, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM }, + { "dtx", "Allow DTX (generate comfort noise)", offsetof(AMRWBContext, allow_dtx), FF_OPT_TYPE_INT, { 0 }, 0, 1, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM }, { NULL } }; @@ -118,14 +118,13 @@ static int amr_wb_encode_frame(AVCodecContext *avctx, } AVCodec ff_libvo_amrwbenc_encoder = { - "libvo_amrwbenc", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_AMR_WB, - sizeof(AMRWBContext), - amr_wb_encode_init, - amr_wb_encode_frame, - amr_wb_encode_close, - NULL, + .name = "libvo_amrwbenc", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_AMR_WB, + .priv_data_size = sizeof(AMRWBContext), + .init = amr_wb_encode_init, + .encode = amr_wb_encode_frame, + .close = amr_wb_encode_close, .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("Android VisualOn Adaptive Multi-Rate " "(AMR) Wide-Band"), diff --git a/libavcodec/libvorbis.c b/libavcodec/libvorbis.c index bc219ded9b..4e05268439 100644 --- a/libavcodec/libvorbis.c +++ b/libavcodec/libvorbis.c @@ -30,6 +30,7 @@ #include "avcodec.h" #include "bytestream.h" #include "vorbis.h" +#include "libavutil/mathematics.h" #undef NDEBUG #include <assert.h> diff --git a/libavcodec/libvpxdec.c b/libavcodec/libvpxdec.c index 29b8eec43e..15329f3f31 100644 --- a/libavcodec/libvpxdec.c +++ b/libavcodec/libvpxdec.c @@ -112,14 +112,12 @@ static av_cold int vp8_free(AVCodecContext *avctx) } AVCodec 
ff_libvpx_decoder = { - "libvpx", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_VP8, - sizeof(VP8Context), - vp8_init, - NULL, /* encode */ - vp8_free, - vp8_decode, - 0, /* capabilities */ + .name = "libvpx", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_VP8, + .priv_data_size = sizeof(VP8Context), + .init = vp8_init, + .close = vp8_free, + .decode = vp8_decode, .long_name = NULL_IF_CONFIG_SMALL("libvpx VP8"), }; diff --git a/libavcodec/libvpxenc.c b/libavcodec/libvpxenc.c index f5c942e0fc..7e7a3012b8 100644 --- a/libavcodec/libvpxenc.c +++ b/libavcodec/libvpxenc.c @@ -31,6 +31,7 @@ #include "avcodec.h" #include "libavutil/base64.h" #include "libavutil/opt.h" +#include "libavutil/mathematics.h" /** * Portion of struct vpx_codec_cx_pkt from vpx_encoder.h. @@ -545,15 +546,14 @@ static int vp8_encode(AVCodecContext *avctx, uint8_t *buf, int buf_size, } AVCodec ff_libvpx_encoder = { - "libvpx", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_VP8, - sizeof(VP8Context), - vp8_init, - vp8_encode, - vp8_free, - NULL, - CODEC_CAP_DELAY, + .name = "libvpx", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_VP8, + .priv_data_size = sizeof(VP8Context), + .init = vp8_init, + .encode = vp8_encode, + .close = vp8_free, + .capabilities = CODEC_CAP_DELAY, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("libvpx VP8"), .priv_class= &class, diff --git a/libavcodec/libx264.c b/libavcodec/libx264.c index cc5b9837f8..670a7802e1 100644 --- a/libavcodec/libx264.c +++ b/libavcodec/libx264.c @@ -198,14 +198,19 @@ static void check_default_settings(AVCodecContext *avctx) } } -#define OPT_STR(opt, param) \ - do { \ - if (param && x264_param_parse(&x4->params, opt, param) < 0) { \ - av_log(avctx, AV_LOG_ERROR, \ - "bad value for '%s': '%s'\n", opt, param); \ - return -1; \ - } \ - } while (0); \ +#define OPT_STR(opt, param) \ + do { \ + int ret; \ + if (param && (ret = x264_param_parse(&x4->params, opt, param)) < 0) { \ + if(ret == X264_PARAM_BAD_NAME) \ + av_log(avctx, AV_LOG_ERROR, \ + "bad option '%s': '%s'\n", opt, param); \ + else \ + av_log(avctx, AV_LOG_ERROR, \ + "bad value for '%s': '%s'\n", opt, param); \ + return -1; \ + } \ + } while (0); static av_cold int X264_init(AVCodecContext *avctx) { diff --git a/libavcodec/libxvidff.c b/libavcodec/libxvidff.c index effd2db158..d8d44bf42b 100644 --- a/libavcodec/libxvidff.c +++ b/libavcodec/libxvidff.c @@ -30,6 +30,7 @@ #include "avcodec.h" #include "libavutil/cpu.h" #include "libavutil/intreadwrite.h" +#include "libavutil/mathematics.h" #include "libxvid_internal.h" #if !HAVE_MKSTEMP #include <fcntl.h> @@ -749,7 +750,7 @@ static int xvid_ff_2pass_before(struct xvid_context *ref, static int xvid_ff_2pass_after(struct xvid_context *ref, xvid_plg_data_t *param) { char *log = ref->twopassbuffer; - char *frame_types = " ipbs"; + const char *frame_types = " ipbs"; char frame_type; /* Quick bounds check */ @@ -809,13 +810,13 @@ int xvid_ff_2pass(void *ref, int cmd, void *p1, void *p2) { * Xvid codec definition for libavcodec. 
*/ AVCodec ff_libxvid_encoder = { - "libxvid", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MPEG4, - sizeof(struct xvid_context), - xvid_encode_init, - xvid_encode_frame, - xvid_encode_close, + .name = "libxvid", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MPEG4, + .priv_data_size = sizeof(struct xvid_context), + .init = xvid_encode_init, + .encode = xvid_encode_frame, + .close = xvid_encode_close, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("libxvidcore MPEG-4 part 2"), }; diff --git a/libavcodec/loco.c b/libavcodec/loco.c index f5807b8f0a..505f566ba6 100644 --- a/libavcodec/loco.c +++ b/libavcodec/loco.c @@ -288,14 +288,13 @@ static av_cold int decode_end(AVCodecContext *avctx){ } AVCodec ff_loco_decoder = { - "loco", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_LOCO, - sizeof(LOCOContext), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, + .name = "loco", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_LOCO, + .priv_data_size = sizeof(LOCOContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("LOCO"), }; diff --git a/libavcodec/lpc.c b/libavcodec/lpc.c index d041cafe85..c27208823d 100644 --- a/libavcodec/lpc.c +++ b/libavcodec/lpc.c @@ -149,10 +149,8 @@ static int estimate_best_order(double *ref, int min_order, int max_order) /** * Calculate LPC coefficients for multiple orders * - * @param use_lpc LPC method for determining coefficients - * 0 = LPC with fixed pre-defined coeffs - * 1 = LPC with coeffs determined by Levinson-Durbin recursion - * 2+ = LPC with coeffs determined by Cholesky factorization using (use_lpc-1) passes. + * @param lpc_type LPC method for determining coefficients, + * see #FFLPCType for details */ int ff_lpc_calc_coefs(LPCContext *s, const int32_t *samples, int blocksize, int min_order, diff --git a/libavcodec/lsp.c b/libavcodec/lsp.c index 98ca490a76..42a32d7971 100644 --- a/libavcodec/lsp.c +++ b/libavcodec/lsp.c @@ -74,9 +74,9 @@ void ff_acelp_lsf2lspd(double *lsp, const float *lsf, int lp_order) } /** - * \brief decodes polynomial coefficients from LSP - * \param f [out] decoded polynomial coefficients (-0x20000000 <= (3.22) <= 0x1fffffff) - * \param lsp LSP coefficients (-0x8000 <= (0.15) <= 0x7fff) + * @brief decodes polynomial coefficients from LSP + * @param f [out] decoded polynomial coefficients (-0x20000000 <= (3.22) <= 0x1fffffff) + * @param lsp LSP coefficients (-0x8000 <= (0.15) <= 0x7fff) */ static void lsp2poly(int* f, const int16_t* lsp, int lp_half_order) { @@ -120,8 +120,8 @@ void ff_acelp_lsp2lpc(int16_t* lp, const int16_t* lsp, int lp_half_order) void ff_amrwb_lsp2lpc(const double *lsp, float *lp, int lp_order) { int lp_half_order = lp_order >> 1; - double buf[lp_half_order + 1]; - double pa[lp_half_order + 1]; + double buf[MAX_LP_HALF_ORDER + 1]; + double pa[MAX_LP_HALF_ORDER + 1]; double *qa = buf + 1; int i,j; @@ -150,11 +150,7 @@ void ff_acelp_lp_decode(int16_t* lp_1st, int16_t* lp_2nd, const int16_t* lsp_2nd /* LSP values for first subframe (3.2.5 of G.729, Equation 24)*/ for(i=0; i<lp_order; i++) -#ifdef G729_BITEXACT - lsp_1st[i] = (lsp_2nd[i] >> 1) + (lsp_prev[i] >> 1); -#else lsp_1st[i] = (lsp_2nd[i] + lsp_prev[i]) >> 1; -#endif ff_acelp_lsp2lpc(lp_1st, lsp_1st, lp_order >> 1); diff --git a/libavcodec/lsp.h b/libavcodec/lsp.h index e3af30d300..46a2d47beb 100644 --- a/libavcodec/lsp.h +++ b/libavcodec/lsp.h @@ -30,12 +30,12 @@ */ /** - * \brief ensure a minimum distance between LSFs - 
* \param[in,out] lsfq LSF to check and adjust - * \param lsfq_min_distance minimum distance between LSFs - * \param lsfq_min minimum allowed LSF value - * \param lsfq_max maximum allowed LSF value - * \param lp_order LP filter order + * @brief ensure a minimum distance between LSFs + * @param[in,out] lsfq LSF to check and adjust + * @param lsfq_min_distance minimum distance between LSFs + * @param lsfq_min minimum allowed LSF value + * @param lsfq_max maximum allowed LSF value + * @param lp_order LP filter order */ void ff_acelp_reorder_lsf(int16_t* lsfq, int lsfq_min_distance, int lsfq_min, int lsfq_max, int lp_order); @@ -53,12 +53,12 @@ void ff_acelp_reorder_lsf(int16_t* lsfq, int lsfq_min_distance, int lsfq_min, in void ff_set_min_dist_lsf(float *lsf, double min_spacing, int size); /** - * \brief Convert LSF to LSP - * \param[out] lsp LSP coefficients (-0x8000 <= (0.15) < 0x8000) - * \param lsf normalized LSF coefficients (0 <= (2.13) < 0x2000 * PI) - * \param lp_order LP filter order + * @brief Convert LSF to LSP + * @param[out] lsp LSP coefficients (-0x8000 <= (0.15) < 0x8000) + * @param lsf normalized LSF coefficients (0 <= (2.13) < 0x2000 * PI) + * @param lp_order LP filter order * - * \remark It is safe to pass the same array into the lsf and lsp parameters. + * @remark It is safe to pass the same array into the lsf and lsp parameters. */ void ff_acelp_lsf2lsp(int16_t *lsp, const int16_t *lsf, int lp_order); @@ -68,10 +68,10 @@ void ff_acelp_lsf2lsp(int16_t *lsp, const int16_t *lsf, int lp_order); void ff_acelp_lsf2lspd(double *lsp, const float *lsf, int lp_order); /** - * \brief LSP to LP conversion (3.2.6 of G.729) - * \param[out] lp decoded LP coefficients (-0x8000 <= (3.12) < 0x8000) - * \param lsp LSP coefficients (-0x8000 <= (0.15) < 0x8000) - * \param lp_half_order LP filter order, divided by 2 + * @brief LSP to LP conversion (3.2.6 of G.729) + * @param[out] lp decoded LP coefficients (-0x8000 <= (3.12) < 0x8000) + * @param lsp LSP coefficients (-0x8000 <= (0.15) < 0x8000) + * @param lp_half_order LP filter order, divided by 2 */ void ff_acelp_lsp2lpc(int16_t* lp, const int16_t* lsp, int lp_half_order); @@ -81,17 +81,17 @@ void ff_acelp_lsp2lpc(int16_t* lp, const int16_t* lsp, int lp_half_order); void ff_amrwb_lsp2lpc(const double *lsp, float *lp, int lp_order); /** - * \brief Interpolate LSP for the first subframe and convert LSP -> LP for both subframes (3.2.5 and 3.2.6 of G.729) - * \param[out] lp_1st decoded LP coefficients for first subframe (-0x8000 <= (3.12) < 0x8000) - * \param[out] lp_2nd decoded LP coefficients for second subframe (-0x8000 <= (3.12) < 0x8000) - * \param lsp_2nd LSP coefficients of the second subframe (-0x8000 <= (0.15) < 0x8000) - * \param lsp_prev LSP coefficients from the second subframe of the previous frame (-0x8000 <= (0.15) < 0x8000) - * \param lp_order LP filter order + * @brief Interpolate LSP for the first subframe and convert LSP -> LP for both subframes (3.2.5 and 3.2.6 of G.729) + * @param[out] lp_1st decoded LP coefficients for first subframe (-0x8000 <= (3.12) < 0x8000) + * @param[out] lp_2nd decoded LP coefficients for second subframe (-0x8000 <= (3.12) < 0x8000) + * @param lsp_2nd LSP coefficients of the second subframe (-0x8000 <= (0.15) < 0x8000) + * @param lsp_prev LSP coefficients from the second subframe of the previous frame (-0x8000 <= (0.15) < 0x8000) + * @param lp_order LP filter order */ void ff_acelp_lp_decode(int16_t* lp_1st, int16_t* lp_2nd, const int16_t* lsp_2nd, const int16_t* lsp_prev, int lp_order); -#define 
MAX_LP_HALF_ORDER 8 +#define MAX_LP_HALF_ORDER 10 #define MAX_LP_ORDER (2*MAX_LP_HALF_ORDER) /** diff --git a/libavcodec/lzw.c b/libavcodec/lzw.c index aa886910be..185a05d6ab 100644 --- a/libavcodec/lzw.c +++ b/libavcodec/lzw.c @@ -24,7 +24,7 @@ * @file * @brief LZW decoding routines * @author Fabrice Bellard - * Modified for use in TIFF by Konstantin Shishkov + * @author modified for use in TIFF by Konstantin Shishkov */ #include "avcodec.h" diff --git a/libavcodec/lzw.h b/libavcodec/lzw.h index 76a5b6752e..115ca4edb4 100644 --- a/libavcodec/lzw.h +++ b/libavcodec/lzw.h @@ -24,7 +24,7 @@ * @file * @brief LZW decoding routines * @author Fabrice Bellard - * Modified for use in TIFF by Konstantin Shishkov + * @author modified for use in TIFF by Konstantin Shishkov */ #ifndef AVCODEC_LZW_H diff --git a/libavcodec/lzwenc.c b/libavcodec/lzwenc.c index 23248a6034..0757d02ab4 100644 --- a/libavcodec/lzwenc.c +++ b/libavcodec/lzwenc.c @@ -20,8 +20,8 @@ */ /** - * LZW encoder * @file + * LZW encoder * @author Bartlomiej Wolowiec */ diff --git a/libavcodec/mace.c b/libavcodec/mace.c index 1b8c9d0836..9f8749110e 100644 --- a/libavcodec/mace.c +++ b/libavcodec/mace.c @@ -280,26 +280,22 @@ static int mace_decode_frame(AVCodecContext *avctx, } AVCodec ff_mace3_decoder = { - "mace3", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_MACE3, - sizeof(MACEContext), - mace_decode_init, - NULL, - NULL, - mace_decode_frame, + .name = "mace3", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_MACE3, + .priv_data_size = sizeof(MACEContext), + .init = mace_decode_init, + .decode = mace_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("MACE (Macintosh Audio Compression/Expansion) 3:1"), }; AVCodec ff_mace6_decoder = { - "mace6", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_MACE6, - sizeof(MACEContext), - mace_decode_init, - NULL, - NULL, - mace_decode_frame, + .name = "mace6", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_MACE6, + .priv_data_size = sizeof(MACEContext), + .init = mace_decode_init, + .decode = mace_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("MACE (Macintosh Audio Compression/Expansion) 6:1"), }; diff --git a/libavcodec/mdec.c b/libavcodec/mdec.c index 5f540f05f2..cf606935a6 100644 --- a/libavcodec/mdec.c +++ b/libavcodec/mdec.c @@ -267,15 +267,14 @@ static av_cold int decode_end(AVCodecContext *avctx){ } AVCodec ff_mdec_decoder = { - "mdec", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MDEC, - sizeof(MDECContext), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS, + .name = "mdec", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MDEC, + .priv_data_size = sizeof(MDECContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS, .long_name= NULL_IF_CONFIG_SMALL("Sony PlayStation MDEC (Motion DECoder)"), .init_thread_copy= ONLY_IF_THREADS_ENABLED(decode_init_thread_copy) }; diff --git a/libavcodec/mimic.c b/libavcodec/mimic.c index ee625d0dbf..1deed0b87f 100644 --- a/libavcodec/mimic.c +++ b/libavcodec/mimic.c @@ -416,15 +416,14 @@ static av_cold int mimic_decode_end(AVCodecContext *avctx) } AVCodec ff_mimic_decoder = { - "mimic", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MIMIC, - sizeof(MimicContext), - mimic_decode_init, - NULL, - mimic_decode_end, - mimic_decode_frame, - CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS, + .name = "mimic", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MIMIC, + .priv_data_size = sizeof(MimicContext), + .init = mimic_decode_init, + .close = mimic_decode_end, + .decode = mimic_decode_frame, + 
.capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS, .long_name = NULL_IF_CONFIG_SMALL("Mimic"), .update_thread_context = ONLY_IF_THREADS_ENABLED(mimic_decode_update_thread_context) }; diff --git a/libavcodec/mjpegbdec.c b/libavcodec/mjpegbdec.c index 5f863433ef..0ad9cb49c9 100644 --- a/libavcodec/mjpegbdec.c +++ b/libavcodec/mjpegbdec.c @@ -146,16 +146,14 @@ read_header: } AVCodec ff_mjpegb_decoder = { - "mjpegb", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MJPEGB, - sizeof(MJpegDecodeContext), - ff_mjpeg_decode_init, - NULL, - ff_mjpeg_decode_end, - mjpegb_decode_frame, - CODEC_CAP_DR1, - NULL, + .name = "mjpegb", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MJPEGB, + .priv_data_size = sizeof(MJpegDecodeContext), + .init = ff_mjpeg_decode_init, + .close = ff_mjpeg_decode_end, + .decode = mjpegb_decode_frame, + .capabilities = CODEC_CAP_DR1, .max_lowres = 3, .long_name = NULL_IF_CONFIG_SMALL("Apple MJPEG-B"), }; diff --git a/libavcodec/mjpegdec.c b/libavcodec/mjpegdec.c index 02c66f504b..4e38f46f7a 100644 --- a/libavcodec/mjpegdec.c +++ b/libavcodec/mjpegdec.c @@ -881,7 +881,7 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, i } } - if (s->restart_interval && show_bits(&s->gb, 8) == 0xFF){/* skip RSTn */ + if (s->restart_interval && show_bits(&s->gb, 8) == 0xFF){ /* skip RSTn */ --s->restart_count; align_get_bits(&s->gb); while(show_bits(&s->gb, 8) == 0xFF) @@ -1258,29 +1258,6 @@ static int mjpeg_decode_com(MJpegDecodeContext *s) return 0; } -#if 0 -static int valid_marker_list[] = -{ - /* 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, a, b, c, d, e, f */ -/* 0 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -/* 1 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -/* 2 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -/* 3 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -/* 4 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -/* 5 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -/* 6 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -/* 7 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -/* 8 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -/* 9 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -/* a */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -/* b */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -/* c */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -/* d */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -/* e */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -/* f */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, -} -#endif - /* return the 8 bit start code value and update the search state. 
Return -1 if no start code found */ static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end) @@ -1505,29 +1482,26 @@ eoi_parser: av_log(avctx, AV_LOG_WARNING, "Found EOI before any SOF, ignoring\n"); break; } - { - if (s->interlaced) { - s->bottom_field ^= 1; - /* if not bottom field, do not output image yet */ - if (s->bottom_field == !s->interlace_polarity) - goto not_the_end; - } - *picture = *s->picture_ptr; - *data_size = sizeof(AVFrame); - - if(!s->lossless){ - picture->quality= FFMAX3(s->qscale[0], s->qscale[1], s->qscale[2]); - picture->qstride= 0; - picture->qscale_table= s->qscale_table; - memset(picture->qscale_table, picture->quality, (s->width+15)/16); - if(avctx->debug & FF_DEBUG_QP) - av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", picture->quality); - picture->quality*= FF_QP2LAMBDA; - } - - goto the_end; + if (s->interlaced) { + s->bottom_field ^= 1; + /* if not bottom field, do not output image yet */ + if (s->bottom_field == !s->interlace_polarity) + goto not_the_end; } - break; + *picture = *s->picture_ptr; + *data_size = sizeof(AVFrame); + + if(!s->lossless){ + picture->quality= FFMAX3(s->qscale[0], s->qscale[1], s->qscale[2]); + picture->qstride= 0; + picture->qscale_table= s->qscale_table; + memset(picture->qscale_table, picture->quality, (s->width+15)/16); + if(avctx->debug & FF_DEBUG_QP) + av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", picture->quality); + picture->quality*= FF_QP2LAMBDA; + } + + goto the_end; case SOS: if (!s->got_picture) { av_log(avctx, AV_LOG_WARNING, "Can not process SOS before SOF, skipping\n"); @@ -1604,31 +1578,27 @@ av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx) } AVCodec ff_mjpeg_decoder = { - "mjpeg", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MJPEG, - sizeof(MJpegDecodeContext), - ff_mjpeg_decode_init, - NULL, - ff_mjpeg_decode_end, - ff_mjpeg_decode_frame, - CODEC_CAP_DR1, - NULL, + .name = "mjpeg", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MJPEG, + .priv_data_size = sizeof(MJpegDecodeContext), + .init = ff_mjpeg_decode_init, + .close = ff_mjpeg_decode_end, + .decode = ff_mjpeg_decode_frame, + .capabilities = CODEC_CAP_DR1, .max_lowres = 3, .long_name = NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"), }; AVCodec ff_thp_decoder = { - "thp", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_THP, - sizeof(MJpegDecodeContext), - ff_mjpeg_decode_init, - NULL, - ff_mjpeg_decode_end, - ff_mjpeg_decode_frame, - CODEC_CAP_DR1, - NULL, + .name = "thp", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_THP, + .priv_data_size = sizeof(MJpegDecodeContext), + .init = ff_mjpeg_decode_init, + .close = ff_mjpeg_decode_end, + .decode = ff_mjpeg_decode_frame, + .capabilities = CODEC_CAP_DR1, .max_lowres = 3, .long_name = NULL_IF_CONFIG_SMALL("Nintendo Gamecube THP video"), }; diff --git a/libavcodec/mjpegenc.c b/libavcodec/mjpegenc.c index b721ab3580..48b84701b6 100644 --- a/libavcodec/mjpegenc.c +++ b/libavcodec/mjpegenc.c @@ -446,13 +446,13 @@ void ff_mjpeg_encode_mb(MpegEncContext *s, DCTELEM block[6][64]) } AVCodec ff_mjpeg_encoder = { - "mjpeg", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MJPEG, - sizeof(MpegEncContext), - MPV_encode_init, - MPV_encode_picture, - MPV_encode_end, + .name = "mjpeg", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MJPEG, + .priv_data_size = sizeof(MpegEncContext), + .init = MPV_encode_init, + .encode = MPV_encode_picture, + .close = MPV_encode_end, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"), }; diff --git a/libavcodec/mlib/dsputil_mlib.c 
b/libavcodec/mlib/dsputil_mlib.c index 1a18a8a223..b5594a9a03 100644 --- a/libavcodec/mlib/dsputil_mlib.c +++ b/libavcodec/mlib/dsputil_mlib.c @@ -421,13 +421,14 @@ static void ff_fdct_mlib(DCTELEM *data) void dsputil_init_mlib(DSPContext* c, AVCodecContext *avctx) { - const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8; + const int high_bit_depth = avctx->bits_per_raw_sample > 8; - c->get_pixels = get_pixels_mlib; c->diff_pixels = diff_pixels_mlib; c->add_pixels_clamped = add_pixels_clamped_mlib; if (!high_bit_depth) { + c->get_pixels = get_pixels_mlib; + c->put_pixels_tab[0][0] = put_pixels16_mlib; c->put_pixels_tab[0][1] = put_pixels16_x2_mlib; c->put_pixels_tab[0][2] = put_pixels16_y2_mlib; diff --git a/libavcodec/mlpdec.c b/libavcodec/mlpdec.c index b13d0795cd..9e59b92342 100644 --- a/libavcodec/mlpdec.c +++ b/libavcodec/mlpdec.c @@ -1179,27 +1179,23 @@ error: } AVCodec ff_mlp_decoder = { - "mlp", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_MLP, - sizeof(MLPDecodeContext), - mlp_decode_init, - NULL, - NULL, - read_access_unit, + .name = "mlp", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_MLP, + .priv_data_size = sizeof(MLPDecodeContext), + .init = mlp_decode_init, + .decode = read_access_unit, .long_name = NULL_IF_CONFIG_SMALL("MLP (Meridian Lossless Packing)"), }; #if CONFIG_TRUEHD_DECODER AVCodec ff_truehd_decoder = { - "truehd", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_TRUEHD, - sizeof(MLPDecodeContext), - mlp_decode_init, - NULL, - NULL, - read_access_unit, + .name = "truehd", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_TRUEHD, + .priv_data_size = sizeof(MLPDecodeContext), + .init = mlp_decode_init, + .decode = read_access_unit, .long_name = NULL_IF_CONFIG_SMALL("TrueHD"), }; #endif /* CONFIG_TRUEHD_DECODER */ diff --git a/libavcodec/mmvideo.c b/libavcodec/mmvideo.c index 707ddc5f7e..0f30e9d35e 100644 --- a/libavcodec/mmvideo.c +++ b/libavcodec/mmvideo.c @@ -215,14 +215,13 @@ static av_cold int mm_decode_end(AVCodecContext *avctx) } AVCodec ff_mmvideo_decoder = { - "mmvideo", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MMVIDEO, - sizeof(MmContext), - mm_decode_init, - NULL, - mm_decode_end, - mm_decode_frame, - CODEC_CAP_DR1, + .name = "mmvideo", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MMVIDEO, + .priv_data_size = sizeof(MmContext), + .init = mm_decode_init, + .close = mm_decode_end, + .decode = mm_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("American Laser Games MM Video"), }; diff --git a/libavcodec/motion-test.c b/libavcodec/motion-test.c index 994b262bc0..85ea1045b4 100644 --- a/libavcodec/motion-test.c +++ b/libavcodec/motion-test.c @@ -144,7 +144,7 @@ int main(int argc, char **argv) printf("ffmpeg motion test\n"); - ctx = avcodec_alloc_context(); + ctx = avcodec_alloc_context3(NULL); ctx->dsp_mask = AV_CPU_FLAG_FORCE; dsputil_init(&cctx, ctx); for (c = 0; c < flags_size; c++) { diff --git a/libavcodec/motion_est.c b/libavcodec/motion_est.c index c12ebf4c7c..2517da5f9d 100644 --- a/libavcodec/motion_est.c +++ b/libavcodec/motion_est.c @@ -248,7 +248,7 @@ static int cmp_internal(MpegEncContext *s, const int x, const int y, const int s } } -/*! 
\brief compares a block (either a full macroblock or a partition thereof) +/** @brief compares a block (either a full macroblock or a partition thereof) against a proposed motion-compensated prediction of that block */ static av_always_inline int cmp(MpegEncContext *s, const int x, const int y, const int subx, const int suby, @@ -374,30 +374,6 @@ int ff_init_me(MpegEncContext *s){ return 0; } -#if 0 -static int pix_dev(uint8_t * pix, int line_size, int mean) -{ - int s, i, j; - - s = 0; - for (i = 0; i < 16; i++) { - for (j = 0; j < 16; j += 8) { - s += FFABS(pix[0]-mean); - s += FFABS(pix[1]-mean); - s += FFABS(pix[2]-mean); - s += FFABS(pix[3]-mean); - s += FFABS(pix[4]-mean); - s += FFABS(pix[5]-mean); - s += FFABS(pix[6]-mean); - s += FFABS(pix[7]-mean); - pix += 8; - } - pix += line_size - 16; - } - return s; -} -#endif - static inline void no_motion_search(MpegEncContext * s, int *mx_ptr, int *my_ptr) { @@ -533,16 +509,16 @@ static inline void set_p_mv_tables(MpegEncContext * s, int mx, int my, int mv4) if(mv4){ int mot_xy= s->block_index[0]; - s->current_picture.motion_val[0][mot_xy ][0]= mx; - s->current_picture.motion_val[0][mot_xy ][1]= my; - s->current_picture.motion_val[0][mot_xy+1][0]= mx; - s->current_picture.motion_val[0][mot_xy+1][1]= my; + s->current_picture.f.motion_val[0][mot_xy ][0] = mx; + s->current_picture.f.motion_val[0][mot_xy ][1] = my; + s->current_picture.f.motion_val[0][mot_xy + 1][0] = mx; + s->current_picture.f.motion_val[0][mot_xy + 1][1] = my; mot_xy += s->b8_stride; - s->current_picture.motion_val[0][mot_xy ][0]= mx; - s->current_picture.motion_val[0][mot_xy ][1]= my; - s->current_picture.motion_val[0][mot_xy+1][0]= mx; - s->current_picture.motion_val[0][mot_xy+1][1]= my; + s->current_picture.f.motion_val[0][mot_xy ][0] = mx; + s->current_picture.f.motion_val[0][mot_xy ][1] = my; + s->current_picture.f.motion_val[0][mot_xy + 1][0] = mx; + s->current_picture.f.motion_val[0][mot_xy + 1][1] = my; } } @@ -615,8 +591,8 @@ static inline int h263_mv4_search(MpegEncContext *s, int mx, int my, int shift) const int mot_stride = s->b8_stride; const int mot_xy = s->block_index[block]; - P_LEFT[0] = s->current_picture.motion_val[0][mot_xy - 1][0]; - P_LEFT[1] = s->current_picture.motion_val[0][mot_xy - 1][1]; + P_LEFT[0] = s->current_picture.f.motion_val[0][mot_xy - 1][0]; + P_LEFT[1] = s->current_picture.f.motion_val[0][mot_xy - 1][1]; if(P_LEFT[0] > (c->xmax<<shift)) P_LEFT[0] = (c->xmax<<shift); @@ -625,10 +601,10 @@ static inline int h263_mv4_search(MpegEncContext *s, int mx, int my, int shift) c->pred_x= pred_x4= P_LEFT[0]; c->pred_y= pred_y4= P_LEFT[1]; } else { - P_TOP[0] = s->current_picture.motion_val[0][mot_xy - mot_stride ][0]; - P_TOP[1] = s->current_picture.motion_val[0][mot_xy - mot_stride ][1]; - P_TOPRIGHT[0] = s->current_picture.motion_val[0][mot_xy - mot_stride + off[block]][0]; - P_TOPRIGHT[1] = s->current_picture.motion_val[0][mot_xy - mot_stride + off[block]][1]; + P_TOP[0] = s->current_picture.f.motion_val[0][mot_xy - mot_stride ][0]; + P_TOP[1] = s->current_picture.f.motion_val[0][mot_xy - mot_stride ][1]; + P_TOPRIGHT[0] = s->current_picture.f.motion_val[0][mot_xy - mot_stride + off[block]][0]; + P_TOPRIGHT[1] = s->current_picture.f.motion_val[0][mot_xy - mot_stride + off[block]][1]; if(P_TOP[1] > (c->ymax<<shift)) P_TOP[1] = (c->ymax<<shift); if(P_TOPRIGHT[0] < (c->xmin<<shift)) P_TOPRIGHT[0]= (c->xmin<<shift); if(P_TOPRIGHT[0] > (c->xmax<<shift)) P_TOPRIGHT[0]= (c->xmax<<shift); @@ -680,8 +656,8 @@ static inline int 
h263_mv4_search(MpegEncContext *s, int mx, int my, int shift) my4_sum+= my4; } - s->current_picture.motion_val[0][ s->block_index[block] ][0]= mx4; - s->current_picture.motion_val[0][ s->block_index[block] ][1]= my4; + s->current_picture.f.motion_val[0][s->block_index[block]][0] = mx4; + s->current_picture.f.motion_val[0][s->block_index[block]][1] = my4; if(mx4 != mx || my4 != my) same=0; } @@ -690,7 +666,7 @@ static inline int h263_mv4_search(MpegEncContext *s, int mx, int my, int shift) return INT_MAX; if(s->dsp.me_sub_cmp[0] != s->dsp.mb_cmp[0]){ - dmin_sum += s->dsp.mb_cmp[0](s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*16*stride, c->scratchpad, stride, 16); + dmin_sum += s->dsp.mb_cmp[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*16*stride, c->scratchpad, stride, 16); } if(c->avctx->mb_cmp&FF_CMP_CHROMA){ @@ -705,15 +681,15 @@ static inline int h263_mv4_search(MpegEncContext *s, int mx, int my, int shift) offset= (s->mb_x*8 + (mx>>1)) + (s->mb_y*8 + (my>>1))*s->uvlinesize; if(s->no_rounding){ - s->dsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad , s->last_picture.data[1] + offset, s->uvlinesize, 8); - s->dsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad+8 , s->last_picture.data[2] + offset, s->uvlinesize, 8); + s->dsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad , s->last_picture.f.data[1] + offset, s->uvlinesize, 8); + s->dsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad + 8, s->last_picture.f.data[2] + offset, s->uvlinesize, 8); }else{ - s->dsp.put_pixels_tab [1][dxy](c->scratchpad , s->last_picture.data[1] + offset, s->uvlinesize, 8); - s->dsp.put_pixels_tab [1][dxy](c->scratchpad+8 , s->last_picture.data[2] + offset, s->uvlinesize, 8); + s->dsp.put_pixels_tab [1][dxy](c->scratchpad , s->last_picture.f.data[1] + offset, s->uvlinesize, 8); + s->dsp.put_pixels_tab [1][dxy](c->scratchpad + 8, s->last_picture.f.data[2] + offset, s->uvlinesize, 8); } - dmin_sum += s->dsp.mb_cmp[1](s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*8*s->uvlinesize, c->scratchpad , s->uvlinesize, 8); - dmin_sum += s->dsp.mb_cmp[1](s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*8*s->uvlinesize, c->scratchpad+8, s->uvlinesize, 8); + dmin_sum += s->dsp.mb_cmp[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*8*s->uvlinesize, c->scratchpad , s->uvlinesize, 8); + dmin_sum += s->dsp.mb_cmp[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*8*s->uvlinesize, c->scratchpad+8, s->uvlinesize, 8); } c->pred_x= mx; @@ -879,7 +855,7 @@ static inline int check_input_motion(MpegEncContext * s, int mb_x, int mb_y, int Picture *p= s->current_picture_ptr; int mb_xy= mb_x + mb_y*s->mb_stride; int xy= 2*mb_x + 2*mb_y*s->b8_stride; - int mb_type= s->current_picture.mb_type[mb_xy]; + int mb_type= s->current_picture.f.mb_type[mb_xy]; int flags= c->flags; int shift= (flags&FLAG_QPEL) + 1; int mask= (1<<shift)-1; @@ -896,8 +872,8 @@ static inline int check_input_motion(MpegEncContext * s, int mb_x, int mb_y, int for(i=0; i<4; i++){ int xy= s->block_index[i]; - clip_input_mv(s, p->motion_val[0][xy], !!IS_INTERLACED(mb_type)); - clip_input_mv(s, p->motion_val[1][xy], !!IS_INTERLACED(mb_type)); + clip_input_mv(s, p->f.motion_val[0][xy], !!IS_INTERLACED(mb_type)); + clip_input_mv(s, p->f.motion_val[1][xy], !!IS_INTERLACED(mb_type)); } if(IS_INTERLACED(mb_type)){ @@ -912,8 +888,8 @@ static inline int check_input_motion(MpegEncContext * s, int mb_x, int mb_y, int } if(USES_LIST(mb_type, 0)){ - int field_select0= p->ref_index[0][4*mb_xy ]; - int field_select1= p->ref_index[0][4*mb_xy+2]; + int field_select0= 
p->f.ref_index[0][4*mb_xy ]; + int field_select1= p->f.ref_index[0][4*mb_xy+2]; assert(field_select0==0 ||field_select0==1); assert(field_select1==0 ||field_select1==1); init_interlaced_ref(s, 0); @@ -921,46 +897,46 @@ static inline int check_input_motion(MpegEncContext * s, int mb_x, int mb_y, int if(p_type){ s->p_field_select_table[0][mb_xy]= field_select0; s->p_field_select_table[1][mb_xy]= field_select1; - *(uint32_t*)s->p_field_mv_table[0][field_select0][mb_xy]= *(uint32_t*)p->motion_val[0][xy ]; - *(uint32_t*)s->p_field_mv_table[1][field_select1][mb_xy]= *(uint32_t*)p->motion_val[0][xy2]; + *(uint32_t*)s->p_field_mv_table[0][field_select0][mb_xy] = *(uint32_t*)p->f.motion_val[0][xy ]; + *(uint32_t*)s->p_field_mv_table[1][field_select1][mb_xy] = *(uint32_t*)p->f.motion_val[0][xy2]; s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTER_I; }else{ s->b_field_select_table[0][0][mb_xy]= field_select0; s->b_field_select_table[0][1][mb_xy]= field_select1; - *(uint32_t*)s->b_field_mv_table[0][0][field_select0][mb_xy]= *(uint32_t*)p->motion_val[0][xy ]; - *(uint32_t*)s->b_field_mv_table[0][1][field_select1][mb_xy]= *(uint32_t*)p->motion_val[0][xy2]; + *(uint32_t*)s->b_field_mv_table[0][0][field_select0][mb_xy] = *(uint32_t*)p->f.motion_val[0][xy ]; + *(uint32_t*)s->b_field_mv_table[0][1][field_select1][mb_xy] = *(uint32_t*)p->f.motion_val[0][xy2]; s->mb_type[mb_xy]= CANDIDATE_MB_TYPE_FORWARD_I; } - x= p->motion_val[0][xy ][0]; - y= p->motion_val[0][xy ][1]; + x = p->f.motion_val[0][xy ][0]; + y = p->f.motion_val[0][xy ][1]; d = cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 8, field_select0, 0, cmpf, chroma_cmpf, flags); - x= p->motion_val[0][xy2][0]; - y= p->motion_val[0][xy2][1]; + x = p->f.motion_val[0][xy2][0]; + y = p->f.motion_val[0][xy2][1]; d+= cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 8, field_select1, 1, cmpf, chroma_cmpf, flags); } if(USES_LIST(mb_type, 1)){ - int field_select0= p->ref_index[1][4*mb_xy ]; - int field_select1= p->ref_index[1][4*mb_xy+2]; + int field_select0 = p->f.ref_index[1][4 * mb_xy ]; + int field_select1 = p->f.ref_index[1][4 * mb_xy + 2]; assert(field_select0==0 ||field_select0==1); assert(field_select1==0 ||field_select1==1); init_interlaced_ref(s, 2); s->b_field_select_table[1][0][mb_xy]= field_select0; s->b_field_select_table[1][1][mb_xy]= field_select1; - *(uint32_t*)s->b_field_mv_table[1][0][field_select0][mb_xy]= *(uint32_t*)p->motion_val[1][xy ]; - *(uint32_t*)s->b_field_mv_table[1][1][field_select1][mb_xy]= *(uint32_t*)p->motion_val[1][xy2]; + *(uint32_t*)s->b_field_mv_table[1][0][field_select0][mb_xy] = *(uint32_t*)p->f.motion_val[1][xy ]; + *(uint32_t*)s->b_field_mv_table[1][1][field_select1][mb_xy] = *(uint32_t*)p->f.motion_val[1][xy2]; if(USES_LIST(mb_type, 0)){ s->mb_type[mb_xy]= CANDIDATE_MB_TYPE_BIDIR_I; }else{ s->mb_type[mb_xy]= CANDIDATE_MB_TYPE_BACKWARD_I; } - x= p->motion_val[1][xy ][0]; - y= p->motion_val[1][xy ][1]; + x = p->f.motion_val[1][xy ][0]; + y = p->f.motion_val[1][xy ][1]; d = cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 8, field_select0+2, 0, cmpf, chroma_cmpf, flags); - x= p->motion_val[1][xy2][0]; - y= p->motion_val[1][xy2][1]; + x = p->f.motion_val[1][xy2][0]; + y = p->f.motion_val[1][xy2][1]; d+= cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 8, field_select1+2, 1, cmpf, chroma_cmpf, flags); //FIXME bidir scores } @@ -976,33 +952,33 @@ static inline int check_input_motion(MpegEncContext * s, int mb_x, int mb_y, int init_mv4_ref(c); for(i=0; i<4; i++){ xy= s->block_index[i]; - x= p->motion_val[0][xy][0]; - y= p->motion_val[0][xy][1]; + 
x= p->f.motion_val[0][xy][0]; + y= p->f.motion_val[0][xy][1]; d+= cmp(s, x>>shift, y>>shift, x&mask, y&mask, 1, 8, i, i, cmpf, chroma_cmpf, flags); } s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTER4V; }else{ if(USES_LIST(mb_type, 0)){ if(p_type){ - *(uint32_t*)s->p_mv_table[mb_xy]= *(uint32_t*)p->motion_val[0][xy]; + *(uint32_t*)s->p_mv_table[mb_xy] = *(uint32_t*)p->f.motion_val[0][xy]; s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTER; }else if(USES_LIST(mb_type, 1)){ - *(uint32_t*)s->b_bidir_forw_mv_table[mb_xy]= *(uint32_t*)p->motion_val[0][xy]; - *(uint32_t*)s->b_bidir_back_mv_table[mb_xy]= *(uint32_t*)p->motion_val[1][xy]; + *(uint32_t*)s->b_bidir_forw_mv_table[mb_xy] = *(uint32_t*)p->f.motion_val[0][xy]; + *(uint32_t*)s->b_bidir_back_mv_table[mb_xy] = *(uint32_t*)p->f.motion_val[1][xy]; s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_BIDIR; }else{ - *(uint32_t*)s->b_forw_mv_table[mb_xy]= *(uint32_t*)p->motion_val[0][xy]; + *(uint32_t*)s->b_forw_mv_table[mb_xy] = *(uint32_t*)p->f.motion_val[0][xy]; s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_FORWARD; } - x= p->motion_val[0][xy][0]; - y= p->motion_val[0][xy][1]; + x = p->f.motion_val[0][xy][0]; + y = p->f.motion_val[0][xy][1]; d = cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 16, 0, 0, cmpf, chroma_cmpf, flags); }else if(USES_LIST(mb_type, 1)){ - *(uint32_t*)s->b_back_mv_table[mb_xy]= *(uint32_t*)p->motion_val[1][xy]; + *(uint32_t*)s->b_back_mv_table[mb_xy] = *(uint32_t*)p->f.motion_val[1][xy]; s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_BACKWARD; - x= p->motion_val[1][xy][0]; - y= p->motion_val[1][xy][1]; + x = p->f.motion_val[1][xy][0]; + y = p->f.motion_val[1][xy][1]; d = cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 16, 2, 0, cmpf, chroma_cmpf, flags); }else s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTRA; @@ -1023,7 +999,7 @@ void ff_estimate_p_frame_motion(MpegEncContext * s, int mb_type=0; Picture * const pic= &s->current_picture; - init_ref(c, s->new_picture.data, s->last_picture.data, NULL, 16*mb_x, 16*mb_y, 0); + init_ref(c, s->new_picture.f.data, s->last_picture.f.data, NULL, 16*mb_x, 16*mb_y, 0); assert(s->quarter_sample==0 || s->quarter_sample==1); assert(s->linesize == c->stride); @@ -1075,16 +1051,16 @@ void ff_estimate_p_frame_motion(MpegEncContext * s, const int mot_stride = s->b8_stride; const int mot_xy = s->block_index[0]; - P_LEFT[0] = s->current_picture.motion_val[0][mot_xy - 1][0]; - P_LEFT[1] = s->current_picture.motion_val[0][mot_xy - 1][1]; + P_LEFT[0] = s->current_picture.f.motion_val[0][mot_xy - 1][0]; + P_LEFT[1] = s->current_picture.f.motion_val[0][mot_xy - 1][1]; if(P_LEFT[0] > (c->xmax<<shift)) P_LEFT[0] = (c->xmax<<shift); if(!s->first_slice_line) { - P_TOP[0] = s->current_picture.motion_val[0][mot_xy - mot_stride ][0]; - P_TOP[1] = s->current_picture.motion_val[0][mot_xy - mot_stride ][1]; - P_TOPRIGHT[0] = s->current_picture.motion_val[0][mot_xy - mot_stride + 2][0]; - P_TOPRIGHT[1] = s->current_picture.motion_val[0][mot_xy - mot_stride + 2][1]; + P_TOP[0] = s->current_picture.f.motion_val[0][mot_xy - mot_stride ][0]; + P_TOP[1] = s->current_picture.f.motion_val[0][mot_xy - mot_stride ][1]; + P_TOPRIGHT[0] = s->current_picture.f.motion_val[0][mot_xy - mot_stride + 2][0]; + P_TOPRIGHT[1] = s->current_picture.f.motion_val[0][mot_xy - mot_stride + 2][1]; if(P_TOP[1] > (c->ymax<<shift)) P_TOP[1] = (c->ymax<<shift); if(P_TOPRIGHT[0] < (c->xmin<<shift)) P_TOPRIGHT[0]= (c->xmin<<shift); if(P_TOPRIGHT[1] > (c->ymax<<shift)) P_TOPRIGHT[1]= (c->ymax<<shift); @@ -1214,37 +1190,13 @@ void ff_estimate_p_frame_motion(MpegEncContext * s, intra_score= 
s->dsp.mb_cmp[0](s, c->scratchpad, pix, s->linesize, 16); } -#if 0 //FIXME - /* get chroma score */ - if(c->avctx->mb_cmp&FF_CMP_CHROMA){ - for(i=1; i<3; i++){ - uint8_t *dest_c; - int mean; - - if(s->out_format == FMT_H263){ - mean= (s->dc_val[i][mb_x + mb_y*s->b8_stride] + 4)>>3; //FIXME not exact but simple ;) - }else{ - mean= (s->last_dc[i] + 4)>>3; - } - dest_c = s->new_picture.data[i] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8; - - mean*= 0x01010101; - for(i=0; i<8; i++){ - *(uint32_t*)(&c->scratchpad[i*s->uvlinesize+ 0]) = mean; - *(uint32_t*)(&c->scratchpad[i*s->uvlinesize+ 4]) = mean; - } - - intra_score+= s->dsp.mb_cmp[1](s, c->scratchpad, dest_c, s->uvlinesize); - } - } -#endif intra_score += c->mb_penalty_factor*16; if(intra_score < dmin){ mb_type= CANDIDATE_MB_TYPE_INTRA; - s->current_picture.mb_type[mb_y*s->mb_stride + mb_x]= CANDIDATE_MB_TYPE_INTRA; //FIXME cleanup + s->current_picture.f.mb_type[mb_y*s->mb_stride + mb_x] = CANDIDATE_MB_TYPE_INTRA; //FIXME cleanup }else - s->current_picture.mb_type[mb_y*s->mb_stride + mb_x]= 0; + s->current_picture.f.mb_type[mb_y*s->mb_stride + mb_x] = 0; { int p_score= FFMIN(vard, varc-500+(s->lambda2>>FF_LAMBDA_SHIFT)*100); @@ -1264,7 +1216,7 @@ int ff_pre_estimate_p_frame_motion(MpegEncContext * s, int P[10][2]; const int shift= 1+s->quarter_sample; const int xy= mb_x + mb_y*s->mb_stride; - init_ref(c, s->new_picture.data, s->last_picture.data, NULL, 16*mb_x, 16*mb_y, 0); + init_ref(c, s->new_picture.f.data, s->last_picture.f.data, NULL, 16*mb_x, 16*mb_y, 0); assert(s->quarter_sample==0 || s->quarter_sample==1); @@ -1615,7 +1567,7 @@ static inline int direct_search(MpegEncContext * s, int mb_x, int mb_y) ymin= xmin=(-32)>>shift; ymax= xmax= 31>>shift; - if(IS_8X8(s->next_picture.mb_type[mot_xy])){ + if (IS_8X8(s->next_picture.f.mb_type[mot_xy])) { s->mv_type= MV_TYPE_8X8; }else{ s->mv_type= MV_TYPE_16X16; @@ -1625,8 +1577,8 @@ static inline int direct_search(MpegEncContext * s, int mb_x, int mb_y) int index= s->block_index[i]; int min, max; - c->co_located_mv[i][0]= s->next_picture.motion_val[0][index][0]; - c->co_located_mv[i][1]= s->next_picture.motion_val[0][index][1]; + c->co_located_mv[i][0] = s->next_picture.f.motion_val[0][index][0]; + c->co_located_mv[i][1] = s->next_picture.f.motion_val[0][index][1]; c->direct_basis_mv[i][0]= c->co_located_mv[i][0]*time_pb/time_pp + ((i& 1)<<(shift+3)); c->direct_basis_mv[i][1]= c->co_located_mv[i][1]*time_pb/time_pp + ((i>>1)<<(shift+3)); // c->direct_basis_mv[1][i][0]= c->co_located_mv[i][0]*(time_pb - time_pp)/time_pp + ((i &1)<<(shift+3); @@ -1708,13 +1660,14 @@ void ff_estimate_b_frame_motion(MpegEncContext * s, int fmin, bmin, dmin, fbmin, bimin, fimin; int type=0; const int xy = mb_y*s->mb_stride + mb_x; - init_ref(c, s->new_picture.data, s->last_picture.data, s->next_picture.data, 16*mb_x, 16*mb_y, 2); + init_ref(c, s->new_picture.f.data, s->last_picture.f.data, + s->next_picture.f.data, 16 * mb_x, 16 * mb_y, 2); get_limits(s, 16*mb_x, 16*mb_y); c->skip=0; - if(s->codec_id == CODEC_ID_MPEG4 && s->next_picture.mbskip_table[xy]){ + if (s->codec_id == CODEC_ID_MPEG4 && s->next_picture.f.mbskip_table[xy]) { int score= direct_search(s, mb_x, mb_y); //FIXME just check 0,0 score= ((unsigned)(score*score + 128*256))>>16; @@ -1849,10 +1802,6 @@ void ff_estimate_b_frame_motion(MpegEncContext * s, if(dmin>256*256*16) type&= ~CANDIDATE_MB_TYPE_DIRECT; //do not try direct mode if it is invalid for this MB if(s->codec_id == CODEC_ID_MPEG4 && type&CANDIDATE_MB_TYPE_DIRECT && s->flags&CODEC_FLAG_MV0 && 
*(uint32_t*)s->b_direct_mv_table[xy]) type |= CANDIDATE_MB_TYPE_DIRECT0; -#if 0 - if(s->out_format == FMT_MPEG1) - type |= CANDIDATE_MB_TYPE_INTRA; -#endif } s->mb_type[mb_y*s->mb_stride + mb_x]= type; @@ -1947,14 +1896,14 @@ void ff_fix_long_p_mvs(MpegEncContext * s) int block; for(block=0; block<4; block++){ int off= (block& 1) + (block>>1)*wrap; - int mx= s->current_picture.motion_val[0][ xy + off ][0]; - int my= s->current_picture.motion_val[0][ xy + off ][1]; + int mx = s->current_picture.f.motion_val[0][ xy + off ][0]; + int my = s->current_picture.f.motion_val[0][ xy + off ][1]; if( mx >=range || mx <-range || my >=range || my <-range){ s->mb_type[i] &= ~CANDIDATE_MB_TYPE_INTER4V; s->mb_type[i] |= CANDIDATE_MB_TYPE_INTRA; - s->current_picture.mb_type[i]= CANDIDATE_MB_TYPE_INTRA; + s->current_picture.f.mb_type[i] = CANDIDATE_MB_TYPE_INTRA; } } } diff --git a/libavcodec/motion_est_template.c b/libavcodec/motion_est_template.c index 461e85932b..72150b4092 100644 --- a/libavcodec/motion_est_template.c +++ b/libavcodec/motion_est_template.c @@ -44,75 +44,6 @@ COPY3_IF_LT(dmin, d, bx, hx, by, hy)\ } -#if 0 -static int hpel_motion_search)(MpegEncContext * s, - int *mx_ptr, int *my_ptr, int dmin, - uint8_t *ref_data[3], - int size) -{ - const int xx = 16 * s->mb_x + 8*(n&1); - const int yy = 16 * s->mb_y + 8*(n>>1); - const int mx = *mx_ptr; - const int my = *my_ptr; - const int penalty_factor= c->sub_penalty_factor; - - LOAD_COMMON - - // INIT; - //FIXME factorize - me_cmp_func cmp, chroma_cmp, cmp_sub, chroma_cmp_sub; - - if(s->no_rounding /*FIXME b_type*/){ - hpel_put= &s->dsp.put_no_rnd_pixels_tab[size]; - chroma_hpel_put= &s->dsp.put_no_rnd_pixels_tab[size+1]; - }else{ - hpel_put=& s->dsp.put_pixels_tab[size]; - chroma_hpel_put= &s->dsp.put_pixels_tab[size+1]; - } - cmpf= s->dsp.me_cmp[size]; - chroma_cmpf= s->dsp.me_cmp[size+1]; - cmp_sub= s->dsp.me_sub_cmp[size]; - chroma_cmp_sub= s->dsp.me_sub_cmp[size+1]; - - if(c->skip){ //FIXME somehow move up (benchmark) - *mx_ptr = 0; - *my_ptr = 0; - return dmin; - } - - if(c->avctx->me_cmp != c->avctx->me_sub_cmp){ - CMP_HPEL(dmin, 0, 0, mx, my, size); - if(mx || my) - dmin += (mv_penalty[2*mx - pred_x] + mv_penalty[2*my - pred_y])*penalty_factor; - } - - if (mx > xmin && mx < xmax && - my > ymin && my < ymax) { - int bx=2*mx, by=2*my; - int d= dmin; - - CHECK_HALF_MV(1, 1, mx-1, my-1) - CHECK_HALF_MV(0, 1, mx , my-1) - CHECK_HALF_MV(1, 1, mx , my-1) - CHECK_HALF_MV(1, 0, mx-1, my ) - CHECK_HALF_MV(1, 0, mx , my ) - CHECK_HALF_MV(1, 1, mx-1, my ) - CHECK_HALF_MV(0, 1, mx , my ) - CHECK_HALF_MV(1, 1, mx , my ) - - assert(bx >= xmin*2 || bx <= xmax*2 || by >= ymin*2 || by <= ymax*2); - - *mx_ptr = bx; - *my_ptr = by; - }else{ - *mx_ptr =2*mx; - *my_ptr =2*my; - } - - return dmin; -} - -#else static int hpel_motion_search(MpegEncContext * s, int *mx_ptr, int *my_ptr, int dmin, int src_index, int ref_index, @@ -220,7 +151,6 @@ static int hpel_motion_search(MpegEncContext * s, return dmin; } -#endif static int no_sub_motion_search(MpegEncContext * s, int *mx_ptr, int *my_ptr, int dmin, @@ -323,7 +253,6 @@ static int qpel_motion_search(MpegEncContext * s, int best_pos[8][2]; memset(best, 64, sizeof(int)*8); -#if 1 if(s->me.dia_size>=2){ const int tl= score_map[(index-(1<<ME_MAP_SHIFT)-1)&(ME_MAP_SIZE-1)]; const int bl= score_map[(index+(1<<ME_MAP_SHIFT)-1)&(ME_MAP_SIZE-1)]; @@ -412,76 +341,6 @@ static int qpel_motion_search(MpegEncContext * s, CHECK_QUARTER_MV(nx&3, ny&3, nx>>2, ny>>2) } -#if 0 - const int tl= 
score_map[(index-(1<<ME_MAP_SHIFT)-1)&(ME_MAP_SIZE-1)]; - const int bl= score_map[(index+(1<<ME_MAP_SHIFT)-1)&(ME_MAP_SIZE-1)]; - const int tr= score_map[(index-(1<<ME_MAP_SHIFT)+1)&(ME_MAP_SIZE-1)]; - const int br= score_map[(index+(1<<ME_MAP_SHIFT)+1)&(ME_MAP_SIZE-1)]; -// if(l < r && l < t && l < b && l < tl && l < bl && l < tr && l < br && bl < tl){ - if(tl<br){ - -// nx= FFMAX(4*mx - bx, bx - 4*mx); -// ny= FFMAX(4*my - by, by - 4*my); - - static int stats[7][7], count; - count++; - stats[4*mx - bx + 3][4*my - by + 3]++; - if(256*256*256*64 % count ==0){ - for(i=0; i<49; i++){ - if((i%7)==0) printf("\n"); - printf("%6d ", stats[0][i]); - } - printf("\n"); - } - } -#endif -#else - - CHECK_QUARTER_MV(2, 2, mx-1, my-1) - CHECK_QUARTER_MV(0, 2, mx , my-1) - CHECK_QUARTER_MV(2, 2, mx , my-1) - CHECK_QUARTER_MV(2, 0, mx , my ) - CHECK_QUARTER_MV(2, 2, mx , my ) - CHECK_QUARTER_MV(0, 2, mx , my ) - CHECK_QUARTER_MV(2, 2, mx-1, my ) - CHECK_QUARTER_MV(2, 0, mx-1, my ) - - nx= bx; - ny= by; - - for(i=0; i<8; i++){ - int ox[8]= {0, 1, 1, 1, 0,-1,-1,-1}; - int oy[8]= {1, 1, 0,-1,-1,-1, 0, 1}; - CHECK_QUARTER_MV((nx + ox[i])&3, (ny + oy[i])&3, (nx + ox[i])>>2, (ny + oy[i])>>2) - } -#endif -#if 0 - //outer ring - CHECK_QUARTER_MV(1, 3, mx-1, my-1) - CHECK_QUARTER_MV(1, 2, mx-1, my-1) - CHECK_QUARTER_MV(1, 1, mx-1, my-1) - CHECK_QUARTER_MV(2, 1, mx-1, my-1) - CHECK_QUARTER_MV(3, 1, mx-1, my-1) - CHECK_QUARTER_MV(0, 1, mx , my-1) - CHECK_QUARTER_MV(1, 1, mx , my-1) - CHECK_QUARTER_MV(2, 1, mx , my-1) - CHECK_QUARTER_MV(3, 1, mx , my-1) - CHECK_QUARTER_MV(3, 2, mx , my-1) - CHECK_QUARTER_MV(3, 3, mx , my-1) - CHECK_QUARTER_MV(3, 0, mx , my ) - CHECK_QUARTER_MV(3, 1, mx , my ) - CHECK_QUARTER_MV(3, 2, mx , my ) - CHECK_QUARTER_MV(3, 3, mx , my ) - CHECK_QUARTER_MV(2, 3, mx , my ) - CHECK_QUARTER_MV(1, 3, mx , my ) - CHECK_QUARTER_MV(0, 3, mx , my ) - CHECK_QUARTER_MV(3, 3, mx-1, my ) - CHECK_QUARTER_MV(2, 3, mx-1, my ) - CHECK_QUARTER_MV(1, 3, mx-1, my ) - CHECK_QUARTER_MV(1, 2, mx-1, my ) - CHECK_QUARTER_MV(1, 1, mx-1, my ) - CHECK_QUARTER_MV(1, 0, mx-1, my ) -#endif assert(bx >= xmin*4 && bx <= xmax*4 && by >= ymin*4 && by <= ymax*4); *mx_ptr = bx; @@ -992,8 +851,8 @@ static av_always_inline int diamond_search(MpegEncContext * s, int *best, int dm return var_diamond_search(s, best, dmin, src_index, ref_index, penalty_factor, size, h, flags); } -/*! - \param P[10][2] a list of candidate mvs to check before starting the +/** + @param P a list of candidate mvs to check before starting the iterative search. If one of the candidates is close to the optimal mv, then it takes fewer iterations. And it increases the chance that we find the optimal mv. @@ -1003,12 +862,12 @@ static av_always_inline int epzs_motion_search_internal(MpegEncContext * s, int int ref_mv_scale, int flags, int size, int h) { MotionEstContext * const c= &s->me; - int best[2]={0, 0}; /*!< x and y coordinates of the best motion vector. + int best[2]={0, 0}; /**< x and y coordinates of the best motion vector. i.e. the difference between the position of the block currently being encoded and the position of the block chosen to predict it from. */ int d; ///< the score (cmp + penalty) of any given mv - int dmin; /*!< the best value of d, i.e. the score + int dmin; /**< the best value of d, i.e. the score corresponding to the mv stored in best[]. 
*/ int map_generation; int penalty_factor; diff --git a/libavcodec/motionpixels.c b/libavcodec/motionpixels.c index 01558ab95b..a3868e10cc 100644 --- a/libavcodec/motionpixels.c +++ b/libavcodec/motionpixels.c @@ -304,14 +304,13 @@ static av_cold int mp_decode_end(AVCodecContext *avctx) } AVCodec ff_motionpixels_decoder = { - "motionpixels", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MOTIONPIXELS, - sizeof(MotionPixelsContext), - mp_decode_init, - NULL, - mp_decode_end, - mp_decode_frame, - CODEC_CAP_DR1, + .name = "motionpixels", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MOTIONPIXELS, + .priv_data_size = sizeof(MotionPixelsContext), + .init = mp_decode_init, + .close = mp_decode_end, + .decode = mp_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Motion Pixels video"), }; diff --git a/libavcodec/motionpixels_tablegen.h b/libavcodec/motionpixels_tablegen.h index b56dec64a4..b9802e589d 100644 --- a/libavcodec/motionpixels_tablegen.h +++ b/libavcodec/motionpixels_tablegen.h @@ -30,7 +30,7 @@ typedef struct YuvPixel { } YuvPixel; static int mp_yuv_to_rgb(int y, int v, int u, int clip_rgb) { - static const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; + const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; int r, g, b; r = (1000 * y + 701 * v) / 1000; diff --git a/libavcodec/mpc7.c b/libavcodec/mpc7.c index bb21469356..ba8828eb52 100644 --- a/libavcodec/mpc7.c +++ b/libavcodec/mpc7.c @@ -291,14 +291,12 @@ static void mpc7_decode_flush(AVCodecContext *avctx) } AVCodec ff_mpc7_decoder = { - "mpc7", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_MUSEPACK7, - sizeof(MPCContext), - mpc7_decode_init, - NULL, - NULL, - mpc7_decode_frame, + .name = "mpc7", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_MUSEPACK7, + .priv_data_size = sizeof(MPCContext), + .init = mpc7_decode_init, + .decode = mpc7_decode_frame, .flush = mpc7_decode_flush, .long_name = NULL_IF_CONFIG_SMALL("Musepack SV7"), }; diff --git a/libavcodec/mpc8.c b/libavcodec/mpc8.c index 2864b1a010..cae7244ed5 100644 --- a/libavcodec/mpc8.c +++ b/libavcodec/mpc8.c @@ -406,13 +406,11 @@ static int mpc8_decode_frame(AVCodecContext * avctx, } AVCodec ff_mpc8_decoder = { - "mpc8", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_MUSEPACK8, - sizeof(MPCContext), - mpc8_decode_init, - NULL, - NULL, - mpc8_decode_frame, + .name = "mpc8", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_MUSEPACK8, + .priv_data_size = sizeof(MPCContext), + .init = mpc8_decode_init, + .decode = mpc8_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("Musepack SV8"), }; diff --git a/libavcodec/mpeg12.c b/libavcodec/mpeg12.c index 219d5c854d..c1e38f12c9 100644 --- a/libavcodec/mpeg12.c +++ b/libavcodec/mpeg12.c @@ -217,20 +217,20 @@ static int mpeg_decode_mb(MpegEncContext *s, if (s->mb_skip_run-- != 0) { if (s->pict_type == AV_PICTURE_TYPE_P) { s->mb_skipped = 1; - s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16; + s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride] = MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16; } else { int mb_type; if(s->mb_x) - mb_type= s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1]; + mb_type = s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1]; else - mb_type= s->current_picture.mb_type[ s->mb_width + (s->mb_y-1)*s->mb_stride - 1]; // FIXME not sure if this is allowed in MPEG at all + mb_type = s->current_picture.f.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1]; // FIXME not sure if this is allowed in MPEG at all if(IS_INTRA(mb_type)) return -1; - 
s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= + s->current_picture.f.mb_type[s->mb_x + s->mb_y*s->mb_stride] = mb_type | MB_TYPE_SKIP; -// assert(s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1]&(MB_TYPE_16x16|MB_TYPE_16x8)); +// assert(s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1]&(MB_TYPE_16x16|MB_TYPE_16x8)); if((s->mv[0][0][0]|s->mv[0][0][1]|s->mv[1][0][0]|s->mv[1][0][1])==0) s->mb_skipped = 1; @@ -581,7 +581,7 @@ static int mpeg_decode_mb(MpegEncContext *s, } } - s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= mb_type; + s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type; return 0; } @@ -1423,8 +1423,8 @@ static int mpeg1_decode_picture(AVCodecContext *avctx, s->mpeg_f_code[1][0] = f_code; s->mpeg_f_code[1][1] = f_code; } - s->current_picture.pict_type= s->pict_type; - s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I; + s->current_picture.f.pict_type = s->pict_type; + s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; if(avctx->debug & FF_DEBUG_PICT_INFO) av_log(avctx, AV_LOG_DEBUG, "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type); @@ -1577,8 +1577,8 @@ static void mpeg_decode_picture_coding_extension(Mpeg1Context *s1) s->pict_type= AV_PICTURE_TYPE_P; }else s->pict_type= AV_PICTURE_TYPE_B; - s->current_picture.pict_type= s->pict_type; - s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I; + s->current_picture.f.pict_type = s->pict_type; + s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; } s->intra_dc_precision = get_bits(&s->gb, 2); s->picture_structure = get_bits(&s->gb, 2); @@ -1655,19 +1655,19 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size) ff_er_frame_start(s); /* first check if we must repeat the frame */ - s->current_picture_ptr->repeat_pict = 0; + s->current_picture_ptr->f.repeat_pict = 0; if (s->repeat_first_field) { if (s->progressive_sequence) { if (s->top_field_first) - s->current_picture_ptr->repeat_pict = 4; + s->current_picture_ptr->f.repeat_pict = 4; else - s->current_picture_ptr->repeat_pict = 2; + s->current_picture_ptr->f.repeat_pict = 2; } else if (s->progressive_frame) { - s->current_picture_ptr->repeat_pict = 1; + s->current_picture_ptr->f.repeat_pict = 1; } } - *s->current_picture_ptr->pan_scan= s1->pan_scan; + *s->current_picture_ptr->f.pan_scan = s1->pan_scan; if (HAVE_PTHREADS && (avctx->active_thread_type & FF_THREAD_FRAME)) ff_thread_finish_setup(avctx); @@ -1680,9 +1680,9 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size) } for(i=0; i<4; i++){ - s->current_picture.data[i] = s->current_picture_ptr->data[i]; + s->current_picture.f.data[i] = s->current_picture_ptr->f.data[i]; if(s->picture_structure == PICT_BOTTOM_FIELD){ - s->current_picture.data[i] += s->current_picture_ptr->linesize[i]; + s->current_picture.f.data[i] += s->current_picture_ptr->f.linesize[i]; } } } @@ -1804,7 +1804,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y, if(mpeg_decode_mb(s, s->block) < 0) return -1; - if(s->current_picture.motion_val[0] && !s->encoding){ //note motion_val is normally NULL unless we want to extract the MVs + if (s->current_picture.f.motion_val[0] && !s->encoding) { //note motion_val is normally NULL unless we want to extract the MVs const int wrap = s->b8_stride; int xy = s->mb_x*2 + s->mb_y*2*wrap; int b8_xy= 4*(s->mb_x + s->mb_y*s->mb_stride); @@ -1822,12 +1822,12 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int 
mb_y, motion_y = s->mv[dir][i][1]; } - s->current_picture.motion_val[dir][xy ][0] = motion_x; - s->current_picture.motion_val[dir][xy ][1] = motion_y; - s->current_picture.motion_val[dir][xy + 1][0] = motion_x; - s->current_picture.motion_val[dir][xy + 1][1] = motion_y; - s->current_picture.ref_index [dir][b8_xy ]= - s->current_picture.ref_index [dir][b8_xy + 1]= s->field_select[dir][i]; + s->current_picture.f.motion_val[dir][xy ][0] = motion_x; + s->current_picture.f.motion_val[dir][xy ][1] = motion_y; + s->current_picture.f.motion_val[dir][xy + 1][0] = motion_x; + s->current_picture.f.motion_val[dir][xy + 1][1] = motion_y; + s->current_picture.f.ref_index [dir][b8_xy ] = + s->current_picture.f.ref_index [dir][b8_xy + 1] = s->field_select[dir][i]; assert(s->field_select[dir][i]==0 || s->field_select[dir][i]==1); } xy += wrap; @@ -1965,8 +1965,6 @@ static int slice_decode_thread(AVCodecContext *c, void *arg){ if(mb_y < 0 || mb_y >= s->end_mb_y) return -1; } - - return 0; //not reached } /** @@ -1993,7 +1991,7 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict) if (/*s->mb_y<<field_pic == s->mb_height &&*/ !s->first_field) { /* end of image */ - s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_MPEG2; + s->current_picture_ptr->f.qscale_type = FF_QSCALE_TYPE_MPEG2; ff_er_frame_end(s); @@ -2579,15 +2577,14 @@ static const AVProfile mpeg2_video_profiles[] = { AVCodec ff_mpeg1video_decoder = { - "mpeg1video", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MPEG1VIDEO, - sizeof(Mpeg1Context), - mpeg_decode_init, - NULL, - mpeg_decode_end, - mpeg_decode_frame, - CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY, + .name = "mpeg1video", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MPEG1VIDEO, + .priv_data_size = sizeof(Mpeg1Context), + .init = mpeg_decode_init, + .close = mpeg_decode_end, + .decode = mpeg_decode_frame, + .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS, .flush= flush, .max_lowres= 3, .long_name= NULL_IF_CONFIG_SMALL("MPEG-1 video"), @@ -2595,15 +2592,14 @@ AVCodec ff_mpeg1video_decoder = { }; AVCodec ff_mpeg2video_decoder = { - "mpeg2video", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MPEG2VIDEO, - sizeof(Mpeg1Context), - mpeg_decode_init, - NULL, - mpeg_decode_end, - mpeg_decode_frame, - CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS, + .name = "mpeg2video", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MPEG2VIDEO, + .priv_data_size = sizeof(Mpeg1Context), + .init = mpeg_decode_init, + .close = mpeg_decode_end, + .decode = mpeg_decode_frame, + .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS, .flush= flush, .max_lowres= 3, .long_name= NULL_IF_CONFIG_SMALL("MPEG-2 video"), @@ -2612,15 +2608,14 @@ AVCodec ff_mpeg2video_decoder = { //legacy decoder AVCodec ff_mpegvideo_decoder = { - "mpegvideo", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MPEG2VIDEO, - sizeof(Mpeg1Context), - mpeg_decode_init, - NULL, - mpeg_decode_end, - mpeg_decode_frame, - CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS, + .name = "mpegvideo", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MPEG2VIDEO, + .priv_data_size = sizeof(Mpeg1Context), + .init = mpeg_decode_init, + .close = mpeg_decode_end, + .decode = mpeg_decode_frame, + .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | 
CODEC_CAP_SLICE_THREADS, .flush= flush, .max_lowres= 3, .long_name= NULL_IF_CONFIG_SMALL("MPEG-1 video"), @@ -2644,15 +2639,14 @@ static av_cold int mpeg_mc_decode_init(AVCodecContext *avctx){ } AVCodec ff_mpeg_xvmc_decoder = { - "mpegvideo_xvmc", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MPEG2VIDEO_XVMC, - sizeof(Mpeg1Context), - mpeg_mc_decode_init, - NULL, - mpeg_decode_end, - mpeg_decode_frame, - CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED| CODEC_CAP_HWACCEL | CODEC_CAP_DELAY, + .name = "mpegvideo_xvmc", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MPEG2VIDEO_XVMC, + .priv_data_size = sizeof(Mpeg1Context), + .init = mpeg_mc_decode_init, + .close = mpeg_decode_end, + .decode = mpeg_decode_frame, + .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED| CODEC_CAP_HWACCEL | CODEC_CAP_DELAY, .flush= flush, .long_name = NULL_IF_CONFIG_SMALL("MPEG-1/2 video XvMC (X-Video Motion Compensation)"), }; @@ -2661,15 +2655,14 @@ AVCodec ff_mpeg_xvmc_decoder = { #if CONFIG_MPEG_VDPAU_DECODER AVCodec ff_mpeg_vdpau_decoder = { - "mpegvideo_vdpau", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MPEG2VIDEO, - sizeof(Mpeg1Context), - mpeg_decode_init, - NULL, - mpeg_decode_end, - mpeg_decode_frame, - CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY, + .name = "mpegvideo_vdpau", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MPEG2VIDEO, + .priv_data_size = sizeof(Mpeg1Context), + .init = mpeg_decode_init, + .close = mpeg_decode_end, + .decode = mpeg_decode_frame, + .capabilities = CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY, .flush= flush, .long_name = NULL_IF_CONFIG_SMALL("MPEG-1/2 video (VDPAU acceleration)"), }; @@ -2677,15 +2670,14 @@ AVCodec ff_mpeg_vdpau_decoder = { #if CONFIG_MPEG1_VDPAU_DECODER AVCodec ff_mpeg1_vdpau_decoder = { - "mpeg1video_vdpau", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MPEG1VIDEO, - sizeof(Mpeg1Context), - mpeg_decode_init, - NULL, - mpeg_decode_end, - mpeg_decode_frame, - CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY, + .name = "mpeg1video_vdpau", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MPEG1VIDEO, + .priv_data_size = sizeof(Mpeg1Context), + .init = mpeg_decode_init, + .close = mpeg_decode_end, + .decode = mpeg_decode_frame, + .capabilities = CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY, .flush= flush, .long_name = NULL_IF_CONFIG_SMALL("MPEG-1 video (VDPAU acceleration)"), }; diff --git a/libavcodec/mpeg12enc.c b/libavcodec/mpeg12enc.c index 41344562b2..852fba5d74 100644 --- a/libavcodec/mpeg12enc.c +++ b/libavcodec/mpeg12enc.c @@ -200,7 +200,7 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s) if(aspect_ratio==0.0) aspect_ratio= 1.0; //pixel aspect 1:1 (VGA) - if (s->current_picture.key_frame) { + if (s->current_picture.f.key_frame) { AVRational framerate= ff_frame_rate_tab[s->frame_rate_index]; /* mpeg1 header repeated every gop */ @@ -287,9 +287,9 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s) /* time code : we must convert from the real frame rate to a fake mpeg frame rate in case of low frame rate */ fps = (framerate.num + framerate.den/2)/ framerate.den; - time_code = s->current_picture_ptr->coded_picture_number + s->avctx->timecode_frame_start; + time_code = s->current_picture_ptr->f.coded_picture_number + s->avctx->timecode_frame_start; - s->gop_picture_number = s->current_picture_ptr->coded_picture_number; + s->gop_picture_number = s->current_picture_ptr->f.coded_picture_number; if 
(s->avctx->flags2 & CODEC_FLAG2_DROP_FRAME_TIMECODE) { /* only works for NTSC 29.97 */ int d = time_code / 17982; @@ -396,7 +396,7 @@ void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number) if (s->progressive_sequence) { put_bits(&s->pb, 1, 0); /* no repeat */ } else { - put_bits(&s->pb, 1, s->current_picture_ptr->top_field_first); + put_bits(&s->pb, 1, s->current_picture_ptr->f.top_field_first); } /* XXX: optimize the generation of this flag with entropy measures */ @@ -926,13 +926,13 @@ static void mpeg1_encode_block(MpegEncContext *s, } AVCodec ff_mpeg1video_encoder = { - "mpeg1video", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MPEG1VIDEO, - sizeof(MpegEncContext), - encode_init, - MPV_encode_picture, - MPV_encode_end, + .name = "mpeg1video", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MPEG1VIDEO, + .priv_data_size = sizeof(MpegEncContext), + .init = encode_init, + .encode = MPV_encode_picture, + .close = MPV_encode_end, .supported_framerates= ff_frame_rate_tab+1, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .capabilities= CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS, @@ -940,13 +940,13 @@ AVCodec ff_mpeg1video_encoder = { }; AVCodec ff_mpeg2video_encoder = { - "mpeg2video", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MPEG2VIDEO, - sizeof(MpegEncContext), - encode_init, - MPV_encode_picture, - MPV_encode_end, + .name = "mpeg2video", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MPEG2VIDEO, + .priv_data_size = sizeof(MpegEncContext), + .init = encode_init, + .encode = MPV_encode_picture, + .close = MPV_encode_end, .supported_framerates= ff_frame_rate_tab+1, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE}, .capabilities= CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS, diff --git a/libavcodec/mpeg4video.c b/libavcodec/mpeg4video.c index f4e9a8a1f8..9a093511d5 100644 --- a/libavcodec/mpeg4video.c +++ b/libavcodec/mpeg4video.c @@ -89,7 +89,7 @@ static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx, int my, uint16_t time_pb= s->pb_time; int p_mx, p_my; - p_mx= s->next_picture.motion_val[0][xy][0]; + p_mx = s->next_picture.f.motion_val[0][xy][0]; if((unsigned)(p_mx + tab_bias) < tab_size){ s->mv[0][i][0] = s->direct_scale_mv[0][p_mx + tab_bias] + mx; s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx @@ -99,7 +99,7 @@ static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx, int my, s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx : p_mx*(time_pb - time_pp)/time_pp; } - p_my= s->next_picture.motion_val[0][xy][1]; + p_my = s->next_picture.f.motion_val[0][xy][1]; if((unsigned)(p_my + tab_bias) < tab_size){ s->mv[0][i][1] = s->direct_scale_mv[0][p_my + tab_bias] + my; s->mv[1][i][1] = my ? 
s->mv[0][i][1] - p_my @@ -120,7 +120,7 @@ static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx, int my, */ int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){ const int mb_index= s->mb_x + s->mb_y*s->mb_stride; - const int colocated_mb_type= s->next_picture.mb_type[mb_index]; + const int colocated_mb_type = s->next_picture.f.mb_type[mb_index]; uint16_t time_pp; uint16_t time_pb; int i; @@ -137,7 +137,7 @@ int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){ } else if(IS_INTERLACED(colocated_mb_type)){ s->mv_type = MV_TYPE_FIELD; for(i=0; i<2; i++){ - int field_select= s->next_picture.ref_index[0][4*mb_index + 2*i]; + int field_select = s->next_picture.f.ref_index[0][4 * mb_index + 2 * i]; s->field_select[0][i]= field_select; s->field_select[1][i]= i; if(s->top_field_first){ diff --git a/libavcodec/mpeg4videodec.c b/libavcodec/mpeg4videodec.c index 6b7b4bfbc0..cb5231a4dd 100644 --- a/libavcodec/mpeg4videodec.c +++ b/libavcodec/mpeg4videodec.c @@ -55,7 +55,7 @@ void mpeg4_pred_ac(MpegEncContext * s, DCTELEM *block, int n, { int i; int16_t *ac_val, *ac_val1; - int8_t * const qscale_table= s->current_picture.qscale_table; + int8_t * const qscale_table = s->current_picture.f.qscale_table; /* find prediction */ ac_val = s->ac_val[0][0] + s->block_index[n] * 16; @@ -113,7 +113,7 @@ static inline int mpeg4_is_resync(MpegEncContext *s){ int bits_count= get_bits_count(&s->gb); int v= show_bits(&s->gb, 16); - if(s->workaround_bugs&FF_BUG_NO_PADDING){ + if(s->workaround_bugs&FF_BUG_NO_PADDING && !s->resync_marker){ return 0; } @@ -376,7 +376,7 @@ int mpeg4_decode_video_packet_header(MpegEncContext *s) if(s->pict_type == AV_PICTURE_TYPE_B){ int mb_x = 0, mb_y = 0; - while(s->next_picture.mbskip_table[ s->mb_index2xy[ mb_num ] ]) { + while (s->next_picture.f.mbskip_table[s->mb_index2xy[mb_num]]) { if (!mb_x) ff_thread_await_progress((AVFrame*)s->next_picture_ptr, mb_y++, 0); mb_num++; if (++mb_x == s->mb_width) mb_x = 0; @@ -570,13 +570,13 @@ static int mpeg4_decode_partition_a(MpegEncContext *s){ }while(cbpc == 8); s->cbp_table[xy]= cbpc & 3; - s->current_picture.mb_type[xy]= MB_TYPE_INTRA; + s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA; s->mb_intra = 1; if(cbpc & 4) { ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]); } - s->current_picture.qscale_table[xy]= s->qscale; + s->current_picture.f.qscale_table[xy]= s->qscale; s->mbintra_table[xy]= 1; for(i=0; i<6; i++){ @@ -592,7 +592,7 @@ static int mpeg4_decode_partition_a(MpegEncContext *s){ s->pred_dir_table[xy]= dir; }else{ /* P/S_TYPE */ int mx, my, pred_x, pred_y, bits; - int16_t * const mot_val= s->current_picture.motion_val[0][s->block_index[0]]; + int16_t * const mot_val = s->current_picture.f.motion_val[0][s->block_index[0]]; const int stride= s->b8_stride*2; try_again: @@ -604,11 +604,11 @@ try_again: if(bits&0x10000){ /* skip mb */ if(s->pict_type==AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE){ - s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0; + s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0; mx= get_amv(s, 0); my= get_amv(s, 1); }else{ - s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; + s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; mx=my=0; } mot_val[0 ]= mot_val[2 ]= @@ -634,7 +634,7 @@ try_again: s->mb_intra = ((cbpc & 4) != 0); if(s->mb_intra){ - s->current_picture.mb_type[xy]= MB_TYPE_INTRA; + s->current_picture.f.mb_type[xy] = 
MB_TYPE_INTRA; s->mbintra_table[xy]= 1; mot_val[0 ]= mot_val[2 ]= mot_val[0+stride]= mot_val[2+stride]= 0; @@ -660,11 +660,11 @@ try_again: my = h263_decode_motion(s, pred_y, s->f_code); if (my >= 0xffff) return -1; - s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0; + s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0; } else { mx = get_amv(s, 0); my = get_amv(s, 1); - s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0; + s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0; } mot_val[0 ]= mot_val[2 ] = @@ -673,7 +673,7 @@ try_again: mot_val[1+stride]= mot_val[3+stride]= my; } else { int i; - s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0; + s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0; for(i=0;i<4;i++) { int16_t *mot_val= h263_pred_motion(s, i, 0, &pred_x, &pred_y); mx = h263_decode_motion(s, pred_x, s->f_code); @@ -725,9 +725,9 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){ } s->cbp_table[xy]|= cbpy<<2; - s->current_picture.mb_type[xy] |= ac_pred*MB_TYPE_ACPRED; + s->current_picture.f.mb_type[xy] |= ac_pred*MB_TYPE_ACPRED; }else{ /* P || S_TYPE */ - if(IS_INTRA(s->current_picture.mb_type[xy])){ + if (IS_INTRA(s->current_picture.f.mb_type[xy])) { int dir=0,i; int ac_pred = get_bits1(&s->gb); int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1); @@ -740,7 +740,7 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){ if(s->cbp_table[xy] & 8) { ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]); } - s->current_picture.qscale_table[xy]= s->qscale; + s->current_picture.f.qscale_table[xy] = s->qscale; for(i=0; i<6; i++){ int dc_pred_dir; @@ -754,10 +754,10 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){ } s->cbp_table[xy]&= 3; //remove dquant s->cbp_table[xy]|= cbpy<<2; - s->current_picture.mb_type[xy] |= ac_pred*MB_TYPE_ACPRED; + s->current_picture.f.mb_type[xy] |= ac_pred*MB_TYPE_ACPRED; s->pred_dir_table[xy]= dir; - }else if(IS_SKIP(s->current_picture.mb_type[xy])){ - s->current_picture.qscale_table[xy]= s->qscale; + } else if (IS_SKIP(s->current_picture.f.mb_type[xy])) { + s->current_picture.f.qscale_table[xy] = s->qscale; s->cbp_table[xy]= 0; }else{ int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1); @@ -770,7 +770,7 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){ if(s->cbp_table[xy] & 8) { ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]); } - s->current_picture.qscale_table[xy]= s->qscale; + s->current_picture.f.qscale_table[xy] = s->qscale; s->cbp_table[xy]&= 3; //remove dquant s->cbp_table[xy]|= (cbpy^0xf)<<2; @@ -1091,20 +1091,20 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64]) int cbp, mb_type; const int xy= s->mb_x + s->mb_y*s->mb_stride; - mb_type= s->current_picture.mb_type[xy]; + mb_type = s->current_picture.f.mb_type[xy]; cbp = s->cbp_table[xy]; s->use_intra_dc_vlc= s->qscale < s->intra_dc_threshold; - if(s->current_picture.qscale_table[xy] != s->qscale){ - ff_set_qscale(s, s->current_picture.qscale_table[xy] ); + if (s->current_picture.f.qscale_table[xy] != s->qscale) { + ff_set_qscale(s, s->current_picture.f.qscale_table[xy]); } if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) { int i; for(i=0; i<4; i++){ - s->mv[0][i][0] = s->current_picture.motion_val[0][ s->block_index[i] ][0]; - s->mv[0][i][1] = s->current_picture.motion_val[0][ s->block_index[i] ][1]; + s->mv[0][i][0] = 
s->current_picture.f.motion_val[0][s->block_index[i]][0]; + s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1]; } s->mb_intra = IS_INTRA(mb_type); @@ -1122,7 +1122,7 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64]) s->mb_skipped = 1; } }else if(s->mb_intra){ - s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]); + s->ac_pred = IS_ACPRED(s->current_picture.f.mb_type[xy]); }else if(!s->mb_intra){ // s->mcsel= 0; //FIXME do we need to init that @@ -1135,7 +1135,7 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64]) } } else { /* I-Frame */ s->mb_intra = 1; - s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]); + s->ac_pred = IS_ACPRED(s->current_picture.f.mb_type[xy]); } if (!IS_SKIP(mb_type)) { @@ -1188,14 +1188,14 @@ static int mpeg4_decode_mb(MpegEncContext *s, s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; if(s->pict_type==AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE){ - s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0; + s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0; s->mcsel=1; s->mv[0][0][0]= get_amv(s, 0); s->mv[0][0][1]= get_amv(s, 1); s->mb_skipped = 0; }else{ - s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; + s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; s->mcsel=0; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; @@ -1230,7 +1230,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, s->mv_dir = MV_DIR_FORWARD; if ((cbpc & 16) == 0) { if(s->mcsel){ - s->current_picture.mb_type[xy]= MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0; + s->current_picture.f.mb_type[xy] = MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0; /* 16x16 global motion prediction */ s->mv_type = MV_TYPE_16X16; mx= get_amv(s, 0); @@ -1238,7 +1238,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, s->mv[0][0][0] = mx; s->mv[0][0][1] = my; }else if((!s->progressive_sequence) && get_bits1(&s->gb)){ - s->current_picture.mb_type[xy]= MB_TYPE_16x8 | MB_TYPE_L0 | MB_TYPE_INTERLACED; + s->current_picture.f.mb_type[xy] = MB_TYPE_16x8 | MB_TYPE_L0 | MB_TYPE_INTERLACED; /* 16x8 field motion prediction */ s->mv_type= MV_TYPE_FIELD; @@ -1260,7 +1260,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, s->mv[0][i][1] = my; } }else{ - s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0; + s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0; /* 16x16 motion prediction */ s->mv_type = MV_TYPE_16X16; h263_pred_motion(s, 0, 0, &pred_x, &pred_y); @@ -1277,7 +1277,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, s->mv[0][0][1] = my; } } else { - s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0; + s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0; s->mv_type = MV_TYPE_8X8; for(i=0;i<4;i++) { mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y); @@ -1314,7 +1314,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, } /* if we skipped it in the future P Frame than skip it now too */ - s->mb_skipped= s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]; // Note, skiptab=0 if last was GMC + s->mb_skipped = s->next_picture.f.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]; // Note, skiptab=0 if last was GMC if(s->mb_skipped){ /* skip mb */ @@ -1327,7 +1327,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, s->mv[0][0][1] = 0; s->mv[1][0][0] = 0; s->mv[1][0][1] = 0; - s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; + s->current_picture.f.mb_type[xy] = 
MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; goto end; } @@ -1433,7 +1433,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT; mb_type |= ff_mpeg4_set_direct_mv(s, mx, my); } - s->current_picture.mb_type[xy]= mb_type; + s->current_picture.f.mb_type[xy] = mb_type; } else { /* I-Frame */ do{ cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2); @@ -1448,9 +1448,9 @@ static int mpeg4_decode_mb(MpegEncContext *s, intra: s->ac_pred = get_bits1(&s->gb); if(s->ac_pred) - s->current_picture.mb_type[xy]= MB_TYPE_INTRA | MB_TYPE_ACPRED; + s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED; else - s->current_picture.mb_type[xy]= MB_TYPE_INTRA; + s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA; cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1); if(cbpy<0){ @@ -1496,7 +1496,7 @@ end: (s->mb_x + delta >= s->mb_width) ? FFMIN(s->mb_y+1, s->mb_height-1) : s->mb_y, 0); } - if(s->pict_type==AV_PICTURE_TYPE_B && s->next_picture.mbskip_table[xy + delta]) + if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture.f.mbskip_table[xy + delta]) return SLICE_OK; return SLICE_END; } @@ -1527,6 +1527,19 @@ static int mpeg4_decode_gop_header(MpegEncContext * s, GetBitContext *gb){ return 0; } +static int mpeg4_decode_profile_level(MpegEncContext * s, GetBitContext *gb){ + + s->avctx->profile = get_bits(gb, 4); + s->avctx->level = get_bits(gb, 4); + + // for Simple profile, level 0 + if (s->avctx->profile == 0 && s->avctx->level == 8) { + s->avctx->level = 0; + } + + return 0; +} + static int decode_vol_header(MpegEncContext *s, GetBitContext *gb){ int width, height, vo_ver_id; @@ -1965,11 +1978,12 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){ } if(s->avctx->time_base.num) - s->current_picture_ptr->pts= (s->time + s->avctx->time_base.num/2) / s->avctx->time_base.num; + s->current_picture_ptr->f.pts = (s->time + s->avctx->time_base.num / 2) / s->avctx->time_base.num; else - s->current_picture_ptr->pts= AV_NOPTS_VALUE; + s->current_picture_ptr->f.pts = AV_NOPTS_VALUE; if(s->avctx->debug&FF_DEBUG_PTS) - av_log(s->avctx, AV_LOG_DEBUG, "MPEG4 PTS: %"PRId64"\n", s->current_picture_ptr->pts); + av_log(s->avctx, AV_LOG_DEBUG, "MPEG4 PTS: %"PRId64"\n", + s->current_picture_ptr->f.pts); check_marker(gb, "before vop_coded"); @@ -2180,6 +2194,9 @@ int ff_mpeg4_decode_picture_header(MpegEncContext * s, GetBitContext *gb) else if(startcode == GOP_STARTCODE){ mpeg4_decode_gop_header(s, gb); } + else if(startcode == VOS_STARTCODE){ + mpeg4_decode_profile_level(s, gb); + } else if(startcode == VOP_STARTCODE){ break; } @@ -2240,35 +2257,53 @@ static av_cold int decode_init(AVCodecContext *avctx) return 0; } +static const AVProfile mpeg4_video_profiles[] = { + { FF_PROFILE_MPEG4_SIMPLE, "Simple Profile" }, + { FF_PROFILE_MPEG4_SIMPLE_SCALABLE, "Simple Scalable Profile" }, + { FF_PROFILE_MPEG4_CORE, "Core Profile" }, + { FF_PROFILE_MPEG4_MAIN, "Main Profile" }, + { FF_PROFILE_MPEG4_N_BIT, "N-bit Profile" }, + { FF_PROFILE_MPEG4_SCALABLE_TEXTURE, "Scalable Texture Profile" }, + { FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION, "Simple Face Animation Profile" }, + { FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE, "Basic Animated Texture Profile" }, + { FF_PROFILE_MPEG4_HYBRID, "Hybrid Profile" }, + { FF_PROFILE_MPEG4_ADVANCED_REAL_TIME, "Advanced Real Time Simple Profile" }, + { FF_PROFILE_MPEG4_CORE_SCALABLE, "Code Scalable Profile" }, + { FF_PROFILE_MPEG4_ADVANCED_CODING, "Advanced Coding Profile" }, + { 
FF_PROFILE_MPEG4_ADVANCED_CORE, "Advanced Core Profile" }, + { FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE, "Advanced Scalable Texture Profile" }, + { FF_PROFILE_MPEG4_SIMPLE_STUDIO, "Simple Studio Profile" }, + { FF_PROFILE_MPEG4_ADVANCED_SIMPLE, "Advanced Simple Profile" }, +}; + AVCodec ff_mpeg4_decoder = { - "mpeg4", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MPEG4, - sizeof(MpegEncContext), - decode_init, - NULL, - ff_h263_decode_end, - ff_h263_decode_frame, - CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_FRAME_THREADS, + .name = "mpeg4", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MPEG4, + .priv_data_size = sizeof(MpegEncContext), + .init = decode_init, + .close = ff_h263_decode_end, + .decode = ff_h263_decode_frame, + .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_FRAME_THREADS, .flush= ff_mpeg_flush, .max_lowres= 3, .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2"), .pix_fmts= ff_hwaccel_pixfmt_list_420, + .profiles = NULL_IF_CONFIG_SMALL(mpeg4_video_profiles), .update_thread_context= ONLY_IF_THREADS_ENABLED(ff_mpeg_update_thread_context) }; #if CONFIG_MPEG4_VDPAU_DECODER AVCodec ff_mpeg4_vdpau_decoder = { - "mpeg4_vdpau", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MPEG4, - sizeof(MpegEncContext), - decode_init, - NULL, - ff_h263_decode_end, - ff_h263_decode_frame, - CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU, + .name = "mpeg4_vdpau", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MPEG4, + .priv_data_size = sizeof(MpegEncContext), + .init = decode_init, + .close = ff_h263_decode_end, + .decode = ff_h263_decode_frame, + .capabilities = CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU, .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 (VDPAU)"), .pix_fmts= (const enum PixelFormat[]){PIX_FMT_VDPAU_MPEG4, PIX_FMT_NONE}, }; diff --git a/libavcodec/mpeg4videoenc.c b/libavcodec/mpeg4videoenc.c index bdff535a99..db0db045e8 100644 --- a/libavcodec/mpeg4videoenc.c +++ b/libavcodec/mpeg4videoenc.c @@ -124,7 +124,7 @@ static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], const { int score= 0; int i, n; - int8_t * const qscale_table= s->current_picture.qscale_table; + int8_t * const qscale_table = s->current_picture.f.qscale_table; memcpy(zigzag_last_index, s->block_last_index, sizeof(int)*6); @@ -201,7 +201,7 @@ static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], const */ void ff_clean_mpeg4_qscales(MpegEncContext *s){ int i; - int8_t * const qscale_table= s->current_picture.qscale_table; + int8_t * const qscale_table = s->current_picture.f.qscale_table; ff_clean_h263_qscales(s); @@ -296,10 +296,6 @@ static inline void mpeg4_encode_block(MpegEncContext * s, DCTELEM * block, int n uint8_t *scan_table, PutBitContext *dc_pb, PutBitContext *ac_pb) { int i, last_non_zero; -#if 0 //variables for the outcommented version - int code, sign, last; -#endif - const RLTable *rl; uint32_t *bits_tab; uint8_t *len_tab; const int last_index = s->block_last_index[n]; @@ -309,20 +305,17 @@ static inline void mpeg4_encode_block(MpegEncContext * s, DCTELEM * block, int n mpeg4_encode_dc(dc_pb, intra_dc, n); if(last_index<1) return; i = 1; - rl = &ff_mpeg4_rl_intra; bits_tab= uni_mpeg4_intra_rl_bits; len_tab = uni_mpeg4_intra_rl_len; } else { if(last_index<0) return; i = 0; - rl = &ff_h263_rl_inter; bits_tab= uni_mpeg4_inter_rl_bits; len_tab = uni_mpeg4_inter_rl_len; } /* AC coefs */ last_non_zero = i - 1; -#if 1 
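The AVCodec changes above and throughout this merge are largely mechanical: positional aggregate initializers are replaced by C99 designated initializers, so unused callbacks no longer need NULL placeholders and each field is named at the point of use. A minimal sketch of the conversion (ff_example_decoder, ExampleContext and the example_* callbacks are placeholders, not codecs from this tree):

    /* before: order-dependent, with explicit NULLs for unused slots */
    AVCodec ff_example_decoder = {
        "example", AVMEDIA_TYPE_VIDEO, CODEC_ID_EXAMPLE,
        sizeof(ExampleContext), example_init, NULL /* encode */,
        example_close, example_decode, CODEC_CAP_DR1,
    };

    /* after: order-independent and self-documenting */
    AVCodec ff_example_decoder = {
        .name           = "example",
        .type           = AVMEDIA_TYPE_VIDEO,
        .id             = CODEC_ID_EXAMPLE,
        .priv_data_size = sizeof(ExampleContext),
        .init           = example_init,
        .close          = example_close,
        .decode         = example_decode,
        .capabilities   = CODEC_CAP_DR1,
    };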
for (; i < last_index; i++) { int level = block[ scan_table[i] ]; if (level) { @@ -348,64 +341,6 @@ static inline void mpeg4_encode_block(MpegEncContext * s, DCTELEM * block, int n put_bits(ac_pb, 7+2+1+6+1+12+1, (3<<23)+(3<<21)+(1<<20)+(run<<14)+(1<<13)+(((level-64)&0xfff)<<1)+1); } } -#else - for (; i <= last_index; i++) { - const int slevel = block[ scan_table[i] ]; - if (slevel) { - int level; - int run = i - last_non_zero - 1; - last = (i == last_index); - sign = 0; - level = slevel; - if (level < 0) { - sign = 1; - level = -level; - } - code = get_rl_index(rl, last, run, level); - put_bits(ac_pb, rl->table_vlc[code][1], rl->table_vlc[code][0]); - if (code == rl->n) { - int level1, run1; - level1 = level - rl->max_level[last][run]; - if (level1 < 1) - goto esc2; - code = get_rl_index(rl, last, run, level1); - if (code == rl->n) { - esc2: - put_bits(ac_pb, 1, 1); - if (level > MAX_LEVEL) - goto esc3; - run1 = run - rl->max_run[last][level] - 1; - if (run1 < 0) - goto esc3; - code = get_rl_index(rl, last, run1, level); - if (code == rl->n) { - esc3: - /* third escape */ - put_bits(ac_pb, 1, 1); - put_bits(ac_pb, 1, last); - put_bits(ac_pb, 6, run); - put_bits(ac_pb, 1, 1); - put_sbits(ac_pb, 12, slevel); - put_bits(ac_pb, 1, 1); - } else { - /* second escape */ - put_bits(ac_pb, 1, 0); - put_bits(ac_pb, rl->table_vlc[code][1], rl->table_vlc[code][0]); - put_bits(ac_pb, 1, sign); - } - } else { - /* first escape */ - put_bits(ac_pb, 1, 0); - put_bits(ac_pb, rl->table_vlc[code][1], rl->table_vlc[code][0]); - put_bits(ac_pb, 1, sign); - } - } else { - put_bits(ac_pb, 1, sign); - } - last_non_zero = i; - } - } -#endif } static int mpeg4_get_block_length(MpegEncContext * s, DCTELEM * block, int n, int intra_dc, @@ -522,7 +457,7 @@ void mpeg4_encode_mb(MpegEncContext * s, assert(mb_type>=0); /* nothing to do if this MB was skipped in the next P Frame */ - if(s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]){ //FIXME avoid DCT & ... + if (s->next_picture.f.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]) { //FIXME avoid DCT & ... 
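Most of the remaining churn in this merge follows from a single structural change, visible in the mpegvideo.h hunk further down: Picture no longer pastes the frame fields in via the FF_COMMON_FRAME macro but embeds the public frame as a named member, so every per-frame field is reached through an extra ".f". Roughly (condensed; the real header has more members and a slightly different order):

    /* before: FF_COMMON_FRAME expanded AVFrame's fields directly into Picture */
    typedef struct Picture {
        FF_COMMON_FRAME              /* data[], linesize[], mb_type, qscale_table, ... */
        uint8_t *interpolated[3];    /* mpegvideo-private fields follow */
    } Picture;
    pic->mb_type[xy] = MB_TYPE_INTRA;

    /* after: the AVFrame is an ordinary member named f */
    typedef struct Picture {
        struct AVFrame f;            /* public, per-frame fields */
        uint8_t *interpolated[3];
        int8_t  *qscale_table_base;  /* private backing store, see ff_alloc_picture */
    } Picture;
    pic->f.mb_type[xy] = MB_TYPE_INTRA;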
s->skip_count++; s->mv[0][0][0]= s->mv[0][0][1]= @@ -652,7 +587,7 @@ void mpeg4_encode_mb(MpegEncContext * s, y= s->mb_y*16; offset= x + y*s->linesize; - p_pic= s->new_picture.data[0] + offset; + p_pic = s->new_picture.f.data[0] + offset; s->mb_skipped=1; for(i=0; i<s->max_b_frames; i++){ @@ -660,10 +595,11 @@ void mpeg4_encode_mb(MpegEncContext * s, int diff; Picture *pic= s->reordered_input_picture[i+1]; - if(pic==NULL || pic->pict_type!=AV_PICTURE_TYPE_B) break; + if (pic == NULL || pic->f.pict_type != AV_PICTURE_TYPE_B) + break; - b_pic= pic->data[0] + offset; - if(pic->type != FF_BUFFER_TYPE_SHARED) + b_pic = pic->f.data[0] + offset; + if (pic->f.type != FF_BUFFER_TYPE_SHARED) b_pic+= INPLACE_OFFSET; if(x+16 > s->width || y+16 > s->height){ @@ -781,8 +717,8 @@ void mpeg4_encode_mb(MpegEncContext * s, /* motion vectors: 8x8 mode*/ h263_pred_motion(s, i, 0, &pred_x, &pred_y); - ff_h263_encode_motion_vector(s, s->current_picture.motion_val[0][ s->block_index[i] ][0] - pred_x, - s->current_picture.motion_val[0][ s->block_index[i] ][1] - pred_y, s->f_code); + ff_h263_encode_motion_vector(s, s->current_picture.f.motion_val[0][ s->block_index[i] ][0] - pred_x, + s->current_picture.f.motion_val[0][ s->block_index[i] ][1] - pred_y, s->f_code); } } @@ -891,9 +827,9 @@ static void mpeg4_encode_gop_header(MpegEncContext * s){ put_bits(&s->pb, 16, 0); put_bits(&s->pb, 16, GOP_STARTCODE); - time= s->current_picture_ptr->pts; + time = s->current_picture_ptr->f.pts; if(s->reordered_input_picture[1]) - time= FFMIN(time, s->reordered_input_picture[1]->pts); + time = FFMIN(time, s->reordered_input_picture[1]->f.pts); time= time*s->avctx->time_base.num; s->last_time_base= FFUDIV(time, s->avctx->time_base.den); @@ -1101,7 +1037,7 @@ void mpeg4_encode_picture_header(MpegEncContext * s, int picture_number) } put_bits(&s->pb, 3, 0); /* intra dc VLC threshold */ if(!s->progressive_sequence){ - put_bits(&s->pb, 1, s->current_picture_ptr->top_field_first); + put_bits(&s->pb, 1, s->current_picture_ptr->f.top_field_first); put_bits(&s->pb, 1, s->alternate_scan); } //FIXME sprite stuff @@ -1349,13 +1285,13 @@ void ff_mpeg4_encode_video_packet_header(MpegEncContext *s) } AVCodec ff_mpeg4_encoder = { - "mpeg4", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MPEG4, - sizeof(MpegEncContext), - encode_init, - MPV_encode_picture, - MPV_encode_end, + .name = "mpeg4", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MPEG4, + .priv_data_size = sizeof(MpegEncContext), + .init = encode_init, + .encode = MPV_encode_picture, + .close = MPV_encode_end, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .capabilities= CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS, .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2"), diff --git a/libavcodec/mpegaudioenc.c b/libavcodec/mpegaudioenc.c index a5859837ef..0ca99bac15 100644 --- a/libavcodec/mpegaudioenc.c +++ b/libavcodec/mpegaudioenc.c @@ -315,8 +315,6 @@ static void filter(MpegAudioContext *s, int ch, const short *samples, int incr) int tmp1[32]; int *out; - // print_pow1(samples, 1152); - offset = s->samples_offset[ch]; out = &s->sb_samples[ch][0][0][0]; for(j=0;j<36;j++) { @@ -360,8 +358,6 @@ static void filter(MpegAudioContext *s, int ch, const short *samples, int incr) } } s->samples_offset[ch] = offset; - - // print_pow(s->sb_samples, 1152); } static void compute_scale_factors(unsigned char scale_code[SBLIMIT], @@ -768,14 +764,13 @@ static av_cold int MPA_encode_close(AVCodecContext *avctx) } AVCodec ff_mp2_encoder = { - "mp2", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_MP2, - 
sizeof(MpegAudioContext), - MPA_encode_init, - MPA_encode_frame, - MPA_encode_close, - NULL, + .name = "mp2", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_MP2, + .priv_data_size = sizeof(MpegAudioContext), + .init = MPA_encode_init, + .encode = MPA_encode_frame, + .close = MPA_encode_close, .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, .supported_samplerates= (const int[]){44100, 48000, 32000, 22050, 24000, 16000, 0}, .long_name = NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"), diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c index cdbda34185..ba6115e2aa 100644 --- a/libavcodec/mpegvideo.c +++ b/libavcodec/mpegvideo.c @@ -155,6 +155,8 @@ const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end /* init common dct for both encoder and decoder */ av_cold int ff_dct_common_init(MpegEncContext *s) { + dsputil_init(&s->dsp, s->avctx); + s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c; s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c; s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c; @@ -198,7 +200,7 @@ av_cold int ff_dct_common_init(MpegEncContext *s) void ff_copy_picture(Picture *dst, Picture *src){ *dst = *src; - dst->type= FF_BUFFER_TYPE_COPY; + dst->f.type= FF_BUFFER_TYPE_COPY; } /** @@ -207,7 +209,7 @@ void ff_copy_picture(Picture *dst, Picture *src){ static void free_frame_buffer(MpegEncContext *s, Picture *pic) { ff_thread_release_buffer(s->avctx, (AVFrame*)pic); - av_freep(&pic->hwaccel_picture_private); + av_freep(&pic->f.hwaccel_picture_private); } /** @@ -220,8 +222,8 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic) if (s->avctx->hwaccel) { assert(!pic->hwaccel_picture_private); if (s->avctx->hwaccel->priv_data_size) { - pic->hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size); - if (!pic->hwaccel_picture_private) { + pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size); + if (!pic->f.hwaccel_picture_private) { av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n"); return -1; } @@ -230,19 +232,20 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic) r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic); - if (r<0 || !pic->age || !pic->type || !pic->data[0]) { - av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]); - av_freep(&pic->hwaccel_picture_private); + if (r < 0 || !pic->f.age || !pic->f.type || !pic->f.data[0]) { + av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", + r, pic->f.age, pic->f.type, pic->f.data[0]); + av_freep(&pic->f.hwaccel_picture_private); return -1; } - if (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])) { + if (s->linesize && (s->linesize != pic->f.linesize[0] || s->uvlinesize != pic->f.linesize[1])) { av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n"); free_frame_buffer(s, pic); return -1; } - if (pic->linesize[1] != pic->linesize[2]) { + if (pic->f.linesize[1] != pic->f.linesize[2]) { av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n"); free_frame_buffer(s, pic); return -1; @@ -264,59 +267,60 @@ int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){ int r= -1; if(shared){ - assert(pic->data[0]); + assert(pic->f.data[0]); assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED); - pic->type= FF_BUFFER_TYPE_SHARED; + pic->f.type = FF_BUFFER_TYPE_SHARED; }else{ - 
assert(!pic->data[0]); + assert(!pic->f.data[0]); if (alloc_frame_buffer(s, pic) < 0) return -1; - s->linesize = pic->linesize[0]; - s->uvlinesize= pic->linesize[1]; + s->linesize = pic->f.linesize[0]; + s->uvlinesize = pic->f.linesize[1]; } - if(pic->qscale_table==NULL){ + if (pic->f.qscale_table == NULL) { if (s->encoding) { FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var , mb_array_size * sizeof(int16_t) , fail) FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t) , fail) FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean , mb_array_size * sizeof(int8_t ) , fail) } - FF_ALLOCZ_OR_GOTO(s->avctx, pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2, fail) //the +2 is for the slice end check - FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table , mb_array_size * sizeof(uint8_t) , fail) + FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table, mb_array_size * sizeof(uint8_t) + 2, fail) //the +2 is for the slice end check + FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base , (big_mb_num + s->mb_stride) * sizeof(uint8_t) , fail) FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail) - pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1; + pic->f.mb_type = pic->mb_type_base + 2*s->mb_stride + 1; + pic->f.qscale_table = pic->qscale_table_base + 2*s->mb_stride + 1; if(s->out_format == FMT_H264){ for(i=0; i<2; i++){ FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t), fail) - pic->motion_val[i]= pic->motion_val_base[i]+4; - FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail) + pic->f.motion_val[i] = pic->motion_val_base[i] + 4; + FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail) } - pic->motion_subsample_log2= 2; + pic->f.motion_subsample_log2 = 2; }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){ for(i=0; i<2; i++){ FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail) - pic->motion_val[i]= pic->motion_val_base[i]+4; - FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail) + pic->f.motion_val[i] = pic->motion_val_base[i] + 4; + FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail) } - pic->motion_subsample_log2= 3; + pic->f.motion_subsample_log2 = 3; } if(s->avctx->debug&FF_DEBUG_DCT_COEFF) { - FF_ALLOCZ_OR_GOTO(s->avctx, pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6, fail) + FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff, 64 * mb_array_size * sizeof(DCTELEM) * 6, fail) } - pic->qstride= s->mb_stride; - FF_ALLOCZ_OR_GOTO(s->avctx, pic->pan_scan , 1 * sizeof(AVPanScan), fail) + pic->f.qstride = s->mb_stride; + FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan , 1 * sizeof(AVPanScan), fail) } /* It might be nicer if the application would keep track of these * but it would require an API change. */ memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1); s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type; - if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == AV_PICTURE_TYPE_B) - pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway. 
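One functional change hides in the ff_alloc_picture hunk above: qscale_table is now allocated the same way as mb_type, from a padded base array with the public pointer offset into it, so reads of a macroblock's top/left neighbours stay inside the allocation even at the picture border. The pattern, in essence (the real code uses FF_ALLOCZ_OR_GOTO; sizes as in the hunk):

    /* padded allocation, mirroring mb_type_base */
    pic->qscale_table_base = av_mallocz((big_mb_num + s->mb_stride) * sizeof(uint8_t));
    if (!pic->qscale_table_base)
        goto fail;

    /* the public pointer starts 2*mb_stride + 1 entries in, so e.g.
       qscale_table[xy - mb_stride - 1] (the top-left neighbour) stays in bounds */
    pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;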
- pic->owner2 = NULL; + if (pic->f.age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->f.age] == AV_PICTURE_TYPE_B) + pic->f.age = INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway. + pic->owner2 = s; return 0; fail: //for the FF_ALLOCZ_OR_GOTO macro @@ -331,30 +335,30 @@ fail: //for the FF_ALLOCZ_OR_GOTO macro static void free_picture(MpegEncContext *s, Picture *pic){ int i; - if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){ + if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) { free_frame_buffer(s, pic); } av_freep(&pic->mb_var); av_freep(&pic->mc_mb_var); av_freep(&pic->mb_mean); - av_freep(&pic->mbskip_table); - av_freep(&pic->qscale_table); + av_freep(&pic->f.mbskip_table); + av_freep(&pic->qscale_table_base); av_freep(&pic->mb_type_base); - av_freep(&pic->dct_coeff); - av_freep(&pic->pan_scan); - pic->mb_type= NULL; + av_freep(&pic->f.dct_coeff); + av_freep(&pic->f.pan_scan); + pic->f.mb_type = NULL; for(i=0; i<2; i++){ av_freep(&pic->motion_val_base[i]); - av_freep(&pic->ref_index[i]); + av_freep(&pic->f.ref_index[i]); } - if(pic->type == FF_BUFFER_TYPE_SHARED){ + if (pic->f.type == FF_BUFFER_TYPE_SHARED) { for(i=0; i<4; i++){ - pic->base[i]= - pic->data[i]= NULL; + pic->f.base[i] = + pic->f.data[i] = NULL; } - pic->type= 0; + pic->f.type = 0; } } @@ -525,7 +529,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src if(!s1->first_field){ s->last_pict_type= s1->pict_type; - if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->quality; + if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality; if(s1->pict_type!=FF_B_TYPE){ s->last_non_b_pict_type= s1->pict_type; @@ -574,7 +578,11 @@ void MPV_decode_defaults(MpegEncContext *s){ */ av_cold int MPV_common_init(MpegEncContext *s) { - int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads; + int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, + threads = (s->encoding || + (HAVE_THREADS && + s->avctx->active_thread_type & FF_THREAD_SLICE)) ? 
+ s->avctx->thread_count : 1; if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence) s->mb_height = (s->height + 31) / 32 * 2; @@ -588,14 +596,15 @@ av_cold int MPV_common_init(MpegEncContext *s) if((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) && (s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height))){ - av_log(s->avctx, AV_LOG_ERROR, "too many threads\n"); - return -1; + int max_threads = FFMIN(MAX_THREADS, s->mb_height); + av_log(s->avctx, AV_LOG_WARNING, "too many threads (%d), reducing to %d\n", + s->avctx->thread_count, max_threads); + threads = max_threads; } if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx)) return -1; - dsputil_init(&s->dsp, s->avctx); ff_dct_common_init(s); s->flags= s->avctx->flags; @@ -746,8 +755,6 @@ av_cold int MPV_common_init(MpegEncContext *s) s->thread_context[0]= s; if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) { - threads = s->avctx->thread_count; - for(i=1; i<threads; i++){ s->thread_context[i]= av_malloc(sizeof(MpegEncContext)); memcpy(s->thread_context[i], s, sizeof(MpegEncContext)); @@ -955,7 +962,7 @@ void ff_release_unused_pictures(MpegEncContext *s, int remove_current) /* release non reference frames */ for(i=0; i<s->picture_count; i++){ - if(s->picture[i].data[0] && !s->picture[i].reference + if (s->picture[i].f.data[0] && !s->picture[i].f.reference && (!s->picture[i].owner2 || s->picture[i].owner2 == s) && (remove_current || &s->picture[i] != s->current_picture_ptr) /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){ @@ -969,14 +976,17 @@ int ff_find_unused_picture(MpegEncContext *s, int shared){ if(shared){ for(i=s->picture_range_start; i<s->picture_range_end; i++){ - if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i; + if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0) + return i; } }else{ for(i=s->picture_range_start; i<s->picture_range_end; i++){ - if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME + if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0) + return i; //FIXME } for(i=s->picture_range_start; i<s->picture_range_end; i++){ - if(s->picture[i].data[0]==NULL) return i; + if (s->picture[i].f.data[0] == NULL) + return i; } } @@ -1025,16 +1035,18 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3); /* mark&release old frames */ - if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) { + if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->f.data[0]) { if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){ - free_frame_buffer(s, s->last_picture_ptr); + if (s->last_picture_ptr->owner2 == s) + free_frame_buffer(s, s->last_picture_ptr); /* release forgotten pictures */ /* if(mpeg124/h263) */ if(!s->encoding){ for(i=0; i<s->picture_count; i++){ - if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){ - av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n"); + if (s->picture[i].owner2 == s && s->picture[i].f.data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].f.reference) { + if (!(avctx->active_thread_type & FF_THREAD_FRAME)) + av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n"); 
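The MPV_common_init changes above also soften an error into a warning: a thread_count larger than MAX_THREADS, or larger than the number of macroblock rows, no longer aborts initialization but is clamped. Condensed from the hunk (same names as in the source):

    int threads = (s->encoding ||
                   (HAVE_THREADS && s->avctx->active_thread_type & FF_THREAD_SLICE))
                  ? s->avctx->thread_count : 1;

    if (threads > MAX_THREADS || (threads > s->mb_height && s->mb_height)) {
        int max_threads = FFMIN(MAX_THREADS, s->mb_height);
        av_log(s->avctx, AV_LOG_WARNING, "too many threads (%d), reducing to %d\n",
               threads, max_threads);
        threads = max_threads;    /* previously: return -1 */
    }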
free_frame_buffer(s, &s->picture[i]); } } @@ -1045,41 +1057,41 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) if(!s->encoding){ ff_release_unused_pictures(s, 1); - if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL) + if (s->current_picture_ptr && s->current_picture_ptr->f.data[0] == NULL) pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header) else{ i= ff_find_unused_picture(s, 0); pic= &s->picture[i]; } - pic->reference= 0; + pic->f.reference = 0; if (!s->dropable){ if (s->codec_id == CODEC_ID_H264) - pic->reference = s->picture_structure; + pic->f.reference = s->picture_structure; else if (s->pict_type != AV_PICTURE_TYPE_B) - pic->reference = 3; + pic->f.reference = 3; } - pic->coded_picture_number= s->coded_picture_number++; + pic->f.coded_picture_number = s->coded_picture_number++; if(ff_alloc_picture(s, pic, 0) < 0) return -1; s->current_picture_ptr= pic; //FIXME use only the vars from current_pic - s->current_picture_ptr->top_field_first= s->top_field_first; + s->current_picture_ptr->f.top_field_first = s->top_field_first; if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) { if(s->picture_structure != PICT_FRAME) - s->current_picture_ptr->top_field_first= (s->picture_structure == PICT_TOP_FIELD) == s->first_field; + s->current_picture_ptr->f.top_field_first = (s->picture_structure == PICT_TOP_FIELD) == s->first_field; } - s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence; - s->current_picture_ptr->field_picture= s->picture_structure != PICT_FRAME; + s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame && !s->progressive_sequence; + s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME; } - s->current_picture_ptr->pict_type= s->pict_type; + s->current_picture_ptr->f.pict_type = s->pict_type; // if(s->flags && CODEC_FLAG_QSCALE) // s->current_picture_ptr->quality= s->new_picture_ptr->quality; - s->current_picture_ptr->key_frame= s->pict_type == AV_PICTURE_TYPE_I; + s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; ff_copy_picture(&s->current_picture, s->current_picture_ptr); @@ -1089,13 +1101,13 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) s->next_picture_ptr= s->current_picture_ptr; } /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr, - s->last_picture_ptr ? s->last_picture_ptr->data[0] : NULL, - s->next_picture_ptr ? s->next_picture_ptr->data[0] : NULL, - s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL, + s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL, + s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL, + s->current_picture_ptr ? 
s->current_picture_ptr->f.data[0] : NULL, s->pict_type, s->dropable);*/ if(s->codec_id != CODEC_ID_H264){ - if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && + if ((s->last_picture_ptr == NULL || s->last_picture_ptr->f.data[0] == NULL) && (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){ if (s->pict_type != AV_PICTURE_TYPE_I) av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n"); @@ -1105,15 +1117,17 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) /* Allocate a dummy frame */ i= ff_find_unused_picture(s, 0); s->last_picture_ptr= &s->picture[i]; + s->last_picture_ptr->f.key_frame = 0; if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) return -1; ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0); ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1); } - if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==AV_PICTURE_TYPE_B){ + if ((s->next_picture_ptr == NULL || s->next_picture_ptr->f.data[0] == NULL) && s->pict_type == AV_PICTURE_TYPE_B) { /* Allocate a dummy frame */ i= ff_find_unused_picture(s, 0); s->next_picture_ptr= &s->picture[i]; + s->next_picture_ptr->f.key_frame = 0; if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) return -1; ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0); @@ -1124,17 +1138,17 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr); if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr); - assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->data[0])); + assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->f.data[0])); if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){ int i; for(i=0; i<4; i++){ if(s->picture_structure == PICT_BOTTOM_FIELD){ - s->current_picture.data[i] += s->current_picture.linesize[i]; + s->current_picture.f.data[i] += s->current_picture.f.linesize[i]; } - s->current_picture.linesize[i] *= 2; - s->last_picture.linesize[i] *=2; - s->next_picture.linesize[i] *=2; + s->current_picture.f.linesize[i] *= 2; + s->last_picture.f.linesize[i] *= 2; + s->next_picture.f.linesize[i] *= 2; } } @@ -1180,18 +1194,18 @@ void MPV_frame_end(MpegEncContext *s) && !s->avctx->hwaccel && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) && s->unrestricted_mv - && s->current_picture.reference + && s->current_picture.f.reference && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) { int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w; int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h; - s->dsp.draw_edges(s->current_picture.data[0], s->linesize , + s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize, s->h_edge_pos , s->v_edge_pos, EDGE_WIDTH , EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM); - s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize, + s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize, s->h_edge_pos>>hshift, s->v_edge_pos>>vshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM); - s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize, + s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize, s->h_edge_pos>>hshift, s->v_edge_pos>>vshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM); } @@ -1199,14 +1213,14 @@ void MPV_frame_end(MpegEncContext *s) emms_c(); s->last_pict_type = s->pict_type; 
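The field-picture handling just above (offsetting data[] for the bottom field and doubling linesize[]) shows why these per-picture copies carry their own pointers and strides: a single field occupies every second row of the frame buffer. A minimal sketch of the addressing (field_row, frame, stride, bottom_field and y are illustrative names, not from the source):

    static uint8_t *field_row(uint8_t *frame, int stride, int bottom_field, int y)
    {
        /* the bottom field is offset by one frame line; rows within a field are
           two frame lines apart, i.e. the effective linesize is doubled */
        return frame + (bottom_field ? stride : 0) + y * 2 * stride;
    }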
- s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality; + s->last_lambda_for[s->pict_type] = s->current_picture_ptr->f.quality; if(s->pict_type!=AV_PICTURE_TYPE_B){ s->last_non_b_pict_type= s->pict_type; } #if 0 /* copy back current_picture variables */ for(i=0; i<MAX_PICTURE_COUNT; i++){ - if(s->picture[i].data[0] == s->current_picture.data[0]){ + if(s->picture[i].f.data[0] == s->current_picture.f.data[0]){ s->picture[i]= s->current_picture; break; } @@ -1217,7 +1231,7 @@ void MPV_frame_end(MpegEncContext *s) if(s->encoding){ /* release non-reference frames */ for(i=0; i<s->picture_count; i++){ - if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){ + if (s->picture[i].f.data[0] && !s->picture[i].f.reference /*&& s->picture[i].type != FF_BUFFER_TYPE_SHARED*/) { free_frame_buffer(s, &s->picture[i]); } } @@ -1230,7 +1244,7 @@ void MPV_frame_end(MpegEncContext *s) #endif s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr; - if (s->codec_id != CODEC_ID_H264 && s->current_picture.reference) { + if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) { ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0); } } @@ -1631,8 +1645,8 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, const int s_mask= (2<<lowres)-1; const int h_edge_pos = s->h_edge_pos >> lowres; const int v_edge_pos = s->v_edge_pos >> lowres; - linesize = s->current_picture.linesize[0] << field_based; - uvlinesize = s->current_picture.linesize[1] << field_based; + linesize = s->current_picture.f.linesize[0] << field_based; + uvlinesize = s->current_picture.f.linesize[1] << field_based; if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway motion_x/=2; @@ -1706,7 +1720,7 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, } } - if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data + if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data dest_y += s->linesize; dest_cb+= s->uvlinesize; dest_cr+= s->uvlinesize; @@ -1848,7 +1862,7 @@ static inline void MPV_motion_lowres(MpegEncContext *s, s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y); } else { if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){ - ref_picture= s->current_picture_ptr->data; + ref_picture = s->current_picture_ptr->f.data; } mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr, @@ -1864,7 +1878,7 @@ static inline void MPV_motion_lowres(MpegEncContext *s, if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){ ref2picture= ref_picture; }else{ - ref2picture= s->current_picture_ptr->data; + ref2picture = s->current_picture_ptr->f.data; } mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr, @@ -1901,7 +1915,7 @@ static inline void MPV_motion_lowres(MpegEncContext *s, //opposite parity is always in the same frame if this is second field if(!s->first_field){ - ref_picture = s->current_picture_ptr->data; + ref_picture = s->current_picture_ptr->f.data; } } } @@ -2030,7 +2044,7 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64], if(s->avctx->debug&FF_DEBUG_DCT_COEFF) { /* save DCT coefficients */ int i,j; - DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6]; + DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6]; av_log(s->avctx, AV_LOG_DEBUG, "DCT 
coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y); for(i=0; i<6; i++){ for(j=0; j<64; j++){ @@ -2041,7 +2055,7 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64], } } - s->current_picture.qscale_table[mb_xy]= s->qscale; + s->current_picture.f.qscale_table[mb_xy] = s->qscale; /* update DC predictors for P macroblocks */ if (!s->mb_intra) { @@ -2062,8 +2076,8 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64], int dct_linesize, dct_offset; op_pixels_func (*op_pix)[4]; qpel_mc_func (*op_qpix)[16]; - const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics - const int uvlinesize= s->current_picture.linesize[1]; + const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics + const int uvlinesize = s->current_picture.f.linesize[1]; const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag; const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8; @@ -2071,7 +2085,7 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64], /* skip only during decoding as we might trash the buffers during encoding a bit */ if(!s->encoding){ uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy]; - const int age= s->current_picture.age; + const int age = s->current_picture.f.age; assert(age); @@ -2083,10 +2097,10 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64], if(*mbskip_ptr >99) *mbskip_ptr= 99; /* if previous was skipped too, then nothing to do ! */ - if (*mbskip_ptr >= age && s->current_picture.reference){ + if (*mbskip_ptr >= age && s->current_picture.f.reference){ return; } - } else if(!s->current_picture.reference){ + } else if(!s->current_picture.f.reference) { (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */ if(*mbskip_ptr >99) *mbskip_ptr= 99; } else{ @@ -2125,11 +2139,11 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64], h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab; if (s->mv_dir & MV_DIR_FORWARD) { - MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix); + MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix); op_pix = s->dsp.avg_h264_chroma_pixels_tab; } if (s->mv_dir & MV_DIR_BACKWARD) { - MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix); + MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix); } }else{ op_qpix= s->me.qpel_put; @@ -2139,12 +2153,12 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64], op_pix = s->dsp.put_no_rnd_pixels_tab; } if (s->mv_dir & MV_DIR_FORWARD) { - MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix); + MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix); op_pix = s->dsp.avg_pixels_tab; op_qpix= s->me.qpel_avg; } if (s->mv_dir & MV_DIR_BACKWARD) { - MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix); + MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix); } } } @@ -2296,7 +2310,7 @@ void ff_draw_horiz_band(MpegEncContext *s, int y, int h){ if (!s->avctx->hwaccel && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) && s->unrestricted_mv - && s->current_picture.reference + && s->current_picture.f.reference && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) { int sides = 0, edge_h; @@ -2307,11 +2321,11 @@ void 
ff_draw_horiz_band(MpegEncContext *s, int y, int h){ edge_h= FFMIN(h, s->v_edge_pos - y); - s->dsp.draw_edges(s->current_picture_ptr->data[0] + y *s->linesize , s->linesize, + s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize , s->linesize, s->h_edge_pos , edge_h , EDGE_WIDTH , EDGE_WIDTH , sides); - s->dsp.draw_edges(s->current_picture_ptr->data[1] + (y>>vshift)*s->uvlinesize, s->uvlinesize, + s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize, s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides); - s->dsp.draw_edges(s->current_picture_ptr->data[2] + (y>>vshift)*s->uvlinesize, s->uvlinesize, + s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize, s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides); } @@ -2350,8 +2364,8 @@ void ff_draw_horiz_band(MpegEncContext *s, int y, int h){ } void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename - const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics - const int uvlinesize= s->current_picture.linesize[1]; + const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics + const int uvlinesize = s->current_picture.f.linesize[1]; const int mb_size= 4 - s->avctx->lowres; s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2; @@ -2362,9 +2376,9 @@ void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1; //block_index is not used by mpeg2, so it is not affected by chroma_format - s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size); - s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift)); - s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift)); + s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size); + s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift)); + s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift)); if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME)) { @@ -2389,8 +2403,9 @@ void ff_mpeg_flush(AVCodecContext *avctx){ return; for(i=0; i<s->picture_count; i++){ - if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL - || s->picture[i].type == FF_BUFFER_TYPE_USER)) + if (s->picture[i].f.data[0] && + (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL || + s->picture[i].f.type == FF_BUFFER_TYPE_USER)) free_frame_buffer(s, &s->picture[i]); } s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL; diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h index 499792dff7..4d4437b1cc 100644 --- a/libavcodec/mpegvideo.h +++ b/libavcodec/mpegvideo.h @@ -28,6 +28,7 @@ #ifndef AVCODEC_MPEGVIDEO_H #define AVCODEC_MPEGVIDEO_H +#include "avcodec.h" #include "dsputil.h" #include "get_bits.h" #include "put_bits.h" @@ -82,12 +83,13 @@ struct MpegEncContext; * Picture. */ typedef struct Picture{ - FF_COMMON_FRAME + struct AVFrame f; /** * halfpel luma planes. 
*/ uint8_t *interpolated[3]; + int8_t *qscale_table_base; int16_t (*motion_val_base[2])[2]; uint32_t *mb_type_base; #define MB_TYPE_INTRA MB_TYPE_INTRA4x4 //default mb_type if there is just one type @@ -154,7 +156,7 @@ typedef struct MotionEstContext{ uint32_t *score_map; ///< map to store the scores int map_generation; int pre_penalty_factor; - int penalty_factor; /*!< an estimate of the bits required to + int penalty_factor; /**< an estimate of the bits required to code a given mv value, e.g. (1,0) takes more bits than (0,0). We have to estimate whether any reduction in @@ -209,7 +211,6 @@ typedef struct MpegEncContext { /* the following codec id fields are deprecated in favor of codec_id */ int h263_plus; ///< h263 plus headers - int h263_msmpeg4; ///< generate MSMPEG4 compatible stream (deprecated, use msmpeg4_version instead) int h263_flv; ///< use flv h263 header enum CodecID codec_id; /* see CODEC_ID_xxx */ @@ -233,7 +234,6 @@ typedef struct MpegEncContext { int picture_number; //FIXME remove, unclear definition int picture_in_gop_number; ///< 0-> first pic in gop, ... int b_frames_since_non_b; ///< used for encoding, relative to not yet reordered input - int64_t user_specified_pts;///< last non zero pts from AVFrame which was passed into avcodec_encode_video() int mb_width, mb_height; ///< number of MBs horizontally & vertically int mb_stride; ///< mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 int b8_stride; ///< 2*mb_width+1 used for some 8x8 block arrays to allow simple addressing @@ -260,6 +260,8 @@ typedef struct MpegEncContext { /* WARNING: changes above this line require updates to hardcoded * offsets used in asm. */ + int64_t user_specified_pts;///< last non zero pts from AVFrame which was passed into avcodec_encode_video() + /** bit output */ PutBitContext pb; diff --git a/libavcodec/mpegvideo_common.h b/libavcodec/mpegvideo_common.h index 18e49a63c3..a4d0167f36 100644 --- a/libavcodec/mpegvideo_common.h +++ b/libavcodec/mpegvideo_common.h @@ -255,8 +255,8 @@ if(s->quarter_sample) #endif v_edge_pos = s->v_edge_pos >> field_based; - linesize = s->current_picture.linesize[0] << field_based; - uvlinesize = s->current_picture.linesize[1] << field_based; + linesize = s->current_picture.f.linesize[0] << field_based; + uvlinesize = s->current_picture.f.linesize[1] << field_based; dxy = ((motion_y & 1) << 1) | (motion_x & 1); src_x = s->mb_x* 16 + (motion_x >> 1); @@ -657,30 +657,30 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s, assert(!s->mb_skipped); - memcpy(mv_cache[1][1], s->current_picture.motion_val[0][mot_xy ], sizeof(int16_t)*4); - memcpy(mv_cache[2][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4); - memcpy(mv_cache[3][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4); + memcpy(mv_cache[1][1], s->current_picture.f.motion_val[0][mot_xy ], sizeof(int16_t) * 4); + memcpy(mv_cache[2][1], s->current_picture.f.motion_val[0][mot_xy + mot_stride], sizeof(int16_t) * 4); + memcpy(mv_cache[3][1], s->current_picture.f.motion_val[0][mot_xy + mot_stride], sizeof(int16_t) * 4); - if(mb_y==0 || IS_INTRA(s->current_picture.mb_type[xy-s->mb_stride])){ + if (mb_y == 0 || IS_INTRA(s->current_picture.f.mb_type[xy - s->mb_stride])) { memcpy(mv_cache[0][1], mv_cache[1][1], sizeof(int16_t)*4); }else{ - memcpy(mv_cache[0][1], s->current_picture.motion_val[0][mot_xy-mot_stride], sizeof(int16_t)*4); + memcpy(mv_cache[0][1], s->current_picture.f.motion_val[0][mot_xy - 
mot_stride], sizeof(int16_t) * 4); } - if(mb_x==0 || IS_INTRA(s->current_picture.mb_type[xy-1])){ + if (mb_x == 0 || IS_INTRA(s->current_picture.f.mb_type[xy - 1])) { AV_COPY32(mv_cache[1][0], mv_cache[1][1]); AV_COPY32(mv_cache[2][0], mv_cache[2][1]); }else{ - AV_COPY32(mv_cache[1][0], s->current_picture.motion_val[0][mot_xy-1]); - AV_COPY32(mv_cache[2][0], s->current_picture.motion_val[0][mot_xy-1+mot_stride]); + AV_COPY32(mv_cache[1][0], s->current_picture.f.motion_val[0][mot_xy - 1]); + AV_COPY32(mv_cache[2][0], s->current_picture.f.motion_val[0][mot_xy - 1 + mot_stride]); } - if(mb_x+1>=s->mb_width || IS_INTRA(s->current_picture.mb_type[xy+1])){ + if (mb_x + 1 >= s->mb_width || IS_INTRA(s->current_picture.f.mb_type[xy + 1])) { AV_COPY32(mv_cache[1][3], mv_cache[1][2]); AV_COPY32(mv_cache[2][3], mv_cache[2][2]); }else{ - AV_COPY32(mv_cache[1][3], s->current_picture.motion_val[0][mot_xy+2]); - AV_COPY32(mv_cache[2][3], s->current_picture.motion_val[0][mot_xy+2+mot_stride]); + AV_COPY32(mv_cache[1][3], s->current_picture.f.motion_val[0][mot_xy + 2]); + AV_COPY32(mv_cache[2][3], s->current_picture.f.motion_val[0][mot_xy + 2 + mot_stride]); } mx = 0; @@ -817,7 +817,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s, } } else { if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){ - ref_picture= s->current_picture_ptr->data; + ref_picture = s->current_picture_ptr->f.data; } mpeg_motion(s, dest_y, dest_cb, dest_cr, @@ -834,7 +834,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s, || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){ ref2picture= ref_picture; }else{ - ref2picture= s->current_picture_ptr->data; + ref2picture = s->current_picture_ptr->f.data; } mpeg_motion(s, dest_y, dest_cb, dest_cr, @@ -871,7 +871,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s, //opposite parity is always in the same frame if this is second field if(!s->first_field){ - ref_picture = s->current_picture_ptr->data; + ref_picture = s->current_picture_ptr->f.data; } } } diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c index a6e9c7c7be..57b44c3199 100644 --- a/libavcodec/mpegvideo_enc.c +++ b/libavcodec/mpegvideo_enc.c @@ -28,6 +28,7 @@ */ #include "libavutil/intmath.h" +#include "libavutil/mathematics.h" #include "avcodec.h" #include "dsputil.h" #include "mpegvideo.h" @@ -68,7 +69,8 @@ void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][6 for(qscale=qmin; qscale<=qmax; qscale++){ int i; - if (dsp->fdct == ff_jpeg_fdct_islow + if (dsp->fdct == ff_jpeg_fdct_islow_8 || + dsp->fdct == ff_jpeg_fdct_islow_10 #ifdef FAAN_POSTSCALE || dsp->fdct == ff_faandct #endif @@ -157,7 +159,7 @@ void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix){ * init s->current_picture.qscale_table from s->lambda_table */ void ff_init_qscale_tab(MpegEncContext *s){ - int8_t * const qscale_table= s->current_picture.qscale_table; + int8_t * const qscale_table = s->current_picture.f.qscale_table; int i; for(i=0; i<s->mb_num; i++){ @@ -914,12 +916,12 @@ static int skip_check(MpegEncContext *s, Picture *p, Picture *ref){ int64_t score64=0; for(plane=0; plane<3; plane++){ - const int stride= p->linesize[plane]; + const int stride = p->f.linesize[plane]; const int bw= plane ? 1 : 2; for(y=0; y<s->mb_height*bw; y++){ for(x=0; x<s->mb_width*bw; x++){ - int off= p->type == FF_BUFFER_TYPE_SHARED ? 
0: 16; - int v= s->dsp.frame_skip_cmp[1](s, p->data[plane] + 8*(x + y*stride)+off, ref->data[plane] + 8*(x + y*stride), stride, 8); + int off = p->f.type == FF_BUFFER_TYPE_SHARED ? 0: 16; + int v = s->dsp.frame_skip_cmp[1](s, p->f.data[plane] + 8*(x + y*stride)+off, ref->f.data[plane] + 8*(x + y*stride), stride, 8); switch(s->avctx->frame_skip_exp){ case 0: score= FFMAX(score, v); break; @@ -943,7 +945,7 @@ static int skip_check(MpegEncContext *s, Picture *p, Picture *ref){ static int estimate_best_b_count(MpegEncContext *s){ AVCodec *codec= avcodec_find_encoder(s->avctx->codec_id); - AVCodecContext *c= avcodec_alloc_context(); + AVCodecContext *c = avcodec_alloc_context3(NULL); AVFrame input[FF_MAX_B_FRAMES+2]; const int scale= s->avctx->brd_scale; int i, j, out_size, p_lambda, b_lambda, lambda2; @@ -972,7 +974,7 @@ static int estimate_best_b_count(MpegEncContext *s){ c->time_base= s->avctx->time_base; c->max_b_frames= s->max_b_frames; - if (avcodec_open(c, codec) < 0) + if (avcodec_open2(c, codec, NULL) < 0) return -1; for(i=0; i<s->max_b_frames+2; i++){ @@ -991,15 +993,15 @@ static int estimate_best_b_count(MpegEncContext *s){ if(pre_input_ptr && (!i || s->input_picture[i-1])) { pre_input= *pre_input_ptr; - if(pre_input.type != FF_BUFFER_TYPE_SHARED && i) { - pre_input.data[0]+=INPLACE_OFFSET; - pre_input.data[1]+=INPLACE_OFFSET; - pre_input.data[2]+=INPLACE_OFFSET; + if (pre_input.f.type != FF_BUFFER_TYPE_SHARED && i) { + pre_input.f.data[0] += INPLACE_OFFSET; + pre_input.f.data[1] += INPLACE_OFFSET; + pre_input.f.data[2] += INPLACE_OFFSET; } - s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0], pre_input.data[0], pre_input.linesize[0], c->width, c->height); - s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1], pre_input.data[1], pre_input.linesize[1], c->width>>1, c->height>>1); - s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2], pre_input.data[2], pre_input.linesize[2], c->width>>1, c->height>>1); + s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0], pre_input.f.data[0], pre_input.f.linesize[0], c->width, c->height); + s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1], pre_input.f.data[1], pre_input.f.linesize[1], c->width >> 1, c->height >> 1); + s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2], pre_input.f.data[2], pre_input.f.linesize[2], c->width >> 1, c->height >> 1); } } @@ -1061,20 +1063,20 @@ static int select_input_picture(MpegEncContext *s){ if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){ if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){ s->reordered_input_picture[0]= s->input_picture[0]; - s->reordered_input_picture[0]->pict_type= AV_PICTURE_TYPE_I; - s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++; + s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I; + s->reordered_input_picture[0]->f.coded_picture_number = s->coded_picture_number++; }else{ int b_frames; if(s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor){ if(s->picture_in_gop_number < s->gop_size && skip_check(s, s->input_picture[0], s->next_picture_ptr)){ //FIXME check that te gop check above is +-1 correct -//av_log(NULL, AV_LOG_DEBUG, "skip %p %"PRId64"\n", s->input_picture[0]->data[0], s->input_picture[0]->pts); +//av_log(NULL, AV_LOG_DEBUG, "skip %p %"PRId64"\n", s->input_picture[0]->f.data[0], s->input_picture[0]->pts); - if(s->input_picture[0]->type == FF_BUFFER_TYPE_SHARED){ + if (s->input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED) { 
for(i=0; i<4; i++) - s->input_picture[0]->data[i]= NULL; - s->input_picture[0]->type= 0; + s->input_picture[0]->f.data[i] = NULL; + s->input_picture[0]->f.type = 0; }else{ assert( s->input_picture[0]->type==FF_BUFFER_TYPE_USER || s->input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL); @@ -1091,7 +1093,7 @@ static int select_input_picture(MpegEncContext *s){ if(s->flags&CODEC_FLAG_PASS2){ for(i=0; i<s->max_b_frames+1; i++){ - int pict_num= s->input_picture[0]->display_picture_number + i; + int pict_num = s->input_picture[0]->f.display_picture_number + i; if(pict_num >= s->rc_context.num_entries) break; @@ -1100,7 +1102,7 @@ static int select_input_picture(MpegEncContext *s){ break; } - s->input_picture[i]->pict_type= + s->input_picture[i]->f.pict_type = s->rc_context.entry[pict_num].new_pict_type; } } @@ -1112,8 +1114,8 @@ static int select_input_picture(MpegEncContext *s){ for(i=1; i<s->max_b_frames+1; i++){ if(s->input_picture[i] && s->input_picture[i]->b_frame_score==0){ s->input_picture[i]->b_frame_score= - get_intra_count(s, s->input_picture[i ]->data[0], - s->input_picture[i-1]->data[0], s->linesize) + 1; + get_intra_count(s, s->input_picture[i ]->f.data[0], + s->input_picture[i-1]->f.data[0], s->linesize) + 1; } } for(i=0; i<s->max_b_frames+1; i++){ @@ -1139,11 +1141,11 @@ static int select_input_picture(MpegEncContext *s){ //av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %d\n", b_count); for(i= b_frames - 1; i>=0; i--){ - int type= s->input_picture[i]->pict_type; + int type = s->input_picture[i]->f.pict_type; if(type && type != AV_PICTURE_TYPE_B) b_frames= i; } - if(s->input_picture[b_frames]->pict_type == AV_PICTURE_TYPE_B && b_frames == s->max_b_frames){ + if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B && b_frames == s->max_b_frames){ av_log(s->avctx, AV_LOG_ERROR, "warning, too many b frames in a row\n"); } @@ -1153,49 +1155,49 @@ static int select_input_picture(MpegEncContext *s){ }else{ if(s->flags & CODEC_FLAG_CLOSED_GOP) b_frames=0; - s->input_picture[b_frames]->pict_type= AV_PICTURE_TYPE_I; + s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I; } } if( (s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames - && s->input_picture[b_frames]->pict_type== AV_PICTURE_TYPE_I) + && s->input_picture[b_frames]->f.pict_type== AV_PICTURE_TYPE_I) b_frames--; s->reordered_input_picture[0]= s->input_picture[b_frames]; - if(s->reordered_input_picture[0]->pict_type != AV_PICTURE_TYPE_I) - s->reordered_input_picture[0]->pict_type= AV_PICTURE_TYPE_P; - s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++; + if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I) + s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P; + s->reordered_input_picture[0]->f.coded_picture_number = s->coded_picture_number++; for(i=0; i<b_frames; i++){ - s->reordered_input_picture[i+1]= s->input_picture[i]; - s->reordered_input_picture[i+1]->pict_type= AV_PICTURE_TYPE_B; - s->reordered_input_picture[i+1]->coded_picture_number= s->coded_picture_number++; + s->reordered_input_picture[i + 1] = s->input_picture[i]; + s->reordered_input_picture[i + 1]->f.pict_type = AV_PICTURE_TYPE_B; + s->reordered_input_picture[i + 1]->f.coded_picture_number = s->coded_picture_number++; } } } no_output_pic: if(s->reordered_input_picture[0]){ - s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=AV_PICTURE_TYPE_B ? 3 : 0; + s->reordered_input_picture[0]->f.reference = s->reordered_input_picture[0]->f.pict_type!=AV_PICTURE_TYPE_B ? 
3 : 0; ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]); - if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED || s->avctx->rc_buffer_size){ + if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED || s->avctx->rc_buffer_size) { // input is a shared pix, so we can't modifiy it -> alloc a new one & ensure that the shared one is reuseable int i= ff_find_unused_picture(s, 0); Picture *pic= &s->picture[i]; - pic->reference = s->reordered_input_picture[0]->reference; + pic->f.reference = s->reordered_input_picture[0]->f.reference; if(ff_alloc_picture(s, pic, 0) < 0){ return -1; } /* mark us unused / free shared pic */ - if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_INTERNAL) + if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL) s->avctx->release_buffer(s->avctx, (AVFrame*)s->reordered_input_picture[0]); for(i=0; i<4; i++) - s->reordered_input_picture[0]->data[i]= NULL; - s->reordered_input_picture[0]->type= 0; + s->reordered_input_picture[0]->f.data[i] = NULL; + s->reordered_input_picture[0]->f.type = 0; copy_picture_attributes(s, (AVFrame*)pic, (AVFrame*)s->reordered_input_picture[0]); @@ -1208,12 +1210,12 @@ no_output_pic: s->current_picture_ptr= s->reordered_input_picture[0]; for(i=0; i<4; i++){ - s->new_picture.data[i]+= INPLACE_OFFSET; + s->new_picture.f.data[i] += INPLACE_OFFSET; } } ff_copy_picture(&s->current_picture, s->current_picture_ptr); - s->picture_number= s->new_picture.display_picture_number; + s->picture_number = s->new_picture.f.display_picture_number; //printf("dpn:%d\n", s->picture_number); }else{ memset(&s->new_picture, 0, sizeof(Picture)); @@ -1248,8 +1250,8 @@ int MPV_encode_picture(AVCodecContext *avctx, } /* output? */ - if(s->new_picture.data[0]){ - s->pict_type= s->new_picture.pict_type; + if (s->new_picture.f.data[0]) { + s->pict_type = s->new_picture.f.pict_type; //emms_c(); //printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale); MPV_frame_start(s, avctx); @@ -1306,8 +1308,8 @@ vbv_retry: ff_write_pass1_stats(s); for(i=0; i<4; i++){ - s->current_picture_ptr->error[i]= s->current_picture.error[i]; - avctx->error[i] += s->current_picture_ptr->error[i]; + s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i]; + avctx->error[i] += s->current_picture_ptr->f.error[i]; } if(s->flags&CODEC_FLAG_PASS1) @@ -1507,7 +1509,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, update_qscale(s); if(!(s->flags&CODEC_FLAG_QP_RD)){ - s->qscale= s->current_picture_ptr->qscale_table[mb_xy]; + s->qscale = s->current_picture_ptr->f.qscale_table[mb_xy]; s->dquant= s->qscale - last_qp; if(s->out_format==FMT_H263){ @@ -1531,9 +1533,9 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, wrap_y = s->linesize; wrap_c = s->uvlinesize; - ptr_y = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16; - ptr_cb = s->new_picture.data[1] + (mb_y * mb_block_height * wrap_c) + mb_x * 8; - ptr_cr = s->new_picture.data[2] + (mb_y * mb_block_height * wrap_c) + mb_x * 8; + ptr_y = s->new_picture.f.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16; + ptr_cb = s->new_picture.f.data[1] + (mb_y * mb_block_height * wrap_c) + mb_x * 8; + ptr_cr = s->new_picture.f.data[2] + (mb_y * mb_block_height * wrap_c) + mb_x * 8; if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){ uint8_t *ebuf= s->edge_emu_buffer + 32; @@ -1601,12 +1603,12 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, } if (s->mv_dir & 
MV_DIR_FORWARD) { - MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix); + MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix); op_pix = s->dsp.avg_pixels_tab; op_qpix= s->dsp.avg_qpel_pixels_tab; } if (s->mv_dir & MV_DIR_BACKWARD) { - MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix); + MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix); } if(s->flags&CODEC_FLAG_INTERLACED_DCT){ @@ -1787,7 +1789,7 @@ static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int moti static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){ int i; - memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster then a loop? + memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop? /* mpeg1 */ d->mb_skip_run= s->mb_skip_run; @@ -1816,7 +1818,7 @@ static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext * int i; memcpy(d->mv, s->mv, 2*4*2*sizeof(int)); - memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster then a loop? + memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop? /* mpeg1 */ d->mb_skip_run= s->mb_skip_run; @@ -1932,18 +1934,18 @@ static int sse_mb(MpegEncContext *s){ if(w==16 && h==16) if(s->avctx->mb_cmp == FF_CMP_NSSE){ - return s->dsp.nsse[0](s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16) - +s->dsp.nsse[1](s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8) - +s->dsp.nsse[1](s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8); + return s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16) + +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8) + +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8); }else{ - return s->dsp.sse[0](NULL, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16) - +s->dsp.sse[1](NULL, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8) - +s->dsp.sse[1](NULL, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8); + return s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16) + +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8) + +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8); } else - return sse(s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize) - +sse(s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize) - +sse(s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize); + return sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize) + +sse(s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize) + +sse(s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize); } static int 
pre_estimate_motion_thread(AVCodecContext *c, void *arg){ @@ -2002,7 +2004,7 @@ static int mb_var_thread(AVCodecContext *c, void *arg){ for(mb_x=0; mb_x < s->mb_width; mb_x++) { int xx = mb_x * 16; int yy = mb_y * 16; - uint8_t *pix = s->new_picture.data[0] + (yy * s->linesize) + xx; + uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx; int varc; int sum = s->dsp.pix_sum(pix, s->linesize); @@ -2069,7 +2071,7 @@ static int encode_thread(AVCodecContext *c, void *arg){ /* note: quant matrix value (8) is implied here */ s->last_dc[i] = 128 << s->intra_dc_precision; - s->current_picture.error[i] = 0; + s->current_picture.f.error[i] = 0; } s->mb_skip_run = 0; memset(s->last_mv, 0, sizeof(s->last_mv)); @@ -2170,9 +2172,7 @@ static int encode_thread(AVCodecContext *c, void *arg){ int d= 100 / s->avctx->error_rate; if(r % d == 0){ current_packet_size=0; -#ifndef ALT_BITSTREAM_WRITER s->pb.buf_ptr= s->ptr_lastgob; -#endif assert(put_bits_ptr(&s->pb) == s->ptr_lastgob); } } @@ -2272,8 +2272,8 @@ static int encode_thread(AVCodecContext *c, void *arg){ s->mv_type = MV_TYPE_8X8; s->mb_intra= 0; for(i=0; i<4; i++){ - s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0]; - s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1]; + s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0]; + s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1]; } encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb, &dmin, &next_block, 0, 0); @@ -2459,7 +2459,7 @@ static int encode_thread(AVCodecContext *c, void *arg){ } } - s->current_picture.qscale_table[xy]= best_s.qscale; + s->current_picture.f.qscale_table[xy] = best_s.qscale; copy_context_after_encode(s, &best_s, -1); @@ -2526,8 +2526,8 @@ static int encode_thread(AVCodecContext *c, void *arg){ s->mv_type = MV_TYPE_8X8; s->mb_intra= 0; for(i=0; i<4; i++){ - s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0]; - s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1]; + s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0]; + s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1]; } break; case CANDIDATE_MB_TYPE_DIRECT: @@ -2628,14 +2628,14 @@ static int encode_thread(AVCodecContext *c, void *arg){ if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16; if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16; - s->current_picture.error[0] += sse( - s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, + s->current_picture.f.error[0] += sse( + s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize); - s->current_picture.error[1] += sse( - s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h, + s->current_picture.f.error[1] += sse( + s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h, s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize); - s->current_picture.error[2] += sse( - s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h, + s->current_picture.f.error[2] += sse( + s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h, s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize); } if(s->loop_filter){ @@ -2686,9 +2686,9 @@ static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src) MERGE(misc_bits); MERGE(error_count); MERGE(padding_bug_score); - MERGE(current_picture.error[0]); - MERGE(current_picture.error[1]); - 
MERGE(current_picture.error[2]); + MERGE(current_picture.f.error[0]); + MERGE(current_picture.f.error[1]); + MERGE(current_picture.f.error[2]); if(dst->avctx->noise_reduction){ for(i=0; i<64; i++){ @@ -2705,13 +2705,13 @@ static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src) static int estimate_qp(MpegEncContext *s, int dry_run){ if (s->next_lambda){ - s->current_picture_ptr->quality= - s->current_picture.quality = s->next_lambda; + s->current_picture_ptr->f.quality = + s->current_picture.f.quality = s->next_lambda; if(!dry_run) s->next_lambda= 0; } else if (!s->fixed_qscale) { - s->current_picture_ptr->quality= - s->current_picture.quality = ff_rate_estimate_qscale(s, dry_run); - if (s->current_picture.quality < 0) + s->current_picture_ptr->f.quality = + s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run); + if (s->current_picture.f.quality < 0) return -1; } @@ -2734,7 +2734,7 @@ static int estimate_qp(MpegEncContext *s, int dry_run){ s->lambda= s->lambda_table[0]; //FIXME broken }else - s->lambda= s->current_picture.quality; + s->lambda = s->current_picture.f.quality; //printf("%d %d\n", s->avctx->global_quality, s->current_picture.quality); update_qscale(s); return 0; @@ -2743,7 +2743,7 @@ static int estimate_qp(MpegEncContext *s, int dry_run){ /* must be called before writing the header */ static void set_frame_distances(MpegEncContext * s){ assert(s->current_picture_ptr->pts != AV_NOPTS_VALUE); - s->time= s->current_picture_ptr->pts*s->avctx->time_base.num; + s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num; if(s->pict_type==AV_PICTURE_TYPE_B){ s->pb_time= s->pp_time - (s->last_non_b_time - s->time); @@ -2917,12 +2917,12 @@ static int encode_picture(MpegEncContext *s, int picture_number) } //FIXME var duplication - s->current_picture_ptr->key_frame= - s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr - s->current_picture_ptr->pict_type= - s->current_picture.pict_type= s->pict_type; + s->current_picture_ptr->f.key_frame = + s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr + s->current_picture_ptr->f.pict_type = + s->current_picture.f.pict_type = s->pict_type; - if(s->current_picture.key_frame) + if (s->current_picture.f.key_frame) s->picture_in_gop_number=0; s->last_bits= put_bits_count(&s->pb); @@ -3770,62 +3770,62 @@ int dct_quantize_c(MpegEncContext *s, } AVCodec ff_h263_encoder = { - "h263", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_H263, - sizeof(MpegEncContext), - MPV_encode_init, - MPV_encode_picture, - MPV_encode_end, + .name = "h263", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_H263, + .priv_data_size = sizeof(MpegEncContext), + .init = MPV_encode_init, + .encode = MPV_encode_picture, + .close = MPV_encode_end, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"), }; AVCodec ff_h263p_encoder = { - "h263p", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_H263P, - sizeof(MpegEncContext), - MPV_encode_init, - MPV_encode_picture, - MPV_encode_end, + .name = "h263p", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_H263P, + .priv_data_size = sizeof(MpegEncContext), + .init = MPV_encode_init, + .encode = MPV_encode_picture, + .close = MPV_encode_end, .capabilities = CODEC_CAP_SLICE_THREADS, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"), }; AVCodec ff_msmpeg4v2_encoder = { - "msmpeg4v2", - AVMEDIA_TYPE_VIDEO, - 
CODEC_ID_MSMPEG4V2, - sizeof(MpegEncContext), - MPV_encode_init, - MPV_encode_picture, - MPV_encode_end, + .name = "msmpeg4v2", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MSMPEG4V2, + .priv_data_size = sizeof(MpegEncContext), + .init = MPV_encode_init, + .encode = MPV_encode_picture, + .close = MPV_encode_end, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"), }; AVCodec ff_msmpeg4v3_encoder = { - "msmpeg4", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MSMPEG4V3, - sizeof(MpegEncContext), - MPV_encode_init, - MPV_encode_picture, - MPV_encode_end, + .name = "msmpeg4", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MSMPEG4V3, + .priv_data_size = sizeof(MpegEncContext), + .init = MPV_encode_init, + .encode = MPV_encode_picture, + .close = MPV_encode_end, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"), }; AVCodec ff_wmv1_encoder = { - "wmv1", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_WMV1, - sizeof(MpegEncContext), - MPV_encode_init, - MPV_encode_picture, - MPV_encode_end, + .name = "wmv1", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_WMV1, + .priv_data_size = sizeof(MpegEncContext), + .init = MPV_encode_init, + .encode = MPV_encode_picture, + .close = MPV_encode_end, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 7"), }; diff --git a/libavcodec/mpegvideo_xvmc.c b/libavcodec/mpegvideo_xvmc.c index 76794076f9..6247e6240c 100644 --- a/libavcodec/mpegvideo_xvmc.c +++ b/libavcodec/mpegvideo_xvmc.c @@ -41,7 +41,7 @@ */ void ff_xvmc_init_block(MpegEncContext *s) { - struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.data[2]; + struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2]; assert(render && render->xvmc_id == AV_XVMC_ID); s->block = (DCTELEM (*)[64])(render->data_blocks + render->next_free_data_block_num * 64); @@ -73,7 +73,7 @@ void ff_xvmc_pack_pblocks(MpegEncContext *s, int cbp) */ int ff_xvmc_field_start(MpegEncContext *s, AVCodecContext *avctx) { - struct xvmc_pix_fmt *last, *next, *render = (struct xvmc_pix_fmt*)s->current_picture.data[2]; + struct xvmc_pix_fmt *last, *next, *render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2]; const int mb_block_count = 4 + (1 << s->chroma_format); assert(avctx); @@ -113,7 +113,7 @@ int ff_xvmc_field_start(MpegEncContext *s, AVCodecContext *avctx) case AV_PICTURE_TYPE_I: return 0; // no prediction from other frames case AV_PICTURE_TYPE_B: - next = (struct xvmc_pix_fmt*)s->next_picture.data[2]; + next = (struct xvmc_pix_fmt*)s->next_picture.f.data[2]; if (!next) return -1; if (next->xvmc_id != AV_XVMC_ID) @@ -121,7 +121,7 @@ int ff_xvmc_field_start(MpegEncContext *s, AVCodecContext *avctx) render->p_future_surface = next->p_surface; // no return here, going to set forward prediction case AV_PICTURE_TYPE_P: - last = (struct xvmc_pix_fmt*)s->last_picture.data[2]; + last = (struct xvmc_pix_fmt*)s->last_picture.f.data[2]; if (!last) last = render; // predict second field from the first if (last->xvmc_id != AV_XVMC_ID) @@ -141,7 +141,7 @@ return -1; */ void ff_xvmc_field_end(MpegEncContext *s) { - struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.data[2]; + struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2]; assert(render); if (render->filled_mv_blocks_num > 0) @@ -179,10 +179,10 @@ void 
ff_xvmc_decode_mb(MpegEncContext *s) // Do I need to export quant when I could not perform postprocessing? // Anyway, it doesn't hurt. - s->current_picture.qscale_table[mb_xy] = s->qscale; + s->current_picture.f.qscale_table[mb_xy] = s->qscale; // start of XVMC-specific code - render = (struct xvmc_pix_fmt*)s->current_picture.data[2]; + render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2]; assert(render); assert(render->xvmc_id == AV_XVMC_ID); assert(render->mv_blocks); diff --git a/libavcodec/msmpeg4.c b/libavcodec/msmpeg4.c index 06098b04a1..5ed03c4ddf 100644 --- a/libavcodec/msmpeg4.c +++ b/libavcodec/msmpeg4.c @@ -780,10 +780,10 @@ static inline int msmpeg4_pred_dc(MpegEncContext * s, int n, }else{ if(n<4){ wrap= s->linesize; - dest= s->current_picture.data[0] + (((n>>1) + 2*s->mb_y) * 8* wrap ) + ((n&1) + 2*s->mb_x) * 8; + dest= s->current_picture.f.data[0] + (((n >> 1) + 2*s->mb_y) * 8* wrap ) + ((n & 1) + 2*s->mb_x) * 8; }else{ wrap= s->uvlinesize; - dest= s->current_picture.data[n-3] + (s->mb_y * 8 * wrap) + s->mb_x * 8; + dest= s->current_picture.f.data[n - 3] + (s->mb_y * 8 * wrap) + s->mb_x * 8; } if(s->mb_x==0) a= (1024 + (scale>>1))/scale; else a= get_dc(dest-8, wrap, scale*8); @@ -1172,7 +1172,7 @@ static int msmpeg4v34_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) { int cbp, code, i; uint8_t *coded_val; - uint32_t * const mb_type_ptr= &s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]; + uint32_t * const mb_type_ptr = &s->current_picture.f.mb_type[s->mb_x + s->mb_y*s->mb_stride]; if (s->pict_type == AV_PICTURE_TYPE_P) { if (s->use_skip_mb_code) { @@ -1884,60 +1884,56 @@ int ff_msmpeg4_decode_motion(MpegEncContext * s, } AVCodec ff_msmpeg4v1_decoder = { - "msmpeg4v1", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MSMPEG4V1, - sizeof(MpegEncContext), - ff_msmpeg4_decode_init, - NULL, - ff_h263_decode_end, - ff_h263_decode_frame, - CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_EXPERIMENTAL, + .name = "msmpeg4v1", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MSMPEG4V1, + .priv_data_size = sizeof(MpegEncContext), + .init = ff_msmpeg4_decode_init, + .close = ff_h263_decode_end, + .decode = ff_h263_decode_frame, + .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1, .max_lowres= 3, .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 1"), .pix_fmts= ff_pixfmt_list_420, }; AVCodec ff_msmpeg4v2_decoder = { - "msmpeg4v2", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MSMPEG4V2, - sizeof(MpegEncContext), - ff_msmpeg4_decode_init, - NULL, - ff_h263_decode_end, - ff_h263_decode_frame, - CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1, + .name = "msmpeg4v2", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MSMPEG4V2, + .priv_data_size = sizeof(MpegEncContext), + .init = ff_msmpeg4_decode_init, + .close = ff_h263_decode_end, + .decode = ff_h263_decode_frame, + .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1, .max_lowres= 3, .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"), .pix_fmts= ff_pixfmt_list_420, }; AVCodec ff_msmpeg4v3_decoder = { - "msmpeg4", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MSMPEG4V3, - sizeof(MpegEncContext), - ff_msmpeg4_decode_init, - NULL, - ff_h263_decode_end, - ff_h263_decode_frame, - CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1, + .name = "msmpeg4", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MSMPEG4V3, + .priv_data_size = sizeof(MpegEncContext), + .init = ff_msmpeg4_decode_init, + .close = ff_h263_decode_end, + .decode = ff_h263_decode_frame, + .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1, 
.max_lowres= 3, .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"), .pix_fmts= ff_pixfmt_list_420, }; AVCodec ff_wmv1_decoder = { - "wmv1", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_WMV1, - sizeof(MpegEncContext), - ff_msmpeg4_decode_init, - NULL, - ff_h263_decode_end, - ff_h263_decode_frame, - CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1, + .name = "wmv1", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_WMV1, + .priv_data_size = sizeof(MpegEncContext), + .init = ff_msmpeg4_decode_init, + .close = ff_h263_decode_end, + .decode = ff_h263_decode_frame, + .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1, .max_lowres= 3, .long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 7"), .pix_fmts= ff_pixfmt_list_420, diff --git a/libavcodec/msmpeg4.h b/libavcodec/msmpeg4.h index d8e3727a40..c9d42b48e8 100644 --- a/libavcodec/msmpeg4.h +++ b/libavcodec/msmpeg4.h @@ -19,10 +19,6 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ -/** - * @file - */ - #ifndef AVCODEC_MSMPEG4_H #define AVCODEC_MSMPEG4_H diff --git a/libavcodec/msrle.c b/libavcodec/msrle.c index cd81200c37..e864aab6c6 100644 --- a/libavcodec/msrle.c +++ b/libavcodec/msrle.c @@ -148,14 +148,13 @@ static av_cold int msrle_decode_end(AVCodecContext *avctx) } AVCodec ff_msrle_decoder = { - "msrle", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MSRLE, - sizeof(MsrleContext), - msrle_decode_init, - NULL, - msrle_decode_end, - msrle_decode_frame, - CODEC_CAP_DR1, + .name = "msrle", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MSRLE, + .priv_data_size = sizeof(MsrleContext), + .init = msrle_decode_init, + .close = msrle_decode_end, + .decode = msrle_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name= NULL_IF_CONFIG_SMALL("Microsoft RLE"), }; diff --git a/libavcodec/msvideo1.c b/libavcodec/msvideo1.c index bd55cad3b9..8e490570fe 100644 --- a/libavcodec/msvideo1.c +++ b/libavcodec/msvideo1.c @@ -333,14 +333,13 @@ static av_cold int msvideo1_decode_end(AVCodecContext *avctx) } AVCodec ff_msvideo1_decoder = { - "msvideo1", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_MSVIDEO1, - sizeof(Msvideo1Context), - msvideo1_decode_init, - NULL, - msvideo1_decode_end, - msvideo1_decode_frame, - CODEC_CAP_DR1, + .name = "msvideo1", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_MSVIDEO1, + .priv_data_size = sizeof(Msvideo1Context), + .init = msvideo1_decode_init, + .close = msvideo1_decode_end, + .decode = msvideo1_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name= NULL_IF_CONFIG_SMALL("Microsoft Video 1"), }; diff --git a/libavcodec/nellymoserdec.c b/libavcodec/nellymoserdec.c index 59c1b3bdd8..a153dc0603 100644 --- a/libavcodec/nellymoserdec.c +++ b/libavcodec/nellymoserdec.c @@ -194,14 +194,13 @@ static av_cold int decode_end(AVCodecContext * avctx) { } AVCodec ff_nellymoser_decoder = { - "nellymoser", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_NELLYMOSER, - sizeof(NellyMoserDecodeContext), - decode_init, - NULL, - decode_end, - decode_tag, + .name = "nellymoser", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_NELLYMOSER, + .priv_data_size = sizeof(NellyMoserDecodeContext), + .init = decode_init, + .close = decode_end, + .decode = decode_tag, .long_name = NULL_IF_CONFIG_SMALL("Nellymoser Asao"), }; diff --git a/libavcodec/nuv.c b/libavcodec/nuv.c index 6eb6de3101..2a60fe47fc 100644 --- a/libavcodec/nuv.c +++ b/libavcodec/nuv.c @@ -63,11 +63,11 @@ static const uint8_t fallback_cquant[] = { }; /** - * \brief copy frame data from buffer to AVFrame, handling stride. 
- * \param f destination AVFrame - * \param src source buffer, does not use any line-stride - * \param width width of the video frame - * \param height height of the video frame + * @brief copy frame data from buffer to AVFrame, handling stride. + * @param f destination AVFrame + * @param src source buffer, does not use any line-stride + * @param width width of the video frame + * @param height height of the video frame */ static void copy_frame(AVFrame *f, const uint8_t *src, int width, int height) { @@ -77,7 +77,7 @@ static void copy_frame(AVFrame *f, const uint8_t *src, } /** - * \brief extract quantization tables from codec data into our context + * @brief extract quantization tables from codec data into our context */ static int get_quant(AVCodecContext *avctx, NuvContext *c, const uint8_t *buf, int size) { @@ -94,7 +94,7 @@ static int get_quant(AVCodecContext *avctx, NuvContext *c, } /** - * \brief set quantization tables from a quality value + * @brief set quantization tables from a quality value */ static void get_quant_quality(NuvContext *c, int quality) { int i; @@ -273,15 +273,14 @@ static av_cold int decode_end(AVCodecContext *avctx) { } AVCodec ff_nuv_decoder = { - "nuv", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_NUV, - sizeof(NuvContext), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, + .name = "nuv", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_NUV, + .priv_data_size = sizeof(NuvContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("NuppelVideo/RTJPEG"), }; diff --git a/libavcodec/options.c b/libavcodec/options.c index f35cb3cc0f..b4d84ddb06 100644 --- a/libavcodec/options.c +++ b/libavcodec/options.c @@ -25,6 +25,8 @@ */ #include "avcodec.h" +#include "internal.h" +#include "libavutil/avassert.h" #include "libavutil/opt.h" #include <float.h> /* FLT_MIN, FLT_MAX */ @@ -181,7 +183,10 @@ static const AVOption options[]={ {"careful", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_ER_CAREFUL }, INT_MIN, INT_MAX, V|D, "er"}, {"compliant", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_ER_COMPLIANT }, INT_MIN, INT_MAX, V|D, "er"}, {"aggressive", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_ER_AGGRESSIVE }, INT_MIN, INT_MAX, V|D, "er"}, +#if FF_API_VERY_AGGRESSIVE {"very_aggressive", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_ER_VERY_AGGRESSIVE }, INT_MIN, INT_MAX, V|D, "er"}, +#endif /* FF_API_VERY_AGGRESSIVE */ +{"explode", "abort decoding on error recognition", 0, FF_OPT_TYPE_CONST, {.dbl = FF_ER_EXPLODE }, INT_MIN, INT_MAX, V|D, "er"}, {"has_b_frames", NULL, OFFSET(has_b_frames), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX}, {"block_align", NULL, OFFSET(block_align), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX}, {"parse_only", NULL, OFFSET(parse_only), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX}, @@ -413,7 +418,9 @@ static const AVOption options[]={ #if FF_API_REQUEST_CHANNELS {"request_channels", "set desired number of audio channels", OFFSET(request_channels), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, 0, INT_MAX, A|D}, #endif +#if FF_API_DRC_SCALE {"drc_scale", "percentage of dynamic range compression to apply", OFFSET(drc_scale), FF_OPT_TYPE_FLOAT, {.dbl = 1.0 }, 0.0, 1.0, A|D}, +#endif {"reservoir", "use bit reservoir", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_BIT_RESERVOIR }, INT_MIN, INT_MAX, A|E, "flags2"}, {"mbtree", "use macroblock tree ratecontrol (x264 only)", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_MBTREE }, INT_MIN, INT_MAX, V|E, "flags2"}, 
{"bits_per_raw_sample", NULL, OFFSET(bits_per_raw_sample), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX}, @@ -523,6 +530,15 @@ int avcodec_get_context_defaults3(AVCodecContext *s, AVCodec *codec){ av_opt_set_defaults(s->priv_data); } } + if (codec && codec->defaults) { + int ret; + AVCodecDefault *d = codec->defaults; + while (d->key) { + ret = av_set_string3(s, d->key, d->value, 0, NULL); + av_assert0(ret >= 0); + d++; + } + } return 0; } @@ -539,6 +555,7 @@ AVCodecContext *avcodec_alloc_context3(AVCodec *codec){ return avctx; } +#if FF_API_ALLOC_CONTEXT AVCodecContext *avcodec_alloc_context2(enum AVMediaType codec_type){ AVCodecContext *avctx= av_malloc(sizeof(AVCodecContext)); @@ -548,14 +565,17 @@ AVCodecContext *avcodec_alloc_context2(enum AVMediaType codec_type){ return avctx; } +#endif void avcodec_get_context_defaults(AVCodecContext *s){ avcodec_get_context_defaults2(s, AVMEDIA_TYPE_UNKNOWN); } +#if FF_API_ALLOC_CONTEXT AVCodecContext *avcodec_alloc_context(void){ return avcodec_alloc_context2(AVMEDIA_TYPE_UNKNOWN); } +#endif int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src) { diff --git a/libavcodec/pamenc.c b/libavcodec/pamenc.c index b6d58dec49..d9d849e8ad 100644 --- a/libavcodec/pamenc.c +++ b/libavcodec/pamenc.c @@ -109,12 +109,12 @@ static int pam_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, AVCodec ff_pam_encoder = { - "pam", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_PAM, - sizeof(PNMContext), - ff_pnm_init, - pam_encode_frame, + .name = "pam", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_PAM, + .priv_data_size = sizeof(PNMContext), + .init = ff_pnm_init, + .encode = pam_encode_frame, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_GRAY8, PIX_FMT_MONOWHITE, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("PAM (Portable AnyMap) image"), }; diff --git a/libavcodec/pcm-mpeg.c b/libavcodec/pcm-mpeg.c index 030507502d..1e8a39ef76 100644 --- a/libavcodec/pcm-mpeg.c +++ b/libavcodec/pcm-mpeg.c @@ -297,14 +297,10 @@ static int pcm_bluray_decode_frame(AVCodecContext *avctx, } AVCodec ff_pcm_bluray_decoder = { - "pcm_bluray", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_PCM_BLURAY, - 0, - NULL, - NULL, - NULL, - pcm_bluray_decode_frame, + .name = "pcm_bluray", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_PCM_BLURAY, + .decode = pcm_bluray_decode_frame, .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("PCM signed 16|20|24-bit big-endian for Blu-ray media"), diff --git a/libavcodec/pcm.c b/libavcodec/pcm.c index 111ce6193f..852e34981f 100644 --- a/libavcodec/pcm.c +++ b/libavcodec/pcm.c @@ -440,7 +440,6 @@ static int pcm_decode_frame(AVCodecContext *avctx, default: av_log(avctx, AV_LOG_ERROR, "PCM DVD unsupported sample depth\n"); return -1; - break; } samples = (short *) dst_int32_t; break; diff --git a/libavcodec/pcx.c b/libavcodec/pcx.c index 2c9f8c07d5..b2d433a13d 100644 --- a/libavcodec/pcx.c +++ b/libavcodec/pcx.c @@ -248,15 +248,13 @@ static av_cold int pcx_end(AVCodecContext *avctx) { } AVCodec ff_pcx_decoder = { - "pcx", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_PCX, - sizeof(PCXContext), - pcx_init, - NULL, - pcx_end, - pcx_decode_frame, - CODEC_CAP_DR1, - NULL, + .name = "pcx", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_PCX, + .priv_data_size = sizeof(PCXContext), + .init = pcx_init, + .close = pcx_end, + .decode = pcx_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("PC Paintbrush PCX image"), }; diff 
--git a/libavcodec/pcxenc.c b/libavcodec/pcxenc.c index bf7cebbbe2..816223e736 100644 --- a/libavcodec/pcxenc.c +++ b/libavcodec/pcxenc.c @@ -20,10 +20,10 @@ */ /** - * PCX image encoder * @file + * PCX image encoder * @author Daniel Verkamp - * @sa http://www.qzx.com/pc-gpe/pcx.txt + * @see http://www.qzx.com/pc-gpe/pcx.txt */ #include "avcodec.h" @@ -190,13 +190,12 @@ static int pcx_encode_frame(AVCodecContext *avctx, } AVCodec ff_pcx_encoder = { - "pcx", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_PCX, - sizeof(PCXContext), - pcx_encode_init, - pcx_encode_frame, - NULL, + .name = "pcx", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_PCX, + .priv_data_size = sizeof(PCXContext), + .init = pcx_encode_init, + .encode = pcx_encode_frame, .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_RGB24, PIX_FMT_RGB8, PIX_FMT_BGR8, PIX_FMT_RGB4_BYTE, PIX_FMT_BGR4_BYTE, PIX_FMT_GRAY8, PIX_FMT_PAL8, diff --git a/libavcodec/pgssubdec.c b/libavcodec/pgssubdec.c index 315dbbd779..5a070e435d 100644 --- a/libavcodec/pgssubdec.c +++ b/libavcodec/pgssubdec.c @@ -198,7 +198,7 @@ static int parse_picture_segment(AVCodecContext *avctx, /* Make sure the bitmap is not too large */ if (avctx->width < width || avctx->height < height) { - av_log(avctx, AV_LOG_ERROR, "Bitmap dimensions larger then video.\n"); + av_log(avctx, AV_LOG_ERROR, "Bitmap dimensions larger than video.\n"); return -1; } @@ -468,13 +468,12 @@ static int decode(AVCodecContext *avctx, void *data, int *data_size, } AVCodec ff_pgssub_decoder = { - "pgssub", - AVMEDIA_TYPE_SUBTITLE, - CODEC_ID_HDMV_PGS_SUBTITLE, - sizeof(PGSSubContext), - init_decoder, - NULL, - close_decoder, - decode, + .name = "pgssub", + .type = AVMEDIA_TYPE_SUBTITLE, + .id = CODEC_ID_HDMV_PGS_SUBTITLE, + .priv_data_size = sizeof(PGSSubContext), + .init = init_decoder, + .close = close_decoder, + .decode = decode, .long_name = NULL_IF_CONFIG_SMALL("HDMV Presentation Graphic Stream subtitles"), }; diff --git a/libavcodec/pictordec.c b/libavcodec/pictordec.c index b87c8643d0..2bbb63633d 100644 --- a/libavcodec/pictordec.c +++ b/libavcodec/pictordec.c @@ -246,14 +246,13 @@ static av_cold int decode_end(AVCodecContext *avctx) } AVCodec ff_pictor_decoder = { - "pictor", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_PICTOR, - sizeof(PicContext), + .name = "pictor", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_PICTOR, + .priv_data_size = sizeof(PicContext), decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Pictor/PC Paint"), }; diff --git a/libavcodec/pngdec.c b/libavcodec/pngdec.c index 05ba027802..70635671c2 100644 --- a/libavcodec/pngdec.c +++ b/libavcodec/pngdec.c @@ -647,15 +647,13 @@ static av_cold int png_dec_end(AVCodecContext *avctx) } AVCodec ff_png_decoder = { - "png", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_PNG, - sizeof(PNGDecContext), - png_dec_init, - NULL, - png_dec_end, - decode_frame, - CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/, - NULL, + .name = "png", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_PNG, + .priv_data_size = sizeof(PNGDecContext), + .init = png_dec_init, + .close = png_dec_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/, .long_name = NULL_IF_CONFIG_SMALL("PNG image"), }; diff --git a/libavcodec/pngenc.c b/libavcodec/pngenc.c index c4ef2fd945..29f4e1a0df 100644 --- a/libavcodec/pngenc.c +++ b/libavcodec/pngenc.c @@ -437,13 +437,12 @@ static av_cold int png_enc_init(AVCodecContext *avctx){ } AVCodec 
ff_png_encoder = { - "png", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_PNG, - sizeof(PNGEncContext), - png_enc_init, - encode_frame, - NULL, //encode_end, + .name = "png", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_PNG, + .priv_data_size = sizeof(PNGEncContext), + .init = png_enc_init, + .encode = encode_frame, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_PAL8, PIX_FMT_GRAY8, PIX_FMT_MONOBLACK, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("PNG image"), }; diff --git a/libavcodec/pnmdec.c b/libavcodec/pnmdec.c index ebecad4006..dc64f9b70e 100644 --- a/libavcodec/pnmdec.c +++ b/libavcodec/pnmdec.c @@ -196,15 +196,14 @@ static int pnm_decode_frame(AVCodecContext *avctx, void *data, #if CONFIG_PGM_DECODER AVCodec ff_pgm_decoder = { - "pgm", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_PGM, - sizeof(PNMContext), - ff_pnm_init, - NULL, - ff_pnm_end, - pnm_decode_frame, - CODEC_CAP_DR1, + .name = "pgm", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_PGM, + .priv_data_size = sizeof(PNMContext), + .init = ff_pnm_init, + .close = ff_pnm_end, + .decode = pnm_decode_frame, + .capabilities = CODEC_CAP_DR1, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_GRAY8, PIX_FMT_GRAY16BE, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("PGM (Portable GrayMap) image"), }; @@ -212,15 +211,14 @@ AVCodec ff_pgm_decoder = { #if CONFIG_PGMYUV_DECODER AVCodec ff_pgmyuv_decoder = { - "pgmyuv", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_PGMYUV, - sizeof(PNMContext), - ff_pnm_init, - NULL, - ff_pnm_end, - pnm_decode_frame, - CODEC_CAP_DR1, + .name = "pgmyuv", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_PGMYUV, + .priv_data_size = sizeof(PNMContext), + .init = ff_pnm_init, + .close = ff_pnm_end, + .decode = pnm_decode_frame, + .capabilities = CODEC_CAP_DR1, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("PGMYUV (Portable GrayMap YUV) image"), }; @@ -228,15 +226,14 @@ AVCodec ff_pgmyuv_decoder = { #if CONFIG_PPM_DECODER AVCodec ff_ppm_decoder = { - "ppm", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_PPM, - sizeof(PNMContext), - ff_pnm_init, - NULL, - ff_pnm_end, - pnm_decode_frame, - CODEC_CAP_DR1, + .name = "ppm", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_PPM, + .priv_data_size = sizeof(PNMContext), + .init = ff_pnm_init, + .close = ff_pnm_end, + .decode = pnm_decode_frame, + .capabilities = CODEC_CAP_DR1, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB48BE, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("PPM (Portable PixelMap) image"), }; @@ -244,15 +241,14 @@ AVCodec ff_ppm_decoder = { #if CONFIG_PBM_DECODER AVCodec ff_pbm_decoder = { - "pbm", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_PBM, - sizeof(PNMContext), - ff_pnm_init, - NULL, - ff_pnm_end, - pnm_decode_frame, - CODEC_CAP_DR1, + .name = "pbm", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_PBM, + .priv_data_size = sizeof(PNMContext), + .init = ff_pnm_init, + .close = ff_pnm_end, + .decode = pnm_decode_frame, + .capabilities = CODEC_CAP_DR1, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_MONOWHITE, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("PBM (Portable BitMap) image"), }; @@ -260,15 +256,14 @@ AVCodec ff_pbm_decoder = { #if CONFIG_PAM_DECODER AVCodec ff_pam_decoder = { - "pam", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_PAM, - sizeof(PNMContext), - ff_pnm_init, - NULL, - ff_pnm_end, - pnm_decode_frame, - CODEC_CAP_DR1, + .name = "pam", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_PAM, + .priv_data_size = sizeof(PNMContext), + .init = ff_pnm_init, + .close = ff_pnm_end, + .decode = 
pnm_decode_frame, + .capabilities = CODEC_CAP_DR1, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_GRAY8, PIX_FMT_MONOWHITE, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("PAM (Portable AnyMap) image"), }; diff --git a/libavcodec/pnmenc.c b/libavcodec/pnmenc.c index 42c32dc94a..15c71f2514 100644 --- a/libavcodec/pnmenc.c +++ b/libavcodec/pnmenc.c @@ -114,12 +114,12 @@ static int pnm_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, #if CONFIG_PGM_ENCODER AVCodec ff_pgm_encoder = { - "pgm", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_PGM, - sizeof(PNMContext), - ff_pnm_init, - pnm_encode_frame, + .name = "pgm", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_PGM, + .priv_data_size = sizeof(PNMContext), + .init = ff_pnm_init, + .encode = pnm_encode_frame, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_GRAY8, PIX_FMT_GRAY16BE, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("PGM (Portable GrayMap) image"), }; @@ -127,12 +127,12 @@ AVCodec ff_pgm_encoder = { #if CONFIG_PGMYUV_ENCODER AVCodec ff_pgmyuv_encoder = { - "pgmyuv", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_PGMYUV, - sizeof(PNMContext), - ff_pnm_init, - pnm_encode_frame, + .name = "pgmyuv", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_PGMYUV, + .priv_data_size = sizeof(PNMContext), + .init = ff_pnm_init, + .encode = pnm_encode_frame, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("PGMYUV (Portable GrayMap YUV) image"), }; @@ -140,12 +140,12 @@ AVCodec ff_pgmyuv_encoder = { #if CONFIG_PPM_ENCODER AVCodec ff_ppm_encoder = { - "ppm", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_PPM, - sizeof(PNMContext), - ff_pnm_init, - pnm_encode_frame, + .name = "ppm", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_PPM, + .priv_data_size = sizeof(PNMContext), + .init = ff_pnm_init, + .encode = pnm_encode_frame, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB48BE, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("PPM (Portable PixelMap) image"), }; @@ -153,12 +153,12 @@ AVCodec ff_ppm_encoder = { #if CONFIG_PBM_ENCODER AVCodec ff_pbm_encoder = { - "pbm", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_PBM, - sizeof(PNMContext), - ff_pnm_init, - pnm_encode_frame, + .name = "pbm", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_PBM, + .priv_data_size = sizeof(PNMContext), + .init = ff_pnm_init, + .encode = pnm_encode_frame, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_MONOWHITE, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("PBM (Portable BitMap) image"), }; diff --git a/libavcodec/ppc/dsputil_altivec.c b/libavcodec/ppc/dsputil_altivec.c index bd432beb87..7f36fa9aad 100644 --- a/libavcodec/ppc/dsputil_altivec.c +++ b/libavcodec/ppc/dsputil_altivec.c @@ -627,16 +627,6 @@ void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, // it's faster than -funroll-loops, but using // -funroll-loops w/ this is bad - 74 cycles again. 
// all this is on a 7450, tuning for the 7450 -#if 0 - for (i = 0; i < h; i++) { - pixelsv1 = vec_ld(0, pixels); - pixelsv2 = vec_ld(16, pixels); - vec_st(vec_perm(pixelsv1, pixelsv2, perm), - 0, block); - pixels+=line_size; - block +=line_size; - } -#else for (i = 0; i < h; i += 4) { pixelsv1 = vec_ld( 0, pixels); pixelsv2 = vec_ld(15, pixels); @@ -657,7 +647,6 @@ void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, pixels+=line_size_4; block +=line_size_4; } -#endif } /* next one assumes that ((line_size % 16) == 0) */ @@ -1384,7 +1373,7 @@ static void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int l void dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx) { - const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8; + const int high_bit_depth = avctx->bits_per_raw_sample > 8; c->pix_abs[0][1] = sad16_x2_altivec; c->pix_abs[0][2] = sad16_y2_altivec; @@ -1398,11 +1387,10 @@ void dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx) c->sse[0]= sse16_altivec; c->pix_sum = pix_sum_altivec; c->diff_pixels = diff_pixels_altivec; - c->get_pixels = get_pixels_altivec; - if (!high_bit_depth) - c->clear_block = clear_block_altivec; c->add_bytes= add_bytes_altivec; if (!high_bit_depth) { + c->get_pixels = get_pixels_altivec; + c->clear_block = clear_block_altivec; c->put_pixels_tab[0][0] = put_pixels16_altivec; /* the two functions do the same thing, so use the same code */ c->put_no_rnd_pixels_tab[0][0] = put_pixels16_altivec; diff --git a/libavcodec/ppc/dsputil_ppc.c b/libavcodec/ppc/dsputil_ppc.c index 327fe2c72f..6e85241ee9 100644 --- a/libavcodec/ppc/dsputil_ppc.c +++ b/libavcodec/ppc/dsputil_ppc.c @@ -48,7 +48,6 @@ static void clear_blocks_dcbz32_ppc(DCTELEM *blocks) { register int misal = ((unsigned long)blocks & 0x00000010); register int i = 0; -#if 1 if (misal) { ((unsigned long*)blocks)[0] = 0L; ((unsigned long*)blocks)[1] = 0L; @@ -66,9 +65,6 @@ static void clear_blocks_dcbz32_ppc(DCTELEM *blocks) ((unsigned long*)blocks)[191] = 0L; i += 16; } -#else - memset(blocks, 0, sizeof(DCTELEM)*6*64); -#endif } /* same as above, when dcbzl clear a whole 128B cache line @@ -78,7 +74,6 @@ static void clear_blocks_dcbz128_ppc(DCTELEM *blocks) { register int misal = ((unsigned long)blocks & 0x0000007f); register int i = 0; -#if 1 if (misal) { // we could probably also optimize this case, // but there's not much point as the machines @@ -89,9 +84,6 @@ static void clear_blocks_dcbz128_ppc(DCTELEM *blocks) for ( ; i < sizeof(DCTELEM)*6*64 ; i += 128) { __asm__ volatile("dcbzl %0,%1" : : "b" (blocks), "r" (i) : "memory"); } -#else - memset(blocks, 0, sizeof(DCTELEM)*6*64); -#endif } #else static void clear_blocks_dcbz128_ppc(DCTELEM *blocks) @@ -153,7 +145,7 @@ static void prefetch_ppc(void *mem, int stride, int h) void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx) { - const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8; + const int high_bit_depth = avctx->bits_per_raw_sample > 8; // Common optimizations whether AltiVec is available or not c->prefetch = prefetch_ppc; @@ -180,13 +172,14 @@ void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx) c->gmc1 = gmc1_altivec; #if CONFIG_ENCODERS - if (avctx->dct_algo == FF_DCT_AUTO || - avctx->dct_algo == FF_DCT_ALTIVEC) { + if (avctx->bits_per_raw_sample <= 8 && + (avctx->dct_algo == FF_DCT_AUTO || + avctx->dct_algo == FF_DCT_ALTIVEC)) { c->fdct = fdct_altivec; } #endif //CONFIG_ENCODERS - if (avctx->lowres==0) { 
+ if (avctx->lowres == 0 && avctx->bits_per_raw_sample <= 8) { if ((avctx->idct_algo == FF_IDCT_AUTO) || (avctx->idct_algo == FF_IDCT_ALTIVEC)) { c->idct_put = idct_put_altivec; diff --git a/libavcodec/ppc/fdct_altivec.c b/libavcodec/ppc/fdct_altivec.c index 6309a47f32..1cc6f89f4d 100644 --- a/libavcodec/ppc/fdct_altivec.c +++ b/libavcodec/ppc/fdct_altivec.c @@ -265,7 +265,6 @@ void fdct_altivec(int16_t *block) * conversion to vector float. The following code section takes advantage * of this. */ -#if 1 /* fdct rows {{{ */ x0 = ((vector float)vec_add(vs16(b00), vs16(b70))); x7 = ((vector float)vec_sub(vs16(b00), vs16(b70))); @@ -389,29 +388,6 @@ void fdct_altivec(int16_t *block) b31 = vec_add(b31, x2); b11 = vec_add(b11, x3); /* }}} */ -#else - /* convert to float {{{ */ -#define CTF(n) \ - vs32(b##n##1) = vec_unpackl(vs16(b##n##0)); \ - vs32(b##n##0) = vec_unpackh(vs16(b##n##0)); \ - b##n##1 = vec_ctf(vs32(b##n##1), 0); \ - b##n##0 = vec_ctf(vs32(b##n##0), 0); \ - - CTF(0); - CTF(1); - CTF(2); - CTF(3); - CTF(4); - CTF(5); - CTF(6); - CTF(7); - -#undef CTF - /* }}} */ - - FDCTROW(b00, b10, b20, b30, b40, b50, b60, b70); - FDCTROW(b01, b11, b21, b31, b41, b51, b61, b71); -#endif /* 8x8 matrix transpose (vector float[8][2]) {{{ */ diff --git a/libavcodec/ppc/fft_altivec.c b/libavcodec/ppc/fft_altivec.c index 1ea2369f55..e171665b37 100644 --- a/libavcodec/ppc/fft_altivec.c +++ b/libavcodec/ppc/fft_altivec.c @@ -141,7 +141,9 @@ av_cold void ff_fft_init_altivec(FFTContext *s) { #if HAVE_GNU_AS s->fft_calc = ff_fft_calc_interleave_altivec; - s->imdct_calc = ff_imdct_calc_altivec; - s->imdct_half = ff_imdct_half_altivec; + if (s->mdct_bits >= 5) { + s->imdct_calc = ff_imdct_calc_altivec; + s->imdct_half = ff_imdct_half_altivec; + } #endif } diff --git a/libavcodec/ppc/h264_altivec.c b/libavcodec/ppc/h264_altivec.c index 9ba6bbaf2e..223971bd1a 100644 --- a/libavcodec/ppc/h264_altivec.c +++ b/libavcodec/ppc/h264_altivec.c @@ -967,7 +967,7 @@ H264_WEIGHT( 8, 8) H264_WEIGHT( 8, 4) void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) { - const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8; + const int high_bit_depth = avctx->bits_per_raw_sample > 8; if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) { if (!high_bit_depth) { diff --git a/libavcodec/ppc/mpegvideo_altivec.c b/libavcodec/ppc/mpegvideo_altivec.c index 64898a10d0..465971653d 100644 --- a/libavcodec/ppc/mpegvideo_altivec.c +++ b/libavcodec/ppc/mpegvideo_altivec.c @@ -515,21 +515,6 @@ static void dct_unquantize_h263_altivec(MpegEncContext *s, qaddv = vec_splat((vec_s16)vec_lde(0, &qadd8), 0); nqaddv = vec_sub(vczero, qaddv); -#if 0 // block *is* 16 bytes-aligned, it seems. 
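
/*
 * Illustrative standalone sketch, not part of the patch above: the recurring
 * change in these dsputil_init_* functions widens the high-bit-depth test
 * from "H.264 with more than 8 bits" to any codec with
 * bits_per_raw_sample > 8, so 8-bit-only SIMD routines are installed only
 * for genuinely 8-bit content.  Toy names, simplified gating pattern:
 */
#include <stdint.h>
#include <string.h>

typedef struct ToyDSPContext {
    void (*clear_block)(int16_t *block);
} ToyDSPContext;

static void clear_block_c(int16_t *block)     { memset(block, 0, 64 * sizeof(*block)); }
static void clear_block_simd8(int16_t *block) { memset(block, 0, 64 * sizeof(*block)); /* stand-in for an 8-bit-only AltiVec path */ }

void toy_dsputil_init(ToyDSPContext *c, int bits_per_raw_sample)
{
    const int high_bit_depth = bits_per_raw_sample > 8;

    c->clear_block = clear_block_c;           /* generic C fallback, any depth */
    if (!high_bit_depth)
        c->clear_block = clear_block_simd8;   /* optimized path only for 8-bit samples */
}
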
- // first make sure block[j] is 16 bytes-aligned - for(j = 0; (j <= nCoeffs) && ((((unsigned long)block) + (j << 1)) & 0x0000000F) ; j++) { - level = block[j]; - if (level) { - if (level < 0) { - level = level * qmul - qadd; - } else { - level = level * qmul + qadd; - } - block[j] = level; - } - } -#endif - // vectorize all the 16 bytes-aligned blocks // of 8 elements for(; (j + 7) <= nCoeffs ; j+=8) { @@ -573,15 +558,6 @@ void MPV_common_init_altivec(MpegEncContext *s) { if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC)) return; - if (s->avctx->lowres==0) { - if ((s->avctx->idct_algo == FF_IDCT_AUTO) || - (s->avctx->idct_algo == FF_IDCT_ALTIVEC)) { - s->dsp.idct_put = idct_put_altivec; - s->dsp.idct_add = idct_add_altivec; - s->dsp.idct_permutation_type = FF_TRANSPOSE_IDCT_PERM; - } - } - // Test to make sure that the dct required alignments are met. if ((((long)(s->q_intra_matrix) & 0x0f) != 0) || (((long)(s->q_inter_matrix) & 0x0f) != 0)) { @@ -599,9 +575,6 @@ void MPV_common_init_altivec(MpegEncContext *s) if ((s->avctx->dct_algo == FF_DCT_AUTO) || (s->avctx->dct_algo == FF_DCT_ALTIVEC)) { -#if 0 /* seems to cause trouble under some circumstances */ - s->dct_quantize = dct_quantize_altivec; -#endif s->dct_unquantize_h263_intra = dct_unquantize_h263_altivec; s->dct_unquantize_h263_inter = dct_unquantize_h263_altivec; } diff --git a/libavcodec/ps2/dsputil_mmi.c b/libavcodec/ps2/dsputil_mmi.c index 349583f1ba..d04a425b49 100644 --- a/libavcodec/ps2/dsputil_mmi.c +++ b/libavcodec/ps2/dsputil_mmi.c @@ -142,7 +142,7 @@ static void put_pixels16_mmi(uint8_t *block, const uint8_t *pixels, int line_siz void dsputil_init_mmi(DSPContext* c, AVCodecContext *avctx) { const int idct_algo= avctx->idct_algo; - const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8; + const int high_bit_depth = avctx->bits_per_raw_sample > 8; if (!high_bit_depth) { c->clear_blocks = clear_blocks_mmi; @@ -152,11 +152,12 @@ void dsputil_init_mmi(DSPContext* c, AVCodecContext *avctx) c->put_pixels_tab[0][0] = put_pixels16_mmi; c->put_no_rnd_pixels_tab[0][0] = put_pixels16_mmi; - } c->get_pixels = get_pixels_mmi; + } - if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_PS2){ + if (avctx->bits_per_raw_sample <= 8 && + (idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_PS2)) { c->idct_put= ff_mmi_idct_put; c->idct_add= ff_mmi_idct_add; c->idct = ff_mmi_idct; diff --git a/libavcodec/psymodel.c b/libavcodec/psymodel.c index 133a85f5c1..faadb1b870 100644 --- a/libavcodec/psymodel.c +++ b/libavcodec/psymodel.c @@ -25,16 +25,31 @@ extern const FFPsyModel ff_aac_psy_model; -av_cold int ff_psy_init(FFPsyContext *ctx, AVCodecContext *avctx, - int num_lens, - const uint8_t **bands, const int* num_bands) +av_cold int ff_psy_init(FFPsyContext *ctx, AVCodecContext *avctx, int num_lens, + const uint8_t **bands, const int* num_bands, + int num_groups, const uint8_t *group_map) { + int i, j, k = 0; + ctx->avctx = avctx; - ctx->psy_bands = av_mallocz(sizeof(FFPsyBand) * PSY_MAX_BANDS * avctx->channels); + ctx->ch = av_mallocz(sizeof(ctx->ch[0]) * avctx->channels * 2); + ctx->group = av_mallocz(sizeof(ctx->group[0]) * num_groups); ctx->bands = av_malloc (sizeof(ctx->bands[0]) * num_lens); ctx->num_bands = av_malloc (sizeof(ctx->num_bands[0]) * num_lens); memcpy(ctx->bands, bands, sizeof(ctx->bands[0]) * num_lens); memcpy(ctx->num_bands, num_bands, sizeof(ctx->num_bands[0]) * num_lens); + + /* assign channels to groups (with virtual channels for coupling) */ + for (i = 0; i < num_groups; i++) { + /* NOTE: Add 1 
to handle the AAC chan_config without modification. + * This has the side effect of allowing an array of 0s to map + * to one channel per group. + */ + ctx->group[i].num_ch = group_map[i] + 1; + for (j = 0; j < ctx->group[i].num_ch * 2; j++) + ctx->group[i].ch[j] = &ctx->ch[k++]; + } + switch (ctx->avctx->codec_id) { case CODEC_ID_AAC: ctx->model = &ff_aac_psy_model; @@ -45,13 +60,24 @@ av_cold int ff_psy_init(FFPsyContext *ctx, AVCodecContext *avctx, return 0; } +FFPsyChannelGroup *ff_psy_find_group(FFPsyContext *ctx, int channel) +{ + int i = 0, ch = 0; + + while (ch <= channel) + ch += ctx->group[i++].num_ch; + + return &ctx->group[i-1]; +} + av_cold void ff_psy_end(FFPsyContext *ctx) { if (ctx->model->end) ctx->model->end(ctx); av_freep(&ctx->bands); av_freep(&ctx->num_bands); - av_freep(&ctx->psy_bands); + av_freep(&ctx->group); + av_freep(&ctx->ch); } typedef struct FFPsyPreprocessContext{ diff --git a/libavcodec/psymodel.h b/libavcodec/psymodel.h index c65614a151..a7b7948cd2 100644 --- a/libavcodec/psymodel.h +++ b/libavcodec/psymodel.h @@ -41,6 +41,23 @@ typedef struct FFPsyBand { } FFPsyBand; /** + * single channel psychoacoustic information + */ +typedef struct FFPsyChannel { + FFPsyBand psy_bands[PSY_MAX_BANDS]; ///< channel bands information + float entropy; ///< total PE for this channel +} FFPsyChannel; + +/** + * psychoacoustic information for an arbitrary group of channels + */ +typedef struct FFPsyChannelGroup { + FFPsyChannel *ch[PSY_MAX_CHANS]; ///< pointers to the individual channels in the group + uint8_t num_ch; ///< number of channels in this group + uint8_t coupling[PSY_MAX_BANDS]; ///< allow coupling for this band in the group +} FFPsyChannelGroup; + +/** * windowing related information */ typedef struct FFPsyWindowInfo { @@ -58,14 +75,14 @@ typedef struct FFPsyContext { AVCodecContext *avctx; ///< encoder context const struct FFPsyModel *model; ///< encoder-specific model functions - FFPsyBand *psy_bands; ///< frame bands information + FFPsyChannel *ch; ///< single channel information + FFPsyChannelGroup *group; ///< channel group information + int num_groups; ///< number of channel groups uint8_t **bands; ///< scalefactor band sizes for possible frame sizes int *num_bands; ///< number of scalefactor bands for possible frame sizes int num_lens; ///< number of scalefactor band sets - float pe[PSY_MAX_CHANS]; ///< total PE for each channel in the frame - struct { int size; ///< size of the bitresevoir in bits int bits; ///< number of bits used in the bitresevoir @@ -95,14 +112,14 @@ typedef struct FFPsyModel { FFPsyWindowInfo (*window)(FFPsyContext *ctx, const int16_t *audio, const int16_t *la, int channel, int prev_type); /** - * Perform psychoacoustic analysis and set band info (threshold, energy). + * Perform psychoacoustic analysis and set band info (threshold, energy) for a group of channels. 
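
/*
 * Illustrative standalone sketch, not part of the patch above: the group_map
 * convention introduced for ff_psy_init() stores (#channels in group) - 1 per
 * entry, and ff_psy_find_group() walks the running channel count to locate a
 * channel's group.  A compact re-implementation of that mapping with toy
 * types (coupling's virtual channels are left out for brevity):
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_GROUPS 8

typedef struct ToyGroup { uint8_t num_ch; } ToyGroup;

static ToyGroup groups[MAX_GROUPS];

static void init_groups(int num_groups, const uint8_t *group_map)
{
    int i;
    for (i = 0; i < num_groups; i++)
        groups[i].num_ch = group_map[i] + 1;   /* all-zero map -> one channel per group */
}

static ToyGroup *find_group(int channel)
{
    int i = 0, ch = 0;
    while (ch <= channel)                      /* advance until the running count passes 'channel' */
        ch += groups[i++].num_ch;
    return &groups[i - 1];
}

int main(void)
{
    /* e.g. a 5.1-style layout: groups of 1, 2, 2 and 1 channels */
    const uint8_t map[] = { 0, 1, 1, 0 };
    init_groups(4, map);
    printf("channel 3 sits in a group of %d channels\n", find_group(3)->num_ch);
    return 0;
}
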
* - * @param ctx model context - * @param channel audio channel number - * @param coeffs pointer to the transformed coefficients - * @param wi window information + * @param ctx model context + * @param channel channel number of the first channel in the group to perform analysis on + * @param coeffs array of pointers to the transformed coefficients + * @param wi window information for the channels in the group */ - void (*analyze)(FFPsyContext *ctx, int channel, const float *coeffs, const FFPsyWindowInfo *wi); + void (*analyze)(FFPsyContext *ctx, int channel, const float **coeffs, const FFPsyWindowInfo *wi); void (*end) (FFPsyContext *apc); } FFPsyModel; @@ -115,12 +132,24 @@ typedef struct FFPsyModel { * @param num_lens number of possible frame lengths * @param bands scalefactor band lengths for all frame lengths * @param num_bands number of scalefactor bands for all frame lengths + * @param num_groups number of channel groups + * @param group_map array with # of channels in group - 1, for each group * * @return zero if successful, a negative value if not */ -av_cold int ff_psy_init(FFPsyContext *ctx, AVCodecContext *avctx, - int num_lens, - const uint8_t **bands, const int* num_bands); +av_cold int ff_psy_init(FFPsyContext *ctx, AVCodecContext *avctx, int num_lens, + const uint8_t **bands, const int* num_bands, + int num_groups, const uint8_t *group_map); + +/** + * Determine what group a channel belongs to. + * + * @param ctx psymodel context + * @param channel channel to locate the group for + * + * @return pointer to the FFPsyChannelGroup this channel belongs to + */ +FFPsyChannelGroup *ff_psy_find_group(FFPsyContext *ctx, int channel); /** * Cleanup model context at the end. diff --git a/libavcodec/pthread.c b/libavcodec/pthread.c index e84f2aea4c..4c0d4210ad 100644 --- a/libavcodec/pthread.c +++ b/libavcodec/pthread.c @@ -411,9 +411,10 @@ static void release_delayed_buffers(PerThreadContext *p) FrameThreadContext *fctx = p->parent; while (p->num_released_buffers > 0) { - AVFrame *f = &p->released_buffers[--p->num_released_buffers]; + AVFrame *f; pthread_mutex_lock(&fctx->buffer_mutex); + f = &p->released_buffers[--p->num_released_buffers]; free_progress(f); f->thread_opaque = NULL; @@ -749,9 +750,12 @@ void ff_thread_flush(AVCodecContext *avctx) if (!avctx->thread_opaque) return; park_frame_worker_threads(fctx, avctx->thread_count); - - if (fctx->prev_thread) - update_context_from_thread(fctx->threads->avctx, fctx->prev_thread->avctx, 0); + if (fctx->prev_thread) { + if (fctx->prev_thread != &fctx->threads[0]) + update_context_from_thread(fctx->threads[0].avctx, fctx->prev_thread->avctx, 0); + if (avctx->codec->flush) + avctx->codec->flush(fctx->threads[0].avctx); + } fctx->next_decoding = fctx->next_finished = 0; fctx->delaying = 1; @@ -839,6 +843,7 @@ int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f) void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f) { PerThreadContext *p = avctx->thread_opaque; + FrameThreadContext *fctx; if (!(avctx->active_thread_type&FF_THREAD_FRAME)) { avctx->release_buffer(avctx, f); @@ -854,7 +859,10 @@ void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f) av_log(avctx, AV_LOG_DEBUG, "thread_release_buffer called on pic %p, %d buffers used\n", f, f->owner->internal_buffer_count); + fctx = p->parent; + pthread_mutex_lock(&fctx->buffer_mutex); p->released_buffers[p->num_released_buffers++] = *f; + pthread_mutex_unlock(&fctx->buffer_mutex); memset(f->data, 0, sizeof(f->data)); } diff --git a/libavcodec/ptx.c b/libavcodec/ptx.c 
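
/*
 * Illustrative standalone sketch, not part of the patch above: the pthread.c
 * hunks move every access to released_buffers[] / num_released_buffers under
 * buffer_mutex, on both the producer (ff_thread_release_buffer) and consumer
 * (release_delayed_buffers) side.  The same pattern with simplified types:
 */
#include <pthread.h>

#define MAX_DELAYED 32

typedef struct ToyFrame { void *data; } ToyFrame;

static pthread_mutex_t buffer_mutex = PTHREAD_MUTEX_INITIALIZER;
static ToyFrame released_buffers[MAX_DELAYED];
static int num_released_buffers;

/* producer: called from a decoding thread */
void toy_release_buffer(const ToyFrame *f)
{
    pthread_mutex_lock(&buffer_mutex);
    released_buffers[num_released_buffers++] = *f;     /* copy under the lock */
    pthread_mutex_unlock(&buffer_mutex);
}

/* consumer: drains the list, reading the index under the same lock */
void toy_release_delayed_buffers(void)
{
    while (num_released_buffers > 0) {
        ToyFrame *f;

        pthread_mutex_lock(&buffer_mutex);
        f = &released_buffers[--num_released_buffers]; /* index decrement under the lock */
        f->data = NULL;                                /* stand-in for freeing the buffer */
        pthread_mutex_unlock(&buffer_mutex);
    }
}
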
index 3273fd2f8e..0b809784ab 100644 --- a/libavcodec/ptx.c +++ b/libavcodec/ptx.c @@ -107,15 +107,13 @@ static av_cold int ptx_end(AVCodecContext *avctx) { } AVCodec ff_ptx_decoder = { - "ptx", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_PTX, - sizeof(PTXContext), - ptx_init, - NULL, - ptx_end, - ptx_decode_frame, - CODEC_CAP_DR1, - NULL, + .name = "ptx", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_PTX, + .priv_data_size = sizeof(PTXContext), + .init = ptx_init, + .close = ptx_end, + .decode = ptx_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("V.Flash PTX image"), }; diff --git a/libavcodec/put_bits.h b/libavcodec/put_bits.h index 79016912d5..a3fc5f16f1 100644 --- a/libavcodec/put_bits.h +++ b/libavcodec/put_bits.h @@ -36,19 +36,10 @@ #include "mathops.h" #include "config.h" -//#define ALT_BITSTREAM_WRITER -//#define ALIGNED_BITSTREAM_WRITER - -/* buf and buf_end must be present and used by every alternative writer. */ typedef struct PutBitContext { -#ifdef ALT_BITSTREAM_WRITER - uint8_t *buf, *buf_end; - int index; -#else uint32_t bit_buf; int bit_left; uint8_t *buf, *buf_ptr, *buf_end; -#endif int size_in_bits; } PutBitContext; @@ -68,15 +59,9 @@ static inline void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_s s->size_in_bits= 8*buffer_size; s->buf = buffer; s->buf_end = s->buf + buffer_size; -#ifdef ALT_BITSTREAM_WRITER - s->index=0; - ((uint32_t*)(s->buf))[0]=0; -// memset(buffer, 0, buffer_size); -#else s->buf_ptr = s->buf; s->bit_left=32; s->bit_buf=0; -#endif } /** @@ -84,11 +69,7 @@ static inline void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_s */ static inline int put_bits_count(PutBitContext *s) { -#ifdef ALT_BITSTREAM_WRITER - return s->index; -#else return (s->buf_ptr - s->buf) * 8 + 32 - s->bit_left; -#endif } /** @@ -96,9 +77,6 @@ static inline int put_bits_count(PutBitContext *s) */ static inline void flush_put_bits(PutBitContext *s) { -#ifdef ALT_BITSTREAM_WRITER - align_put_bits(s); -#else #ifndef BITSTREAM_WRITER_LE s->bit_buf<<= s->bit_left; #endif @@ -115,10 +93,9 @@ static inline void flush_put_bits(PutBitContext *s) } s->bit_left=32; s->bit_buf=0; -#endif } -#if defined(ALT_BITSTREAM_WRITER) || defined(BITSTREAM_WRITER_LE) +#ifdef BITSTREAM_WRITER_LE #define align_put_bits align_put_bits_unsupported_here #define ff_put_string ff_put_string_unsupported_here #define ff_copy_bits ff_copy_bits_unsupported_here @@ -148,7 +125,6 @@ void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length); * Use put_bits32 to write 32 bits. 
*/ static inline void put_bits(PutBitContext *s, int n, unsigned int value) -#ifndef ALT_BITSTREAM_WRITER { unsigned int bit_buf; int bit_left; @@ -164,12 +140,7 @@ static inline void put_bits(PutBitContext *s, int n, unsigned int value) #ifdef BITSTREAM_WRITER_LE bit_buf |= value << (32 - bit_left); if (n >= bit_left) { -#if !HAVE_FAST_UNALIGNED - if (3 & (intptr_t) s->buf_ptr) { - AV_WL32(s->buf_ptr, bit_buf); - } else -#endif - *(uint32_t *)s->buf_ptr = av_le2ne32(bit_buf); + AV_WL32(s->buf_ptr, bit_buf); s->buf_ptr+=4; bit_buf = (bit_left==32)?0:value >> bit_left; bit_left+=32; @@ -182,12 +153,7 @@ static inline void put_bits(PutBitContext *s, int n, unsigned int value) } else { bit_buf<<=bit_left; bit_buf |= value >> (n - bit_left); -#if !HAVE_FAST_UNALIGNED - if (3 & (intptr_t) s->buf_ptr) { - AV_WB32(s->buf_ptr, bit_buf); - } else -#endif - *(uint32_t *)s->buf_ptr = av_be2ne32(bit_buf); + AV_WB32(s->buf_ptr, bit_buf); //printf("bitbuf = %08x\n", bit_buf); s->buf_ptr+=4; bit_left+=32 - n; @@ -198,70 +164,6 @@ static inline void put_bits(PutBitContext *s, int n, unsigned int value) s->bit_buf = bit_buf; s->bit_left = bit_left; } -#else /* ALT_BITSTREAM_WRITER defined */ -{ -# ifdef ALIGNED_BITSTREAM_WRITER -# if ARCH_X86 - __asm__ volatile( - "movl %0, %%ecx \n\t" - "xorl %%eax, %%eax \n\t" - "shrdl %%cl, %1, %%eax \n\t" - "shrl %%cl, %1 \n\t" - "movl %0, %%ecx \n\t" - "shrl $3, %%ecx \n\t" - "andl $0xFFFFFFFC, %%ecx \n\t" - "bswapl %1 \n\t" - "orl %1, (%2, %%ecx) \n\t" - "bswapl %%eax \n\t" - "addl %3, %0 \n\t" - "movl %%eax, 4(%2, %%ecx) \n\t" - : "=&r" (s->index), "=&r" (value) - : "r" (s->buf), "r" (n), "0" (s->index), "1" (value<<(-n)) - : "%eax", "%ecx" - ); -# else - int index= s->index; - uint32_t *ptr= ((uint32_t *)s->buf)+(index>>5); - - value<<= 32-n; - - ptr[0] |= av_be2ne32(value>>(index&31)); - ptr[1] = av_be2ne32(value<<(32-(index&31))); -//if(n>24) printf("%d %d\n", n, value); - index+= n; - s->index= index; -# endif -# else //ALIGNED_BITSTREAM_WRITER -# if ARCH_X86 - __asm__ volatile( - "movl $7, %%ecx \n\t" - "andl %0, %%ecx \n\t" - "addl %3, %%ecx \n\t" - "negl %%ecx \n\t" - "shll %%cl, %1 \n\t" - "bswapl %1 \n\t" - "movl %0, %%ecx \n\t" - "shrl $3, %%ecx \n\t" - "orl %1, (%%ecx, %2) \n\t" - "addl %3, %0 \n\t" - "movl $0, 4(%%ecx, %2) \n\t" - : "=&r" (s->index), "=&r" (value) - : "r" (s->buf), "r" (n), "0" (s->index), "1" (value) - : "%ecx" - ); -# else - int index= s->index; - uint32_t *ptr= (uint32_t*)(((uint8_t *)s->buf)+(index>>3)); - - ptr[0] |= av_be2ne32(value<<(32-n-(index&7) )); - ptr[1] = 0; -//if(n>24) printf("%d %d\n", n, value); - index+= n; - s->index= index; -# endif -# endif //!ALIGNED_BITSTREAM_WRITER -} -#endif static inline void put_sbits(PutBitContext *pb, int n, int32_t value) { @@ -292,11 +194,7 @@ static void av_unused put_bits32(PutBitContext *s, uint32_t value) */ static inline uint8_t* put_bits_ptr(PutBitContext *s) { -#ifdef ALT_BITSTREAM_WRITER - return s->buf + (s->index>>3); -#else return s->buf_ptr; -#endif } /** @@ -306,13 +204,8 @@ static inline uint8_t* put_bits_ptr(PutBitContext *s) static inline void skip_put_bytes(PutBitContext *s, int n) { assert((put_bits_count(s)&7)==0); -#ifdef ALT_BITSTREAM_WRITER - FIXME may need some cleaning of the buffer - s->index += n<<3; -#else assert(s->bit_left==32); s->buf_ptr += n; -#endif } /** @@ -322,13 +215,9 @@ static inline void skip_put_bytes(PutBitContext *s, int n) */ static inline void skip_put_bits(PutBitContext *s, int n) { -#ifdef ALT_BITSTREAM_WRITER - s->index += n; -#else 
s->bit_left -= n; s->buf_ptr-= 4*(s->bit_left>>5); s->bit_left &= 31; -#endif } /** diff --git a/libavcodec/qcelpdata.h b/libavcodec/qcelpdata.h index d79cea9f6c..e71ee9fdb7 100644 --- a/libavcodec/qcelpdata.h +++ b/libavcodec/qcelpdata.h @@ -38,14 +38,14 @@ * QCELP unpacked data frame */ typedef struct { -/// @defgroup qcelp_codebook_parameters QCELP excitation codebook parameters +/// @name QCELP excitation codebook parameters /// @{ uint8_t cbsign[16]; ///!< sign of the codebook gain for each codebook subframe uint8_t cbgain[16]; ///!< unsigned codebook gain for each codebook subframe uint8_t cindex[16]; ///!< codebook index for each codebook subframe /// @} -/// @defgroup qcelp_pitch_parameters QCELP pitch prediction parameters +/// @name QCELP pitch prediction parameters /// @{ uint8_t plag[4]; ///!< pitch lag for each pitch subframe uint8_t pfrac[4]; ///!< fractional pitch lag for each pitch subframe @@ -74,9 +74,9 @@ typedef struct { static const float qcelp_hammsinc_table[4] = { -0.006822, 0.041249, -0.143459, 0.588863}; typedef struct { - uint8_t index; /*!< index into the QCELPContext structure */ - uint8_t bitpos; /*!< position of the lowest bit in the value's byte */ - uint8_t bitlen; /*!< number of bits to read */ + uint8_t index; /**< index into the QCELPContext structure */ + uint8_t bitpos; /**< position of the lowest bit in the value's byte */ + uint8_t bitlen; /**< number of bits to read */ } QCELPBitmap; #define QCELP_OF(variable, bit, len) {offsetof(QCELPFrame, variable), bit, len} diff --git a/libavcodec/qcelpdec.c b/libavcodec/qcelpdec.c index 3ed821c81e..d565003a9e 100644 --- a/libavcodec/qcelpdec.c +++ b/libavcodec/qcelpdec.c @@ -46,7 +46,7 @@ typedef enum { - I_F_Q = -1, /*!< insufficient frame quality */ + I_F_Q = -1, /**< insufficient frame quality */ SILENCE, RATE_OCTAVE, RATE_QUARTER, @@ -58,12 +58,12 @@ typedef struct { GetBitContext gb; qcelp_packet_rate bitrate; - QCELPFrame frame; /*!< unpacked data frame */ + QCELPFrame frame; /**< unpacked data frame */ uint8_t erasure_count; - uint8_t octave_count; /*!< count the consecutive RATE_OCTAVE frames */ + uint8_t octave_count; /**< count the consecutive RATE_OCTAVE frames */ float prev_lspf[10]; - float predictor_lspf[10];/*!< LSP predictor for RATE_OCTAVE and I_F_Q */ + float predictor_lspf[10];/**< LSP predictor for RATE_OCTAVE and I_F_Q */ float pitch_synthesis_filter_mem[303]; float pitch_pre_filter_mem[303]; float rnd_fir_filter_mem[180]; diff --git a/libavcodec/qdm2.c b/libavcodec/qdm2.c index 6eb836456c..7a1f3e1021 100644 --- a/libavcodec/qdm2.c +++ b/libavcodec/qdm2.c @@ -26,6 +26,7 @@ * @file * QDM2 decoder * @author Ewald Snel, Benjamin Larsson, Alex Beregszaszi, Roberto Togni + * * The decoder is not perfect yet, there are still some distortions * especially on files encoded with 16 or 8 subbands. 
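
/*
 * Illustrative standalone sketch, not part of the patch above: with
 * ALT_BITSTREAM_WRITER removed, put_bits.h keeps only the 32-bit accumulator
 * writer (bit_buf/bit_left) that flushes whole big-endian words via AV_WB32.
 * A self-contained version of that scheme with toy names:
 */
#include <stdint.h>
#include <stdio.h>

typedef struct ToyPutBits {
    uint32_t bit_buf;
    int      bit_left;
    uint8_t *buf, *buf_ptr, *buf_end;
} ToyPutBits;

static void toy_init_put_bits(ToyPutBits *s, uint8_t *buffer, int size)
{
    s->buf = s->buf_ptr = buffer;
    s->buf_end  = buffer + size;
    s->bit_buf  = 0;
    s->bit_left = 32;
}

static void toy_write_be32(uint8_t *p, uint32_t v)
{
    p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

/* n must be 1..31 and value must fit in n bits, as with the real put_bits() */
static void toy_put_bits(ToyPutBits *s, int n, uint32_t value)
{
    if (n < s->bit_left) {
        s->bit_buf   = (s->bit_buf << n) | value;
        s->bit_left -= n;
    } else {
        s->bit_buf <<= s->bit_left;
        s->bit_buf  |= value >> (n - s->bit_left);
        toy_write_be32(s->buf_ptr, s->bit_buf);   /* flush a whole 32-bit word */
        s->buf_ptr  += 4;
        s->bit_left += 32 - n;
        s->bit_buf   = value;                     /* keep the leftover low bits */
    }
}

static void toy_flush_put_bits(ToyPutBits *s)
{
    if (s->bit_left < 32)
        s->bit_buf <<= s->bit_left;
    while (s->bit_left < 32) {                    /* write out the pending bytes */
        *s->buf_ptr++ = s->bit_buf >> 24;
        s->bit_buf  <<= 8;
        s->bit_left  += 8;
    }
    s->bit_left = 32;
    s->bit_buf  = 0;
}

int main(void)
{
    uint8_t out[8] = { 0 };
    ToyPutBits pb;

    toy_init_put_bits(&pb, out, sizeof(out));
    toy_put_bits(&pb, 3, 0x5);
    toy_put_bits(&pb, 13, 0x1abc);
    toy_flush_put_bits(&pb);
    printf("%02x %02x\n", out[0], out[1]);        /* prints "ba bc" */
    return 0;
}
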
*/ diff --git a/libavcodec/qdrw.c b/libavcodec/qdrw.c index cd3146388e..4c9224c5e9 100644 --- a/libavcodec/qdrw.c +++ b/libavcodec/qdrw.c @@ -152,14 +152,13 @@ static av_cold int decode_end(AVCodecContext *avctx){ } AVCodec ff_qdraw_decoder = { - "qdraw", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_QDRAW, - sizeof(QdrawContext), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, + .name = "qdraw", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_QDRAW, + .priv_data_size = sizeof(QdrawContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Apple QuickDraw"), }; diff --git a/libavcodec/qpeg.c b/libavcodec/qpeg.c index 39d8171951..595420bcb1 100644 --- a/libavcodec/qpeg.c +++ b/libavcodec/qpeg.c @@ -320,14 +320,13 @@ static av_cold int decode_end(AVCodecContext *avctx){ } AVCodec ff_qpeg_decoder = { - "qpeg", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_QPEG, - sizeof(QpegContext), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, + .name = "qpeg", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_QPEG, + .priv_data_size = sizeof(QpegContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Q-team QPEG"), }; diff --git a/libavcodec/qtrle.c b/libavcodec/qtrle.c index a2b6c7f991..28d8eed82f 100644 --- a/libavcodec/qtrle.c +++ b/libavcodec/qtrle.c @@ -539,15 +539,14 @@ static av_cold int qtrle_decode_end(AVCodecContext *avctx) } AVCodec ff_qtrle_decoder = { - "qtrle", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_QTRLE, - sizeof(QtrleContext), - qtrle_decode_init, - NULL, - qtrle_decode_end, - qtrle_decode_frame, - CODEC_CAP_DR1, + .name = "qtrle", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_QTRLE, + .priv_data_size = sizeof(QtrleContext), + .init = qtrle_decode_init, + .close = qtrle_decode_end, + .decode = qtrle_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("QuickTime Animation (RLE) video"), }; diff --git a/libavcodec/qtrleenc.c b/libavcodec/qtrleenc.c index 6258b143ad..0e10c76365 100644 --- a/libavcodec/qtrleenc.c +++ b/libavcodec/qtrleenc.c @@ -344,13 +344,13 @@ static av_cold int qtrle_encode_end(AVCodecContext *avctx) } AVCodec ff_qtrle_encoder = { - "qtrle", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_QTRLE, - sizeof(QtrleEncContext), - qtrle_encode_init, - qtrle_encode_frame, - qtrle_encode_end, + .name = "qtrle", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_QTRLE, + .priv_data_size = sizeof(QtrleEncContext), + .init = qtrle_encode_init, + .encode = qtrle_encode_frame, + .close = qtrle_encode_end, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB555BE, PIX_FMT_ARGB, PIX_FMT_GRAY8, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("QuickTime Animation (RLE) video"), }; diff --git a/libavcodec/r210dec.c b/libavcodec/r210dec.c index 293fe654ad..18086c6916 100644 --- a/libavcodec/r210dec.c +++ b/libavcodec/r210dec.c @@ -98,29 +98,25 @@ static av_cold int decode_close(AVCodecContext *avctx) #if CONFIG_R210_DECODER AVCodec ff_r210_decoder = { - "r210", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_R210, - 0, - decode_init, - NULL, - decode_close, - decode_frame, - CODEC_CAP_DR1, + .name = "r210", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_R210, + .init = decode_init, + .close = decode_close, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Uncompressed RGB 10-bit"), }; #endif #if CONFIG_R10K_DECODER AVCodec ff_r10k_decoder = { - 
"r10k", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_R10K, - 0, - decode_init, - NULL, - decode_close, - decode_frame, - CODEC_CAP_DR1, + .name = "r10k", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_R10K, + .init = decode_init, + .close = decode_close, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("AJA Kona 10-bit RGB Codec"), }; #endif diff --git a/libavcodec/ratecontrol.c b/libavcodec/ratecontrol.c index 6874fc7034..af8289e312 100644 --- a/libavcodec/ratecontrol.c +++ b/libavcodec/ratecontrol.c @@ -44,9 +44,9 @@ static double get_qscale(MpegEncContext *s, RateControlEntry *rce, double rate_f void ff_write_pass1_stats(MpegEncContext *s){ snprintf(s->avctx->stats_out, 256, "in:%d out:%d type:%d q:%d itex:%d ptex:%d mv:%d misc:%d fcode:%d bcode:%d mc-var:%d var:%d icount:%d skipcount:%d hbits:%d;\n", - s->current_picture_ptr->display_picture_number, s->current_picture_ptr->coded_picture_number, s->pict_type, - s->current_picture.quality, s->i_tex_bits, s->p_tex_bits, s->mv_bits, s->misc_bits, - s->f_code, s->b_code, s->current_picture.mc_mb_var_sum, s->current_picture.mb_var_sum, s->i_count, s->skip_count, s->header_bits); + s->current_picture_ptr->f.display_picture_number, s->current_picture_ptr->f.coded_picture_number, s->pict_type, + s->current_picture.f.quality, s->i_tex_bits, s->p_tex_bits, s->mv_bits, s->misc_bits, + s->f_code, s->b_code, s->current_picture.mc_mb_var_sum, s->current_picture.mb_var_sum, s->i_count, s->skip_count, s->header_bits); } static inline double qp2bits(RateControlEntry *rce, double qp){ @@ -707,10 +707,10 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run) //if(dts_pic) // av_log(NULL, AV_LOG_ERROR, "%Ld %Ld %Ld %d\n", s->current_picture_ptr->pts, s->user_specified_pts, dts_pic->pts, picture_number); - if(!dts_pic || dts_pic->pts == AV_NOPTS_VALUE) + if (!dts_pic || dts_pic->f.pts == AV_NOPTS_VALUE) wanted_bits= (uint64_t)(s->bit_rate*(double)picture_number/fps); else - wanted_bits= (uint64_t)(s->bit_rate*(double)dts_pic->pts/fps); + wanted_bits = (uint64_t)(s->bit_rate*(double)dts_pic->f.pts / fps); } diff= s->total_bits - wanted_bits; diff --git a/libavcodec/raw.c b/libavcodec/raw.c index 0bc04df071..a26dea8146 100644 --- a/libavcodec/raw.c +++ b/libavcodec/raw.c @@ -36,6 +36,7 @@ const PixelFormatTag ff_raw_pix_fmt_tags[] = { { PIX_FMT_YUV411P, MKTAG('Y', '4', '1', 'B') }, { PIX_FMT_YUV422P, MKTAG('Y', '4', '2', 'B') }, { PIX_FMT_YUV422P, MKTAG('P', '4', '2', '2') }, + { PIX_FMT_YUV422P, MKTAG('Y', 'V', '1', '6') }, /* yuvjXXX formats are deprecated hacks specific to libav*, they are identical to yuvXXX */ { PIX_FMT_YUVJ420P, MKTAG('I', '4', '2', '0') }, /* Planar formats */ @@ -44,7 +45,7 @@ const PixelFormatTag ff_raw_pix_fmt_tags[] = { { PIX_FMT_YUVJ422P, MKTAG('Y', '4', '2', 'B') }, { PIX_FMT_YUVJ422P, MKTAG('P', '4', '2', '2') }, { PIX_FMT_GRAY8, MKTAG('Y', '8', '0', '0') }, - { PIX_FMT_GRAY8, MKTAG(' ', ' ', 'Y', '8') }, + { PIX_FMT_GRAY8, MKTAG('Y', '8', ' ', ' ') }, { PIX_FMT_YUYV422, MKTAG('Y', 'U', 'Y', '2') }, /* Packed formats */ { PIX_FMT_YUYV422, MKTAG('Y', '4', '2', '2') }, @@ -135,6 +136,7 @@ const PixelFormatTag ff_raw_pix_fmt_tags[] = { /* special */ { PIX_FMT_RGB565LE,MKTAG( 3 , 0 , 0 , 0 ) }, /* flipped RGB565LE */ + { PIX_FMT_YUV444P, MKTAG('Y', 'V', '2', '4') }, /* YUV444P, swapped UV */ { PIX_FMT_NONE, 0 }, }; diff --git a/libavcodec/rawdec.c b/libavcodec/rawdec.c index d6791c39e1..571783d3e0 100644 --- a/libavcodec/rawdec.c +++ b/libavcodec/rawdec.c @@ -103,13 +103,15 @@ static av_cold 
int raw_init_decoder(AVCodecContext *avctx) } ff_set_systematic_pal2(context->palette, avctx->pix_fmt); - context->length = avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height); if((avctx->bits_per_coded_sample == 4 || avctx->bits_per_coded_sample == 2) && avctx->pix_fmt==PIX_FMT_PAL8 && (!avctx->codec_tag || avctx->codec_tag == MKTAG('r','a','w',' '))){ + context->length = avpicture_get_size(avctx->pix_fmt, (avctx->width+3)&~3, avctx->height); context->buffer = av_malloc(context->length); if (!context->buffer) return -1; + } else { + context->length = avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height); } context->pic.pict_type = AV_PICTURE_TYPE_I; context->pic.key_frame = 1; @@ -188,13 +190,21 @@ static int raw_decode(AVCodecContext *avctx, memcpy(frame->data[1], avctx->palctrl->palette, AVPALETTE_SIZE); avctx->palctrl->palette_changed = 0; } - if(avctx->pix_fmt==PIX_FMT_BGR24 && ((frame->linesize[0]+3)&~3)*avctx->height <= buf_size) + if((avctx->pix_fmt==PIX_FMT_BGR24 || + avctx->pix_fmt==PIX_FMT_GRAY8 || + avctx->pix_fmt==PIX_FMT_RGB555LE || + avctx->pix_fmt==PIX_FMT_RGB555BE || + avctx->pix_fmt==PIX_FMT_RGB565LE || + avctx->pix_fmt==PIX_FMT_PAL8) && + ((frame->linesize[0]+3)&~3)*avctx->height <= buf_size) frame->linesize[0] = (frame->linesize[0]+3)&~3; if(context->flip) flip(avctx, picture); if ( avctx->codec_tag == MKTAG('Y', 'V', '1', '2') + || avctx->codec_tag == MKTAG('Y', 'V', '1', '6') + || avctx->codec_tag == MKTAG('Y', 'V', '2', '4') || avctx->codec_tag == MKTAG('Y', 'V', 'U', '9')) FFSWAP(uint8_t *, picture->data[1], picture->data[2]); @@ -222,14 +232,13 @@ static av_cold int raw_close_decoder(AVCodecContext *avctx) } AVCodec ff_rawvideo_decoder = { - "rawvideo", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_RAWVIDEO, - sizeof(RawVideoContext), - raw_init_decoder, - NULL, - raw_close_decoder, - raw_decode, + .name = "rawvideo", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_RAWVIDEO, + .priv_data_size = sizeof(RawVideoContext), + .init = raw_init_decoder, + .close = raw_close_decoder, + .decode = raw_decode, .long_name = NULL_IF_CONFIG_SMALL("raw video"), .priv_class= &class, }; diff --git a/libavcodec/rawenc.c b/libavcodec/rawenc.c index 772ce94067..7077de170f 100644 --- a/libavcodec/rawenc.c +++ b/libavcodec/rawenc.c @@ -56,11 +56,11 @@ static int raw_encode(AVCodecContext *avctx, } AVCodec ff_rawvideo_encoder = { - "rawvideo", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_RAWVIDEO, - sizeof(AVFrame), - raw_init_encoder, - raw_encode, + .name = "rawvideo", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_RAWVIDEO, + .priv_data_size = sizeof(AVFrame), + .init = raw_init_encoder, + .encode = raw_encode, .long_name = NULL_IF_CONFIG_SMALL("raw video"), }; diff --git a/libavcodec/rl2.c b/libavcodec/rl2.c index 8a553539ab..7d1cf16aee 100644 --- a/libavcodec/rl2.c +++ b/libavcodec/rl2.c @@ -20,11 +20,10 @@ */ /** - * RL2 Video Decoder * @file + * RL2 Video Decoder * @author Sascha Sommer (saschasommer@freenet.de) - * For more information about the RL2 format, visit: - * http://wiki.multimedia.cx/index.php?title=RL2 + * @see http://wiki.multimedia.cx/index.php?title=RL2 */ #include <stdio.h> @@ -221,15 +220,14 @@ static av_cold int rl2_decode_end(AVCodecContext *avctx) AVCodec ff_rl2_decoder = { - "rl2", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_RL2, - sizeof(Rl2Context), - rl2_decode_init, - NULL, - rl2_decode_end, - rl2_decode_frame, - CODEC_CAP_DR1, + .name = "rl2", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_RL2, + .priv_data_size = sizeof(Rl2Context), + .init = rl2_decode_init, + .close 
= rl2_decode_end, + .decode = rl2_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("RL2 video"), }; diff --git a/libavcodec/roqaudioenc.c b/libavcodec/roqaudioenc.c index f6bd726c4f..ac8c94a045 100644 --- a/libavcodec/roqaudioenc.c +++ b/libavcodec/roqaudioenc.c @@ -154,14 +154,13 @@ static av_cold int roq_dpcm_encode_close(AVCodecContext *avctx) } AVCodec ff_roq_dpcm_encoder = { - "roq_dpcm", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_ROQ_DPCM, - sizeof(ROQDPCMContext), - roq_dpcm_encode_init, - roq_dpcm_encode_frame, - roq_dpcm_encode_close, - NULL, + .name = "roq_dpcm", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_ROQ_DPCM, + .priv_data_size = sizeof(ROQDPCMContext), + .init = roq_dpcm_encode_init, + .encode = roq_dpcm_encode_frame, + .close = roq_dpcm_encode_close, .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("id RoQ DPCM"), }; diff --git a/libavcodec/roqvideodec.c b/libavcodec/roqvideodec.c index f0977f6491..4af7ede9ad 100644 --- a/libavcodec/roqvideodec.c +++ b/libavcodec/roqvideodec.c @@ -213,14 +213,13 @@ static av_cold int roq_decode_end(AVCodecContext *avctx) } AVCodec ff_roq_decoder = { - "roqvideo", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_ROQ, - sizeof(RoqContext), - roq_decode_init, - NULL, - roq_decode_end, - roq_decode_frame, - CODEC_CAP_DR1, + .name = "roqvideo", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_ROQ, + .priv_data_size = sizeof(RoqContext), + .init = roq_decode_init, + .close = roq_decode_end, + .decode = roq_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("id RoQ video"), }; diff --git a/libavcodec/rpza.c b/libavcodec/rpza.c index 12558563c6..ea90ed6756 100644 --- a/libavcodec/rpza.c +++ b/libavcodec/rpza.c @@ -277,14 +277,13 @@ static av_cold int rpza_decode_end(AVCodecContext *avctx) } AVCodec ff_rpza_decoder = { - "rpza", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_RPZA, - sizeof(RpzaContext), - rpza_decode_init, - NULL, - rpza_decode_end, - rpza_decode_frame, - CODEC_CAP_DR1, + .name = "rpza", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_RPZA, + .priv_data_size = sizeof(RpzaContext), + .init = rpza_decode_init, + .close = rpza_decode_end, + .decode = rpza_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("QuickTime video (RPZA)"), }; diff --git a/libavcodec/rtjpeg.c b/libavcodec/rtjpeg.c index 4c48f25b2c..303183f230 100644 --- a/libavcodec/rtjpeg.c +++ b/libavcodec/rtjpeg.c @@ -33,12 +33,12 @@ if (n) {skip_bits(gb, n);} /** - * \brief read one block from stream - * \param gb contains stream data - * \param block where data is written to - * \param scan array containing the mapping stream address -> block position - * \param quant quantization factors - * \return 0 means the block is not coded, < 0 means an error occurred. + * @brief read one block from stream + * @param gb contains stream data + * @param block where data is written to + * @param scan array containing the mapping stream address -> block position + * @param quant quantization factors + * @return 0 means the block is not coded, < 0 means an error occurred. * * Note: GetBitContext is used to make the code simpler, since all data is * aligned this could be done faster in a different way, e.g. 
as it is done @@ -96,13 +96,13 @@ static inline int get_block(GetBitContext *gb, DCTELEM *block, const uint8_t *sc } /** - * \brief decode one rtjpeg YUV420 frame - * \param c context, must be initialized via rtjpeg_decode_init - * \param f AVFrame to place decoded frame into. If parts of the frame + * @brief decode one rtjpeg YUV420 frame + * @param c context, must be initialized via rtjpeg_decode_init + * @param f AVFrame to place decoded frame into. If parts of the frame * are not coded they are left unchanged, so consider initializing it - * \param buf buffer containing input data - * \param buf_size length of input data in bytes - * \return number of bytes consumed from the input buffer + * @param buf buffer containing input data + * @param buf_size length of input data in bytes + * @return number of bytes consumed from the input buffer */ int rtjpeg_decode_frame_yuv420(RTJpegContext *c, AVFrame *f, const uint8_t *buf, int buf_size) { @@ -143,15 +143,15 @@ int rtjpeg_decode_frame_yuv420(RTJpegContext *c, AVFrame *f, } /** - * \brief initialize an RTJpegContext, may be called multiple times - * \param c context to initialize - * \param dsp specifies the idct to use for decoding - * \param width width of image, will be rounded down to the nearest multiple + * @brief initialize an RTJpegContext, may be called multiple times + * @param c context to initialize + * @param dsp specifies the idct to use for decoding + * @param width width of image, will be rounded down to the nearest multiple * of 16 for decoding - * \param height height of image, will be rounded down to the nearest multiple + * @param height height of image, will be rounded down to the nearest multiple * of 16 for decoding - * \param lquant luma quantization table to use - * \param cquant chroma quantization table to use + * @param lquant luma quantization table to use + * @param cquant chroma quantization table to use */ void rtjpeg_decode_init(RTJpegContext *c, DSPContext *dsp, int width, int height, diff --git a/libavcodec/rv10.c b/libavcodec/rv10.c index 2f822a8ac2..dfd384a63b 100644 --- a/libavcodec/rv10.c +++ b/libavcodec/rv10.c @@ -710,30 +710,28 @@ static int rv10_decode_frame(AVCodecContext *avctx, } AVCodec ff_rv10_decoder = { - "rv10", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_RV10, - sizeof(MpegEncContext), - rv10_decode_init, - NULL, - rv10_decode_end, - rv10_decode_frame, - CODEC_CAP_DR1, + .name = "rv10", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_RV10, + .priv_data_size = sizeof(MpegEncContext), + .init = rv10_decode_init, + .close = rv10_decode_end, + .decode = rv10_decode_frame, + .capabilities = CODEC_CAP_DR1, .max_lowres = 3, .long_name = NULL_IF_CONFIG_SMALL("RealVideo 1.0"), .pix_fmts= ff_pixfmt_list_420, }; AVCodec ff_rv20_decoder = { - "rv20", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_RV20, - sizeof(MpegEncContext), - rv10_decode_init, - NULL, - rv10_decode_end, - rv10_decode_frame, - CODEC_CAP_DR1 | CODEC_CAP_DELAY, + .name = "rv20", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_RV20, + .priv_data_size = sizeof(MpegEncContext), + .init = rv10_decode_init, + .close = rv10_decode_end, + .decode = rv10_decode_frame, + .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY, .flush= ff_mpeg_flush, .max_lowres = 3, .long_name = NULL_IF_CONFIG_SMALL("RealVideo 2.0"), diff --git a/libavcodec/rv10enc.c b/libavcodec/rv10enc.c index 82b1258799..db9043ceea 100644 --- a/libavcodec/rv10enc.c +++ b/libavcodec/rv10enc.c @@ -57,13 +57,13 @@ void rv10_encode_picture_header(MpegEncContext *s, int picture_number) } AVCodec ff_rv10_encoder 
= { - "rv10", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_RV10, - sizeof(MpegEncContext), - MPV_encode_init, - MPV_encode_picture, - MPV_encode_end, + .name = "rv10", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_RV10, + .priv_data_size = sizeof(MpegEncContext), + .init = MPV_encode_init, + .encode = MPV_encode_picture, + .close = MPV_encode_end, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("RealVideo 1.0"), }; diff --git a/libavcodec/rv20enc.c b/libavcodec/rv20enc.c index a10998450c..af7ef6bb72 100644 --- a/libavcodec/rv20enc.c +++ b/libavcodec/rv20enc.c @@ -58,13 +58,13 @@ void rv20_encode_picture_header(MpegEncContext *s, int picture_number){ } AVCodec ff_rv20_encoder = { - "rv20", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_RV20, - sizeof(MpegEncContext), - MPV_encode_init, - MPV_encode_picture, - MPV_encode_end, + .name = "rv20", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_RV20, + .priv_data_size = sizeof(MpegEncContext), + .init = MPV_encode_init, + .encode = MPV_encode_picture, + .close = MPV_encode_end, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("RealVideo 2.0"), }; diff --git a/libavcodec/rv30.c b/libavcodec/rv30.c index b43859b9cb..d2cc533d80 100644 --- a/libavcodec/rv30.c +++ b/libavcodec/rv30.c @@ -142,7 +142,7 @@ static void rv30_loop_filter(RV34DecContext *r, int row) mb_pos = row * s->mb_stride; for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){ - int mbtype = s->current_picture_ptr->mb_type[mb_pos]; + int mbtype = s->current_picture_ptr->f.mb_type[mb_pos]; if(IS_INTRA(mbtype) || IS_SEPARATE_DC(mbtype)) r->deblock_coefs[mb_pos] = 0xFFFF; if(IS_INTRA(mbtype)) @@ -154,11 +154,11 @@ static void rv30_loop_filter(RV34DecContext *r, int row) */ mb_pos = row * s->mb_stride; for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){ - cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos]]; + cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos]]; if(mb_x) - left_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos - 1]]; + left_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos - 1]]; for(j = 0; j < 16; j += 4){ - Y = s->current_picture_ptr->data[0] + mb_x*16 + (row*16 + j) * s->linesize + 4 * !mb_x; + Y = s->current_picture_ptr->f.data[0] + mb_x*16 + (row*16 + j) * s->linesize + 4 * !mb_x; for(i = !mb_x; i < 4; i++, Y += 4){ int ij = i + j; loc_lim = 0; @@ -178,7 +178,7 @@ static void rv30_loop_filter(RV34DecContext *r, int row) if(mb_x) left_cbp = (r->cbp_chroma[mb_pos - 1] >> (k*4)) & 0xF; for(j = 0; j < 8; j += 4){ - C = s->current_picture_ptr->data[k+1] + mb_x*8 + (row*8 + j) * s->uvlinesize + 4 * !mb_x; + C = s->current_picture_ptr->f.data[k + 1] + mb_x*8 + (row*8 + j) * s->uvlinesize + 4 * !mb_x; for(i = !mb_x; i < 2; i++, C += 4){ int ij = i + (j >> 1); loc_lim = 0; @@ -196,11 +196,11 @@ static void rv30_loop_filter(RV34DecContext *r, int row) } mb_pos = row * s->mb_stride; for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){ - cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos]]; + cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos]]; if(row) - top_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos - s->mb_stride]]; + top_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos - s->mb_stride]]; for(j = 4*!row; j < 16; j += 4){ - Y = s->current_picture_ptr->data[0] + mb_x*16 + (row*16 + j) * s->linesize; + Y = 
s->current_picture_ptr->f.data[0] + mb_x*16 + (row*16 + j) * s->linesize; for(i = 0; i < 4; i++, Y += 4){ int ij = i + j; loc_lim = 0; @@ -220,7 +220,7 @@ static void rv30_loop_filter(RV34DecContext *r, int row) if(row) top_cbp = (r->cbp_chroma[mb_pos - s->mb_stride] >> (k*4)) & 0xF; for(j = 4*!row; j < 8; j += 4){ - C = s->current_picture_ptr->data[k+1] + mb_x*8 + (row*8 + j) * s->uvlinesize; + C = s->current_picture_ptr->f.data[k+1] + mb_x*8 + (row*8 + j) * s->uvlinesize; for(i = 0; i < 2; i++, C += 4){ int ij = i + (j >> 1); loc_lim = 0; @@ -267,15 +267,14 @@ static av_cold int rv30_decode_init(AVCodecContext *avctx) } AVCodec ff_rv30_decoder = { - "rv30", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_RV30, - sizeof(RV34DecContext), - rv30_decode_init, - NULL, - ff_rv34_decode_end, - ff_rv34_decode_frame, - CODEC_CAP_DR1 | CODEC_CAP_DELAY, + .name = "rv30", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_RV30, + .priv_data_size = sizeof(RV34DecContext), + .init = rv30_decode_init, + .close = ff_rv34_decode_end, + .decode = ff_rv34_decode_frame, + .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY, .flush = ff_mpeg_flush, .long_name = NULL_IF_CONFIG_SMALL("RealVideo 3.0"), .pix_fmts= ff_pixfmt_list_420, diff --git a/libavcodec/rv34.c b/libavcodec/rv34.c index a5db0b0255..97091c96b0 100644 --- a/libavcodec/rv34.c +++ b/libavcodec/rv34.c @@ -62,8 +62,10 @@ static const int rv34_mb_type_to_lavc[12] = { static RV34VLC intra_vlcs[NUM_INTRA_TABLES], inter_vlcs[NUM_INTER_TABLES]; +static int rv34_decode_mv(RV34DecContext *r, int block_type); + /** - * @defgroup vlc RV30/40 VLC generating functions + * @name RV30/40 VLC generating functions * @{ */ @@ -171,7 +173,7 @@ static av_cold void rv34_init_tables(void) /** - * @defgroup transform RV30/40 inverse transform functions + * @name RV30/40 inverse transform functions * @{ */ @@ -246,7 +248,7 @@ static void rv34_inv_transform_noround(DCTELEM *block){ /** - * @defgroup block RV30/40 4x4 block decoding functions + * @name RV30/40 4x4 block decoding functions * @{ */ @@ -393,7 +395,7 @@ static inline void rv34_dequant4x4_16x16(DCTELEM *block, int Qdc, int Q) /** - * @defgroup rv3040_bitstream RV30/40 bitstream parsing + * @name RV30/40 bitstream parsing * @{ */ @@ -422,20 +424,75 @@ static inline RV34VLC* choose_vlc_set(int quant, int mod, int type) } /** - * Decode quantizer difference and return modified quantizer. + * Decode macroblock header and return CBP in case of success, -1 otherwise. */ -static inline int rv34_decode_dquant(GetBitContext *gb, int quant) +static int rv34_decode_mb_header(RV34DecContext *r, int8_t *intra_types) { - if(get_bits1(gb)) - return rv34_dquant_tab[get_bits1(gb)][quant]; - else - return get_bits(gb, 5); + MpegEncContext *s = &r->s; + GetBitContext *gb = &s->gb; + int mb_pos = s->mb_x + s->mb_y * s->mb_stride; + int i, t; + + if(!r->si.type){ + r->is16 = get_bits1(gb); + if(!r->is16 && !r->rv30){ + if(!get_bits1(gb)) + av_log(s->avctx, AV_LOG_ERROR, "Need DQUANT\n"); + } + s->current_picture_ptr->f.mb_type[mb_pos] = r->is16 ? MB_TYPE_INTRA16x16 : MB_TYPE_INTRA; + r->block_type = r->is16 ? 
RV34_MB_TYPE_INTRA16x16 : RV34_MB_TYPE_INTRA; + }else{ + r->block_type = r->decode_mb_info(r); + if(r->block_type == -1) + return -1; + s->current_picture_ptr->f.mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type]; + r->mb_type[mb_pos] = r->block_type; + if(r->block_type == RV34_MB_SKIP){ + if(s->pict_type == AV_PICTURE_TYPE_P) + r->mb_type[mb_pos] = RV34_MB_P_16x16; + if(s->pict_type == AV_PICTURE_TYPE_B) + r->mb_type[mb_pos] = RV34_MB_B_DIRECT; + } + r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->f.mb_type[mb_pos]); + rv34_decode_mv(r, r->block_type); + if(r->block_type == RV34_MB_SKIP){ + fill_rectangle(intra_types, 4, 4, r->intra_types_stride, 0, sizeof(intra_types[0])); + return 0; + } + r->chroma_vlc = 1; + r->luma_vlc = 0; + } + if(IS_INTRA(s->current_picture_ptr->f.mb_type[mb_pos])){ + if(r->is16){ + t = get_bits(gb, 2); + fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0])); + r->luma_vlc = 2; + }else{ + if(r->decode_intra_types(r, gb, intra_types) < 0) + return -1; + r->luma_vlc = 1; + } + r->chroma_vlc = 0; + r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0); + }else{ + for(i = 0; i < 16; i++) + intra_types[(i & 3) + (i>>2) * r->intra_types_stride] = 0; + r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1); + if(r->mb_type[mb_pos] == RV34_MB_P_MIX16x16){ + r->is16 = 1; + r->chroma_vlc = 1; + r->luma_vlc = 2; + r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0); + } + } + + return rv34_decode_cbp(gb, r->cur_vlcs, r->is16); } /** @} */ //bitstream functions /** - * @defgroup mv motion vector related code (prediction, reconstruction, motion compensation) + * @name motion vector related code (prediction, reconstruction, motion compensation) * @{ */ @@ -470,27 +527,27 @@ static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int c_off = -1; if(r->avail_cache[avail_index - 1]){ - A[0] = s->current_picture_ptr->motion_val[0][mv_pos-1][0]; - A[1] = s->current_picture_ptr->motion_val[0][mv_pos-1][1]; + A[0] = s->current_picture_ptr->f.motion_val[0][mv_pos-1][0]; + A[1] = s->current_picture_ptr->f.motion_val[0][mv_pos-1][1]; } if(r->avail_cache[avail_index - 4]){ - B[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][0]; - B[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][1]; + B[0] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride][0]; + B[1] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride][1]; }else{ B[0] = A[0]; B[1] = A[1]; } if(!r->avail_cache[avail_index - 4 + c_off]){ if(r->avail_cache[avail_index - 4] && (r->avail_cache[avail_index - 1] || r->rv30)){ - C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][0]; - C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][1]; + C[0] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride-1][0]; + C[1] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride-1][1]; }else{ C[0] = A[0]; C[1] = A[1]; } }else{ - C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][0]; - C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][1]; + C[0] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride+c_off][0]; + C[1] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride+c_off][1]; } mx = mid_pred(A[0], B[0], C[0]); my = mid_pred(A[1], B[1], C[1]); @@ -498,8 +555,8 @@ static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int my += r->dmv[dmv_no][1]; for(j = 0; j < part_sizes_h[block_type]; j++){ for(i = 0; 
i < part_sizes_w[block_type]; i++){ - s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx; - s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][1] = my; + s->current_picture_ptr->f.motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx; + s->current_picture_ptr->f.motion_val[0][mv_pos + i + j*s->b8_stride][1] = my; } } } @@ -554,28 +611,28 @@ static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir) int i, j; Picture *cur_pic = s->current_picture_ptr; const int mask = dir ? MB_TYPE_L1 : MB_TYPE_L0; - int type = cur_pic->mb_type[mb_pos]; + int type = cur_pic->f.mb_type[mb_pos]; memset(A, 0, sizeof(A)); memset(B, 0, sizeof(B)); memset(C, 0, sizeof(C)); if((r->avail_cache[6-1] & type) & mask){ - A[0] = cur_pic->motion_val[dir][mv_pos - 1][0]; - A[1] = cur_pic->motion_val[dir][mv_pos - 1][1]; + A[0] = cur_pic->f.motion_val[dir][mv_pos - 1][0]; + A[1] = cur_pic->f.motion_val[dir][mv_pos - 1][1]; has_A = 1; } if((r->avail_cache[6-4] & type) & mask){ - B[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][0]; - B[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][1]; + B[0] = cur_pic->f.motion_val[dir][mv_pos - s->b8_stride][0]; + B[1] = cur_pic->f.motion_val[dir][mv_pos - s->b8_stride][1]; has_B = 1; } if(r->avail_cache[6-4] && (r->avail_cache[6-2] & type) & mask){ - C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][0]; - C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][1]; + C[0] = cur_pic->f.motion_val[dir][mv_pos - s->b8_stride + 2][0]; + C[1] = cur_pic->f.motion_val[dir][mv_pos - s->b8_stride + 2][1]; has_C = 1; }else if((s->mb_x+1) == s->mb_width && (r->avail_cache[6-5] & type) & mask){ - C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][0]; - C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][1]; + C[0] = cur_pic->f.motion_val[dir][mv_pos - s->b8_stride - 1][0]; + C[1] = cur_pic->f.motion_val[dir][mv_pos - s->b8_stride - 1][1]; has_C = 1; } @@ -586,12 +643,12 @@ static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir) for(j = 0; j < 2; j++){ for(i = 0; i < 2; i++){ - cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][0] = mx; - cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][1] = my; + cur_pic->f.motion_val[dir][mv_pos + i + j*s->b8_stride][0] = mx; + cur_pic->f.motion_val[dir][mv_pos + i + j*s->b8_stride][1] = my; } } if(block_type == RV34_MB_B_BACKWARD || block_type == RV34_MB_B_FORWARD){ - ZERO8x2(cur_pic->motion_val[!dir][mv_pos], s->b8_stride); + ZERO8x2(cur_pic->f.motion_val[!dir][mv_pos], s->b8_stride); } } @@ -608,27 +665,27 @@ static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir) int avail_index = avail_indexes[0]; if(r->avail_cache[avail_index - 1]){ - A[0] = s->current_picture_ptr->motion_val[0][mv_pos-1][0]; - A[1] = s->current_picture_ptr->motion_val[0][mv_pos-1][1]; + A[0] = s->current_picture_ptr->f.motion_val[0][mv_pos - 1][0]; + A[1] = s->current_picture_ptr->f.motion_val[0][mv_pos - 1][1]; } if(r->avail_cache[avail_index - 4]){ - B[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][0]; - B[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][1]; + B[0] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride][0]; + B[1] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride][1]; }else{ B[0] = A[0]; B[1] = A[1]; } if(!r->avail_cache[avail_index - 4 + 2]){ if(r->avail_cache[avail_index - 4] && (r->avail_cache[avail_index - 1])){ - C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][0]; - C[1] 
= s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][1]; + C[0] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride - 1][0]; + C[1] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride - 1][1]; }else{ C[0] = A[0]; C[1] = A[1]; } }else{ - C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+2][0]; - C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+2][1]; + C[0] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride + 2][0]; + C[1] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride + 2][1]; } mx = mid_pred(A[0], B[0], C[0]); my = mid_pred(A[1], B[1], C[1]); @@ -637,8 +694,8 @@ static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir) for(j = 0; j < 2; j++){ for(i = 0; i < 2; i++){ for(k = 0; k < 2; k++){ - s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx; - s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][1] = my; + s->current_picture_ptr->f.motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx; + s->current_picture_ptr->f.motion_val[k][mv_pos + i + j*s->b8_stride][1] = my; } } } @@ -676,24 +733,24 @@ static inline void rv34_mc(RV34DecContext *r, const int block_type, if(thirdpel){ int chroma_mx, chroma_my; - mx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24); - my = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24); - lx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) % 3; - ly = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) % 3; - chroma_mx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + 1) >> 1; - chroma_my = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + 1) >> 1; + mx = (s->current_picture_ptr->f.motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24); + my = (s->current_picture_ptr->f.motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24); + lx = (s->current_picture_ptr->f.motion_val[dir][mv_pos][0] + (3 << 24)) % 3; + ly = (s->current_picture_ptr->f.motion_val[dir][mv_pos][1] + (3 << 24)) % 3; + chroma_mx = s->current_picture_ptr->f.motion_val[dir][mv_pos][0] / 2; + chroma_my = s->current_picture_ptr->f.motion_val[dir][mv_pos][1] / 2; umx = (chroma_mx + (3 << 24)) / 3 - (1 << 24); umy = (chroma_my + (3 << 24)) / 3 - (1 << 24); uvmx = chroma_coeffs[(chroma_mx + (3 << 24)) % 3]; uvmy = chroma_coeffs[(chroma_my + (3 << 24)) % 3]; }else{ int cx, cy; - mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] >> 2; - my = s->current_picture_ptr->motion_val[dir][mv_pos][1] >> 2; - lx = s->current_picture_ptr->motion_val[dir][mv_pos][0] & 3; - ly = s->current_picture_ptr->motion_val[dir][mv_pos][1] & 3; - cx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2; - cy = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2; + mx = s->current_picture_ptr->f.motion_val[dir][mv_pos][0] >> 2; + my = s->current_picture_ptr->f.motion_val[dir][mv_pos][1] >> 2; + lx = s->current_picture_ptr->f.motion_val[dir][mv_pos][0] & 3; + ly = s->current_picture_ptr->f.motion_val[dir][mv_pos][1] & 3; + cx = s->current_picture_ptr->f.motion_val[dir][mv_pos][0] / 2; + cy = s->current_picture_ptr->f.motion_val[dir][mv_pos][1] / 2; umx = cx >> 2; umy = cy >> 2; uvmx = (cx & 3) << 1; @@ -703,9 +760,9 @@ static inline void rv34_mc(RV34DecContext *r, const int block_type, uvmx = uvmy = 4; } dxy = ly*4 + lx; - srcY = dir ? s->next_picture_ptr->data[0] : s->last_picture_ptr->data[0]; - srcU = dir ? 
s->next_picture_ptr->data[1] : s->last_picture_ptr->data[1]; - srcV = dir ? s->next_picture_ptr->data[2] : s->last_picture_ptr->data[2]; + srcY = dir ? s->next_picture_ptr->f.data[0] : s->last_picture_ptr->f.data[0]; + srcU = dir ? s->next_picture_ptr->f.data[1] : s->last_picture_ptr->f.data[1]; + srcV = dir ? s->next_picture_ptr->f.data[2] : s->last_picture_ptr->f.data[2]; src_x = s->mb_x * 16 + xoff + mx; src_y = s->mb_y * 16 + yoff + my; uvsrc_x = s->mb_x * 8 + (xoff >> 1) + umx; @@ -813,31 +870,31 @@ static int rv34_decode_mv(RV34DecContext *r, int block_type) switch(block_type){ case RV34_MB_TYPE_INTRA: case RV34_MB_TYPE_INTRA16x16: - ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); + ZERO8x2(s->current_picture_ptr->f.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); return 0; case RV34_MB_SKIP: if(s->pict_type == AV_PICTURE_TYPE_P){ - ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); + ZERO8x2(s->current_picture_ptr->f.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0); break; } case RV34_MB_B_DIRECT: //surprisingly, it uses motion scheme from next reference frame - next_bt = s->next_picture_ptr->mb_type[s->mb_x + s->mb_y * s->mb_stride]; + next_bt = s->next_picture_ptr->f.mb_type[s->mb_x + s->mb_y * s->mb_stride]; if(IS_INTRA(next_bt) || IS_SKIP(next_bt)){ - ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); - ZERO8x2(s->current_picture_ptr->motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); + ZERO8x2(s->current_picture_ptr->f.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); + ZERO8x2(s->current_picture_ptr->f.motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); }else for(j = 0; j < 2; j++) for(i = 0; i < 2; i++) for(k = 0; k < 2; k++) for(l = 0; l < 2; l++) - s->current_picture_ptr->motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][k]); + s->current_picture_ptr->f.motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_picture_ptr->f.motion_val[0][mv_pos + i + j*s->b8_stride][k]); if(!(IS_16X8(next_bt) || IS_8X16(next_bt) || IS_8X8(next_bt))) //we can use whole macroblock MC rv34_mc_2mv(r, block_type); else rv34_mc_2mv_skip(r); - ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); + ZERO8x2(s->current_picture_ptr->f.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); break; case RV34_MB_P_16x16: case RV34_MB_P_MIX16x16: @@ -885,7 +942,7 @@ static int rv34_decode_mv(RV34DecContext *r, int block_type) /** @} */ // mv group /** - * @defgroup recons Macroblock reconstruction functions + * @name Macroblock reconstruction functions * @{ */ /** mapping of RV30/40 intra prediction types to standard H.264 types */ @@ -1027,79 +1084,6 @@ static void rv34_output_macroblock(RV34DecContext *r, int8_t *intra_types, int c } } -/** @} */ // recons group - -/** - * @addtogroup bitstream - * Decode macroblock header and return CBP in case of success, -1 otherwise. 
- */ -static int rv34_decode_mb_header(RV34DecContext *r, int8_t *intra_types) -{ - MpegEncContext *s = &r->s; - GetBitContext *gb = &s->gb; - int mb_pos = s->mb_x + s->mb_y * s->mb_stride; - int i, t; - - if(!r->si.type){ - r->is16 = get_bits1(gb); - if(!r->is16 && !r->rv30){ - if(!get_bits1(gb)) - av_log(s->avctx, AV_LOG_ERROR, "Need DQUANT\n"); - } - s->current_picture_ptr->mb_type[mb_pos] = r->is16 ? MB_TYPE_INTRA16x16 : MB_TYPE_INTRA; - r->block_type = r->is16 ? RV34_MB_TYPE_INTRA16x16 : RV34_MB_TYPE_INTRA; - }else{ - r->block_type = r->decode_mb_info(r); - if(r->block_type == -1) - return -1; - s->current_picture_ptr->mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type]; - r->mb_type[mb_pos] = r->block_type; - if(r->block_type == RV34_MB_SKIP){ - if(s->pict_type == AV_PICTURE_TYPE_P) - r->mb_type[mb_pos] = RV34_MB_P_16x16; - if(s->pict_type == AV_PICTURE_TYPE_B) - r->mb_type[mb_pos] = RV34_MB_B_DIRECT; - } - r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->mb_type[mb_pos]); - rv34_decode_mv(r, r->block_type); - if(r->block_type == RV34_MB_SKIP){ - fill_rectangle(intra_types, 4, 4, r->intra_types_stride, 0, sizeof(intra_types[0])); - return 0; - } - r->chroma_vlc = 1; - r->luma_vlc = 0; - } - if(IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){ - if(r->is16){ - t = get_bits(gb, 2); - fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0])); - r->luma_vlc = 2; - }else{ - if(r->decode_intra_types(r, gb, intra_types) < 0) - return -1; - r->luma_vlc = 1; - } - r->chroma_vlc = 0; - r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0); - }else{ - for(i = 0; i < 16; i++) - intra_types[(i & 3) + (i>>2) * r->intra_types_stride] = 0; - r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1); - if(r->mb_type[mb_pos] == RV34_MB_P_MIX16x16){ - r->is16 = 1; - r->chroma_vlc = 1; - r->luma_vlc = 2; - r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0); - } - } - - return rv34_decode_cbp(gb, r->cur_vlcs, r->is16); -} - -/** - * @addtogroup recons - * @{ - */ /** * mask for retrieving all bits in coded block pattern * corresponding to one 8x8 block @@ -1109,6 +1093,8 @@ static int rv34_decode_mb_header(RV34DecContext *r, int8_t *intra_types) #define U_CBP_MASK 0x0F0000 #define V_CBP_MASK 0xF00000 +/** @} */ // recons group + static void rv34_apply_differences(RV34DecContext *r, int cbp) { @@ -1142,7 +1128,7 @@ static int rv34_set_deblock_coef(RV34DecContext *r) MpegEncContext *s = &r->s; int hmvmask = 0, vmvmask = 0, i, j; int midx = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride; - int16_t (*motion_val)[2] = &s->current_picture_ptr->motion_val[0][midx]; + int16_t (*motion_val)[2] = &s->current_picture_ptr->f.motion_val[0][midx]; for(j = 0; j < 16; j += 8){ for(i = 0; i < 2; i++){ if(is_mv_diff_gt_3(motion_val + i, 1)) @@ -1184,14 +1170,14 @@ static int rv34_decode_macroblock(RV34DecContext *r, int8_t *intra_types) dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width; if(s->mb_x && dist) r->avail_cache[5] = - r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1]; + r->avail_cache[9] = s->current_picture_ptr->f.mb_type[mb_pos - 1]; if(dist >= s->mb_width) r->avail_cache[2] = - r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride]; + r->avail_cache[3] = s->current_picture_ptr->f.mb_type[mb_pos - s->mb_stride]; if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1) - r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1]; + r->avail_cache[4] = 
s->current_picture_ptr->f.mb_type[mb_pos - s->mb_stride + 1]; if(s->mb_x && dist > s->mb_width) - r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1]; + r->avail_cache[1] = s->current_picture_ptr->f.mb_type[mb_pos - s->mb_stride - 1]; s->qscale = r->si.quant; cbp = cbp2 = rv34_decode_mb_header(r, intra_types); @@ -1201,7 +1187,7 @@ static int rv34_decode_macroblock(RV34DecContext *r, int8_t *intra_types) r->deblock_coefs[mb_pos] = 0xFFFF; else r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r) | r->cbp_luma[mb_pos]; - s->current_picture_ptr->qscale_table[mb_pos] = s->qscale; + s->current_picture_ptr->f.qscale_table[mb_pos] = s->qscale; if(cbp == -1) return -1; @@ -1235,7 +1221,7 @@ static int rv34_decode_macroblock(RV34DecContext *r, int8_t *intra_types) rv34_dequant4x4(s->block[blknum] + blkoff, rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]],rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]]); rv34_inv_transform(s->block[blknum] + blkoff); } - if(IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])) + if (IS_INTRA(s->current_picture_ptr->f.mb_type[mb_pos])) rv34_output_macroblock(r, intra_types, cbp2, r->is16); else rv34_apply_differences(r, cbp2); @@ -1258,15 +1244,6 @@ static int check_slice_end(RV34DecContext *r, MpegEncContext *s) return 0; } -static inline int slice_compare(SliceInfo *si1, SliceInfo *si2) -{ - return si1->type != si2->type || - si1->start >= si2->start || - si1->width != si2->width || - si1->height != si2->height|| - si1->pts != si2->pts; -} - static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int buf_size) { MpegEncContext *s = &r->s; @@ -1452,7 +1429,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx, av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n"); return -1; } - if((!s->last_picture_ptr || !s->last_picture_ptr->data[0]) && si.type == AV_PICTURE_TYPE_B) + if ((!s->last_picture_ptr || !s->last_picture_ptr->f.data[0]) && si.type == AV_PICTURE_TYPE_B) return -1; #if FF_API_HURRY_UP /* skip b frames if we are in a hurry */ diff --git a/libavcodec/rv40.c b/libavcodec/rv40.c index 54d786a3a6..67676c26cc 100644 --- a/libavcodec/rv40.c +++ b/libavcodec/rv40.c @@ -475,7 +475,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row) mb_pos = row * s->mb_stride; for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){ - int mbtype = s->current_picture_ptr->mb_type[mb_pos]; + int mbtype = s->current_picture_ptr->f.mb_type[mb_pos]; if(IS_INTRA(mbtype) || IS_SEPARATE_DC(mbtype)) r->cbp_luma [mb_pos] = r->deblock_coefs[mb_pos] = 0xFFFF; if(IS_INTRA(mbtype)) @@ -489,7 +489,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row) int avail[4]; int y_to_deblock, c_to_deblock[2]; - q = s->current_picture_ptr->qscale_table[mb_pos]; + q = s->current_picture_ptr->f.qscale_table[mb_pos]; alpha = rv40_alpha_tab[q]; beta = rv40_beta_tab [q]; betaY = betaC = beta * 3; @@ -504,7 +504,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row) if(avail[i]){ int pos = mb_pos + neighbour_offs_x[i] + neighbour_offs_y[i]*s->mb_stride; mvmasks[i] = r->deblock_coefs[pos]; - mbtype [i] = s->current_picture_ptr->mb_type[pos]; + mbtype [i] = s->current_picture_ptr->f.mb_type[pos]; cbp [i] = r->cbp_luma[pos]; uvcbp[i][0] = r->cbp_chroma[pos] & 0xF; uvcbp[i][1] = r->cbp_chroma[pos] >> 4; @@ -563,7 +563,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row) } for(j = 0; j < 16; j += 4){ - Y = s->current_picture_ptr->data[0] + mb_x*16 + (row*16 + j) * s->linesize; + Y = s->current_picture_ptr->f.data[0] + mb_x*16 + (row*16 
+ j) * s->linesize; for(i = 0; i < 4; i++, Y += 4){ int ij = i + j; int clip_cur = y_to_deblock & (MASK_CUR << ij) ? clip[POS_CUR] : 0; @@ -607,7 +607,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row) } for(k = 0; k < 2; k++){ for(j = 0; j < 2; j++){ - C = s->current_picture_ptr->data[k+1] + mb_x*8 + (row*8 + j*4) * s->uvlinesize; + C = s->current_picture_ptr->f.data[k + 1] + mb_x*8 + (row*8 + j*4) * s->uvlinesize; for(i = 0; i < 2; i++, C += 4){ int ij = i + j*2; int clip_cur = c_to_deblock[k] & (MASK_CUR << ij) ? clip[POS_CUR] : 0; @@ -669,15 +669,14 @@ static av_cold int rv40_decode_init(AVCodecContext *avctx) } AVCodec ff_rv40_decoder = { - "rv40", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_RV40, - sizeof(RV34DecContext), - rv40_decode_init, - NULL, - ff_rv34_decode_end, - ff_rv34_decode_frame, - CODEC_CAP_DR1 | CODEC_CAP_DELAY, + .name = "rv40", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_RV40, + .priv_data_size = sizeof(RV34DecContext), + .init = rv40_decode_init, + .close = ff_rv34_decode_end, + .decode = ff_rv34_decode_frame, + .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY, .flush = ff_mpeg_flush, .long_name = NULL_IF_CONFIG_SMALL("RealVideo 4.0"), .pix_fmts= ff_pixfmt_list_420, diff --git a/libavcodec/rv40data.h b/libavcodec/rv40data.h index 7912919ba1..436afa84e2 100644 --- a/libavcodec/rv40data.h +++ b/libavcodec/rv40data.h @@ -65,7 +65,7 @@ static const uint8_t rv40_luma_dc_quant[2][32] = { }; /** - * @defgroup loopfilter coefficients used by the RV40 loop filter + * @name Coefficients used by the RV40 loop filter * @{ */ /** diff --git a/libavcodec/s302m.c b/libavcodec/s302m.c index 98a5e73a8c..4db18eb2d0 100644 --- a/libavcodec/s302m.c +++ b/libavcodec/s302m.c @@ -58,9 +58,9 @@ static int s302m_parse_frame_header(AVCodecContext *avctx, const uint8_t *buf, /* Set output properties */ avctx->bits_per_coded_sample = bits; if (bits > 16) - avctx->sample_fmt = SAMPLE_FMT_S32; + avctx->sample_fmt = AV_SAMPLE_FMT_S32; else - avctx->sample_fmt = SAMPLE_FMT_S16; + avctx->sample_fmt = AV_SAMPLE_FMT_S16; avctx->channels = channels; switch(channels) { diff --git a/libavcodec/sbr.h b/libavcodec/sbr.h index 82a996fdc6..69c847ac15 100644 --- a/libavcodec/sbr.h +++ b/libavcodec/sbr.h @@ -42,7 +42,7 @@ typedef struct { uint8_t bs_xover_band; /** - * @defgroup bs_header_extra_1 Variables associated with bs_header_extra_1 + * @name Variables associated with bs_header_extra_1 * @{ */ uint8_t bs_freq_scale; @@ -58,7 +58,7 @@ typedef struct { */ typedef struct { /** - * @defgroup aac_bitstream Main bitstream data variables + * @name Main bitstream data variables * @{ */ unsigned bs_frame_class; @@ -74,7 +74,7 @@ typedef struct { /** @} */ /** - * @defgroup state State variables + * @name State variables * @{ */ DECLARE_ALIGNED(16, float, synthesis_filterbank_samples)[SBR_SYNTHESIS_BUF_SIZE]; @@ -116,7 +116,7 @@ typedef struct { SpectrumParameters spectrum_params; int bs_amp_res_header; /** - * @defgroup bs_header_extra_2 variables associated with bs_header_extra_2 + * @name Variables associated with bs_header_extra_2 * @{ */ unsigned bs_limiter_bands; diff --git a/libavcodec/sgidec.c b/libavcodec/sgidec.c index 360a25ced2..b0a0b20a5e 100644 --- a/libavcodec/sgidec.c +++ b/libavcodec/sgidec.c @@ -260,14 +260,13 @@ static av_cold int sgi_end(AVCodecContext *avctx) } AVCodec ff_sgi_decoder = { - "sgi", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_SGI, - sizeof(SgiState), - sgi_init, - NULL, - sgi_end, - decode_frame, + .name = "sgi", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_SGI, + .priv_data_size = 
sizeof(SgiState), + .init = sgi_init, + .close = sgi_end, + .decode = decode_frame, .long_name = NULL_IF_CONFIG_SMALL("SGI image"), }; diff --git a/libavcodec/sgienc.c b/libavcodec/sgienc.c index 1e2af8e7ae..1fc6dcb244 100644 --- a/libavcodec/sgienc.c +++ b/libavcodec/sgienc.c @@ -160,13 +160,12 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, } AVCodec ff_sgi_encoder = { - "sgi", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_SGI, - sizeof(SgiContext), - encode_init, - encode_frame, - NULL, + .name = "sgi", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_SGI, + .priv_data_size = sizeof(SgiContext), + .init = encode_init, + .encode = encode_frame, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGBA, PIX_FMT_GRAY8, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("SGI image"), }; diff --git a/libavcodec/sh4/dsputil_align.c b/libavcodec/sh4/dsputil_align.c index 8be9318cdb..e91893683f 100644 --- a/libavcodec/sh4/dsputil_align.c +++ b/libavcodec/sh4/dsputil_align.c @@ -333,7 +333,7 @@ DEFFUNC(avg,no_rnd,xy,16,OP_XY,PACK) void dsputil_init_align(DSPContext* c, AVCodecContext *avctx) { - const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8; + const int high_bit_depth = avctx->bits_per_raw_sample > 8; if (!high_bit_depth) { c->put_pixels_tab[0][0] = put_rnd_pixels16_o; diff --git a/libavcodec/sh4/dsputil_sh4.c b/libavcodec/sh4/dsputil_sh4.c index d254e1db6b..905e8b15e0 100644 --- a/libavcodec/sh4/dsputil_sh4.c +++ b/libavcodec/sh4/dsputil_sh4.c @@ -92,12 +92,13 @@ static void idct_add(uint8_t *dest, int line_size, DCTELEM *block) void dsputil_init_sh4(DSPContext* c, AVCodecContext *avctx) { const int idct_algo= avctx->idct_algo; - const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8; + const int high_bit_depth = avctx->bits_per_raw_sample > 8; dsputil_init_align(c,avctx); if (!high_bit_depth) c->clear_blocks = clear_blocks_sh4; - if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SH4){ + if (avctx->bits_per_raw_sample <= 8 && + (idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SH4)) { c->idct_put = idct_put; c->idct_add = idct_add; c->idct = idct_sh4; diff --git a/libavcodec/shorten.c b/libavcodec/shorten.c index f593d0a164..b39fcbd0ef 100644 --- a/libavcodec/shorten.c +++ b/libavcodec/shorten.c @@ -471,7 +471,6 @@ static int shorten_decode_frame(AVCodecContext *avctx, s->cur_chan = 0; goto frame_done; } - break; } break; case FN_VERBATIM: @@ -489,11 +488,9 @@ static int shorten_decode_frame(AVCodecContext *avctx, case FN_QUIT: *data_size = 0; return buf_size; - break; default: av_log(avctx, AV_LOG_ERROR, "unknown shorten function %d\n", cmd); return -1; - break; } } frame_done: @@ -539,14 +536,13 @@ static void shorten_flush(AVCodecContext *avctx){ } AVCodec ff_shorten_decoder = { - "shorten", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_SHORTEN, - sizeof(ShortenContext), - shorten_decode_init, - NULL, - shorten_decode_close, - shorten_decode_frame, + .name = "shorten", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_SHORTEN, + .priv_data_size = sizeof(ShortenContext), + .init = shorten_decode_init, + .close = shorten_decode_close, + .decode = shorten_decode_frame, .flush= shorten_flush, .long_name= NULL_IF_CONFIG_SMALL("Shorten"), }; diff --git a/libavcodec/simple_idct.c b/libavcodec/simple_idct.c index 475be6d2d4..ffe5a0b070 100644 --- a/libavcodec/simple_idct.c +++ b/libavcodec/simple_idct.c @@ -25,378 +25,19 @@ * simpleidct in C. 
*/ -/* - based upon some outcommented c code from mpeg2dec (idct_mmx.c - written by Aaron Holtzman <aholtzma@ess.engr.uvic.ca>) - */ +#include "libavutil/intreadwrite.h" #include "avcodec.h" #include "dsputil.h" #include "mathops.h" #include "simple_idct.h" -#if 0 -#define W1 2841 /* 2048*sqrt (2)*cos (1*pi/16) */ -#define W2 2676 /* 2048*sqrt (2)*cos (2*pi/16) */ -#define W3 2408 /* 2048*sqrt (2)*cos (3*pi/16) */ -#define W4 2048 /* 2048*sqrt (2)*cos (4*pi/16) */ -#define W5 1609 /* 2048*sqrt (2)*cos (5*pi/16) */ -#define W6 1108 /* 2048*sqrt (2)*cos (6*pi/16) */ -#define W7 565 /* 2048*sqrt (2)*cos (7*pi/16) */ -#define ROW_SHIFT 8 -#define COL_SHIFT 17 -#else -#define W1 22725 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 -#define W2 21407 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 -#define W3 19266 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 -#define W4 16383 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 -#define W5 12873 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 -#define W6 8867 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 -#define W7 4520 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 -#define ROW_SHIFT 11 -#define COL_SHIFT 20 // 6 -#endif - -static inline void idctRowCondDC (DCTELEM * row) -{ - int a0, a1, a2, a3, b0, b1, b2, b3; -#if HAVE_FAST_64BIT - uint64_t temp; -#else - uint32_t temp; -#endif - -#if HAVE_FAST_64BIT -#if HAVE_BIGENDIAN -#define ROW0_MASK 0xffff000000000000LL -#else -#define ROW0_MASK 0xffffLL -#endif - if(sizeof(DCTELEM)==2){ - if ( ((((uint64_t *)row)[0] & ~ROW0_MASK) | - ((uint64_t *)row)[1]) == 0) { - temp = (row[0] << 3) & 0xffff; - temp += temp << 16; - temp += temp << 32; - ((uint64_t *)row)[0] = temp; - ((uint64_t *)row)[1] = temp; - return; - } - }else{ - if (!(row[1]|row[2]|row[3]|row[4]|row[5]|row[6]|row[7])) { - row[0]=row[1]=row[2]=row[3]=row[4]=row[5]=row[6]=row[7]= row[0] << 3; - return; - } - } -#else - if(sizeof(DCTELEM)==2){ - if (!(((uint32_t*)row)[1] | - ((uint32_t*)row)[2] | - ((uint32_t*)row)[3] | - row[1])) { - temp = (row[0] << 3) & 0xffff; - temp += temp << 16; - ((uint32_t*)row)[0]=((uint32_t*)row)[1] = - ((uint32_t*)row)[2]=((uint32_t*)row)[3] = temp; - return; - } - }else{ - if (!(row[1]|row[2]|row[3]|row[4]|row[5]|row[6]|row[7])) { - row[0]=row[1]=row[2]=row[3]=row[4]=row[5]=row[6]=row[7]= row[0] << 3; - return; - } - } -#endif - - a0 = (W4 * row[0]) + (1 << (ROW_SHIFT - 1)); - a1 = a0; - a2 = a0; - a3 = a0; - - /* no need to optimize : gcc does it */ - a0 += W2 * row[2]; - a1 += W6 * row[2]; - a2 -= W6 * row[2]; - a3 -= W2 * row[2]; - - b0 = MUL16(W1, row[1]); - MAC16(b0, W3, row[3]); - b1 = MUL16(W3, row[1]); - MAC16(b1, -W7, row[3]); - b2 = MUL16(W5, row[1]); - MAC16(b2, -W1, row[3]); - b3 = MUL16(W7, row[1]); - MAC16(b3, -W5, row[3]); - -#if HAVE_FAST_64BIT - temp = ((uint64_t*)row)[1]; -#else - temp = ((uint32_t*)row)[2] | ((uint32_t*)row)[3]; -#endif - if (temp != 0) { - a0 += W4*row[4] + W6*row[6]; - a1 += - W4*row[4] - W2*row[6]; - a2 += - W4*row[4] + W2*row[6]; - a3 += W4*row[4] - W6*row[6]; - - MAC16(b0, W5, row[5]); - MAC16(b0, W7, row[7]); - - MAC16(b1, -W1, row[5]); - MAC16(b1, -W5, row[7]); - - MAC16(b2, W7, row[5]); - MAC16(b2, W3, row[7]); - - MAC16(b3, W3, row[5]); - MAC16(b3, -W1, row[7]); - } - - row[0] = (a0 + b0) >> ROW_SHIFT; - row[7] = (a0 - b0) >> ROW_SHIFT; - row[1] = (a1 + b1) >> ROW_SHIFT; - row[6] = (a1 - b1) >> ROW_SHIFT; - row[2] = (a2 + b2) >> ROW_SHIFT; - row[5] = (a2 - b2) >> ROW_SHIFT; - row[3] = (a3 + b3) >> ROW_SHIFT; - row[4] = (a3 - b3) >> ROW_SHIFT; -} - -static inline void idctSparseColPut (uint8_t *dest, int line_size, - DCTELEM * 
col) -{ - int a0, a1, a2, a3, b0, b1, b2, b3; - uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; - - /* XXX: I did that only to give same values as previous code */ - a0 = W4 * (col[8*0] + ((1<<(COL_SHIFT-1))/W4)); - a1 = a0; - a2 = a0; - a3 = a0; - - a0 += + W2*col[8*2]; - a1 += + W6*col[8*2]; - a2 += - W6*col[8*2]; - a3 += - W2*col[8*2]; - - b0 = MUL16(W1, col[8*1]); - b1 = MUL16(W3, col[8*1]); - b2 = MUL16(W5, col[8*1]); - b3 = MUL16(W7, col[8*1]); - - MAC16(b0, + W3, col[8*3]); - MAC16(b1, - W7, col[8*3]); - MAC16(b2, - W1, col[8*3]); - MAC16(b3, - W5, col[8*3]); - - if(col[8*4]){ - a0 += + W4*col[8*4]; - a1 += - W4*col[8*4]; - a2 += - W4*col[8*4]; - a3 += + W4*col[8*4]; - } - - if (col[8*5]) { - MAC16(b0, + W5, col[8*5]); - MAC16(b1, - W1, col[8*5]); - MAC16(b2, + W7, col[8*5]); - MAC16(b3, + W3, col[8*5]); - } - - if(col[8*6]){ - a0 += + W6*col[8*6]; - a1 += - W2*col[8*6]; - a2 += + W2*col[8*6]; - a3 += - W6*col[8*6]; - } - - if (col[8*7]) { - MAC16(b0, + W7, col[8*7]); - MAC16(b1, - W5, col[8*7]); - MAC16(b2, + W3, col[8*7]); - MAC16(b3, - W1, col[8*7]); - } - - dest[0] = cm[(a0 + b0) >> COL_SHIFT]; - dest += line_size; - dest[0] = cm[(a1 + b1) >> COL_SHIFT]; - dest += line_size; - dest[0] = cm[(a2 + b2) >> COL_SHIFT]; - dest += line_size; - dest[0] = cm[(a3 + b3) >> COL_SHIFT]; - dest += line_size; - dest[0] = cm[(a3 - b3) >> COL_SHIFT]; - dest += line_size; - dest[0] = cm[(a2 - b2) >> COL_SHIFT]; - dest += line_size; - dest[0] = cm[(a1 - b1) >> COL_SHIFT]; - dest += line_size; - dest[0] = cm[(a0 - b0) >> COL_SHIFT]; -} - -static inline void idctSparseColAdd (uint8_t *dest, int line_size, - DCTELEM * col) -{ - int a0, a1, a2, a3, b0, b1, b2, b3; - uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; - - /* XXX: I did that only to give same values as previous code */ - a0 = W4 * (col[8*0] + ((1<<(COL_SHIFT-1))/W4)); - a1 = a0; - a2 = a0; - a3 = a0; - - a0 += + W2*col[8*2]; - a1 += + W6*col[8*2]; - a2 += - W6*col[8*2]; - a3 += - W2*col[8*2]; - - b0 = MUL16(W1, col[8*1]); - b1 = MUL16(W3, col[8*1]); - b2 = MUL16(W5, col[8*1]); - b3 = MUL16(W7, col[8*1]); - - MAC16(b0, + W3, col[8*3]); - MAC16(b1, - W7, col[8*3]); - MAC16(b2, - W1, col[8*3]); - MAC16(b3, - W5, col[8*3]); +#define BIT_DEPTH 8 +#include "simple_idct_template.c" +#undef BIT_DEPTH - if(col[8*4]){ - a0 += + W4*col[8*4]; - a1 += - W4*col[8*4]; - a2 += - W4*col[8*4]; - a3 += + W4*col[8*4]; - } - - if (col[8*5]) { - MAC16(b0, + W5, col[8*5]); - MAC16(b1, - W1, col[8*5]); - MAC16(b2, + W7, col[8*5]); - MAC16(b3, + W3, col[8*5]); - } - - if(col[8*6]){ - a0 += + W6*col[8*6]; - a1 += - W2*col[8*6]; - a2 += + W2*col[8*6]; - a3 += - W6*col[8*6]; - } - - if (col[8*7]) { - MAC16(b0, + W7, col[8*7]); - MAC16(b1, - W5, col[8*7]); - MAC16(b2, + W3, col[8*7]); - MAC16(b3, - W1, col[8*7]); - } - - dest[0] = cm[dest[0] + ((a0 + b0) >> COL_SHIFT)]; - dest += line_size; - dest[0] = cm[dest[0] + ((a1 + b1) >> COL_SHIFT)]; - dest += line_size; - dest[0] = cm[dest[0] + ((a2 + b2) >> COL_SHIFT)]; - dest += line_size; - dest[0] = cm[dest[0] + ((a3 + b3) >> COL_SHIFT)]; - dest += line_size; - dest[0] = cm[dest[0] + ((a3 - b3) >> COL_SHIFT)]; - dest += line_size; - dest[0] = cm[dest[0] + ((a2 - b2) >> COL_SHIFT)]; - dest += line_size; - dest[0] = cm[dest[0] + ((a1 - b1) >> COL_SHIFT)]; - dest += line_size; - dest[0] = cm[dest[0] + ((a0 - b0) >> COL_SHIFT)]; -} - -static inline void idctSparseCol (DCTELEM * col) -{ - int a0, a1, a2, a3, b0, b1, b2, b3; - - /* XXX: I did that only to give same values as previous code */ - a0 = W4 * (col[8*0] + ((1<<(COL_SHIFT-1))/W4)); - 
a1 = a0; - a2 = a0; - a3 = a0; - - a0 += + W2*col[8*2]; - a1 += + W6*col[8*2]; - a2 += - W6*col[8*2]; - a3 += - W2*col[8*2]; - - b0 = MUL16(W1, col[8*1]); - b1 = MUL16(W3, col[8*1]); - b2 = MUL16(W5, col[8*1]); - b3 = MUL16(W7, col[8*1]); - - MAC16(b0, + W3, col[8*3]); - MAC16(b1, - W7, col[8*3]); - MAC16(b2, - W1, col[8*3]); - MAC16(b3, - W5, col[8*3]); - - if(col[8*4]){ - a0 += + W4*col[8*4]; - a1 += - W4*col[8*4]; - a2 += - W4*col[8*4]; - a3 += + W4*col[8*4]; - } - - if (col[8*5]) { - MAC16(b0, + W5, col[8*5]); - MAC16(b1, - W1, col[8*5]); - MAC16(b2, + W7, col[8*5]); - MAC16(b3, + W3, col[8*5]); - } - - if(col[8*6]){ - a0 += + W6*col[8*6]; - a1 += - W2*col[8*6]; - a2 += + W2*col[8*6]; - a3 += - W6*col[8*6]; - } - - if (col[8*7]) { - MAC16(b0, + W7, col[8*7]); - MAC16(b1, - W5, col[8*7]); - MAC16(b2, + W3, col[8*7]); - MAC16(b3, - W1, col[8*7]); - } - - col[0 ] = ((a0 + b0) >> COL_SHIFT); - col[8 ] = ((a1 + b1) >> COL_SHIFT); - col[16] = ((a2 + b2) >> COL_SHIFT); - col[24] = ((a3 + b3) >> COL_SHIFT); - col[32] = ((a3 - b3) >> COL_SHIFT); - col[40] = ((a2 - b2) >> COL_SHIFT); - col[48] = ((a1 - b1) >> COL_SHIFT); - col[56] = ((a0 - b0) >> COL_SHIFT); -} - -void ff_simple_idct_put(uint8_t *dest, int line_size, DCTELEM *block) -{ - int i; - for(i=0; i<8; i++) - idctRowCondDC(block + i*8); - - for(i=0; i<8; i++) - idctSparseColPut(dest + i, line_size, block + i); -} - -void ff_simple_idct_add(uint8_t *dest, int line_size, DCTELEM *block) -{ - int i; - for(i=0; i<8; i++) - idctRowCondDC(block + i*8); - - for(i=0; i<8; i++) - idctSparseColAdd(dest + i, line_size, block + i); -} - -void ff_simple_idct(DCTELEM *block) -{ - int i; - for(i=0; i<8; i++) - idctRowCondDC(block + i*8); - - for(i=0; i<8; i++) - idctSparseCol(block + i); -} +#define BIT_DEPTH 10 +#include "simple_idct_template.c" +#undef BIT_DEPTH /* 2x4x8 idct */ @@ -467,7 +108,7 @@ void ff_simple_idct248_put(uint8_t *dest, int line_size, DCTELEM *block) /* IDCT8 on each line */ for(i=0; i<8; i++) { - idctRowCondDC(block + i*8); + idctRowCondDC_8(block + i*8); } /* IDCT4 and store */ @@ -542,7 +183,7 @@ void ff_simple_idct84_add(uint8_t *dest, int line_size, DCTELEM *block) /* IDCT8 on each line */ for(i=0; i<4; i++) { - idctRowCondDC(block + i*8); + idctRowCondDC_8(block + i*8); } /* IDCT4 and store */ @@ -562,7 +203,7 @@ void ff_simple_idct48_add(uint8_t *dest, int line_size, DCTELEM *block) /* IDCT8 and store */ for(i=0; i<4; i++){ - idctSparseColAdd(dest + i, line_size, block + i); + idctSparseColAdd_8(dest + i, line_size, block + i); } } diff --git a/libavcodec/simple_idct.h b/libavcodec/simple_idct.h index 24f6a6d5db..10ac7da5e6 100644 --- a/libavcodec/simple_idct.h +++ b/libavcodec/simple_idct.h @@ -31,12 +31,17 @@ #include <stdint.h> #include "dsputil.h" -void ff_simple_idct_put(uint8_t *dest, int line_size, DCTELEM *block); -void ff_simple_idct_add(uint8_t *dest, int line_size, DCTELEM *block); +void ff_simple_idct_put_8(uint8_t *dest, int line_size, DCTELEM *block); +void ff_simple_idct_add_8(uint8_t *dest, int line_size, DCTELEM *block); +void ff_simple_idct_8(DCTELEM *block); + +void ff_simple_idct_put_10(uint8_t *dest, int line_size, DCTELEM *block); +void ff_simple_idct_add_10(uint8_t *dest, int line_size, DCTELEM *block); +void ff_simple_idct_10(DCTELEM *block); + void ff_simple_idct_mmx(int16_t *block); void ff_simple_idct_add_mmx(uint8_t *dest, int line_size, int16_t *block); void ff_simple_idct_put_mmx(uint8_t *dest, int line_size, int16_t *block); -void ff_simple_idct(DCTELEM *block); void 
ff_simple_idct248_put(uint8_t *dest, int line_size, DCTELEM *block); diff --git a/libavcodec/simple_idct_template.c b/libavcodec/simple_idct_template.c new file mode 100644 index 0000000000..6d3f6f764d --- /dev/null +++ b/libavcodec/simple_idct_template.c @@ -0,0 +1,316 @@ +/* + * Simple IDCT + * + * Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * simpleidct in C. + */ + +/* + based upon some outcommented c code from mpeg2dec (idct_mmx.c + written by Aaron Holtzman <aholtzma@ess.engr.uvic.ca>) + */ + +#include "bit_depth_template.c" + +#undef W1 +#undef W2 +#undef W3 +#undef W4 +#undef W5 +#undef W6 +#undef W7 +#undef ROW_SHIFT +#undef COL_SHIFT +#undef DC_SHIFT +#undef MUL +#undef MAC + +#if BIT_DEPTH == 8 + +#define W1 22725 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 +#define W2 21407 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 +#define W3 19266 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 +#define W4 16383 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 +#define W5 12873 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 +#define W6 8867 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 +#define W7 4520 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 + +#define ROW_SHIFT 11 +#define COL_SHIFT 20 +#define DC_SHIFT 3 + +#define MUL(a, b) MUL16(a, b) +#define MAC(a, b, c) MAC16(a, b, c) + +#elif BIT_DEPTH == 10 + +#define W1 90901 +#define W2 85627 +#define W3 77062 +#define W4 65535 +#define W5 51491 +#define W6 35468 +#define W7 18081 + +#define ROW_SHIFT 15 +#define COL_SHIFT 20 +#define DC_SHIFT 1 + +#define MUL(a, b) ((a) * (b)) +#define MAC(a, b, c) ((a) += (b) * (c)) + +#else + +#error "Unsupported bitdepth" + +#endif + +static inline void FUNC(idctRowCondDC)(DCTELEM *row) +{ + int a0, a1, a2, a3, b0, b1, b2, b3; + +#if HAVE_FAST_64BIT +#define ROW0_MASK (0xffffLL << 48 * HAVE_BIGENDIAN) + if (((((uint64_t *)row)[0] & ~ROW0_MASK) | ((uint64_t *)row)[1]) == 0) { + uint64_t temp = (row[0] << DC_SHIFT) & 0xffff; + temp += temp << 16; + temp += temp << 32; + ((uint64_t *)row)[0] = temp; + ((uint64_t *)row)[1] = temp; + return; + } +#else + if (!(((uint32_t*)row)[1] | + ((uint32_t*)row)[2] | + ((uint32_t*)row)[3] | + row[1])) { + uint32_t temp = (row[0] << DC_SHIFT) & 0xffff; + temp += temp << 16; + ((uint32_t*)row)[0]=((uint32_t*)row)[1] = + ((uint32_t*)row)[2]=((uint32_t*)row)[3] = temp; + return; + } +#endif + + a0 = (W4 * row[0]) + (1 << (ROW_SHIFT - 1)); + a1 = a0; + a2 = a0; + a3 = a0; + + a0 += W2 * row[2]; + a1 += W6 * row[2]; + a2 -= W6 * row[2]; + a3 -= W2 * row[2]; + + b0 = MUL(W1, row[1]); + MAC(b0, W3, row[3]); + b1 = MUL(W3, row[1]); + MAC(b1, -W7, row[3]); + b2 = MUL(W5, row[1]); + MAC(b2, -W1, row[3]); + b3 = MUL(W7, row[1]); + MAC(b3, -W5, row[3]); + + if (AV_RN64A(row + 4)) { + a0 += W4*row[4] + W6*row[6]; + a1 += - W4*row[4] - 
W2*row[6]; + a2 += - W4*row[4] + W2*row[6]; + a3 += W4*row[4] - W6*row[6]; + + MAC(b0, W5, row[5]); + MAC(b0, W7, row[7]); + + MAC(b1, -W1, row[5]); + MAC(b1, -W5, row[7]); + + MAC(b2, W7, row[5]); + MAC(b2, W3, row[7]); + + MAC(b3, W3, row[5]); + MAC(b3, -W1, row[7]); + } + + row[0] = (a0 + b0) >> ROW_SHIFT; + row[7] = (a0 - b0) >> ROW_SHIFT; + row[1] = (a1 + b1) >> ROW_SHIFT; + row[6] = (a1 - b1) >> ROW_SHIFT; + row[2] = (a2 + b2) >> ROW_SHIFT; + row[5] = (a2 - b2) >> ROW_SHIFT; + row[3] = (a3 + b3) >> ROW_SHIFT; + row[4] = (a3 - b3) >> ROW_SHIFT; +} + +#define IDCT_COLS do { \ + a0 = W4 * (col[8*0] + ((1<<(COL_SHIFT-1))/W4)); \ + a1 = a0; \ + a2 = a0; \ + a3 = a0; \ + \ + a0 += W2*col[8*2]; \ + a1 += W6*col[8*2]; \ + a2 += -W6*col[8*2]; \ + a3 += -W2*col[8*2]; \ + \ + b0 = MUL(W1, col[8*1]); \ + b1 = MUL(W3, col[8*1]); \ + b2 = MUL(W5, col[8*1]); \ + b3 = MUL(W7, col[8*1]); \ + \ + MAC(b0, W3, col[8*3]); \ + MAC(b1, -W7, col[8*3]); \ + MAC(b2, -W1, col[8*3]); \ + MAC(b3, -W5, col[8*3]); \ + \ + if (col[8*4]) { \ + a0 += W4*col[8*4]; \ + a1 += -W4*col[8*4]; \ + a2 += -W4*col[8*4]; \ + a3 += W4*col[8*4]; \ + } \ + \ + if (col[8*5]) { \ + MAC(b0, W5, col[8*5]); \ + MAC(b1, -W1, col[8*5]); \ + MAC(b2, W7, col[8*5]); \ + MAC(b3, W3, col[8*5]); \ + } \ + \ + if (col[8*6]) { \ + a0 += W6*col[8*6]; \ + a1 += -W2*col[8*6]; \ + a2 += W2*col[8*6]; \ + a3 += -W6*col[8*6]; \ + } \ + \ + if (col[8*7]) { \ + MAC(b0, W7, col[8*7]); \ + MAC(b1, -W5, col[8*7]); \ + MAC(b2, W3, col[8*7]); \ + MAC(b3, -W1, col[8*7]); \ + } \ + } while (0) + +static inline void FUNC(idctSparseColPut)(pixel *dest, int line_size, + DCTELEM *col) +{ + int a0, a1, a2, a3, b0, b1, b2, b3; + INIT_CLIP; + + IDCT_COLS; + + dest[0] = CLIP((a0 + b0) >> COL_SHIFT); + dest += line_size; + dest[0] = CLIP((a1 + b1) >> COL_SHIFT); + dest += line_size; + dest[0] = CLIP((a2 + b2) >> COL_SHIFT); + dest += line_size; + dest[0] = CLIP((a3 + b3) >> COL_SHIFT); + dest += line_size; + dest[0] = CLIP((a3 - b3) >> COL_SHIFT); + dest += line_size; + dest[0] = CLIP((a2 - b2) >> COL_SHIFT); + dest += line_size; + dest[0] = CLIP((a1 - b1) >> COL_SHIFT); + dest += line_size; + dest[0] = CLIP((a0 - b0) >> COL_SHIFT); +} + +static inline void FUNC(idctSparseColAdd)(pixel *dest, int line_size, + DCTELEM *col) +{ + int a0, a1, a2, a3, b0, b1, b2, b3; + INIT_CLIP; + + IDCT_COLS; + + dest[0] = CLIP(dest[0] + ((a0 + b0) >> COL_SHIFT)); + dest += line_size; + dest[0] = CLIP(dest[0] + ((a1 + b1) >> COL_SHIFT)); + dest += line_size; + dest[0] = CLIP(dest[0] + ((a2 + b2) >> COL_SHIFT)); + dest += line_size; + dest[0] = CLIP(dest[0] + ((a3 + b3) >> COL_SHIFT)); + dest += line_size; + dest[0] = CLIP(dest[0] + ((a3 - b3) >> COL_SHIFT)); + dest += line_size; + dest[0] = CLIP(dest[0] + ((a2 - b2) >> COL_SHIFT)); + dest += line_size; + dest[0] = CLIP(dest[0] + ((a1 - b1) >> COL_SHIFT)); + dest += line_size; + dest[0] = CLIP(dest[0] + ((a0 - b0) >> COL_SHIFT)); +} + +static inline void FUNC(idctSparseCol)(DCTELEM *col) +{ + int a0, a1, a2, a3, b0, b1, b2, b3; + + IDCT_COLS; + + col[0 ] = ((a0 + b0) >> COL_SHIFT); + col[8 ] = ((a1 + b1) >> COL_SHIFT); + col[16] = ((a2 + b2) >> COL_SHIFT); + col[24] = ((a3 + b3) >> COL_SHIFT); + col[32] = ((a3 - b3) >> COL_SHIFT); + col[40] = ((a2 - b2) >> COL_SHIFT); + col[48] = ((a1 - b1) >> COL_SHIFT); + col[56] = ((a0 - b0) >> COL_SHIFT); +} + +void FUNC(ff_simple_idct_put)(uint8_t *dest_, int line_size, DCTELEM *block) +{ + pixel *dest = (pixel *)dest_; + int i; + + line_size /= sizeof(pixel); + + for (i = 0; i < 8; i++) + 
FUNC(idctRowCondDC)(block + i*8); + + for (i = 0; i < 8; i++) + FUNC(idctSparseColPut)(dest + i, line_size, block + i); +} + +void FUNC(ff_simple_idct_add)(uint8_t *dest_, int line_size, DCTELEM *block) +{ + pixel *dest = (pixel *)dest_; + int i; + + line_size /= sizeof(pixel); + + for (i = 0; i < 8; i++) + FUNC(idctRowCondDC)(block + i*8); + + for (i = 0; i < 8; i++) + FUNC(idctSparseColAdd)(dest + i, line_size, block + i); +} + +void FUNC(ff_simple_idct)(DCTELEM *block) +{ + int i; + + for (i = 0; i < 8; i++) + FUNC(idctRowCondDC)(block + i*8); + + for (i = 0; i < 8; i++) + FUNC(idctSparseCol)(block + i); +} diff --git a/libavcodec/sipr.c b/libavcodec/sipr.c index 9befe8a158..d6179a8edf 100644 --- a/libavcodec/sipr.c +++ b/libavcodec/sipr.c @@ -549,13 +549,11 @@ static int sipr_decode_frame(AVCodecContext *avctx, void *datap, } AVCodec ff_sipr_decoder = { - "sipr", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_SIPR, - sizeof(SiprContext), - sipr_decoder_init, - NULL, - NULL, - sipr_decode_frame, + .name = "sipr", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_SIPR, + .priv_data_size = sizeof(SiprContext), + .init = sipr_decoder_init, + .decode = sipr_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("RealAudio SIPR / ACELP.NET"), }; diff --git a/libavcodec/smacker.c b/libavcodec/smacker.c index b8eab837ff..bcfde4c8ba 100644 --- a/libavcodec/smacker.c +++ b/libavcodec/smacker.c @@ -687,27 +687,23 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size, } AVCodec ff_smacker_decoder = { - "smackvid", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_SMACKVIDEO, - sizeof(SmackVContext), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, + .name = "smackvid", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_SMACKVIDEO, + .priv_data_size = sizeof(SmackVContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Smacker video"), }; AVCodec ff_smackaud_decoder = { - "smackaud", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_SMACKAUDIO, - 0, - smka_decode_init, - NULL, - NULL, - smka_decode_frame, + .name = "smackaud", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_SMACKAUDIO, + .init = smka_decode_init, + .decode = smka_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("Smacker audio"), }; diff --git a/libavcodec/smc.c b/libavcodec/smc.c index fddd5ab32a..24c5c14e49 100644 --- a/libavcodec/smc.c +++ b/libavcodec/smc.c @@ -473,14 +473,13 @@ static av_cold int smc_decode_end(AVCodecContext *avctx) } AVCodec ff_smc_decoder = { - "smc", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_SMC, - sizeof(SmcContext), - smc_decode_init, - NULL, - smc_decode_end, - smc_decode_frame, - CODEC_CAP_DR1, + .name = "smc", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_SMC, + .priv_data_size = sizeof(SmcContext), + .init = smc_decode_init, + .close = smc_decode_end, + .decode = smc_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("QuickTime Graphics (SMC)"), }; diff --git a/libavcodec/snow.c b/libavcodec/snow.c index 43a71904eb..734ff44735 100644 --- a/libavcodec/snow.c +++ b/libavcodec/snow.c @@ -33,42 +33,6 @@ #undef NDEBUG #include <assert.h> -static const int8_t quant3[256]={ - 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, --1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, --1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, --1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, --1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, --1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, --1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, --1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, --1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 0, -}; -static const int8_t quant3b[256]={ - 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, --1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, --1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, --1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, --1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, --1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, --1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, --1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, --1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -}; static const int8_t quant3bA[256]={ 0, 0, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, @@ -87,153 +51,7 @@ static const int8_t quant3bA[256]={ 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, 1,-1, }; -static const int8_t quant5[256]={ - 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, --2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2, --2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2, --2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2, --2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2, --2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2, --2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2, --2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2, --2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,-1,-1, -}; -static const int8_t quant7[256]={ - 0, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, --3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3, --3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3, --3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3, --3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3, --3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3, --3,-3,-3,-3,-3,-3,-3,-3,-3,-2,-2,-2,-2,-2,-2,-2, --2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2, --2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,-1, -}; -static const int8_t quant9[256]={ - 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, --4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, --4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, --4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, --4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, --4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, --4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, --4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-3,-3,-3,-3, --3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-2,-2,-2,-2,-1,-1, -}; -static const int8_t quant11[256]={ - 0, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, --5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5, --5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5, --5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5, --5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5, --5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5, --5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-4,-4, --4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, --4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3,-2,-2,-2,-1, -}; -static const int8_t quant13[256]={ - 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, --6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6, --6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6, --6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6, --6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6, --6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-5, --5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5, --5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5, --4,-4,-4,-4,-4,-4,-4,-4,-4,-3,-3,-3,-3,-2,-2,-1, -}; -#if 0 //64*cubic -static const uint8_t obmc32[1024]={ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, - 0, 0, 0, 4, 4, 4, 4, 8, 8, 12, 12, 12, 16, 16, 16, 16, 16, 16, 16, 16, 12, 12, 12, 8, 8, 4, 4, 4, 4, 0, 0, 0, - 0, 0, 4, 4, 8, 8, 12, 16, 16, 20, 24, 24, 28, 28, 32, 32, 32, 32, 28, 28, 24, 24, 20, 16, 16, 12, 8, 8, 4, 4, 0, 0, - 0, 0, 4, 8, 8, 12, 16, 24, 28, 32, 36, 40, 44, 48, 48, 48, 48, 48, 48, 44, 40, 36, 32, 28, 24, 16, 12, 8, 8, 4, 0, 0, - 0, 4, 4, 8, 12, 20, 24, 32, 40, 44, 52, 56, 60, 64, 68, 72, 72, 68, 64, 60, 56, 52, 44, 40, 32, 24, 20, 12, 8, 4, 4, 0, - 0, 4, 4, 12, 16, 24, 32, 40, 52, 60, 68, 76, 80, 88, 88, 92, 92, 88, 88, 80, 76, 68, 60, 52, 40, 32, 24, 16, 12, 4, 4, 0, - 0, 4, 8, 16, 24, 32, 40, 52, 64, 76, 84, 92,100,108,112,116,116,112,108,100, 92, 84, 76, 64, 52, 40, 32, 24, 16, 8, 4, 0, - 0, 4, 8, 16, 28, 40, 52, 64, 76, 88,100,112,124,132,136,140,140,136,132,124,112,100, 88, 76, 64, 52, 40, 28, 16, 8, 4, 0, - 0, 4, 12, 20, 32, 44, 60, 76, 88,104,120,132,144,152,160,164,164,160,152,144,132,120,104, 88, 76, 60, 44, 32, 20, 12, 4, 0, - 0, 4, 12, 24, 36, 48, 68, 84,100,120,136,152,164,176,180,184,184,180,176,164,152,136,120,100, 84, 
68, 48, 36, 24, 12, 4, 0, - 0, 4, 12, 24, 40, 56, 76, 92,112,132,152,168,180,192,204,208,208,204,192,180,168,152,132,112, 92, 76, 56, 40, 24, 12, 4, 0, - 0, 4, 16, 28, 44, 60, 80,100,124,144,164,180,196,208,220,224,224,220,208,196,180,164,144,124,100, 80, 60, 44, 28, 16, 4, 0, - 0, 8, 16, 28, 48, 64, 88,108,132,152,176,192,208,224,232,240,240,232,224,208,192,176,152,132,108, 88, 64, 48, 28, 16, 8, 0, - 0, 4, 16, 32, 48, 68, 88,112,136,160,180,204,220,232,244,248,248,244,232,220,204,180,160,136,112, 88, 68, 48, 32, 16, 4, 0, - 1, 8, 16, 32, 48, 72, 92,116,140,164,184,208,224,240,248,255,255,248,240,224,208,184,164,140,116, 92, 72, 48, 32, 16, 8, 1, - 1, 8, 16, 32, 48, 72, 92,116,140,164,184,208,224,240,248,255,255,248,240,224,208,184,164,140,116, 92, 72, 48, 32, 16, 8, 1, - 0, 4, 16, 32, 48, 68, 88,112,136,160,180,204,220,232,244,248,248,244,232,220,204,180,160,136,112, 88, 68, 48, 32, 16, 4, 0, - 0, 8, 16, 28, 48, 64, 88,108,132,152,176,192,208,224,232,240,240,232,224,208,192,176,152,132,108, 88, 64, 48, 28, 16, 8, 0, - 0, 4, 16, 28, 44, 60, 80,100,124,144,164,180,196,208,220,224,224,220,208,196,180,164,144,124,100, 80, 60, 44, 28, 16, 4, 0, - 0, 4, 12, 24, 40, 56, 76, 92,112,132,152,168,180,192,204,208,208,204,192,180,168,152,132,112, 92, 76, 56, 40, 24, 12, 4, 0, - 0, 4, 12, 24, 36, 48, 68, 84,100,120,136,152,164,176,180,184,184,180,176,164,152,136,120,100, 84, 68, 48, 36, 24, 12, 4, 0, - 0, 4, 12, 20, 32, 44, 60, 76, 88,104,120,132,144,152,160,164,164,160,152,144,132,120,104, 88, 76, 60, 44, 32, 20, 12, 4, 0, - 0, 4, 8, 16, 28, 40, 52, 64, 76, 88,100,112,124,132,136,140,140,136,132,124,112,100, 88, 76, 64, 52, 40, 28, 16, 8, 4, 0, - 0, 4, 8, 16, 24, 32, 40, 52, 64, 76, 84, 92,100,108,112,116,116,112,108,100, 92, 84, 76, 64, 52, 40, 32, 24, 16, 8, 4, 0, - 0, 4, 4, 12, 16, 24, 32, 40, 52, 60, 68, 76, 80, 88, 88, 92, 92, 88, 88, 80, 76, 68, 60, 52, 40, 32, 24, 16, 12, 4, 4, 0, - 0, 4, 4, 8, 12, 20, 24, 32, 40, 44, 52, 56, 60, 64, 68, 72, 72, 68, 64, 60, 56, 52, 44, 40, 32, 24, 20, 12, 8, 4, 4, 0, - 0, 0, 4, 8, 8, 12, 16, 24, 28, 32, 36, 40, 44, 48, 48, 48, 48, 48, 48, 44, 40, 36, 32, 28, 24, 16, 12, 8, 8, 4, 0, 0, - 0, 0, 4, 4, 8, 8, 12, 16, 16, 20, 24, 24, 28, 28, 32, 32, 32, 32, 28, 28, 24, 24, 20, 16, 16, 12, 8, 8, 4, 4, 0, 0, - 0, 0, 0, 4, 4, 4, 4, 8, 8, 12, 12, 12, 16, 16, 16, 16, 16, 16, 16, 16, 12, 12, 12, 8, 8, 4, 4, 4, 4, 0, 0, 0, - 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -//error:0.000022 -}; -static const uint8_t obmc16[256]={ - 0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, - 0, 4, 4, 8, 16, 20, 20, 24, 24, 20, 20, 16, 8, 4, 4, 0, - 0, 4, 16, 24, 36, 44, 52, 60, 60, 52, 44, 36, 24, 16, 4, 0, - 0, 8, 24, 44, 60, 80, 96,104,104, 96, 80, 60, 44, 24, 8, 0, - 0, 16, 36, 60, 92,116,136,152,152,136,116, 92, 60, 36, 16, 0, - 0, 20, 44, 80,116,152,180,196,196,180,152,116, 80, 44, 20, 0, - 4, 20, 52, 96,136,180,212,228,228,212,180,136, 96, 52, 20, 4, - 4, 24, 60,104,152,196,228,248,248,228,196,152,104, 60, 24, 4, - 4, 24, 60,104,152,196,228,248,248,228,196,152,104, 60, 24, 4, - 4, 20, 52, 96,136,180,212,228,228,212,180,136, 96, 52, 20, 4, - 0, 20, 44, 80,116,152,180,196,196,180,152,116, 80, 44, 20, 0, - 0, 16, 36, 60, 92,116,136,152,152,136,116, 92, 60, 36, 16, 0, - 0, 8, 24, 44, 60, 80, 96,104,104, 96, 80, 60, 44, 24, 8, 0, - 0, 4, 16, 24, 36, 44, 52, 60, 60, 52, 44, 36, 24, 16, 4, 0, - 0, 4, 4, 8, 16, 20, 20, 24, 24, 20, 20, 16, 8, 
4, 4, 0, - 0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, -//error:0.000033 -}; -#elif 1 // 64*linear static const uint8_t obmc32[1024]={ 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 4, 4, 4, 8, 8, 8, 12, 12, 16, 16, 16, 20, 20, 20, 24, 24, 20, 20, 20, 16, 16, 16, 12, 12, 8, 8, 8, 4, 4, 4, 0, @@ -288,62 +106,6 @@ static const uint8_t obmc16[256]={ 0, 4, 4, 8, 8, 12, 12, 16, 16, 12, 12, 8, 8, 4, 4, 0, //error:0.000015 }; -#else //64*cos -static const uint8_t obmc32[1024]={ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 8, 4, 4, 8, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 4, 4, 4, 4, 8, 8, 12, 12, 12, 12, 16, 16, 16, 16, 16, 16, 12, 12, 12, 12, 8, 8, 4, 4, 4, 4, 0, 0, 0, - 0, 0, 4, 4, 4, 8, 8, 12, 16, 20, 20, 24, 28, 28, 28, 28, 28, 28, 28, 28, 24, 20, 20, 16, 12, 8, 8, 4, 4, 4, 0, 0, - 0, 0, 4, 4, 8, 12, 16, 20, 24, 28, 36, 40, 44, 44, 48, 48, 48, 48, 44, 44, 40, 36, 28, 24, 20, 16, 12, 8, 4, 4, 0, 0, - 0, 0, 4, 8, 12, 20, 24, 32, 36, 44, 48, 56, 60, 64, 68, 68, 68, 68, 64, 60, 56, 48, 44, 36, 32, 24, 20, 12, 8, 4, 0, 0, - 0, 4, 4, 8, 16, 24, 32, 40, 48, 60, 68, 76, 80, 84, 88, 92, 92, 88, 84, 80, 76, 68, 60, 48, 40, 32, 24, 16, 8, 4, 4, 0, - 0, 4, 8, 12, 20, 32, 40, 52, 64, 76, 84, 96,104,108,112,116,116,112,108,104, 96, 84, 76, 64, 52, 40, 32, 20, 12, 8, 4, 0, - 0, 4, 8, 16, 24, 36, 48, 64, 76, 92,104,116,124,132,136,140,140,136,132,124,116,104, 92, 76, 64, 48, 36, 24, 16, 8, 4, 0, - 0, 4, 12, 20, 28, 44, 60, 76, 92,104,120,136,148,156,160,164,164,160,156,148,136,120,104, 92, 76, 60, 44, 28, 20, 12, 4, 0, - 0, 4, 12, 20, 36, 48, 68, 84,104,120,140,152,168,176,184,188,188,184,176,168,152,140,120,104, 84, 68, 48, 36, 20, 12, 4, 0, - 0, 4, 12, 24, 36, 56, 76, 96,116,136,152,172,184,196,204,208,208,204,196,184,172,152,136,116, 96, 76, 56, 36, 24, 12, 4, 0, - 0, 4, 12, 24, 44, 60, 80,104,124,148,168,184,200,212,224,228,228,224,212,200,184,168,148,124,104, 80, 60, 44, 24, 12, 4, 0, - 0, 4, 12, 28, 44, 64, 84,108,132,156,176,196,212,228,236,240,240,236,228,212,196,176,156,132,108, 84, 64, 44, 28, 12, 4, 0, - 0, 4, 16, 28, 48, 68, 88,112,136,160,184,204,224,236,244,252,252,244,236,224,204,184,160,136,112, 88, 68, 48, 28, 16, 4, 0, - 1, 4, 16, 28, 48, 68, 92,116,140,164,188,208,228,240,252,255,255,252,240,228,208,188,164,140,116, 92, 68, 48, 28, 16, 4, 1, - 1, 4, 16, 28, 48, 68, 92,116,140,164,188,208,228,240,252,255,255,252,240,228,208,188,164,140,116, 92, 68, 48, 28, 16, 4, 1, - 0, 4, 16, 28, 48, 68, 88,112,136,160,184,204,224,236,244,252,252,244,236,224,204,184,160,136,112, 88, 68, 48, 28, 16, 4, 0, - 0, 4, 12, 28, 44, 64, 84,108,132,156,176,196,212,228,236,240,240,236,228,212,196,176,156,132,108, 84, 64, 44, 28, 12, 4, 0, - 0, 4, 12, 24, 44, 60, 80,104,124,148,168,184,200,212,224,228,228,224,212,200,184,168,148,124,104, 80, 60, 44, 24, 12, 4, 0, - 0, 4, 12, 24, 36, 56, 76, 96,116,136,152,172,184,196,204,208,208,204,196,184,172,152,136,116, 96, 76, 56, 36, 24, 12, 4, 0, - 0, 4, 12, 20, 36, 48, 68, 84,104,120,140,152,168,176,184,188,188,184,176,168,152,140,120,104, 84, 68, 48, 36, 20, 12, 4, 0, - 0, 4, 12, 20, 28, 44, 60, 76, 92,104,120,136,148,156,160,164,164,160,156,148,136,120,104, 92, 76, 60, 44, 28, 20, 12, 4, 0, - 0, 4, 8, 16, 24, 36, 48, 64, 76, 92,104,116,124,132,136,140,140,136,132,124,116,104, 92, 76, 64, 48, 36, 24, 16, 8, 4, 0, - 0, 4, 8, 12, 20, 32, 40, 52, 64, 76, 84, 96,104,108,112,116,116,112,108,104, 96, 84, 76, 64, 52, 
40, 32, 20, 12, 8, 4, 0, - 0, 4, 4, 8, 16, 24, 32, 40, 48, 60, 68, 76, 80, 84, 88, 92, 92, 88, 84, 80, 76, 68, 60, 48, 40, 32, 24, 16, 8, 4, 4, 0, - 0, 0, 4, 8, 12, 20, 24, 32, 36, 44, 48, 56, 60, 64, 68, 68, 68, 68, 64, 60, 56, 48, 44, 36, 32, 24, 20, 12, 8, 4, 0, 0, - 0, 0, 4, 4, 8, 12, 16, 20, 24, 28, 36, 40, 44, 44, 48, 48, 48, 48, 44, 44, 40, 36, 28, 24, 20, 16, 12, 8, 4, 4, 0, 0, - 0, 0, 4, 4, 4, 8, 8, 12, 16, 20, 20, 24, 28, 28, 28, 28, 28, 28, 28, 28, 24, 20, 20, 16, 12, 8, 8, 4, 4, 4, 0, 0, - 0, 0, 0, 4, 4, 4, 4, 8, 8, 12, 12, 12, 12, 16, 16, 16, 16, 16, 16, 12, 12, 12, 12, 8, 8, 4, 4, 4, 4, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 8, 4, 4, 8, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -//error:0.000022 -}; -static const uint8_t obmc16[256]={ - 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, - 0, 0, 4, 8, 12, 16, 20, 20, 20, 20, 16, 12, 8, 4, 0, 0, - 0, 4, 12, 24, 32, 44, 52, 56, 56, 52, 44, 32, 24, 12, 4, 0, - 0, 8, 24, 40, 60, 80, 96,104,104, 96, 80, 60, 40, 24, 8, 0, - 0, 12, 32, 64, 92,120,140,152,152,140,120, 92, 64, 32, 12, 0, - 4, 16, 44, 80,120,156,184,196,196,184,156,120, 80, 44, 16, 4, - 4, 20, 52, 96,140,184,216,232,232,216,184,140, 96, 52, 20, 4, - 0, 20, 56,104,152,196,232,252,252,232,196,152,104, 56, 20, 0, - 0, 20, 56,104,152,196,232,252,252,232,196,152,104, 56, 20, 0, - 4, 20, 52, 96,140,184,216,232,232,216,184,140, 96, 52, 20, 4, - 4, 16, 44, 80,120,156,184,196,196,184,156,120, 80, 44, 16, 4, - 0, 12, 32, 64, 92,120,140,152,152,140,120, 92, 64, 32, 12, 0, - 0, 8, 24, 40, 60, 80, 96,104,104, 96, 80, 60, 40, 24, 8, 0, - 0, 4, 12, 24, 32, 44, 52, 56, 56, 52, 44, 32, 24, 12, 4, 0, - 0, 0, 4, 8, 12, 16, 20, 20, 20, 20, 16, 12, 8, 4, 0, 0, - 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, -//error:0.000022 -}; -#endif /* 0 */ //linear *64 static const uint8_t obmc8[64]={ @@ -509,7 +271,6 @@ static inline void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signe if(v){ const int a= FFABS(v); const int e= av_log2(a); -#if 1 const int el= FFMIN(e, 10); put_rac(c, state+0, 0); @@ -530,35 +291,6 @@ static inline void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signe if(is_signed) put_rac(c, state+11 + el, v < 0); //11..21 -#else - - put_rac(c, state+0, 0); - if(e<=9){ - for(i=0; i<e; i++){ - put_rac(c, state+1+i, 1); //1..10 - } - put_rac(c, state+1+i, 0); - - for(i=e-1; i>=0; i--){ - put_rac(c, state+22+i, (a>>i)&1); //22..31 - } - - if(is_signed) - put_rac(c, state+11 + e, v < 0); //11..21 - }else{ - for(i=0; i<e; i++){ - put_rac(c, state+1+FFMIN(i,9), 1); //1..10 - } - put_rac(c, state+1+9, 0); - - for(i=e-1; i>=0; i--){ - put_rac(c, state+22+FFMIN(i,9), (a>>i)&1); //22..31 - } - - if(is_signed) - put_rac(c, state+11 + 10, v < 0); //11..21 - } -#endif /* 1 */ }else{ put_rac(c, state+0, 1); } @@ -789,14 +521,6 @@ static int alloc_blocks(SnowContext *s){ return 0; } -static inline void copy_rac_state(RangeCoder *d, RangeCoder *s){ - uint8_t *bytestream= d->bytestream; - uint8_t *bytestream_start= d->bytestream_start; - *d= *s; - d->bytestream= bytestream; - d->bytestream_start= bytestream_start; -} - static inline void set_blocks(SnowContext *s, int level, int x, int y, int l, int cb, int cr, int mx, int my, int ref, int type){ const int w= s->b_width << s->block_max_depth; const int rem_depth= s->block_max_depth - level; @@ -1323,40 +1047,6 @@ static av_always_inline void add_yblock(SnowContext *s, int sliced, slice_buffer block[3]= ptmp; 
pred_block(s, block[3], tmp, src_stride, src_x, src_y, b_w, b_h, rb, plane_index, w, h); } -#if 0 - for(y=0; y<b_h; y++){ - for(x=0; x<b_w; x++){ - int v= obmc [x + y*obmc_stride] * block[3][x + y*src_stride] * (256/OBMC_MAX); - if(add) dst[x + y*dst_stride] += v; - else dst[x + y*dst_stride] -= v; - } - } - for(y=0; y<b_h; y++){ - uint8_t *obmc2= obmc + (obmc_stride>>1); - for(x=0; x<b_w; x++){ - int v= obmc2[x + y*obmc_stride] * block[2][x + y*src_stride] * (256/OBMC_MAX); - if(add) dst[x + y*dst_stride] += v; - else dst[x + y*dst_stride] -= v; - } - } - for(y=0; y<b_h; y++){ - uint8_t *obmc3= obmc + obmc_stride*(obmc_stride>>1); - for(x=0; x<b_w; x++){ - int v= obmc3[x + y*obmc_stride] * block[1][x + y*src_stride] * (256/OBMC_MAX); - if(add) dst[x + y*dst_stride] += v; - else dst[x + y*dst_stride] -= v; - } - } - for(y=0; y<b_h; y++){ - uint8_t *obmc3= obmc + obmc_stride*(obmc_stride>>1); - uint8_t *obmc4= obmc3+ (obmc_stride>>1); - for(x=0; x<b_w; x++){ - int v= obmc4[x + y*obmc_stride] * block[0][x + y*src_stride] * (256/OBMC_MAX); - if(add) dst[x + y*dst_stride] += v; - else dst[x + y*dst_stride] -= v; - } - } -#else if(sliced){ s->dwt.inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8); }else{ @@ -1387,7 +1077,6 @@ static av_always_inline void add_yblock(SnowContext *s, int sliced, slice_buffer } } } -#endif /* 0 */ } static av_always_inline void predict_slice_buffered(SnowContext *s, slice_buffer * sb, IDWTELEM * old_buffer, int plane_index, int add, int mb_y){ @@ -2241,16 +1930,14 @@ static av_cold int decode_end(AVCodecContext *avctx) } AVCodec ff_snow_decoder = { - "snow", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_SNOW, - sizeof(SnowContext), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/, - NULL, + .name = "snow", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_SNOW, + .priv_data_size = sizeof(SnowContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/, .long_name = NULL_IF_CONFIG_SMALL("Snow"), }; @@ -3752,8 +3439,8 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, frame_start(s); s->m.current_picture_ptr= &s->m.current_picture; - s->m.last_picture.pts= s->m.current_picture.pts; - s->m.current_picture.pts= pict->pts; + s->m.last_picture.f.pts = s->m.current_picture.f.pts; + s->m.current_picture.f.pts = pict->pts; if(pict->pict_type == AV_PICTURE_TYPE_P){ int block_width = (width +15)>>4; int block_height= (height+15)>>4; @@ -3763,14 +3450,14 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, assert(s->last_picture[0].data[0]); s->m.avctx= s->avctx; - s->m.current_picture.data[0]= s->current_picture.data[0]; - s->m. last_picture.data[0]= s->last_picture[0].data[0]; - s->m. new_picture.data[0]= s-> input_picture.data[0]; + s->m.current_picture.f.data[0] = s->current_picture.data[0]; + s->m. last_picture.f.data[0] = s->last_picture[0].data[0]; + s->m. new_picture.f.data[0] = s-> input_picture.data[0]; s->m. last_picture_ptr= &s->m. last_picture; s->m.linesize= - s->m. last_picture.linesize[0]= - s->m. new_picture.linesize[0]= - s->m.current_picture.linesize[0]= stride; + s->m. last_picture.f.linesize[0] = + s->m. 
new_picture.f.linesize[0] = + s->m.current_picture.f.linesize[0] = stride; s->m.uvlinesize= s->current_picture.linesize[1]; s->m.width = width; s->m.height= height; @@ -3957,9 +3644,9 @@ redo_frame: s->current_picture.quality = pict->quality; s->m.frame_bits = 8*(s->c.bytestream - s->c.bytestream_start); s->m.p_tex_bits = s->m.frame_bits - s->m.misc_bits - s->m.mv_bits; - s->m.current_picture.display_picture_number = - s->m.current_picture.coded_picture_number = avctx->frame_number; - s->m.current_picture.quality = pict->quality; + s->m.current_picture.f.display_picture_number = + s->m.current_picture.f.coded_picture_number = avctx->frame_number; + s->m.current_picture.f.quality = pict->quality; s->m.total_bits += 8*(s->c.bytestream - s->c.bytestream_start); if(s->pass1_rc) if (ff_rate_estimate_qscale(&s->m, 0) < 0) @@ -3990,13 +3677,13 @@ static av_cold int encode_end(AVCodecContext *avctx) } AVCodec ff_snow_encoder = { - "snow", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_SNOW, - sizeof(SnowContext), - encode_init, - encode_frame, - encode_end, + .name = "snow", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_SNOW, + .priv_data_size = sizeof(SnowContext), + .init = encode_init, + .encode = encode_frame, + .close = encode_end, .long_name = NULL_IF_CONFIG_SMALL("Snow"), }; #endif @@ -4008,6 +3695,7 @@ AVCodec ff_snow_encoder = { #undef printf #include "libavutil/lfg.h" +#include "libavutil/mathematics.h" int main(void){ int width=256; @@ -4042,27 +3730,6 @@ int main(void){ for(i=0; i<width*height; i++) if(FFABS(buffer[0][i] - buffer[1][i])>20) printf("fsck: %6d %12d %7d\n",i, buffer[0][i], buffer[1][i]); -#if 0 - printf("testing AC coder\n"); - memset(s.header_state, 0, sizeof(s.header_state)); - ff_init_range_encoder(&s.c, buffer[0], 256*256); - ff_init_cabac_states(&s.c, ff_h264_lps_range, ff_h264_mps_state, ff_h264_lps_state, 64); - - for(i=-256; i<256; i++){ - put_symbol(&s.c, s.header_state, i*i*i/3*FFABS(i), 1); - } - ff_rac_terminate(&s.c); - - memset(s.header_state, 0, sizeof(s.header_state)); - ff_init_range_decoder(&s.c, buffer[0], 256*256); - ff_init_cabac_states(&s.c, ff_h264_lps_range, ff_h264_mps_state, ff_h264_lps_state, 64); - - for(i=-256; i<256; i++){ - int j; - j= get_symbol(&s.c, s.header_state, 1); - if(j!=i*i*i/3*FFABS(i)) printf("fsck: %d != %d\n", i, j); - } -#endif { int level, orientation, x, y; int64_t errors[8][4]; @@ -4120,7 +3787,6 @@ int main(void){ buf+=stride>>1; memset(buffer[0], 0, sizeof(int)*width*height); -#if 1 for(y=0; y<height; y++){ for(x=0; x<width; x++){ int tab[4]={0,2,3,1}; @@ -4128,15 +3794,6 @@ int main(void){ } } ff_spatial_dwt(buffer[0], width, height, width, s.spatial_decomposition_type, s.spatial_decomposition_count); -#else - for(y=0; y<h; y++){ - for(x=0; x<w; x++){ - buf[x + y*stride ]=169; - buf[x + y*stride-w]=64; - } - } - ff_spatial_idwt(buffer[0], width, height, width, s.spatial_decomposition_type, s.spatial_decomposition_count); -#endif for(y=0; y<height; y++){ for(x=0; x<width; x++){ int64_t d= buffer[0][x + y*width]; diff --git a/libavcodec/sp5x.h b/libavcodec/sp5x.h index b2c53cc4fe..004fcbbc93 100644 --- a/libavcodec/sp5x.h +++ b/libavcodec/sp5x.h @@ -235,100 +235,4 @@ static const uint8_t sp5x_quant_table[20][64]= 124,124,124,124,124,124,124,124,124,124,124,124,124,124,124,124 } }; -#if 0 -/* 4NF-M, not ZigZag */ -static const uint8_t sp5x_quant_table_orig[18][64] = -{ - /* index 0, Q50 */ - { 16, 11, 10, 16, 24, 40, 51, 61, 12, 12, 14, 19, 26, 58, 60, 55, - 14, 13, 16, 24, 40, 57, 69, 56, 14, 17, 22, 29, 51, 87, 80, 62, - 18, 22, 37, 
56, 68,109,103, 77, 24, 35, 55, 64, 81,104,113, 92, - 49, 64, 78, 87,103,121,120,101, 72, 92, 95, 98,112,100,103, 99 }, - { 17, 18, 24, 47, 99, 99, 99, 99, 18, 21, 26, 66, 99, 99, 99, 99, - 24, 26, 56, 99, 99, 99, 99, 99, 47, 66, 99, 99, 99, 99, 99, 99, - 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, - 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99 }, - - /* index 1, Q70 */ - { 10, 7, 6, 10, 14, 24, 31, 37, 7, 7, 8, 11, 16, 35, 36, 33, - 8, 8, 10, 14, 24, 34, 41, 34, 8, 10, 13, 17, 31, 52, 48, 37, - 11, 13, 22, 34, 41, 65, 62, 46, 14, 21, 33, 38, 49, 62, 68, 55, - 29, 38, 47, 52, 62, 73, 72, 61, 43, 55, 57, 59, 67, 60, 62, 59 }, - { 10, 11, 14, 28, 59, 59, 59, 59, 11, 13, 16, 40, 59, 59, 59, 59, - 14, 16, 34, 59, 59, 59, 59, 59, 28, 40, 59, 59, 59, 59, 59, 59, - 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, - 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59 }, - - /* index 2, Q80 */ - { 6, 4, 4, 6, 10, 16, 20, 24, 5, 5, 6, 8, 10, 23, 24, 22, - 6, 5, 6, 10, 16, 23, 28, 22, 6, 7, 9, 12, 20, 35, 32, 25, - 7, 9, 15, 22, 27, 44, 41, 31, 10, 14, 22, 26, 32, 42, 45, 37, - 20, 26, 31, 35, 41, 48, 48, 40, 29, 37, 38, 39, 45, 40, 41, 40 }, - { 7, 7, 10, 19, 40, 40, 40, 40, 7, 8, 10, 26, 40, 40, 40, 40, - 10, 10, 22, 40, 40, 40, 40, 40, 19, 26, 40, 40, 40, 40, 40, 40, - 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, - 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40 }, - - /* index 3, Q85 */ - { 5, 3, 3, 5, 7, 12, 15, 18, 4, 4, 4, 6, 8, 17, 18, 17, - 4, 4, 5, 7, 12, 17, 21, 17, 4, 5, 7, 9, 15, 26, 24, 19, - 5, 7, 11, 17, 20, 33, 31, 23, 7, 11, 17, 19, 24, 31, 34, 28, - 15, 19, 23, 26, 31, 36, 36, 30, 22, 28, 29, 29, 34, 30, 31, 30 }, - { 5, 5, 7, 14, 30, 30, 30, 30, 5, 6, 8, 20, 30, 30, 30, 30, - 7, 8, 17, 30, 30, 30, 30, 30, 14, 20, 30, 30, 30, 30, 30, 30, - 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, - 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30 }, - - /* index 4, Q90 */ - { 3, 2, 2, 3, 5, 8, 10, 12, 2, 2, 3, 4, 5, 12, 12, 11, - 3, 3, 3, 5, 8, 11, 14, 11, 3, 3, 4, 6, 10, 17, 16, 12, - 4, 4, 7, 11, 14, 22, 21, 15, 5, 7, 11, 13, 16, 21, 23, 18, - 10, 13, 16, 17, 21, 24, 24, 20, 14, 18, 19, 20, 22, 20, 21, 20 }, - { 3, 4, 5, 9, 20, 20, 20, 20, 4, 4, 5, 13, 20, 20, 20, 20, - 5, 5, 11, 20, 20, 20, 20, 20, 9, 13, 20, 20, 20, 20, 20, 20, - 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, - 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20 }, - - /* index 5, Q60 */ - { 13, 9, 8, 13, 19, 32, 41, 49, 10, 10, 11, 15, 21, 46, 48, 44, - 11, 10, 13, 19, 32, 46, 55, 45, 11, 14, 18, 23, 41, 70, 64, 50, - 14, 18, 30, 45, 54, 87, 82, 62, 19, 28, 44, 51, 65, 83, 90, 74, - 39, 51, 62, 70, 82, 97, 96, 81, 58, 74, 76, 78, 90, 80, 82, 79 }, - { 14, 14, 19, 38, 79, 79, 79, 79, 14, 17, 21, 53, 79, 79, 79, 79, - 19, 21, 45, 79, 79, 79, 79, 79, 38, 53, 79, 79, 79, 79, 79, 79, - 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, - 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79 }, - - /* index 6, Q25 */ - { 32, 22, 20, 32, 48, 80,102,122, 24, 24, 28, 38, 52,116,120,110, - 28, 26, 32, 48, 80,114,138,112, 28, 34, 44, 58,102,174,160,124, - 36, 44, 74,112,136,218,206,154, 48, 70,110,128,162,208,226,184, - 98,128,156,174,206,242,240,202,144,184,190,196,224,200,206,198 }, - { 34, 36, 48, 94,198,198,198,198, 36, 42, 52,132,198,198,198,198, - 48, 52,112,198,198,198,198,198, 94,132,198,198,198,198,198,198, - 
198,198,198,198,198,198,198,198,198,198,198,198,198,198,198,198, - 198,198,198,198,198,198,198,198,198,198,198,198,198,198,198,198 }, - - /* index 7, Q95 */ - { 2, 1, 1, 2, 2, 4, 5, 6, 1, 1, 1, 2, 3, 6, 6, 6, - 1, 1, 2, 2, 4, 6, 7, 6, 1, 2, 2, 3, 5, 9, 8, 6, - 2, 2, 4, 6, 7, 11, 10, 8, 2, 4, 6, 6, 8, 10, 11, 9, - 5, 6, 8, 9, 10, 12, 12, 10, 7, 9, 10, 10, 11, 10, 10, 10 }, - { 2, 2, 2, 5, 10, 10, 10, 10, 2, 2, 3, 7, 10, 10, 10, 10, - 2, 3, 6, 10, 10, 10, 10, 10, 5, 7, 10, 10, 10, 10, 10, 10, - 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, - 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10 }, - - /* index 8, Q93 */ - { 2, 2, 1, 2, 3, 6, 7, 9, 2, 2, 2, 3, 4, 8, 8, 8, - 2, 2, 2, 3, 6, 8, 10, 8, 2, 2, 3, 4, 7, 12, 11, 9, - 3, 3, 5, 8, 10, 15, 14, 11, 3, 5, 8, 9, 11, 15, 16, 13, - 7, 9, 11, 12, 14, 17, 17, 14, 10, 13, 13, 14, 16, 14, 14, 14 }, - { 2, 3, 3, 7, 14, 14, 14, 14, 3, 3, 4, 9, 14, 14, 14, 14, - 3, 4, 8, 14, 14, 14, 14, 14, 7, 9, 14, 14, 14, 14, 14, 14, - 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, - 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14 } -}; -#endif - #endif /* AVCODEC_SP5X_H */ diff --git a/libavcodec/sp5xdec.c b/libavcodec/sp5xdec.c index 0b56c101db..4bf45f5454 100644 --- a/libavcodec/sp5xdec.c +++ b/libavcodec/sp5xdec.c @@ -94,29 +94,25 @@ static int sp5x_decode_frame(AVCodecContext *avctx, } AVCodec ff_sp5x_decoder = { - "sp5x", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_SP5X, - sizeof(MJpegDecodeContext), - ff_mjpeg_decode_init, - NULL, - ff_mjpeg_decode_end, - sp5x_decode_frame, - CODEC_CAP_DR1, - NULL, + .name = "sp5x", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_SP5X, + .priv_data_size = sizeof(MJpegDecodeContext), + .init = ff_mjpeg_decode_init, + .close = ff_mjpeg_decode_end, + .decode = sp5x_decode_frame, + .capabilities = CODEC_CAP_DR1, .max_lowres = 3, .long_name = NULL_IF_CONFIG_SMALL("Sunplus JPEG (SP5X)"), }; AVCodec ff_amv_decoder = { - "amv", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_AMV, - sizeof(MJpegDecodeContext), - ff_mjpeg_decode_init, - NULL, - ff_mjpeg_decode_end, - sp5x_decode_frame, - 0, + .name = "amv", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_AMV, + .priv_data_size = sizeof(MJpegDecodeContext), + .init = ff_mjpeg_decode_init, + .close = ff_mjpeg_decode_end, + .decode = sp5x_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("AMV Video"), }; diff --git a/libavcodec/sparc/dsputil_vis.c b/libavcodec/sparc/dsputil_vis.c index e4236602f6..bb80cd9b44 100644 --- a/libavcodec/sparc/dsputil_vis.c +++ b/libavcodec/sparc/dsputil_vis.c @@ -3953,10 +3953,11 @@ void dsputil_init_vis(DSPContext* c, AVCodecContext *avctx) { /* VIS-specific optimizations */ int accel = vis_level (); - const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8; + const int high_bit_depth = avctx->bits_per_raw_sample > 8; if (accel & ACCEL_SPARC_VIS) { - if(avctx->idct_algo==FF_IDCT_SIMPLEVIS){ + if (avctx->bits_per_raw_sample <= 8 && + avctx->idct_algo == FF_IDCT_SIMPLEVIS) { c->idct_put = ff_simple_idct_put_vis; c->idct_add = ff_simple_idct_add_vis; c->idct = ff_simple_idct_vis; diff --git a/libavcodec/sunrast.c b/libavcodec/sunrast.c index 558b0edd8f..58bd2a2ab8 100644 --- a/libavcodec/sunrast.c +++ b/libavcodec/sunrast.c @@ -185,15 +185,13 @@ static av_cold int sunrast_end(AVCodecContext *avctx) { } AVCodec ff_sunrast_decoder = { - "sunrast", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_SUNRAST, - sizeof(SUNRASTContext), - sunrast_init, - NULL, - sunrast_end, - sunrast_decode_frame, - CODEC_CAP_DR1, - 
NULL, + .name = "sunrast", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_SUNRAST, + .priv_data_size = sizeof(SUNRASTContext), + .init = sunrast_init, + .close = sunrast_end, + .decode = sunrast_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Sun Rasterfile image"), }; diff --git a/libavcodec/svq1dec.c b/libavcodec/svq1dec.c index 3ef8dc4c0e..a079ce4837 100644 --- a/libavcodec/svq1dec.c +++ b/libavcodec/svq1dec.c @@ -692,12 +692,12 @@ static int svq1_decode_frame(AVCodecContext *avctx, linesize= s->uvlinesize; } - current = s->current_picture.data[i]; + current = s->current_picture.f.data[i]; if(s->pict_type==AV_PICTURE_TYPE_B){ - previous = s->next_picture.data[i]; + previous = s->next_picture.f.data[i]; }else{ - previous = s->last_picture.data[i]; + previous = s->last_picture.f.data[i]; } if (s->pict_type == AV_PICTURE_TYPE_I) { @@ -811,15 +811,14 @@ static av_cold int svq1_decode_end(AVCodecContext *avctx) AVCodec ff_svq1_decoder = { - "svq1", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_SVQ1, - sizeof(MpegEncContext), - svq1_decode_init, - NULL, - svq1_decode_end, - svq1_decode_frame, - CODEC_CAP_DR1, + .name = "svq1", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_SVQ1, + .priv_data_size = sizeof(MpegEncContext), + .init = svq1_decode_init, + .close = svq1_decode_end, + .decode = svq1_decode_frame, + .capabilities = CODEC_CAP_DR1, .flush= ff_mpeg_flush, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV410P, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"), diff --git a/libavcodec/svq1enc.c b/libavcodec/svq1enc.c index edd6029209..25053e47e9 100644 --- a/libavcodec/svq1enc.c +++ b/libavcodec/svq1enc.c @@ -284,11 +284,11 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane s->m.avctx= s->avctx; s->m.current_picture_ptr= &s->m.current_picture; s->m.last_picture_ptr = &s->m.last_picture; - s->m.last_picture.data[0]= ref_plane; + s->m.last_picture.f.data[0] = ref_plane; s->m.linesize= - s->m.last_picture.linesize[0]= - s->m.new_picture.linesize[0]= - s->m.current_picture.linesize[0]= stride; + s->m.last_picture.f.linesize[0] = + s->m.new_picture.f.linesize[0] = + s->m.current_picture.f.linesize[0] = stride; s->m.width= width; s->m.height= height; s->m.mb_width= block_width; @@ -318,9 +318,9 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane s->m.current_picture.mb_mean= (uint8_t *)s->dummy; s->m.current_picture.mb_var= (uint16_t*)s->dummy; s->m.current_picture.mc_mb_var= (uint16_t*)s->dummy; - s->m.current_picture.mb_type= s->dummy; + s->m.current_picture.f.mb_type = s->dummy; - s->m.current_picture.motion_val[0]= s->motion_val8[plane] + 2; + s->m.current_picture.f.motion_val[0] = s->motion_val8[plane] + 2; s->m.p_mv_table= s->motion_val16[plane] + s->m.mb_stride + 1; s->m.dsp= s->dsp; //move ff_init_me(&s->m); @@ -328,7 +328,7 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane s->m.me.dia_size= s->avctx->dia_size; s->m.first_slice_line=1; for (y = 0; y < block_height; y++) { - s->m.new_picture.data[0]= src - y*16*stride; //ugly + s->m.new_picture.f.data[0] = src - y*16*stride; //ugly s->m.mb_y= y; for(i=0; i<16 && i + 16*y<height; i++){ @@ -573,13 +573,13 @@ static av_cold int svq1_encode_end(AVCodecContext *avctx) AVCodec ff_svq1_encoder = { - "svq1", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_SVQ1, - sizeof(SVQ1Context), - svq1_encode_init, - svq1_encode_frame, - svq1_encode_end, + .name = "svq1", + .type = 
AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_SVQ1, + .priv_data_size = sizeof(SVQ1Context), + .init = svq1_encode_init, + .encode = svq1_encode_frame, + .close = svq1_encode_end, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV410P, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"), }; diff --git a/libavcodec/svq3.c b/libavcodec/svq3.c index b1f3601ee7..417507fc7e 100644 --- a/libavcodec/svq3.c +++ b/libavcodec/svq3.c @@ -290,8 +290,8 @@ static inline void svq3_mc_dir_part(MpegEncContext *s, } /* form component predictions */ - dest = s->current_picture.data[0] + x + y*s->linesize; - src = pic->data[0] + mx + my*s->linesize; + dest = s->current_picture.f.data[0] + x + y*s->linesize; + src = pic->f.data[0] + mx + my*s->linesize; if (emu) { s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, (width + 1), (height + 1), @@ -311,8 +311,8 @@ static inline void svq3_mc_dir_part(MpegEncContext *s, blocksize++; for (i = 1; i < 3; i++) { - dest = s->current_picture.data[i] + (x >> 1) + (y >> 1)*s->uvlinesize; - src = pic->data[i] + mx + my*s->uvlinesize; + dest = s->current_picture.f.data[i] + (x >> 1) + (y >> 1) * s->uvlinesize; + src = pic->f.data[i] + mx + my * s->uvlinesize; if (emu) { s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->uvlinesize, (width + 1), (height + 1), @@ -349,8 +349,8 @@ static inline int svq3_mc_dir(H264Context *h, int size, int mode, int dir, if (mode != PREDICT_MODE) { pred_motion(h, k, (part_width >> 2), dir, 1, &mx, &my); } else { - mx = s->next_picture.motion_val[0][b_xy][0]<<1; - my = s->next_picture.motion_val[0][b_xy][1]<<1; + mx = s->next_picture.f.motion_val[0][b_xy][0] << 1; + my = s->next_picture.f.motion_val[0][b_xy][1] << 1; if (dir == 0) { mx = ((mx * h->frame_num_offset) / h->prev_frame_num_offset + 1) >> 1; @@ -427,7 +427,9 @@ static inline int svq3_mc_dir(H264Context *h, int size, int mode, int dir, } /* write back motion vectors */ - fill_rectangle(s->current_picture.motion_val[dir][b_xy], part_width>>2, part_height>>2, h->b_stride, pack16to32(mx,my), 4); + fill_rectangle(s->current_picture.f.motion_val[dir][b_xy], + part_width >> 2, part_height >> 2, h->b_stride, + pack16to32(mx, my), 4); } } @@ -450,7 +452,7 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type) h->topright_samples_available = 0xFFFF; if (mb_type == 0) { /* SKIP */ - if (s->pict_type == AV_PICTURE_TYPE_P || s->next_picture.mb_type[mb_xy] == -1) { + if (s->pict_type == AV_PICTURE_TYPE_P || s->next_picture.f.mb_type[mb_xy] == -1) { svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 0, 0); if (s->pict_type == AV_PICTURE_TYPE_B) { @@ -459,7 +461,7 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type) mb_type = MB_TYPE_SKIP; } else { - mb_type = FFMIN(s->next_picture.mb_type[mb_xy], 6); + mb_type = FFMIN(s->next_picture.f.mb_type[mb_xy], 6); if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 0, 0) < 0) return -1; if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 1, 1) < 0) @@ -488,7 +490,7 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type) for (m = 0; m < 2; m++) { if (s->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1]+6] != -1) { for (i = 0; i < 4; i++) { - *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - 1 + i*h->b_stride]; + *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = *(uint32_t *) s->current_picture.f.motion_val[m][b_xy - 1 + i*h->b_stride]; } } else { for (i = 0; i < 4; i++) { @@ -496,18 +498,18 @@ static int 
svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type) } } if (s->mb_y > 0) { - memcpy(h->mv_cache[m][scan8[0] - 1*8], s->current_picture.motion_val[m][b_xy - h->b_stride], 4*2*sizeof(int16_t)); + memcpy(h->mv_cache[m][scan8[0] - 1*8], s->current_picture.f.motion_val[m][b_xy - h->b_stride], 4*2*sizeof(int16_t)); memset(&h->ref_cache[m][scan8[0] - 1*8], (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4); if (s->mb_x < (s->mb_width - 1)) { - *(uint32_t *) h->mv_cache[m][scan8[0] + 4 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride + 4]; + *(uint32_t *) h->mv_cache[m][scan8[0] + 4 - 1*8] = *(uint32_t *) s->current_picture.f.motion_val[m][b_xy - h->b_stride + 4]; h->ref_cache[m][scan8[0] + 4 - 1*8] = (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride + 1]+6] == -1 || h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride ] ] == -1) ? PART_NOT_AVAILABLE : 1; }else h->ref_cache[m][scan8[0] + 4 - 1*8] = PART_NOT_AVAILABLE; if (s->mb_x > 0) { - *(uint32_t *) h->mv_cache[m][scan8[0] - 1 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride - 1]; + *(uint32_t *) h->mv_cache[m][scan8[0] - 1 - 1*8] = *(uint32_t *) s->current_picture.f.motion_val[m][b_xy - h->b_stride - 1]; h->ref_cache[m][scan8[0] - 1 - 1*8] = (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride - 1]+3] == -1) ? PART_NOT_AVAILABLE : 1; }else h->ref_cache[m][scan8[0] - 1 - 1*8] = PART_NOT_AVAILABLE; @@ -528,7 +530,7 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type) return -1; } else { for (i = 0; i < 4; i++) { - memset(s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t)); + memset(s->current_picture.f.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t)); } } if (mb_type != 1) { @@ -536,7 +538,7 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type) return -1; } else { for (i = 0; i < 4; i++) { - memset(s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t)); + memset(s->current_picture.f.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t)); } } } @@ -591,7 +593,7 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type) } } - ff_h264_write_back_intra_pred_mode(h); + write_back_intra_pred_mode(h); if (mb_type == 8) { ff_h264_check_intra4x4_pred_mode(h); @@ -623,11 +625,11 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type) if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) { for (i = 0; i < 4; i++) { - memset(s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t)); + memset(s->current_picture.f.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t)); } if (s->pict_type == AV_PICTURE_TYPE_B) { for (i = 0; i < 4; i++) { - memset(s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t)); + memset(s->current_picture.f.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t)); } } } @@ -708,7 +710,7 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type) } h->cbp= cbp; - s->current_picture.mb_type[mb_xy] = mb_type; + s->current_picture.f.mb_type[mb_xy] = mb_type; if (IS_INTRA(mb_type)) { h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8); @@ -980,8 +982,8 @@ static int svq3_decode_frame(AVCodecContext *avctx, } /* for skipping the frame */ - s->current_picture.pict_type = s->pict_type; - s->current_picture.key_frame = (s->pict_type == AV_PICTURE_TYPE_I); + s->current_picture.f.pict_type = s->pict_type; + 
s->current_picture.f.key_frame = (s->pict_type == AV_PICTURE_TYPE_I); /* Skip B-frames if we do not have reference frames. */ if (s->last_picture_ptr == NULL && s->pict_type == AV_PICTURE_TYPE_B) @@ -1073,7 +1075,7 @@ static int svq3_decode_frame(AVCodecContext *avctx, } if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay) { - s->current_picture.mb_type[s->mb_x + s->mb_y*s->mb_stride] = + s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride] = (s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1; } } @@ -1126,15 +1128,14 @@ static int svq3_decode_end(AVCodecContext *avctx) } AVCodec ff_svq3_decoder = { - "svq3", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_SVQ3, - sizeof(SVQ3Context), - svq3_decode_init, - NULL, - svq3_decode_end, - svq3_decode_frame, - CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_DELAY, + .name = "svq3", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_SVQ3, + .priv_data_size = sizeof(SVQ3Context), + .init = svq3_decode_init, + .close = svq3_decode_end, + .decode = svq3_decode_frame, + .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_DELAY, .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"), .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_NONE}, }; diff --git a/libavcodec/tableprint.h b/libavcodec/tableprint.h index d81b9a387b..e126a72afb 100644 --- a/libavcodec/tableprint.h +++ b/libavcodec/tableprint.h @@ -56,7 +56,7 @@ void write_##type##_2d_array(const void *arg, int len, int len2)\ } /** - * \defgroup printfuncs Predefined functions for printing tables + * @name Predefined functions for printing tables * * \{ */ diff --git a/libavcodec/targa.c b/libavcodec/targa.c index 3125504b7c..6c01b695b2 100644 --- a/libavcodec/targa.c +++ b/libavcodec/targa.c @@ -22,6 +22,7 @@ #include "libavutil/intreadwrite.h" #include "libavutil/imgutils.h" #include "avcodec.h" +#include "bytestream.h" #include "targa.h" typedef struct TargaContext { @@ -108,21 +109,26 @@ static int decode_frame(AVCodecContext *avctx, AVFrame * const p= (AVFrame*)&s->picture; uint8_t *dst; int stride; - int idlen, compr, y, w, h, bpp, flags; + int idlen, pal, compr, y, w, h, bpp, flags; int first_clr, colors, csize; /* parse image header */ CHECK_BUFFER_SIZE(buf, buf_end, 18, "header"); idlen = *buf++; - buf++; /* pal */ + pal = *buf++; compr = *buf++; - first_clr = AV_RL16(buf); buf += 2; - colors = AV_RL16(buf); buf += 2; + first_clr = bytestream_get_le16(&buf); + colors = bytestream_get_le16(&buf); csize = *buf++; + if (!pal && (first_clr || colors || csize)) { + av_log(avctx, AV_LOG_WARNING, "File without colormap has colormap information set.\n"); + // specification says we should ignore those value in this case + first_clr = colors = csize = 0; + } buf += 2; /* x */ - y = AV_RL16(buf); buf += 2; - w = AV_RL16(buf); buf += 2; - h = AV_RL16(buf); buf += 2; + y = bytestream_get_le16(&buf); + w = bytestream_get_le16(&buf); + h = bytestream_get_le16(&buf); bpp = *buf++; flags = *buf++; //skip identifier if any @@ -193,13 +199,10 @@ static int decode_frame(AVCodecContext *avctx, if(avctx->pix_fmt != PIX_FMT_PAL8)//should not occur but skip palette anyway buf += pal_size; else{ - int r, g, b, t; + int t; int32_t *pal = ((int32_t*)p->data[1]) + first_clr; for(t = 0; t < colors; t++){ - b = *buf++; - g = *buf++; - r = *buf++; - *pal++ = (0xff<<24) | (r << 16) | (g << 8) | b; + *pal++ = (0xff<<24) | bytestream_get_le24(&buf); } p->palette_has_changed = 1; } @@ -261,15 +264,13 @@ static av_cold int 
targa_end(AVCodecContext *avctx){ } AVCodec ff_targa_decoder = { - "targa", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_TARGA, - sizeof(TargaContext), - targa_init, - NULL, - targa_end, - decode_frame, - CODEC_CAP_DR1, - NULL, + .name = "targa", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_TARGA, + .priv_data_size = sizeof(TargaContext), + .init = targa_init, + .close = targa_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Truevision Targa image"), }; diff --git a/libavcodec/tiertexseqv.c b/libavcodec/tiertexseqv.c index f3a044882e..8f299be666 100644 --- a/libavcodec/tiertexseqv.c +++ b/libavcodec/tiertexseqv.c @@ -221,14 +221,13 @@ static av_cold int seqvideo_decode_end(AVCodecContext *avctx) } AVCodec ff_tiertexseqvideo_decoder = { - "tiertexseqvideo", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_TIERTEXSEQVIDEO, - sizeof(SeqVideoContext), - seqvideo_decode_init, - NULL, - seqvideo_decode_end, - seqvideo_decode_frame, - CODEC_CAP_DR1, + .name = "tiertexseqvideo", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_TIERTEXSEQVIDEO, + .priv_data_size = sizeof(SeqVideoContext), + .init = seqvideo_decode_init, + .close = seqvideo_decode_end, + .decode = seqvideo_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Tiertex Limited SEQ video"), }; diff --git a/libavcodec/tiff.c b/libavcodec/tiff.c index c54eaee346..a42b27ffb1 100644 --- a/libavcodec/tiff.c +++ b/libavcodec/tiff.c @@ -21,7 +21,9 @@ /** * @file * TIFF image decoder + * @author Konstantin Shishkov */ + #include "avcodec.h" #if CONFIG_ZLIB #include <zlib.h> @@ -619,15 +621,13 @@ static av_cold int tiff_end(AVCodecContext *avctx) } AVCodec ff_tiff_decoder = { - "tiff", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_TIFF, - sizeof(TiffContext), - tiff_init, - NULL, - tiff_end, - decode_frame, - CODEC_CAP_DR1, - NULL, + .name = "tiff", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_TIFF, + .priv_data_size = sizeof(TiffContext), + .init = tiff_init, + .close = tiff_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("TIFF image"), }; diff --git a/libavcodec/tiff.h b/libavcodec/tiff.h index e1aca68e35..d5fad42771 100644 --- a/libavcodec/tiff.h +++ b/libavcodec/tiff.h @@ -18,17 +18,18 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ -#ifndef AVCODEC_TIFF_H -#define AVCODEC_TIFF_H - /** * @file * TIFF tables * * For more information about the TIFF format, check the official docs at: * http://partners.adobe.com/public/developer/tiff/index.html + * @author Konstantin Shishkov */ +#ifndef AVCODEC_TIFF_H +#define AVCODEC_TIFF_H + #include <stdint.h> /** abridged list of TIFF tags */ diff --git a/libavcodec/tiffenc.c b/libavcodec/tiffenc.c index f7228f128f..d635e17aad 100644 --- a/libavcodec/tiffenc.c +++ b/libavcodec/tiffenc.c @@ -20,10 +20,11 @@ */ /** - * TIFF image encoder * @file + * TIFF image encoder * @author Bartlomiej Wolowiec */ + #include "avcodec.h" #if CONFIG_ZLIB #include <zlib.h> @@ -441,16 +442,11 @@ fail: } AVCodec ff_tiff_encoder = { - "tiff", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_TIFF, - sizeof(TiffEncoderContext), - NULL, - encode_frame, - NULL, - NULL, - 0, - NULL, + .name = "tiff", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_TIFF, + .priv_data_size = sizeof(TiffEncoderContext), + .encode = encode_frame, .pix_fmts = (const enum PixelFormat[]) {PIX_FMT_RGB24, PIX_FMT_PAL8, PIX_FMT_GRAY8, PIX_FMT_MONOBLACK, PIX_FMT_MONOWHITE, diff --git a/libavcodec/tmv.c b/libavcodec/tmv.c index b1083fe682..424cddf84d 
100644 --- a/libavcodec/tmv.c +++ b/libavcodec/tmv.c @@ -20,10 +20,10 @@ */ /** - * 8088flex TMV video decoder * @file + * 8088flex TMV video decoder * @author Daniel Verkamp - * @sa http://www.oldskool.org/pc/8088_Corruption + * @see http://www.oldskool.org/pc/8088_Corruption */ #include "avcodec.h" diff --git a/libavcodec/truemotion1.c b/libavcodec/truemotion1.c index 284dbd8e12..9c2a273f1c 100644 --- a/libavcodec/truemotion1.c +++ b/libavcodec/truemotion1.c @@ -893,14 +893,13 @@ static av_cold int truemotion1_decode_end(AVCodecContext *avctx) } AVCodec ff_truemotion1_decoder = { - "truemotion1", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_TRUEMOTION1, - sizeof(TrueMotion1Context), - truemotion1_decode_init, - NULL, - truemotion1_decode_end, - truemotion1_decode_frame, - CODEC_CAP_DR1, + .name = "truemotion1", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_TRUEMOTION1, + .priv_data_size = sizeof(TrueMotion1Context), + .init = truemotion1_decode_init, + .close = truemotion1_decode_end, + .decode = truemotion1_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Duck TrueMotion 1.0"), }; diff --git a/libavcodec/truemotion2.c b/libavcodec/truemotion2.c index 2b9a0cba72..4967e29d93 100644 --- a/libavcodec/truemotion2.c +++ b/libavcodec/truemotion2.c @@ -865,14 +865,13 @@ static av_cold int decode_end(AVCodecContext *avctx){ } AVCodec ff_truemotion2_decoder = { - "truemotion2", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_TRUEMOTION2, - sizeof(TM2Context), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, + .name = "truemotion2", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_TRUEMOTION2, + .priv_data_size = sizeof(TM2Context), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Duck TrueMotion 2.0"), }; diff --git a/libavcodec/truespeech.c b/libavcodec/truespeech.c index d903a0119e..3aa3444568 100644 --- a/libavcodec/truespeech.c +++ b/libavcodec/truespeech.c @@ -382,13 +382,11 @@ static int truespeech_decode_frame(AVCodecContext *avctx, } AVCodec ff_truespeech_decoder = { - "truespeech", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_TRUESPEECH, - sizeof(TSContext), - truespeech_decode_init, - NULL, - NULL, - truespeech_decode_frame, + .name = "truespeech", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_TRUESPEECH, + .priv_data_size = sizeof(TSContext), + .init = truespeech_decode_init, + .decode = truespeech_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("DSP Group TrueSpeech"), }; diff --git a/libavcodec/tscc.c b/libavcodec/tscc.c index b12a608555..31b691dd0e 100644 --- a/libavcodec/tscc.c +++ b/libavcodec/tscc.c @@ -196,15 +196,14 @@ static av_cold int decode_end(AVCodecContext *avctx) } AVCodec ff_tscc_decoder = { - "camtasia", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_TSCC, - sizeof(CamtasiaContext), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, - .long_name = NULL_IF_CONFIG_SMALL("TechSmith Screen Capture Codec"), + .name = "camtasia", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_TSCC, + .priv_data_size = sizeof(CamtasiaContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, + .long_name = NULL_IF_CONFIG_SMALL("TechSmith Screen Capture Codec"), }; diff --git a/libavcodec/tta.c b/libavcodec/tta.c index dccca46132..1ce1946333 100644 --- a/libavcodec/tta.c +++ b/libavcodec/tta.c @@ -22,9 +22,9 @@ /** * @file * TTA (The Lossless True Audio) decoder - * (www.true-audio.com or tta.corecodec.org) + * @see 
http://www.true-audio.com/ + * @see http://tta.corecodec.org/ * @author Alex Beregszaszi - * */ #define ALT_BITSTREAM_READER_LE @@ -66,23 +66,6 @@ typedef struct TTAContext { TTAChannel *ch_ctx; } TTAContext; -#if 0 -static inline int shift_1(int i) -{ - if (i < 32) - return 1 << i; - else - return 0x80000000; // 16 << 31 -} - -static inline int shift_16(int i) -{ - if (i < 28) - return 16 << i; - else - return 0x80000000; // 16 << 27 -} -#else static const uint32_t shift_1[] = { 0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010, 0x00000020, 0x00000040, 0x00000080, @@ -97,7 +80,6 @@ static const uint32_t shift_1[] = { }; static const uint32_t * const shift_16 = shift_1 + 4; -#endif static const int32_t ttafilter_configs[4][2] = { {10, 1}, @@ -403,19 +385,6 @@ static int tta_decode_frame(AVCodecContext *avctx, } *predictor = *p; -#if 0 - // extract 32bit float from last two int samples - if (s->is_float && ((p - data) & 1)) { - uint32_t neg = *p & 0x80000000; - uint32_t hi = *(p - 1); - uint32_t lo = abs(*p) - 1; - - hi += (hi || lo) ? 0x3f80 : 0; - // SWAP16: swap all the 16 bits - *(p - 1) = (hi << 16) | SWAP16(lo) | neg; - } -#endif - /*if ((get_bits_count(&s->gb)+7)/8 > buf_size) { av_log(NULL, AV_LOG_INFO, "overread!!\n"); @@ -485,13 +454,12 @@ static av_cold int tta_decode_close(AVCodecContext *avctx) { } AVCodec ff_tta_decoder = { - "tta", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_TTA, - sizeof(TTAContext), - tta_decode_init, - NULL, - tta_decode_close, - tta_decode_frame, + .name = "tta", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_TTA, + .priv_data_size = sizeof(TTAContext), + .init = tta_decode_init, + .close = tta_decode_close, + .decode = tta_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("True Audio (TTA)"), }; diff --git a/libavcodec/twinvq.c b/libavcodec/twinvq.c index f8e75bb933..e7aceebd5b 100644 --- a/libavcodec/twinvq.c +++ b/libavcodec/twinvq.c @@ -411,7 +411,7 @@ static inline float mulawinv(float y, float clip, float mu) * a*b == 200 and the nearest integer is ill-defined, use a table to emulate * the following broken float-based implementation used by the binary decoder: * - * \code + * @code * static int very_broken_op(int a, int b) * { * static float test; // Ugh, force gcc to do the division first... 
@@ -419,7 +419,7 @@ static inline float mulawinv(float y, float clip, float mu) * test = a/400.; * return b * test + 0.5; * } - * \endcode + * @endcode * * @note if this function is replaced by just ROUNDED_DIV(a*b,400.), the stddev * between the original file (before encoding with Yamaha encoder) and the @@ -938,14 +938,14 @@ static void permutate_in_line(int16_t *tab, int num_vect, int num_blocks, /** * Interpret the input data as in the following table: * - * \verbatim + * @verbatim * * abcdefgh * ijklmnop * qrstuvw * x123456 * - * \endverbatim + * @endverbatim * * and transpose it, giving the output * aiqxbjr1cks2dlt3emu4fvn5gow6hp diff --git a/libavcodec/txd.c b/libavcodec/txd.c index 0e25458c86..4299636f7b 100644 --- a/libavcodec/txd.c +++ b/libavcodec/txd.c @@ -156,15 +156,13 @@ static av_cold int txd_end(AVCodecContext *avctx) { } AVCodec ff_txd_decoder = { - "txd", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_TXD, - sizeof(TXDContext), - txd_init, - NULL, - txd_end, - txd_decode_frame, - CODEC_CAP_DR1, - NULL, + .name = "txd", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_TXD, + .priv_data_size = sizeof(TXDContext), + .init = txd_init, + .close = txd_end, + .decode = txd_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Renderware TXD (TeXture Dictionary) image"), }; diff --git a/libavcodec/ulti.c b/libavcodec/ulti.c index 9033cee98f..6d41cd9d06 100644 --- a/libavcodec/ulti.c +++ b/libavcodec/ulti.c @@ -403,16 +403,14 @@ static int ulti_decode_frame(AVCodecContext *avctx, } AVCodec ff_ulti_decoder = { - "ultimotion", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_ULTI, - sizeof(UltimotionDecodeContext), - ulti_decode_init, - NULL, - ulti_decode_end, - ulti_decode_frame, - CODEC_CAP_DR1, - NULL, + .name = "ultimotion", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_ULTI, + .priv_data_size = sizeof(UltimotionDecodeContext), + .init = ulti_decode_init, + .close = ulti_decode_end, + .decode = ulti_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("IBM UltiMotion"), }; diff --git a/libavcodec/utils.c b/libavcodec/utils.c index 028a3a1532..50a3eaa637 100644 --- a/libavcodec/utils.c +++ b/libavcodec/utils.c @@ -26,12 +26,13 @@ */ #include "libavutil/avstring.h" -#include "libavutil/integer.h" #include "libavutil/crc.h" +#include "libavutil/mathematics.h" #include "libavutil/pixdesc.h" #include "libavutil/audioconvert.h" #include "libavutil/imgutils.h" #include "libavutil/samplefmt.h" +#include "libavutil/dict.h" #include "avcodec.h" #include "dsputil.h" #include "libavutil/opt.h" @@ -149,6 +150,10 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int l case PIX_FMT_YUV420P10BE: case PIX_FMT_YUV422P10LE: case PIX_FMT_YUV422P10BE: + case PIX_FMT_YUV444P9LE: + case PIX_FMT_YUV444P9BE: + case PIX_FMT_YUV444P10LE: + case PIX_FMT_YUV444P10BE: w_align= 16; //FIXME check for non mpeg style codecs and use less alignment h_align= 16; if(s->codec_id == CODEC_ID_MPEG2VIDEO || s->codec_id == CODEC_ID_MJPEG || s->codec_id == CODEC_ID_AMV || s->codec_id == CODEC_ID_THP || s->codec_id == CODEC_ID_H264) @@ -494,9 +499,20 @@ static void avcodec_get_subtitle_defaults(AVSubtitle *sub) sub->pts = AV_NOPTS_VALUE; } +#if FF_API_AVCODEC_OPEN int attribute_align_arg avcodec_open(AVCodecContext *avctx, AVCodec *codec) { + return avcodec_open2(avctx, codec, NULL); +} +#endif + +int attribute_align_arg avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options) +{ int ret = 0; + AVDictionary *tmp = NULL; + + if (options) + 
av_dict_copy(&tmp, *options, 0); /* If there is a user-supplied mutex locking routine, call it. */ if (ff_lockmgr_cb) { @@ -523,14 +539,18 @@ int attribute_align_arg avcodec_open(AVCodecContext *avctx, AVCodec *codec) ret = AVERROR(ENOMEM); goto end; } - if(codec->priv_class){ //this can be droped once all user apps use avcodec_get_context_defaults3() + if (codec->priv_class) { *(AVClass**)avctx->priv_data= codec->priv_class; av_opt_set_defaults(avctx->priv_data); } } + if (codec->priv_class && (ret = av_opt_set_dict(avctx->priv_data, &tmp) < 0)) + goto free_and_end; } else { avctx->priv_data = NULL; } + if ((ret = av_opt_set_dict(avctx, &tmp)) < 0) + goto free_and_end; if(avctx->coded_width && avctx->coded_height) avcodec_set_dimensions(avctx, avctx->coded_width, avctx->coded_height); @@ -649,8 +669,14 @@ end: if (ff_lockmgr_cb) { (*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_RELEASE); } + if (options) { + av_dict_free(options); + *options = tmp; + } + return ret; free_and_end: + av_dict_free(&tmp); av_freep(&avctx->priv_data); avctx->codec= NULL; goto end; @@ -1083,7 +1109,7 @@ void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode) enc->height*enc->sample_aspect_ratio.den, 1024*1024); snprintf(buf + strlen(buf), buf_size - strlen(buf), - " [PAR %d:%d DAR %d:%d]", + " [SAR %d:%d DAR %d:%d]", enc->sample_aspect_ratio.num, enc->sample_aspect_ratio.den, display_aspect_ratio.num, display_aspect_ratio.den); } @@ -1189,7 +1215,7 @@ void avcodec_flush_buffers(AVCodecContext *avctx) { if(HAVE_PTHREADS && avctx->active_thread_type&FF_THREAD_FRAME) ff_thread_flush(avctx); - if(avctx->codec->flush) + else if(avctx->codec->flush) avctx->codec->flush(avctx); } diff --git a/libavcodec/v210dec.c b/libavcodec/v210dec.c index 94c5b5bb26..ecd88be22b 100644 --- a/libavcodec/v210dec.c +++ b/libavcodec/v210dec.c @@ -121,14 +121,12 @@ static av_cold int decode_close(AVCodecContext *avctx) } AVCodec ff_v210_decoder = { - "v210", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_V210, - 0, - decode_init, - NULL, - decode_close, - decode_frame, - CODEC_CAP_DR1, + .name = "v210", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_V210, + .init = decode_init, + .close = decode_close, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Uncompressed 4:2:2 10-bit"), }; diff --git a/libavcodec/v210enc.c b/libavcodec/v210enc.c index fd47d7a5e7..1991c8ccfe 100644 --- a/libavcodec/v210enc.c +++ b/libavcodec/v210enc.c @@ -118,13 +118,12 @@ static av_cold int encode_close(AVCodecContext *avctx) } AVCodec ff_v210_encoder = { - "v210", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_V210, - 0, - encode_init, - encode_frame, - encode_close, + .name = "v210", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_V210, + .init = encode_init, + .encode = encode_frame, + .close = encode_close, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV422P10, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("Uncompressed 4:2:2 10-bit"), }; diff --git a/libavcodec/v210x.c b/libavcodec/v210x.c index 64954cb6bb..ec74a3384f 100644 --- a/libavcodec/v210x.c +++ b/libavcodec/v210x.c @@ -133,14 +133,12 @@ static av_cold int decode_close(AVCodecContext *avctx) } AVCodec ff_v210x_decoder = { - "v210x", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_V210X, - 0, - decode_init, - NULL, - decode_close, - decode_frame, - CODEC_CAP_DR1, + .name = "v210x", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_V210X, + .init = decode_init, + .close = decode_close, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = 
NULL_IF_CONFIG_SMALL("Uncompressed 4:2:2 10-bit"), }; diff --git a/libavcodec/vaapi.c b/libavcodec/vaapi.c index de028a0a7e..774fde840f 100644 --- a/libavcodec/vaapi.c +++ b/libavcodec/vaapi.c @@ -24,7 +24,7 @@ #include "vaapi_internal.h" /** - * \addtogroup VAAPI_Decoding + * @addtogroup VAAPI_Decoding * * @{ */ diff --git a/libavcodec/vaapi.h b/libavcodec/vaapi.h index 07568a47fc..4c3bb9bb52 100644 --- a/libavcodec/vaapi.h +++ b/libavcodec/vaapi.h @@ -27,8 +27,8 @@ #include <stdint.h> /** - * \defgroup VAAPI_Decoding VA API Decoding - * \ingroup Decoder + * @defgroup VAAPI_Decoding VA API Decoding + * @ingroup Decoder * @{ */ diff --git a/libavcodec/vaapi_h264.c b/libavcodec/vaapi_h264.c index 922dc86963..718e180942 100644 --- a/libavcodec/vaapi_h264.c +++ b/libavcodec/vaapi_h264.c @@ -23,9 +23,10 @@ #include "vaapi_internal.h" #include "h264.h" -/** @file - * This file implements the glue code between FFmpeg's and VA API's - * structures for H.264 decoding. +/** + * @file + * This file implements the glue code between FFmpeg's and VA API's + * structures for H.264 decoding. */ /** @@ -54,7 +55,7 @@ static void fill_vaapi_pic(VAPictureH264 *va_pic, int pic_structure) { if (pic_structure == 0) - pic_structure = pic->reference; + pic_structure = pic->f.reference; pic_structure &= PICT_FRAME; /* PICT_TOP_FIELD|PICT_BOTTOM_FIELD */ va_pic->picture_id = ff_vaapi_get_surface_id(pic); @@ -63,7 +64,7 @@ static void fill_vaapi_pic(VAPictureH264 *va_pic, va_pic->flags = 0; if (pic_structure != PICT_FRAME) va_pic->flags |= (pic_structure & PICT_TOP_FIELD) ? VA_PICTURE_H264_TOP_FIELD : VA_PICTURE_H264_BOTTOM_FIELD; - if (pic->reference) + if (pic->f.reference) va_pic->flags |= pic->long_ref ? VA_PICTURE_H264_LONG_TERM_REFERENCE : VA_PICTURE_H264_SHORT_TERM_REFERENCE; va_pic->TopFieldOrderCnt = 0; @@ -133,13 +134,13 @@ static int fill_vaapi_ReferenceFrames(VAPictureParameterBufferH264 *pic_param, for (i = 0; i < h->short_ref_count; i++) { Picture * const pic = h->short_ref[i]; - if (pic && pic->reference && dpb_add(&dpb, pic) < 0) + if (pic && pic->f.reference && dpb_add(&dpb, pic) < 0) return -1; } for (i = 0; i < 16; i++) { Picture * const pic = h->long_ref[i]; - if (pic && pic->reference && dpb_add(&dpb, pic) < 0) + if (pic && pic->f.reference && dpb_add(&dpb, pic) < 0) return -1; } return 0; @@ -159,7 +160,7 @@ static void fill_vaapi_RefPicList(VAPictureH264 RefPicList[32], { unsigned int i, n = 0; for (i = 0; i < ref_count; i++) - if (ref_list[i].reference) + if (ref_list[i].f.reference) fill_vaapi_pic(&RefPicList[n++], &ref_list[i], 0); for (; n < 32; n++) diff --git a/libavcodec/vaapi_internal.h b/libavcodec/vaapi_internal.h index 2c0fdf945e..e514dd6f44 100644 --- a/libavcodec/vaapi_internal.h +++ b/libavcodec/vaapi_internal.h @@ -30,7 +30,7 @@ #include "mpegvideo.h" /** - * \addtogroup VAAPI_Decoding + * @addtogroup VAAPI_Decoding * * @{ */ @@ -38,7 +38,7 @@ /** Extract VASurfaceID from a Picture */ static inline VASurfaceID ff_vaapi_get_surface_id(Picture *pic) { - return (uintptr_t)pic->data[3]; + return (uintptr_t)pic->f.data[3]; } /** Common AVHWAccel.end_frame() implementation */ diff --git a/libavcodec/vaapi_mpeg2.c b/libavcodec/vaapi_mpeg2.c index 6c92a0ff72..dbe1d1296c 100644 --- a/libavcodec/vaapi_mpeg2.c +++ b/libavcodec/vaapi_mpeg2.c @@ -109,14 +109,14 @@ static int vaapi_mpeg2_decode_slice(AVCodecContext *avctx, const uint8_t *buffer MpegEncContext * const s = avctx->priv_data; VASliceParameterBufferMPEG2 *slice_param; GetBitContext gb; - uint32_t start_code av_unused, 
quantiser_scale_code, intra_slice_flag, macroblock_offset; + uint32_t quantiser_scale_code, intra_slice_flag, macroblock_offset; av_dlog(avctx, "vaapi_mpeg2_decode_slice(): buffer %p, size %d\n", buffer, size); /* Determine macroblock_offset */ init_get_bits(&gb, buffer, 8 * size); - start_code = get_bits(&gb, 32); - assert((start_code & 0xffffff00) == 0x00000100); + if (get_bits_long(&gb, 32) >> 8 != 1) /* start code */ + return AVERROR_INVALIDDATA; quantiser_scale_code = get_bits(&gb, 5); intra_slice_flag = get_bits1(&gb); if (intra_slice_flag) { diff --git a/libavcodec/vaapi_mpeg4.c b/libavcodec/vaapi_mpeg4.c index cff77740c2..f6e26d4130 100644 --- a/libavcodec/vaapi_mpeg4.c +++ b/libavcodec/vaapi_mpeg4.c @@ -79,7 +79,7 @@ static int vaapi_mpeg4_start_frame(AVCodecContext *avctx, av_unused const uint8_ pic_param->quant_precision = s->quant_precision; pic_param->vop_fields.value = 0; /* reset all bits */ pic_param->vop_fields.bits.vop_coding_type = s->pict_type - AV_PICTURE_TYPE_I; - pic_param->vop_fields.bits.backward_reference_vop_coding_type = s->pict_type == AV_PICTURE_TYPE_B ? s->next_picture.pict_type - AV_PICTURE_TYPE_I : 0; + pic_param->vop_fields.bits.backward_reference_vop_coding_type = s->pict_type == AV_PICTURE_TYPE_B ? s->next_picture.f.pict_type - AV_PICTURE_TYPE_I : 0; pic_param->vop_fields.bits.vop_rounding_type = s->no_rounding; pic_param->vop_fields.bits.intra_dc_vlc_thr = mpeg4_get_intra_dc_vlc_thr(s); pic_param->vop_fields.bits.top_field_first = s->top_field_first; diff --git a/libavcodec/vb.c b/libavcodec/vb.c index 3fb59cf377..622ea89790 100644 --- a/libavcodec/vb.c +++ b/libavcodec/vb.c @@ -289,14 +289,13 @@ static av_cold int decode_end(AVCodecContext *avctx) } AVCodec ff_vb_decoder = { - "vb", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_VB, - sizeof(VBDecContext), - decode_init, - NULL, - decode_end, - decode_frame, + .name = "vb", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_VB, + .priv_data_size = sizeof(VBDecContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, .long_name = NULL_IF_CONFIG_SMALL("Beam Software VB"), }; diff --git a/libavcodec/vc1.c b/libavcodec/vc1.c index e062a35cc1..9504238b86 100644 --- a/libavcodec/vc1.c +++ b/libavcodec/vc1.c @@ -40,7 +40,7 @@ /***********************************************************************/ /** - * @defgroup vc1bitplane VC-1 Bitplane decoding + * @name VC-1 Bitplane decoding * @see 8.7, p56 * @{ */ @@ -337,11 +337,11 @@ int vc1_decode_sequence_header(AVCodecContext *avctx, VC1Context *v, GetBitConte v->res_fasttx = get_bits1(gb); if (!v->res_fasttx) { - v->vc1dsp.vc1_inv_trans_8x8 = ff_simple_idct; + v->vc1dsp.vc1_inv_trans_8x8 = ff_simple_idct_8; v->vc1dsp.vc1_inv_trans_8x4 = ff_simple_idct84_add; v->vc1dsp.vc1_inv_trans_4x8 = ff_simple_idct48_add; v->vc1dsp.vc1_inv_trans_4x4 = ff_simple_idct44_add; - v->vc1dsp.vc1_inv_trans_8x8_dc = ff_simple_idct_add; + v->vc1dsp.vc1_inv_trans_8x8_dc = ff_simple_idct_add_8; v->vc1dsp.vc1_inv_trans_8x4_dc = ff_simple_idct84_add; v->vc1dsp.vc1_inv_trans_4x8_dc = ff_simple_idct48_add; v->vc1dsp.vc1_inv_trans_4x4_dc = ff_simple_idct44_add; diff --git a/libavcodec/vc1data.c b/libavcodec/vc1data.c index 5298079448..2fef110930 100644 --- a/libavcodec/vc1data.c +++ b/libavcodec/vc1data.c @@ -187,75 +187,6 @@ const uint8_t ff_vc1_norm6_bits[64] = { 4, 8, 8, 10, 8, 10, 10, 13, 8, 10, 10, 13, 10, 13, 13, 9, 8, 10, 10, 13, 10, 13, 13, 9, 10, 13, 13, 9, 13, 9, 9, 6, }; -#if 0 -/* Normal-6 imode */ -const uint8_t ff_vc1_norm6_spec[64][5] = { -{ 0, 1, 1 }, -{ 1, 2, 4 }, -{ 
2, 3, 4 }, -{ 3, 0, 8 }, -{ 4, 4, 4 }, -{ 5, 1, 8 }, -{ 6, 2, 8 }, -{ 7, 2, 5, 7, 5 }, -{ 8, 5, 4 }, -{ 9, 3, 8 }, -{10, 4, 8 }, -{11, 2, 5, 11, 5 }, -{12, 5, 8 }, -{13, 2, 5, 13, 5 }, -{14, 2, 5, 14, 5 }, -{15, 3, 5, 14, 8 }, -{16, 6, 4 }, -{17, 6, 8 }, -{18, 7, 8 }, -{19, 2, 5, 19, 5 }, -{20, 8, 8 }, -{21, 2, 5, 21, 5 }, -{22, 2, 5, 22, 5 }, -{23, 3, 5, 13, 8 }, -{24, 9, 8 }, -{25, 2, 5, 25, 5 }, -{26, 2, 5, 26, 5 }, -{27, 3, 5, 12, 8 }, -{28, 2, 5, 28, 5 }, -{29, 3, 5, 11, 8 }, -{30, 3, 5, 10, 8 }, -{31, 3, 5, 7, 4 }, -{32, 7, 4 }, -{33, 10, 8 }, -{34, 11, 8 }, -{35, 2, 5, 3, 5 }, -{36, 12, 8 }, -{37, 2, 5, 5, 5 }, -{38, 2, 5, 6, 5 }, -{39, 3, 5, 9, 8 }, -{40, 13, 8 }, -{41, 2, 5, 9, 5 }, -{42, 2, 5, 10, 5 }, -{43, 3, 5, 8, 8 }, -{44, 2, 5, 12, 5 }, -{45, 3, 5, 7, 8 }, -{46, 3, 5, 6, 8 }, -{47, 3, 5, 6, 4 }, -{48, 14, 8 }, -{49, 2, 5, 17, 5 }, -{50, 2, 5, 18, 5 }, -{51, 3, 5, 5, 8 }, -{52, 2, 5, 20, 5 }, -{53, 3, 5, 4, 8 }, -{54, 3, 5, 3, 8 }, -{55, 3, 5, 5, 4 }, -{56, 2, 5, 24, 5 }, -{57, 3, 5, 2, 8 }, -{58, 3, 5, 1, 8 }, -{59, 3, 5, 4, 4 }, -{60, 3, 5, 0, 8 }, -{61, 3, 5, 3, 4 }, -{62, 3, 5, 2, 4 }, -{63, 3, 5, 1, 1 }, -}; -#endif /* 4MV Block pattern VLC tables */ const uint8_t ff_vc1_4mv_block_pattern_codes[4][16] = { @@ -276,7 +207,7 @@ const uint8_t wmv3_dc_scale_table[32]={ }; /* P-Picture CBPCY VLC tables */ -#if 1 // Looks like original tables are not conforming to standard at all. Are they used for old WMV? +// Looks like original tables are not conforming to standard at all. Are they used for old WMV? const uint16_t ff_vc1_cbpcy_p_codes[4][64] = { { 0, 6, 15, 13, 13, 11, 3, 13, 5, 8, 49, 10, 12, 114, 102, 119, @@ -330,60 +261,6 @@ const uint8_t ff_vc1_cbpcy_p_bits[4][64] = { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8 } }; -#else -const uint16_t ff_vc1_cbpcy_p_codes[4][64] = { - { - 0, 1, 1, 4, 5, 1, 12, 4, 13, 14, 10, 11, 12, 7, 13, 2, - 15, 1, 96, 1, 49, 97, 2, 100, 3, 4, 5, 101, 102, 52, 53, 4, - 6, 7, 54, 103, 8, 9, 10, 110, 11, 12, 111, 56, 114, 58, 115, 5, - 13, 7, 8, 9, 10, 11, 12, 30, 13, 14, 15, 118, 119, 62, 63, 3 - }, - { - 0, 1, 2, 1, 3, 1, 16, 17, 5, 18, 12, 19, 13, 1, 28, 58, - 1, 1, 1, 2, 3, 2, 3, 236, 237, 4, 5, 238, 6, 7, 239, 8, - 9, 240, 10, 11, 121, 122, 12, 13, 14, 15, 241, 246, 16, 17, 124, 63, - 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 247, 125 - }, - { - 0, 1, 2, 3, 2, 3, 1, 4, 5, 24, 7, 13, 16, 17, 9, 5, - 25, 1, 1, 1, 2, 3, 96, 194, 1, 2, 98, 99, 195, 200, 101, 26, - 201, 102, 412, 413, 414, 54, 220, 111, 221, 3, 224, 113, 225, 114, 230, 29, - 231, 415, 240, 4, 241, 484, 5, 243, 3, 244, 245, 485, 492, 493, 247, 31 - }, - { - 0, 1, 1, 1, 2, 2, 3, 4, 3, 5, 6, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, - 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, - 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 28, 29, 30, 31 - } -}; -const uint8_t ff_vc1_cbpcy_p_bits[4][64] = { - { - 13, 6, 5, 6, 6, 7, 7, 5, 7, 7, 6, 6, 6, 5, 6, 3, - 7, 8, 8, 13, 7, 8, 13, 8, 13, 13, 13, 8, 8, 7, 7, 3, - 13, 13, 7, 8, 13, 13, 13, 8, 13, 13, 8, 7, 8, 7, 8, 3, - 13, 12, 12, 12, 12, 12, 12, 6, 12, 12, 12, 8, 8, 7, 7, 2 - }, - { - 14, 3, 3, 5, 3, 4, 5, 5, 3, 5, 4, 5, 4, 6, 5, 6, - 8, 14, 13, 8, 8, 13, 13, 8, 8, 13, 13, 8, 13, 13, 8, 13, - 13, 8, 13, 13, 7, 7, 13, 13, 13, 13, 8, 8, 13, 13, 7, 6, - 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 8, 7 - }, - { - 13, 5, 5, 5, 4, 4, 6, 4, 4, 6, 4, 5, 5, 5, 4, 3, - 6, 8, 10, 9, 8, 8, 7, 8, 13, 13, 7, 7, 8, 8, 7, 5, - 8, 7, 9, 9, 9, 6, 8, 7, 8, 13, 8, 7, 8, 7, 
8, 5, - 8, 9, 8, 13, 8, 9, 13, 8, 12, 8, 8, 9, 9, 9, 8, 5 - }, - { - 9, 2, 3, 9, 2, 9, 9, 9, 2, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8 - } -}; -#endif /* MacroBlock Transform Type: 7.1.3.11, p89 * 8x8:B diff --git a/libavcodec/vc1data.h b/libavcodec/vc1data.h index 934627a781..0a821f9a90 100644 --- a/libavcodec/vc1data.h +++ b/libavcodec/vc1data.h @@ -74,20 +74,11 @@ extern VLC ff_vc1_ac_coeff_table[8]; //@} -#if 0 //original bfraction from vc9data.h, not conforming to standard -/* Denominator used for ff_vc1_bfraction_lut */ -#define B_FRACTION_DEN 840 - -/* bfraction is fractional, we scale to the GCD 3*5*7*8 = 840 */ -extern const int16_t ff_vc1_bfraction_lut[23]; -#else /* Denominator used for ff_vc1_bfraction_lut */ #define B_FRACTION_DEN 256 /* pre-computed scales for all bfractions and base=256 */ extern const int16_t ff_vc1_bfraction_lut[23]; -#endif - extern const uint8_t ff_vc1_bfraction_bits[23]; extern const uint8_t ff_vc1_bfraction_codes[23]; diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c index 7093363084..c354a535fd 100644 --- a/libavcodec/vc1dec.c +++ b/libavcodec/vc1dec.c @@ -45,7 +45,6 @@ #define MB_INTRA_VLC_BITS 9 #define DC_VLC_BITS 9 #define AC_VLC_BITS 9 -static const uint16_t table_mb_intra[64][2]; static const uint16_t vlc_offs[] = { @@ -137,7 +136,7 @@ static int vc1_init_common(VC1Context *v) /***********************************************************************/ /** - * @defgroup vc1bitplane VC-1 Bitplane decoding + * @name VC-1 Bitplane decoding * @see 8.7, p56 * @{ */ @@ -408,15 +407,15 @@ static void vc1_mc_1mv(VC1Context *v, int dir) uint8_t *srcY, *srcU, *srcV; int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y; - if(!v->s.last_picture.data[0])return; + if(!v->s.last_picture.f.data[0])return; mx = s->mv[dir][0][0]; my = s->mv[dir][0][1]; // store motion vectors for further use in B frames if(s->pict_type == AV_PICTURE_TYPE_P) { - s->current_picture.motion_val[1][s->block_index[0]][0] = mx; - s->current_picture.motion_val[1][s->block_index[0]][1] = my; + s->current_picture.f.motion_val[1][s->block_index[0]][0] = mx; + s->current_picture.f.motion_val[1][s->block_index[0]][1] = my; } uvmx = (mx + ((mx & 3) == 3)) >> 1; uvmy = (my + ((my & 3) == 3)) >> 1; @@ -427,13 +426,13 @@ static void vc1_mc_1mv(VC1Context *v, int dir) uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1)); } if(!dir) { - srcY = s->last_picture.data[0]; - srcU = s->last_picture.data[1]; - srcV = s->last_picture.data[2]; + srcY = s->last_picture.f.data[0]; + srcU = s->last_picture.f.data[1]; + srcV = s->last_picture.f.data[2]; } else { - srcY = s->next_picture.data[0]; - srcU = s->next_picture.data[1]; - srcV = s->next_picture.data[2]; + srcY = s->next_picture.f.data[0]; + srcU = s->next_picture.f.data[1]; + srcV = s->next_picture.f.data[2]; } src_x = s->mb_x * 16 + (mx >> 2); @@ -560,10 +559,10 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n) int dxy, mx, my, src_x, src_y; int off; - if(!v->s.last_picture.data[0])return; + if(!v->s.last_picture.f.data[0])return; mx = s->mv[0][n][0]; my = s->mv[0][n][1]; - srcY = s->last_picture.data[0]; + srcY = s->last_picture.f.data[0]; off = s->linesize * 4 * (n&2) + (n&1) * 8; @@ -648,7 +647,7 @@ static void vc1_mc_4mv_chroma(VC1Context *v) int mvx[4], mvy[4], intra[4]; static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4}; - if(!v->s.last_picture.data[0])return; + 
if(!v->s.last_picture.f.data[0])return; if(s->flags & CODEC_FLAG_GRAY) return; for(i = 0; i < 4; i++) { @@ -688,14 +687,14 @@ static void vc1_mc_4mv_chroma(VC1Context *v) tx = (mvx[t1] + mvx[t2]) / 2; ty = (mvy[t1] + mvy[t2]) / 2; } else { - s->current_picture.motion_val[1][s->block_index[0]][0] = 0; - s->current_picture.motion_val[1][s->block_index[0]][1] = 0; + s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0; + s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0; v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0; return; //no need to do MC for inter blocks } - s->current_picture.motion_val[1][s->block_index[0]][0] = tx; - s->current_picture.motion_val[1][s->block_index[0]][1] = ty; + s->current_picture.f.motion_val[1][s->block_index[0]][0] = tx; + s->current_picture.f.motion_val[1][s->block_index[0]][1] = ty; uvmx = (tx + ((tx&3) == 3)) >> 1; uvmy = (ty + ((ty&3) == 3)) >> 1; v->luma_mv[s->mb_x][0] = uvmx; @@ -716,8 +715,8 @@ static void vc1_mc_4mv_chroma(VC1Context *v) uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1); } - srcU = s->last_picture.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x; - srcV = s->last_picture.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x; + srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x; + srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x; if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP) || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9 || (unsigned)uvsrc_y > (s->v_edge_pos >> 1) - 9){ @@ -774,7 +773,7 @@ static void vc1_mc_4mv_chroma(VC1Context *v) /***********************************************************************/ /** - * @defgroup vc1block VC-1 Block-level functions + * @name VC-1 Block-level functions * @see 7.1.4, p91 and 8.1.1.7, p(1)04 * @{ */ @@ -884,30 +883,30 @@ static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y, int m xy = s->block_index[n]; if(s->mb_intra){ - s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0; - s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0; - s->current_picture.motion_val[1][xy][0] = 0; - s->current_picture.motion_val[1][xy][1] = 0; + s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = 0; + s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = 0; + s->current_picture.f.motion_val[1][xy][0] = 0; + s->current_picture.f.motion_val[1][xy][1] = 0; if(mv1) { /* duplicate motion data for 1-MV block */ - s->current_picture.motion_val[0][xy + 1][0] = 0; - s->current_picture.motion_val[0][xy + 1][1] = 0; - s->current_picture.motion_val[0][xy + wrap][0] = 0; - s->current_picture.motion_val[0][xy + wrap][1] = 0; - s->current_picture.motion_val[0][xy + wrap + 1][0] = 0; - s->current_picture.motion_val[0][xy + wrap + 1][1] = 0; + s->current_picture.f.motion_val[0][xy + 1][0] = 0; + s->current_picture.f.motion_val[0][xy + 1][1] = 0; + s->current_picture.f.motion_val[0][xy + wrap][0] = 0; + s->current_picture.f.motion_val[0][xy + wrap][1] = 0; + s->current_picture.f.motion_val[0][xy + wrap + 1][0] = 0; + s->current_picture.f.motion_val[0][xy + wrap + 1][1] = 0; v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0; - s->current_picture.motion_val[1][xy + 1][0] = 0; - s->current_picture.motion_val[1][xy + 1][1] = 0; - s->current_picture.motion_val[1][xy + wrap][0] = 0; - s->current_picture.motion_val[1][xy + wrap][1] = 0; - s->current_picture.motion_val[1][xy + wrap + 1][0] = 0; - s->current_picture.motion_val[1][xy + wrap + 1][1] = 0; + s->current_picture.f.motion_val[1][xy + 1][0] = 0; + 
s->current_picture.f.motion_val[1][xy + 1][1] = 0; + s->current_picture.f.motion_val[1][xy + wrap][0] = 0; + s->current_picture.f.motion_val[1][xy + wrap][1] = 0; + s->current_picture.f.motion_val[1][xy + wrap + 1][0] = 0; + s->current_picture.f.motion_val[1][xy + wrap + 1][1] = 0; } return; } - C = s->current_picture.motion_val[0][xy - 1]; - A = s->current_picture.motion_val[0][xy - wrap]; + C = s->current_picture.f.motion_val[0][xy - 1]; + A = s->current_picture.f.motion_val[0][xy - wrap]; if(mv1) off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2; else { @@ -926,7 +925,7 @@ static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y, int m off = -1; } } - B = s->current_picture.motion_val[0][xy - wrap + off]; + B = s->current_picture.f.motion_val[0][xy - wrap + off]; if(!s->first_slice_line || (n==2 || n==3)) { // predictor A is not out of bounds if(s->mb_width == 1) { @@ -990,15 +989,15 @@ static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y, int m } } /* store MV using signed modulus of MV range defined in 4.11 */ - s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x; - s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y; + s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x; + s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y; if(mv1) { /* duplicate motion data for 1-MV block */ - s->current_picture.motion_val[0][xy + 1][0] = s->current_picture.motion_val[0][xy][0]; - s->current_picture.motion_val[0][xy + 1][1] = s->current_picture.motion_val[0][xy][1]; - s->current_picture.motion_val[0][xy + wrap][0] = s->current_picture.motion_val[0][xy][0]; - s->current_picture.motion_val[0][xy + wrap][1] = s->current_picture.motion_val[0][xy][1]; - s->current_picture.motion_val[0][xy + wrap + 1][0] = s->current_picture.motion_val[0][xy][0]; - s->current_picture.motion_val[0][xy + wrap + 1][1] = s->current_picture.motion_val[0][xy][1]; + s->current_picture.f.motion_val[0][xy + 1][0] = s->current_picture.f.motion_val[0][xy][0]; + s->current_picture.f.motion_val[0][xy + 1][1] = s->current_picture.f.motion_val[0][xy][1]; + s->current_picture.f.motion_val[0][xy + wrap][0] = s->current_picture.f.motion_val[0][xy][0]; + s->current_picture.f.motion_val[0][xy + wrap][1] = s->current_picture.f.motion_val[0][xy][1]; + s->current_picture.f.motion_val[0][xy + wrap + 1][0] = s->current_picture.f.motion_val[0][xy][0]; + s->current_picture.f.motion_val[0][xy + wrap + 1][1] = s->current_picture.f.motion_val[0][xy][1]; } } @@ -1011,7 +1010,7 @@ static void vc1_interp_mc(VC1Context *v) uint8_t *srcY, *srcU, *srcV; int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y; - if(!v->s.next_picture.data[0])return; + if(!v->s.next_picture.f.data[0])return; mx = s->mv[1][0][0]; my = s->mv[1][0][1]; @@ -1021,9 +1020,9 @@ static void vc1_interp_mc(VC1Context *v) uvmx = uvmx + ((uvmx<0)?-(uvmx&1):(uvmx&1)); uvmy = uvmy + ((uvmy<0)?-(uvmy&1):(uvmy&1)); } - srcY = s->next_picture.data[0]; - srcU = s->next_picture.data[1]; - srcV = s->next_picture.data[2]; + srcY = s->next_picture.f.data[0]; + srcU = s->next_picture.f.data[1]; + srcV = s->next_picture.f.data[2]; src_x = s->mb_x * 16 + (mx >> 2); src_y = s->mb_y * 16 + (my >> 2); @@ -1186,16 +1185,16 @@ static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int xy = s->block_index[0]; if(s->mb_intra) { - 
s->current_picture.motion_val[0][xy][0] = - s->current_picture.motion_val[0][xy][1] = - s->current_picture.motion_val[1][xy][0] = - s->current_picture.motion_val[1][xy][1] = 0; + s->current_picture.f.motion_val[0][xy][0] = + s->current_picture.f.motion_val[0][xy][1] = + s->current_picture.f.motion_val[1][xy][0] = + s->current_picture.f.motion_val[1][xy][1] = 0; return; } - s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample); - s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample); - s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample); - s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample); + s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample); + s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample); + s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample); + s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample); /* Pullback predicted motion vectors as specified in 8.4.5.4 */ s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6)); @@ -1203,18 +1202,18 @@ static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6)); s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6)); if(direct) { - s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0]; - s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1]; - s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0]; - s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1]; + s->current_picture.f.motion_val[0][xy][0] = s->mv[0][0][0]; + s->current_picture.f.motion_val[0][xy][1] = s->mv[0][0][1]; + s->current_picture.f.motion_val[1][xy][0] = s->mv[1][0][0]; + s->current_picture.f.motion_val[1][xy][1] = s->mv[1][0][1]; return; } if((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) { - C = s->current_picture.motion_val[0][xy - 2]; - A = s->current_picture.motion_val[0][xy - wrap*2]; + C = s->current_picture.f.motion_val[0][xy - 2]; + A = s->current_picture.f.motion_val[0][xy - wrap*2]; off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2; - B = s->current_picture.motion_val[0][xy - wrap*2 + off]; + B = s->current_picture.f.motion_val[0][xy - wrap*2 + off]; if(!s->mb_x) C[0] = C[1] = 0; if(!s->first_slice_line) { // predictor A is not out of bounds @@ -1289,10 +1288,10 @@ static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y; } if((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) { - C = s->current_picture.motion_val[1][xy - 2]; - A = s->current_picture.motion_val[1][xy - wrap*2]; + C = s->current_picture.f.motion_val[1][xy - 2]; + A = s->current_picture.f.motion_val[1][xy - wrap*2]; off = (s->mb_x == (s->mb_width - 1)) ? 
-2 : 2; - B = s->current_picture.motion_val[1][xy - wrap*2 + off]; + B = s->current_picture.f.motion_val[1][xy - wrap*2 + off]; if(!s->mb_x) C[0] = C[1] = 0; if(!s->first_slice_line) { // predictor A is not out of bounds @@ -1367,10 +1366,10 @@ static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x; s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y; } - s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0]; - s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1]; - s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0]; - s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1]; + s->current_picture.f.motion_val[0][xy][0] = s->mv[0][0][0]; + s->current_picture.f.motion_val[0][xy][1] = s->mv[0][0][1]; + s->current_picture.f.motion_val[1][xy][0] = s->mv[1][0][0]; + s->current_picture.f.motion_val[1][xy][1] = s->mv[1][0][1]; } /** Get predicted DC value for I-frames only @@ -1465,14 +1464,14 @@ static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n, b = dc_val[ - 1 - wrap]; a = dc_val[ - wrap]; /* scale predictors if needed */ - q1 = s->current_picture.qscale_table[mb_pos]; + q1 = s->current_picture.f.qscale_table[mb_pos]; if(c_avail && (n!= 1 && n!=3)) { - q2 = s->current_picture.qscale_table[mb_pos - 1]; + q2 = s->current_picture.f.qscale_table[mb_pos - 1]; if(q2 && q2 != q1) c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18; } if(a_avail && (n!= 2 && n!=3)) { - q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride]; + q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride]; if(q2 && q2 != q1) a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18; } @@ -1480,7 +1479,7 @@ static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n, int off = mb_pos; if(n != 1) off--; if(n != 2) off -= s->mb_stride; - q2 = s->current_picture.qscale_table[off]; + q2 = s->current_picture.f.qscale_table[off]; if(q2 && q2 != q1) b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18; } @@ -1512,7 +1511,7 @@ static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n, /** @} */ // Block group /** - * @defgroup vc1_std_mb VC1 Macroblock-level functions in Simple/Main Profiles + * @name VC1 Macroblock-level functions in Simple/Main Profiles * @see 7.1.4, p91 and 8.1.1.7, p(1)04 * @{ */ @@ -1854,9 +1853,9 @@ static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int c else //top ac_val -= 16 * s->block_wrap[n]; - q1 = s->current_picture.qscale_table[mb_pos]; - if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.qscale_table[mb_pos - 1]; - if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride]; + q1 = s->current_picture.f.qscale_table[mb_pos]; + if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.f.qscale_table[mb_pos - 1]; + if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride]; if(dc_pred_dir && n==1) q2 = q1; if(!dc_pred_dir && n==2) q2 = q1; if(n==3) q2 = q1; @@ -2061,9 +2060,9 @@ static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int c else //top ac_val -= 16 * s->block_wrap[n]; - q1 = s->current_picture.qscale_table[mb_pos]; - if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.qscale_table[mb_pos - 1]; - if(!dc_pred_dir 
&& a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride]; + q1 = s->current_picture.f.qscale_table[mb_pos]; + if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.f.qscale_table[mb_pos - 1]; + if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride]; if(dc_pred_dir && n==1) q2 = q1; if(!dc_pred_dir && n==2) q2 = q1; if(n==3) q2 = q1; @@ -2345,7 +2344,7 @@ static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_ bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4)) : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4)); mv_stride = s->b8_stride; - mv = &s->current_picture.motion_val[0][s->block_index[block_num] - 2 * mv_stride]; + mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - 2 * mv_stride]; } if (bottom_is_intra & 1 || block_is_intra & 1 || @@ -2407,7 +2406,7 @@ static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_ (mb_cbp >> ((block_num + 1) * 4)); right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4)) : (mb_is_intra >> ((block_num + 1) * 4)); - mv = &s->current_picture.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2]; + mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2]; } if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) { v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq); @@ -2503,10 +2502,10 @@ static int vc1_decode_p_mb(VC1Context *v) GET_MVDATA(dmv_x, dmv_y); if (s->mb_intra) { - s->current_picture.motion_val[1][s->block_index[0]][0] = 0; - s->current_picture.motion_val[1][s->block_index[0]][1] = 0; + s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0; + s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0; } - s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16; + s->current_picture.f.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16; vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]); /* FIXME Set DC val for inter block ? 
*/ @@ -2527,7 +2526,7 @@ static int vc1_decode_p_mb(VC1Context *v) mquant = v->pq; cbp = 0; } - s->current_picture.qscale_table[mb_pos] = mquant; + s->current_picture.f.qscale_table[mb_pos] = mquant; if (!v->ttmbf && !s->mb_intra && mb_has_coeffs) ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, @@ -2577,8 +2576,8 @@ static int vc1_decode_p_mb(VC1Context *v) v->mb_type[0][s->block_index[i]] = 0; s->dc_val[0][s->block_index[i]] = 0; } - s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP; - s->current_picture.qscale_table[mb_pos] = 0; + s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP; + s->current_picture.f.qscale_table[mb_pos] = 0; vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]); vc1_mc_1mv(v, 0); } @@ -2622,7 +2621,7 @@ static int vc1_decode_p_mb(VC1Context *v) if(!intra_count && !coded_inter) goto end; GET_MQUANT(); - s->current_picture.qscale_table[mb_pos] = mquant; + s->current_picture.f.qscale_table[mb_pos] = mquant; /* test if block is intra and has pred */ { int intrapred = 0; @@ -2676,7 +2675,7 @@ static int vc1_decode_p_mb(VC1Context *v) else //Skipped MB { s->mb_intra = 0; - s->current_picture.qscale_table[mb_pos] = 0; + s->current_picture.f.qscale_table[mb_pos] = 0; for (i=0; i<6; i++) { v->mb_type[0][s->block_index[i]] = 0; s->dc_val[0][s->block_index[i]] = 0; @@ -2687,7 +2686,7 @@ static int vc1_decode_p_mb(VC1Context *v) vc1_mc_4mv_luma(v, i); } vc1_mc_4mv_chroma(v); - s->current_picture.qscale_table[mb_pos] = 0; + s->current_picture.f.qscale_table[mb_pos] = 0; } } end: @@ -2735,7 +2734,7 @@ static void vc1_decode_b_mb(VC1Context *v) v->mb_type[0][s->block_index[i]] = 0; s->dc_val[0][s->block_index[i]] = 0; } - s->current_picture.qscale_table[mb_pos] = 0; + s->current_picture.f.qscale_table[mb_pos] = 0; if (!direct) { if (!skipped) { @@ -2771,7 +2770,7 @@ static void vc1_decode_b_mb(VC1Context *v) cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2); GET_MQUANT(); s->mb_intra = 0; - s->current_picture.qscale_table[mb_pos] = mquant; + s->current_picture.f.qscale_table[mb_pos] = mquant; if(!v->ttmbf) ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2); dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0; @@ -2786,7 +2785,7 @@ static void vc1_decode_b_mb(VC1Context *v) } if(s->mb_intra && !mb_has_coeffs) { GET_MQUANT(); - s->current_picture.qscale_table[mb_pos] = mquant; + s->current_picture.f.qscale_table[mb_pos] = mquant; s->ac_pred = get_bits1(gb); cbp = 0; vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype); @@ -2808,7 +2807,7 @@ static void vc1_decode_b_mb(VC1Context *v) s->ac_pred = get_bits1(gb); cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2); GET_MQUANT(); - s->current_picture.qscale_table[mb_pos] = mquant; + s->current_picture.f.qscale_table[mb_pos] = mquant; if(!v->ttmbf && !s->mb_intra && mb_has_coeffs) ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2); } @@ -2899,10 +2898,10 @@ static void vc1_decode_i_blocks(VC1Context *v) dst[5] = s->dest[2]; s->dsp.clear_blocks(s->block[0]); mb_pos = s->mb_x + s->mb_y * s->mb_width; - s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA; - s->current_picture.qscale_table[mb_pos] = v->pq; - s->current_picture.motion_val[1][s->block_index[0]][0] = 0; - s->current_picture.motion_val[1][s->block_index[0]][1] = 0; + s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA; + s->current_picture.f.qscale_table[mb_pos] = v->pq; + s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0; + 
s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0; // do actual MB decoding and displaying cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2); @@ -3030,9 +3029,9 @@ static void vc1_decode_i_blocks_adv(VC1Context *v) ff_update_block_index(s); s->dsp.clear_blocks(block[0]); mb_pos = s->mb_x + s->mb_y * s->mb_stride; - s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA; - s->current_picture.motion_val[1][s->block_index[0]][0] = 0; - s->current_picture.motion_val[1][s->block_index[0]][1] = 0; + s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA; + s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0; + s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0; // do actual MB decoding and displaying cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2); @@ -3046,7 +3045,7 @@ static void vc1_decode_i_blocks_adv(VC1Context *v) GET_MQUANT(); - s->current_picture.qscale_table[mb_pos] = mquant; + s->current_picture.f.qscale_table[mb_pos] = mquant; /* Set DC scale - y and c use the same */ s->y_dc_scale = s->y_dc_scale_table[mquant]; s->c_dc_scale = s->c_dc_scale_table[mquant]; @@ -3233,9 +3232,9 @@ static void vc1_decode_skip_blocks(VC1Context *v) s->mb_x = 0; ff_init_block_index(s); ff_update_block_index(s); - memcpy(s->dest[0], s->last_picture.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16); - memcpy(s->dest[1], s->last_picture.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8); - memcpy(s->dest[2], s->last_picture.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8); + memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16); + memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8); + memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8); ff_draw_horiz_band(s, s->mb_y * 16, 16); s->first_slice_line = 0; } @@ -3602,7 +3601,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, /* We need to set current_picture_ptr before reading the header, * otherwise we cannot store anything in there. 
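/* Illustrative sketch of the pattern applied by the hunks around here: fields
 * that Picture used to expose directly (data, motion_val, qscale_table,
 * mb_type, repeat_pict, ...) now live in an embedded AVFrame member named `f`,
 * so every access gains a `.f`. The types below are simplified stand-ins, not
 * the real libavcodec Picture/AVFrame definitions. */
#include <stdint.h>

typedef struct SketchFrame {
    uint8_t *data[4];             /* plane pointers */
    int16_t (*motion_val[2])[2];  /* per-block motion vectors */
    int8_t  *qscale_table;        /* per-macroblock quantizer */
} SketchFrame;

typedef struct SketchPicture {
    SketchFrame f;                /* frame data now sits in an embedded struct */
    int long_ref;                 /* codec-private state stays at the top level */
} SketchPicture;

/* call sites change from pic->data[0] to pic->f.data[0] */
static uint8_t *sketch_luma_plane(SketchPicture *pic)
{
    return pic->f.data[0];
}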
*/ - if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){ + if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) { int i= ff_find_unused_picture(s, 0); s->current_picture_ptr= &s->picture[i]; } @@ -3699,18 +3698,18 @@ static int vc1_decode_frame(AVCodecContext *avctx, av_log(v->s.avctx, AV_LOG_WARNING, "Sprite decoder: expected I-frame\n"); } - s->current_picture_ptr->repeat_pict = 0; + s->current_picture_ptr->f.repeat_pict = 0; if (v->rff){ - s->current_picture_ptr->repeat_pict = 1; + s->current_picture_ptr->f.repeat_pict = 1; }else if (v->rptfrm){ - s->current_picture_ptr->repeat_pict = v->rptfrm * 2; + s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2; } - s->current_picture_ptr->top_field_first = v->tff; + s->current_picture_ptr->f.top_field_first = v->tff; // for skipping the frame - s->current_picture.pict_type= s->pict_type; - s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I; + s->current_picture.f.pict_type = s->pict_type; + s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; /* skip B-frames if we don't have reference frames */ if(s->last_picture_ptr==NULL && (s->pict_type==AV_PICTURE_TYPE_B || s->dropable)){ @@ -3776,8 +3775,8 @@ static int vc1_decode_frame(AVCodecContext *avctx, MPV_frame_end(s); -assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type); -assert(s->current_picture.pict_type == s->pict_type); +assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type); +assert(s->current_picture.f.pict_type == s->pict_type); if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) { *pict= *(AVFrame*)s->current_picture_ptr; } else if (s->last_picture_ptr != NULL) { @@ -3840,16 +3839,14 @@ static const AVProfile profiles[] = { }; AVCodec ff_vc1_decoder = { - "vc1", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_VC1, - sizeof(VC1Context), - vc1_decode_init, - NULL, - vc1_decode_end, - vc1_decode_frame, - CODEC_CAP_DR1 | CODEC_CAP_DELAY, - NULL, + .name = "vc1", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_VC1, + .priv_data_size = sizeof(VC1Context), + .init = vc1_decode_init, + .close = vc1_decode_end, + .decode = vc1_decode_frame, + .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY, .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"), .pix_fmts = ff_hwaccel_pixfmt_list_420, .profiles = NULL_IF_CONFIG_SMALL(profiles) @@ -3857,16 +3854,14 @@ AVCodec ff_vc1_decoder = { #if CONFIG_WMV3_DECODER AVCodec ff_wmv3_decoder = { - "wmv3", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_WMV3, - sizeof(VC1Context), - vc1_decode_init, - NULL, - vc1_decode_end, - vc1_decode_frame, - CODEC_CAP_DR1 | CODEC_CAP_DELAY, - NULL, + .name = "wmv3", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_WMV3, + .priv_data_size = sizeof(VC1Context), + .init = vc1_decode_init, + .close = vc1_decode_end, + .decode = vc1_decode_frame, + .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY, .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"), .pix_fmts = ff_hwaccel_pixfmt_list_420, .profiles = NULL_IF_CONFIG_SMALL(profiles) @@ -3875,16 +3870,14 @@ AVCodec ff_wmv3_decoder = { #if CONFIG_WMV3_VDPAU_DECODER AVCodec ff_wmv3_vdpau_decoder = { - "wmv3_vdpau", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_WMV3, - sizeof(VC1Context), - vc1_decode_init, - NULL, - vc1_decode_end, - vc1_decode_frame, - CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU, - NULL, + .name = "wmv3_vdpau", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_WMV3, + .priv_data_size = sizeof(VC1Context), + .init = vc1_decode_init, + .close = vc1_decode_end, + .decode = vc1_decode_frame, + 
.capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU, .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"), .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_WMV3, PIX_FMT_NONE}, .profiles = NULL_IF_CONFIG_SMALL(profiles) @@ -3893,16 +3886,14 @@ AVCodec ff_wmv3_vdpau_decoder = { #if CONFIG_VC1_VDPAU_DECODER AVCodec ff_vc1_vdpau_decoder = { - "vc1_vdpau", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_VC1, - sizeof(VC1Context), - vc1_decode_init, - NULL, - vc1_decode_end, - vc1_decode_frame, - CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU, - NULL, + .name = "vc1_vdpau", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_VC1, + .priv_data_size = sizeof(VC1Context), + .init = vc1_decode_init, + .close = vc1_decode_end, + .decode = vc1_decode_frame, + .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU, .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"), .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_VC1, PIX_FMT_NONE}, .profiles = NULL_IF_CONFIG_SMALL(profiles) diff --git a/libavcodec/vcr1.c b/libavcodec/vcr1.c index 0d59b7e7ec..787f4944dd 100644 --- a/libavcodec/vcr1.c +++ b/libavcodec/vcr1.c @@ -178,27 +178,25 @@ static av_cold int encode_init(AVCodecContext *avctx){ #endif AVCodec ff_vcr1_decoder = { - "vcr1", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_VCR1, - sizeof(VCR1Context), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, + .name = "vcr1", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_VCR1, + .priv_data_size = sizeof(VCR1Context), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("ATI VCR1"), }; #if CONFIG_VCR1_ENCODER AVCodec ff_vcr1_encoder = { - "vcr1", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_VCR1, - sizeof(VCR1Context), - encode_init, - encode_frame, - //encode_end, + .name = "vcr1", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_VCR1, + .priv_data_size = sizeof(VCR1Context), + .init = encode_init, + .encode = encode_frame, .long_name = NULL_IF_CONFIG_SMALL("ATI VCR1"), }; #endif diff --git a/libavcodec/vdpau.c b/libavcodec/vdpau.c index 9dc2745922..ce5103a6af 100644 --- a/libavcodec/vdpau.c +++ b/libavcodec/vdpau.c @@ -33,7 +33,7 @@ #include "vdpau_internal.h" /** - * \addtogroup VDPAU_Decoding + * @addtogroup VDPAU_Decoding * * @{ */ @@ -46,7 +46,7 @@ void ff_vdpau_h264_set_reference_frames(MpegEncContext *s) Picture *pic; int i, list, pic_frame_idx; - render = (struct vdpau_render_state *)s->current_picture_ptr->data[0]; + render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0]; assert(render); rf = &render->info.h264.referenceFrames[0]; @@ -58,11 +58,11 @@ void ff_vdpau_h264_set_reference_frames(MpegEncContext *s) for (i = 0; i < ls; ++i) { pic = lp[i]; - if (!pic || !pic->reference) + if (!pic || !pic->f.reference) continue; pic_frame_idx = pic->long_ref ? pic->pic_id : pic->frame_num; - render_ref = (struct vdpau_render_state *)pic->data[0]; + render_ref = (struct vdpau_render_state *)pic->f.data[0]; assert(render_ref); rf2 = &render->info.h264.referenceFrames[0]; @@ -76,8 +76,8 @@ void ff_vdpau_h264_set_reference_frames(MpegEncContext *s) ++rf2; } if (rf2 != rf) { - rf2->top_is_reference |= (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE; - rf2->bottom_is_reference |= (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE; + rf2->top_is_reference |= (pic->f.reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE; + rf2->bottom_is_reference |= (pic->f.reference & PICT_BOTTOM_FIELD) ? 
VDP_TRUE : VDP_FALSE; continue; } @@ -86,8 +86,8 @@ void ff_vdpau_h264_set_reference_frames(MpegEncContext *s) rf->surface = render_ref->surface; rf->is_long_term = pic->long_ref; - rf->top_is_reference = (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE; - rf->bottom_is_reference = (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE; + rf->top_is_reference = (pic->f.reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE; + rf->bottom_is_reference = (pic->f.reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE; rf->field_order_cnt[0] = pic->field_poc[0]; rf->field_order_cnt[1] = pic->field_poc[1]; rf->frame_idx = pic_frame_idx; @@ -112,7 +112,7 @@ void ff_vdpau_add_data_chunk(MpegEncContext *s, { struct vdpau_render_state *render; - render = (struct vdpau_render_state *)s->current_picture_ptr->data[0]; + render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0]; assert(render); render->bitstream_buffers= av_fast_realloc( @@ -133,7 +133,7 @@ void ff_vdpau_h264_picture_start(MpegEncContext *s) struct vdpau_render_state *render; int i; - render = (struct vdpau_render_state *)s->current_picture_ptr->data[0]; + render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0]; assert(render); for (i = 0; i < 2; ++i) { @@ -151,14 +151,14 @@ void ff_vdpau_h264_picture_complete(MpegEncContext *s) H264Context *h = s->avctx->priv_data; struct vdpau_render_state *render; - render = (struct vdpau_render_state *)s->current_picture_ptr->data[0]; + render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0]; assert(render); render->info.h264.slice_count = h->slice_num; if (render->info.h264.slice_count < 1) return; - render->info.h264.is_reference = (s->current_picture_ptr->reference & 3) ? VDP_TRUE : VDP_FALSE; + render->info.h264.is_reference = (s->current_picture_ptr->f.reference & 3) ? VDP_TRUE : VDP_FALSE; render->info.h264.field_pic_flag = s->picture_structure != PICT_FRAME; render->info.h264.bottom_field_flag = s->picture_structure == PICT_BOTTOM_FIELD; render->info.h264.num_ref_frames = h->sps.ref_frame_count; @@ -198,7 +198,7 @@ void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf, if (!s->current_picture_ptr) return; - render = (struct vdpau_render_state *)s->current_picture_ptr->data[0]; + render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0]; assert(render); /* fill VdpPictureInfoMPEG1Or2 struct */ @@ -227,12 +227,12 @@ void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf, switch(s->pict_type){ case AV_PICTURE_TYPE_B: - next = (struct vdpau_render_state *)s->next_picture.data[0]; + next = (struct vdpau_render_state *)s->next_picture.f.data[0]; assert(next); render->info.mpeg.backward_reference = next->surface; // no return here, going to set forward prediction case AV_PICTURE_TYPE_P: - last = (struct vdpau_render_state *)s->last_picture.data[0]; + last = (struct vdpau_render_state *)s->last_picture.f.data[0]; if (!last) // FIXME: Does this test make sense? 
last = render; // predict second field from the first render->info.mpeg.forward_reference = last->surface; @@ -253,7 +253,7 @@ void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf, VC1Context *v = s->avctx->priv_data; struct vdpau_render_state *render, *last, *next; - render = (struct vdpau_render_state *)s->current_picture.data[0]; + render = (struct vdpau_render_state *)s->current_picture.f.data[0]; assert(render); /* fill LvPictureInfoVC1 struct */ @@ -297,12 +297,12 @@ void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf, switch(s->pict_type){ case AV_PICTURE_TYPE_B: - next = (struct vdpau_render_state *)s->next_picture.data[0]; + next = (struct vdpau_render_state *)s->next_picture.f.data[0]; assert(next); render->info.vc1.backward_reference = next->surface; // no break here, going to set forward prediction case AV_PICTURE_TYPE_P: - last = (struct vdpau_render_state *)s->last_picture.data[0]; + last = (struct vdpau_render_state *)s->last_picture.f.data[0]; if (!last) // FIXME: Does this test make sense? last = render; // predict second field from the first render->info.vc1.forward_reference = last->surface; @@ -324,7 +324,7 @@ void ff_vdpau_mpeg4_decode_picture(MpegEncContext *s, const uint8_t *buf, if (!s->current_picture_ptr) return; - render = (struct vdpau_render_state *)s->current_picture_ptr->data[0]; + render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0]; assert(render); /* fill VdpPictureInfoMPEG4Part2 struct */ @@ -353,13 +353,13 @@ void ff_vdpau_mpeg4_decode_picture(MpegEncContext *s, const uint8_t *buf, switch (s->pict_type) { case AV_PICTURE_TYPE_B: - next = (struct vdpau_render_state *)s->next_picture.data[0]; + next = (struct vdpau_render_state *)s->next_picture.f.data[0]; assert(next); render->info.mpeg4.backward_reference = next->surface; render->info.mpeg4.vop_coding_type = 2; // no break here, going to set forward prediction case AV_PICTURE_TYPE_P: - last = (struct vdpau_render_state *)s->last_picture.data[0]; + last = (struct vdpau_render_state *)s->last_picture.f.data[0]; assert(last); render->info.mpeg4.forward_reference = last->surface; } diff --git a/libavcodec/vdpau.h b/libavcodec/vdpau.h index ab5f682c62..e66d2dc76d 100644 --- a/libavcodec/vdpau.h +++ b/libavcodec/vdpau.h @@ -25,7 +25,7 @@ #define AVCODEC_VDPAU_H /** - * \defgroup Decoder VDPAU Decoder and Renderer + * @defgroup Decoder VDPAU Decoder and Renderer * * VDPAU hardware acceleration has two modules * - VDPAU decoding @@ -38,25 +38,25 @@ * and rendering (API calls) are done as part of the VDPAU * presentation (vo_vdpau.c) module. * - * \defgroup VDPAU_Decoding VDPAU Decoding - * \ingroup Decoder + * @defgroup VDPAU_Decoding VDPAU Decoding + * @ingroup Decoder * @{ */ #include <vdpau/vdpau.h> #include <vdpau/vdpau_x11.h> -/** \brief The videoSurface is used for rendering. */ +/** @brief The videoSurface is used for rendering. */ #define FF_VDPAU_STATE_USED_FOR_RENDER 1 /** - * \brief The videoSurface is needed for reference/prediction. + * @brief The videoSurface is needed for reference/prediction. * The codec manipulates this. */ #define FF_VDPAU_STATE_USED_FOR_REFERENCE 2 /** - * \brief This structure is used as a callback between the FFmpeg + * @brief This structure is used as a callback between the FFmpeg * decoder (vd_) and presentation (vo_) module. 
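/* Illustrative sketch for the vdpau.h hunk here, which only switches Doxygen
 * commands from the backslash form (\defgroup, \brief) to the @ form. A
 * minimal comment in the resulting style, attached to a purely hypothetical
 * declaration (function and parameter names are invented): */

/**
 * @brief Hand a decoded surface to the presentation layer.
 *
 * @param surface  opaque surface handle
 * @return 0 on success, a negative error code otherwise
 * @ingroup Decoder
 */
int sketch_present_surface(void *surface);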
* This is used for defining a video frame containing surface, * picture parameter, bitstream information etc which are passed diff --git a/libavcodec/version.h b/libavcodec/version.h index 7cbf020d90..f07970344d 100644 --- a/libavcodec/version.h +++ b/libavcodec/version.h @@ -21,7 +21,7 @@ #define AVCODEC_VERSION_H #define LIBAVCODEC_VERSION_MAJOR 52 -#define LIBAVCODEC_VERSION_MINOR 122 +#define LIBAVCODEC_VERSION_MINOR 123 #define LIBAVCODEC_VERSION_MICRO 0 #define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ @@ -101,5 +101,17 @@ #ifndef FF_API_GET_PIX_FMT_NAME #define FF_API_GET_PIX_FMT_NAME (LIBAVCODEC_VERSION_MAJOR < 54) #endif +#ifndef FF_API_ALLOC_CONTEXT +#define FF_API_ALLOC_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 54) +#endif +#ifndef FF_API_AVCODEC_OPEN +#define FF_API_AVCODEC_OPEN (LIBAVCODEC_VERSION_MAJOR < 54) +#endif +#ifndef FF_API_DRC_SCALE +#define FF_API_DRC_SCALE (LIBAVCODEC_VERSION_MAJOR < 54) +#endif +#ifndef FF_API_VERY_AGGRESSIVE +#define FF_API_VERY_AGGRESSIVE (LIBAVCODEC_VERSION_MAJOR < 54) +#endif #endif /* AVCODEC_VERSION_H */ diff --git a/libavcodec/vmdav.c b/libavcodec/vmdav.c index b9acfe921c..919789168e 100644 --- a/libavcodec/vmdav.c +++ b/libavcodec/vmdav.c @@ -547,26 +547,23 @@ static int vmdaudio_decode_frame(AVCodecContext *avctx, */ AVCodec ff_vmdvideo_decoder = { - "vmdvideo", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_VMDVIDEO, - sizeof(VmdVideoContext), - vmdvideo_decode_init, - NULL, - vmdvideo_decode_end, - vmdvideo_decode_frame, - CODEC_CAP_DR1, + .name = "vmdvideo", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_VMDVIDEO, + .priv_data_size = sizeof(VmdVideoContext), + .init = vmdvideo_decode_init, + .close = vmdvideo_decode_end, + .decode = vmdvideo_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Sierra VMD video"), }; AVCodec ff_vmdaudio_decoder = { - "vmdaudio", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_VMDAUDIO, - sizeof(VmdAudioContext), - vmdaudio_decode_init, - NULL, - NULL, - vmdaudio_decode_frame, + .name = "vmdaudio", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_VMDAUDIO, + .priv_data_size = sizeof(VmdAudioContext), + .init = vmdaudio_decode_init, + .decode = vmdaudio_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("Sierra VMD audio"), }; diff --git a/libavcodec/vmnc.c b/libavcodec/vmnc.c index 6455d86f77..4c9b26c6f8 100644 --- a/libavcodec/vmnc.c +++ b/libavcodec/vmnc.c @@ -510,15 +510,14 @@ static av_cold int decode_end(AVCodecContext *avctx) } AVCodec ff_vmnc_decoder = { - "vmnc", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_VMNC, - sizeof(VmncContext), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, + .name = "vmnc", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_VMNC, + .priv_data_size = sizeof(VmncContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("VMware Screen Codec / VMware Video"), }; diff --git a/libavcodec/vorbis.c b/libavcodec/vorbis.c index fc830293cc..1624948626 100644 --- a/libavcodec/vorbis.c +++ b/libavcodec/vorbis.c @@ -20,6 +20,12 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * Common code for Vorbis I encoder and decoder + * @author Denes Balatoni ( dbalatoni programozo hu ) + */ + #define ALT_BITSTREAM_READER_LE #include "avcodec.h" #include "get_bits.h" diff --git a/libavcodec/vorbisdec.c b/libavcodec/vorbisdec.c index 024c8fd3cf..a29481d1b9 100644 --- a/libavcodec/vorbisdec.c +++ b/libavcodec/vorbisdec.c @@ -20,6 +20,12 
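/* Illustrative sketch for the version.h hunk above: each FF_API_* guard stays
 * true until the next major bump, and deprecated declarations wrapped in such
 * a guard disappear automatically once the bump happens. The guard macro and
 * function below are invented for illustration; only the
 * (LIBAVCODEC_VERSION_MAJOR < 54) pattern is taken from the hunk. */
#ifndef FF_API_SKETCH_OLD_CALL
#define FF_API_SKETCH_OLD_CALL (LIBAVCODEC_VERSION_MAJOR < 54)
#endif

#if FF_API_SKETCH_OLD_CALL
/** @deprecated hypothetical legacy entry point, kept only until the bump */
attribute_deprecated int sketch_old_call(void);
#endif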
@@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * Vorbis I decoder + * @author Denes Balatoni ( dbalatoni programozo hu ) + */ + #include <inttypes.h> #include <math.h> @@ -1663,14 +1669,13 @@ static av_cold int vorbis_decode_close(AVCodecContext *avccontext) } AVCodec ff_vorbis_decoder = { - "vorbis", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_VORBIS, - sizeof(vorbis_context), - vorbis_decode_init, - NULL, - vorbis_decode_close, - vorbis_decode_frame, + .name = "vorbis", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_VORBIS, + .priv_data_size = sizeof(vorbis_context), + .init = vorbis_decode_init, + .close = vorbis_decode_close, + .decode = vorbis_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("Vorbis"), .channel_layouts = ff_vorbis_channel_layouts, .sample_fmts = (const enum AVSampleFormat[]) { diff --git a/libavcodec/vorbisenc.c b/libavcodec/vorbisenc.c index 617e2b7cc4..80d722db4c 100644 --- a/libavcodec/vorbisenc.c +++ b/libavcodec/vorbisenc.c @@ -1103,13 +1103,13 @@ static av_cold int vorbis_encode_close(AVCodecContext *avccontext) } AVCodec ff_vorbis_encoder = { - "vorbis", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_VORBIS, - sizeof(vorbis_enc_context), - vorbis_encode_init, - vorbis_encode_frame, - vorbis_encode_close, + .name = "vorbis", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_VORBIS, + .priv_data_size = sizeof(vorbis_enc_context), + .init = vorbis_encode_init, + .encode = vorbis_encode_frame, + .close = vorbis_encode_close, .capabilities= CODEC_CAP_DELAY | CODEC_CAP_EXPERIMENTAL, .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("Vorbis"), diff --git a/libavcodec/vp3.c b/libavcodec/vp3.c index 148f1179e3..7dcd14718b 100644 --- a/libavcodec/vp3.c +++ b/libavcodec/vp3.c @@ -2321,33 +2321,51 @@ static av_cold int theora_decode_init(AVCodecContext *avctx) return vp3_decode_init(avctx); } +static void vp3_decode_flush(AVCodecContext *avctx) +{ + Vp3DecodeContext *s = avctx->priv_data; + + if (s->golden_frame.data[0]) { + if (s->golden_frame.data[0] == s->last_frame.data[0]) + memset(&s->last_frame, 0, sizeof(AVFrame)); + if (s->current_frame.data[0] == s->golden_frame.data[0]) + memset(&s->current_frame, 0, sizeof(AVFrame)); + ff_thread_release_buffer(avctx, &s->golden_frame); + } + if (s->last_frame.data[0]) { + if (s->current_frame.data[0] == s->last_frame.data[0]) + memset(&s->current_frame, 0, sizeof(AVFrame)); + ff_thread_release_buffer(avctx, &s->last_frame); + } + if (s->current_frame.data[0]) + ff_thread_release_buffer(avctx, &s->current_frame); +} + AVCodec ff_theora_decoder = { - "theora", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_THEORA, - sizeof(Vp3DecodeContext), - theora_decode_init, - NULL, - vp3_decode_end, - vp3_decode_frame, - CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS, - NULL, + .name = "theora", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_THEORA, + .priv_data_size = sizeof(Vp3DecodeContext), + .init = theora_decode_init, + .close = vp3_decode_end, + .decode = vp3_decode_frame, + .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS, + .flush = vp3_decode_flush, .long_name = NULL_IF_CONFIG_SMALL("Theora"), .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context) }; #endif AVCodec ff_vp3_decoder = { - "vp3", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_VP3, - sizeof(Vp3DecodeContext), - vp3_decode_init, - NULL, - vp3_decode_end, - vp3_decode_frame, - CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | 
CODEC_CAP_FRAME_THREADS, - NULL, + .name = "vp3", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_VP3, + .priv_data_size = sizeof(Vp3DecodeContext), + .init = vp3_decode_init, + .close = vp3_decode_end, + .decode = vp3_decode_frame, + .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS, + .flush = vp3_decode_flush, .long_name = NULL_IF_CONFIG_SMALL("On2 VP3"), .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context) }; diff --git a/libavcodec/vp5.c b/libavcodec/vp5.c index e9de38e6dd..04df3e29a5 100644 --- a/libavcodec/vp5.c +++ b/libavcodec/vp5.c @@ -1,7 +1,4 @@ -/** - * @file - * VP5 compatible video decoder - * +/* * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org> * * This file is part of FFmpeg. @@ -21,6 +18,11 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * VP5 compatible video decoder + */ + #include <stdlib.h> #include <string.h> @@ -267,14 +269,13 @@ static av_cold int vp5_decode_init(AVCodecContext *avctx) } AVCodec ff_vp5_decoder = { - "vp5", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_VP5, - sizeof(VP56Context), - vp5_decode_init, - NULL, - ff_vp56_free, - ff_vp56_decode_frame, - CODEC_CAP_DR1, + .name = "vp5", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_VP5, + .priv_data_size = sizeof(VP56Context), + .init = vp5_decode_init, + .close = ff_vp56_free, + .decode = ff_vp56_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("On2 VP5"), }; diff --git a/libavcodec/vp56.c b/libavcodec/vp56.c index ad451c251f..0c6f45a767 100644 --- a/libavcodec/vp56.c +++ b/libavcodec/vp56.c @@ -1,7 +1,4 @@ -/** - * @file - * VP5 and VP6 compatible video decoder (common features) - * +/* * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org> * * This file is part of FFmpeg. @@ -21,6 +18,11 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * VP5 and VP6 compatible video decoder (common features) + */ + #include "avcodec.h" #include "bytestream.h" diff --git a/libavcodec/vp56.h b/libavcodec/vp56.h index d1735e5609..b982435156 100644 --- a/libavcodec/vp56.h +++ b/libavcodec/vp56.h @@ -1,7 +1,4 @@ -/** - * @file - * VP5 and VP6 compatible video decoder (common features) - * +/* * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org> * * This file is part of FFmpeg. @@ -21,6 +18,11 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * VP5 and VP6 compatible video decoder (common features) + */ + #ifndef AVCODEC_VP56_H #define AVCODEC_VP56_H diff --git a/libavcodec/vp56data.c b/libavcodec/vp56data.c index b0515c2410..a161f885d9 100644 --- a/libavcodec/vp56data.c +++ b/libavcodec/vp56data.c @@ -1,7 +1,4 @@ -/** - * @file - * VP5 and VP6 compatible video decoder (common data) - * +/* * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org> * * This file is part of FFmpeg. @@ -21,6 +18,11 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * VP5 and VP6 compatible video decoder (common data) + */ + #include "vp56data.h" const uint8_t vp56_b2p[] = { 0, 0, 0, 0, 1, 2, 3, 3, 3, 3 }; diff --git a/libavcodec/vp56data.h b/libavcodec/vp56data.h index 57b0968d89..cb9cf95998 100644 --- a/libavcodec/vp56data.h +++ b/libavcodec/vp56data.h @@ -1,7 +1,4 @@ -/** - * @file - * VP5 and VP6 compatible video decoder (common data) - * +/* * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org> * * This file is part of FFmpeg. 
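/* Illustrative sketch for the vp5/vp6 header hunks in this area, which all
 * make the same move: the Doxygen @file block is taken out of the copyright
 * banner and becomes its own comment after it, leaving a plain license header.
 * Generic resulting layout, with placeholder text only: */

/*
 * Copyright (C) <year> <author>
 *
 * This file is part of FFmpeg.
 * (LGPL boilerplate continues here)
 */

/**
 * @file
 * one-line description of what the file implements
 */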
@@ -21,6 +18,11 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * VP5 and VP6 compatible video decoder (common data) + */ + #ifndef AVCODEC_VP56DATA_H #define AVCODEC_VP56DATA_H diff --git a/libavcodec/vp5data.h b/libavcodec/vp5data.h index 5c2d46cde5..e16ff2da4b 100644 --- a/libavcodec/vp5data.h +++ b/libavcodec/vp5data.h @@ -1,7 +1,4 @@ -/** - * @file - * VP5 compatible video decoder - * +/* * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org> * * This file is part of FFmpeg. @@ -21,6 +18,11 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * VP5 compatible video decoder + */ + #ifndef AVCODEC_VP5DATA_H #define AVCODEC_VP5DATA_H diff --git a/libavcodec/vp6.c b/libavcodec/vp6.c index 0a02ce58d6..e6132abeb6 100644 --- a/libavcodec/vp6.c +++ b/libavcodec/vp6.c @@ -1,13 +1,6 @@ -/** - * @file - * VP6 compatible video decoder - * +/* * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org> * - * The VP6F decoder accepts an optional 1 byte extradata. It is composed of: - * - upper 4bits: difference between encoded width and visible width - * - lower 4bits: difference between encoded height and visible height - * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or @@ -25,6 +18,15 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * VP6 compatible video decoder + * + * The VP6F decoder accepts an optional 1 byte extradata. It is composed of: + * - upper 4 bits: difference between encoded width and visible width + * - lower 4 bits: difference between encoded height and visible height + */ + #include <stdlib.h> #include "avcodec.h" @@ -607,42 +609,39 @@ static av_cold int vp6_decode_free(AVCodecContext *avctx) } AVCodec ff_vp6_decoder = { - "vp6", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_VP6, - sizeof(VP56Context), - vp6_decode_init, - NULL, - vp6_decode_free, - ff_vp56_decode_frame, - CODEC_CAP_DR1, + .name = "vp6", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_VP6, + .priv_data_size = sizeof(VP56Context), + .init = vp6_decode_init, + .close = vp6_decode_free, + .decode = ff_vp56_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("On2 VP6"), }; /* flash version, not flipped upside-down */ AVCodec ff_vp6f_decoder = { - "vp6f", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_VP6F, - sizeof(VP56Context), - vp6_decode_init, - NULL, - vp6_decode_free, - ff_vp56_decode_frame, - CODEC_CAP_DR1, + .name = "vp6f", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_VP6F, + .priv_data_size = sizeof(VP56Context), + .init = vp6_decode_init, + .close = vp6_decode_free, + .decode = ff_vp56_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("On2 VP6 (Flash version)"), }; /* flash version, not flipped upside-down, with alpha channel */ AVCodec ff_vp6a_decoder = { - "vp6a", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_VP6A, - sizeof(VP56Context), - vp6_decode_init, - NULL, - vp6_decode_free, - ff_vp56_decode_frame, - CODEC_CAP_DR1, + .name = "vp6a", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_VP6A, + .priv_data_size = sizeof(VP56Context), + .init = vp6_decode_init, + .close = vp6_decode_free, + .decode = ff_vp56_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("On2 VP6 (Flash version, with alpha channel)"), }; diff --git a/libavcodec/vp6data.h b/libavcodec/vp6data.h index 1cfdbe7bea..3ebfd0e252 100644 --- a/libavcodec/vp6data.h +++ b/libavcodec/vp6data.h @@ -1,7 
+1,4 @@ -/** - * @file - * VP6 compatible video decoder - * +/* * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org> * * This file is part of FFmpeg. @@ -21,6 +18,11 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * VP6 compatible video decoder + */ + #ifndef AVCODEC_VP6DATA_H #define AVCODEC_VP6DATA_H diff --git a/libavcodec/vp6dsp.c b/libavcodec/vp6dsp.c index 1119b56705..67c6be07de 100644 --- a/libavcodec/vp6dsp.c +++ b/libavcodec/vp6dsp.c @@ -1,7 +1,4 @@ -/** - * @file - * VP6 DSP-oriented functions - * +/* * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org> * * This file is part of FFmpeg. @@ -21,6 +18,11 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * VP6 DSP-oriented functions + */ + #include "libavutil/common.h" #include "vp56dsp.h" diff --git a/libavcodec/vp8.c b/libavcodec/vp8.c index 082d8e5829..fb3f7f733a 100644 --- a/libavcodec/vp8.c +++ b/libavcodec/vp8.c @@ -641,8 +641,6 @@ void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, uint8_ * @param block destination for block coefficients * @param probs probabilities to use when reading trees from the bitstream * @param i initial coeff index, 0 unless a separate DC block is coded - * @param zero_nhood the initial prediction context for number of surrounding - * all-zero blocks (only left/top, so 0-2) * @param qmul array holding the dc/ac dequant factor at position 0/1 * @return 0 if no coeffs were decoded * otherwise, the index of the last coeff decoded plus one @@ -701,6 +699,17 @@ skip_eob: } #endif +/** + * @param c arithmetic bitstream reader context + * @param block destination for block coefficients + * @param probs probabilities to use when reading trees from the bitstream + * @param i initial coeff index, 0 unless a separate DC block is coded + * @param zero_nhood the initial prediction context for number of surrounding + * all-zero blocks (only left/top, so 0-2) + * @param qmul array holding the dc/ac dequant factor at position 0/1 + * @return 0 if no coeffs were decoded + * otherwise, the index of the last coeff decoded plus one + */ static av_always_inline int decode_block_coeffs(VP56RangeCoder *c, DCTELEM block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], @@ -1034,12 +1043,11 @@ static const uint8_t subpel_idx[3][8] = { }; /** - * Generic MC function. 
+ * luma MC function * * @param s VP8 decoding context - * @param luma 1 for luma (Y) planes, 0 for chroma (Cb/Cr) planes * @param dst target buffer for block data at block position - * @param src reference picture buffer at origin (0, 0) + * @param ref reference picture buffer at origin (0, 0) * @param mv motion vector (relative to block position) to get pixel data from * @param x_off horizontal position of block from origin (0, 0) * @param y_off vertical position of block from origin (0, 0) @@ -1083,6 +1091,23 @@ void vp8_mc_luma(VP8Context *s, uint8_t *dst, AVFrame *ref, const VP56mv *mv, } } +/** + * chroma MC function + * + * @param s VP8 decoding context + * @param dst1 target buffer for block data at block position (U plane) + * @param dst2 target buffer for block data at block position (V plane) + * @param ref reference picture buffer at origin (0, 0) + * @param mv motion vector (relative to block position) to get pixel data from + * @param x_off horizontal position of block from origin (0, 0) + * @param y_off vertical position of block from origin (0, 0) + * @param block_w width of block (16, 8 or 4) + * @param block_h height of block (always same as block_w) + * @param width width of src/dst plane data + * @param height height of src/dst plane data + * @param linesize size of a single line of plane data, including padding + * @param mc_func motion compensation function pointers (bilinear or sixtap MC) + */ static av_always_inline void vp8_mc_chroma(VP8Context *s, uint8_t *dst1, uint8_t *dst2, AVFrame *ref, const VP56mv *mv, int x_off, int y_off, @@ -1739,15 +1764,14 @@ static int vp8_decode_update_thread_context(AVCodecContext *dst, const AVCodecCo } AVCodec ff_vp8_decoder = { - "vp8", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_VP8, - sizeof(VP8Context), - vp8_decode_init, - NULL, - vp8_decode_free, - vp8_decode_frame, - CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS, + .name = "vp8", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_VP8, + .priv_data_size = sizeof(VP8Context), + .init = vp8_decode_init, + .close = vp8_decode_free, + .decode = vp8_decode_frame, + .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS, .flush = vp8_decode_flush, .long_name = NULL_IF_CONFIG_SMALL("On2 VP8"), .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp8_decode_init_thread_copy), diff --git a/libavcodec/vp8data.h b/libavcodec/vp8data.h index 9ff21a1a97..0ea24d7ed8 100644 --- a/libavcodec/vp8data.h +++ b/libavcodec/vp8data.h @@ -1,6 +1,4 @@ -/** - * VP8 compatible video decoder - * +/* * Copyright (C) 2010 David Conrad * Copyright (C) 2010 Ronald S. Bultje * @@ -21,6 +19,11 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * VP8 compatible video decoder + */ + #ifndef AVCODEC_VP8DATA_H #define AVCODEC_VP8DATA_H diff --git a/libavcodec/vp8dsp.c b/libavcodec/vp8dsp.c index 4391a7ebc7..ce90675d87 100644 --- a/libavcodec/vp8dsp.c +++ b/libavcodec/vp8dsp.c @@ -1,6 +1,4 @@ -/** - * VP8 compatible video decoder - * +/* * Copyright (C) 2010 David Conrad * Copyright (C) 2010 Ronald S. Bultje * @@ -21,6 +19,11 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * VP8 compatible video decoder + */ + #include "dsputil.h" #include "vp8dsp.h" diff --git a/libavcodec/vp8dsp.h b/libavcodec/vp8dsp.h index e8757ab7d0..987fa59a72 100644 --- a/libavcodec/vp8dsp.h +++ b/libavcodec/vp8dsp.h @@ -1,6 +1,4 @@ -/** - * VP8 compatible video decoder - * +/* * Copyright (C) 2010 David Conrad * Copyright (C) 2010 Ronald S. 
Bultje * @@ -21,6 +19,10 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * VP8 compatible video decoder + */ #ifndef AVCODEC_VP8DSP_H #define AVCODEC_VP8DSP_H diff --git a/libavcodec/vqavideo.c b/libavcodec/vqavideo.c index ca4fd94d75..67721097ef 100644 --- a/libavcodec/vqavideo.c +++ b/libavcodec/vqavideo.c @@ -21,9 +21,9 @@ /** * @file - * VQA Video Decoder by Mike Melanson (melanson@pcisys.net) - * For more information about the VQA format, visit: - * http://wiki.multimedia.cx/index.php?title=VQA + * VQA Video Decoder + * @author Mike Melanson (melanson@pcisys.net) + * @see http://wiki.multimedia.cx/index.php?title=VQA * * The VQA video decoder outputs PAL8 or RGB555 colorspace data, depending * on the type of data in the file. @@ -601,14 +601,13 @@ static av_cold int vqa_decode_end(AVCodecContext *avctx) } AVCodec ff_vqa_decoder = { - "vqavideo", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_WS_VQA, - sizeof(VqaContext), - vqa_decode_init, - NULL, - vqa_decode_end, - vqa_decode_frame, - CODEC_CAP_DR1, + .name = "vqavideo", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_WS_VQA, + .priv_data_size = sizeof(VqaContext), + .init = vqa_decode_init, + .close = vqa_decode_end, + .decode = vqa_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Westwood Studios VQA (Vector Quantized Animation) video"), }; diff --git a/libavcodec/wavpack.c b/libavcodec/wavpack.c index d5102320fd..ba974bd089 100644 --- a/libavcodec/wavpack.c +++ b/libavcodec/wavpack.c @@ -1190,14 +1190,13 @@ static int wavpack_decode_frame(AVCodecContext *avctx, } AVCodec ff_wavpack_decoder = { - "wavpack", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_WAVPACK, - sizeof(WavpackContext), - wavpack_decode_init, - NULL, - wavpack_decode_end, - wavpack_decode_frame, + .name = "wavpack", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_WAVPACK, + .priv_data_size = sizeof(WavpackContext), + .init = wavpack_decode_init, + .close = wavpack_decode_end, + .decode = wavpack_decode_frame, .capabilities = CODEC_CAP_SUBFRAMES, .long_name = NULL_IF_CONFIG_SMALL("WavPack"), }; diff --git a/libavcodec/wmaprodec.c b/libavcodec/wmaprodec.c index e1d942dca2..d4ec0c02af 100644 --- a/libavcodec/wmaprodec.c +++ b/libavcodec/wmaprodec.c @@ -1605,14 +1605,13 @@ static void flush(AVCodecContext *avctx) *@brief wmapro decoder */ AVCodec ff_wmapro_decoder = { - "wmapro", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_WMAPRO, - sizeof(WMAProDecodeCtx), - decode_init, - NULL, - decode_end, - decode_packet, + .name = "wmapro", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_WMAPRO, + .priv_data_size = sizeof(WMAProDecodeCtx), + .init = decode_init, + .close = decode_end, + .decode = decode_packet, .capabilities = CODEC_CAP_SUBFRAMES, .flush= flush, .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 9 Professional"), diff --git a/libavcodec/wmavoice.c b/libavcodec/wmavoice.c index 3604eac782..400d96386b 100644 --- a/libavcodec/wmavoice.c +++ b/libavcodec/wmavoice.c @@ -128,9 +128,7 @@ static const struct frame_type_desc { */ typedef struct { /** - * @defgroup struct_global Global values - * Global values, specified in the stream header / extradata or used - * all over. + * @name Global values specified in the stream header / extradata or used all over. * @{ */ GetBitContext gb; ///< packet bitreader. During decoder init, @@ -182,8 +180,9 @@ typedef struct { /** * @} - * @defgroup struct_packet Packet values - * Packet values, specified in the packet header or related to a packet. 
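/* Illustrative sketch of the AVCodec rewrite applied throughout this commit
 * (vp8, vqa, wavpack, wmapro above; wmavoice, wmv2, wnv1 below): positional
 * aggregate initializers become C99 designated initializers, so unused slots
 * such as the old NULL encode pointer can simply be omitted. The struct and
 * values below are simplified stand-ins, not the real AVCodec layout. */
typedef struct SketchCodec {
    const char *name;
    int         id;
    int       (*init)(void *ctx);
    int       (*close)(void *ctx);
    int       (*decode)(void *ctx, void *out, const void *in);
    int         capabilities;
} SketchCodec;

static int sketch_init(void *ctx)                              { return 0; }
static int sketch_close(void *ctx)                             { return 0; }
static int sketch_decode(void *ctx, void *out, const void *in) { return 0; }

/* fields are named, order no longer matters, omitted members become zero */
SketchCodec sketch_decoder = {
    .name         = "sketch",
    .id           = 1,
    .init         = sketch_init,
    .close        = sketch_close,
    .decode       = sketch_decode,
    .capabilities = 0,
};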
+ * + * @name Packet values specified in the packet header or related to a packet. + * * A packet is considered to be a single unit of data provided to this * decoder by the demuxer. * @{ @@ -213,7 +212,8 @@ typedef struct { /** * @} - * @defgroup struct_frame Frame and superframe values + * + * @name Frame and superframe values * Superframe and frame data - these can change from frame to frame, * although some of them do in that case serve as a cache / history for * the next frame or superframe. @@ -256,7 +256,9 @@ typedef struct { float synth_history[MAX_LSPS]; ///< see #excitation_history /** * @} - * @defgroup post_filter Postfilter values + * + * @name Postfilter values + * * Variables used for postfilter implementation, mostly history for * smoothing and so on, and context variables for FFT/iFFT. * @{ @@ -432,7 +434,7 @@ static av_cold int wmavoice_decode_init(AVCodecContext *ctx) } /** - * @defgroup postfilter Postfilter functions + * @name Postfilter functions * Postfilter functions (gain control, wiener denoise filter, DC filter, * kalman smoothening, plus surrounding code to wrap it) * @{ @@ -825,7 +827,7 @@ static void dequant_lsps(double *lsps, int num, } /** - * @defgroup lsp_dequant LSP dequantization routines + * @name LSP dequantization routines * LSP dequantization routines, for 10/16LSPs and independent/residual coding. * @note we assume enough bits are available, caller should check. * lsp10i() consumes 24 bits; lsp10r() consumes an additional 24 bits; @@ -969,7 +971,7 @@ static void dequant_lsp16r(GetBitContext *gb, /** * @} - * @defgroup aw Pitch-adaptive window coding functions + * @name Pitch-adaptive window coding functions * The next few functions are for pitch-adaptive window coding. * @{ */ @@ -2020,15 +2022,14 @@ static av_cold void wmavoice_flush(AVCodecContext *ctx) } AVCodec ff_wmavoice_decoder = { - "wmavoice", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_WMAVOICE, - sizeof(WMAVoiceContext), - wmavoice_decode_init, - NULL, - wmavoice_decode_end, - wmavoice_decode_packet, - CODEC_CAP_SUBFRAMES, + .name = "wmavoice", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_WMAVOICE, + .priv_data_size = sizeof(WMAVoiceContext), + .init = wmavoice_decode_init, + .close = wmavoice_decode_end, + .decode = wmavoice_decode_packet, + .capabilities = CODEC_CAP_SUBFRAMES, .flush = wmavoice_flush, .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio Voice"), }; diff --git a/libavcodec/wmv2dec.c b/libavcodec/wmv2dec.c index 21d1eeca3b..519ae61b93 100644 --- a/libavcodec/wmv2dec.c +++ b/libavcodec/wmv2dec.c @@ -32,7 +32,7 @@ static void parse_mb_skip(Wmv2Context * w){ int mb_x, mb_y; MpegEncContext * const s= &w->s; - uint32_t * const mb_type= s->current_picture_ptr->mb_type; + uint32_t * const mb_type = s->current_picture_ptr->f.mb_type; w->skip_type= get_bits(&s->gb, 2); switch(w->skip_type){ @@ -257,11 +257,11 @@ static int16_t *wmv2_pred_motion(Wmv2Context *w, int *px, int *py){ wrap = s->b8_stride; xy = s->block_index[0]; - mot_val = s->current_picture.motion_val[0][xy]; + mot_val = s->current_picture.f.motion_val[0][xy]; - A = s->current_picture.motion_val[0][xy - 1]; - B = s->current_picture.motion_val[0][xy - wrap]; - C = s->current_picture.motion_val[0][xy + 2 - wrap]; + A = s->current_picture.f.motion_val[0][xy - 1]; + B = s->current_picture.f.motion_val[0][xy - wrap]; + C = s->current_picture.f.motion_val[0][xy + 2 - wrap]; if(s->mb_x && !s->first_slice_line && !s->mspel && w->top_left_mv_flag) diff= FFMAX(FFABS(A[0] - B[0]), FFABS(A[1] - B[1])); @@ -343,7 +343,7 @@ int 
ff_wmv2_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) if(w->j_type) return 0; if (s->pict_type == AV_PICTURE_TYPE_P) { - if(IS_SKIP(s->current_picture.mb_type[s->mb_y * s->mb_stride + s->mb_x])){ + if (IS_SKIP(s->current_picture.f.mb_type[s->mb_y * s->mb_stride + s->mb_x])) { /* skip mb */ s->mb_intra = 0; for(i=0;i<6;i++) @@ -471,15 +471,14 @@ static av_cold int wmv2_decode_end(AVCodecContext *avctx) } AVCodec ff_wmv2_decoder = { - "wmv2", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_WMV2, - sizeof(Wmv2Context), - wmv2_decode_init, - NULL, - wmv2_decode_end, - ff_h263_decode_frame, - CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1, + .name = "wmv2", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_WMV2, + .priv_data_size = sizeof(Wmv2Context), + .init = wmv2_decode_init, + .close = wmv2_decode_end, + .decode = ff_h263_decode_frame, + .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 8"), .pix_fmts= ff_pixfmt_list_420, }; diff --git a/libavcodec/wmv2enc.c b/libavcodec/wmv2enc.c index 4a074e674c..bc9e4fa0ab 100644 --- a/libavcodec/wmv2enc.c +++ b/libavcodec/wmv2enc.c @@ -212,13 +212,13 @@ void ff_wmv2_encode_mb(MpegEncContext * s, } AVCodec ff_wmv2_encoder = { - "wmv2", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_WMV2, - sizeof(Wmv2Context), - wmv2_encode_init, - MPV_encode_picture, - MPV_encode_end, + .name = "wmv2", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_WMV2, + .priv_data_size = sizeof(Wmv2Context), + .init = wmv2_encode_init, + .encode = MPV_encode_picture, + .close = MPV_encode_end, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 8"), }; diff --git a/libavcodec/wnv1.c b/libavcodec/wnv1.c index 197cf7985d..6429a5b748 100644 --- a/libavcodec/wnv1.c +++ b/libavcodec/wnv1.c @@ -158,14 +158,13 @@ static av_cold int decode_end(AVCodecContext *avctx){ } AVCodec ff_wnv1_decoder = { - "wnv1", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_WNV1, - sizeof(WNV1Context), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, + .name = "wnv1", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_WNV1, + .priv_data_size = sizeof(WNV1Context), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Winnov WNV1"), }; diff --git a/libavcodec/ws-snd1.c b/libavcodec/ws-snd1.c index d6a60d441f..8e2e8ced99 100644 --- a/libavcodec/ws-snd1.c +++ b/libavcodec/ws-snd1.c @@ -147,13 +147,10 @@ static int ws_snd_decode_frame(AVCodecContext *avctx, } AVCodec ff_ws_snd1_decoder = { - "ws_snd1", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_WESTWOOD_SND1, - 0, - ws_snd_decode_init, - NULL, - NULL, - ws_snd_decode_frame, + .name = "ws_snd1", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_WESTWOOD_SND1, + .init = ws_snd_decode_init, + .decode = ws_snd_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("Westwood Audio (SND1)"), }; diff --git a/libavcodec/x86/Makefile b/libavcodec/x86/Makefile index de57b58d00..09f65e8691 100644 --- a/libavcodec/x86/Makefile +++ b/libavcodec/x86/Makefile @@ -15,6 +15,7 @@ YASM-OBJS-$(CONFIG_H264DSP) += x86/h264_deblock.o \ x86/h264_idct.o \ x86/h264_idct_10bit.o \ x86/h264_weight.o \ + x86/h264_weight_10bit.o \ YASM-OBJS-$(CONFIG_H264PRED) += x86/h264_intrapred.o \ x86/h264_intrapred_10bit.o @@ -46,6 +47,7 @@ MMX-OBJS-$(HAVE_YASM) += x86/dsputil_yasm.o \ x86/fmtconvert.o \ x86/h264_chromamc.o \ x86/h264_chromamc_10bit.o \ + x86/h264_qpel_10bit.o \ $(YASM-OBJS-yes) MMX-OBJS-$(CONFIG_FFT) += x86/fft.o diff 
--git a/libavcodec/x86/ac3dsp.asm b/libavcodec/x86/ac3dsp.asm index 99c5df340e..8c958a17ee 100644 --- a/libavcodec/x86/ac3dsp.asm +++ b/libavcodec/x86/ac3dsp.asm @@ -32,6 +32,11 @@ cextern ac3_bap_bits pw_bap_mul1: dw 21846, 21846, 0, 32768, 21846, 21846, 0, 32768 pw_bap_mul2: dw 5, 7, 0, 7, 5, 7, 0, 7 +; used in ff_ac3_extract_exponents() +pd_1: times 4 dd 1 +pd_151: times 4 dd 151 +pb_shuf_4dwb: db 0, 4, 8, 12 + SECTION .text ;----------------------------------------------------------------------------- @@ -346,3 +351,100 @@ cglobal ac3_compute_mantissa_size_sse2, 1,2,4, mant_cnt, sum movd eax, m0 add eax, sumd RET + +;------------------------------------------------------------------------------ +; void ff_ac3_extract_exponents(uint8_t *exp, int32_t *coef, int nb_coefs) +;------------------------------------------------------------------------------ + +%macro PABSD_MMX 2 ; src/dst, tmp + pxor %2, %2 + pcmpgtd %2, %1 + pxor %1, %2 + psubd %1, %2 +%endmacro + +%macro PABSD_SSSE3 1-2 ; src/dst, unused + pabsd %1, %1 +%endmacro + +%ifdef HAVE_AMD3DNOW +INIT_MMX +cglobal ac3_extract_exponents_3dnow, 3,3,0, exp, coef, len + add expq, lenq + lea coefq, [coefq+4*lenq] + neg lenq + movq m3, [pd_1] + movq m4, [pd_151] +.loop: + movq m0, [coefq+4*lenq ] + movq m1, [coefq+4*lenq+8] + PABSD_MMX m0, m2 + PABSD_MMX m1, m2 + pslld m0, 1 + por m0, m3 + pi2fd m2, m0 + psrld m2, 23 + movq m0, m4 + psubd m0, m2 + pslld m1, 1 + por m1, m3 + pi2fd m2, m1 + psrld m2, 23 + movq m1, m4 + psubd m1, m2 + packssdw m0, m0 + packuswb m0, m0 + packssdw m1, m1 + packuswb m1, m1 + punpcklwd m0, m1 + movd [expq+lenq], m0 + add lenq, 4 + jl .loop + REP_RET +%endif + +%macro AC3_EXTRACT_EXPONENTS 1 +cglobal ac3_extract_exponents_%1, 3,3,5, exp, coef, len + add expq, lenq + lea coefq, [coefq+4*lenq] + neg lenq + mova m2, [pd_1] + mova m3, [pd_151] +%ifidn %1, ssse3 ; + movd m4, [pb_shuf_4dwb] +%endif +.loop: + ; move 4 32-bit coefs to xmm0 + mova m0, [coefq+4*lenq] + ; absolute value + PABSD m0, m1 + ; convert to float and extract exponents + pslld m0, 1 + por m0, m2 + cvtdq2ps m1, m0 + psrld m1, 23 + mova m0, m3 + psubd m0, m1 + ; move the lowest byte in each of 4 dwords to the low dword +%ifidn %1, ssse3 + pshufb m0, m4 +%else + packssdw m0, m0 + packuswb m0, m0 +%endif + movd [expq+lenq], m0 + + add lenq, 4 + jl .loop + REP_RET +%endmacro + +%ifdef HAVE_SSE +INIT_XMM +%define PABSD PABSD_MMX +AC3_EXTRACT_EXPONENTS sse2 +%ifdef HAVE_SSSE3 +%define PABSD PABSD_SSSE3 +AC3_EXTRACT_EXPONENTS ssse3 +%endif +%endif diff --git a/libavcodec/x86/ac3dsp_mmx.c b/libavcodec/x86/ac3dsp_mmx.c index e853b8831a..31275707e9 100644 --- a/libavcodec/x86/ac3dsp_mmx.c +++ b/libavcodec/x86/ac3dsp_mmx.c @@ -44,6 +44,10 @@ extern void ff_float_to_fixed24_sse2 (int32_t *dst, const float *src, unsigned i extern int ff_ac3_compute_mantissa_size_sse2(uint16_t mant_cnt[6][16]); +extern void ff_ac3_extract_exponents_3dnow(uint8_t *exp, int32_t *coef, int nb_coefs); +extern void ff_ac3_extract_exponents_sse2 (uint8_t *exp, int32_t *coef, int nb_coefs); +extern void ff_ac3_extract_exponents_ssse3(uint8_t *exp, int32_t *coef, int nb_coefs); + av_cold void ff_ac3dsp_init_x86(AC3DSPContext *c, int bit_exact) { int mm_flags = av_get_cpu_flags(); @@ -56,6 +60,7 @@ av_cold void ff_ac3dsp_init_x86(AC3DSPContext *c, int bit_exact) c->ac3_rshift_int32 = ff_ac3_rshift_int32_mmx; } if (mm_flags & AV_CPU_FLAG_3DNOW && HAVE_AMD3DNOW) { + c->extract_exponents = ff_ac3_extract_exponents_3dnow; if (!bit_exact) { c->float_to_fixed24 = ff_float_to_fixed24_3dnow; } @@ 
-72,6 +77,7 @@ av_cold void ff_ac3dsp_init_x86(AC3DSPContext *c, int bit_exact) c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_sse2; c->float_to_fixed24 = ff_float_to_fixed24_sse2; c->compute_mantissa_size = ff_ac3_compute_mantissa_size_sse2; + c->extract_exponents = ff_ac3_extract_exponents_sse2; if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) { c->ac3_lshift_int16 = ff_ac3_lshift_int16_sse2; c->ac3_rshift_int32 = ff_ac3_rshift_int32_sse2; @@ -79,6 +85,9 @@ av_cold void ff_ac3dsp_init_x86(AC3DSPContext *c, int bit_exact) } if (mm_flags & AV_CPU_FLAG_SSSE3 && HAVE_SSSE3) { c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_ssse3; + if (!(mm_flags & AV_CPU_FLAG_ATOM)) { + c->extract_exponents = ff_ac3_extract_exponents_ssse3; + } } #endif } diff --git a/libavcodec/x86/cabac.h b/libavcodec/x86/cabac.h index 52bea9c53d..5c46342808 100644 --- a/libavcodec/x86/cabac.h +++ b/libavcodec/x86/cabac.h @@ -34,8 +34,8 @@ "cmova %%ecx , "range" \n\t"\ "sbb %%ecx , %%ecx \n\t"\ "and %%ecx , "tmp" \n\t"\ - "sub "tmp" , "low" \n\t"\ - "xor %%ecx , "ret" \n\t" + "xor %%ecx , "ret" \n\t"\ + "sub "tmp" , "low" \n\t" #else /* HAVE_FAST_CMOV */ #define BRANCHLESS_GET_CABAC_UPDATE(ret, cabac, statep, low, lowword, range, tmp)\ "mov "tmp" , %%ecx \n\t"\ @@ -62,21 +62,20 @@ "movzbl " MANGLE(ff_h264_norm_shift) "("range"), %%ecx \n\t"\ "shl %%cl , "range" \n\t"\ "movzbl "MANGLE(ff_h264_mlps_state)"+128("ret"), "tmp" \n\t"\ - "mov "tmpbyte" , "statep" \n\t"\ "shl %%cl , "low" \n\t"\ + "mov "tmpbyte" , "statep" \n\t"\ "test "lowword" , "lowword" \n\t"\ " jnz 1f \n\t"\ "mov "byte"("cabac"), %%"REG_c" \n\t"\ + "add"OPSIZE" $2 , "byte "("cabac") \n\t"\ "movzwl (%%"REG_c") , "tmp" \n\t"\ - "bswap "tmp" \n\t"\ - "shr $15 , "tmp" \n\t"\ - "sub $0xFFFF , "tmp" \n\t"\ - "add $2 , %%"REG_c" \n\t"\ - "mov %%"REG_c" , "byte "("cabac") \n\t"\ "lea -1("low") , %%ecx \n\t"\ "xor "low" , %%ecx \n\t"\ "shr $15 , %%ecx \n\t"\ + "bswap "tmp" \n\t"\ + "shr $15 , "tmp" \n\t"\ "movzbl " MANGLE(ff_h264_norm_shift) "(%%ecx), %%ecx \n\t"\ + "sub $0xFFFF , "tmp" \n\t"\ "neg %%ecx \n\t"\ "add $7 , %%ecx \n\t"\ "shl %%cl , "tmp" \n\t"\ diff --git a/libavcodec/x86/dct32_sse.asm b/libavcodec/x86/dct32_sse.asm index 7f5f815591..9a2a6ea88c 100644 --- a/libavcodec/x86/dct32_sse.asm +++ b/libavcodec/x86/dct32_sse.asm @@ -63,6 +63,13 @@ ps_p1p1m1m1: dd 0, 0, 0x80000000, 0x80000000, 0, 0, 0x80000000, 0x80000000 mulps %1, %3 %endmacro +%macro BUTTERFLY0_SSE2 5 + pshufd %4, %1, %5 + xorps %1, %2 + addps %1, %4 + mulps %1, %3 +%endmacro + %macro BUTTERFLY0_AVX 5 vshufps %4, %1, %1, %5 vxorps %1, %1, %2 @@ -405,18 +412,17 @@ INIT_XMM INIT_XMM +%macro DCT32_FUNC 1 ; void ff_dct32_float_sse(FFTSample *out, const FFTSample *in) -cglobal dct32_float_sse, 2,3,16, out, in, tmp +cglobal dct32_float_%1, 2,3,16, out, in, tmp ; pass 1 movaps m0, [inq+0] - movaps m1, [inq+112] - shufps m1, m1, 0x1b + LOAD_INV m1, [inq+112] BUTTERFLY m0, m1, [ps_cos_vec], m3 movaps m7, [inq+64] - movaps m4, [inq+48] - shufps m4, m4, 0x1b + LOAD_INV m4, [inq+48] BUTTERFLY m7, m4, [ps_cos_vec+32], m3 ; pass 2 @@ -427,13 +433,11 @@ cglobal dct32_float_sse, 2,3,16, out, in, tmp ; pass 1 movaps m1, [inq+16] - movaps m6, [inq+96] - shufps m6, m6, 0x1b + LOAD_INV m6, [inq+96] BUTTERFLY m1, m6, [ps_cos_vec+16], m3 movaps m4, [inq+80] - movaps m5, [inq+32] - shufps m5, m5, 0x1b + LOAD_INV m5, [inq+32] BUTTERFLY m4, m5, [ps_cos_vec+48], m3 ; pass 2 @@ -492,3 +496,20 @@ cglobal dct32_float_sse, 2,3,16, out, in, tmp PASS5 PASS6 RET +%endmacro + +%macro LOAD_INV_SSE 2 + movaps %1, %2 + shufps %1, 
%1, 0x1b +%endmacro + +%define LOAD_INV LOAD_INV_SSE +DCT32_FUNC sse + +%macro LOAD_INV_SSE2 2 + pshufd %1, %2, 0x1b +%endmacro + +%define LOAD_INV LOAD_INV_SSE2 +%define BUTTERFLY0 BUTTERFLY0_SSE2 +DCT32_FUNC sse2 diff --git a/libavcodec/x86/dnxhd_mmx.c b/libavcodec/x86/dnxhd_mmx.c index 1256beef7f..1f2b035212 100644 --- a/libavcodec/x86/dnxhd_mmx.c +++ b/libavcodec/x86/dnxhd_mmx.c @@ -53,6 +53,7 @@ static void get_pixels_8x4_sym_sse2(DCTELEM *block, const uint8_t *pixels, int l void ff_dnxhd_init_mmx(DNXHDEncContext *ctx) { if (av_get_cpu_flags() & AV_CPU_FLAG_SSE2) { - ctx->get_pixels_8x4_sym = get_pixels_8x4_sym_sse2; + if (ctx->cid_table->bit_depth == 8) + ctx->get_pixels_8x4_sym = get_pixels_8x4_sym_sse2; } } diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c index ec4d0dc1aa..5276053bb9 100644 --- a/libavcodec/x86/dsputil_mmx.c +++ b/libavcodec/x86/dsputil_mmx.c @@ -42,7 +42,7 @@ DECLARE_ALIGNED(8, const uint64_t, ff_wtwo) = 0x0002000200020002ULL; DECLARE_ALIGNED(16, const uint64_t, ff_pdw_80000000)[2] = {0x8000000080000000ULL, 0x8000000080000000ULL}; -DECLARE_ALIGNED(8, const uint64_t, ff_pw_1 ) = 0x0001000100010001ULL; +DECLARE_ALIGNED(16, const xmm_reg, ff_pw_1 ) = {0x0001000100010001ULL, 0x0001000100010001ULL}; DECLARE_ALIGNED(16, const xmm_reg, ff_pw_2 ) = {0x0002000200020002ULL, 0x0002000200020002ULL}; DECLARE_ALIGNED(16, const xmm_reg, ff_pw_3 ) = {0x0003000300030003ULL, 0x0003000300030003ULL}; DECLARE_ALIGNED(16, const xmm_reg, ff_pw_4 ) = {0x0004000400040004ULL, 0x0004000400040004ULL}; @@ -456,12 +456,12 @@ static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_si "movdqu (%1,%3), %%xmm1 \n\t" "movdqu (%1,%3,2), %%xmm2 \n\t" "movdqu (%1,%4), %%xmm3 \n\t" + "lea (%1,%3,4), %1 \n\t" "movdqa %%xmm0, (%2) \n\t" "movdqa %%xmm1, (%2,%3) \n\t" "movdqa %%xmm2, (%2,%3,2) \n\t" "movdqa %%xmm3, (%2,%4) \n\t" "subl $4, %0 \n\t" - "lea (%1,%3,4), %1 \n\t" "lea (%2,%3,4), %2 \n\t" "jnz 1b \n\t" : "+g"(h), "+r" (pixels), "+r" (block) @@ -478,6 +478,7 @@ static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_si "movdqu (%1,%3), %%xmm1 \n\t" "movdqu (%1,%3,2), %%xmm2 \n\t" "movdqu (%1,%4), %%xmm3 \n\t" + "lea (%1,%3,4), %1 \n\t" "pavgb (%2), %%xmm0 \n\t" "pavgb (%2,%3), %%xmm1 \n\t" "pavgb (%2,%3,2), %%xmm2 \n\t" @@ -487,7 +488,6 @@ static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_si "movdqa %%xmm2, (%2,%3,2) \n\t" "movdqa %%xmm3, (%2,%4) \n\t" "subl $4, %0 \n\t" - "lea (%1,%3,4), %1 \n\t" "lea (%2,%3,4), %2 \n\t" "jnz 1b \n\t" : "+g"(h), "+r" (pixels), "+r" (block) @@ -580,7 +580,7 @@ static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){ dst[i+0] += src[i+0]; } -#if HAVE_7REGS && HAVE_TEN_OPERANDS +#if HAVE_7REGS static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top) { x86_reg w2 = -w; x86_reg x; @@ -1569,10 +1569,6 @@ QPEL_2TAP(put_, 8, 3dnow) QPEL_2TAP(avg_, 8, 3dnow) -#if 0 -static void just_return(void) { return; } -#endif - #if HAVE_YASM typedef void emu_edge_core_func (uint8_t *buf, const uint8_t *src, x86_reg linesize, x86_reg start_y, @@ -1783,7 +1779,7 @@ static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int o int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height) { gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, - width, height, &ff_emulated_edge_mc); + width, height, &ff_emulated_edge_mc_8); } #endif @@ -2333,10 +2329,19 @@ int 
ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src, int w, i float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order); +void ff_vector_clip_int32_mmx (int32_t *dst, const int32_t *src, int32_t min, + int32_t max, unsigned int len); +void ff_vector_clip_int32_sse2 (int32_t *dst, const int32_t *src, int32_t min, + int32_t max, unsigned int len); +void ff_vector_clip_int32_sse2_int(int32_t *dst, const int32_t *src, int32_t min, + int32_t max, unsigned int len); +void ff_vector_clip_int32_sse41 (int32_t *dst, const int32_t *src, int32_t min, + int32_t max, unsigned int len); + void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) { int mm_flags = av_get_cpu_flags(); - const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8; + const int high_bit_depth = avctx->bits_per_raw_sample > 8; const int bit_depth = avctx->bits_per_raw_sample; if (avctx->dsp_mask) { @@ -2364,7 +2369,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) if (mm_flags & AV_CPU_FLAG_MMX) { const int idct_algo= avctx->idct_algo; - if(avctx->lowres==0){ + if (avctx->lowres == 0 && avctx->bits_per_raw_sample <= 8) { if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){ c->idct_put= ff_simple_idct_put_mmx; c->idct_add= ff_simple_idct_add_mmx; @@ -2473,6 +2478,8 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) c->put_rv40_chroma_pixels_tab[0]= ff_put_rv40_chroma_mc8_mmx; c->put_rv40_chroma_pixels_tab[1]= ff_put_rv40_chroma_mc4_mmx; + + c->vector_clip_int32 = ff_vector_clip_int32_mmx; #endif if (mm_flags & AV_CPU_FLAG_MMX2) { @@ -2519,44 +2526,56 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_mmx2; } -#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \ - c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU - - SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2); - SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2); - SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2); - SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2); - SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2); - SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2); +#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX) \ + c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 4] = PREFIX 
## PFX ## SIZE ## _mc01_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU + + SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2, ); + SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2, ); + SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2, ); + SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2, ); + SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2, ); + SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2, ); if (!high_bit_depth) { - SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2); - SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2); - SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2); - SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2); - SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2); - SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2); + SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2, ); + SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2, ); + SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2, ); + SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2, ); + SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2, ); + SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2, ); + } + else if (bit_depth == 10) { +#if HAVE_YASM +#if !ARCH_X86_64 + SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_mmxext, ff_); + SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_mmxext, ff_); + SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 10_mmxext, ff_); + SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 10_mmxext, ff_); +#endif + SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 10_mmxext, ff_); + SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 10_mmxext, ff_); +#endif } - SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2); - SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2); - SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2); - SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2); + SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2, ); + SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2, ); + SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2, ); + SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2, ); #if HAVE_YASM c->avg_rv40_chroma_pixels_tab[0]= ff_avg_rv40_chroma_mc8_mmx2; @@ -2577,7 +2596,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2; #endif -#if HAVE_7REGS && HAVE_TEN_OPERANDS +#if HAVE_7REGS if( mm_flags&AV_CPU_FLAG_3DNOW ) c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov; #endif @@ -2616,26 +2635,26 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_3dnow; } - SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow); - SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow); - SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow); - SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow); - SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow); - SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow); + SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow, ); + SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow, ); + SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow, ); + SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow, ); + SET_QPEL_FUNCS(avg_qpel, 0, 16, 
3dnow, ); + SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow, ); if (!high_bit_depth) { - SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow); - SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow); - SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow); - SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow); - SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow); - SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow); + SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow, ); + SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow, ); + SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow, ); + SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow, ); + SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow, ); + SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow, ); } - SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow); - SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow); - SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow); - SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow); + SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow, ); + SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow, ); + SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow, ); + SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow, ); #if HAVE_YASM if (!high_bit_depth) { @@ -2679,7 +2698,20 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) H264_QPEL_FUNCS(3, 3, sse2); } #if HAVE_YASM +#define H264_QPEL_FUNCS_10(x, y, CPU)\ + c->put_h264_qpel_pixels_tab[0][x+y*4] = ff_put_h264_qpel16_mc##x##y##_10_##CPU;\ + c->put_h264_qpel_pixels_tab[1][x+y*4] = ff_put_h264_qpel8_mc##x##y##_10_##CPU;\ + c->avg_h264_qpel_pixels_tab[0][x+y*4] = ff_avg_h264_qpel16_mc##x##y##_10_##CPU;\ + c->avg_h264_qpel_pixels_tab[1][x+y*4] = ff_avg_h264_qpel8_mc##x##y##_10_##CPU; if (bit_depth == 10) { + SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_sse2, ff_); + SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 10_sse2, ff_); + SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_sse2, ff_); + SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 10_sse2, ff_); + H264_QPEL_FUNCS_10(1, 0, sse2_cache64) + H264_QPEL_FUNCS_10(2, 0, sse2_cache64) + H264_QPEL_FUNCS_10(3, 0, sse2_cache64) + c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_10_sse2; c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_10_sse2; } @@ -2702,6 +2734,11 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) H264_QPEL_FUNCS(3, 3, ssse3); } #if HAVE_YASM + else if (bit_depth == 10) { + H264_QPEL_FUNCS_10(1, 0, ssse3_cache64) + H264_QPEL_FUNCS_10(2, 0, ssse3_cache64) + H264_QPEL_FUNCS_10(3, 0, ssse3_cache64) + } if (!high_bit_depth) { c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_ssse3_rnd; c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_ssse3_rnd; @@ -2756,6 +2793,11 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) #if HAVE_YASM c->scalarproduct_int16 = ff_scalarproduct_int16_sse2; c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2; + if (mm_flags & AV_CPU_FLAG_ATOM) { + c->vector_clip_int32 = ff_vector_clip_int32_sse2_int; + } else { + c->vector_clip_int32 = ff_vector_clip_int32_sse2; + } if (avctx->flags & CODEC_FLAG_BITEXACT) { c->apply_window_int16 = ff_apply_window_int16_sse2_ba; } else { @@ -2781,9 +2823,22 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) } #endif } + + if (mm_flags & AV_CPU_FLAG_SSE4 && HAVE_SSE) { +#if HAVE_YASM + c->vector_clip_int32 = ff_vector_clip_int32_sse41; +#endif + } + #if HAVE_AVX && HAVE_YASM if (mm_flags & AV_CPU_FLAG_AVX) { if (bit_depth == 10) { + //AVX implies !cache64. + //TODO: Port cache(32|64) detection from x264. 
+ H264_QPEL_FUNCS_10(1, 0, sse2) + H264_QPEL_FUNCS_10(2, 0, sse2) + H264_QPEL_FUNCS_10(3, 0, sse2) + c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_10_avx; c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_10_avx; } @@ -2793,39 +2848,4 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) if (CONFIG_ENCODERS) dsputilenc_init_mmx(c, avctx); - -#if 0 - // for speed testing - get_pixels = just_return; - put_pixels_clamped = just_return; - add_pixels_clamped = just_return; - - pix_abs16x16 = just_return; - pix_abs16x16_x2 = just_return; - pix_abs16x16_y2 = just_return; - pix_abs16x16_xy2 = just_return; - - put_pixels_tab[0] = just_return; - put_pixels_tab[1] = just_return; - put_pixels_tab[2] = just_return; - put_pixels_tab[3] = just_return; - - put_no_rnd_pixels_tab[0] = just_return; - put_no_rnd_pixels_tab[1] = just_return; - put_no_rnd_pixels_tab[2] = just_return; - put_no_rnd_pixels_tab[3] = just_return; - - avg_pixels_tab[0] = just_return; - avg_pixels_tab[1] = just_return; - avg_pixels_tab[2] = just_return; - avg_pixels_tab[3] = just_return; - - avg_no_rnd_pixels_tab[0] = just_return; - avg_no_rnd_pixels_tab[1] = just_return; - avg_no_rnd_pixels_tab[2] = just_return; - avg_no_rnd_pixels_tab[3] = just_return; - - //av_fdct = just_return; - //ff_idct = just_return; -#endif } diff --git a/libavcodec/x86/dsputil_yasm.asm b/libavcodec/x86/dsputil_yasm.asm index 695aba5ec3..1f5a4f68c7 100644 --- a/libavcodec/x86/dsputil_yasm.asm +++ b/libavcodec/x86/dsputil_yasm.asm @@ -1048,3 +1048,118 @@ emu_edge sse %ifdef ARCH_X86_32 emu_edge mmx %endif + +;----------------------------------------------------------------------------- +; void ff_vector_clip_int32(int32_t *dst, const int32_t *src, int32_t min, +; int32_t max, unsigned int len) +;----------------------------------------------------------------------------- + +%macro PMINSD_MMX 3 ; dst, src, tmp + mova %3, %2 + pcmpgtd %3, %1 + pxor %1, %2 + pand %1, %3 + pxor %1, %2 +%endmacro + +%macro PMAXSD_MMX 3 ; dst, src, tmp + mova %3, %1 + pcmpgtd %3, %2 + pand %1, %3 + pandn %3, %2 + por %1, %3 +%endmacro + +%macro CLIPD_MMX 3-4 ; src/dst, min, max, tmp + PMINSD_MMX %1, %3, %4 + PMAXSD_MMX %1, %2, %4 +%endmacro + +%macro CLIPD_SSE2 3-4 ; src/dst, min (float), max (float), unused + cvtdq2ps %1, %1 + minps %1, %3 + maxps %1, %2 + cvtps2dq %1, %1 +%endmacro + +%macro CLIPD_SSE41 3-4 ; src/dst, min, max, unused + pminsd %1, %3 + pmaxsd %1, %2 +%endmacro + +%macro SPLATD_MMX 1 + punpckldq %1, %1 +%endmacro + +%macro SPLATD_SSE2 1 + pshufd %1, %1, 0 +%endmacro + +%macro VECTOR_CLIP_INT32 4 +cglobal vector_clip_int32_%1, 5,5,%2, dst, src, min, max, len +%ifidn %1, sse2 + cvtsi2ss m4, minm + cvtsi2ss m5, maxm +%else + movd m4, minm + movd m5, maxm +%endif + SPLATD m4 + SPLATD m5 +.loop: +%assign %%i 1 +%rep %3 + mova m0, [srcq+mmsize*0*%%i] + mova m1, [srcq+mmsize*1*%%i] + mova m2, [srcq+mmsize*2*%%i] + mova m3, [srcq+mmsize*3*%%i] +%if %4 + mova m7, [srcq+mmsize*4*%%i] + mova m8, [srcq+mmsize*5*%%i] + mova m9, [srcq+mmsize*6*%%i] + mova m10, [srcq+mmsize*7*%%i] +%endif + CLIPD m0, m4, m5, m6 + CLIPD m1, m4, m5, m6 + CLIPD m2, m4, m5, m6 + CLIPD m3, m4, m5, m6 +%if %4 + CLIPD m7, m4, m5, m6 + CLIPD m8, m4, m5, m6 + CLIPD m9, m4, m5, m6 + CLIPD m10, m4, m5, m6 +%endif + mova [dstq+mmsize*0*%%i], m0 + mova [dstq+mmsize*1*%%i], m1 + mova [dstq+mmsize*2*%%i], m2 + mova [dstq+mmsize*3*%%i], m3 +%if %4 + mova [dstq+mmsize*4*%%i], m7 + mova [dstq+mmsize*5*%%i], m8 + mova [dstq+mmsize*6*%%i], m9 + mova [dstq+mmsize*7*%%i], m10 +%endif 
+%assign %%i %%i+1 +%endrep + add srcq, mmsize*4*(%3+%4) + add dstq, mmsize*4*(%3+%4) + sub lend, mmsize*(%3+%4) + jg .loop + REP_RET +%endmacro + +INIT_MMX +%define SPLATD SPLATD_MMX +%define CLIPD CLIPD_MMX +VECTOR_CLIP_INT32 mmx, 0, 1, 0 +INIT_XMM +%define SPLATD SPLATD_SSE2 +VECTOR_CLIP_INT32 sse2_int, 6, 1, 0 +%define CLIPD CLIPD_SSE2 +VECTOR_CLIP_INT32 sse2, 6, 2, 0 +%define CLIPD CLIPD_SSE41 +%ifdef m8 +VECTOR_CLIP_INT32 sse41, 11, 1, 1 +%else +VECTOR_CLIP_INT32 sse41, 6, 1, 0 +%endif diff --git a/libavcodec/x86/dsputilenc_mmx.c b/libavcodec/x86/dsputilenc_mmx.c index bd31205a6b..f13c1219da 100644 --- a/libavcodec/x86/dsputilenc_mmx.c +++ b/libavcodec/x86/dsputilenc_mmx.c @@ -1098,10 +1098,12 @@ static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int si void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx) { int mm_flags = av_get_cpu_flags(); + int bit_depth = avctx->bits_per_raw_sample; if (mm_flags & AV_CPU_FLAG_MMX) { const int dct_algo = avctx->dct_algo; - if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){ + if (avctx->bits_per_raw_sample <= 8 && + (dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX)) { if(mm_flags & AV_CPU_FLAG_SSE2){ c->fdct = ff_fdct_sse2; }else if(mm_flags & AV_CPU_FLAG_MMX2){ @@ -1111,7 +1113,8 @@ void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx) } } - c->get_pixels = get_pixels_mmx; + if (bit_depth <= 8) + c->get_pixels = get_pixels_mmx; c->diff_pixels = diff_pixels_mmx; c->pix_sum = pix_sum16_mmx; @@ -1158,7 +1161,8 @@ void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx) } if(mm_flags & AV_CPU_FLAG_SSE2){ - c->get_pixels = get_pixels_sse2; + if (bit_depth <= 8) + c->get_pixels = get_pixels_sse2; c->sum_abs_dctelem= sum_abs_dctelem_sse2; #if HAVE_YASM && HAVE_ALIGNED_STACK c->hadamard8_diff[0]= ff_hadamard8_diff16_sse2; diff --git a/libavcodec/x86/fft.c b/libavcodec/x86/fft.c index 18964d88ca..d2d157c2d3 100644 --- a/libavcodec/x86/fft.c +++ b/libavcodec/x86/fft.c @@ -60,6 +60,8 @@ av_cold void ff_dct_init_mmx(DCTContext *s) int has_vectors = av_get_cpu_flags(); if (has_vectors & AV_CPU_FLAG_AVX && HAVE_AVX) s->dct32 = ff_dct32_float_avx; + else if (has_vectors & AV_CPU_FLAG_SSE2 && HAVE_SSE) + s->dct32 = ff_dct32_float_sse2; else if (has_vectors & AV_CPU_FLAG_SSE && HAVE_SSE) s->dct32 = ff_dct32_float_sse; #endif diff --git a/libavcodec/x86/fft.h b/libavcodec/x86/fft.h index 79064c27cd..7fdc858a50 100644 --- a/libavcodec/x86/fft.h +++ b/libavcodec/x86/fft.h @@ -35,6 +35,7 @@ void ff_imdct_calc_sse(FFTContext *s, FFTSample *output, const FFTSample *input) void ff_imdct_half_sse(FFTContext *s, FFTSample *output, const FFTSample *input); void ff_imdct_half_avx(FFTContext *s, FFTSample *output, const FFTSample *input); void ff_dct32_float_sse(FFTSample *out, const FFTSample *in); +void ff_dct32_float_sse2(FFTSample *out, const FFTSample *in); void ff_dct32_float_avx(FFTSample *out, const FFTSample *in); #endif /* AVCODEC_X86_FFT_H */ diff --git a/libavcodec/x86/h264_chromamc.asm b/libavcodec/x86/h264_chromamc.asm index 5dae1cca85..e9091f7059 100644 --- a/libavcodec/x86/h264_chromamc.asm +++ b/libavcodec/x86/h264_chromamc.asm @@ -72,17 +72,17 @@ SECTION .text .next4rows movq mm0, [r1 ] movq mm1, [r1+r2] + add r1, r4 CHROMAMC_AVG mm0, [r0 ] CHROMAMC_AVG mm1, [r0+r2] movq [r0 ], mm0 movq [r0+r2], mm1 add r0, r4 - add r1, r4 movq mm0, [r1 ] movq mm1, [r1+r2] + add r1, r4 CHROMAMC_AVG mm0, [r0 ] CHROMAMC_AVG mm1, [r0+r2] - add r1, r4 movq [r0 ], mm0 movq [r0+r2], mm1 add r0, r4 @@ -472,8 +472,8 @@ cglobal 
%1_%2_chroma_mc8_%3, 6, 7, 8 mov r6d, r4d shl r4d, 8 sub r4, r6 - add r4, 8 ; x*288+8 = x<<8 | (8-x) mov r6, 8 + add r4, 8 ; x*288+8 = x<<8 | (8-x) sub r6d, r5d imul r6, r4 ; (8-y)*(x*255+8) = (8-y)*x<<8 | (8-y)*(8-x) imul r4d, r5d ; y *(x*255+8) = y *x<<8 | y *(8-x) @@ -481,24 +481,23 @@ cglobal %1_%2_chroma_mc8_%3, 6, 7, 8 movd m7, r6d movd m6, r4d movdqa m5, [rnd_2d_%2] + movq m0, [r1 ] + movq m1, [r1+1] pshuflw m7, m7, 0 pshuflw m6, m6, 0 + punpcklbw m0, m1 movlhps m7, m7 movlhps m6, m6 - movq m0, [r1 ] - movq m1, [r1 +1] - punpcklbw m0, m1 - add r1, r2 .next2rows - movq m1, [r1 ] - movq m2, [r1 +1] - movq m3, [r1+r2 ] - movq m4, [r1+r2+1] + movq m1, [r1+r2*1 ] + movq m2, [r1+r2*1+1] + movq m3, [r1+r2*2 ] + movq m4, [r1+r2*2+1] lea r1, [r1+r2*2] punpcklbw m1, m2 - punpcklbw m3, m4 movdqa m2, m1 + punpcklbw m3, m4 movdqa m4, m3 pmaddubsw m0, m7 pmaddubsw m1, m6 @@ -508,8 +507,8 @@ cglobal %1_%2_chroma_mc8_%3, 6, 7, 8 paddw m2, m5 paddw m1, m0 paddw m3, m2 - movdqa m0, m4 psrlw m1, 6 + movdqa m0, m4 psrlw m3, 6 %ifidn %1, avg movq m2, [r0 ] @@ -576,6 +575,7 @@ cglobal %1_%2_chroma_mc8_%3, 6, 7, 8 movq m1, [r1+r2 ] movdqa m2, m1 movq m3, [r1+r2*2] + lea r1, [r1+r2*2] punpcklbw m0, m1 punpcklbw m2, m3 pmaddubsw m0, m7 @@ -594,7 +594,6 @@ cglobal %1_%2_chroma_mc8_%3, 6, 7, 8 movhps [r0+r2], m0 sub r3d, 2 lea r0, [r0+r2*2] - lea r1, [r1+r2*2] jg .next2yrows REP_RET %endmacro @@ -607,8 +606,8 @@ cglobal %1_%2_chroma_mc4_%3, 6, 7, 0 mov r6, r4 shl r4d, 8 sub r4d, r6d - add r4d, 8 ; x*288+8 mov r6, 8 + add r4d, 8 ; x*288+8 sub r6d, r5d imul r6d, r4d ; (8-y)*(x*255+8) = (8-y)*x<<8 | (8-y)*(8-x) imul r4d, r5d ; y *(x*255+8) = y *x<<8 | y *(8-x) @@ -616,17 +615,16 @@ cglobal %1_%2_chroma_mc4_%3, 6, 7, 0 movd m7, r6d movd m6, r4d movq m5, [pw_32] + movd m0, [r1 ] pshufw m7, m7, 0 + punpcklbw m0, [r1+1] pshufw m6, m6, 0 - movd m0, [r1 ] - punpcklbw m0, [r1 +1] - add r1, r2 .next2rows - movd m1, [r1 ] - movd m3, [r1+r2 ] - punpcklbw m1, [r1 +1] - punpcklbw m3, [r1+r2+1] + movd m1, [r1+r2*1 ] + movd m3, [r1+r2*2 ] + punpcklbw m1, [r1+r2*1+1] + punpcklbw m3, [r1+r2*2+1] lea r1, [r1+r2*2] movq m2, m1 movq m4, m3 @@ -638,8 +636,8 @@ cglobal %1_%2_chroma_mc4_%3, 6, 7, 0 paddw m2, m5 paddw m1, m0 paddw m3, m2 - movq m0, m4 psrlw m1, 6 + movq m0, m4 psrlw m3, 6 packuswb m1, m1 packuswb m3, m3 diff --git a/libavcodec/x86/h264_deblock.asm b/libavcodec/x86/h264_deblock.asm index 6c2ef18bc2..ca90f3f4c2 100644 --- a/libavcodec/x86/h264_deblock.asm +++ b/libavcodec/x86/h264_deblock.asm @@ -240,17 +240,17 @@ cextern pb_A1 ; out: m1=p0' m2=q0' ; clobbers: m0,3-6 %macro DEBLOCK_P0_Q0 0 - pxor m5, m1, m2 ; p0^q0 - pand m5, [pb_1] ; (p0^q0)&1 pcmpeqb m4, m4 + pxor m5, m1, m2 ; p0^q0 pxor m3, m4 + pand m5, [pb_1] ; (p0^q0)&1 pavgb m3, m0 ; (p1 - q1 + 256)>>1 - pavgb m3, [pb_3] ; (((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2 pxor m4, m1 + pavgb m3, [pb_3] ; (((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2 pavgb m4, m2 ; (q0 - p0 + 256)>>1 pavgb m3, m5 - paddusb m3, m4 ; d+128+33 mova m6, [pb_A1] + paddusb m3, m4 ; d+128+33 psubusb m6, m3 psubusb m3, [pb_A1] pminub m6, m7 @@ -413,16 +413,16 @@ cglobal deblock_%2_luma_8_%1, 5,5 LOAD_MASK r2, r3 mov r3, r4mp + pcmpeqb m3, m3 movd m4, [r3] ; tc0 punpcklbw m4, m4 punpcklbw m4, m4 ; tc = 4x tc0[3], 4x tc0[2], 4x tc0[1], 4x tc0[0] mova [esp+%3], m4 ; tc - pcmpeqb m3, m3 pcmpgtb m4, m3 + mova m3, [r4] ; p2 pand m4, m7 mova [esp], m4 ; mask - mova m3, [r4] ; p2 DIFF_GT2 m1, m3, m5, m6, m7 ; |p2-p0| > beta-1 pand m6, m4 pand m4, [esp+%3] ; tc @@ -432,11 +432,10 @@ cglobal 
deblock_%2_luma_8_%1, 5,5 mova m4, [r0+2*r1] ; q2 DIFF_GT2 m2, m4, m5, m6, m3 ; |q2-q0| > beta-1 - mova m5, [esp] ; mask - pand m6, m5 + pand m6, [esp] ; mask mova m5, [esp+%3] ; tc - pand m5, m6 psubb m7, m6 + pand m5, m6 mova m3, [r0+r1] LUMA_Q1 m3, m4, [r0+2*r1], [r0+r1], m5, m6 @@ -484,10 +483,10 @@ cglobal deblock_h_luma_8_%1, 0,5 ; transpose 16x4 -> original space (only the middle 4 rows were changed by the filter) mov r0, r0mp sub r0, 2 - lea r1, [r0+r4] movq m0, [pix_tmp+0x10] movq m1, [pix_tmp+0x20] + lea r1, [r0+r4] movq m2, [pix_tmp+0x30] movq m3, [pix_tmp+0x40] TRANSPOSE8x4B_STORE PASS8ROWS(r0, r1, r3, r4) diff --git a/libavcodec/x86/h264_i386.h b/libavcodec/x86/h264_i386.h index c2477ac96b..8ea430a82a 100644 --- a/libavcodec/x86/h264_i386.h +++ b/libavcodec/x86/h264_i386.h @@ -45,23 +45,18 @@ static int decode_significance_x86(CABACContext *c, int max_coeff, int minusindex= 4-(intptr_t)index; int bit; x86_reg coeff_count; - int low; - int range; __asm__ volatile( - "movl %a11(%6), %5 \n\t" - "movl %a12(%6), %3 \n\t" - "2: \n\t" BRANCHLESS_GET_CABAC("%4", "%6", "(%1)", "%3", - "%w3", "%5", "%k0", "%b0", "%a13") + "%w3", "%5", "%k0", "%b0", "%a11") "test $1, %4 \n\t" " jz 3f \n\t" "add %10, %1 \n\t" BRANCHLESS_GET_CABAC("%4", "%6", "(%1)", "%3", - "%w3", "%5", "%k0", "%b0", "%a13") + "%w3", "%5", "%k0", "%b0", "%a11") "sub %10, %1 \n\t" "mov %2, %0 \n\t" @@ -72,8 +67,7 @@ static int decode_significance_x86(CABACContext *c, int max_coeff, "test $1, %4 \n\t" " jnz 4f \n\t" - "add $4, %0 \n\t" - "mov %0, %2 \n\t" + "add"OPSIZE" $4, %2 \n\t" "3: \n\t" "add $1, %1 \n\t" @@ -86,13 +80,9 @@ static int decode_significance_x86(CABACContext *c, int max_coeff, "4: \n\t" "add %9, %k0 \n\t" "shr $2, %k0 \n\t" - - "movl %5, %a11(%6) \n\t" - "movl %3, %a12(%6) \n\t" :"=&q"(coeff_count), "+r"(significant_coeff_ctx_base), "+m"(index), - "=&r"(low), "=&r"(bit), "=&r"(range) + "+&r"(c->low), "=&r"(bit), "+&r"(c->range) :"r"(c), "m"(minusstart), "m"(end), "m"(minusindex), "m"(last_off), - "i"(offsetof(CABACContext, range)), "i"(offsetof(CABACContext, low)), "i"(offsetof(CABACContext, bytestream)) : "%"REG_c, "memory" ); @@ -101,18 +91,13 @@ static int decode_significance_x86(CABACContext *c, int max_coeff, static int decode_significance_8x8_x86(CABACContext *c, uint8_t *significant_coeff_ctx_base, - int *index, x86_reg last_off, const uint8_t *sig_off){ + int *index, uint8_t *last_coeff_ctx_base, const uint8_t *sig_off){ int minusindex= 4-(intptr_t)index; int bit; x86_reg coeff_count; - int low; - int range; x86_reg last=0; x86_reg state; __asm__ volatile( - "movl %a12(%7), %5 \n\t" - "movl %a13(%7), %3 \n\t" - "mov %1, %6 \n\t" "2: \n\t" @@ -121,18 +106,17 @@ static int decode_significance_8x8_x86(CABACContext *c, "add %9, %6 \n\t" BRANCHLESS_GET_CABAC("%4", "%7", "(%6)", "%3", - "%w3", "%5", "%k0", "%b0", "%a14") + "%w3", "%5", "%k0", "%b0", "%a12") "mov %1, %k6 \n\t" "test $1, %4 \n\t" " jz 3f \n\t" "movzbl "MANGLE(last_coeff_flag_offset_8x8)"(%k6), %k6\n\t" - "add %9, %6 \n\t" "add %11, %6 \n\t" BRANCHLESS_GET_CABAC("%4", "%7", "(%6)", "%3", - "%w3", "%5", "%k0", "%b0", "%a14") + "%w3", "%5", "%k0", "%b0", "%a12") "mov %2, %0 \n\t" "mov %1, %k6 \n\t" @@ -141,8 +125,7 @@ static int decode_significance_8x8_x86(CABACContext *c, "test $1, %4 \n\t" " jnz 4f \n\t" - "add $4, %0 \n\t" - "mov %0, %2 \n\t" + "add"OPSIZE" $4, %2 \n\t" "3: \n\t" "addl $1, %k6 \n\t" @@ -154,13 +137,9 @@ static int decode_significance_8x8_x86(CABACContext *c, "4: \n\t" "addl %8, %k0 \n\t" "shr $2, %k0 \n\t" - - "movl %5, 
%a12(%7) \n\t" - "movl %3, %a13(%7) \n\t" - :"=&q"(coeff_count),"+m"(last), "+m"(index), "=&r"(low), "=&r"(bit), - "=&r"(range), "=&r"(state) - :"r"(c), "m"(minusindex), "m"(significant_coeff_ctx_base), "m"(sig_off), "m"(last_off), - "i"(offsetof(CABACContext, range)), "i"(offsetof(CABACContext, low)), + :"=&q"(coeff_count),"+m"(last), "+m"(index), "+&r"(c->low), "=&r"(bit), + "+&r"(c->range), "=&r"(state) + :"r"(c), "m"(minusindex), "m"(significant_coeff_ctx_base), "m"(sig_off), "m"(last_coeff_ctx_base), "i"(offsetof(CABACContext, bytestream)) : "%"REG_c, "memory" ); diff --git a/libavcodec/x86/h264_idct.asm b/libavcodec/x86/h264_idct.asm index 04dabc3a2d..3b3dabe601 100644 --- a/libavcodec/x86/h264_idct.asm +++ b/libavcodec/x86/h264_idct.asm @@ -82,10 +82,10 @@ cglobal h264_idct_add_8_mmx, 3, 3, 0 RET %macro IDCT8_1D 2 - mova m4, m5 mova m0, m1 - psraw m4, 1 psraw m1, 1 + mova m4, m5 + psraw m4, 1 paddw m4, m5 paddw m1, m0 paddw m4, m7 @@ -95,16 +95,16 @@ cglobal h264_idct_add_8_mmx, 3, 3, 0 psubw m0, m3 psubw m5, m3 + psraw m3, 1 paddw m0, m7 psubw m5, m7 - psraw m3, 1 psraw m7, 1 psubw m0, m3 psubw m5, m7 - mova m3, m4 mova m7, m1 psraw m1, 2 + mova m3, m4 psraw m3, 2 paddw m3, m0 psraw m0, 2 @@ -113,12 +113,12 @@ cglobal h264_idct_add_8_mmx, 3, 3, 0 psubw m0, m4 psubw m7, m5 - mova m4, m2 mova m5, m6 - psraw m4, 1 psraw m6, 1 - psubw m4, m5 + mova m4, m2 + psraw m4, 1 paddw m6, m2 + psubw m4, m5 mova m2, %1 mova m5, %2 @@ -337,7 +337,7 @@ cglobal h264_idct8_add4_8_mmx, 5, 7, 0 test r6, r6 jz .skipblock mov r6d, dword [r1+r5*4] - lea r6, [r0+r6] + add r6, r0 add word [r2], 32 IDCT8_ADD_MMX_START r2 , rsp IDCT8_ADD_MMX_START r2+8, rsp+64 @@ -391,7 +391,7 @@ cglobal h264_idct_add16_8_mmx2, 5, 7, 0 REP_RET .no_dc mov r6d, dword [r1+r5*4] - lea r6, [r0+r6] + add r6, r0 IDCT4_ADD r6, r2, r3 .skipblock inc r5 @@ -414,7 +414,7 @@ cglobal h264_idct_add16intra_8_mmx, 5, 7, 0 test r6, r6 jz .skipblock mov r6d, dword [r1+r5*4] - lea r6, [r0+r6] + add r6, r0 IDCT4_ADD r6, r2, r3 .skipblock inc r5 @@ -456,7 +456,7 @@ cglobal h264_idct_add16intra_8_mmx2, 5, 7, 0 %define dst_regd r1d %endif mov dst_regd, dword [r1+r5*4] - lea dst_reg, [r0+dst_reg] + add dst_reg, r0 DC_ADD_MMX2_OP movh, dst_reg, r3, r6 %ifndef ARCH_X86_64 mov r1, r1m @@ -513,7 +513,7 @@ cglobal h264_idct8_add4_8_mmx2, 5, 7, 0 RET .no_dc mov r6d, dword [r1+r5*4] - lea r6, [r0+r6] + add r6, r0 add word [r2], 32 IDCT8_ADD_MMX_START r2 , rsp IDCT8_ADD_MMX_START r2+8, rsp+64 @@ -558,7 +558,7 @@ INIT_MMX %define dst_regd r1d %endif mov dst_regd, dword [r1+r5*4] - lea dst_reg, [r0+dst_reg] + add dst_reg, r0 DC_ADD_MMX2_OP mova, dst_reg, r3, r6 lea dst_reg, [dst_reg+r3*4] DC_ADD_MMX2_OP mova, dst_reg, r3, r6 @@ -573,7 +573,7 @@ INIT_MMX .no_dc INIT_XMM mov dst_regd, dword [r1+r5*4] - lea dst_reg, [r0+dst_reg] + add dst_reg, r0 IDCT8_ADD_SSE dst_reg, r2, r3, r6 %ifndef ARCH_X86_64 mov r1, r1m diff --git a/libavcodec/x86/h264_intrapred.asm b/libavcodec/x86/h264_intrapred.asm index a9da6a19db..a178a335ed 100644 --- a/libavcodec/x86/h264_intrapred.asm +++ b/libavcodec/x86/h264_intrapred.asm @@ -2611,12 +2611,11 @@ cglobal pred4x4_down_left_mmxext, 3,3 punpckldq m1, [r1] movq m2, m1 movq m3, m1 - movq m4, m1 psllq m1, 8 pxor m2, m1 psrlq m2, 8 - pxor m3, m2 - PRED4x4_LOWPASS m0, m1, m3, m4, m5 + pxor m2, m3 + PRED4x4_LOWPASS m0, m1, m2, m3, m4 lea r1, [r0+r2*2] psrlq m0, 8 movd [r0+r2*1], m0 diff --git a/libavcodec/x86/h264_intrapred_10bit.asm b/libavcodec/x86/h264_intrapred_10bit.asm index 5cb593ac38..e14e31a38c 100644 --- 
a/libavcodec/x86/h264_intrapred_10bit.asm +++ b/libavcodec/x86/h264_intrapred_10bit.asm @@ -27,11 +27,23 @@ SECTION_RODATA -SECTION .text - +cextern pw_16 +cextern pw_8 cextern pw_4 +cextern pw_2 cextern pw_1 +pw_m32101234: dw -3, -2, -1, 0, 1, 2, 3, 4 +pw_m3: times 8 dw -3 +pw_pixel_max: times 8 dw ((1 << 10)-1) +pw_512: times 8 dw 512 +pd_17: times 4 dd 17 +pd_16: times 4 dd 16 + +SECTION .text + +; dest, left, right, src +; output: %1 = (t[n-1] + t[n]*2 + t[n+1] + 2) >> 2 %macro PRED4x4_LOWPASS 4 paddw %2, %3 psrlw %2, 1 @@ -52,13 +64,11 @@ cglobal pred4x4_down_right_10_%1, 3,3 movq m3, [r0] punpckhdq m1, m2 PALIGNR m3, m1, 10, m1 - mova m1, m3 movhps m4, [r1+r2*1-8] - PALIGNR m3, m4, 14, m4 - mova m2, m3 + PALIGNR m0, m3, m4, 14, m4 movhps m4, [r1+r2*2-8] - PALIGNR m3, m4, 14, m4 - PRED4x4_LOWPASS m0, m3, m1, m2 + PALIGNR m2, m0, m4, 14, m4 + PRED4x4_LOWPASS m0, m2, m3, m0 movq [r1+r2*2], m0 psrldq m0, 2 movq [r1+r2*1], m0 @@ -92,22 +102,20 @@ cglobal pred4x4_vertical_right_10_%1, 3,3,6 pavgw m5, m0 movhps m1, [r0+r2*1-8] PALIGNR m0, m1, 14, m1 ; ....t3t2t1t0ltl0 - mova m1, m0 movhps m2, [r0+r2*2-8] - PALIGNR m0, m2, 14, m2 ; ..t3t2t1t0ltl0l1 - mova m2, m0 + PALIGNR m1, m0, m2, 14, m2 ; ..t3t2t1t0ltl0l1 movhps m3, [r1+r2*1-8] - PALIGNR m0, m3, 14, m3 ; t3t2t1t0ltl0l1l2 - PRED4x4_LOWPASS m3, m1, m0, m2 - pslldq m1, m3, 12 - psrldq m3, 4 + PALIGNR m2, m1, m3, 14, m3 ; t3t2t1t0ltl0l1l2 + PRED4x4_LOWPASS m1, m0, m2, m1 + pslldq m0, m1, 12 + psrldq m1, 4 movq [r0+r2*1], m5 - movq [r0+r2*2], m3 - PALIGNR m5, m1, 14, m2 - pslldq m1, 2 + movq [r0+r2*2], m1 + PALIGNR m5, m0, 14, m2 + pslldq m0, 2 movq [r1+r2*1], m5 - PALIGNR m3, m1, 14, m1 - movq [r1+r2*2], m3 + PALIGNR m1, m0, 14, m0 + movq [r1+r2*2], m1 RET %endmacro @@ -140,9 +148,9 @@ cglobal pred4x4_horizontal_down_10_%1, 3,3 punpckhdq m1, m2 ; l0 l1 l2 l3 punpckhqdq m1, m0 ; t2 t1 t0 lt l0 l1 l2 l3 psrldq m0, m1, 4 ; .. .. t2 t1 t0 lt l0 l1 - psrldq m2, m1, 2 ; .. t2 t1 t0 lt l0 l1 l2 - pavgw m5, m1, m2 - PRED4x4_LOWPASS m3, m1, m0, m2 + psrldq m3, m1, 2 ; .. 
t2 t1 t0 lt l0 l1 l2 + pavgw m5, m1, m3 + PRED4x4_LOWPASS m3, m1, m0, m3 punpcklwd m5, m3 psrldq m3, 8 PALIGNR m3, m5, 12, m4 @@ -208,17 +216,15 @@ cglobal pred4x4_dc_10_mmxext, 3,3 ;----------------------------------------------------------------------------- ; void pred4x4_down_left(pixel *src, const pixel *topright, int stride) ;----------------------------------------------------------------------------- -;TODO: more AVX here %macro PRED4x4_DL 1 cglobal pred4x4_down_left_10_%1, 3,3 sub r0, r2 - movq m1, [r0] - movhps m1, [r1] - pslldq m5, m1, 2 - pxor m2, m5, m1 - psrldq m2, 2 - pxor m3, m1, m2 - PRED4x4_LOWPASS m0, m5, m3, m1 + movq m0, [r0] + movhps m0, [r1] + psrldq m2, m0, 2 + pslldq m3, m0, 2 + pshufhw m2, m2, 10100100b + PRED4x4_LOWPASS m0, m3, m2, m0 lea r1, [r0+r2*2] movhps [r1+r2*2], m0 psrldq m0, 2 @@ -245,10 +251,10 @@ cglobal pred4x4_vertical_left_10_%1, 3,3 sub r0, r2 movu m1, [r0] movhps m1, [r1] - psrldq m3, m1, 2 + psrldq m0, m1, 2 psrldq m2, m1, 4 - pavgw m4, m3, m1 - PRED4x4_LOWPASS m0, m1, m2, m3 + pavgw m4, m0, m1 + PRED4x4_LOWPASS m0, m1, m2, m0 lea r1, [r0+r2*2] movq [r0+r2*1], m4 movq [r0+r2*2], m0 @@ -286,13 +292,13 @@ cglobal pred4x4_horizontal_up_10_mmxext, 3,3 pavgw m2, m0 pshufw m5, m0, 11111110b - PRED4x4_LOWPASS m3, m0, m5, m1 + PRED4x4_LOWPASS m1, m0, m5, m1 movq m6, m2 - punpcklwd m6, m3 + punpcklwd m6, m1 movq [r0+r2*1], m6 psrlq m2, 16 - psrlq m3, 16 - punpcklwd m2, m3 + psrlq m1, 16 + punpcklwd m2, m1 movq [r0+r2*2], m2 psrlq m2, 32 movd [r1+r2*1], m2 @@ -321,7 +327,7 @@ cglobal pred8x8_vertical_10_sse2, 2,2 ;----------------------------------------------------------------------------- INIT_XMM cglobal pred8x8_horizontal_10_sse2, 2,3 - mov r2, 4 + mov r2d, 4 .loop: movq m0, [r0+r1*0-8] movq m1, [r0+r1*1-8] @@ -332,6 +338,871 @@ cglobal pred8x8_horizontal_10_sse2, 2,3 mova [r0+r1*0], m0 mova [r0+r1*1], m1 lea r0, [r0+r1*2] - dec r2 + dec r2d + jg .loop + REP_RET + +;----------------------------------------------------------------------------- +; void predict_8x8_dc(pixel *src, int stride) +;----------------------------------------------------------------------------- +%macro MOV8 2-3 +; sort of a hack, but it works +%if mmsize==8 + movq [%1+0], %2 + movq [%1+8], %3 +%else + movdqa [%1], %2 +%endif +%endmacro + +%macro PRED8x8_DC 2 +cglobal pred8x8_dc_10_%1, 2,6 + sub r0, r1 + pxor m4, m4 + movq m0, [r0+0] + movq m1, [r0+8] +%if mmsize==16 + punpcklwd m0, m1 + movhlps m1, m0 + paddw m0, m1 +%else + pshufw m2, m0, 00001110b + pshufw m3, m1, 00001110b + paddw m0, m2 + paddw m1, m3 + punpcklwd m0, m1 +%endif + %2 m2, m0, 00001110b + paddw m0, m2 + + lea r5, [r1*3] + lea r4, [r0+r1*4] + movzx r2d, word [r0+r1*1-2] + movzx r3d, word [r0+r1*2-2] + add r2d, r3d + movzx r3d, word [r0+r5*1-2] + add r2d, r3d + movzx r3d, word [r4-2] + add r2d, r3d + movd m2, r2d ; s2 + + movzx r2d, word [r4+r1*1-2] + movzx r3d, word [r4+r1*2-2] + add r2d, r3d + movzx r3d, word [r4+r5*1-2] + add r2d, r3d + movzx r3d, word [r4+r1*4-2] + add r2d, r3d + movd m3, r2d ; s3 + + punpcklwd m2, m3 + punpckldq m0, m2 ; s0, s1, s2, s3 + %2 m3, m0, 11110110b ; s2, s1, s3, s3 + %2 m0, m0, 01110100b ; s0, s1, s3, s1 + paddw m0, m3 + psrlw m0, 2 + pavgw m0, m4 ; s0+s2, s1, s3, s1+s3 +%if mmsize==16 + punpcklwd m0, m0 + pshufd m3, m0, 11111010b + punpckldq m0, m0 + SWAP 0,1 +%else + pshufw m1, m0, 0x00 + pshufw m2, m0, 0x55 + pshufw m3, m0, 0xaa + pshufw m4, m0, 0xff +%endif + MOV8 r0+r1*1, m1, m2 + MOV8 r0+r1*2, m1, m2 + MOV8 r0+r5*1, m1, m2 + MOV8 r0+r1*4, m1, m2 + MOV8 r4+r1*1, m3, m4 + MOV8 
r4+r1*2, m3, m4 + MOV8 r4+r5*1, m3, m4 + MOV8 r4+r1*4, m3, m4 + RET +%endmacro + +INIT_MMX +PRED8x8_DC mmxext, pshufw +INIT_XMM +PRED8x8_DC sse2 , pshuflw + +;----------------------------------------------------------------------------- +; void pred8x8_top_dc(pixel *src, int stride) +;----------------------------------------------------------------------------- +INIT_XMM +cglobal pred8x8_top_dc_10_sse2, 2,4 + sub r0, r1 + mova m0, [r0] + pshuflw m1, m0, 0x4e + pshufhw m1, m1, 0x4e + paddw m0, m1 + pshuflw m1, m0, 0xb1 + pshufhw m1, m1, 0xb1 + paddw m0, m1 + lea r2, [r1*3] + lea r3, [r0+r1*4] + paddw m0, [pw_2] + psrlw m0, 2 + mova [r0+r1*1], m0 + mova [r0+r1*2], m0 + mova [r0+r2*1], m0 + mova [r0+r1*4], m0 + mova [r3+r1*1], m0 + mova [r3+r1*2], m0 + mova [r3+r2*1], m0 + mova [r3+r1*4], m0 + RET + +;----------------------------------------------------------------------------- +; void pred8x8_plane(pixel *src, int stride) +;----------------------------------------------------------------------------- +INIT_XMM +cglobal pred8x8_plane_10_sse2, 2,7,7 + sub r0, r1 + lea r2, [r1*3] + lea r3, [r0+r1*4] + mova m2, [r0] + pmaddwd m2, [pw_m32101234] + HADDD m2, m1 + movd m0, [r0-4] + psrld m0, 14 + psubw m2, m0 ; H + movd m0, [r3+r1*4-4] + movd m1, [r0+12] + paddw m0, m1 + psllw m0, 4 ; 16*(src[7*stride-1] + src[-stride+7]) + movzx r4d, word [r3+r1*1-2] ; src[4*stride-1] + movzx r5d, word [r0+r2*1-2] ; src[2*stride-1] + sub r4d, r5d + movzx r6d, word [r3+r1*2-2] ; src[5*stride-1] + movzx r5d, word [r0+r1*2-2] ; src[1*stride-1] + sub r6d, r5d + lea r4d, [r4+r6*2] + movzx r5d, word [r3+r2*1-2] ; src[6*stride-1] + movzx r6d, word [r0+r1*1-2] ; src[0*stride-1] + sub r5d, r6d + lea r5d, [r5*3] + add r4d, r5d + movzx r6d, word [r3+r1*4-2] ; src[7*stride-1] + movzx r5d, word [r0+r1*0-2] ; src[ -stride-1] + sub r6d, r5d + lea r4d, [r4+r6*4] + movd m3, r4d ; V + punpckldq m2, m3 + pmaddwd m2, [pd_17] + paddd m2, [pd_16] + psrad m2, 5 ; b, c + + mova m3, [pw_pixel_max] + pxor m1, m1 + SPLATW m0, m0, 1 + SPLATW m4, m2, 2 + SPLATW m2, m2, 0 + pmullw m2, [pw_m32101234] ; b + pmullw m5, m4, [pw_m3] ; c + paddw m5, [pw_16] + mov r2d, 8 + add r0, r1 +.loop: + paddsw m6, m2, m5 + paddsw m6, m0 + psraw m6, 5 + CLIPW m6, m1, m3 + mova [r0], m6 + paddw m5, m4 + add r0, r1 + dec r2d jg .loop REP_RET + + +;----------------------------------------------------------------------------- +; void pred8x8l_128_dc(pixel *src, int has_topleft, int has_topright, int stride) +;----------------------------------------------------------------------------- +%macro PRED8x8L_128_DC 1 +cglobal pred8x8l_128_dc_10_%1, 4,4 + mova m0, [pw_512] ; (1<<(BIT_DEPTH-1)) + lea r1, [r3*3] + lea r2, [r0+r3*4] + MOV8 r0+r3*0, m0, m0 + MOV8 r0+r3*1, m0, m0 + MOV8 r0+r3*2, m0, m0 + MOV8 r0+r1*1, m0, m0 + MOV8 r2+r3*0, m0, m0 + MOV8 r2+r3*1, m0, m0 + MOV8 r2+r3*2, m0, m0 + MOV8 r2+r1*1, m0, m0 + RET +%endmacro + +INIT_MMX +PRED8x8L_128_DC mmxext +INIT_XMM +PRED8x8L_128_DC sse2 + +;----------------------------------------------------------------------------- +; void pred8x8l_top_dc(pixel *src, int has_topleft, int has_topright, int stride) +;----------------------------------------------------------------------------- +%macro PRED8x8L_TOP_DC 1 +cglobal pred8x8l_top_dc_10_%1, 4,4,6 + sub r0, r3 + mova m0, [r0] + shr r1d, 14 + shr r2d, 13 + neg r1 + pslldq m1, m0, 2 + psrldq m2, m0, 2 + pinsrw m1, [r0+r1], 0 + pinsrw m2, [r0+r2+14], 7 + lea r1, [r3*3] + lea r2, [r0+r3*4] + PRED4x4_LOWPASS m0, m2, m1, m0 + HADDW m0, m1 + paddw m0, [pw_4] + psrlw m0, 3 + 
SPLATW m0, m0, 0 + mova [r0+r3*1], m0 + mova [r0+r3*2], m0 + mova [r0+r1*1], m0 + mova [r0+r3*4], m0 + mova [r2+r3*1], m0 + mova [r2+r3*2], m0 + mova [r2+r1*1], m0 + mova [r2+r3*4], m0 + RET +%endmacro + +INIT_XMM +PRED8x8L_TOP_DC sse2 +%ifdef HAVE_AVX +INIT_AVX +PRED8x8L_TOP_DC avx +%endif + +;----------------------------------------------------------------------------- +;void pred8x8l_dc(pixel *src, int has_topleft, int has_topright, int stride) +;----------------------------------------------------------------------------- +;TODO: see if scalar is faster +%macro PRED8x8L_DC 1 +cglobal pred8x8l_dc_10_%1, 4,6,6 + sub r0, r3 + lea r4, [r0+r3*4] + lea r5, [r3*3] + mova m0, [r0+r3*2-16] + punpckhwd m0, [r0+r3*1-16] + mova m1, [r4+r3*0-16] + punpckhwd m1, [r0+r5*1-16] + punpckhdq m1, m0 + mova m2, [r4+r3*2-16] + punpckhwd m2, [r4+r3*1-16] + mova m3, [r4+r3*4-16] + punpckhwd m3, [r4+r5*1-16] + punpckhdq m3, m2 + punpckhqdq m3, m1 + mova m0, [r0] + shr r1d, 14 + shr r2d, 13 + neg r1 + pslldq m1, m0, 2 + psrldq m2, m0, 2 + pinsrw m1, [r0+r1], 0 + pinsrw m2, [r0+r2+14], 7 + not r1 + and r1, r3 + pslldq m4, m3, 2 + psrldq m5, m3, 2 + pshuflw m4, m4, 11100101b + pinsrw m5, [r0+r1-2], 7 + PRED4x4_LOWPASS m3, m4, m5, m3 + PRED4x4_LOWPASS m0, m2, m1, m0 + paddw m0, m3 + HADDW m0, m1 + paddw m0, [pw_8] + psrlw m0, 4 + SPLATW m0, m0 + mova [r0+r3*1], m0 + mova [r0+r3*2], m0 + mova [r0+r5*1], m0 + mova [r0+r3*4], m0 + mova [r4+r3*1], m0 + mova [r4+r3*2], m0 + mova [r4+r5*1], m0 + mova [r4+r3*4], m0 + RET +%endmacro + +INIT_XMM +PRED8x8L_DC sse2 +%ifdef HAVE_AVX +INIT_AVX +PRED8x8L_DC avx +%endif + +;----------------------------------------------------------------------------- +; void pred8x8l_vertical(pixel *src, int has_topleft, int has_topright, int stride) +;----------------------------------------------------------------------------- +%macro PRED8x8L_VERTICAL 1 +cglobal pred8x8l_vertical_10_%1, 4,4,6 + sub r0, r3 + mova m0, [r0] + shr r1d, 14 + shr r2d, 13 + neg r1 + pslldq m1, m0, 2 + psrldq m2, m0, 2 + pinsrw m1, [r0+r1], 0 + pinsrw m2, [r0+r2+14], 7 + lea r1, [r3*3] + lea r2, [r0+r3*4] + PRED4x4_LOWPASS m0, m2, m1, m0 + mova [r0+r3*1], m0 + mova [r0+r3*2], m0 + mova [r0+r1*1], m0 + mova [r0+r3*4], m0 + mova [r2+r3*1], m0 + mova [r2+r3*2], m0 + mova [r2+r1*1], m0 + mova [r2+r3*4], m0 + RET +%endmacro + +INIT_XMM +PRED8x8L_VERTICAL sse2 +%ifdef HAVE_AVX +INIT_AVX +PRED8x8L_VERTICAL avx +%endif + +;----------------------------------------------------------------------------- +; void pred8x8l_horizontal(uint8_t *src, int has_topleft, int has_topright, int stride) +;----------------------------------------------------------------------------- +%macro PRED8x8L_HORIZONTAL 1 +cglobal pred8x8l_horizontal_10_%1, 4,4,5 + mova m0, [r0-16] + shr r1d, 14 + dec r1 + and r1, r3 + sub r1, r3 + punpckhwd m0, [r0+r1-16] + mova m1, [r0+r3*2-16] + punpckhwd m1, [r0+r3*1-16] + lea r2, [r0+r3*4] + lea r1, [r3*3] + punpckhdq m1, m0 + mova m2, [r2+r3*0-16] + punpckhwd m2, [r0+r1-16] + mova m3, [r2+r3*2-16] + punpckhwd m3, [r2+r3*1-16] + punpckhdq m3, m2 + punpckhqdq m3, m1 + PALIGNR m4, m3, [r2+r1-16], 14, m0 + pslldq m0, m4, 2 + pshuflw m0, m0, 11100101b + PRED4x4_LOWPASS m4, m3, m0, m4 + punpckhwd m3, m4, m4 + punpcklwd m4, m4 + pshufd m0, m3, 0xff + pshufd m1, m3, 0xaa + pshufd m2, m3, 0x55 + pshufd m3, m3, 0x00 + mova [r0+r3*0], m0 + mova [r0+r3*1], m1 + mova [r0+r3*2], m2 + mova [r0+r1*1], m3 + pshufd m0, m4, 0xff + pshufd m1, m4, 0xaa + pshufd m2, m4, 0x55 + pshufd m3, m4, 0x00 + mova [r2+r3*0], m0 + mova [r2+r3*1], 
m1 + mova [r2+r3*2], m2 + mova [r2+r1*1], m3 + RET +%endmacro + +INIT_XMM +%define PALIGNR PALIGNR_MMX +PRED8x8L_HORIZONTAL sse2 +%define PALIGNR PALIGNR_SSSE3 +PRED8x8L_HORIZONTAL ssse3 +%ifdef HAVE_AVX +INIT_AVX +PRED8x8L_HORIZONTAL avx +%endif + +;----------------------------------------------------------------------------- +;void pred8x8l_down_left(pixel *src, int has_topleft, int has_topright, int stride) +;----------------------------------------------------------------------------- +%macro PRED8x8L_DOWN_LEFT 1 +cglobal pred8x8l_down_left_10_%1, 4,4,7 + sub r0, r3 + mova m3, [r0] + shr r1d, 14 + neg r1 + shr r2d, 13 + pslldq m1, m3, 2 + psrldq m2, m3, 2 + pinsrw m1, [r0+r1], 0 + pinsrw m2, [r0+r2+14], 7 + PRED4x4_LOWPASS m6, m2, m1, m3 + jz .fix_tr ; flags from shr r2d + mova m1, [r0+16] + psrldq m5, m1, 2 + PALIGNR m2, m1, m3, 14, m3 + pshufhw m5, m5, 10100100b + PRED4x4_LOWPASS m1, m2, m5, m1 +.do_topright: + lea r1, [r3*3] + psrldq m5, m1, 14 + lea r2, [r0+r3*4] + PALIGNR m2, m1, m6, 2, m0 + PALIGNR m3, m1, m6, 14, m0 + PALIGNR m5, m1, 2, m0 + pslldq m4, m6, 2 + PRED4x4_LOWPASS m6, m4, m2, m6 + PRED4x4_LOWPASS m1, m3, m5, m1 + mova [r2+r3*4], m1 + PALIGNR m1, m6, 14, m2 + pslldq m6, 2 + mova [r2+r1*1], m1 + PALIGNR m1, m6, 14, m2 + pslldq m6, 2 + mova [r2+r3*2], m1 + PALIGNR m1, m6, 14, m2 + pslldq m6, 2 + mova [r2+r3*1], m1 + PALIGNR m1, m6, 14, m2 + pslldq m6, 2 + mova [r0+r3*4], m1 + PALIGNR m1, m6, 14, m2 + pslldq m6, 2 + mova [r0+r1*1], m1 + PALIGNR m1, m6, 14, m2 + pslldq m6, 2 + mova [r0+r3*2], m1 + PALIGNR m1, m6, 14, m6 + mova [r0+r3*1], m1 + RET +.fix_tr: + punpckhwd m3, m3 + pshufd m1, m3, 0xFF + jmp .do_topright +%endmacro + +INIT_XMM +%define PALIGNR PALIGNR_MMX +PRED8x8L_DOWN_LEFT sse2 +%define PALIGNR PALIGNR_SSSE3 +PRED8x8L_DOWN_LEFT ssse3 +%ifdef HAVE_AVX +INIT_AVX +PRED8x8L_DOWN_LEFT avx +%endif + +;----------------------------------------------------------------------------- +;void pred8x8l_down_right(pixel *src, int has_topleft, int has_topright, int stride) +;----------------------------------------------------------------------------- +%macro PRED8x8L_DOWN_RIGHT 1 +; standard forbids this when has_topleft is false +; no need to check +cglobal pred8x8l_down_right_10_%1, 4,5,8 + sub r0, r3 + lea r4, [r0+r3*4] + lea r1, [r3*3] + mova m0, [r0+r3*1-16] + punpckhwd m0, [r0+r3*0-16] + mova m1, [r0+r1*1-16] + punpckhwd m1, [r0+r3*2-16] + punpckhdq m1, m0 + mova m2, [r4+r3*1-16] + punpckhwd m2, [r4+r3*0-16] + mova m3, [r4+r1*1-16] + punpckhwd m3, [r4+r3*2-16] + punpckhdq m3, m2 + punpckhqdq m3, m1 + mova m0, [r4+r3*4-16] + mova m1, [r0] + PALIGNR m4, m3, m0, 14, m0 + PALIGNR m1, m3, 2, m2 + pslldq m0, m4, 2 + pshuflw m0, m0, 11100101b + PRED4x4_LOWPASS m6, m1, m4, m3 + PRED4x4_LOWPASS m4, m3, m0, m4 + mova m3, [r0] + shr r2d, 13 + pslldq m1, m3, 2 + psrldq m2, m3, 2 + pinsrw m1, [r0-2], 0 + pinsrw m2, [r0+r2+14], 7 + PRED4x4_LOWPASS m3, m2, m1, m3 + PALIGNR m2, m3, m6, 2, m0 + PALIGNR m5, m3, m6, 14, m0 + psrldq m7, m3, 2 + PRED4x4_LOWPASS m6, m4, m2, m6 + PRED4x4_LOWPASS m3, m5, m7, m3 + mova [r4+r3*4], m6 + PALIGNR m3, m6, 14, m2 + pslldq m6, 2 + mova [r0+r3*1], m3 + PALIGNR m3, m6, 14, m2 + pslldq m6, 2 + mova [r0+r3*2], m3 + PALIGNR m3, m6, 14, m2 + pslldq m6, 2 + mova [r0+r1*1], m3 + PALIGNR m3, m6, 14, m2 + pslldq m6, 2 + mova [r0+r3*4], m3 + PALIGNR m3, m6, 14, m2 + pslldq m6, 2 + mova [r4+r3*1], m3 + PALIGNR m3, m6, 14, m2 + pslldq m6, 2 + mova [r4+r3*2], m3 + PALIGNR m3, m6, 14, m6 + mova [r4+r1*1], m3 + RET +%endmacro + +INIT_XMM +%define PALIGNR PALIGNR_MMX 
+PRED8x8L_DOWN_RIGHT sse2 +%define PALIGNR PALIGNR_SSSE3 +PRED8x8L_DOWN_RIGHT ssse3 +%ifdef HAVE_AVX +INIT_AVX +PRED8x8L_DOWN_RIGHT avx +%endif + +;----------------------------------------------------------------------------- +; void pred8x8l_vertical_right(pixel *src, int has_topleft, int has_topright, int stride) +;----------------------------------------------------------------------------- +%macro PRED8x8L_VERTICAL_RIGHT 1 +; likewise with 8x8l_down_right +cglobal pred8x8l_vertical_right_10_%1, 4,5,7 + sub r0, r3 + lea r4, [r0+r3*4] + lea r1, [r3*3] + mova m0, [r0+r3*1-16] + punpckhwd m0, [r0+r3*0-16] + mova m1, [r0+r1*1-16] + punpckhwd m1, [r0+r3*2-16] + punpckhdq m1, m0 + mova m2, [r4+r3*1-16] + punpckhwd m2, [r4+r3*0-16] + mova m3, [r4+r1*1-16] + punpckhwd m3, [r4+r3*2-16] + punpckhdq m3, m2 + punpckhqdq m3, m1 + mova m0, [r4+r3*4-16] + mova m1, [r0] + PALIGNR m4, m3, m0, 14, m0 + PALIGNR m1, m3, 2, m2 + PRED4x4_LOWPASS m3, m1, m4, m3 + mova m2, [r0] + shr r2d, 13 + pslldq m1, m2, 2 + psrldq m5, m2, 2 + pinsrw m1, [r0-2], 0 + pinsrw m5, [r0+r2+14], 7 + PRED4x4_LOWPASS m2, m5, m1, m2 + PALIGNR m6, m2, m3, 12, m1 + PALIGNR m5, m2, m3, 14, m0 + PRED4x4_LOWPASS m0, m6, m2, m5 + pavgw m2, m5 + mova [r0+r3*2], m0 + mova [r0+r3*1], m2 + pslldq m6, m3, 4 + pslldq m1, m3, 2 + PRED4x4_LOWPASS m1, m3, m6, m1 + PALIGNR m2, m1, 14, m4 + mova [r0+r1*1], m2 + pslldq m1, 2 + PALIGNR m0, m1, 14, m3 + mova [r0+r3*4], m0 + pslldq m1, 2 + PALIGNR m2, m1, 14, m4 + mova [r4+r3*1], m2 + pslldq m1, 2 + PALIGNR m0, m1, 14, m3 + mova [r4+r3*2], m0 + pslldq m1, 2 + PALIGNR m2, m1, 14, m4 + mova [r4+r1*1], m2 + pslldq m1, 2 + PALIGNR m0, m1, 14, m1 + mova [r4+r3*4], m0 + RET +%endmacro + +INIT_XMM +%define PALIGNR PALIGNR_MMX +PRED8x8L_VERTICAL_RIGHT sse2 +%define PALIGNR PALIGNR_SSSE3 +PRED8x8L_VERTICAL_RIGHT ssse3 +%ifdef HAVE_AVX +INIT_AVX +PRED8x8L_VERTICAL_RIGHT avx +%endif + +;----------------------------------------------------------------------------- +; void pred8x8l_horizontal_up(pixel *src, int has_topleft, int has_topright, int stride) +;----------------------------------------------------------------------------- +%macro PRED8x8L_HORIZONTAL_UP 1 +cglobal pred8x8l_horizontal_up_10_%1, 4,4,6 + mova m0, [r0+r3*0-16] + punpckhwd m0, [r0+r3*1-16] + shr r1d, 14 + dec r1 + and r1, r3 + sub r1, r3 + mova m4, [r0+r1*1-16] + lea r1, [r3*3] + lea r2, [r0+r3*4] + mova m1, [r0+r3*2-16] + punpckhwd m1, [r0+r1*1-16] + punpckhdq m0, m1 + mova m2, [r2+r3*0-16] + punpckhwd m2, [r2+r3*1-16] + mova m3, [r2+r3*2-16] + punpckhwd m3, [r2+r1*1-16] + punpckhdq m2, m3 + punpckhqdq m0, m2 + PALIGNR m1, m0, m4, 14, m4 + psrldq m2, m0, 2 + pshufhw m2, m2, 10100100b + PRED4x4_LOWPASS m0, m1, m2, m0 + psrldq m1, m0, 2 + psrldq m2, m0, 4 + pshufhw m1, m1, 10100100b + pshufhw m2, m2, 01010100b + pavgw m4, m0, m1 + PRED4x4_LOWPASS m1, m2, m0, m1 + punpckhwd m5, m4, m1 + punpcklwd m4, m1 + mova [r2+r3*0], m5 + mova [r0+r3*0], m4 + pshufd m0, m5, 11111001b + pshufd m1, m5, 11111110b + pshufd m2, m5, 11111111b + mova [r2+r3*1], m0 + mova [r2+r3*2], m1 + mova [r2+r1*1], m2 + PALIGNR m2, m5, m4, 4, m0 + PALIGNR m3, m5, m4, 8, m1 + PALIGNR m5, m5, m4, 12, m4 + mova [r0+r3*1], m2 + mova [r0+r3*2], m3 + mova [r0+r1*1], m5 + RET +%endmacro + +INIT_XMM +%define PALIGNR PALIGNR_MMX +PRED8x8L_HORIZONTAL_UP sse2 +%define PALIGNR PALIGNR_SSSE3 +PRED8x8L_HORIZONTAL_UP ssse3 +%ifdef HAVE_AVX +INIT_AVX +PRED8x8L_HORIZONTAL_UP avx +%endif + + +;----------------------------------------------------------------------------- +; void 
pred16x16_vertical(pixel *src, int stride) +;----------------------------------------------------------------------------- +%macro MOV16 3-5 + mova [%1+ 0], %2 + mova [%1+mmsize], %3 +%if mmsize==8 + mova [%1+ 16], %4 + mova [%1+ 24], %5 +%endif +%endmacro + +%macro PRED16x16_VERTICAL 1 +cglobal pred16x16_vertical_10_%1, 2,3 + sub r0, r1 + mov r2d, 8 + mova m0, [r0+ 0] + mova m1, [r0+mmsize] +%if mmsize==8 + mova m2, [r0+16] + mova m3, [r0+24] +%endif +.loop: + MOV16 r0+r1*1, m0, m1, m2, m3 + MOV16 r0+r1*2, m0, m1, m2, m3 + lea r0, [r0+r1*2] + dec r2d + jg .loop + REP_RET +%endmacro + +INIT_MMX +PRED16x16_VERTICAL mmxext +INIT_XMM +PRED16x16_VERTICAL sse2 + +;----------------------------------------------------------------------------- +; void pred16x16_horizontal(pixel *src, int stride) +;----------------------------------------------------------------------------- +%macro PRED16x16_HORIZONTAL 1 +cglobal pred16x16_horizontal_10_%1, 2,3 + mov r2d, 8 +.vloop: + movd m0, [r0+r1*0-4] + movd m1, [r0+r1*1-4] + SPLATW m0, m0, 1 + SPLATW m1, m1, 1 + MOV16 r0+r1*0, m0, m0, m0, m0 + MOV16 r0+r1*1, m1, m1, m1, m1 + lea r0, [r0+r1*2] + dec r2d + jg .vloop + REP_RET +%endmacro + +INIT_MMX +PRED16x16_HORIZONTAL mmxext +INIT_XMM +PRED16x16_HORIZONTAL sse2 + +;----------------------------------------------------------------------------- +; void pred16x16_dc(pixel *src, int stride) +;----------------------------------------------------------------------------- +%macro PRED16x16_DC 1 +cglobal pred16x16_dc_10_%1, 2,6 + mov r5, r0 + sub r0, r1 + mova m0, [r0+0] + paddw m0, [r0+mmsize] +%if mmsize==8 + paddw m0, [r0+16] + paddw m0, [r0+24] +%endif + HADDW m0, m2 + + lea r0, [r0+r1-2] + movzx r3d, word [r0] + movzx r4d, word [r0+r1] +%rep 7 + lea r0, [r0+r1*2] + movzx r2d, word [r0] + add r3d, r2d + movzx r2d, word [r0+r1] + add r4d, r2d +%endrep + lea r3d, [r3+r4+16] + + movd m1, r3d + paddw m0, m1 + psrlw m0, 5 + SPLATW m0, m0 + mov r3d, 8 +.loop: + MOV16 r5+r1*0, m0, m0, m0, m0 + MOV16 r5+r1*1, m0, m0, m0, m0 + lea r5, [r5+r1*2] + dec r3d + jg .loop + REP_RET +%endmacro + +INIT_MMX +PRED16x16_DC mmxext +INIT_XMM +PRED16x16_DC sse2 + +;----------------------------------------------------------------------------- +; void pred16x16_top_dc(pixel *src, int stride) +;----------------------------------------------------------------------------- +%macro PRED16x16_TOP_DC 1 +cglobal pred16x16_top_dc_10_%1, 2,3 + sub r0, r1 + mova m0, [r0+0] + paddw m0, [r0+mmsize] +%if mmsize==8 + paddw m0, [r0+16] + paddw m0, [r0+24] +%endif + HADDW m0, m2 + + SPLATW m0, m0 + paddw m0, [pw_8] + psrlw m0, 4 + mov r2d, 8 +.loop: + MOV16 r0+r1*1, m0, m0, m0, m0 + MOV16 r0+r1*2, m0, m0, m0, m0 + lea r0, [r0+r1*2] + dec r2d + jg .loop + REP_RET +%endmacro + +INIT_MMX +PRED16x16_TOP_DC mmxext +INIT_XMM +PRED16x16_TOP_DC sse2 + +;----------------------------------------------------------------------------- +; void pred16x16_left_dc(pixel *src, int stride) +;----------------------------------------------------------------------------- +%macro PRED16x16_LEFT_DC 1 +cglobal pred16x16_left_dc_10_%1, 2,6 + mov r5, r0 + + sub r0, 2 + movzx r3d, word [r0] + movzx r4d, word [r0+r1] +%rep 7 + lea r0, [r0+r1*2] + movzx r2d, word [r0] + add r3d, r2d + movzx r2d, word [r0+r1] + add r4d, r2d +%endrep + lea r3d, [r3+r4+8] + shr r3d, 4 + + movd m0, r3d + SPLATW m0, m0 + mov r3d, 8 +.loop: + MOV16 r5+r1*0, m0, m0, m0, m0 + MOV16 r5+r1*1, m0, m0, m0, m0 + lea r5, [r5+r1*2] + dec r3d + jg .loop + REP_RET +%endmacro + +INIT_MMX +PRED16x16_LEFT_DC mmxext 
+INIT_XMM +PRED16x16_LEFT_DC sse2 + +;----------------------------------------------------------------------------- +; void pred16x16_128_dc(pixel *src, int stride) +;----------------------------------------------------------------------------- +%macro PRED16x16_128_DC 1 +cglobal pred16x16_128_dc_10_%1, 2,3 + mova m0, [pw_512] + mov r2d, 8 +.loop: + MOV16 r0+r1*0, m0, m0, m0, m0 + MOV16 r0+r1*1, m0, m0, m0, m0 + lea r0, [r0+r1*2] + dec r2d + jg .loop + REP_RET +%endmacro + +INIT_MMX +PRED16x16_128_DC mmxext +INIT_XMM +PRED16x16_128_DC sse2 diff --git a/libavcodec/x86/h264_intrapred_init.c b/libavcodec/x86/h264_intrapred_init.c index d71f71e861..7220dd75c1 100644 --- a/libavcodec/x86/h264_intrapred_init.c +++ b/libavcodec/x86/h264_intrapred_init.c @@ -43,9 +43,56 @@ PRED4x4(horizontal_down, 10, avx) #define PRED8x8(TYPE, DEPTH, OPT) \ void ff_pred8x8_ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *src, int stride); +PRED8x8(dc, 10, mmxext) +PRED8x8(dc, 10, sse2) +PRED8x8(top_dc, 10, sse2) +PRED8x8(plane, 10, sse2) PRED8x8(vertical, 10, sse2) PRED8x8(horizontal, 10, sse2) +#define PRED8x8L(TYPE, DEPTH, OPT)\ +void ff_pred8x8l_ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *src, int has_topleft, int has_topright, int stride); + +PRED8x8L(dc, 10, sse2) +PRED8x8L(dc, 10, avx) +PRED8x8L(128_dc, 10, mmxext) +PRED8x8L(128_dc, 10, sse2) +PRED8x8L(top_dc, 10, sse2) +PRED8x8L(top_dc, 10, avx) +PRED8x8L(vertical, 10, sse2) +PRED8x8L(vertical, 10, avx) +PRED8x8L(horizontal, 10, sse2) +PRED8x8L(horizontal, 10, ssse3) +PRED8x8L(horizontal, 10, avx) +PRED8x8L(down_left, 10, sse2) +PRED8x8L(down_left, 10, ssse3) +PRED8x8L(down_left, 10, avx) +PRED8x8L(down_right, 10, sse2) +PRED8x8L(down_right, 10, ssse3) +PRED8x8L(down_right, 10, avx) +PRED8x8L(vertical_right, 10, sse2) +PRED8x8L(vertical_right, 10, ssse3) +PRED8x8L(vertical_right, 10, avx) +PRED8x8L(horizontal_up, 10, sse2) +PRED8x8L(horizontal_up, 10, ssse3) +PRED8x8L(horizontal_up, 10, avx) + +#define PRED16x16(TYPE, DEPTH, OPT)\ +void ff_pred16x16_ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *src, int stride); + +PRED16x16(dc, 10, mmxext) +PRED16x16(dc, 10, sse2) +PRED16x16(top_dc, 10, mmxext) +PRED16x16(top_dc, 10, sse2) +PRED16x16(128_dc, 10, mmxext) +PRED16x16(128_dc, 10, sse2) +PRED16x16(left_dc, 10, mmxext) +PRED16x16(left_dc, 10, sse2) +PRED16x16(vertical, 10, mmxext) +PRED16x16(vertical, 10, sse2) +PRED16x16(horizontal, 10, mmxext) +PRED16x16(horizontal, 10, sse2) + void ff_pred16x16_vertical_mmx (uint8_t *src, int stride); void ff_pred16x16_vertical_sse (uint8_t *src, int stride); void ff_pred16x16_horizontal_mmx (uint8_t *src, int stride); @@ -253,6 +300,17 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth if (mm_flags & AV_CPU_FLAG_MMX2) { h->pred4x4[DC_PRED ] = ff_pred4x4_dc_10_mmxext; h->pred4x4[HOR_UP_PRED ] = ff_pred4x4_horizontal_up_10_mmxext; + + h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_10_mmxext; + + h->pred8x8l[DC_128_PRED ] = ff_pred8x8l_128_dc_10_mmxext; + + h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_10_mmxext; + h->pred16x16[TOP_DC_PRED8x8 ] = ff_pred16x16_top_dc_10_mmxext; + h->pred16x16[DC_128_PRED8x8 ] = ff_pred16x16_128_dc_10_mmxext; + h->pred16x16[LEFT_DC_PRED8x8 ] = ff_pred16x16_left_dc_10_mmxext; + h->pred16x16[VERT_PRED8x8 ] = ff_pred16x16_vertical_10_mmxext; + h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_10_mmxext; } if (mm_flags & AV_CPU_FLAG_SSE2) { h->pred4x4[DIAG_DOWN_LEFT_PRED ] = ff_pred4x4_down_left_10_sse2; @@ -261,20 +319,56 @@ void ff_h264_pred_init_x86(H264PredContext *h, int 
codec_id, const int bit_depth h->pred4x4[VERT_RIGHT_PRED ] = ff_pred4x4_vertical_right_10_sse2; h->pred4x4[HOR_DOWN_PRED ] = ff_pred4x4_horizontal_down_10_sse2; + h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_10_sse2; + h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_10_sse2; + h->pred8x8[PLANE_PRED8x8 ] = ff_pred8x8_plane_10_sse2; h->pred8x8[VERT_PRED8x8 ] = ff_pred8x8_vertical_10_sse2; h->pred8x8[HOR_PRED8x8 ] = ff_pred8x8_horizontal_10_sse2; + + h->pred8x8l[VERT_PRED ] = ff_pred8x8l_vertical_10_sse2; + h->pred8x8l[HOR_PRED ] = ff_pred8x8l_horizontal_10_sse2; + h->pred8x8l[DC_PRED ] = ff_pred8x8l_dc_10_sse2; + h->pred8x8l[DC_128_PRED ] = ff_pred8x8l_128_dc_10_sse2; + h->pred8x8l[TOP_DC_PRED ] = ff_pred8x8l_top_dc_10_sse2; + h->pred8x8l[DIAG_DOWN_LEFT_PRED ] = ff_pred8x8l_down_left_10_sse2; + h->pred8x8l[DIAG_DOWN_RIGHT_PRED] = ff_pred8x8l_down_right_10_sse2; + h->pred8x8l[VERT_RIGHT_PRED ] = ff_pred8x8l_vertical_right_10_sse2; + h->pred8x8l[HOR_UP_PRED ] = ff_pred8x8l_horizontal_up_10_sse2; + + h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_10_sse2; + h->pred16x16[TOP_DC_PRED8x8 ] = ff_pred16x16_top_dc_10_sse2; + h->pred16x16[DC_128_PRED8x8 ] = ff_pred16x16_128_dc_10_sse2; + h->pred16x16[LEFT_DC_PRED8x8 ] = ff_pred16x16_left_dc_10_sse2; + h->pred16x16[VERT_PRED8x8 ] = ff_pred16x16_vertical_10_sse2; + h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_10_sse2; } if (mm_flags & AV_CPU_FLAG_SSSE3) { h->pred4x4[DIAG_DOWN_RIGHT_PRED] = ff_pred4x4_down_right_10_ssse3; h->pred4x4[VERT_RIGHT_PRED ] = ff_pred4x4_vertical_right_10_ssse3; h->pred4x4[HOR_DOWN_PRED ] = ff_pred4x4_horizontal_down_10_ssse3; + + h->pred8x8l[HOR_PRED ] = ff_pred8x8l_horizontal_10_ssse3; + h->pred8x8l[DIAG_DOWN_LEFT_PRED ] = ff_pred8x8l_down_left_10_ssse3; + h->pred8x8l[DIAG_DOWN_RIGHT_PRED] = ff_pred8x8l_down_right_10_ssse3; + h->pred8x8l[VERT_RIGHT_PRED ] = ff_pred8x8l_vertical_right_10_ssse3; + h->pred8x8l[HOR_UP_PRED ] = ff_pred8x8l_horizontal_up_10_ssse3; } #if HAVE_AVX if (mm_flags & AV_CPU_FLAG_AVX) { h->pred4x4[DIAG_DOWN_LEFT_PRED ] = ff_pred4x4_down_left_10_avx; h->pred4x4[DIAG_DOWN_RIGHT_PRED] = ff_pred4x4_down_right_10_avx; + h->pred4x4[VERT_LEFT_PRED ] = ff_pred4x4_vertical_left_10_avx; h->pred4x4[VERT_RIGHT_PRED ] = ff_pred4x4_vertical_right_10_avx; h->pred4x4[HOR_DOWN_PRED ] = ff_pred4x4_horizontal_down_10_avx; + + h->pred8x8l[VERT_PRED ] = ff_pred8x8l_vertical_10_avx; + h->pred8x8l[HOR_PRED ] = ff_pred8x8l_horizontal_10_avx; + h->pred8x8l[DC_PRED ] = ff_pred8x8l_dc_10_avx; + h->pred8x8l[TOP_DC_PRED ] = ff_pred8x8l_top_dc_10_avx; + h->pred8x8l[DIAG_DOWN_RIGHT_PRED] = ff_pred8x8l_down_right_10_avx; + h->pred8x8l[DIAG_DOWN_LEFT_PRED ] = ff_pred8x8l_down_left_10_avx; + h->pred8x8l[VERT_RIGHT_PRED ] = ff_pred8x8l_vertical_right_10_avx; + h->pred8x8l[HOR_UP_PRED ] = ff_pred8x8l_horizontal_up_10_avx; } #endif /* HAVE_AVX */ } diff --git a/libavcodec/x86/h264_qpel_10bit.asm b/libavcodec/x86/h264_qpel_10bit.asm new file mode 100644 index 0000000000..15dd72ca36 --- /dev/null +++ b/libavcodec/x86/h264_qpel_10bit.asm @@ -0,0 +1,891 @@ +;***************************************************************************** +;* MMX/SSE2/AVX-optimized 10-bit H.264 qpel code +;***************************************************************************** +;* Copyright (C) 2011 x264 project +;* +;* Authors: Daniel Kang <daniel.d.kang@gmail.com> +;* +;* This file is part of Libav. 
+;* +;* Libav is free software; you can redistribute it and/or +;* modify it under the terms of the GNU Lesser General Public +;* License as published by the Free Software Foundation; either +;* version 2.1 of the License, or (at your option) any later version. +;* +;* Libav is distributed in the hope that it will be useful, +;* but WITHOUT ANY WARRANTY; without even the implied warranty of +;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +;* Lesser General Public License for more details. +;* +;* You should have received a copy of the GNU Lesser General Public +;* License along with Libav; if not, write to the Free Software +;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +;****************************************************************************** + +%include "x86inc.asm" +%include "x86util.asm" + +SECTION_RODATA 32 + +cextern pw_16 +cextern pw_1 +cextern pb_0 + +pw_pixel_max: times 8 dw ((1 << 10)-1) + +pad10: times 8 dw 10*1023 +pad20: times 8 dw 20*1023 +pad30: times 8 dw 30*1023 +depad: times 4 dd 32*20*1023 + 512 +depad2: times 8 dw 20*1023 + 16*1022 + 16 +unpad: times 8 dw 16*1022/32 ; needs to be mod 16 + +tap1: times 4 dw 1, -5 +tap2: times 4 dw 20, 20 +tap3: times 4 dw -5, 1 +pd_0f: times 4 dd 0xffff + +SECTION .text + + +%macro AVG_MOV 2 + pavgw %2, %1 + mova %1, %2 +%endmacro + +%macro ADDW 3 +%if mmsize == 8 + paddw %1, %2 +%else + movu %3, %2 + paddw %1, %3 +%endif +%endmacro + +%macro FILT_H 4 + paddw %1, %4 + psubw %1, %2 ; a-b + psraw %1, 2 ; (a-b)/4 + psubw %1, %2 ; (a-b)/4-b + paddw %1, %3 ; (a-b)/4-b+c + psraw %1, 2 ; ((a-b)/4-b+c)/4 + paddw %1, %3 ; ((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16 +%endmacro + +%macro PRELOAD_V 0 + lea r3, [r2*3] + sub r1, r3 + movu m0, [r1+r2] + movu m1, [r1+r2*2] + add r1, r3 + movu m2, [r1] + movu m3, [r1+r2] + movu m4, [r1+r2*2] + add r1, r3 +%endmacro + +%macro FILT_V 8 + movu %6, [r1] + paddw %1, %6 + mova %7, %2 + paddw %7, %5 + mova %8, %3 + paddw %8, %4 + FILT_H %1, %7, %8, [pw_16] + psraw %1, 1 + CLIPW %1, [pb_0], [pw_pixel_max] +%endmacro + +%macro MC 1 +%define OP_MOV mova +INIT_MMX +%1 mmxext, put, 4 +INIT_XMM +%1 sse2 , put, 8 + +%define OP_MOV AVG_MOV +INIT_MMX +%1 mmxext, avg, 4 +INIT_XMM +%1 sse2 , avg, 8 +%endmacro + +%macro MCAxA 8 +%ifdef ARCH_X86_64 +%ifnidn %1,mmxext +MCAxA_OP %1,%2,%3,%4,%5,%6,%7,%8 +%endif +%else +MCAxA_OP %1,%2,%3,%4,%5,%6,%7,%8 +%endif +%endmacro + +%macro MCAxA_OP 8 +cglobal %2_h264_qpel%5_%3_10_%1, %6,%7,%8 +%ifdef ARCH_X86_32 + call stub_%2_h264_qpel%4_%3_10_%1 + mov r0, r0m + mov r1, r1m + add r0, %4*2 + add r1, %4*2 + call stub_%2_h264_qpel%4_%3_10_%1 + mov r0, r0m + mov r1, r1m + lea r0, [r0+r2*%4] + lea r1, [r1+r2*%4] + call stub_%2_h264_qpel%4_%3_10_%1 + mov r0, r0m + mov r1, r1m + lea r0, [r0+r2*%4+%4*2] + lea r1, [r1+r2*%4+%4*2] + call stub_%2_h264_qpel%4_%3_10_%1 + RET +%else ; ARCH_X86_64 + mov r10, r0 + mov r11, r1 + call stub_%2_h264_qpel%4_%3_10_%1 + lea r0, [r10+%4*2] + lea r1, [r11+%4*2] + call stub_%2_h264_qpel%4_%3_10_%1 + lea r0, [r10+r2*%4] + lea r1, [r11+r2*%4] + call stub_%2_h264_qpel%4_%3_10_%1 + lea r0, [r10+r2*%4+%4*2] + lea r1, [r11+r2*%4+%4*2] +%ifndef UNIX64 ; fall through to function + call stub_%2_h264_qpel%4_%3_10_%1 + RET +%endif +%endif +%endmacro + +;cpu, put/avg, mc, 4/8, ... 
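; cglobal_mc declares the public NxN entry point together with a stub_ label
; so the other MC variants can reuse the same body via call or jmp;
; MCAxA/MCAxA_OP then build the 2Nx2N function by running the NxN stub on
; each quadrant (right half: dst/src advanced by N pixels, bottom half:
; advanced by N rows of stride).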
+%macro cglobal_mc 7 +%assign i %4*2 +MCAxA %1, %2, %3, %4, i, %5,%6,%7 + +cglobal %2_h264_qpel%4_%3_10_%1, %5,%6,%7 +%ifndef UNIX64 ; no prologue or epilogue for UNIX64 + call stub_%2_h264_qpel%4_%3_10_%1 + RET +%endif + +stub_%2_h264_qpel%4_%3_10_%1: +%endmacro + +;----------------------------------------------------------------------------- +; void h264_qpel_mc00(uint8_t *dst, uint8_t *src, int stride) +;----------------------------------------------------------------------------- +%macro COPY4 0 + movu m0, [r1 ] + OP_MOV [r0 ], m0 + movu m0, [r1+r2 ] + OP_MOV [r0+r2 ], m0 + movu m0, [r1+r2*2] + OP_MOV [r0+r2*2], m0 + movu m0, [r1+r3 ] + OP_MOV [r0+r3 ], m0 +%endmacro + +%macro MC00 1 +INIT_MMX +cglobal_mc mmxext, %1, mc00, 4, 3,4,0 + lea r3, [r2*3] + COPY4 + ret + +INIT_XMM +cglobal %1_h264_qpel8_mc00_10_sse2, 3,4 + lea r3, [r2*3] + COPY4 + lea r0, [r0+r2*4] + lea r1, [r1+r2*4] + COPY4 + RET + +cglobal %1_h264_qpel16_mc00_10_sse2, 3,4 + mov r3d, 8 +.loop: + movu m0, [r1 ] + movu m1, [r1 +16] + OP_MOV [r0 ], m0 + OP_MOV [r0 +16], m1 + movu m0, [r1+r2 ] + movu m1, [r1+r2+16] + OP_MOV [r0+r2 ], m0 + OP_MOV [r0+r2+16], m1 + lea r0, [r0+r2*2] + lea r1, [r1+r2*2] + dec r3d + jg .loop + REP_RET +%endmacro + +%define OP_MOV mova +MC00 put + +%define OP_MOV AVG_MOV +MC00 avg + +;----------------------------------------------------------------------------- +; void h264_qpel_mc20(uint8_t *dst, uint8_t *src, int stride) +;----------------------------------------------------------------------------- +%macro MC_CACHE 1 +%define OP_MOV mova +%define PALIGNR PALIGNR_MMX +INIT_MMX +%1 mmxext , put, 4 +INIT_XMM +%1 sse2_cache64 , put, 8 +%define PALIGNR PALIGNR_SSSE3 +%1 ssse3_cache64, put, 8 +%1 sse2 , put, 8, 0 + +%define OP_MOV AVG_MOV +%define PALIGNR PALIGNR_MMX +INIT_MMX +%1 mmxext , avg, 4 +INIT_XMM +%1 sse2_cache64 , avg, 8 +%define PALIGNR PALIGNR_SSSE3 +%1 ssse3_cache64, avg, 8 +%1 sse2 , avg, 8, 0 +%endmacro + +%macro MC20 3-4 +cglobal_mc %1, %2, mc20, %3, 3,4,9 + mov r3d, %3 + mova m1, [pw_pixel_max] +%if num_mmregs > 8 + mova m8, [pw_16] + %define p16 m8 +%else + %define p16 [pw_16] +%endif +.nextrow +%if %0 == 4 + movu m2, [r1-4] + movu m3, [r1-2] + movu m4, [r1+0] + ADDW m2, [r1+6], m5 + ADDW m3, [r1+4], m5 + ADDW m4, [r1+2], m5 +%else ; movu is slow on these processors +%if mmsize==16 + movu m2, [r1-4] + movu m0, [r1+6] + mova m6, m0 + psrldq m0, 6 + + paddw m6, m2 + PALIGNR m3, m0, m2, 2, m5 + PALIGNR m7, m0, m2, 8, m5 + paddw m3, m7 + PALIGNR m4, m0, m2, 4, m5 + PALIGNR m7, m0, m2, 6, m5 + paddw m4, m7 + SWAP 2, 6 +%else + movu m2, [r1-4] + movu m6, [r1+4] + PALIGNR m3, m6, m2, 2, m5 + paddw m3, m6 + PALIGNR m4, m6, m2, 4, m5 + PALIGNR m7, m6, m2, 6, m5 + paddw m4, m7 + paddw m2, [r1+6] +%endif +%endif + + FILT_H m2, m3, m4, p16 + psraw m2, 1 + pxor m0, m0 + CLIPW m2, m0, m1 + OP_MOV [r0], m2 + add r0, r2 + add r1, r2 + dec r3d + jg .nextrow + rep ret +%endmacro + +MC_CACHE MC20 + +;----------------------------------------------------------------------------- +; void h264_qpel_mc30(uint8_t *dst, uint8_t *src, int stride) +;----------------------------------------------------------------------------- +%macro MC30 3-4 +cglobal_mc %1, %2, mc30, %3, 3,5,9 + lea r4, [r1+2] + jmp stub_%2_h264_qpel%3_mc10_10_%1.body +%endmacro + +MC_CACHE MC30 + +;----------------------------------------------------------------------------- +; void h264_qpel_mc10(uint8_t *dst, uint8_t *src, int stride) +;----------------------------------------------------------------------------- +%macro MC10 3-4 +cglobal_mc 
%1, %2, mc10, %3, 3,5,9 + mov r4, r1 +.body + mov r3d, %3 + mova m1, [pw_pixel_max] +%if num_mmregs > 8 + mova m8, [pw_16] + %define p16 m8 +%else + %define p16 [pw_16] +%endif +.nextrow +%if %0 == 4 + movu m2, [r1-4] + movu m3, [r1-2] + movu m4, [r1+0] + ADDW m2, [r1+6], m5 + ADDW m3, [r1+4], m5 + ADDW m4, [r1+2], m5 +%else ; movu is slow on these processors +%if mmsize==16 + movu m2, [r1-4] + movu m0, [r1+6] + mova m6, m0 + psrldq m0, 6 + + paddw m6, m2 + PALIGNR m3, m0, m2, 2, m5 + PALIGNR m7, m0, m2, 8, m5 + paddw m3, m7 + PALIGNR m4, m0, m2, 4, m5 + PALIGNR m7, m0, m2, 6, m5 + paddw m4, m7 + SWAP 2, 6 +%else + movu m2, [r1-4] + movu m6, [r1+4] + PALIGNR m3, m6, m2, 2, m5 + paddw m3, m6 + PALIGNR m4, m6, m2, 4, m5 + PALIGNR m7, m6, m2, 6, m5 + paddw m4, m7 + paddw m2, [r1+6] +%endif +%endif + + FILT_H m2, m3, m4, p16 + psraw m2, 1 + pxor m0, m0 + CLIPW m2, m0, m1 + movu m3, [r4] + pavgw m2, m3 + OP_MOV [r0], m2 + add r0, r2 + add r1, r2 + add r4, r2 + dec r3d + jg .nextrow + rep ret +%endmacro + +MC_CACHE MC10 + +;----------------------------------------------------------------------------- +; void h264_qpel_mc02(uint8_t *dst, uint8_t *src, int stride) +;----------------------------------------------------------------------------- +%macro V_FILT 11 +v_filt%9_%10_10_%11: + add r4, r2 +.no_addr4: + FILT_V m0, m1, m2, m3, m4, m5, m6, m7 + add r1, r2 + add r0, r2 + ret +%endmacro + +INIT_MMX +RESET_MM_PERMUTATION +%assign i 0 +%rep 4 +V_FILT m0, m1, m2, m3, m4, m5, m6, m7, 4, i, mmxext +SWAP 0,1,2,3,4,5 +%assign i i+1 +%endrep + +INIT_XMM +RESET_MM_PERMUTATION +%assign i 0 +%rep 6 +V_FILT m0, m1, m2, m3, m4, m5, m6, m7, 8, i, sse2 +SWAP 0,1,2,3,4,5 +%assign i i+1 +%endrep + +%macro MC02 3 +cglobal_mc %1, %2, mc02, %3, 3,4,8 + PRELOAD_V + + sub r0, r2 +%assign j 0 +%rep %3 + %assign i (j % 6) + call v_filt%3_ %+ i %+ _10_%1.no_addr4 + OP_MOV [r0], m0 + SWAP 0,1,2,3,4,5 + %assign j j+1 +%endrep + ret +%endmacro + +MC MC02 + +;----------------------------------------------------------------------------- +; void h264_qpel_mc01(uint8_t *dst, uint8_t *src, int stride) +;----------------------------------------------------------------------------- +%macro MC01 3 +cglobal_mc %1, %2, mc01, %3, 3,5,8 + mov r4, r1 +.body + PRELOAD_V + + sub r4, r2 + sub r0, r2 +%assign j 0 +%rep %3 + %assign i (j % 6) + call v_filt%3_ %+ i %+ _10_%1 + movu m7, [r4] + pavgw m0, m7 + OP_MOV [r0], m0 + SWAP 0,1,2,3,4,5 + %assign j j+1 +%endrep + ret +%endmacro + +MC MC01 + +;----------------------------------------------------------------------------- +; void h264_qpel_mc03(uint8_t *dst, uint8_t *src, int stride) +;----------------------------------------------------------------------------- +%macro MC03 3 +cglobal_mc %1, %2, mc03, %3, 3,5,8 + lea r4, [r1+r2] + jmp stub_%2_h264_qpel%3_mc01_10_%1.body +%endmacro + +MC MC03 + +;----------------------------------------------------------------------------- +; void h264_qpel_mc11(uint8_t *dst, uint8_t *src, int stride) +;----------------------------------------------------------------------------- +%macro H_FILT_AVG 3-4 +h_filt%2_%3_10_%1: +;FILT_H with fewer registers and averaged with the FILT_V result +;m6,m7 are tmp registers, m0 is the FILT_V result, the rest are to be used next in the next iteration +;unfortunately I need three registers, so m5 will have to be re-read from memory + movu m5, [r4-4] + ADDW m5, [r4+6], m7 + movu m6, [r4-2] + ADDW m6, [r4+4], m7 + paddw m5, [pw_16] + psubw m5, m6 ; a-b + psraw m5, 2 ; (a-b)/4 + psubw m5, m6 ; (a-b)/4-b + movu m6, [r4+0] + 
ADDW m6, [r4+2], m7 + paddw m5, m6 ; (a-b)/4-b+c + psraw m5, 2 ; ((a-b)/4-b+c)/4 + paddw m5, m6 ; ((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16 + psraw m5, 1 + CLIPW m5, [pb_0], [pw_pixel_max] +;avg FILT_V, FILT_H + pavgw m0, m5 +%if %0!=4 + movu m5, [r1+r5] +%endif + ret +%endmacro + +INIT_MMX +RESET_MM_PERMUTATION +%assign i 0 +%rep 3 +H_FILT_AVG mmxext, 4, i +SWAP 0,1,2,3,4,5 +%assign i i+1 +%endrep +H_FILT_AVG mmxext, 4, i, 0 + +INIT_XMM +RESET_MM_PERMUTATION +%assign i 0 +%rep 6 +%if i==1 +H_FILT_AVG sse2, 8, i, 0 +%else +H_FILT_AVG sse2, 8, i +%endif +SWAP 0,1,2,3,4,5 +%assign i i+1 +%endrep + +%macro MC11 3 +; this REALLY needs x86_64 +cglobal_mc %1, %2, mc11, %3, 3,6,8 + mov r4, r1 +.body + PRELOAD_V + + sub r0, r2 + sub r4, r2 + mov r5, r2 + neg r5 +%assign j 0 +%rep %3 + %assign i (j % 6) + call v_filt%3_ %+ i %+ _10_%1 + call h_filt%3_ %+ i %+ _10_%1 +%if %3==8 && i==1 + movu m5, [r1+r5] +%endif + OP_MOV [r0], m0 + SWAP 0,1,2,3,4,5 + %assign j j+1 +%endrep + ret +%endmacro + +MC MC11 + +;----------------------------------------------------------------------------- +; void h264_qpel_mc31(uint8_t *dst, uint8_t *src, int stride) +;----------------------------------------------------------------------------- +%macro MC31 3 +cglobal_mc %1, %2, mc31, %3, 3,6,8 + mov r4, r1 + add r1, 2 + jmp stub_%2_h264_qpel%3_mc11_10_%1.body +%endmacro + +MC MC31 + +;----------------------------------------------------------------------------- +; void h264_qpel_mc13(uint8_t *dst, uint8_t *src, int stride) +;----------------------------------------------------------------------------- +%macro MC13 3 +cglobal_mc %1, %2, mc13, %3, 3,7,12 + lea r4, [r1+r2] + jmp stub_%2_h264_qpel%3_mc11_10_%1.body +%endmacro + +MC MC13 + +;----------------------------------------------------------------------------- +; void h264_qpel_mc33(uint8_t *dst, uint8_t *src, int stride) +;----------------------------------------------------------------------------- +%macro MC33 3 +cglobal_mc %1, %2, mc33, %3, 3,6,8 + lea r4, [r1+r2] + add r1, 2 + jmp stub_%2_h264_qpel%3_mc11_10_%1.body +%endmacro + +MC MC33 + +;----------------------------------------------------------------------------- +; void h264_qpel_mc22(uint8_t *dst, uint8_t *src, int stride) +;----------------------------------------------------------------------------- +%macro FILT_H2 3 + psubw %1, %2 ; a-b + psubw %2, %3 ; b-c + psllw %2, 2 + psubw %1, %2 ; a-5*b+4*c + psllw %3, 4 + paddw %1, %3 ; a-5*b+20*c +%endmacro + +%macro FILT_VNRD 8 + movu %6, [r1] + paddw %1, %6 + mova %7, %2 + paddw %7, %5 + mova %8, %3 + paddw %8, %4 + FILT_H2 %1, %7, %8 +%endmacro + +%macro HV 2 +%ifidn %1,sse2 +%define PAD 12 +%define COUNT 2 +%else +%define PAD 0 +%define COUNT 3 +%endif +put_hv%2_10_%1: + neg r2 ; This actually saves instructions + lea r1, [r1+r2*2-mmsize+PAD] + lea r4, [rsp+PAD+gprsize] + mov r3d, COUNT +.v_loop: + movu m0, [r1] + sub r1, r2 + movu m1, [r1] + sub r1, r2 + movu m2, [r1] + sub r1, r2 + movu m3, [r1] + sub r1, r2 + movu m4, [r1] + sub r1, r2 +%assign i 0 +%rep %2-1 + FILT_VNRD m0, m1, m2, m3, m4, m5, m6, m7 + psubw m0, [pad20] + movu [r4+i*mmsize*3], m0 + sub r1, r2 + SWAP 0,1,2,3,4,5 +%assign i i+1 +%endrep + FILT_VNRD m0, m1, m2, m3, m4, m5, m6, m7 + psubw m0, [pad20] + movu [r4+i*mmsize*3], m0 + add r4, mmsize + lea r1, [r1+r2*8+mmsize] +%if %2==8 + lea r1, [r1+r2*4] +%endif + dec r3d + jg .v_loop + neg r2 + ret +%endmacro + +INIT_MMX +HV mmxext, 4 +INIT_XMM +HV sse2 , 8 + +%macro H_LOOP 2 +%if num_mmregs > 8 + %define s1 m8 + %define s2 m9 + %define s3 m10 + 
%define d1 m11 +%else + %define s1 [tap1] + %define s2 [tap2] + %define s3 [tap3] + %define d1 [depad] +%endif +h%2_loop_op_%1: + movu m1, [r1+mmsize-4] + movu m2, [r1+mmsize-2] + mova m3, [r1+mmsize+0] + movu m4, [r1+mmsize+2] + movu m5, [r1+mmsize+4] + movu m6, [r1+mmsize+6] +%if num_mmregs > 8 + pmaddwd m1, s1 + pmaddwd m2, s1 + pmaddwd m3, s2 + pmaddwd m4, s2 + pmaddwd m5, s3 + pmaddwd m6, s3 + paddd m1, d1 + paddd m2, d1 +%else + mova m0, s1 + pmaddwd m1, m0 + pmaddwd m2, m0 + mova m0, s2 + pmaddwd m3, m0 + pmaddwd m4, m0 + mova m0, s3 + pmaddwd m5, m0 + pmaddwd m6, m0 + mova m0, d1 + paddd m1, m0 + paddd m2, m0 +%endif + paddd m3, m5 + paddd m4, m6 + paddd m1, m3 + paddd m2, m4 + psrad m1, 10 + psrad m2, 10 + pslld m2, 16 + pand m1, [pd_0f] + por m1, m2 +%if num_mmregs <= 8 + pxor m0, m0 +%endif + CLIPW m1, m0, m7 + add r1, mmsize*3 + ret +%endmacro + +INIT_MMX +H_LOOP mmxext, 4 +INIT_XMM +H_LOOP sse2 , 8 + +%macro MC22 3 +cglobal_mc %1, %2, mc22, %3, 3,7,12 +%define PAD mmsize*8*4*2 ; SIZE*16*4*sizeof(pixel) + mov r6, rsp ; backup stack pointer + and rsp, ~(mmsize-1) ; align stack + sub rsp, PAD + + call put_hv%3_10_%1 + + mov r3d, %3 + mova m7, [pw_pixel_max] +%if num_mmregs > 8 + pxor m0, m0 + mova m8, [tap1] + mova m9, [tap2] + mova m10, [tap3] + mova m11, [depad] +%endif + mov r1, rsp +.h_loop: + call h%3_loop_op_%1 + + OP_MOV [r0], m1 + add r0, r2 + dec r3d + jg .h_loop + + mov rsp, r6 ; restore stack pointer + ret +%endmacro + +MC MC22 + +;----------------------------------------------------------------------------- +; void h264_qpel_mc12(uint8_t *dst, uint8_t *src, int stride) +;----------------------------------------------------------------------------- +%macro MC12 3 +cglobal_mc %1, %2, mc12, %3, 3,7,12 +%define PAD mmsize*8*4*2 ; SIZE*16*4*sizeof(pixel) + mov r6, rsp ; backup stack pointer + and rsp, ~(mmsize-1) ; align stack + sub rsp, PAD + + call put_hv%3_10_%1 + + xor r4d, r4d +.body + mov r3d, %3 + pxor m0, m0 + mova m7, [pw_pixel_max] +%if num_mmregs > 8 + mova m8, [tap1] + mova m9, [tap2] + mova m10, [tap3] + mova m11, [depad] +%endif + mov r1, rsp +.h_loop: + call h%3_loop_op_%1 + + movu m3, [r1+r4-2*mmsize] ; movu needed for mc32, etc + paddw m3, [depad2] + psrlw m3, 5 + psubw m3, [unpad] + CLIPW m3, m0, m7 + pavgw m1, m3 + + OP_MOV [r0], m1 + add r0, r2 + dec r3d + jg .h_loop + + mov rsp, r6 ; restore stack pointer + ret +%endmacro + +MC MC12 + +;----------------------------------------------------------------------------- +; void h264_qpel_mc32(uint8_t *dst, uint8_t *src, int stride) +;----------------------------------------------------------------------------- +%macro MC32 3 +cglobal_mc %1, %2, mc32, %3, 3,7,12 +%define PAD mmsize*8*3*2 ; SIZE*16*4*sizeof(pixel) + mov r6, rsp ; backup stack pointer + and rsp, ~(mmsize-1) ; align stack + sub rsp, PAD + + call put_hv%3_10_%1 + + mov r4d, 2 ; sizeof(pixel) + jmp stub_%2_h264_qpel%3_mc12_10_%1.body +%endmacro + +MC MC32 + +;----------------------------------------------------------------------------- +; void h264_qpel_mc21(uint8_t *dst, uint8_t *src, int stride) +;----------------------------------------------------------------------------- +%macro H_NRD 2 +put_h%2_10_%1: + add rsp, gprsize + mov r3d, %2 + xor r4d, r4d + mova m6, [pad20] +.nextrow + movu m2, [r5-4] + movu m3, [r5-2] + movu m4, [r5+0] + ADDW m2, [r5+6], m5 + ADDW m3, [r5+4], m5 + ADDW m4, [r5+2], m5 + + FILT_H2 m2, m3, m4 + psubw m2, m6 + mova [rsp+r4], m2 + add r4d, mmsize*3 + add r5, r2 + dec r3d + jg .nextrow + sub rsp, gprsize + ret +%endmacro 
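The horizontal filters above evaluate the six-tap H.264 luma kernel (1, -5, 20, 20, -5, 1) without multiplies: FILT_H interleaves shifts, relying on the identity noted in its own comments, ((a-b)/4 - b + c)/4 + c = (a - 5b + 20c)/16, while FILT_H2 accumulates the unscaled a - 5b + 20c for the two-pass hv paths. A scalar sketch of the FILT_H sequence, for illustration only (the function name is invented; the real macro also folds in a pw_16 rounding bias, and its callers shift right once more and clip):

/* Scalar model of FILT_H; in the horizontal case the inputs are the summed
 * tap pairs a = p[-2]+p[3], b = p[-1]+p[2], c = p[0]+p[1]. */
static int filt6_sketch(int a, int b, int c)
{
    int t = a - b;   /* a - b                 */
    t >>= 2;         /* (a - b)/4             */
    t -= b;          /* (a - b)/4 - b         */
    t += c;          /* (a - b)/4 - b + c     */
    t >>= 2;         /* ((a - b)/4 - b + c)/4 */
    return t + c;    /* ~ (a - 5b + 20c)/16   */
}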
+ +INIT_MMX +H_NRD mmxext, 4 +INIT_XMM +H_NRD sse2 , 8 + +%macro MC21 3 +cglobal_mc %1, %2, mc21, %3, 3,7,12 + mov r5, r1 +.body +%define PAD mmsize*8*3*2 ; SIZE*16*4*sizeof(pixel) + mov r6, rsp ; backup stack pointer + and rsp, ~(mmsize-1) ; align stack + + sub rsp, PAD + call put_h%3_10_%1 + + sub rsp, PAD + call put_hv%3_10_%1 + + mov r4d, PAD-mmsize ; H buffer + jmp stub_%2_h264_qpel%3_mc12_10_%1.body +%endmacro + +MC MC21 + +;----------------------------------------------------------------------------- +; void h264_qpel_mc23(uint8_t *dst, uint8_t *src, int stride) +;----------------------------------------------------------------------------- +%macro MC23 3 +cglobal_mc %1, %2, mc23, %3, 3,7,12 + lea r5, [r1+r2] + jmp stub_%2_h264_qpel%3_mc21_10_%1.body +%endmacro + +MC MC23 diff --git a/libavcodec/x86/h264_qpel_mmx.c b/libavcodec/x86/h264_qpel_mmx.c index f5af44e82f..807d8548d6 100644 --- a/libavcodec/x86/h264_qpel_mmx.c +++ b/libavcodec/x86/h264_qpel_mmx.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt + * Copyright (c) 2011 Daniel Kang * * This file is part of FFmpeg. * @@ -1199,3 +1200,100 @@ H264_MC_816(H264_MC_HV, sse2) H264_MC_816(H264_MC_H, ssse3) H264_MC_816(H264_MC_HV, ssse3) #endif + + + +//10bit +#define LUMA_MC_OP(OP, NUM, DEPTH, TYPE, OPT) \ +void ff_ ## OP ## _h264_qpel ## NUM ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT \ + (uint8_t *dst, uint8_t *src, int stride); + +#define LUMA_MC_ALL(DEPTH, TYPE, OPT) \ + LUMA_MC_OP(put, 4, DEPTH, TYPE, OPT) \ + LUMA_MC_OP(avg, 4, DEPTH, TYPE, OPT) \ + LUMA_MC_OP(put, 8, DEPTH, TYPE, OPT) \ + LUMA_MC_OP(avg, 8, DEPTH, TYPE, OPT) \ + LUMA_MC_OP(put, 16, DEPTH, TYPE, OPT) \ + LUMA_MC_OP(avg, 16, DEPTH, TYPE, OPT) + +#define LUMA_MC_816(DEPTH, TYPE, OPT) \ + LUMA_MC_OP(put, 8, DEPTH, TYPE, OPT) \ + LUMA_MC_OP(avg, 8, DEPTH, TYPE, OPT) \ + LUMA_MC_OP(put, 16, DEPTH, TYPE, OPT) \ + LUMA_MC_OP(avg, 16, DEPTH, TYPE, OPT) + +LUMA_MC_ALL(10, mc00, mmxext) +LUMA_MC_ALL(10, mc10, mmxext) +LUMA_MC_ALL(10, mc20, mmxext) +LUMA_MC_ALL(10, mc30, mmxext) +LUMA_MC_ALL(10, mc01, mmxext) +LUMA_MC_ALL(10, mc11, mmxext) +LUMA_MC_ALL(10, mc21, mmxext) +LUMA_MC_ALL(10, mc31, mmxext) +LUMA_MC_ALL(10, mc02, mmxext) +LUMA_MC_ALL(10, mc12, mmxext) +LUMA_MC_ALL(10, mc22, mmxext) +LUMA_MC_ALL(10, mc32, mmxext) +LUMA_MC_ALL(10, mc03, mmxext) +LUMA_MC_ALL(10, mc13, mmxext) +LUMA_MC_ALL(10, mc23, mmxext) +LUMA_MC_ALL(10, mc33, mmxext) + +LUMA_MC_816(10, mc00, sse2) +LUMA_MC_816(10, mc10, sse2) +LUMA_MC_816(10, mc10, sse2_cache64) +LUMA_MC_816(10, mc10, ssse3_cache64) +LUMA_MC_816(10, mc20, sse2) +LUMA_MC_816(10, mc20, sse2_cache64) +LUMA_MC_816(10, mc20, ssse3_cache64) +LUMA_MC_816(10, mc30, sse2) +LUMA_MC_816(10, mc30, sse2_cache64) +LUMA_MC_816(10, mc30, ssse3_cache64) +LUMA_MC_816(10, mc01, sse2) +LUMA_MC_816(10, mc11, sse2) +LUMA_MC_816(10, mc21, sse2) +LUMA_MC_816(10, mc31, sse2) +LUMA_MC_816(10, mc02, sse2) +LUMA_MC_816(10, mc12, sse2) +LUMA_MC_816(10, mc22, sse2) +LUMA_MC_816(10, mc32, sse2) +LUMA_MC_816(10, mc03, sse2) +LUMA_MC_816(10, mc13, sse2) +LUMA_MC_816(10, mc23, sse2) +LUMA_MC_816(10, mc33, sse2) + +#define QPEL16_OPMC(OP, MC, MMX)\ +void ff_ ## OP ## _h264_qpel16_ ## MC ## _10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ + ff_ ## OP ## _h264_qpel8_ ## MC ## _10_ ## MMX(dst , src , stride);\ + ff_ ## OP ## _h264_qpel8_ ## MC ## _10_ ## MMX(dst+16, src+16, stride);\ + src += 8*stride;\ + dst += 8*stride;\ + ff_ ## OP ## _h264_qpel8_ ## MC ## _10_ ## MMX(dst , src , stride);\ + ff_ ## OP ## _h264_qpel8_ ## MC ## _10_ ## 
MMX(dst+16, src+16, stride);\ +} + +#define QPEL16_OP(MC, MMX)\ +QPEL16_OPMC(put, MC, MMX)\ +QPEL16_OPMC(avg, MC, MMX) + +#define QPEL16(MMX)\ +QPEL16_OP(mc00, MMX)\ +QPEL16_OP(mc01, MMX)\ +QPEL16_OP(mc02, MMX)\ +QPEL16_OP(mc03, MMX)\ +QPEL16_OP(mc10, MMX)\ +QPEL16_OP(mc11, MMX)\ +QPEL16_OP(mc12, MMX)\ +QPEL16_OP(mc13, MMX)\ +QPEL16_OP(mc20, MMX)\ +QPEL16_OP(mc21, MMX)\ +QPEL16_OP(mc22, MMX)\ +QPEL16_OP(mc23, MMX)\ +QPEL16_OP(mc30, MMX)\ +QPEL16_OP(mc31, MMX)\ +QPEL16_OP(mc32, MMX)\ +QPEL16_OP(mc33, MMX) + +#if ARCH_X86_32 && HAVE_YASM // ARCH_X86_64 implies sse2+ +QPEL16(mmxext) +#endif diff --git a/libavcodec/x86/h264_weight_10bit.asm b/libavcodec/x86/h264_weight_10bit.asm new file mode 100644 index 0000000000..1c58d72d94 --- /dev/null +++ b/libavcodec/x86/h264_weight_10bit.asm @@ -0,0 +1,321 @@ +;***************************************************************************** +;* MMX/SSE2/AVX-optimized 10-bit H.264 weighted prediction code +;***************************************************************************** +;* Copyright (C) 2005-2011 x264 project +;* +;* Authors: Daniel Kang <daniel.d.kang@gmail.com> +;* +;* This file is part of Libav. +;* +;* Libav is free software; you can redistribute it and/or +;* modify it under the terms of the GNU Lesser General Public +;* License as published by the Free Software Foundation; either +;* version 2.1 of the License, or (at your option) any later version. +;* +;* Libav is distributed in the hope that it will be useful, +;* but WITHOUT ANY WARRANTY; without even the implied warranty of +;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +;* Lesser General Public License for more details. +;* +;* You should have received a copy of the GNU Lesser General Public +;* License along with Libav; if not, write to the Free Software +;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +;****************************************************************************** + +%include "x86inc.asm" +%include "x86util.asm" + +SECTION_RODATA 32 + +pw_pixel_max: times 8 dw ((1 << 10)-1) +sq_1: dq 1 + dq 0 + +cextern pw_1 + +SECTION .text + +;----------------------------------------------------------------------------- +; void h264_weight(uint8_t *dst, int stride, int log2_denom, +; int weight, int offset); +;----------------------------------------------------------------------------- +%ifdef ARCH_X86_32 +DECLARE_REG_TMP 2 +%else +DECLARE_REG_TMP 10 +%endif + +%macro WEIGHT_PROLOGUE 1 + mov t0, %1 +.prologue + PROLOGUE 0,5,8 + movifnidn r0, r0mp + movifnidn r1d, r1m + movifnidn r3d, r3m + movifnidn r4d, r4m +%endmacro + +%macro WEIGHT_SETUP 1 + mova m0, [pw_1] + movd m2, r2m + pslld m0, m2 ; 1<<log2_denom + SPLATW m0, m0 + shl r4, 19 ; *8, move to upper half of dword + lea r4, [r4+r3*2+0x10000] + movd m3, r4d ; weight<<1 | 1+(offset<<(3)) + pshufd m3, m3, 0 + mova m4, [pw_pixel_max] + paddw m2, [sq_1] ; log2_denom+1 +%ifnidn %1, sse4 + pxor m7, m7 +%endif +%endmacro + +%macro WEIGHT_OP 2-3 +%if %0==2 + mova m5, [r0+%2] + punpckhwd m6, m5, m0 + punpcklwd m5, m0 +%else + movq m5, [r0+%2] + movq m6, [r0+%3] + punpcklwd m5, m0 + punpcklwd m6, m0 +%endif + pmaddwd m5, m3 + pmaddwd m6, m3 + psrad m5, m2 + psrad m6, m2 +%ifidn %1, sse4 + packusdw m5, m6 + pminsw m5, m4 +%else + packssdw m5, m6 + CLIPW m5, m7, m4 +%endif +%endmacro + +%macro WEIGHT_FUNC_DBL 1 +cglobal h264_weight_16x16_10_%1 + WEIGHT_PROLOGUE 16 + WEIGHT_SETUP %1 +.nextrow + WEIGHT_OP %1, 0 + mova [r0 ], m5 + WEIGHT_OP %1, 16 + mova [r0+16], m5 + add r0, r1 + 
dec t0 + jnz .nextrow + REP_RET + +cglobal h264_weight_16x8_10_%1 + mov t0, 8 + jmp mangle(ff_h264_weight_16x16_10_%1.prologue) +%endmacro + +INIT_XMM +WEIGHT_FUNC_DBL sse2 +WEIGHT_FUNC_DBL sse4 + + +%macro WEIGHT_FUNC_MM 1 +cglobal h264_weight_8x16_10_%1 + WEIGHT_PROLOGUE 16 + WEIGHT_SETUP %1 +.nextrow + WEIGHT_OP %1, 0 + mova [r0], m5 + add r0, r1 + dec t0 + jnz .nextrow + REP_RET + +cglobal h264_weight_8x8_10_%1 + mov t0, 8 + jmp mangle(ff_h264_weight_8x16_10_%1.prologue) + +cglobal h264_weight_8x4_10_%1 + mov t0, 4 + jmp mangle(ff_h264_weight_8x16_10_%1.prologue) +%endmacro + +INIT_XMM +WEIGHT_FUNC_MM sse2 +WEIGHT_FUNC_MM sse4 + + +%macro WEIGHT_FUNC_HALF_MM 1 +cglobal h264_weight_4x8_10_%1 + WEIGHT_PROLOGUE 4 + WEIGHT_SETUP %1 + lea r3, [r1*2] +.nextrow + WEIGHT_OP %1, 0, r1 + movh [r0], m5 + movhps [r0+r1], m5 + add r0, r3 + dec t0 + jnz .nextrow + REP_RET + +cglobal h264_weight_4x4_10_%1 + mov t0, 2 + jmp mangle(ff_h264_weight_4x8_10_%1.prologue) + +cglobal h264_weight_4x2_10_%1 + mov t0, 1 + jmp mangle(ff_h264_weight_4x8_10_%1.prologue) +%endmacro + +INIT_XMM +WEIGHT_FUNC_HALF_MM sse2 +WEIGHT_FUNC_HALF_MM sse4 + + +;----------------------------------------------------------------------------- +; void h264_biweight(uint8_t *dst, uint8_t *src, int stride, int log2_denom, +; int weightd, int weights, int offset); +;----------------------------------------------------------------------------- +%ifdef ARCH_X86_32 +DECLARE_REG_TMP 2,3 +%else +DECLARE_REG_TMP 10,2 +%endif + +%macro BIWEIGHT_PROLOGUE 1 + mov t0, %1 +.prologue + PROLOGUE 0,7,8 + movifnidn r0, r0mp + movifnidn r1, r1mp + movifnidn t1d, r2m + movifnidn r4d, r4m + movifnidn r5d, r5m + movifnidn r6d, r6m +%endmacro + +%macro BIWEIGHT_SETUP 1 + lea r6, [r6*4+1] ; (offset<<2)+1 + or r6, 1 + shl r5, 16 + or r4, r5 + movd m4, r4d ; weightd | weights + movd m5, r6d ; (offset+1)|1 + movd m6, r3m ; log2_denom + pslld m5, m6 ; (((offset<<2)+1)|1)<<log2_denom + paddd m6, [sq_1] + pshufd m4, m4, 0 + pshufd m5, m5, 0 + mova m3, [pw_pixel_max] +%ifnidn %1, sse4 + pxor m7, m7 +%endif +%endmacro + +%macro BIWEIGHT 2-3 +%if %0==2 + mova m0, [r0+%2] + mova m1, [r1+%2] + punpckhwd m2, m0, m1 + punpcklwd m0, m1 +%else + movq m0, [r0+%2] + movq m1, [r1+%2] + punpcklwd m0, m1 + movq m2, [r0+%3] + movq m1, [r1+%3] + punpcklwd m2, m1 +%endif + pmaddwd m0, m4 + pmaddwd m2, m4 + paddd m0, m5 + paddd m2, m5 + psrad m0, m6 + psrad m2, m6 +%ifidn %1, sse4 + packusdw m0, m2 + pminsw m0, m3 +%else + packssdw m0, m2 + CLIPW m0, m7, m3 +%endif +%endmacro + +%macro BIWEIGHT_FUNC_DBL 1 +cglobal h264_biweight_16x16_10_%1 + BIWEIGHT_PROLOGUE 16 + BIWEIGHT_SETUP %1 +.nextrow + BIWEIGHT %1, 0 + mova [r0 ], m0 + BIWEIGHT %1, 16 + mova [r0+16], m0 + add r0, t1 + add r1, t1 + dec t0 + jnz .nextrow + REP_RET + +cglobal h264_biweight_16x8_10_%1 + mov t0, 8 + jmp mangle(ff_h264_biweight_16x16_10_%1.prologue) +%endmacro + +INIT_XMM +BIWEIGHT_FUNC_DBL sse2 +BIWEIGHT_FUNC_DBL sse4 + +%macro BIWEIGHT_FUNC 1 +cglobal h264_biweight_8x16_10_%1 + BIWEIGHT_PROLOGUE 16 + BIWEIGHT_SETUP %1 +.nextrow + BIWEIGHT %1, 0 + mova [r0], m0 + add r0, t1 + add r1, t1 + dec t0 + jnz .nextrow + REP_RET + +cglobal h264_biweight_8x8_10_%1 + mov t0, 8 + jmp mangle(ff_h264_biweight_8x16_10_%1.prologue) + +cglobal h264_biweight_8x4_10_%1 + mov t0, 4 + jmp mangle(ff_h264_biweight_8x16_10_%1.prologue) +%endmacro + +INIT_XMM +BIWEIGHT_FUNC sse2 +BIWEIGHT_FUNC sse4 + +%macro BIWEIGHT_FUNC_HALF 1 +cglobal h264_biweight_4x8_10_%1 + BIWEIGHT_PROLOGUE 4 + BIWEIGHT_SETUP %1 + lea r4, [t1*2] +.nextrow + 
BIWEIGHT %1, 0, t1 + movh [r0 ], m0 + movhps [r0+t1], m0 + add r0, r4 + add r1, r4 + dec t0 + jnz .nextrow + REP_RET + +cglobal h264_biweight_4x4_10_%1 + mov t0, 2 + jmp mangle(ff_h264_biweight_4x8_10_%1.prologue) + +cglobal h264_biweight_4x2_10_%1 + mov t0, 1 + jmp mangle(ff_h264_biweight_4x8_10_%1.prologue) +%endmacro + +INIT_XMM +BIWEIGHT_FUNC_HALF sse2 +BIWEIGHT_FUNC_HALF sse4 diff --git a/libavcodec/x86/h264dsp_mmx.c b/libavcodec/x86/h264dsp_mmx.c index 6c1b0a5619..68e543681f 100644 --- a/libavcodec/x86/h264dsp_mmx.c +++ b/libavcodec/x86/h264dsp_mmx.c @@ -326,14 +326,39 @@ H264_BIWEIGHT_MMX ( 4, 8) H264_BIWEIGHT_MMX ( 4, 4) H264_BIWEIGHT_MMX ( 4, 2) +#define H264_WEIGHT_10(W, H, DEPTH, OPT) \ +void ff_h264_weight_ ## W ## x ## H ## _ ## DEPTH ## _ ## OPT(uint8_t *dst, \ + int stride, int log2_denom, int weight, int offset); + +#define H264_BIWEIGHT_10(W, H, DEPTH, OPT) \ +void ff_h264_biweight_ ## W ## x ## H ## _ ## DEPTH ## _ ## OPT \ + (uint8_t *dst, uint8_t *src, int stride, int log2_denom, \ + int weightd, int weights, int offset); + +#define H264_BIWEIGHT_10_SSE(W, H, DEPTH) \ +H264_WEIGHT_10 (W, H, DEPTH, sse2) \ +H264_WEIGHT_10 (W, H, DEPTH, sse4) \ +H264_BIWEIGHT_10(W, H, DEPTH, sse2) \ +H264_BIWEIGHT_10(W, H, DEPTH, sse4) + +H264_BIWEIGHT_10_SSE(16, 16, 10) +H264_BIWEIGHT_10_SSE(16, 8, 10) +H264_BIWEIGHT_10_SSE( 8, 16, 10) +H264_BIWEIGHT_10_SSE( 8, 8, 10) +H264_BIWEIGHT_10_SSE( 8, 4, 10) +H264_BIWEIGHT_10_SSE( 4, 8, 10) +H264_BIWEIGHT_10_SSE( 4, 4, 10) +H264_BIWEIGHT_10_SSE( 4, 2, 10) + void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth) { int mm_flags = av_get_cpu_flags(); - if (bit_depth == 8) { if (mm_flags & AV_CPU_FLAG_MMX2) { c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2; } + + if (bit_depth == 8) { #if HAVE_YASM if (mm_flags & AV_CPU_FLAG_MMX) { c->h264_idct_dc_add = @@ -454,6 +479,24 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth) c->h264_idct8_add4 = ff_h264_idct8_add4_10_sse2; #endif + c->weight_h264_pixels_tab[0] = ff_h264_weight_16x16_10_sse2; + c->weight_h264_pixels_tab[1] = ff_h264_weight_16x8_10_sse2; + c->weight_h264_pixels_tab[2] = ff_h264_weight_8x16_10_sse2; + c->weight_h264_pixels_tab[3] = ff_h264_weight_8x8_10_sse2; + c->weight_h264_pixels_tab[4] = ff_h264_weight_8x4_10_sse2; + c->weight_h264_pixels_tab[5] = ff_h264_weight_4x8_10_sse2; + c->weight_h264_pixels_tab[6] = ff_h264_weight_4x4_10_sse2; + c->weight_h264_pixels_tab[7] = ff_h264_weight_4x2_10_sse2; + + c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16x16_10_sse2; + c->biweight_h264_pixels_tab[1] = ff_h264_biweight_16x8_10_sse2; + c->biweight_h264_pixels_tab[2] = ff_h264_biweight_8x16_10_sse2; + c->biweight_h264_pixels_tab[3] = ff_h264_biweight_8x8_10_sse2; + c->biweight_h264_pixels_tab[4] = ff_h264_biweight_8x4_10_sse2; + c->biweight_h264_pixels_tab[5] = ff_h264_biweight_4x8_10_sse2; + c->biweight_h264_pixels_tab[6] = ff_h264_biweight_4x4_10_sse2; + c->biweight_h264_pixels_tab[7] = ff_h264_biweight_4x2_10_sse2; + c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_10_sse2; c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_10_sse2; #if HAVE_ALIGNED_STACK @@ -463,6 +506,25 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth) c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_sse2; #endif } + if (mm_flags&AV_CPU_FLAG_SSE4) { + c->weight_h264_pixels_tab[0] = ff_h264_weight_16x16_10_sse4; + c->weight_h264_pixels_tab[1] = ff_h264_weight_16x8_10_sse4; + c->weight_h264_pixels_tab[2] = ff_h264_weight_8x16_10_sse4; + 
c->weight_h264_pixels_tab[3] = ff_h264_weight_8x8_10_sse4; + c->weight_h264_pixels_tab[4] = ff_h264_weight_8x4_10_sse4; + c->weight_h264_pixels_tab[5] = ff_h264_weight_4x8_10_sse4; + c->weight_h264_pixels_tab[6] = ff_h264_weight_4x4_10_sse4; + c->weight_h264_pixels_tab[7] = ff_h264_weight_4x2_10_sse4; + + c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16x16_10_sse4; + c->biweight_h264_pixels_tab[1] = ff_h264_biweight_16x8_10_sse4; + c->biweight_h264_pixels_tab[2] = ff_h264_biweight_8x16_10_sse4; + c->biweight_h264_pixels_tab[3] = ff_h264_biweight_8x8_10_sse4; + c->biweight_h264_pixels_tab[4] = ff_h264_biweight_8x4_10_sse4; + c->biweight_h264_pixels_tab[5] = ff_h264_biweight_4x8_10_sse4; + c->biweight_h264_pixels_tab[6] = ff_h264_biweight_4x4_10_sse4; + c->biweight_h264_pixels_tab[7] = ff_h264_biweight_4x2_10_sse4; + } #if HAVE_AVX if (mm_flags&AV_CPU_FLAG_AVX) { c->h264_idct_dc_add = diff --git a/libavcodec/x86/idct_sse2_xvid.c b/libavcodec/x86/idct_sse2_xvid.c index 5185d61e54..fc75a57519 100644 --- a/libavcodec/x86/idct_sse2_xvid.c +++ b/libavcodec/x86/idct_sse2_xvid.c @@ -43,7 +43,7 @@ #include "idct_xvid.h" #include "dsputil_mmx.h" -/*! +/** * @file * @brief SSE2 idct compatible with xvidmmx */ diff --git a/libavcodec/x86/idct_xvid.h b/libavcodec/x86/idct_xvid.h index 5fdc20d3ea..be91d1c68a 100644 --- a/libavcodec/x86/idct_xvid.h +++ b/libavcodec/x86/idct_xvid.h @@ -18,7 +18,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ -/*! +/** * @file * header for Xvid IDCT functions */ diff --git a/libavcodec/x86/mlpdsp.c b/libavcodec/x86/mlpdsp.c index 486a927ad7..7ea77fc1b8 100644 --- a/libavcodec/x86/mlpdsp.c +++ b/libavcodec/x86/mlpdsp.c @@ -23,7 +23,7 @@ #include "libavcodec/dsputil.h" #include "libavcodec/mlp.h" -#if HAVE_7REGS && HAVE_TEN_OPERANDS +#if HAVE_7REGS extern void ff_mlp_firorder_8; extern void ff_mlp_firorder_7; @@ -171,11 +171,11 @@ static void mlp_filter_channel_x86(int32_t *state, const int32_t *coeff, ); } -#endif /* HAVE_7REGS && HAVE_TEN_OPERANDS */ +#endif /* HAVE_7REGS */ void ff_mlp_init_x86(DSPContext* c, AVCodecContext *avctx) { -#if HAVE_7REGS && HAVE_TEN_OPERANDS +#if HAVE_7REGS c->mlp_filter_channel = mlp_filter_channel_x86; #endif } diff --git a/libavcodec/x86/mpegvideo_mmx_template.c b/libavcodec/x86/mpegvideo_mmx_template.c index fb52159576..de6a4724b6 100644 --- a/libavcodec/x86/mpegvideo_mmx_template.c +++ b/libavcodec/x86/mpegvideo_mmx_template.c @@ -98,7 +98,7 @@ static int RENAME(dct_quantize)(MpegEncContext *s, x86_reg last_non_zero_p1; int level=0, q; //=0 is because gcc says uninitialized ... const uint16_t *qmat, *bias; - DECLARE_ALIGNED(16, int16_t, temp_block)[64]; + LOCAL_ALIGNED_16(int16_t, temp_block, [64]); assert((7&(int)(&temp_block[0])) == 0); //did gcc align it correctly? 
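The weight/biweight tables installed above point at the new 10-bit weighted-prediction asm from h264_weight_10bit.asm. As a spec-level reference only (this sketch is not derived from the asm, which packs weights and offsets for pmaddwd and folds the rounding constants differently; function names are illustrative), H.264 explicit weighted prediction computes per sample roughly:

static inline int clip10(int v)
{
    return v < 0 ? 0 : v > 1023 ? 1023 : v;   /* pw_pixel_max for 10-bit */
}

/* Unidirectional case: one reference sample. */
static int weight10(int p, int log2_denom, int weight, int offset)
{
    int round = log2_denom ? 1 << (log2_denom - 1) : 0;
    return clip10(((p * weight + round) >> log2_denom) + offset);
}

/* Bidirectional (biweight) case: one sample from each reference;
 * offset is the combined bi-prediction offset, (o0 + o1 + 1) >> 1 in the spec. */
static int biweight10(int p0, int p1, int log2_denom,
                      int w0, int w1, int offset)
{
    return clip10(((p0 * w0 + p1 * w1 + (1 << log2_denom)) >> (log2_denom + 1))
                  + offset);
}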
diff --git a/libavcodec/x86/simple_idct_mmx.c b/libavcodec/x86/simple_idct_mmx.c index 92cc18465c..db479ce257 100644 --- a/libavcodec/x86/simple_idct_mmx.c +++ b/libavcodec/x86/simple_idct_mmx.c @@ -37,11 +37,7 @@ #define C1 22725 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 #define C2 21407 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 #define C3 19266 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 -#if 0 -#define C4 16384 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 -#else #define C4 16383 //cos(i*M_PI/16)*sqrt(2)*(1<<14) - 0.5 -#endif #define C5 12873 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 #define C6 8867 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 #define C7 4520 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 @@ -80,135 +76,6 @@ DECLARE_ALIGNED(8, static const int16_t, coeffs)[]= { C3, -C1, C3, -C1 }; -#if 0 -static void unused_var_killer(void) -{ - int a= wm1010 + d40000; - temp[0]=a; -} - -static void inline idctCol (int16_t * col, int16_t *input) -{ -#undef C0 -#undef C1 -#undef C2 -#undef C3 -#undef C4 -#undef C5 -#undef C6 -#undef C7 - int a0, a1, a2, a3, b0, b1, b2, b3; - const int C0 = 23170; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C1 = 22725; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C2 = 21407; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C3 = 19266; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C4 = 16383; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C5 = 12873; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C6 = 8867; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C7 = 4520; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 -/* - if( !(col[8*1] | col[8*2] |col[8*3] |col[8*4] |col[8*5] |col[8*6] | col[8*7])) { - col[8*0] = col[8*1] = col[8*2] = col[8*3] = col[8*4] = - col[8*5] = col[8*6] = col[8*7] = col[8*0]<<3; - return; - }*/ - -col[8*0] = input[8*0 + 0]; -col[8*1] = input[8*2 + 0]; -col[8*2] = input[8*0 + 1]; -col[8*3] = input[8*2 + 1]; -col[8*4] = input[8*4 + 0]; -col[8*5] = input[8*6 + 0]; -col[8*6] = input[8*4 + 1]; -col[8*7] = input[8*6 + 1]; - - a0 = C4*col[8*0] + C2*col[8*2] + C4*col[8*4] + C6*col[8*6] + (1<<(COL_SHIFT-1)); - a1 = C4*col[8*0] + C6*col[8*2] - C4*col[8*4] - C2*col[8*6] + (1<<(COL_SHIFT-1)); - a2 = C4*col[8*0] - C6*col[8*2] - C4*col[8*4] + C2*col[8*6] + (1<<(COL_SHIFT-1)); - a3 = C4*col[8*0] - C2*col[8*2] + C4*col[8*4] - C6*col[8*6] + (1<<(COL_SHIFT-1)); - - b0 = C1*col[8*1] + C3*col[8*3] + C5*col[8*5] + C7*col[8*7]; - b1 = C3*col[8*1] - C7*col[8*3] - C1*col[8*5] - C5*col[8*7]; - b2 = C5*col[8*1] - C1*col[8*3] + C7*col[8*5] + C3*col[8*7]; - b3 = C7*col[8*1] - C5*col[8*3] + C3*col[8*5] - C1*col[8*7]; - - col[8*0] = (a0 + b0) >> COL_SHIFT; - col[8*1] = (a1 + b1) >> COL_SHIFT; - col[8*2] = (a2 + b2) >> COL_SHIFT; - col[8*3] = (a3 + b3) >> COL_SHIFT; - col[8*4] = (a3 - b3) >> COL_SHIFT; - col[8*5] = (a2 - b2) >> COL_SHIFT; - col[8*6] = (a1 - b1) >> COL_SHIFT; - col[8*7] = (a0 - b0) >> COL_SHIFT; -} - -static void inline idctRow (int16_t * output, int16_t * input) -{ - int16_t row[8]; - - int a0, a1, a2, a3, b0, b1, b2, b3; - const int C0 = 23170; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C1 = 22725; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C2 = 21407; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C3 = 19266; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C4 = 16383; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C5 = 12873; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C6 = 8867; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C7 = 4520; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - -row[0] = input[0]; -row[2] = input[1]; 
-row[4] = input[4]; -row[6] = input[5]; -row[1] = input[8]; -row[3] = input[9]; -row[5] = input[12]; -row[7] = input[13]; - - if( !(row[1] | row[2] |row[3] |row[4] |row[5] |row[6] | row[7]) ) { - row[0] = row[1] = row[2] = row[3] = row[4] = - row[5] = row[6] = row[7] = row[0]<<3; - output[0] = row[0]; - output[2] = row[1]; - output[4] = row[2]; - output[6] = row[3]; - output[8] = row[4]; - output[10] = row[5]; - output[12] = row[6]; - output[14] = row[7]; - return; - } - - a0 = C4*row[0] + C2*row[2] + C4*row[4] + C6*row[6] + (1<<(ROW_SHIFT-1)); - a1 = C4*row[0] + C6*row[2] - C4*row[4] - C2*row[6] + (1<<(ROW_SHIFT-1)); - a2 = C4*row[0] - C6*row[2] - C4*row[4] + C2*row[6] + (1<<(ROW_SHIFT-1)); - a3 = C4*row[0] - C2*row[2] + C4*row[4] - C6*row[6] + (1<<(ROW_SHIFT-1)); - - b0 = C1*row[1] + C3*row[3] + C5*row[5] + C7*row[7]; - b1 = C3*row[1] - C7*row[3] - C1*row[5] - C5*row[7]; - b2 = C5*row[1] - C1*row[3] + C7*row[5] + C3*row[7]; - b3 = C7*row[1] - C5*row[3] + C3*row[5] - C1*row[7]; - - row[0] = (a0 + b0) >> ROW_SHIFT; - row[1] = (a1 + b1) >> ROW_SHIFT; - row[2] = (a2 + b2) >> ROW_SHIFT; - row[3] = (a3 + b3) >> ROW_SHIFT; - row[4] = (a3 - b3) >> ROW_SHIFT; - row[5] = (a2 - b2) >> ROW_SHIFT; - row[6] = (a1 - b1) >> ROW_SHIFT; - row[7] = (a0 - b0) >> ROW_SHIFT; - - output[0] = row[0]; - output[2] = row[1]; - output[4] = row[2]; - output[6] = row[3]; - output[8] = row[4]; - output[10] = row[5]; - output[12] = row[6]; - output[14] = row[7]; -} -#endif - static inline void idct(int16_t *block) { DECLARE_ALIGNED(8, int64_t, align_tmp)[16]; diff --git a/libavcodec/x86/snowdsp_mmx.c b/libavcodec/x86/snowdsp_mmx.c index 9c1fa429a8..f107d55e87 100644 --- a/libavcodec/x86/snowdsp_mmx.c +++ b/libavcodec/x86/snowdsp_mmx.c @@ -675,14 +675,14 @@ static void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM #define snow_inner_add_yblock_sse2_end_8\ "sal $1, %%"REG_c" \n\t"\ - "add $"PTR_SIZE"*2, %1 \n\t"\ + "add"OPSIZE" $"PTR_SIZE"*2, %1 \n\t"\ snow_inner_add_yblock_sse2_end_common1\ "sar $1, %%"REG_c" \n\t"\ "sub $2, %2 \n\t"\ snow_inner_add_yblock_sse2_end_common2 #define snow_inner_add_yblock_sse2_end_16\ - "add $"PTR_SIZE"*1, %1 \n\t"\ + "add"OPSIZE" $"PTR_SIZE"*1, %1 \n\t"\ snow_inner_add_yblock_sse2_end_common1\ "dec %2 \n\t"\ snow_inner_add_yblock_sse2_end_common2 diff --git a/libavcodec/x86/x86util.asm b/libavcodec/x86/x86util.asm index b8ca348b68..1cede4d336 100644 --- a/libavcodec/x86/x86util.asm +++ b/libavcodec/x86/x86util.asm @@ -497,10 +497,10 @@ %macro STORE_DIFFx2 8 ; add1, add2, reg1, reg2, zero, shift, source, stride movh %3, [%7] movh %4, [%7+%8] - punpcklbw %3, %5 - punpcklbw %4, %5 psraw %1, %6 psraw %2, %6 + punpcklbw %3, %5 + punpcklbw %4, %5 paddw %3, %1 paddw %4, %2 packuswb %3, %5 @@ -528,6 +528,14 @@ %endif %endmacro +%macro SPLATD 2-3 0 +%if mmsize == 16 + pshufd %1, %2, (%3)*0x55 +%else + pshufw %1, %2, (%3)*0x11 + ((%3)+1)*0x44 +%endif +%endmacro + %macro CLIPW 3 ;(dst, min, max) pmaxsw %1, %2 pminsw %1, %3 diff --git a/libavcodec/xan.c b/libavcodec/xan.c index f5d1812aec..598d1e1423 100644 --- a/libavcodec/xan.c +++ b/libavcodec/xan.c @@ -553,15 +553,14 @@ static av_cold int xan_decode_end(AVCodecContext *avctx) } AVCodec ff_xan_wc3_decoder = { - "xan_wc3", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_XAN_WC3, - sizeof(XanContext), - xan_decode_init, - NULL, - xan_decode_end, - xan_decode_frame, - CODEC_CAP_DR1, + .name = "xan_wc3", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_XAN_WC3, + .priv_data_size = sizeof(XanContext), + .init = xan_decode_init, + .close = 
xan_decode_end, + .decode = xan_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Wing Commander III / Xan"), }; diff --git a/libavcodec/xl.c b/libavcodec/xl.c index 7f3b0775c0..c29e8b3190 100644 --- a/libavcodec/xl.c +++ b/libavcodec/xl.c @@ -140,14 +140,13 @@ static av_cold int decode_end(AVCodecContext *avctx){ } AVCodec ff_xl_decoder = { - "xl", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_VIXL, - sizeof(VideoXLContext), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, + .name = "xl", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_VIXL, + .priv_data_size = sizeof(VideoXLContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Miro VideoXL"), }; diff --git a/libavcodec/xsubdec.c b/libavcodec/xsubdec.c index 9289a2554e..5e0e59faa4 100644 --- a/libavcodec/xsubdec.c +++ b/libavcodec/xsubdec.c @@ -18,6 +18,8 @@ * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ + +#include "libavutil/mathematics.h" #include "libavutil/imgutils.h" #include "avcodec.h" #include "get_bits.h" @@ -134,13 +136,10 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, } AVCodec ff_xsub_decoder = { - "xsub", - AVMEDIA_TYPE_SUBTITLE, - CODEC_ID_XSUB, - 0, - decode_init, - NULL, - NULL, - decode_frame, + .name = "xsub", + .type = AVMEDIA_TYPE_SUBTITLE, + .id = CODEC_ID_XSUB, + .init = decode_init, + .decode = decode_frame, .long_name = NULL_IF_CONFIG_SMALL("XSUB"), }; diff --git a/libavcodec/xsubenc.c b/libavcodec/xsubenc.c index a7e3a891d4..447759b1fe 100644 --- a/libavcodec/xsubenc.c +++ b/libavcodec/xsubenc.c @@ -36,8 +36,8 @@ /** * Encode a single color run. At most 16 bits will be used. - * \param len length of the run, values > 255 mean "until end of line", may not be < 0. - * \param color color to encode, only the lowest two bits are used and all others must be 0. + * @param len length of the run, values > 255 mean "until end of line", may not be < 0. + * @param color color to encode, only the lowest two bits are used and all others must be 0. 
*/ static void put_xsub_rle(PutBitContext *pb, int len, int color) { @@ -211,12 +211,10 @@ static av_cold int xsub_encoder_init(AVCodecContext *avctx) } AVCodec ff_xsub_encoder = { - "xsub", - AVMEDIA_TYPE_SUBTITLE, - CODEC_ID_XSUB, - 0, - xsub_encoder_init, - xsub_encode, - NULL, + .name = "xsub", + .type = AVMEDIA_TYPE_SUBTITLE, + .id = CODEC_ID_XSUB, + .init = xsub_encoder_init, + .encode = xsub_encode, .long_name = NULL_IF_CONFIG_SMALL("DivX subtitles (XSUB)"), }; diff --git a/libavcodec/xxan.c b/libavcodec/xxan.c index 10ec53f467..e96e1ddbfe 100644 --- a/libavcodec/xxan.c +++ b/libavcodec/xxan.c @@ -415,15 +415,14 @@ static av_cold int xan_decode_end(AVCodecContext *avctx) } AVCodec ff_xan_wc4_decoder = { - "xan_wc4", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_XAN_WC4, - sizeof(XanContext), - xan_decode_init, - NULL, - xan_decode_end, - xan_decode_frame, - CODEC_CAP_DR1, + .name = "xan_wc4", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_XAN_WC4, + .priv_data_size = sizeof(XanContext), + .init = xan_decode_init, + .close = xan_decode_end, + .decode = xan_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Wing Commander IV / Xxan"), }; diff --git a/libavcodec/yop.c b/libavcodec/yop.c index 45a3344b9e..87a91f28d7 100644 --- a/libavcodec/yop.c +++ b/libavcodec/yop.c @@ -1,5 +1,4 @@ -/** - * @file +/* * Psygnosis YOP decoder * * Copyright (C) 2010 Mohamed Naufal Basheer <naufal11@gmail.com> @@ -250,13 +249,12 @@ static int yop_decode_frame(AVCodecContext *avctx, void *data, int *data_size, } AVCodec ff_yop_decoder = { - "yop", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_YOP, - sizeof(YopDecContext), - yop_decode_init, - NULL, - yop_decode_close, - yop_decode_frame, + .name = "yop", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_YOP, + .priv_data_size = sizeof(YopDecContext), + .init = yop_decode_init, + .close = yop_decode_close, + .decode = yop_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("Psygnosis YOP Video"), }; diff --git a/libavcodec/zmbv.c b/libavcodec/zmbv.c index 4bd159cc44..054c84231f 100644 --- a/libavcodec/zmbv.c +++ b/libavcodec/zmbv.c @@ -652,15 +652,14 @@ static av_cold int decode_end(AVCodecContext *avctx) } AVCodec ff_zmbv_decoder = { - "zmbv", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_ZMBV, - sizeof(ZmbvContext), - decode_init, - NULL, - decode_end, - decode_frame, - CODEC_CAP_DR1, + .name = "zmbv", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_ZMBV, + .priv_data_size = sizeof(ZmbvContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Zip Motion Blocks Video"), }; diff --git a/libavcodec/zmbvenc.c b/libavcodec/zmbvenc.c index 4c98987fea..ce65ce4dc0 100644 --- a/libavcodec/zmbvenc.c +++ b/libavcodec/zmbvenc.c @@ -324,13 +324,13 @@ static av_cold int encode_end(AVCodecContext *avctx) } AVCodec ff_zmbv_encoder = { - "zmbv", - AVMEDIA_TYPE_VIDEO, - CODEC_ID_ZMBV, - sizeof(ZmbvEncContext), - encode_init, - encode_frame, - encode_end, + .name = "zmbv", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_ZMBV, + .priv_data_size = sizeof(ZmbvEncContext), + .init = encode_init, + .encode = encode_frame, + .close = encode_end, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_PAL8, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("Zip Motion Blocks Video"), }; diff --git a/libavdevice/Makefile b/libavdevice/Makefile index 60103a4864..0537c0f95f 100644 --- a/libavdevice/Makefile +++ b/libavdevice/Makefile @@ -19,6 +19,7 @@ OBJS-$(CONFIG_DSHOW_INDEV) += dshow.o dshow_enummediatypes.o \ 
OBJS-$(CONFIG_DV1394_INDEV) += dv1394.o OBJS-$(CONFIG_FBDEV_INDEV) += fbdev.o OBJS-$(CONFIG_JACK_INDEV) += jack_audio.o +OBJS-$(CONFIG_OPENAL_INDEV) += openal-dec.o OBJS-$(CONFIG_OSS_INDEV) += oss_audio.o OBJS-$(CONFIG_OSS_OUTDEV) += oss_audio.o OBJS-$(CONFIG_SDL_OUTDEV) += sdl.o @@ -35,4 +36,4 @@ OBJS-$(CONFIG_LIBDC1394_INDEV) += libdc1394.o SKIPHEADERS-$(HAVE_ALSA_ASOUNDLIB_H) += alsa-audio.h SKIPHEADERS-$(HAVE_SNDIO_H) += sndio_common.h -include $(SUBDIR)../subdir.mak +include $(SRC_PATH)/subdir.mak diff --git a/libavdevice/alldevices.c b/libavdevice/alldevices.c index 7846704861..ef302d700d 100644 --- a/libavdevice/alldevices.c +++ b/libavdevice/alldevices.c @@ -44,6 +44,7 @@ void avdevice_register_all(void) REGISTER_INDEV (DV1394, dv1394); REGISTER_INDEV (FBDEV, fbdev); REGISTER_INDEV (JACK, jack); + REGISTER_INDEV (OPENAL, openal); REGISTER_INOUTDEV (OSS, oss); REGISTER_OUTDEV (SDL, sdl); REGISTER_INOUTDEV (SNDIO, sndio); diff --git a/libavdevice/alsa-audio-common.c b/libavdevice/alsa-audio-common.c index 38466a06ce..0943ab030a 100644 --- a/libavdevice/alsa-audio-common.c +++ b/libavdevice/alsa-audio-common.c @@ -30,6 +30,7 @@ #include <alsa/asoundlib.h> #include "avdevice.h" +#include "libavutil/avassert.h" #include "alsa-audio.h" @@ -64,7 +65,7 @@ static av_cold snd_pcm_format_t codec_id_to_pcm_format(int codec_id) static void alsa_reorder_ ## NAME ## _out_50(const void *in_v, void *out_v, int n) \ { \ const TYPE *in = in_v; \ - TYPE * out = out_v; \ + TYPE *out = out_v; \ \ while (n-- > 0) { \ out[0] = in[0]; \ @@ -81,7 +82,7 @@ static void alsa_reorder_ ## NAME ## _out_50(const void *in_v, void *out_v, int static void alsa_reorder_ ## NAME ## _out_51(const void *in_v, void *out_v, int n) \ { \ const TYPE *in = in_v; \ - TYPE * out = out_v; \ + TYPE *out = out_v; \ \ while (n-- > 0) { \ out[0] = in[0]; \ @@ -99,7 +100,7 @@ static void alsa_reorder_ ## NAME ## _out_51(const void *in_v, void *out_v, int static void alsa_reorder_ ## NAME ## _out_71(const void *in_v, void *out_v, int n) \ { \ const TYPE *in = in_v; \ - TYPE * out = out_v; \ + TYPE *out = out_v; \ \ while (n-- > 0) { \ out[0] = in[0]; \ @@ -115,6 +116,9 @@ static void alsa_reorder_ ## NAME ## _out_71(const void *in_v, void *out_v, int } \ } +REORDER_OUT_50(int8, int8_t) +REORDER_OUT_51(int8, int8_t) +REORDER_OUT_71(int8, int8_t) REORDER_OUT_50(int16, int16_t) REORDER_OUT_51(int16, int16_t) REORDER_OUT_71(int16, int16_t) @@ -125,46 +129,57 @@ REORDER_OUT_50(f32, float) REORDER_OUT_51(f32, float) REORDER_OUT_71(f32, float) -#define REORDER_DUMMY ((void *)1) +#define FORMAT_I8 0 +#define FORMAT_I16 1 +#define FORMAT_I32 2 +#define FORMAT_F32 3 + +#define PICK_REORDER(layout)\ +switch(format) {\ + case FORMAT_I8: s->reorder_func = alsa_reorder_int8_out_ ##layout; break;\ + case FORMAT_I16: s->reorder_func = alsa_reorder_int16_out_ ##layout; break;\ + case FORMAT_I32: s->reorder_func = alsa_reorder_int32_out_ ##layout; break;\ + case FORMAT_F32: s->reorder_func = alsa_reorder_f32_out_ ##layout; break;\ +} -static av_cold ff_reorder_func find_reorder_func(int codec_id, - int64_t layout, - int out) +static av_cold int find_reorder_func(AlsaData *s, int codec_id, int64_t layout, int out) { - return - codec_id == CODEC_ID_PCM_U16LE || codec_id == CODEC_ID_PCM_U16BE || - codec_id == CODEC_ID_PCM_S16LE || codec_id == CODEC_ID_PCM_S16BE ? - layout == AV_CH_LAYOUT_QUAD || layout == AV_CH_LAYOUT_2_2 ? - REORDER_DUMMY : - layout == AV_CH_LAYOUT_5POINT0_BACK || layout == AV_CH_LAYOUT_5POINT0 ? - out ? 
alsa_reorder_int16_out_50 : NULL : - layout == AV_CH_LAYOUT_5POINT1_BACK || layout == AV_CH_LAYOUT_5POINT1 ? - out ? alsa_reorder_int16_out_51 : NULL : - layout == AV_CH_LAYOUT_7POINT1 ? - out ? alsa_reorder_int16_out_71 : NULL : - NULL : - codec_id == CODEC_ID_PCM_U32LE || codec_id == CODEC_ID_PCM_U32BE || - codec_id == CODEC_ID_PCM_S32LE || codec_id == CODEC_ID_PCM_S32BE ? - layout == AV_CH_LAYOUT_QUAD || layout == AV_CH_LAYOUT_2_2 ? - REORDER_DUMMY : - layout == AV_CH_LAYOUT_5POINT0_BACK || layout == AV_CH_LAYOUT_5POINT0 ? - out ? alsa_reorder_int32_out_50 : NULL : - layout == AV_CH_LAYOUT_5POINT1_BACK || layout == AV_CH_LAYOUT_5POINT1 ? - out ? alsa_reorder_int32_out_51 : NULL : - layout == AV_CH_LAYOUT_7POINT1 ? - out ? alsa_reorder_int32_out_71 : NULL : - NULL : - codec_id == CODEC_ID_PCM_F32LE || codec_id == CODEC_ID_PCM_F32BE ? - layout == AV_CH_LAYOUT_QUAD || layout == AV_CH_LAYOUT_2_2 ? - REORDER_DUMMY : - layout == AV_CH_LAYOUT_5POINT0_BACK || layout == AV_CH_LAYOUT_5POINT0 ? - out ? alsa_reorder_f32_out_50 : NULL : - layout == AV_CH_LAYOUT_5POINT1_BACK || layout == AV_CH_LAYOUT_5POINT1 ? - out ? alsa_reorder_f32_out_51 : NULL : - layout == AV_CH_LAYOUT_7POINT1 ? - out ? alsa_reorder_f32_out_71 : NULL : - NULL : - NULL; + int format; + + /* reordering input is not currently supported */ + if (!out) + return AVERROR(ENOSYS); + + /* reordering is not needed for QUAD or 2_2 layout */ + if (layout == AV_CH_LAYOUT_QUAD || layout == AV_CH_LAYOUT_2_2) + return 0; + + switch (codec_id) { + case CODEC_ID_PCM_S8: + case CODEC_ID_PCM_U8: + case CODEC_ID_PCM_ALAW: + case CODEC_ID_PCM_MULAW: format = FORMAT_I8; break; + case CODEC_ID_PCM_S16LE: + case CODEC_ID_PCM_S16BE: + case CODEC_ID_PCM_U16LE: + case CODEC_ID_PCM_U16BE: format = FORMAT_I16; break; + case CODEC_ID_PCM_S32LE: + case CODEC_ID_PCM_S32BE: + case CODEC_ID_PCM_U32LE: + case CODEC_ID_PCM_U32BE: format = FORMAT_I32; break; + case CODEC_ID_PCM_F32LE: + case CODEC_ID_PCM_F32BE: format = FORMAT_F32; break; + default: return AVERROR(ENOSYS); + } + + if (layout == AV_CH_LAYOUT_5POINT0_BACK || layout == AV_CH_LAYOUT_5POINT0) + PICK_REORDER(50) + else if (layout == AV_CH_LAYOUT_5POINT1_BACK || layout == AV_CH_LAYOUT_5POINT1) + PICK_REORDER(51) + else if (layout == AV_CH_LAYOUT_7POINT1) + PICK_REORDER(71) + + return s->reorder_func ? 
0 : AVERROR(ENOSYS); } av_cold int ff_alsa_open(AVFormatContext *ctx, snd_pcm_stream_t mode, @@ -245,6 +260,7 @@ av_cold int ff_alsa_open(AVFormatContext *ctx, snd_pcm_stream_t mode, } snd_pcm_hw_params_get_buffer_size_max(hw_params, &buffer_size); + buffer_size = FFMIN(buffer_size, ALSA_BUFFER_SIZE_MAX); /* TODO: maybe use ctx->max_picture_buffer somehow */ res = snd_pcm_hw_params_set_buffer_size_near(h, hw_params, &buffer_size); if (res < 0) { @@ -254,6 +270,8 @@ av_cold int ff_alsa_open(AVFormatContext *ctx, snd_pcm_stream_t mode, } snd_pcm_hw_params_get_period_size_min(hw_params, &period_size, NULL); + if (!period_size) + period_size = buffer_size / 4; res = snd_pcm_hw_params_set_period_size_near(h, hw_params, &period_size, NULL); if (res < 0) { av_log(ctx, AV_LOG_ERROR, "cannot set ALSA period size (%s)\n", @@ -272,22 +290,17 @@ av_cold int ff_alsa_open(AVFormatContext *ctx, snd_pcm_stream_t mode, snd_pcm_hw_params_free(hw_params); if (channels > 2 && layout) { - s->reorder_func = find_reorder_func(*codec_id, layout, - mode == SND_PCM_STREAM_PLAYBACK); - if (s->reorder_func == REORDER_DUMMY) { - s->reorder_func = NULL; - } else if (s->reorder_func) { + if (find_reorder_func(s, *codec_id, layout, mode == SND_PCM_STREAM_PLAYBACK) < 0) { + char name[128]; + av_get_channel_layout_string(name, sizeof(name), channels, layout); + av_log(ctx, AV_LOG_WARNING, "ALSA channel layout unknown or unimplemented for %s %s.\n", + name, mode == SND_PCM_STREAM_PLAYBACK ? "playback" : "capture"); + } + if (s->reorder_func) { s->reorder_buf_size = buffer_size; s->reorder_buf = av_malloc(s->reorder_buf_size * s->frame_size); if (!s->reorder_buf) goto fail1; - } else { - char name[32]; - av_get_channel_layout_string(name, sizeof(name), channels, layout); - av_log(ctx, AV_LOG_WARNING, - "ALSA channel layout unknown or unimplemented for %s %s.\n", - name, - mode == SND_PCM_STREAM_PLAYBACK ? 
"playback" : "capture"); } } @@ -306,6 +319,7 @@ av_cold int ff_alsa_close(AVFormatContext *s1) AlsaData *s = s1->priv_data; av_freep(&s->reorder_buf); + ff_timefilter_destroy(s->timefilter); snd_pcm_close(s->h); return 0; } @@ -336,6 +350,7 @@ int ff_alsa_extend_reorder_buf(AlsaData *s, int min_size) int size = s->reorder_buf_size; void *r; + av_assert0(size != 0); while (size < min_size) size *= 2; r = av_realloc(s->reorder_buf, size * s->frame_size); diff --git a/libavdevice/alsa-audio-dec.c b/libavdevice/alsa-audio-dec.c index 2424c022d3..94162d2d9f 100644 --- a/libavdevice/alsa-audio-dec.c +++ b/libavdevice/alsa-audio-dec.c @@ -47,6 +47,7 @@ #include <alsa/asoundlib.h> #include "libavutil/opt.h" +#include "libavutil/mathematics.h" #include "avdevice.h" #include "alsa-audio.h" @@ -59,6 +60,7 @@ static av_cold int audio_read_header(AVFormatContext *s1, int ret; enum CodecID codec_id; snd_pcm_sw_params_t *sw_params; + double o; #if FF_API_FORMAT_PARAMETERS if (ap->sample_rate > 0) @@ -82,35 +84,17 @@ static av_cold int audio_read_header(AVFormatContext *s1, return AVERROR(EIO); } - if (snd_pcm_type(s->h) != SND_PCM_TYPE_HW) - av_log(s1, AV_LOG_WARNING, - "capture with some ALSA plugins, especially dsnoop, " - "may hang.\n"); - - ret = snd_pcm_sw_params_malloc(&sw_params); - if (ret < 0) { - av_log(s1, AV_LOG_ERROR, "cannot allocate software parameters structure (%s)\n", - snd_strerror(ret)); - goto fail; - } - - snd_pcm_sw_params_current(s->h, sw_params); - snd_pcm_sw_params_set_tstamp_mode(s->h, sw_params, SND_PCM_TSTAMP_ENABLE); - - ret = snd_pcm_sw_params(s->h, sw_params); - snd_pcm_sw_params_free(sw_params); - if (ret < 0) { - av_log(s1, AV_LOG_ERROR, "cannot install ALSA software parameters (%s)\n", - snd_strerror(ret)); - goto fail; - } - /* take real parameters */ st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_id = codec_id; st->codec->sample_rate = s->sample_rate; st->codec->channels = s->channels; av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */ + o = 2 * M_PI * s->period_size / s->sample_rate * 1.5; // bandwidth: 1.5Hz + s->timefilter = ff_timefilter_new(1000000.0 / s->sample_rate, + sqrt(2 * o), o * o); + if (!s->timefilter) + goto fail; return 0; @@ -124,14 +108,14 @@ static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt) AlsaData *s = s1->priv_data; AVStream *st = s1->streams[0]; int res; - snd_htimestamp_t timestamp; - snd_pcm_uframes_t ts_delay; + int64_t dts; + snd_pcm_sframes_t delay = 0; - if (av_new_packet(pkt, s->period_size) < 0) { + if (av_new_packet(pkt, s->period_size * s->frame_size) < 0) { return AVERROR(EIO); } - while ((res = snd_pcm_readi(s->h, pkt->data, pkt->size / s->frame_size)) < 0) { + while ((res = snd_pcm_readi(s->h, pkt->data, s->period_size)) < 0) { if (res == -EAGAIN) { av_free_packet(pkt); @@ -144,14 +128,13 @@ static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt) return AVERROR(EIO); } + ff_timefilter_reset(s->timefilter); } - snd_pcm_htimestamp(s->h, &ts_delay, ×tamp); - ts_delay += res; - pkt->pts = timestamp.tv_sec * 1000000LL - + (timestamp.tv_nsec * st->codec->sample_rate - - ts_delay * 1000000000LL + st->codec->sample_rate * 500LL) - / (st->codec->sample_rate * 1000LL); + dts = av_gettime(); + snd_pcm_delay(s->h, &delay); + dts -= av_rescale(delay + res, 1000000, s->sample_rate); + pkt->pts = ff_timefilter_update(s->timefilter, dts, res); pkt->size = res * s->frame_size; diff --git a/libavdevice/alsa-audio-enc.c b/libavdevice/alsa-audio-enc.c index a53c1763d5..0da22bb070 100644 --- 
a/libavdevice/alsa-audio-enc.c +++ b/libavdevice/alsa-audio-enc.c @@ -61,6 +61,7 @@ static av_cold int audio_write_header(AVFormatContext *s1) st->codec->sample_rate, sample_rate); goto fail; } + av_set_pts_info(st, 64, 1, sample_rate); return res; @@ -101,6 +102,17 @@ static int audio_write_packet(AVFormatContext *s1, AVPacket *pkt) return 0; } +static void +audio_get_output_timestamp(AVFormatContext *s1, int stream, + int64_t *dts, int64_t *wall) +{ + AlsaData *s = s1->priv_data; + snd_pcm_sframes_t delay = 0; + *wall = av_gettime(); + snd_pcm_delay(s->h, &delay); + *dts = s1->streams[0]->cur_dts - delay; +} + AVOutputFormat ff_alsa_muxer = { "alsa", NULL_IF_CONFIG_SMALL("ALSA audio output"), @@ -112,5 +124,6 @@ AVOutputFormat ff_alsa_muxer = { audio_write_header, audio_write_packet, ff_alsa_close, + .get_output_timestamp = audio_get_output_timestamp, .flags = AVFMT_NOFILE, }; diff --git a/libavdevice/alsa-audio.h b/libavdevice/alsa-audio.h index 431401bb13..ab60e73c59 100644 --- a/libavdevice/alsa-audio.h +++ b/libavdevice/alsa-audio.h @@ -33,6 +33,7 @@ #include <alsa/asoundlib.h> #include "config.h" #include "libavutil/log.h" +#include "libavformat/timefilter.h" #include "avdevice.h" /* XXX: we make the assumption that the soundcard accepts this format */ @@ -42,16 +43,19 @@ typedef void (*ff_reorder_func)(const void *, void *, int); +#define ALSA_BUFFER_SIZE_MAX 32768 + typedef struct { AVClass *class; snd_pcm_t *h; - int frame_size; ///< preferred size for reads and writes - int period_size; ///< bytes per sample * channels - ff_reorder_func reorder_func; - void *reorder_buf; - int reorder_buf_size; ///< in frames + int frame_size; ///< bytes per sample * channels + int period_size; ///< preferred size for reads and writes, in frames int sample_rate; ///< sample rate set by user int channels; ///< number of channels set by user + TimeFilter *timefilter; + void (*reorder_func)(const void *, void *, int); + void *reorder_buf; + int reorder_buf_size; ///< in frames } AlsaData; /** diff --git a/libavdevice/avdevice.h b/libavdevice/avdevice.h index 0661bcbc2c..ce3167e8bf 100644 --- a/libavdevice/avdevice.h +++ b/libavdevice/avdevice.h @@ -23,7 +23,7 @@ #include "libavformat/avformat.h" #define LIBAVDEVICE_VERSION_MAJOR 52 -#define LIBAVDEVICE_VERSION_MINOR 5 +#define LIBAVDEVICE_VERSION_MINOR 6 #define LIBAVDEVICE_VERSION_MICRO 0 #define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \ diff --git a/libavdevice/bktr.c b/libavdevice/bktr.c index f6216e0aa3..9c1afe518b 100644 --- a/libavdevice/bktr.c +++ b/libavdevice/bktr.c @@ -248,7 +248,7 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap) VideoData *s = s1->priv_data; AVStream *st; int width, height; - AVRational fps; + AVRational framerate; int ret = 0; #if FF_API_FORMAT_PARAMETERS @@ -263,7 +263,7 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap) #endif if ((ret = av_parse_video_size(&width, &height, s->video_size)) < 0) { - av_log(s1, AV_LOG_ERROR, "Couldn't parse video size.\n"); + av_log(s1, AV_LOG_ERROR, "Could not parse video size '%s'.\n", s->video_size); goto out; } @@ -277,8 +277,8 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap) ret = AVERROR(EINVAL); goto out; } - if ((ret = av_parse_video_rate(&fps, s->framerate)) < 0) { - av_log(s1, AV_LOG_ERROR, "Couldn't parse framerate.\n"); + if ((ret = av_parse_video_rate(&framerate, s->framerate)) < 0) { + av_log(s1, AV_LOG_ERROR, "Could not parse framerate '%s'.\n", s->framerate); goto 
out; } #if FF_API_FORMAT_PARAMETERS @@ -287,7 +287,7 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap) if (ap->height > 0) height = ap->height; if (ap->time_base.num) - fps = (AVRational){ap->time_base.den, ap->time_base.num}; + framerate = (AVRational){ap->time_base.den, ap->time_base.num}; #endif st = av_new_stream(s1, 0); @@ -299,15 +299,15 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap) s->width = width; s->height = height; - s->per_frame = ((uint64_t)1000000 * fps.den) / fps.num; + s->per_frame = ((uint64_t)1000000 * framerate.den) / framerate.num; st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->pix_fmt = PIX_FMT_YUV420P; st->codec->codec_id = CODEC_ID_RAWVIDEO; st->codec->width = width; st->codec->height = height; - st->codec->time_base.den = fps.num; - st->codec->time_base.num = fps.den; + st->codec->time_base.den = framerate.num; + st->codec->time_base.num = framerate.den; if (bktr_init(s1->filename, width, height, s->standard, diff --git a/libavdevice/fbdev.c b/libavdevice/fbdev.c index d5ba561db8..8f7fc676ac 100644 --- a/libavdevice/fbdev.c +++ b/libavdevice/fbdev.c @@ -24,7 +24,7 @@ * @file * Linux framebuffer input device, * inspired by code from fbgrab.c by Gunnar Monell. - * See also http://linux-fbdev.sourceforge.net/. + * @see http://linux-fbdev.sourceforge.net/ */ /* #define DEBUG */ @@ -79,7 +79,7 @@ static enum PixelFormat get_pixfmt_from_fb_varinfo(struct fb_var_screeninfo *var typedef struct { AVClass *class; ///< class for private options int frame_size; ///< size in bytes of a grabbed frame - AVRational fps; ///< framerate + AVRational framerate_q; ///< framerate char *framerate; ///< framerate string set by a private option int64_t time_frame; ///< time for the next frame to output (in 1/1000000 units) @@ -102,14 +102,14 @@ av_cold static int fbdev_read_header(AVFormatContext *avctx, enum PixelFormat pix_fmt; int ret, flags = O_RDONLY; - ret = av_parse_video_rate(&fbdev->fps, fbdev->framerate); + ret = av_parse_video_rate(&fbdev->framerate_q, fbdev->framerate); if (ret < 0) { - av_log(avctx, AV_LOG_ERROR, "Couldn't parse framerate.\n"); + av_log(avctx, AV_LOG_ERROR, "Could not parse framerate '%s'.\n", fbdev->framerate); return ret; } #if FF_API_FORMAT_PARAMETERS if (ap->time_base.num) - fbdev->fps = (AVRational){ap->time_base.den, ap->time_base.num}; + fbdev->framerate_q = (AVRational){ap->time_base.den, ap->time_base.num}; #endif if (!(st = av_new_stream(avctx, 0))) @@ -168,15 +168,15 @@ av_cold static int fbdev_read_header(AVFormatContext *avctx, st->codec->width = fbdev->width; st->codec->height = fbdev->heigth; st->codec->pix_fmt = pix_fmt; - st->codec->time_base = (AVRational){fbdev->fps.den, fbdev->fps.num}; + st->codec->time_base = (AVRational){fbdev->framerate_q.den, fbdev->framerate_q.num}; st->codec->bit_rate = - fbdev->width * fbdev->heigth * fbdev->bytes_per_pixel * av_q2d(fbdev->fps) * 8; + fbdev->width * fbdev->heigth * fbdev->bytes_per_pixel * av_q2d(fbdev->framerate_q) * 8; av_log(avctx, AV_LOG_INFO, "w:%d h:%d bpp:%d pixfmt:%s fps:%d/%d bit_rate:%d\n", fbdev->width, fbdev->heigth, fbdev->varinfo.bits_per_pixel, av_pix_fmt_descriptors[pix_fmt].name, - fbdev->fps.num, fbdev->fps.den, + fbdev->framerate_q.num, fbdev->framerate_q.den, st->codec->bit_rate); return 0; @@ -204,7 +204,7 @@ static int fbdev_read_packet(AVFormatContext *avctx, AVPacket *pkt) "time_frame:%"PRId64" curtime:%"PRId64" delay:%"PRId64"\n", fbdev->time_frame, curtime, delay); if (delay <= 0) { - fbdev->time_frame += 
INT64_C(1000000) / av_q2d(fbdev->fps); + fbdev->time_frame += INT64_C(1000000) / av_q2d(fbdev->framerate_q); break; } if (avctx->flags & AVFMT_FLAG_NONBLOCK) diff --git a/libavdevice/libdc1394.c b/libavdevice/libdc1394.c index 20170877dc..dffb06ca1a 100644 --- a/libavdevice/libdc1394.c +++ b/libavdevice/libdc1394.c @@ -23,6 +23,7 @@ #include "config.h" #include "libavutil/log.h" +#include "libavutil/mathematics.h" #include "libavutil/opt.h" #include "avdevice.h" diff --git a/libavdevice/openal-dec.c b/libavdevice/openal-dec.c new file mode 100644 index 0000000000..c1bc351393 --- /dev/null +++ b/libavdevice/openal-dec.c @@ -0,0 +1,250 @@ +/* + * Copyright (c) 2011 Jonathan Baldwin + * + * This file is part of FFmpeg. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH + * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, + * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR + * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file + * OpenAL 1.1 capture device for libavdevice + **/ + +#include <AL/al.h> +#include <AL/alc.h> + +#include "libavutil/opt.h" +#include "avdevice.h" + +typedef struct { + AVClass *class; + /** OpenAL capture device context. **/ + ALCdevice *device; + /** The number of channels in the captured audio. **/ + int channels; + /** The sample rate (in Hz) of the captured audio. **/ + int sample_rate; + /** The sample size (in bits) of the captured audio. **/ + int sample_size; + /** The OpenAL sample format of the captured audio. **/ + ALCenum sample_format; + /** The number of bytes between two consecutive samples of the same channel/component. **/ + ALCint sample_step; + /** If true, print a list of capture devices on this system and exit. **/ + int list_devices; +} al_data; + +typedef struct { + ALCenum al_fmt; + enum CodecID codec_id; + int channels; +} al_format_info; + +#define LOWEST_AL_FORMAT FFMIN(FFMIN(AL_FORMAT_MONO8,AL_FORMAT_MONO16),FFMIN(AL_FORMAT_STEREO8,AL_FORMAT_STEREO16)) + +/** + * Get information about an AL_FORMAT value. + * @param al_fmt the AL_FORMAT value to find information about. + * @return A pointer to a structure containing information about the AL_FORMAT value. + */ +static inline al_format_info* get_al_format_info(ALCenum al_fmt) +{ + static al_format_info info_table[] = { + [AL_FORMAT_MONO8-LOWEST_AL_FORMAT] = {AL_FORMAT_MONO8, CODEC_ID_PCM_U8, 1}, + [AL_FORMAT_MONO16-LOWEST_AL_FORMAT] = {AL_FORMAT_MONO16, AV_NE (CODEC_ID_PCM_S16BE, CODEC_ID_PCM_S16LE), 1}, + [AL_FORMAT_STEREO8-LOWEST_AL_FORMAT] = {AL_FORMAT_STEREO8, CODEC_ID_PCM_U8, 2}, + [AL_FORMAT_STEREO16-LOWEST_AL_FORMAT] = {AL_FORMAT_STEREO16, AV_NE (CODEC_ID_PCM_S16BE, CODEC_ID_PCM_S16LE), 2}, + }; + + return &info_table[al_fmt-LOWEST_AL_FORMAT]; +} + +/** + * Get the OpenAL error code, translated into an av/errno error code. + * @param device The ALC device to check for errors. + * @param error_msg_ret A pointer to a char* in which to return the error message, or NULL if desired. + * @return The error code, or 0 if there is no error. 
+ */ +static inline int al_get_error(ALCdevice *device, const char** error_msg_ret) +{ + ALCenum error = alcGetError(device); + if (error_msg_ret) + *error_msg_ret = (const char*) alcGetString(device, error); + switch (error) { + case ALC_NO_ERROR: + return 0; + case ALC_INVALID_DEVICE: + return AVERROR(ENODEV); + break; + case ALC_INVALID_CONTEXT: + case ALC_INVALID_ENUM: + case ALC_INVALID_VALUE: + return AVERROR(EINVAL); + break; + case ALC_OUT_OF_MEMORY: + return AVERROR(ENOMEM); + break; + default: + return AVERROR(EIO); + } +} + +/** + * Print out a list of OpenAL capture devices on this system. + */ +static inline void print_al_capture_devices(void *log_ctx) +{ + const char *devices; + + if (!(devices = alcGetString(NULL, ALC_CAPTURE_DEVICE_SPECIFIER))) + return; + + av_log(log_ctx, AV_LOG_INFO, "List of OpenAL capture devices on this system:\n"); + + for (; *devices != '\0'; devices += strlen(devices) + 1) + av_log(log_ctx, AV_LOG_INFO, " %s\n", devices); +} + +static int read_header(AVFormatContext *ctx, AVFormatParameters *ap) +{ + al_data *ad = ctx->priv_data; + static const ALCenum sample_formats[2][2] = { + { AL_FORMAT_MONO8, AL_FORMAT_STEREO8 }, + { AL_FORMAT_MONO16, AL_FORMAT_STEREO16 } + }; + int error = 0; + const char *error_msg; + AVStream *st = NULL; + AVCodecContext *codec = NULL; + + if (ad->list_devices) { + print_al_capture_devices(ctx); + return AVERROR_EXIT; + } + + ad->sample_format = sample_formats[ad->sample_size/8-1][ad->channels-1]; + + /* Open device for capture */ + ad->device = + alcCaptureOpenDevice(ctx->filename[0] ? ctx->filename : NULL, + ad->sample_rate, + ad->sample_format, + ad->sample_rate); /* Maximum 1 second of sample data to be read at once */ + + if (error = al_get_error(ad->device, &error_msg)) goto fail; + + /* Create stream */ + if (!(st = av_new_stream(ctx, 0))) { + error = AVERROR(ENOMEM); + goto fail; + } + + /* We work in microseconds */ + av_set_pts_info(st, 64, 1, 1000000); + + /* Set codec parameters */ + codec = st->codec; + codec->codec_type = AVMEDIA_TYPE_AUDIO; + codec->sample_rate = ad->sample_rate; + codec->channels = get_al_format_info(ad->sample_format)->channels; + codec->codec_id = get_al_format_info(ad->sample_format)->codec_id; + + /* This is needed to read the audio data */ + ad->sample_step = (av_get_bits_per_sample(get_al_format_info(ad->sample_format)->codec_id) * + get_al_format_info(ad->sample_format)->channels) / 8; + + /* Finally, start the capture process */ + alcCaptureStart(ad->device); + + return 0; + +fail: + /* Handle failure */ + if (ad->device) + alcCaptureCloseDevice(ad->device); + if (error_msg) + av_log(ctx, AV_LOG_ERROR, "Cannot open device: %s\n", error_msg); + return error; +} + +static int read_packet(AVFormatContext* ctx, AVPacket *pkt) +{ + al_data *ad = ctx->priv_data; + int error=0; + const char *error_msg; + ALCint nb_samples; + + /* Get number of samples available */ + alcGetIntegerv(ad->device, ALC_CAPTURE_SAMPLES, (ALCsizei) sizeof(ALCint), &nb_samples); + if (error = al_get_error(ad->device, &error_msg)) goto fail; + + /* Create a packet of appropriate size */ + av_new_packet(pkt, nb_samples*ad->sample_step); + pkt->pts = av_gettime(); + + /* Fill the packet with the available samples */ + alcCaptureSamples(ad->device, pkt->data, nb_samples); + if (error = al_get_error(ad->device, &error_msg)) goto fail; + + return pkt->size; +fail: + /* Handle failure */ + if (pkt->data) + av_destruct_packet(pkt); + if (error_msg) + av_log(ctx, AV_LOG_ERROR, "Error: %s\n", error_msg); + return error; +} + 
+static int read_close(AVFormatContext* ctx) +{ + al_data *ad = ctx->priv_data; + + if (ad->device) { + alcCaptureStop(ad->device); + alcCaptureCloseDevice(ad->device); + } + return 0; +} + +#define OFFSET(x) offsetof(al_data, x) + +static const AVOption options[] = { + {"channels", "set number of channels", OFFSET(channels), FF_OPT_TYPE_INT, {.dbl=2}, 1, 2, AV_OPT_FLAG_DECODING_PARAM }, + {"sample_rate", "set sample rate", OFFSET(sample_rate), FF_OPT_TYPE_INT, {.dbl=44100}, 1, 192000, AV_OPT_FLAG_DECODING_PARAM }, + {"sample_size", "set sample size", OFFSET(sample_size), FF_OPT_TYPE_INT, {.dbl=16}, 8, 16, AV_OPT_FLAG_DECODING_PARAM }, + {"list_devices", "list available devices", OFFSET(list_devices), FF_OPT_TYPE_INT, {.dbl=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM }, + {"true", "", 0, FF_OPT_TYPE_CONST, {.dbl=1}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "list_devices" }, + {"false", "", 0, FF_OPT_TYPE_CONST, {.dbl=0}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "list_devices" }, + {NULL}, +}; + +static const AVClass class = { + .class_name = "openal", + .item_name = av_default_item_name, + .option = options, + .version = LIBAVUTIL_VERSION_INT +}; + +AVInputFormat ff_openal_demuxer = { + .name = "openal", + .long_name = NULL_IF_CONFIG_SMALL("OpenAL audio capture device"), + .priv_data_size = sizeof(al_data), + .read_probe = NULL, + .read_header = read_header, + .read_packet = read_packet, + .read_close = read_close, + .flags = AVFMT_NOFILE, + .priv_class = &class +}; diff --git a/libavdevice/oss_audio.c b/libavdevice/oss_audio.c index 2fde491a07..a1f0cbd3d0 100644 --- a/libavdevice/oss_audio.c +++ b/libavdevice/oss_audio.c @@ -80,13 +80,6 @@ static int audio_open(AVFormatContext *s1, int is_output, const char *audio_devi fcntl(audio_fd, F_SETFL, O_NONBLOCK); s->frame_size = AUDIO_BLOCK_SIZE; -#if 0 - tmp = (NB_FRAGMENTS << 16) | FRAGMENT_BITS; - err = ioctl(audio_fd, SNDCTL_DSP_SETFRAGMENT, &tmp); - if (err < 0) { - perror("SNDCTL_DSP_SETFRAGMENT"); - } -#endif /* select format : favour native format */ err = ioctl(audio_fd, SNDCTL_DSP_GETFMTS, &tmp); diff --git a/libavdevice/v4l2.c b/libavdevice/v4l2.c index 7223654891..468c133f60 100644 --- a/libavdevice/v4l2.c +++ b/libavdevice/v4l2.c @@ -439,19 +439,19 @@ static int v4l2_set_parameters(AVFormatContext *s1, AVFormatParameters *ap) struct v4l2_streamparm streamparm = {0}; struct v4l2_fract *tpf = &streamparm.parm.capture.timeperframe; int i, ret; - AVRational fps; + AVRational framerate_q; streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - if (s->framerate && (ret = av_parse_video_rate(&fps, s->framerate)) < 0) { - av_log(s1, AV_LOG_ERROR, "Couldn't parse framerate.\n"); + if (s->framerate && (ret = av_parse_video_rate(&framerate_q, s->framerate)) < 0) { + av_log(s1, AV_LOG_ERROR, "Could not parse framerate '%s'.\n", s->framerate); return ret; } #if FF_API_FORMAT_PARAMETERS if (ap->channel > 0) s->channel = ap->channel; if (ap->time_base.num) - fps = (AVRational){ap->time_base.den, ap->time_base.num}; + framerate_q = (AVRational){ap->time_base.den, ap->time_base.num}; #endif /* set tv video input */ @@ -500,23 +500,23 @@ static int v4l2_set_parameters(AVFormatContext *s1, AVFormatParameters *ap) } } - if (fps.num && fps.den) { + if (framerate_q.num && framerate_q.den) { av_log(s1, AV_LOG_DEBUG, "Setting time per frame to %d/%d\n", - fps.den, fps.num); - tpf->numerator = fps.den; - tpf->denominator = fps.num; + framerate_q.den, framerate_q.num); + tpf->numerator = framerate_q.den; + tpf->denominator = framerate_q.num; if (ioctl(s->fd, VIDIOC_S_PARM, &streamparm) 
!= 0) { av_log(s1, AV_LOG_ERROR, "ioctl set time per frame(%d/%d) failed\n", - fps.den, fps.num); + framerate_q.den, framerate_q.num); return AVERROR(EIO); } - if (fps.num != tpf->denominator || - fps.den != tpf->numerator) { + if (framerate_q.num != tpf->denominator || + framerate_q.den != tpf->numerator) { av_log(s1, AV_LOG_INFO, "The driver changed the time per frame from %d/%d to %d/%d\n", - fps.den, fps.num, + framerate_q.den, framerate_q.num, tpf->numerator, tpf->denominator); } } else { @@ -581,7 +581,7 @@ static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap) av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */ if (s->video_size && (res = av_parse_video_size(&s->width, &s->height, s->video_size)) < 0) { - av_log(s1, AV_LOG_ERROR, "Couldn't parse video size.\n"); + av_log(s1, AV_LOG_ERROR, "Could not parse video size '%s'.\n", s->video_size); goto out; } if (s->pixel_format && (pix_fmt = av_get_pix_fmt(s->pixel_format)) == PIX_FMT_NONE) { diff --git a/libavdevice/vfwcap.c b/libavdevice/vfwcap.c index a8e67e7dda..7279817529 100644 --- a/libavdevice/vfwcap.c +++ b/libavdevice/vfwcap.c @@ -247,7 +247,7 @@ static int vfw_read_header(AVFormatContext *s, AVFormatParameters *ap) DWORD biCompression; WORD biBitCount; int ret; - AVRational fps; + AVRational framerate_q; if (!strcmp(s->filename, "list")) { for (devnum = 0; devnum <= 9; devnum++) { @@ -267,7 +267,7 @@ static int vfw_read_header(AVFormatContext *s, AVFormatParameters *ap) #if FF_API_FORMAT_PARAMETERS if (ap->time_base.num) - fps = (AVRational){ap->time_base.den, ap->time_base.num}; + framerate_q = (AVRational){ap->time_base.den, ap->time_base.num}; #endif ctx->hwnd = capCreateCaptureWindow(NULL, 0, 0, 0, 0, 0, HWND_MESSAGE, 0); @@ -367,7 +367,7 @@ static int vfw_read_header(AVFormatContext *s, AVFormatParameters *ap) cparms.fYield = 1; // Spawn a background thread cparms.dwRequestMicroSecPerFrame = - (fps.den*1000000) / fps.num; + (framerate_q.den*1000000) / framerate_q.num; cparms.fAbortLeftMouse = 0; cparms.fAbortRightMouse = 0; cparms.fCaptureAudio = 0; @@ -379,7 +379,7 @@ static int vfw_read_header(AVFormatContext *s, AVFormatParameters *ap) goto fail_io; codec = st->codec; - codec->time_base = (AVRational){fps.den, fps.num}; + codec->time_base = (AVRational){framerate_q.den, framerate_q.num}; codec->codec_type = AVMEDIA_TYPE_VIDEO; codec->width = bi->bmiHeader.biWidth; codec->height = bi->bmiHeader.biHeight; diff --git a/libavdevice/x11grab.c b/libavdevice/x11grab.c index 09c121ee7a..a41733480f 100644 --- a/libavdevice/x11grab.c +++ b/libavdevice/x11grab.c @@ -46,6 +46,7 @@ #include <X11/Xproto.h> #include <X11/Xutil.h> #include <sys/shm.h> +#include <X11/extensions/shape.h> #include <X11/extensions/XShm.h> #include <X11/extensions/Xfixes.h> #include "avdevice.h" @@ -70,10 +71,75 @@ struct x11_grab XImage *image; /**< X11 image holding the grab */ int use_shm; /**< !0 when using XShm extension */ XShmSegmentInfo shminfo; /**< When using XShm, keeps track of XShm infos */ - int nomouse; + int draw_mouse; /**< Set by a private option. */ + int follow_mouse; /**< Set by a private option. */ + int show_region; /**< set by a private option. */ char *framerate; /**< Set by a private option. */ + + Window region_win; /**< This is used by show_region option. 
*/ }; +#define REGION_WIN_BORDER 3 +/** + * Draw grabbing region window + * + * @param s x11_grab context + */ +static void +x11grab_draw_region_win(struct x11_grab *s) +{ + Display *dpy = s->dpy; + int screen; + Window win = s->region_win; + GC gc; + + screen = DefaultScreen(dpy); + gc = XCreateGC(dpy, win, 0, 0); + XSetForeground(dpy, gc, WhitePixel(dpy, screen)); + XSetBackground(dpy, gc, BlackPixel(dpy, screen)); + XSetLineAttributes(dpy, gc, REGION_WIN_BORDER, LineDoubleDash, 0, 0); + XDrawRectangle(dpy, win, gc, + 1, 1, + (s->width + REGION_WIN_BORDER * 2) - 1 * 2 - 1, + (s->height + REGION_WIN_BORDER * 2) - 1 * 2 - 1); + XFreeGC(dpy, gc); +} + +/** + * Initialize grabbing region window + * + * @param s x11_grab context + */ +static void +x11grab_region_win_init(struct x11_grab *s) +{ + Display *dpy = s->dpy; + int screen; + XSetWindowAttributes attribs; + XRectangle rect; + + screen = DefaultScreen(dpy); + attribs.override_redirect = True; + s->region_win = XCreateWindow(dpy, RootWindow(dpy, screen), + s->x_off - REGION_WIN_BORDER, + s->y_off - REGION_WIN_BORDER, + s->width + REGION_WIN_BORDER * 2, + s->height + REGION_WIN_BORDER * 2, + 0, CopyFromParent, + InputOutput, CopyFromParent, + CWOverrideRedirect, &attribs); + rect.x = 0; + rect.y = 0; + rect.width = s->width; + rect.height = s->height; + XShapeCombineRectangles(dpy, s->region_win, + ShapeBounding, REGION_WIN_BORDER, REGION_WIN_BORDER, + &rect, 1, ShapeSubtract, 0); + XMapWindow(dpy, s->region_win); + XSelectInput(dpy, s->region_win, ExposureMask | StructureNotifyMask); + x11grab_draw_region_win(s); +} + /** * Initialize the x11 grab device demuxer (public device demuxer API). * @@ -95,6 +161,7 @@ x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap) XImage *image; int x_off = 0; int y_off = 0; + int screen; int use_shm; char *dpyname, *offset; int ret = 0; @@ -104,7 +171,7 @@ x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap) offset = strchr(dpyname, '+'); if (offset) { sscanf(offset, "%d,%d", &x_off, &y_off); - x11grab->nomouse= strstr(offset, "nomouse"); + x11grab->draw_mouse = !strstr(offset, "nomouse"); *offset= 0; } @@ -142,6 +209,22 @@ x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap) } av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */ + screen = DefaultScreen(dpy); + + if (x11grab->follow_mouse) { + int screen_w, screen_h; + Window w; + + screen_w = DisplayWidth(dpy, screen); + screen_h = DisplayHeight(dpy, screen); + XQueryPointer(dpy, RootWindow(dpy, screen), &w, &w, &x_off, &y_off, &ret, &ret, &ret); + x_off -= x11grab->width / 2; + y_off -= x11grab->height / 2; + x_off = FFMIN(FFMAX(x_off, 0), screen_w - x11grab->width); + y_off = FFMIN(FFMAX(y_off, 0), screen_h - x11grab->height); + av_log(s1, AV_LOG_INFO, "followmouse is enabled, resetting grabbing region to x: %d y: %d\n", x_off, y_off); + } + use_shm = XShmQueryExtension(dpy); av_log(s1, AV_LOG_INFO, "shared memory extension%s found\n", use_shm ? 
"" : " not"); @@ -172,7 +255,7 @@ x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap) goto out; } } else { - image = XGetImage(dpy, RootWindow(dpy, DefaultScreen(dpy)), + image = XGetImage(dpy, RootWindow(dpy, screen), x_off,y_off, x11grab->width, x11grab->height, AllPlanes, ZPixmap); @@ -218,21 +301,6 @@ x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap) } break; case 32: -#if 0 - GetColorInfo (image, &c_info); - if ( c_info.alpha_mask == 0xff000000 && image->green_mask == 0x0000ff00) { - /* byte order is relevant here, not endianness - * endianness is handled by avcodec, but atm no such thing - * as having ABGR, instead of ARGB in a word. Since we - * need this for Solaris/SPARC, but need to do the conversion - * for every frame we do it outside of this loop, cf. below - * this matches both ARGB32 and ABGR32 */ - input_pixfmt = PIX_FMT_ARGB32; - } else { - av_log(s1, AV_LOG_ERROR,"image depth %i not supported ... aborting\n", image->bits_per_pixel); - return AVERROR(EIO); - } -#endif input_pixfmt = PIX_FMT_RGB32; break; default: @@ -390,6 +458,10 @@ x11grab_read_packet(AVFormatContext *s1, AVPacket *pkt) int x_off = s->x_off; int y_off = s->y_off; + int screen; + Window root; + int follow_mouse = s->follow_mouse; + int64_t curtime, delay; struct timespec ts; @@ -416,17 +488,65 @@ x11grab_read_packet(AVFormatContext *s1, AVPacket *pkt) pkt->size = s->frame_size; pkt->pts = curtime; + screen = DefaultScreen(dpy); + root = RootWindow(dpy, screen); + if (follow_mouse) { + int screen_w, screen_h; + int pointer_x, pointer_y, _; + Window w; + + screen_w = DisplayWidth(dpy, screen); + screen_h = DisplayHeight(dpy, screen); + XQueryPointer(dpy, root, &w, &w, &pointer_x, &pointer_y, &_, &_, &_); + if (follow_mouse == -1) { + // follow the mouse, put it at center of grabbing region + x_off += pointer_x - s->width / 2 - x_off; + y_off += pointer_y - s->height / 2 - y_off; + } else { + // follow the mouse, but only move the grabbing region when mouse + // reaches within certain pixels to the edge. + if (pointer_x > x_off + s->width - follow_mouse) { + x_off += pointer_x - (x_off + s->width - follow_mouse); + } else if (pointer_x < x_off + follow_mouse) + x_off -= (x_off + follow_mouse) - pointer_x; + if (pointer_y > y_off + s->height - follow_mouse) { + y_off += pointer_y - (y_off + s->height - follow_mouse); + } else if (pointer_y < y_off + follow_mouse) + y_off -= (y_off + follow_mouse) - pointer_y; + } + // adjust grabbing region position if it goes out of screen. + s->x_off = x_off = FFMIN(FFMAX(x_off, 0), screen_w - s->width); + s->y_off = y_off = FFMIN(FFMAX(y_off, 0), screen_h - s->height); + + if (s->show_region && s->region_win) + XMoveWindow(dpy, s->region_win, + s->x_off - REGION_WIN_BORDER, + s->y_off - REGION_WIN_BORDER); + } + + if (s->show_region) { + if (s->region_win) { + XEvent evt; + // clean up the events, and do the initinal draw or redraw. 
+ for (evt.type = NoEventMask; XCheckMaskEvent(dpy, ExposureMask | StructureNotifyMask, &evt); ); + if (evt.type) + x11grab_draw_region_win(s); + } else { + x11grab_region_win_init(s); + } + } + if(s->use_shm) { - if (!XShmGetImage(dpy, RootWindow(dpy, DefaultScreen(dpy)), image, x_off, y_off, AllPlanes)) { + if (!XShmGetImage(dpy, root, image, x_off, y_off, AllPlanes)) { av_log (s1, AV_LOG_INFO, "XShmGetImage() failed\n"); } } else { - if (!xget_zpixmap(dpy, RootWindow(dpy, DefaultScreen(dpy)), image, x_off, y_off)) { + if (!xget_zpixmap(dpy, root, image, x_off, y_off)) { av_log (s1, AV_LOG_INFO, "XGetZPixmap() failed\n"); } } - if(!s->nomouse){ + if (s->draw_mouse) { paint_mouse_pointer(image, s); } @@ -457,6 +577,10 @@ x11grab_read_close(AVFormatContext *s1) x11grab->image = NULL; } + if (x11grab->region_win) { + XDestroyWindow(x11grab->dpy, x11grab->region_win); + } + /* Free X11 display */ XCloseDisplay(x11grab->dpy); return 0; @@ -467,6 +591,11 @@ x11grab_read_close(AVFormatContext *s1) static const AVOption options[] = { { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), FF_OPT_TYPE_STRING, {.str = "vga"}, 0, 0, DEC }, { "framerate", "", OFFSET(framerate), FF_OPT_TYPE_STRING, {.str = "ntsc"}, 0, 0, DEC }, + { "draw_mouse", "Draw the mouse pointer.", OFFSET(draw_mouse), FF_OPT_TYPE_INT, { 1 }, 0, 1, DEC }, + { "follow_mouse", "Move the grabbing region when the mouse pointer reaches within specified amount of pixels to the edge of region.", + OFFSET(follow_mouse), FF_OPT_TYPE_INT, { 0 }, -1, INT_MAX, DEC, "follow_mouse" }, + { "centered", "Keep the mouse pointer at the center of grabbing region when following.", 0, FF_OPT_TYPE_CONST, { -1 }, INT_MIN, INT_MAX, DEC, "follow_mouse" }, + { "show_region", "Show the grabbing region.", OFFSET(show_region), FF_OPT_TYPE_INT, { 0 }, 0, 1, DEC }, { NULL }, }; diff --git a/libavfilter/Makefile b/libavfilter/Makefile index 461df37a10..6007fedca5 100644 --- a/libavfilter/Makefile +++ b/libavfilter/Makefile @@ -25,6 +25,7 @@ OBJS-$(CONFIG_ANULLSRC_FILTER) += asrc_anullsrc.o OBJS-$(CONFIG_ANULLSINK_FILTER) += asink_anullsink.o OBJS-$(CONFIG_BLACKFRAME_FILTER) += vf_blackframe.o +OBJS-$(CONFIG_BOXBLUR_FILTER) += vf_boxblur.o OBJS-$(CONFIG_COPY_FILTER) += vf_copy.o OBJS-$(CONFIG_CROP_FILTER) += vf_crop.o OBJS-$(CONFIG_CROPDETECT_FILTER) += vf_cropdetect.o @@ -68,6 +69,8 @@ OBJS-$(CONFIG_COLOR_FILTER) += vsrc_color.o OBJS-$(CONFIG_FREI0R_SRC_FILTER) += vf_frei0r.o OBJS-$(CONFIG_MOVIE_FILTER) += vsrc_movie.o OBJS-$(CONFIG_NULLSRC_FILTER) += vsrc_nullsrc.o +OBJS-$(CONFIG_RGBTESTSRC_FILTER) += vsrc_testsrc.o +OBJS-$(CONFIG_TESTSRC_FILTER) += vsrc_testsrc.o OBJS-$(CONFIG_BUFFERSINK_FILTER) += vsink_buffer.o OBJS-$(CONFIG_NULLSINK_FILTER) += vsink_nullsink.o @@ -76,9 +79,6 @@ OBJS-$(CONFIG_NULLSINK_FILTER) += vsink_nullsink.o OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/mp_image.o OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/img_format.o OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_2xsai.o -OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_blackframe.o -OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_boxblur.o -OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_cropdetect.o OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_decimate.o OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_delogo.o OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_denoise3d.o @@ -116,7 +116,6 @@ OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_pullup.o OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_qp.o OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_rectangle.o OBJS-$(CONFIG_MP_FILTER) += 
libmpcodecs/vf_remove_logo.o -OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_rgbtest.o OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_rotate.o OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_sab.o OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_screenshot.o @@ -136,8 +135,10 @@ OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_yvu9.o OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/pullup.o --include $(SUBDIR)$(ARCH)/Makefile +-include $(SRC_PATH)/$(SUBDIR)$(ARCH)/Makefile DIRS = x86 libmpcodecs -include $(SUBDIR)../subdir.mak +TOOLS = graph2dot lavfi-showfiltfmts + +include $(SRC_PATH)/subdir.mak diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c index 42047ecbe8..99f7bc42e2 100644 --- a/libavfilter/allfilters.c +++ b/libavfilter/allfilters.c @@ -41,6 +41,7 @@ void avfilter_register_all(void) REGISTER_FILTER (ANULLSINK, anullsink, asink); REGISTER_FILTER (BLACKFRAME, blackframe, vf); + REGISTER_FILTER (BOXBLUR, boxblur, vf); REGISTER_FILTER (COPY, copy, vf); REGISTER_FILTER (CROP, crop, vf); REGISTER_FILTER (CROPDETECT, cropdetect, vf); @@ -84,6 +85,8 @@ void avfilter_register_all(void) REGISTER_FILTER (FREI0R, frei0r_src, vsrc); REGISTER_FILTER (MOVIE, movie, vsrc); REGISTER_FILTER (NULLSRC, nullsrc, vsrc); + REGISTER_FILTER (RGBTESTSRC, rgbtestsrc, vsrc); + REGISTER_FILTER (TESTSRC, testsrc, vsrc); REGISTER_FILTER (BUFFER, buffersink, vsink); REGISTER_FILTER (NULLSINK, nullsink, vsink); diff --git a/libavfilter/avfilter.c b/libavfilter/avfilter.c index a57677c0e4..7003cdda5c 100644 --- a/libavfilter/avfilter.c +++ b/libavfilter/avfilter.c @@ -221,6 +221,9 @@ int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt, if (link->out_chlayouts) avfilter_formats_changeref(&link->out_chlayouts, &filt->outputs[filt_dstpad_idx]->out_chlayouts); + if (link->out_packing) + avfilter_formats_changeref(&link->out_packing, + &filt->outputs[filt_dstpad_idx]->out_packing); return 0; } diff --git a/libavfilter/avfilter.h b/libavfilter/avfilter.h index 1e6a08b1d8..0cbc3f7415 100644 --- a/libavfilter/avfilter.h +++ b/libavfilter/avfilter.h @@ -23,10 +23,13 @@ #define AVFILTER_AVFILTER_H #include "libavutil/avutil.h" +#include "libavutil/log.h" #include "libavutil/samplefmt.h" +#include "libavutil/pixfmt.h" +#include "libavutil/rational.h" #define LIBAVFILTER_VERSION_MAJOR 1 -#define LIBAVFILTER_VERSION_MINOR 80 +#define LIBAVFILTER_VERSION_MINOR 81 #define LIBAVFILTER_VERSION_MICRO 0 #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ @@ -159,6 +162,7 @@ static inline void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilt switch (src->type) { case AVMEDIA_TYPE_VIDEO: *dst->video = *src->video; break; case AVMEDIA_TYPE_AUDIO: *dst->audio = *src->audio; break; + default: break; } } @@ -261,6 +265,11 @@ AVFilterFormats *avfilter_all_formats(enum AVMediaType type); AVFilterFormats *avfilter_all_channel_layouts(void); /** + * Return a list of all audio packing formats. + */ +AVFilterFormats *avfilter_all_packing_formats(void); + +/** * Return a format list which contains the intersection of the formats of * a and b. Also, all the references of a, all the references of b, and * a and b themselves will be deallocated. 
@@ -478,6 +487,7 @@ AVFilterBufferRef *avfilter_default_get_audio_buffer(AVFilterLink *link, int per void avfilter_set_common_pixel_formats(AVFilterContext *ctx, AVFilterFormats *formats); void avfilter_set_common_sample_formats(AVFilterContext *ctx, AVFilterFormats *formats); void avfilter_set_common_channel_layouts(AVFilterContext *ctx, AVFilterFormats *formats); +void avfilter_set_common_packing_formats(AVFilterContext *ctx, AVFilterFormats *formats); /** Default handler for query_formats() */ int avfilter_default_query_formats(AVFilterContext *ctx); @@ -566,6 +576,11 @@ struct AVFilterContext { void *priv; ///< private data for use by the filter }; +enum AVFilterPacking { + AVFILTER_PACKED = 0, + AVFILTER_PLANAR, +}; + /** * A link between two filters. This contains pointers to the source and * destination filters between which this link exists, and the indexes of @@ -593,9 +608,10 @@ struct AVFilterLink { int w; ///< agreed upon image width int h; ///< agreed upon image height AVRational sample_aspect_ratio; ///< agreed upon sample aspect ratio - /* These two parameters apply only to audio */ + /* These parameters apply only to audio */ int64_t channel_layout; ///< channel layout of current buffer (see libavutil/audioconvert.h) int64_t sample_rate; ///< samples per second + int planar; ///< agreed upon packing mode of audio buffers. true if planar. int format; ///< agreed upon media format @@ -611,6 +627,8 @@ struct AVFilterLink { AVFilterFormats *in_chlayouts; AVFilterFormats *out_chlayouts; + AVFilterFormats *in_packing; + AVFilterFormats *out_packing; /** * The buffer reference currently being sent across the link by the source diff --git a/libavfilter/avfiltergraph.c b/libavfilter/avfiltergraph.c index 04768617de..8756e42bd4 100644 --- a/libavfilter/avfiltergraph.c +++ b/libavfilter/avfiltergraph.c @@ -203,8 +203,12 @@ static void pick_format(AVFilterLink *link) link->channel_layout = link->in_chlayouts->formats[0]; avfilter_formats_unref(&link->in_chlayouts); avfilter_formats_unref(&link->out_chlayouts); - } + link->in_packing->format_count = 1; + link->planar = link->in_packing->formats[0] == AVFILTER_PLANAR; + avfilter_formats_unref(&link->in_packing); + avfilter_formats_unref(&link->out_packing); + } } static void pick_formats(AVFilterGraph *graph) diff --git a/libavfilter/avfiltergraph.h b/libavfilter/avfiltergraph.h index a975926fd1..f4c88bc796 100644 --- a/libavfilter/avfiltergraph.h +++ b/libavfilter/avfiltergraph.h @@ -124,12 +124,12 @@ void avfilter_inout_free(AVFilterInOut **inout); * * @param graph the filter graph where to link the parsed graph context * @param filters string to be parsed - * @param inputs linked list to the inputs of the graph, may be NULL. - * It is updated to contain the list of open inputs after the parsing, - * should be freed with avfilter_inout_free(). - * @param outputs linked list to the outputs of the graph, may be NULL. - * It is updated to contain the list of open outputs after the parsing, - * should be freed with avfilter_inout_free(). + * @param inputs pointer to a linked list to the inputs of the graph, may be NULL. + * If non-NULL, *inputs is updated to contain the list of open inputs + * after the parsing, should be freed with avfilter_inout_free(). + * @param outputs pointer to a linked list to the outputs of the graph, may be NULL. + * If non-NULL, *outputs is updated to contain the list of open outputs + * after the parsing, should be freed with avfilter_inout_free(). 
* @return zero on success, a negative AVERROR code on error */ int avfilter_graph_parse(AVFilterGraph *graph, const char *filters, diff --git a/libavfilter/defaults.c b/libavfilter/defaults.c index b03816dd24..eef9fd1278 100644 --- a/libavfilter/defaults.c +++ b/libavfilter/defaults.c @@ -239,11 +239,19 @@ void avfilter_set_common_channel_layouts(AVFilterContext *ctx, AVFilterFormats * offsetof(AVFilterLink, out_chlayouts)); } +void avfilter_set_common_packing_formats(AVFilterContext *ctx, AVFilterFormats *formats) +{ + set_common_formats(ctx, formats, AVMEDIA_TYPE_AUDIO, + offsetof(AVFilterLink, in_packing), + offsetof(AVFilterLink, out_packing)); +} + int avfilter_default_query_formats(AVFilterContext *ctx) { avfilter_set_common_pixel_formats(ctx, avfilter_all_formats(AVMEDIA_TYPE_VIDEO)); avfilter_set_common_sample_formats(ctx, avfilter_all_formats(AVMEDIA_TYPE_AUDIO)); avfilter_set_common_channel_layouts(ctx, avfilter_all_channel_layouts()); + avfilter_set_common_packing_formats(ctx, avfilter_all_packing_formats()); return 0; } diff --git a/libavfilter/formats.c b/libavfilter/formats.c index 49977c51fd..214718b779 100644 --- a/libavfilter/formats.c +++ b/libavfilter/formats.c @@ -22,6 +22,7 @@ #include "libavutil/pixdesc.h" #include "libavutil/audioconvert.h" #include "avfilter.h" +#include "internal.h" /** * Add all refs from a to ret and destroy a. @@ -30,7 +31,7 @@ static void merge_ref(AVFilterFormats *ret, AVFilterFormats *a) { int i; - for(i = 0; i < a->refcount; i ++) { + for (i = 0; i < a->refcount; i++) { ret->refs[ret->refcount] = a->refs[i]; *ret->refs[ret->refcount++] = ret; } @@ -52,14 +53,14 @@ AVFilterFormats *avfilter_merge_formats(AVFilterFormats *a, AVFilterFormats *b) /* merge list of formats */ ret->formats = av_malloc(sizeof(*ret->formats) * FFMIN(a->format_count, b->format_count)); - for(i = 0; i < a->format_count; i ++) - for(j = 0; j < b->format_count; j ++) - if(a->formats[i] == b->formats[j]) + for (i = 0; i < a->format_count; i++) + for (j = 0; j < b->format_count; j++) + if (a->formats[i] == b->formats[j]) ret->formats[k++] = a->formats[i]; ret->format_count = k; /* check that there was at least one common format */ - if(!ret->format_count) { + if (!ret->format_count) { av_free(ret->formats); av_free(ret); return NULL; @@ -73,6 +74,17 @@ AVFilterFormats *avfilter_merge_formats(AVFilterFormats *a, AVFilterFormats *b) return ret; } +int ff_fmt_is_in(int fmt, const int *fmts) +{ + const int *p; + + for (p = fmts; *p != -1; p++) { + if (fmt == *p) + return 1; + } + return 0; +} + #define MAKE_FORMAT_LIST() \ AVFilterFormats *formats; \ int count = 0; \ @@ -83,7 +95,7 @@ AVFilterFormats *avfilter_merge_formats(AVFilterFormats *a, AVFilterFormats *b) if (!formats) return NULL; \ formats->format_count = count; \ if (count) { \ - formats->formats = av_malloc(sizeof(*formats->formats)*count); \ + formats->formats = av_malloc(sizeof(*formats->formats)*count); \ if (!formats->formats) { \ av_free(formats); \ return NULL; \ @@ -161,6 +173,17 @@ AVFilterFormats *avfilter_all_channel_layouts(void) return avfilter_make_format64_list(chlayouts); } +AVFilterFormats *avfilter_all_packing_formats(void) +{ + static int packing[] = { + AVFILTER_PACKED, + AVFILTER_PLANAR, + -1, + }; + + return avfilter_make_format_list(packing); +} + void avfilter_formats_ref(AVFilterFormats *f, AVFilterFormats **ref) { *ref = f; @@ -171,8 +194,8 @@ void avfilter_formats_ref(AVFilterFormats *f, AVFilterFormats **ref) static int find_ref_index(AVFilterFormats **ref) { int i; - for(i = 0; i < 
(*ref)->refcount; i ++) - if((*ref)->refs[i] == ref) + for (i = 0; i < (*ref)->refcount; i++) + if ((*ref)->refs[i] == ref) return i; return -1; } @@ -186,11 +209,11 @@ void avfilter_formats_unref(AVFilterFormats **ref) idx = find_ref_index(ref); - if(idx >= 0) + if (idx >= 0) memmove((*ref)->refs + idx, (*ref)->refs + idx+1, sizeof(AVFilterFormats**) * ((*ref)->refcount-idx-1)); - if(!--(*ref)->refcount) { + if (!--(*ref)->refcount) { av_free((*ref)->formats); av_free((*ref)->refs); av_free(*ref); @@ -203,7 +226,7 @@ void avfilter_formats_changeref(AVFilterFormats **oldref, { int idx = find_ref_index(oldref); - if(idx >= 0) { + if (idx >= 0) { (*oldref)->refs[idx] = newref; *newref = *oldref; *oldref = NULL; diff --git a/libavfilter/graphparser.c b/libavfilter/graphparser.c index 5178eea4c6..f38c6cfcae 100644 --- a/libavfilter/graphparser.c +++ b/libavfilter/graphparser.c @@ -83,8 +83,8 @@ static char *parse_link_name(const char **buf, void *log_ctx) * Create an instance of a filter, initialize and insert it in the * filtergraph in *ctx. * + * @param filt_ctx put here a filter context in case of successful creation and configuration, NULL otherwise. * @param ctx the filtergraph context - * @param put here a filter context in case of successful creation and configuration, NULL otherwise. * @param index an index which is supposed to be unique for each filter instance added to the filtergraph * @param filt_name the name of the filter to create * @param args the arguments provided to the filter during its initialization @@ -141,6 +141,8 @@ static int create_filter(AVFilterContext **filt_ctx, AVFilterGraph *ctx, int ind * corresponding filter instance which is added to graph with * create_filter(). * + * @param filt_ctx Pointer that is set to the created and configured filter + * context on success, set to NULL on failure. * @param filt_ctx put here a pointer to the created filter context on * success, NULL otherwise * @param buf pointer to the buffer to parse, *buf will be updated to @@ -333,38 +335,40 @@ static int parse_outputs(const char **buf, AVFilterInOut **curr_inputs, } int avfilter_graph_parse(AVFilterGraph *graph, const char *filters, - AVFilterInOut **open_inputs, AVFilterInOut **open_outputs, + AVFilterInOut **open_inputs_ptr, AVFilterInOut **open_outputs_ptr, void *log_ctx) { - int index = 0, ret; + int index = 0, ret = 0; char chr = 0; AVFilterInOut *curr_inputs = NULL; + AVFilterInOut *open_inputs = open_inputs_ptr ? *open_inputs_ptr : NULL; + AVFilterInOut *open_outputs = open_outputs_ptr ? 
*open_outputs_ptr : NULL; do { AVFilterContext *filter; const char *filterchain = filters; filters += strspn(filters, WHITESPACES); - if ((ret = parse_inputs(&filters, &curr_inputs, open_outputs, log_ctx)) < 0) - goto fail; + if ((ret = parse_inputs(&filters, &curr_inputs, &open_outputs, log_ctx)) < 0) + goto end; if ((ret = parse_filter(&filter, &filters, graph, index, log_ctx)) < 0) - goto fail; + goto end; if (filter->input_count == 1 && !curr_inputs && !index) { - /* First input can be omitted if it is "[in]" */ + /* First input pad, assume it is "[in]" if not specified */ const char *tmp = "[in]"; - if ((ret = parse_inputs(&tmp, &curr_inputs, open_outputs, log_ctx)) < 0) - goto fail; + if ((ret = parse_inputs(&tmp, &curr_inputs, &open_outputs, log_ctx)) < 0) + goto end; } - if ((ret = link_filter_inouts(filter, &curr_inputs, open_inputs, log_ctx)) < 0) - goto fail; + if ((ret = link_filter_inouts(filter, &curr_inputs, &open_inputs, log_ctx)) < 0) + goto end; - if ((ret = parse_outputs(&filters, &curr_inputs, open_inputs, open_outputs, + if ((ret = parse_outputs(&filters, &curr_inputs, &open_inputs, &open_outputs, log_ctx)) < 0) - goto fail; + goto end; filters += strspn(filters, WHITESPACES); chr = *filters++; @@ -374,7 +378,7 @@ int avfilter_graph_parse(AVFilterGraph *graph, const char *filters, "Invalid filterchain containing an unlabelled output pad: \"%s\"\n", filterchain); ret = AVERROR(EINVAL); - goto fail; + goto end; } index++; } while (chr == ',' || chr == ';'); @@ -384,25 +388,29 @@ int avfilter_graph_parse(AVFilterGraph *graph, const char *filters, "Unable to parse graph description substring: \"%s\"\n", filters - 1); ret = AVERROR(EINVAL); - goto fail; + goto end; } - if (open_inputs && *open_inputs && !strcmp((*open_inputs)->name, "out") && curr_inputs) { - /* Last output can be omitted if it is "[out]" */ + if (curr_inputs) { + /* Last output pad, assume it is "[out]" if not specified */ const char *tmp = "[out]"; - if ((ret = parse_outputs(&tmp, &curr_inputs, open_inputs, open_outputs, + if ((ret = parse_outputs(&tmp, &curr_inputs, &open_inputs, &open_outputs, log_ctx)) < 0) - goto fail; + goto end; } - return 0; - - fail: - for (; graph->filter_count > 0; graph->filter_count--) - avfilter_free(graph->filters[graph->filter_count - 1]); - av_freep(&graph->filters); - avfilter_inout_free(open_inputs); - avfilter_inout_free(open_outputs); +end: + /* clear open_in/outputs only if not passed as parameters */ + if (open_inputs_ptr) *open_inputs_ptr = open_inputs; + else avfilter_inout_free(&open_inputs); + if (open_outputs_ptr) *open_outputs_ptr = open_outputs; + else avfilter_inout_free(&open_outputs); avfilter_inout_free(&curr_inputs); + + if (ret < 0) { + for (; graph->filter_count > 0; graph->filter_count--) + avfilter_free(graph->filters[graph->filter_count - 1]); + av_freep(&graph->filters); + } return ret; } diff --git a/libavfilter/internal.h b/libavfilter/internal.h index be1e9b08f2..7537565768 100644 --- a/libavfilter/internal.h +++ b/libavfilter/internal.h @@ -58,4 +58,7 @@ int ff_avfilter_graph_config_formats(AVFilterGraph *graphctx, AVClass *log_ctx); /** default handler for freeing audio/video buffer when there are no references left */ void ff_avfilter_default_free_buffer(AVFilterBuffer *buf); +/** Tell is a format is contained in the provided list terminated by -1. 
*/ +int ff_fmt_is_in(int fmt, const int *fmts); + #endif /* AVFILTER_INTERNAL_H */ diff --git a/libavfilter/libmpcodecs/vf_blackframe.c b/libavfilter/libmpcodecs/vf_blackframe.c deleted file mode 100644 index c72552c99e..0000000000 --- a/libavfilter/libmpcodecs/vf_blackframe.c +++ /dev/null @@ -1,148 +0,0 @@ -/* - * detect frames that are (almost) black - * search for black frames to detect scene transitions - * (c) 2006 Julian Hall - * - * based on code designed for skipping commercials - * (c) 2002-2003 Brian J. Murrell - * - * cleanup, simplify, speedup (c) 2006 by Ivo van Poorten - * - * This file is part of MPlayer. - * - * MPlayer is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * MPlayer is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with MPlayer; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - */ - -#include <stdio.h> -#include <stdlib.h> -#include <string.h> - -#include "config.h" -#include "mp_msg.h" - -#include "img_format.h" -#include "mp_image.h" -#include "vf.h" - -struct vf_priv_s { - unsigned int bamount, bthresh, frame, lastkeyframe; -}; - -static int config(struct vf_instance *vf, int width, int height, int d_width, - int d_height, unsigned int flags, unsigned int outfmt) { - return vf_next_config(vf,width,height,d_width,d_height,flags,outfmt); -} - -static int query_format(struct vf_instance *vf, unsigned fmt) { - switch(fmt) { - case IMGFMT_YVU9: - case IMGFMT_IF09: - case IMGFMT_YV12: - case IMGFMT_I420: - case IMGFMT_IYUV: - case IMGFMT_CLPL: - case IMGFMT_Y800: - case IMGFMT_Y8: - case IMGFMT_NV12: - case IMGFMT_NV21: - case IMGFMT_444P: - case IMGFMT_422P: - case IMGFMT_411P: - case IMGFMT_HM12: - return vf_next_query_format(vf, fmt); - } - return 0; -} - -static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){ - mp_image_t *dmpi; - int x, y; - int nblack=0, pblack=0; - unsigned char *yplane = mpi->planes[0]; - unsigned int ystride = mpi->stride[0]; - int pict_type = mpi->pict_type; - int w = mpi->w, h = mpi->h; - int bthresh = vf->priv->bthresh; - int bamount = vf->priv->bamount; - static const char *const picttypes[4] = { "unknown", "I", "P", "B" }; - - for (y=1; y<=h; y++) { - for (x=0; x<w; x++) - nblack += yplane[x] < bthresh; - pblack = nblack*100/(w*y); - if (pblack < bamount) break; - yplane += ystride; - } - - if (pict_type > 3 || pict_type < 0) pict_type = 0; - if (pict_type == 1) vf->priv->lastkeyframe = vf->priv->frame; - - if (pblack >= bamount) - mp_msg(MSGT_VFILTER, MSGL_INFO,"vf_blackframe: %u, %i%%, %s (I:%u)\n", - vf->priv->frame, pblack, picttypes[pict_type], - vf->priv->lastkeyframe); - - vf->priv->frame++; - - dmpi = vf_get_image(vf->next, mpi->imgfmt, MP_IMGTYPE_EXPORT, 0, - mpi->width, mpi->height); - dmpi->planes[0] = mpi->planes[0]; - dmpi->stride[0] = mpi->stride[0]; - dmpi->planes[1] = mpi->planes[1]; - dmpi->stride[1] = mpi->stride[1]; - dmpi->planes[2] = mpi->planes[2]; - dmpi->stride[2] = mpi->stride[2]; - - vf_clone_mpi_attributes(dmpi, mpi); - - return vf_next_put_image(vf, dmpi, pts); -} - 
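[Editor's note - not part of the patch] The put_image() loop above is the core of the removed MPlayer blackframe filter, and the same idea survives in libavfilter's vf_blackframe.c further down in this diff: count luma samples below a threshold and report the frame once the black percentage crosses a configured amount. A minimal standalone sketch of that computation, with hypothetical names and plain C types instead of mp_image_t/AVFilterBufferRef:

    /* hypothetical standalone helper; the real filter reads these values
     * from its input buffer and link dimensions */
    #include <stdint.h>

    static int black_percentage(const uint8_t *luma, int linesize,
                                int w, int h, int threshold)
    {
        int x, y;
        int64_t nblack = 0;

        for (y = 0; y < h; y++) {
            const uint8_t *row = luma + (int64_t)y * linesize;
            for (x = 0; x < w; x++)
                nblack += row[x] < threshold;   /* count "black" samples */
        }
        /* percentage of the whole frame below the threshold */
        return (int)(nblack * 100 / ((int64_t)w * h));
    }

The MPlayer version above additionally stops scanning early once the running per-row percentage drops below the configured amount; the sketch keeps the full scan for clarity.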
-static int control(struct vf_instance *vf, int request, void* data){ - return vf_next_control(vf,request,data); -} - -static void uninit(struct vf_instance *vf) { - free(vf->priv); -} - -static int vf_open(vf_instance_t *vf, char *args){ - vf->priv = malloc(sizeof(struct vf_priv_s)); - if (!vf->priv) return 0; - - vf->config = config; - vf->put_image = put_image; - vf->control = control; - vf->uninit = uninit; - vf->query_format = query_format; - - vf->priv->bamount = 98; - vf->priv->bthresh = 0x20; - vf->priv->frame = 0; - vf->priv->lastkeyframe = 0; - - if (args) - sscanf(args, "%u:%u", &vf->priv->bamount, &vf->priv->bthresh); - return 1; -} - -const vf_info_t vf_info_blackframe = { - "detects black frames", - "blackframe", - "Brian J. Murrell, Julian Hall, Ivo van Poorten", - "Useful for detecting scene transitions", - vf_open, - NULL -}; diff --git a/libavfilter/libmpcodecs/vf_boxblur.c b/libavfilter/libmpcodecs/vf_boxblur.c deleted file mode 100644 index bc1ec80284..0000000000 --- a/libavfilter/libmpcodecs/vf_boxblur.c +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Copyright (C) 2002 Michael Niedermayer <michaelni@gmx.at> - * - * This file is part of MPlayer. - * - * MPlayer is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * MPlayer is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with MPlayer; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
- */ - -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <inttypes.h> -#include <assert.h> - -#include "mp_msg.h" -#include "img_format.h" -#include "mp_image.h" -#include "vf.h" - - -//===========================================================================// - -typedef struct FilterParam{ - int radius; - int power; -}FilterParam; - -struct vf_priv_s { - FilterParam lumaParam; - FilterParam chromaParam; -}; - - -/***************************************************************************/ - - -static int config(struct vf_instance *vf, - int width, int height, int d_width, int d_height, - unsigned int flags, unsigned int outfmt){ - - return vf_next_config(vf,width,height,d_width,d_height,flags,outfmt); -} - -static inline void blur(uint8_t *dst, uint8_t *src, int w, int radius, int dstStep, int srcStep){ - int x; - const int length= radius*2 + 1; - const int inv= ((1<<16) + length/2)/length; - - int sum= 0; - - for(x=0; x<radius; x++){ - sum+= src[x*srcStep]<<1; - } - sum+= src[radius*srcStep]; - - for(x=0; x<=radius; x++){ - sum+= src[(radius+x)*srcStep] - src[(radius-x)*srcStep]; - dst[x*dstStep]= (sum*inv + (1<<15))>>16; - } - - for(; x<w-radius; x++){ - sum+= src[(radius+x)*srcStep] - src[(x-radius-1)*srcStep]; - dst[x*dstStep]= (sum*inv + (1<<15))>>16; - } - - for(; x<w; x++){ - sum+= src[(2*w-radius-x-1)*srcStep] - src[(x-radius-1)*srcStep]; - dst[x*dstStep]= (sum*inv + (1<<15))>>16; - } -} - -static inline void blur2(uint8_t *dst, uint8_t *src, int w, int radius, int power, int dstStep, int srcStep){ - uint8_t temp[2][4096]; - uint8_t *a= temp[0], *b=temp[1]; - - if(radius){ - blur(a, src, w, radius, 1, srcStep); - for(; power>2; power--){ - uint8_t *c; - blur(b, a, w, radius, 1, 1); - c=a; a=b; b=c; - } - if(power>1) - blur(dst, a, w, radius, dstStep, 1); - else{ - int i; - for(i=0; i<w; i++) - dst[i*dstStep]= a[i]; - } - }else{ - int i; - for(i=0; i<w; i++) - dst[i*dstStep]= src[i*srcStep]; - } -} - -static void hBlur(uint8_t *dst, uint8_t *src, int w, int h, int dstStride, int srcStride, int radius, int power){ - int y; - - if(radius==0 && dst==src) return; - - for(y=0; y<h; y++){ - blur2(dst + y*dstStride, src + y*srcStride, w, radius, power, 1, 1); - } -} - -//FIXME optimize (x before y !!!) 
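[Editor's note - not part of the patch] blur() above, like its replacement later in this diff (the new libavfilter/vf_boxblur.c, which documents the trick in a comment), avoids the naive O(width*radius) sum by keeping a running window total: each output pixel reuses the previous sum, adding the sample that enters the window and subtracting the one that leaves. A simplified, self-contained sketch of that sliding-window pass (clamped edges instead of the mirroring used by the filter, and integer rounding instead of its 16-bit fixed-point multiply):

    #include <stdint.h>

    static int clampi(int v, int lo, int hi)
    {
        return v < lo ? lo : v > hi ? hi : v;
    }

    /* one horizontal box-blur pass; dst and src must not overlap */
    static void box_blur_row(uint8_t *dst, const uint8_t *src, int len, int radius)
    {
        const int window = 2*radius + 1;
        int x, i, sum = 0;

        /* prime the window centred on x = 0 */
        for (i = -radius; i <= radius; i++)
            sum += src[clampi(i, 0, len - 1)];

        for (x = 0; x < len; x++) {
            dst[x] = (sum + window/2) / window;             /* rounded mean */
            sum -= src[clampi(x - radius,     0, len - 1)]; /* sample leaving the window */
            sum += src[clampi(x + radius + 1, 0, len - 1)]; /* sample entering it */
        }
    }

The two-dimensional blur is then just this pass applied per row (hBlur) and per column (vBlur), repeated `power` times.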
-static void vBlur(uint8_t *dst, uint8_t *src, int w, int h, int dstStride, int srcStride, int radius, int power){ - int x; - - if(radius==0 && dst==src) return; - - for(x=0; x<w; x++){ - blur2(dst + x, src + x, h, radius, power, dstStride, srcStride); - } -} - -static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){ - int cw= mpi->w >> mpi->chroma_x_shift; - int ch= mpi->h >> mpi->chroma_y_shift; - - mp_image_t *dmpi=vf_get_image(vf->next,mpi->imgfmt, - MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE | MP_IMGFLAG_READABLE, - mpi->w,mpi->h); - - assert(mpi->flags&MP_IMGFLAG_PLANAR); - - hBlur(dmpi->planes[0], mpi->planes[0], mpi->w,mpi->h, - dmpi->stride[0], mpi->stride[0], vf->priv->lumaParam.radius, vf->priv->lumaParam.power); - hBlur(dmpi->planes[1], mpi->planes[1], cw,ch, - dmpi->stride[1], mpi->stride[1], vf->priv->chromaParam.radius, vf->priv->chromaParam.power); - hBlur(dmpi->planes[2], mpi->planes[2], cw,ch, - dmpi->stride[2], mpi->stride[2], vf->priv->chromaParam.radius, vf->priv->chromaParam.power); - - vBlur(dmpi->planes[0], dmpi->planes[0], mpi->w,mpi->h, - dmpi->stride[0], dmpi->stride[0], vf->priv->lumaParam.radius, vf->priv->lumaParam.power); - vBlur(dmpi->planes[1], dmpi->planes[1], cw,ch, - dmpi->stride[1], dmpi->stride[1], vf->priv->chromaParam.radius, vf->priv->chromaParam.power); - vBlur(dmpi->planes[2], dmpi->planes[2], cw,ch, - dmpi->stride[2], dmpi->stride[2], vf->priv->chromaParam.radius, vf->priv->chromaParam.power); - - return vf_next_put_image(vf,dmpi, pts); -} - -//===========================================================================// - -static int query_format(struct vf_instance *vf, unsigned int fmt){ - switch(fmt) - { - case IMGFMT_YV12: - case IMGFMT_I420: - case IMGFMT_IYUV: - case IMGFMT_YVU9: - case IMGFMT_444P: - case IMGFMT_422P: - case IMGFMT_411P: - return vf_next_query_format(vf, fmt); - } - return 0; -} - -static int vf_open(vf_instance_t *vf, char *args){ - int e; - - vf->config=config; - vf->put_image=put_image; -// vf->get_image=get_image; - vf->query_format=query_format; - vf->priv=malloc(sizeof(struct vf_priv_s)); - memset(vf->priv, 0, sizeof(struct vf_priv_s)); - - if(args==NULL) return 0; - - e=sscanf(args, "%d:%d:%d:%d", - &vf->priv->lumaParam.radius, - &vf->priv->lumaParam.power, - &vf->priv->chromaParam.radius, - &vf->priv->chromaParam.power - ); - - if(e==2){ - vf->priv->chromaParam.radius= vf->priv->lumaParam.radius; - vf->priv->chromaParam.power = vf->priv->lumaParam.power; - }else if(e!=4) - return 0; - - if(vf->priv->lumaParam.radius < 0) return 0; - if(vf->priv->chromaParam.radius < 0) return 0; - - return 1; -} - -const vf_info_t vf_info_boxblur = { - "box blur", - "boxblur", - "Michael Niedermayer", - "", - vf_open, - NULL -}; - -//===========================================================================// diff --git a/libavfilter/libmpcodecs/vf_cropdetect.c b/libavfilter/libmpcodecs/vf_cropdetect.c deleted file mode 100644 index c3de24793b..0000000000 --- a/libavfilter/libmpcodecs/vf_cropdetect.c +++ /dev/null @@ -1,201 +0,0 @@ -/* - * This file is part of MPlayer. - * - * MPlayer is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * MPlayer is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with MPlayer; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - */ - -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <inttypes.h> - -#include "config.h" -#include "mp_msg.h" -#include "help_mp.h" - -#include "img_format.h" -#include "mp_image.h" -#include "vf.h" - -struct vf_priv_s { - int x1,y1,x2,y2; - int limit; - int round; - int reset_count; - int fno; -}; - -static int checkline(unsigned char* src,int stride,int len,int bpp){ - int total=0; - int div=len; - switch(bpp){ - case 1: - while(--len>=0){ - total+=src[0]; src+=stride; - } - break; - case 3: - case 4: - while(--len>=0){ - total+=src[0]+src[1]+src[2]; src+=stride; - } - div*=3; - break; - } - total/=div; -// printf("total=%d\n",total); - return total; -} - -//===========================================================================// - -static int config(struct vf_instance *vf, - int width, int height, int d_width, int d_height, - unsigned int flags, unsigned int outfmt){ - vf->priv->x1=width - 1; - vf->priv->y1=height - 1; - vf->priv->x2=0; - vf->priv->y2=0; - vf->priv->fno=-2; - return vf_next_config(vf,width,height,d_width,d_height,flags,outfmt); -} - -static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){ - mp_image_t *dmpi; - int bpp=mpi->bpp/8; - int w,h,x,y,shrink_by; - - // hope we'll get DR buffer: - dmpi=vf_get_image(vf->next,mpi->imgfmt, - MP_IMGTYPE_EXPORT, 0, - mpi->w, mpi->h); - - dmpi->planes[0]=mpi->planes[0]; - dmpi->planes[1]=mpi->planes[1]; - dmpi->planes[2]=mpi->planes[2]; - dmpi->stride[0]=mpi->stride[0]; - dmpi->stride[1]=mpi->stride[1]; - dmpi->stride[2]=mpi->stride[2]; - dmpi->width=mpi->width; - dmpi->height=mpi->height; - -if(++vf->priv->fno>0){ // ignore first 2 frames - they may be empty - - // Reset the crop area every reset_count frames, if reset_count is > 0 - if(vf->priv->reset_count > 0 && vf->priv->fno > vf->priv->reset_count){ - vf->priv->x1=mpi->w-1; - vf->priv->y1=mpi->h-1; - vf->priv->x2=0; - vf->priv->y2=0; - vf->priv->fno=1; - } - - for(y=0;y<vf->priv->y1;y++){ - if(checkline(mpi->planes[0]+mpi->stride[0]*y,bpp,mpi->w,bpp)>vf->priv->limit){ - vf->priv->y1=y; - break; - } - } - - for(y=mpi->h-1;y>vf->priv->y2;y--){ - if(checkline(mpi->planes[0]+mpi->stride[0]*y,bpp,mpi->w,bpp)>vf->priv->limit){ - vf->priv->y2=y; - break; - } - } - - for(y=0;y<vf->priv->x1;y++){ - if(checkline(mpi->planes[0]+bpp*y,mpi->stride[0],mpi->h,bpp)>vf->priv->limit){ - vf->priv->x1=y; - break; - } - } - - for(y=mpi->w-1;y>vf->priv->x2;y--){ - if(checkline(mpi->planes[0]+bpp*y,mpi->stride[0],mpi->h,bpp)>vf->priv->limit){ - vf->priv->x2=y; - break; - } - } - - // round x and y (up), important for yuv colorspaces - // make sure they stay rounded! - x=(vf->priv->x1+1)&(~1); - y=(vf->priv->y1+1)&(~1); - - w = vf->priv->x2 - x + 1; - h = vf->priv->y2 - y + 1; - - // w and h must be divisible by 2 as well because of yuv - // colorspace problems. 
- if (vf->priv->round <= 1) - vf->priv->round = 16; - if (vf->priv->round % 2) - vf->priv->round *= 2; - - shrink_by = w % vf->priv->round; - w -= shrink_by; - x += (shrink_by / 2 + 1) & ~1; - - shrink_by = h % vf->priv->round; - h -= shrink_by; - y += (shrink_by / 2 + 1) & ~1; - - mp_msg(MSGT_VFILTER, MSGL_INFO, MSGTR_MPCODECS_CropArea, - vf->priv->x1,vf->priv->x2, - vf->priv->y1,vf->priv->y2, - w,h,x,y); - - -} - - return vf_next_put_image(vf,dmpi, pts); -} - -static int query_format(struct vf_instance *vf, unsigned int fmt) { - switch(fmt) { - // the default limit value works only right with YV12 right now. - case IMGFMT_YV12: - return vf_next_query_format(vf, fmt); - } - return 0; -} -//===========================================================================// - -static int vf_open(vf_instance_t *vf, char *args){ - vf->config=config; - vf->put_image=put_image; - vf->query_format=query_format; - vf->priv=malloc(sizeof(struct vf_priv_s)); - vf->priv->limit=24; // should be option - vf->priv->round = 0; - vf->priv->reset_count = 0; - if(args) sscanf(args, "%d:%d:%d", - &vf->priv->limit, - &vf->priv->round, - &vf->priv->reset_count); - return 1; -} - -const vf_info_t vf_info_cropdetect = { - "autodetect crop size", - "cropdetect", - "A'rpi", - "", - vf_open, - NULL -}; - -//===========================================================================// diff --git a/libavfilter/libmpcodecs/vf_rgbtest.c b/libavfilter/libmpcodecs/vf_rgbtest.c deleted file mode 100644 index cbed6ed367..0000000000 --- a/libavfilter/libmpcodecs/vf_rgbtest.c +++ /dev/null @@ -1,171 +0,0 @@ -/* - * This file is part of MPlayer. - * - * MPlayer is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * MPlayer is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with MPlayer; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
- */ - -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <inttypes.h> - -#include "config.h" -#include "mp_msg.h" - -#include "img_format.h" -#include "mp_image.h" -#include "vf.h" - -//===========================================================================// - -struct vf_priv_s { - unsigned int fmt; - int w, h; -}; - -static unsigned int getfmt(unsigned int outfmt){ - switch(outfmt){ - case IMGFMT_RGB12: - case IMGFMT_RGB15: - case IMGFMT_RGB16: - case IMGFMT_RGB24: - case IMGFMT_RGBA: - case IMGFMT_ARGB: - case IMGFMT_BGR12: - case IMGFMT_BGR15: - case IMGFMT_BGR16: - case IMGFMT_BGR24: - case IMGFMT_BGRA: - case IMGFMT_ABGR: - return outfmt; - } - return 0; -} - -static void put_pixel(uint8_t *buf, int x, int y, int stride, int r, int g, int b, int fmt){ - switch(fmt){ - case IMGFMT_BGR12: ((uint16_t*)(buf + y*stride))[x]= - ((r >> 4) << 8) | ((g >> 4) << 4) | (b >> 4); - break; - case IMGFMT_RGB12: ((uint16_t*)(buf + y*stride))[x]= - ((b >> 4) << 8) | ((g >> 4) << 4) | (r >> 4); - break; - case IMGFMT_BGR15: ((uint16_t*)(buf + y*stride))[x]= ((r>>3)<<10) | ((g>>3)<<5) | (b>>3); - break; - case IMGFMT_RGB15: ((uint16_t*)(buf + y*stride))[x]= ((b>>3)<<10) | ((g>>3)<<5) | (r>>3); - break; - case IMGFMT_BGR16: ((uint16_t*)(buf + y*stride))[x]= ((r>>3)<<11) | ((g>>2)<<5) | (b>>3); - break; - case IMGFMT_RGB16: ((uint16_t*)(buf + y*stride))[x]= ((b>>3)<<11) | ((g>>2)<<5) | (r>>3); - break; - case IMGFMT_RGB24: - buf[3*x + y*stride + 0]= r; - buf[3*x + y*stride + 1]= g; - buf[3*x + y*stride + 2]= b; - break; - case IMGFMT_BGR24: - buf[3*x + y*stride + 0]= b; - buf[3*x + y*stride + 1]= g; - buf[3*x + y*stride + 2]= r; - break; - case IMGFMT_RGBA: - buf[4*x + y*stride + 0]= r; - buf[4*x + y*stride + 1]= g; - buf[4*x + y*stride + 2]= b; - break; - case IMGFMT_BGRA: - buf[4*x + y*stride + 0]= b; - buf[4*x + y*stride + 1]= g; - buf[4*x + y*stride + 2]= r; - break; - case IMGFMT_ARGB: - buf[4*x + y*stride + 1]= r; - buf[4*x + y*stride + 2]= g; - buf[4*x + y*stride + 3]= b; - break; - case IMGFMT_ABGR: - buf[4*x + y*stride + 1]= b; - buf[4*x + y*stride + 2]= g; - buf[4*x + y*stride + 3]= r; - break; - } -} - -static int config(struct vf_instance *vf, - int width, int height, int d_width, int d_height, - unsigned int flags, unsigned int outfmt){ - if (vf->priv->w > 0) { d_width = width = vf->priv->w; } - if (vf->priv->h > 0) { d_height = height = vf->priv->h; } - vf->priv->fmt=getfmt(outfmt); - mp_msg(MSGT_VFILTER,MSGL_V,"rgb test format:%s\n", vo_format_name(outfmt)); - return vf_next_config(vf,width,height,d_width,d_height,flags,vf->priv->fmt); -} - -static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){ - mp_image_t *dmpi; - int x, y; - int w = vf->priv->w > 0 ? vf->priv->w : mpi->w; - int h = vf->priv->h > 0 ? 
vf->priv->h : mpi->h; - - // hope we'll get DR buffer: - dmpi=vf_get_image(vf->next,vf->priv->fmt, - MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE, - w, h); - - for(y=0; y<h; y++){ - for(x=0; x<w; x++){ - int c= 256*x/w; - int r=0,g=0,b=0; - - if(3*y<h) r=c; - else if(3*y<2*h) g=c; - else b=c; - - put_pixel(dmpi->planes[0], x, y, dmpi->stride[0], r, g, b, vf->priv->fmt); - } - } - - return vf_next_put_image(vf,dmpi, pts); -} - -//===========================================================================// - -static int query_format(struct vf_instance *vf, unsigned int outfmt){ - unsigned int fmt=getfmt(outfmt); - if(!fmt) return 0; - return vf_next_query_format(vf,fmt) & (~VFCAP_CSP_SUPPORTED_BY_HW); -} - -static int vf_open(vf_instance_t *vf, char *args){ - vf->config=config; - vf->put_image=put_image; - vf->query_format=query_format; - vf->priv=malloc(sizeof(struct vf_priv_s)); - vf->priv->w = vf->priv->h = 0; - if (args) - sscanf(args, "%d:%d", &vf->priv->w, &vf->priv->h); - return 1; -} - -const vf_info_t vf_info_rgbtest = { - "rgbtest", - "rgbtest", - "Michael Niedermayer", - "", - vf_open, - NULL -}; - -//===========================================================================// diff --git a/libavfilter/vf_aspect.c b/libavfilter/vf_aspect.c index 3b4a57cf58..5234adb4f5 100644 --- a/libavfilter/vf_aspect.c +++ b/libavfilter/vf_aspect.c @@ -23,6 +23,7 @@ * aspect ratio modification video filters */ +#include "libavutil/mathematics.h" #include "avfilter.h" typedef struct { diff --git a/libavfilter/vf_blackframe.c b/libavfilter/vf_blackframe.c index 41b4a92ce5..843e8273f4 100644 --- a/libavfilter/vf_blackframe.c +++ b/libavfilter/vf_blackframe.c @@ -34,6 +34,7 @@ typedef struct { unsigned int bthresh; ///< black threshold unsigned int frame; ///< frame number unsigned int nblack; ///< number of black pixels counted so far + unsigned int last_keyframe; ///< frame number of the last received key-frame } BlackFrameContext; static int query_formats(AVFilterContext *ctx) @@ -56,6 +57,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque) blackframe->bthresh = 32; blackframe->nblack = 0; blackframe->frame = 0; + blackframe->last_keyframe = 0; if (args) sscanf(args, "%u:%u", &blackframe->bamount, &blackframe->bthresh); @@ -95,11 +97,16 @@ static void end_frame(AVFilterLink *inlink) AVFilterBufferRef *picref = inlink->cur_buf; int pblack = 0; + if (picref->video->key_frame) + blackframe->last_keyframe = blackframe->frame; + pblack = blackframe->nblack * 100 / (inlink->w * inlink->h); if (pblack >= blackframe->bamount) - av_log(ctx, AV_LOG_INFO, "frame:%u pblack:%u pos:%"PRId64" pts:%"PRId64" t:%f\n", + av_log(ctx, AV_LOG_INFO, "frame:%u pblack:%u pos:%"PRId64" pts:%"PRId64" t:%f " + "type:%c last_keyframe:%d\n", blackframe->frame, pblack, picref->pos, picref->pts, - picref->pts == AV_NOPTS_VALUE ? -1 : picref->pts * av_q2d(inlink->time_base)); + picref->pts == AV_NOPTS_VALUE ? -1 : picref->pts * av_q2d(inlink->time_base), + av_get_picture_type_char(picref->video->pict_type), blackframe->last_keyframe); blackframe->frame++; blackframe->nblack = 0; diff --git a/libavfilter/vf_boxblur.c b/libavfilter/vf_boxblur.c new file mode 100644 index 0000000000..a10b630ed5 --- /dev/null +++ b/libavfilter/vf_boxblur.c @@ -0,0 +1,352 @@ +/* + * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at> + * Copyright (c) 2011 Stefano Sabatini + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with FFmpeg; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +/** + * @file + * Apply a boxblur filter to the input video. + * Ported from MPlayer libmpcodecs/vf_boxblur.c. + */ + +#include "libavutil/avstring.h" +#include "libavutil/eval.h" +#include "libavutil/pixdesc.h" +#include "avfilter.h" + +static const char *var_names[] = { + "w", + "h", + "cw", + "ch", + "hsub", + "vsub", + NULL +}; + +enum var_name { + VAR_W, + VAR_H, + VAR_CW, + VAR_CH, + VAR_HSUB, + VAR_VSUB, + VARS_NB +}; + +typedef struct { + int radius; + int power; +} FilterParam; + +typedef struct { + FilterParam luma_param; + FilterParam chroma_param; + FilterParam alpha_param; + char luma_radius_expr [256]; + char chroma_radius_expr[256]; + char alpha_radius_expr [256]; + + int hsub, vsub; + int radius[4]; + int power[4]; + uint8_t *temp[2]; ///< temporary buffer used in blur_power() +} BoxBlurContext; + +#define Y 0 +#define U 1 +#define V 2 +#define A 3 + +static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque) +{ + BoxBlurContext *boxblur = ctx->priv; + int e; + + if (!args) { + av_log(ctx, AV_LOG_ERROR, + "Filter expects 2 or 4 arguments, none provided\n"); + return AVERROR(EINVAL); + } + + e = sscanf(args, "%255[^:]:%d:%255[^:]:%d:%255[^:]:%d", + boxblur->luma_radius_expr, &boxblur->luma_param .power, + boxblur->chroma_radius_expr, &boxblur->chroma_param.power, + boxblur->alpha_radius_expr, &boxblur->alpha_param .power); + + if (e != 2 && e != 4 && e != 6) { + av_log(ctx, AV_LOG_ERROR, + "Filter expects 2 or 4 or 6 params, provided %d\n", e); + return AVERROR(EINVAL); + } + + if (e < 4) { + boxblur->chroma_param.power = boxblur->luma_param.power; + av_strlcpy(boxblur->chroma_radius_expr, boxblur->luma_radius_expr, + sizeof(boxblur->chroma_radius_expr)); + } + if (e < 6) { + boxblur->alpha_param.power = boxblur->luma_param.power; + av_strlcpy(boxblur->alpha_radius_expr, boxblur->luma_radius_expr, + sizeof(boxblur->alpha_radius_expr)); + } + + return 0; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + BoxBlurContext *boxblur = ctx->priv; + + av_freep(&boxblur->temp[0]); + av_freep(&boxblur->temp[1]); +} + +static int query_formats(AVFilterContext *ctx) +{ + enum PixelFormat pix_fmts[] = { + PIX_FMT_YUV444P, PIX_FMT_YUV422P, PIX_FMT_YUV420P, + PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_YUVA420P, + PIX_FMT_YUV440P, PIX_FMT_GRAY8, + PIX_FMT_YUVJ444P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ420P, + PIX_FMT_YUVJ440P, + PIX_FMT_NONE + }; + + avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts)); + return 0; +} + +static int config_input(AVFilterLink *inlink) +{ + AVFilterContext *ctx = inlink->dst; + BoxBlurContext *boxblur = ctx->priv; + const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[inlink->format]; + int w = inlink->w, h = inlink->h; + int cw, ch; + double var_values[VARS_NB], res; + char *expr; + int ret; + + if 
(!(boxblur->temp[0] = av_malloc(FFMAX(w, h))) || + !(boxblur->temp[1] = av_malloc(FFMAX(w, h)))) + return AVERROR(ENOMEM); + + boxblur->hsub = desc->log2_chroma_w; + boxblur->vsub = desc->log2_chroma_h; + + var_values[VAR_W] = inlink->w; + var_values[VAR_H] = inlink->h; + var_values[VAR_CW] = cw = w>>boxblur->hsub; + var_values[VAR_CH] = ch = h>>boxblur->vsub; + var_values[VAR_HSUB] = 1<<boxblur->hsub; + var_values[VAR_VSUB] = 1<<boxblur->vsub; + +#define EVAL_RADIUS_EXPR(comp) \ + expr = boxblur->comp##_radius_expr; \ + ret = av_expr_parse_and_eval(&res, expr, var_names, var_values, \ + NULL, NULL, NULL, NULL, NULL, 0, ctx); \ + boxblur->comp##_param.radius = res; \ + if (ret < 0) { \ + av_log(NULL, AV_LOG_ERROR, \ + "Error when evaluating " #comp " radius expression '%s'\n", expr); \ + return ret; \ + } + EVAL_RADIUS_EXPR(luma); + EVAL_RADIUS_EXPR(chroma); + EVAL_RADIUS_EXPR(alpha); + + av_log(ctx, AV_LOG_INFO, + "luma_radius:%d luma_power:%d " + "chroma_radius:%d chroma_power:%d " + "alpha_radius:%d alpha_power:%d " + "w:%d chroma_w:%d h:%d chroma_h:%d\n", + boxblur->luma_param .radius, boxblur->luma_param .power, + boxblur->chroma_param.radius, boxblur->chroma_param.power, + boxblur->alpha_param .radius, boxblur->alpha_param .power, + w, cw, h, ch); + +#define CHECK_RADIUS_VAL(w_, h_, comp) \ + if (boxblur->comp##_param.radius < 0 || \ + 2*boxblur->comp##_param.radius > FFMIN(w_, h_)) { \ + av_log(ctx, AV_LOG_ERROR, \ + "Invalid " #comp " radius value %d, must be >= 0 and <= %d\n", \ + boxblur->comp##_param.radius, FFMIN(w_, h_)/2); \ + return AVERROR(EINVAL); \ + } + CHECK_RADIUS_VAL(w, h, luma); + CHECK_RADIUS_VAL(cw, ch, chroma); + CHECK_RADIUS_VAL(w, h, alpha); + + boxblur->radius[Y] = boxblur->luma_param.radius; + boxblur->radius[U] = boxblur->radius[V] = boxblur->chroma_param.radius; + boxblur->radius[A] = boxblur->alpha_param.radius; + + boxblur->power[Y] = boxblur->luma_param.power; + boxblur->power[U] = boxblur->power[V] = boxblur->chroma_param.power; + boxblur->power[A] = boxblur->alpha_param.power; + + return 0; +} + +static inline void blur(uint8_t *dst, int dst_step, const uint8_t *src, int src_step, + int len, int radius) +{ + /* Naive boxblur would sum source pixels from x-radius .. x+radius + * for destination pixel x. That would be O(radius*width). + * If you now look at what source pixels represent 2 consecutive + * output pixels, then you see they are almost identical and only + * differ by 2 pixels, like: + * src0 111111111 + * dst0 1 + * src1 111111111 + * dst1 1 + * src0-src1 1 -1 + * so when you know one output pixel you can find the next by just adding + * and subtracting 1 input pixel. + * The following code adopts this faster variant. 
+ */ + int x, sum = 0; + const int length = radius*2 + 1; + const int inv = ((1<<16) + length/2)/length; + + for (x = 0; x < radius; x++) + sum += src[x*src_step]<<1; + sum += src[radius*src_step]; + + for (x = 0; x <= radius; x++) { + sum += src[(radius+x)*src_step] - src[(radius-x)*src_step]; + dst[x*dst_step] = (sum*inv + (1<<15))>>16; + } + + for (; x < len-radius; x++) { + sum += src[(radius+x)*src_step] - src[(x-radius-1)*src_step]; + dst[x*dst_step] = (sum*inv + (1<<15))>>16; + } + + for (; x < len; x++) { + sum += src[(2*len-radius-x-1)*src_step] - src[(x-radius-1)*src_step]; + dst[x*dst_step] = (sum*inv + (1<<15))>>16; + } +} + +static inline void blur_power(uint8_t *dst, int dst_step, const uint8_t *src, int src_step, + int len, int radius, int power, uint8_t *temp[2]) +{ + uint8_t *a = temp[0], *b = temp[1]; + + if (radius && power) { + blur(a, 1, src, src_step, len, radius); + for (; power > 2; power--) { + uint8_t *c; + blur(b, 1, a, 1, len, radius); + c = a; a = b; b = c; + } + if (power > 1) { + blur(dst, dst_step, a, 1, len, radius); + } else { + int i; + for (i = 0; i < len; i++) + dst[i*dst_step] = a[i]; + } + } else { + int i; + for (i = 0; i < len; i++) + dst[i*dst_step] = src[i*src_step]; + } +} + +static void hblur(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize, + int w, int h, int radius, int power, uint8_t *temp[2]) +{ + int y; + + if (radius == 0 && dst == src) + return; + + for (y = 0; y < h; y++) + blur_power(dst + y*dst_linesize, 1, src + y*src_linesize, 1, + w, radius, power, temp); +} + +static void vblur(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize, + int w, int h, int radius, int power, uint8_t *temp[2]) +{ + int x; + + if (radius == 0 && dst == src) + return; + + for (x = 0; x < w; x++) + blur_power(dst + x, dst_linesize, src + x, src_linesize, + h, radius, power, temp); +} + +static void draw_slice(AVFilterLink *inlink, int y0, int h0, int slice_dir) +{ + AVFilterContext *ctx = inlink->dst; + BoxBlurContext *boxblur = ctx->priv; + AVFilterLink *outlink = inlink->dst->outputs[0]; + AVFilterBufferRef *inpicref = inlink ->cur_buf; + AVFilterBufferRef *outpicref = outlink->out_buf; + int plane; + int cw = inlink->w >> boxblur->hsub, ch = h0 >> boxblur->vsub; + int w[4] = { inlink->w, cw, cw, inlink->w }; + int h[4] = { h0, ch, ch, h0 }; + uint8_t *dst[4], *src[4]; + + for (plane = 0; inpicref->data[plane] && plane < 4; plane++) { + int y = plane == 1 || plane == 2 ? 
y0 >> boxblur->vsub : y0; + src[plane] = inpicref ->data[plane] + inpicref ->linesize[plane] * y; + dst[plane] = outpicref->data[plane] + outpicref->linesize[plane] * y; + } + + for (plane = 0; inpicref->data[plane] && plane < 4; plane++) + hblur(outpicref->data[plane], outpicref->linesize[plane], + inpicref ->data[plane], inpicref ->linesize[plane], + w[plane], h[plane], boxblur->radius[plane], boxblur->power[plane], + boxblur->temp); + + for (plane = 0; inpicref->data[plane] && plane < 4; plane++) + vblur(outpicref->data[plane], outpicref->linesize[plane], + outpicref->data[plane], outpicref->linesize[plane], + w[plane], h[plane], boxblur->radius[plane], boxblur->power[plane], + boxblur->temp); + + avfilter_draw_slice(outlink, y0, h0, slice_dir); +} + +AVFilter avfilter_vf_boxblur = { + .name = "boxblur", + .description = NULL_IF_CONFIG_SMALL("Blur the input."), + .priv_size = sizeof(BoxBlurContext), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + + .inputs = (AVFilterPad[]) {{ .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_input, + .draw_slice = draw_slice, + .min_perms = AV_PERM_READ }, + { .name = NULL}}, + .outputs = (AVFilterPad[]) {{ .name = "default", + .type = AVMEDIA_TYPE_VIDEO, }, + { .name = NULL}}, +}; diff --git a/libavfilter/vf_crop.c b/libavfilter/vf_crop.c index 531b8de658..fb084041ab 100644 --- a/libavfilter/vf_crop.c +++ b/libavfilter/vf_crop.c @@ -30,6 +30,7 @@ #include "libavutil/avstring.h" #include "libavutil/libm.h" #include "libavutil/imgutils.h" +#include "libavutil/mathematics.h" static const char *var_names[] = { "E", @@ -39,6 +40,11 @@ static const char *var_names[] = { "in_h", "ih", ///< height of the input video "out_w", "ow", ///< width of the cropped video "out_h", "oh", ///< height of the cropped video + "a", + "sar", + "dar", + "hsub", + "vsub", "x", "y", "n", ///< number of frame @@ -55,6 +61,11 @@ enum var_name { VAR_IN_H, VAR_IH, VAR_OUT_W, VAR_OW, VAR_OUT_H, VAR_OH, + VAR_A, + VAR_SAR, + VAR_DAR, + VAR_HSUB, + VAR_VSUB, VAR_X, VAR_Y, VAR_N, @@ -161,6 +172,11 @@ static int config_input(AVFilterLink *link) crop->var_values[VAR_PI] = M_PI; crop->var_values[VAR_IN_W] = crop->var_values[VAR_IW] = ctx->inputs[0]->w; crop->var_values[VAR_IN_H] = crop->var_values[VAR_IH] = ctx->inputs[0]->h; + crop->var_values[VAR_A] = (float) link->w / link->h; + crop->var_values[VAR_SAR] = link->sample_aspect_ratio.num ? 
av_q2d(link->sample_aspect_ratio) : 1; + crop->var_values[VAR_DAR] = crop->var_values[VAR_A] * crop->var_values[VAR_SAR]; + crop->var_values[VAR_HSUB] = 1<<pix_desc->log2_chroma_w; + crop->var_values[VAR_VSUB] = 1<<pix_desc->log2_chroma_h; crop->var_values[VAR_X] = NAN; crop->var_values[VAR_Y] = NAN; crop->var_values[VAR_OUT_W] = crop->var_values[VAR_OW] = NAN; diff --git a/libavfilter/vf_drawtext.c b/libavfilter/vf_drawtext.c index f495f68424..7d8372b886 100644 --- a/libavfilter/vf_drawtext.c +++ b/libavfilter/vf_drawtext.c @@ -424,15 +424,10 @@ static inline int draw_glyph_yuv(AVFilterBufferRef *picref, FT_Bitmap *bitmap, u { int r, c, alpha; unsigned int luma_pos, chroma_pos1, chroma_pos2; - uint8_t src_val, dst_pixel[4]; + uint8_t src_val; for (r = 0; r < bitmap->rows && r+y < height; r++) { for (c = 0; c < bitmap->width && c+x < width; c++) { - /* get pixel in the picref (destination) */ - dst_pixel[0] = picref->data[0][ c+x + (y+r) * picref->linesize[0]]; - dst_pixel[1] = picref->data[1][((c+x) >> hsub) + ((y+r) >> vsub) * picref->linesize[1]]; - dst_pixel[2] = picref->data[2][((c+x) >> hsub) + ((y+r) >> vsub) * picref->linesize[2]]; - /* get intensity value in the glyph bitmap (source) */ src_val = GET_BITMAP_VAL(r, c); if (!src_val) @@ -460,18 +455,10 @@ static inline int draw_glyph_rgb(AVFilterBufferRef *picref, FT_Bitmap *bitmap, { int r, c, alpha; uint8_t *p; - uint8_t src_val, dst_pixel[4]; + uint8_t src_val; for (r = 0; r < bitmap->rows && r+y < height; r++) { for (c = 0; c < bitmap->width && c+x < width; c++) { - /* get pixel in the picref (destination) */ - dst_pixel[0] = picref->data[0][(c+x + rgba_map[0]) * pixel_step + - (y+r) * picref->linesize[0]]; - dst_pixel[1] = picref->data[0][(c+x + rgba_map[1]) * pixel_step + - (y+r) * picref->linesize[0]]; - dst_pixel[2] = picref->data[0][(c+x + rgba_map[2]) * pixel_step + - (y+r) * picref->linesize[0]]; - /* get intensity value in the glyph bitmap (source) */ src_val = GET_BITMAP_VAL(r, c); if (!src_val) diff --git a/libavfilter/vf_frei0r.c b/libavfilter/vf_frei0r.c index ab1957089e..f5b7abb543 100644 --- a/libavfilter/vf_frei0r.c +++ b/libavfilter/vf_frei0r.c @@ -28,6 +28,7 @@ #include <frei0r.h> #include "libavutil/avstring.h" #include "libavutil/imgutils.h" +#include "libavutil/mathematics.h" #include "libavutil/parseutils.h" #include "avfilter.h" diff --git a/libavfilter/vf_lut.c b/libavfilter/vf_lut.c index c457972474..bc74a9d489 100644 --- a/libavfilter/vf_lut.c +++ b/libavfilter/vf_lut.c @@ -28,6 +28,7 @@ #include "libavutil/opt.h" #include "libavutil/pixdesc.h" #include "avfilter.h" +#include "internal.h" static const char *var_names[] = { "E", @@ -165,16 +166,6 @@ static int query_formats(AVFilterContext *ctx) return 0; } -static int pix_fmt_is_in(enum PixelFormat pix_fmt, enum PixelFormat *pix_fmts) -{ - enum PixelFormat *p; - for (p = pix_fmts; *p != PIX_FMT_NONE; p++) { - if (pix_fmt == *p) - return 1; - } - return 0; -} - /** * Clip value val in the minval - maxval range. 
*/ @@ -238,6 +229,7 @@ static int config_props(AVFilterLink *inlink) min[Y] = min[U] = min[V] = 16; max[Y] = 235; max[U] = max[V] = 240; + min[A] = 0; max[A] = 255; break; default: min[0] = min[1] = min[2] = min[3] = 0; @@ -245,8 +237,8 @@ static int config_props(AVFilterLink *inlink) } lut->is_yuv = lut->is_rgb = 0; - if (pix_fmt_is_in(inlink->format, yuv_pix_fmts)) lut->is_yuv = 1; - else if (pix_fmt_is_in(inlink->format, rgb_pix_fmts)) lut->is_rgb = 1; + if (ff_fmt_is_in(inlink->format, yuv_pix_fmts)) lut->is_yuv = 1; + else if (ff_fmt_is_in(inlink->format, rgb_pix_fmts)) lut->is_rgb = 1; if (lut->is_rgb) { switch (inlink->format) { @@ -306,25 +298,29 @@ static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) AVFilterLink *outlink = ctx->outputs[0]; AVFilterBufferRef *inpic = inlink ->cur_buf; AVFilterBufferRef *outpic = outlink->out_buf; - uint8_t *inrow, *outrow; + uint8_t *inrow, *outrow, *inrow0, *outrow0; int i, j, k, plane; if (lut->is_rgb) { /* packed */ - inrow = inpic ->data[0] + y * inpic ->linesize[0]; - outrow = outpic->data[0] + y * outpic->linesize[0]; + inrow0 = inpic ->data[0] + y * inpic ->linesize[0]; + outrow0 = outpic->data[0] + y * outpic->linesize[0]; for (i = 0; i < h; i ++) { + inrow = inrow0; + outrow = outrow0; for (j = 0; j < inlink->w; j++) { for (k = 0; k < lut->step; k++) outrow[k] = lut->lut[lut->rgba_map[k]][inrow[k]]; outrow += lut->step; inrow += lut->step; } + inrow0 += inpic ->linesize[0]; + outrow0 += outpic->linesize[0]; } } else { /* planar */ - for (plane = 0; inpic->data[plane]; plane++) { + for (plane = 0; plane < 4 && inpic->data[plane]; plane++) { int vsub = plane == 1 || plane == 2 ? lut->vsub : 0; int hsub = plane == 1 || plane == 2 ? lut->hsub : 0; @@ -345,8 +341,8 @@ static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) #define DEFINE_LUT_FILTER(name_, description_, init_) \ AVFilter avfilter_vf_##name_ = { \ - .name = NULL_IF_CONFIG_SMALL(#name_), \ - .description = description_, \ + .name = #name_, \ + .description = NULL_IF_CONFIG_SMALL(description_), \ .priv_size = sizeof(LutContext), \ \ .init = init_, \ diff --git a/libavfilter/vf_mp.c b/libavfilter/vf_mp.c index 36616b9c94..86f6244863 100644 --- a/libavfilter/vf_mp.c +++ b/libavfilter/vf_mp.c @@ -41,6 +41,7 @@ //FIXME maybe link the orig in +//XXX: identical pix_fmt must be following with each others static const struct { int fmt; enum PixelFormat pix_fmt; @@ -135,7 +136,6 @@ extern const vf_info_t vf_info_palette; extern const vf_info_t vf_info_lavc; extern const vf_info_t vf_info_zrmjpeg; extern const vf_info_t vf_info_dvbscale; -extern const vf_info_t vf_info_cropdetect; extern const vf_info_t vf_info_test; extern const vf_info_t vf_info_noise; extern const vf_info_t vf_info_yvu9; @@ -151,7 +151,6 @@ extern const vf_info_t vf_info_unsharp; extern const vf_info_t vf_info_swapuv; extern const vf_info_t vf_info_il; extern const vf_info_t vf_info_fil; -extern const vf_info_t vf_info_boxblur; extern const vf_info_t vf_info_sab; extern const vf_info_t vf_info_smartblur; extern const vf_info_t vf_info_perspective; @@ -181,7 +180,6 @@ extern const vf_info_t vf_info_fspp; extern const vf_info_t vf_info_pp7; extern const vf_info_t vf_info_yuvcsp; extern const vf_info_t vf_info_kerndeint; -extern const vf_info_t vf_info_rgbtest; extern const vf_info_t vf_info_qp; extern const vf_info_t vf_info_phase; extern const vf_info_t vf_info_divtc; @@ -191,7 +189,6 @@ extern const vf_info_t vf_info_screenshot; extern const vf_info_t vf_info_ass; extern const 
vf_info_t vf_info_mcdeint; extern const vf_info_t vf_info_yadif; -extern const vf_info_t vf_info_blackframe; extern const vf_info_t vf_info_geq; extern const vf_info_t vf_info_ow; extern const vf_info_t vf_info_fixpts; @@ -200,9 +197,6 @@ extern const vf_info_t vf_info_stereo3d; static const vf_info_t* const filters[]={ &vf_info_2xsai, - &vf_info_blackframe, - &vf_info_boxblur, - &vf_info_cropdetect, &vf_info_decimate, &vf_info_delogo, &vf_info_denoise3d, @@ -240,7 +234,6 @@ static const vf_info_t* const filters[]={ &vf_info_qp, &vf_info_rectangle, &vf_info_remove_logo, - &vf_info_rgbtest, &vf_info_rotate, &vf_info_sab, &vf_info_screenshot, @@ -785,13 +778,17 @@ static int query_formats(AVFilterContext *ctx) { AVFilterFormats *avfmts=NULL; MPContext *m = ctx->priv; + enum PixelFormat lastpixfmt = PIX_FMT_NONE; int i; for(i=0; conversion_map[i].fmt; i++){ av_log(ctx, AV_LOG_DEBUG, "query: %X\n", conversion_map[i].fmt); if(m->vf.query_format(&m->vf, conversion_map[i].fmt)){ av_log(ctx, AV_LOG_DEBUG, "supported,adding\n"); - avfilter_add_format(&avfmts, conversion_map[i].pix_fmt); + if (conversion_map[i].pix_fmt != lastpixfmt) { + avfilter_add_format(&avfmts, conversion_map[i].pix_fmt); + lastpixfmt = conversion_map[i].pix_fmt; + } } } diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c index 0eb24b9682..d473c1b62b 100644 --- a/libavfilter/vf_overlay.c +++ b/libavfilter/vf_overlay.c @@ -30,6 +30,7 @@ #include "libavutil/avstring.h" #include "libavutil/pixdesc.h" #include "libavutil/imgutils.h" +#include "libavutil/mathematics.h" #include "internal.h" static const char *var_names[] = { diff --git a/libavfilter/vf_pad.c b/libavfilter/vf_pad.c index 0ca5bd08fc..c4edc23cc5 100644 --- a/libavfilter/vf_pad.c +++ b/libavfilter/vf_pad.c @@ -32,6 +32,7 @@ #include "libavutil/avassert.h" #include "libavutil/imgutils.h" #include "libavutil/parseutils.h" +#include "libavutil/mathematics.h" #include "drawutils.h" static const char *var_names[] = { @@ -45,6 +46,8 @@ static const char *var_names[] = { "x", "y", "a", + "sar", + "dar", "hsub", "vsub", NULL @@ -61,6 +64,8 @@ enum var_name { VAR_X, VAR_Y, VAR_A, + VAR_SAR, + VAR_DAR, VAR_HSUB, VAR_VSUB, VARS_NB @@ -156,8 +161,11 @@ static int config_input(AVFilterLink *inlink) var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN; var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN; var_values[VAR_A] = (float) inlink->w / inlink->h; + var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ? 
+ (float) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1; + var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR]; var_values[VAR_HSUB] = 1<<pad->hsub; - var_values[VAR_VSUB] = 2<<pad->vsub; + var_values[VAR_VSUB] = 1<<pad->vsub; /* evaluate width and height */ av_expr_parse_and_eval(&res, (expr = pad->w_expr), diff --git a/libavfilter/vf_scale.c b/libavfilter/vf_scale.c index e172a2e586..044b2843bc 100644 --- a/libavfilter/vf_scale.c +++ b/libavfilter/vf_scale.c @@ -26,6 +26,7 @@ #include "avfilter.h" #include "libavutil/avstring.h" #include "libavutil/eval.h" +#include "libavutil/mathematics.h" #include "libavutil/pixdesc.h" #include "libavutil/avassert.h" #include "libswscale/swscale.h" @@ -39,6 +40,8 @@ static const char *var_names[] = { "out_w", "ow", "out_h", "oh", "a", + "sar", + "dar", "hsub", "vsub", NULL @@ -53,6 +56,8 @@ enum var_name { VAR_OUT_W, VAR_OW, VAR_OUT_H, VAR_OH, VAR_A, + VAR_SAR, + VAR_DAR, VAR_HSUB, VAR_VSUB, VARS_NB @@ -158,6 +163,9 @@ static int config_props(AVFilterLink *outlink) var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN; var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN; var_values[VAR_A] = (float) inlink->w / inlink->h; + var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ? + (float) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1; + var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR]; var_values[VAR_HSUB] = 1<<av_pix_fmt_descriptors[inlink->format].log2_chroma_w; var_values[VAR_VSUB] = 1<<av_pix_fmt_descriptors[inlink->format].log2_chroma_h; diff --git a/libavfilter/vf_setpts.c b/libavfilter/vf_setpts.c index 31fa1ef65f..ace2595346 100644 --- a/libavfilter/vf_setpts.c +++ b/libavfilter/vf_setpts.c @@ -27,6 +27,7 @@ /* #define DEBUG */ #include "libavutil/eval.h" +#include "libavutil/mathematics.h" #include "avfilter.h" static const char *var_names[] = { diff --git a/libavfilter/vf_settb.c b/libavfilter/vf_settb.c index 3e48ac4191..d0ac35c8f2 100644 --- a/libavfilter/vf_settb.c +++ b/libavfilter/vf_settb.c @@ -25,6 +25,7 @@ #include "libavutil/avstring.h" #include "libavutil/eval.h" +#include "libavutil/mathematics.h" #include "libavutil/rational.h" #include "avfilter.h" #include "internal.h" diff --git a/libavfilter/vf_transpose.c b/libavfilter/vf_transpose.c index 8b11ae8d53..f4f72b9c64 100644 --- a/libavfilter/vf_transpose.c +++ b/libavfilter/vf_transpose.c @@ -195,6 +195,8 @@ static void end_frame(AVFilterLink *inlink) avfilter_unref_buffer(outpic); } +static void null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) { } + AVFilter avfilter_vf_transpose = { .name = "transpose", .description = NULL_IF_CONFIG_SMALL("Transpose input video."), @@ -207,6 +209,7 @@ AVFilter avfilter_vf_transpose = { .inputs = (AVFilterPad[]) {{ .name = "default", .type = AVMEDIA_TYPE_VIDEO, .start_frame = start_frame, + .draw_slice = null_draw_slice, .end_frame = end_frame, .min_perms = AV_PERM_READ, }, { .name = NULL}}, diff --git a/libavfilter/vf_yadif.c b/libavfilter/vf_yadif.c index 296328b71a..13186e79ad 100644 --- a/libavfilter/vf_yadif.c +++ b/libavfilter/vf_yadif.c @@ -36,14 +36,20 @@ typedef struct { int mode; /** - * 0: bottom field first - * 1: top field first + * 0: top field first + * 1: bottom field first * -1: auto-detection */ int parity; int frame_pending; + /** + * 0: deinterlace all frames + * 1: only deinterlace frames marked as interlaced + */ + int auto_enable; + AVFilterBufferRef *cur; AVFilterBufferRef *next; AVFilterBufferRef *prev; @@ -195,9 +201,12 @@ static void 
return_frame(AVFilterContext *ctx, int is_second) tff = yadif->parity^1; } - if (is_second) + if (is_second) { yadif->out = avfilter_get_video_buffer(link, AV_PERM_WRITE | AV_PERM_PRESERVE | AV_PERM_REUSE, link->w, link->h); + avfilter_copy_buffer_ref_props(yadif->out, yadif->cur); + yadif->out->video->interlaced = 0; + } if (!yadif->csp) yadif->csp = &av_pix_fmt_descriptors[link->format]; @@ -240,6 +249,14 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref) if (!yadif->cur) return; + if (yadif->auto_enable && !yadif->cur->video->interlaced) { + yadif->out = avfilter_ref_buffer(yadif->cur, AV_PERM_READ); + avfilter_unref_buffer(yadif->prev); + yadif->prev = NULL; + avfilter_start_frame(ctx->outputs[0], yadif->out); + return; + } + if (!yadif->prev) yadif->prev = avfilter_ref_buffer(yadif->cur, AV_PERM_READ); @@ -259,6 +276,12 @@ static void end_frame(AVFilterLink *link) if (!yadif->out) return; + if (yadif->auto_enable && !yadif->cur->video->interlaced) { + avfilter_draw_slice(ctx->outputs[0], 0, link->h, 1); + avfilter_end_frame(ctx->outputs[0]); + return; + } + return_frame(ctx, 0); } @@ -299,6 +322,9 @@ static int poll_frame(AVFilterLink *link) } assert(yadif->next || !val); + if (yadif->auto_enable && yadif->next && !yadif->next->video->interlaced) + return val; + return val * ((yadif->mode&1)+1); } @@ -344,9 +370,10 @@ static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque) yadif->mode = 0; yadif->parity = -1; + yadif->auto_enable = 0; yadif->csp = NULL; - if (args) sscanf(args, "%d:%d", &yadif->mode, &yadif->parity); + if (args) sscanf(args, "%d:%d:%d", &yadif->mode, &yadif->parity, &yadif->auto_enable); yadif->filter_line = filter_line_c; if (HAVE_SSSE3 && cpu_flags & AV_CPU_FLAG_SSSE3) @@ -356,7 +383,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque) else if (HAVE_MMX && cpu_flags & AV_CPU_FLAG_MMX) yadif->filter_line = ff_yadif_filter_line_mmx; - av_log(ctx, AV_LOG_INFO, "mode:%d parity:%d\n", yadif->mode, yadif->parity); + av_log(ctx, AV_LOG_INFO, "mode:%d parity:%d auto_enable:%d\n", yadif->mode, yadif->parity, yadif->auto_enable); return 0; } diff --git a/libavfilter/vsink_buffer.c b/libavfilter/vsink_buffer.c index b5627b4f82..f8140b5e5a 100644 --- a/libavfilter/vsink_buffer.c +++ b/libavfilter/vsink_buffer.c @@ -96,6 +96,7 @@ int av_vsink_buffer_get_video_buffer_ref(AVFilterContext *ctx, AVFilter avfilter_vsink_buffersink = { .name = "buffersink", + .description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."), .priv_size = sizeof(BufferSinkContext), .init = init, .uninit = uninit, diff --git a/libavfilter/vsrc_color.c b/libavfilter/vsrc_color.c index 00bfb66dee..b854ff2e4c 100644 --- a/libavfilter/vsrc_color.c +++ b/libavfilter/vsrc_color.c @@ -27,6 +27,7 @@ #include "libavutil/pixdesc.h" #include "libavutil/colorspace.h" #include "libavutil/imgutils.h" +#include "libavutil/mathematics.h" #include "libavutil/parseutils.h" #include "drawutils.h" diff --git a/libavfilter/vsrc_movie.c b/libavfilter/vsrc_movie.c index eea2660cea..f9818d1694 100644 --- a/libavfilter/vsrc_movie.c +++ b/libavfilter/vsrc_movie.c @@ -97,7 +97,7 @@ static int movie_init(AVFilterContext *ctx) "Failed to avformat_open_input '%s'\n", movie->file_name); return ret; } - if ((ret = av_find_stream_info(movie->format_ctx)) < 0) + if ((ret = avformat_find_stream_info(movie->format_ctx, NULL)) < 0) av_log(ctx, AV_LOG_WARNING, "Failed to find stream info\n"); // if seeking requested, 
we execute it @@ -140,7 +140,7 @@ static int movie_init(AVFilterContext *ctx) return AVERROR(EINVAL); } - if ((ret = avcodec_open(movie->codec_ctx, codec)) < 0) { + if ((ret = avcodec_open2(movie->codec_ctx, codec, NULL)) < 0) { av_log(ctx, AV_LOG_ERROR, "Failed to open codec\n"); return ret; } diff --git a/libavfilter/vsrc_nullsrc.c b/libavfilter/vsrc_nullsrc.c index 487aa50069..c513be800d 100644 --- a/libavfilter/vsrc_nullsrc.c +++ b/libavfilter/vsrc_nullsrc.c @@ -23,6 +23,7 @@ #include "libavutil/avstring.h" #include "libavutil/eval.h" +#include "libavutil/mathematics.h" #include "libavutil/parseutils.h" #include "avfilter.h" diff --git a/libavfilter/vsrc_testsrc.c b/libavfilter/vsrc_testsrc.c new file mode 100644 index 0000000000..e2cf4d47ce --- /dev/null +++ b/libavfilter/vsrc_testsrc.c @@ -0,0 +1,488 @@ +/* + * Copyright (c) 2007 Nicolas George <nicolas.george@normalesup.org> + * Copyright (c) 2011 Stefano Sabatini + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Misc test sources. + * + * testsrc is based on the test pattern generator demuxer by Nicolas George: + * http://lists.mplayerhq.hu/pipermail/ffmpeg-devel/2007-October/037845.html + * + * rgbtestsrc is ported from MPlayer libmpcodecs/vf_rgbtest.c by + * Michael Niedermayer. 
+ */ + +#include <float.h> + +#include "libavutil/opt.h" +#include "libavutil/intreadwrite.h" +#include "libavutil/parseutils.h" +#include "avfilter.h" + +typedef struct { + const AVClass *class; + int h, w; + unsigned int nb_frame; + AVRational time_base; + int64_t pts, max_pts; + char *size; ///< video frame size + char *rate; ///< video frame rate + char *duration; ///< total duration of the generated video + void (* fill_picture_fn)(AVFilterContext *ctx, AVFilterBufferRef *picref); + + /* only used by rgbtest */ + int rgba_map[4]; +} TestSourceContext; + +#define OFFSET(x) offsetof(TestSourceContext, x) + +static const AVOption testsrc_options[]= { + { "size", "set video size", OFFSET(size), FF_OPT_TYPE_STRING, {.str = "320x240"}, 0, 0 }, + { "s", "set video size", OFFSET(size), FF_OPT_TYPE_STRING, {.str = "320x240"}, 0, 0 }, + { "rate", "set video rate", OFFSET(rate), FF_OPT_TYPE_STRING, {.str = "25"}, 0, 0 }, + { "r", "set video rate", OFFSET(rate), FF_OPT_TYPE_STRING, {.str = "25"}, 0, 0 }, + { "duration", "set video duration", OFFSET(duration), FF_OPT_TYPE_STRING, {.str = NULL}, 0, 0 }, + { NULL }, +}; + +static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque) +{ + TestSourceContext *test = ctx->priv; + AVRational frame_rate_q; + int64_t duration = -1; + int ret = 0; + + av_opt_set_defaults2(test, 0, 0); + + if ((ret = (av_set_options_string(test, args, "=", ":"))) < 0) { + av_log(ctx, AV_LOG_ERROR, "Error parsing options string: '%s'\n", args); + return ret; + } + + if ((ret = av_parse_video_size(&test->w, &test->h, test->size)) < 0) { + av_log(ctx, AV_LOG_ERROR, "Invalid frame size: '%s'\n", test->size); + return ret; + } + + if ((ret = av_parse_video_rate(&frame_rate_q, test->rate)) < 0 || + frame_rate_q.den <= 0 || frame_rate_q.num <= 0) { + av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: '%s'\n", test->rate); + return ret; + } + + if ((test->duration) && (ret = av_parse_time(&duration, test->duration, 1)) < 0) { + av_log(ctx, AV_LOG_ERROR, "Invalid duration: '%s'\n", test->duration); + return ret; + } + + test->time_base.num = frame_rate_q.den; + test->time_base.den = frame_rate_q.num; + test->max_pts = duration >= 0 ? + av_rescale_q(duration, AV_TIME_BASE_Q, test->time_base) : -1; + test->nb_frame = 0; + test->pts = 0; + + av_log(ctx, AV_LOG_INFO, "size:%dx%d rate:%d/%d duration:%f\n", + test->w, test->h, frame_rate_q.num, frame_rate_q.den, + duration < 0 ? -1 : test->max_pts * av_q2d(test->time_base)); + return 0; +} + +static int config_props(AVFilterLink *outlink) +{ + TestSourceContext *test = outlink->src->priv; + + outlink->w = test->w; + outlink->h = test->h; + outlink->time_base = test->time_base; + + return 0; +} + +static int request_frame(AVFilterLink *outlink) +{ + TestSourceContext *test = outlink->src->priv; + AVFilterBufferRef *picref; + + if (test->max_pts >= 0 && test->pts > test->max_pts) + return AVERROR_EOF; + picref = avfilter_get_video_buffer(outlink, AV_PERM_WRITE, + test->w, test->h); + picref->pts = test->pts++; + test->nb_frame++; + test->fill_picture_fn(outlink->src, picref); + + avfilter_start_frame(outlink, avfilter_ref_buffer(picref, ~0)); + avfilter_draw_slice(outlink, 0, picref->video->h, 1); + avfilter_end_frame(outlink); + avfilter_unref_buffer(picref); + + return 0; +} + +#if CONFIG_TESTSRC_FILTER + +static const char *testsrc_get_name(void *ctx) +{ + return "testsrc"; +} + +static const AVClass testsrc_class = { + "TestSourceContext", + testsrc_get_name, + testsrc_options +}; + +/** + * Fill a rectangle with value val. 
+ * + * @param val the RGB value to set + * @param dst pointer to the destination buffer to fill + * @param dst_linesize linesize of destination + * @param segment_width width of the segment + * @param x horizontal coordinate where to draw the rectangle in the destination buffer + * @param y horizontal coordinate where to draw the rectangle in the destination buffer + * @param w width of the rectangle to draw, expressed as a number of segment_width units + * @param h height of the rectangle to draw, expressed as a number of segment_width units + */ +static void draw_rectangle(unsigned val, uint8_t *dst, int dst_linesize, unsigned segment_width, + unsigned x, unsigned y, unsigned w, unsigned h) +{ + int i; + int step = 3; + + dst += segment_width * (step * x + y * dst_linesize); + w *= segment_width * step; + h *= segment_width; + for (i = 0; i < h; i++) { + memset(dst, val, w); + dst += dst_linesize; + } +} + +static void draw_digit(int digit, uint8_t *dst, unsigned dst_linesize, + unsigned segment_width) +{ +#define TOP_HBAR 1 +#define MID_HBAR 2 +#define BOT_HBAR 4 +#define LEFT_TOP_VBAR 8 +#define LEFT_BOT_VBAR 16 +#define RIGHT_TOP_VBAR 32 +#define RIGHT_BOT_VBAR 64 + struct { + int x, y, w, h; + } segments[] = { + { 1, 0, 5, 1 }, /* TOP_HBAR */ + { 1, 6, 5, 1 }, /* MID_HBAR */ + { 1, 12, 5, 1 }, /* BOT_HBAR */ + { 0, 1, 1, 5 }, /* LEFT_TOP_VBAR */ + { 0, 7, 1, 5 }, /* LEFT_BOT_VBAR */ + { 6, 1, 1, 5 }, /* RIGHT_TOP_VBAR */ + { 6, 7, 1, 5 } /* RIGHT_BOT_VBAR */ + }; + static const unsigned char masks[10] = { + /* 0 */ TOP_HBAR |BOT_HBAR|LEFT_TOP_VBAR|LEFT_BOT_VBAR|RIGHT_TOP_VBAR|RIGHT_BOT_VBAR, + /* 1 */ RIGHT_TOP_VBAR|RIGHT_BOT_VBAR, + /* 2 */ TOP_HBAR|MID_HBAR|BOT_HBAR|LEFT_BOT_VBAR |RIGHT_TOP_VBAR, + /* 3 */ TOP_HBAR|MID_HBAR|BOT_HBAR |RIGHT_TOP_VBAR|RIGHT_BOT_VBAR, + /* 4 */ MID_HBAR |LEFT_TOP_VBAR |RIGHT_TOP_VBAR|RIGHT_BOT_VBAR, + /* 5 */ TOP_HBAR|BOT_HBAR|MID_HBAR|LEFT_TOP_VBAR |RIGHT_BOT_VBAR, + /* 6 */ TOP_HBAR|BOT_HBAR|MID_HBAR|LEFT_TOP_VBAR|LEFT_BOT_VBAR |RIGHT_BOT_VBAR, + /* 7 */ TOP_HBAR |RIGHT_TOP_VBAR|RIGHT_BOT_VBAR, + /* 8 */ TOP_HBAR|BOT_HBAR|MID_HBAR|LEFT_TOP_VBAR|LEFT_BOT_VBAR|RIGHT_TOP_VBAR|RIGHT_BOT_VBAR, + /* 9 */ TOP_HBAR|BOT_HBAR|MID_HBAR|LEFT_TOP_VBAR |RIGHT_TOP_VBAR|RIGHT_BOT_VBAR, + }; + unsigned mask = masks[digit]; + int i; + + draw_rectangle(0, dst, dst_linesize, segment_width, 0, 0, 8, 13); + for (i = 0; i < FF_ARRAY_ELEMS(segments); i++) + if (mask & (1<<i)) + draw_rectangle(255, dst, dst_linesize, segment_width, + segments[i].x, segments[i].y, segments[i].w, segments[i].h); +} + +#define GRADIENT_SIZE (6 * 256) + +static void test_fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref) +{ + TestSourceContext *test = ctx->priv; + uint8_t *p, *p0; + int x, y; + int color, color_rest; + int icolor; + int radius; + int quad0, quad; + int dquad_x, dquad_y; + int grad, dgrad, rgrad, drgrad; + int seg_size; + int second; + int i; + uint8_t *data = picref->data[0]; + int width = picref->video->w; + int height = picref->video->h; + + /* draw colored bars and circle */ + radius = (width + height) / 4; + quad0 = width * width / 4 + height * height / 4 - radius * radius; + dquad_y = 1 - height; + p0 = data; + for (y = 0; y < height; y++) { + p = p0; + color = 0; + color_rest = 0; + quad = quad0; + dquad_x = 1 - width; + for (x = 0; x < width; x++) { + icolor = color; + if (quad < 0) + icolor ^= 7; + quad += dquad_x; + dquad_x += 2; + *(p++) = icolor & 1 ? 255 : 0; + *(p++) = icolor & 2 ? 255 : 0; + *(p++) = icolor & 4 ? 
255 : 0; + color_rest += 8; + if (color_rest >= width) { + color_rest -= width; + color++; + } + } + quad0 += dquad_y; + dquad_y += 2; + p0 += picref->linesize[0]; + } + + /* draw sliding color line */ + p = data + picref->linesize[0] * height * 3/4; + grad = (256 * test->nb_frame * test->time_base.num / test->time_base.den) % + GRADIENT_SIZE; + rgrad = 0; + dgrad = GRADIENT_SIZE / width; + drgrad = GRADIENT_SIZE % width; + for (x = 0; x < width; x++) { + *(p++) = + grad < 256 || grad >= 5 * 256 ? 255 : + grad >= 2 * 256 && grad < 4 * 256 ? 0 : + grad < 2 * 256 ? 2 * 256 - 1 - grad : grad - 4 * 256; + *(p++) = + grad >= 4 * 256 ? 0 : + grad >= 1 * 256 && grad < 3 * 256 ? 255 : + grad < 1 * 256 ? grad : 4 * 256 - 1 - grad; + *(p++) = + grad < 2 * 256 ? 0 : + grad >= 3 * 256 && grad < 5 * 256 ? 255 : + grad < 3 * 256 ? grad - 2 * 256 : 6 * 256 - 1 - grad; + grad += dgrad; + rgrad += drgrad; + if (rgrad >= GRADIENT_SIZE) { + grad++; + rgrad -= GRADIENT_SIZE; + } + if (grad >= GRADIENT_SIZE) + grad -= GRADIENT_SIZE; + } + for (y = height / 8; y > 0; y--) { + memcpy(p, p - picref->linesize[0], 3 * width); + p += picref->linesize[0]; + } + + /* draw digits */ + seg_size = width / 80; + if (seg_size >= 1 && height >= 13 * seg_size) { + second = test->nb_frame * test->time_base.num / test->time_base.den; + x = width - (width - seg_size * 64) / 2; + y = (height - seg_size * 13) / 2; + p = data + (x*3 + y * picref->linesize[0]); + for (i = 0; i < 8; i++) { + p -= 3 * 8 * seg_size; + draw_digit(second % 10, p, picref->linesize[0], seg_size); + second /= 10; + if (second == 0) + break; + } + } +} + +static av_cold int test_init(AVFilterContext *ctx, const char *args, void *opaque) +{ + TestSourceContext *test = ctx->priv; + + test->class = &testsrc_class; + test->fill_picture_fn = test_fill_picture; + return init(ctx, args, opaque); +} + +static int test_query_formats(AVFilterContext *ctx) +{ + static const enum PixelFormat pix_fmts[] = { + PIX_FMT_RGB24, PIX_FMT_NONE + }; + avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts)); + return 0; +} + +AVFilter avfilter_vsrc_testsrc = { + .name = "testsrc", + .description = NULL_IF_CONFIG_SMALL("Generate test pattern."), + .priv_size = sizeof(TestSourceContext), + .init = test_init, + + .query_formats = test_query_formats, + + .inputs = (AVFilterPad[]) {{ .name = NULL}}, + + .outputs = (AVFilterPad[]) {{ .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .request_frame = request_frame, + .config_props = config_props, }, + { .name = NULL }}, +}; + +#endif /* CONFIG_TESTSRC_FILTER */ + +#if CONFIG_RGBTESTSRC_FILTER + +static const char *rgbtestsrc_get_name(void *ctx) +{ + return "rgbtestsrc"; +} + +static const AVClass rgbtestsrc_class = { + "RGBTestSourceContext", + rgbtestsrc_get_name, + testsrc_options +}; + +#define R 0 +#define G 1 +#define B 2 +#define A 3 + +static void rgbtest_put_pixel(uint8_t *dst, int dst_linesize, + int x, int y, int r, int g, int b, enum PixelFormat fmt, + int rgba_map[4]) +{ + int32_t v; + uint8_t *p; + + switch (fmt) { + case PIX_FMT_BGR444: ((uint16_t*)(dst + y*dst_linesize))[x] = ((r >> 4) << 8) | ((g >> 4) << 4) | (b >> 4); break; + case PIX_FMT_RGB444: ((uint16_t*)(dst + y*dst_linesize))[x] = ((b >> 4) << 8) | ((g >> 4) << 4) | (r >> 4); break; + case PIX_FMT_BGR555: ((uint16_t*)(dst + y*dst_linesize))[x] = ((r>>3)<<10) | ((g>>3)<<5) | (b>>3); break; + case PIX_FMT_RGB555: ((uint16_t*)(dst + y*dst_linesize))[x] = ((b>>3)<<10) | ((g>>3)<<5) | (r>>3); break; + case PIX_FMT_BGR565: ((uint16_t*)(dst + 
y*dst_linesize))[x] = ((r>>3)<<11) | ((g>>2)<<5) | (b>>3); break; + case PIX_FMT_RGB565: ((uint16_t*)(dst + y*dst_linesize))[x] = ((b>>3)<<11) | ((g>>2)<<5) | (r>>3); break; + case PIX_FMT_RGB24: + case PIX_FMT_BGR24: + v = (r << (rgba_map[R]*8)) + (g << (rgba_map[G]*8)) + (b << (rgba_map[B]*8)); + p = dst + 3*x + y*dst_linesize; + AV_WL24(p, v); + break; + case PIX_FMT_RGBA: + case PIX_FMT_BGRA: + case PIX_FMT_ARGB: + case PIX_FMT_ABGR: + v = (r << (rgba_map[R]*8)) + (g << (rgba_map[G]*8)) + (b << (rgba_map[B]*8)); + p = dst + 4*x + y*dst_linesize; + AV_WL32(p, v); + break; + } +} + +static void rgbtest_fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref) +{ + TestSourceContext *test = ctx->priv; + int x, y, w = picref->video->w, h = picref->video->h; + + for (y = 0; y < h; y++) { + for (x = 0; x < picref->video->w; x++) { + int c = 256*x/w; + int r = 0, g = 0, b = 0; + + if (3*y < h ) r = c; + else if (3*y < 2*h) g = c; + else b = c; + + rgbtest_put_pixel(picref->data[0], picref->linesize[0], x, y, r, g, b, + ctx->outputs[0]->format, test->rgba_map); + } + } +} + +static av_cold int rgbtest_init(AVFilterContext *ctx, const char *args, void *opaque) +{ + TestSourceContext *test = ctx->priv; + + test->class = &rgbtestsrc_class; + test->fill_picture_fn = rgbtest_fill_picture; + return init(ctx, args, opaque); +} + +static int rgbtest_query_formats(AVFilterContext *ctx) +{ + static const enum PixelFormat pix_fmts[] = { + PIX_FMT_RGBA, PIX_FMT_ARGB, PIX_FMT_BGRA, PIX_FMT_ABGR, + PIX_FMT_BGR24, PIX_FMT_RGB24, + PIX_FMT_RGB444, PIX_FMT_BGR444, + PIX_FMT_RGB565, PIX_FMT_BGR565, + PIX_FMT_RGB555, PIX_FMT_BGR555, + PIX_FMT_NONE + }; + avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts)); + return 0; +} + +static int rgbtest_config_props(AVFilterLink *outlink) +{ + TestSourceContext *test = outlink->src->priv; + + switch (outlink->format) { + case PIX_FMT_ARGB: test->rgba_map[A] = 0; test->rgba_map[R] = 1; test->rgba_map[G] = 2; test->rgba_map[B] = 3; break; + case PIX_FMT_ABGR: test->rgba_map[A] = 0; test->rgba_map[B] = 1; test->rgba_map[G] = 2; test->rgba_map[R] = 3; break; + case PIX_FMT_RGBA: + case PIX_FMT_RGB24: test->rgba_map[R] = 0; test->rgba_map[G] = 1; test->rgba_map[B] = 2; test->rgba_map[A] = 3; break; + case PIX_FMT_BGRA: + case PIX_FMT_BGR24: test->rgba_map[B] = 0; test->rgba_map[G] = 1; test->rgba_map[R] = 2; test->rgba_map[A] = 3; break; + } + + return config_props(outlink); +} + +AVFilter avfilter_vsrc_rgbtestsrc = { + .name = "rgbtestsrc", + .description = NULL_IF_CONFIG_SMALL("Generate RGB test pattern."), + .priv_size = sizeof(TestSourceContext), + .init = rgbtest_init, + + .query_formats = rgbtest_query_formats, + + .inputs = (AVFilterPad[]) {{ .name = NULL}}, + + .outputs = (AVFilterPad[]) {{ .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .request_frame = request_frame, + .config_props = rgbtest_config_props, }, + { .name = NULL }}, +}; + +#endif /* CONFIG_RGBTESTSRC_FILTER */ diff --git a/libavfilter/x86/gradfun.c b/libavfilter/x86/gradfun.c index 05d4a6fd6e..e892117d67 100644 --- a/libavfilter/x86/gradfun.c +++ b/libavfilter/x86/gradfun.c @@ -1,19 +1,21 @@ /* + * Copyright (C) 2009 Loren Merritt <lorenm@u.washignton.edu> + * * This file is part of FFmpeg. * - * FFmpeg is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
+ * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with FFmpeg; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/cpu.h" diff --git a/libavformat/4xm.c b/libavformat/4xm.c index 0b79c761a5..92a001f691 100644 --- a/libavformat/4xm.c +++ b/libavformat/4xm.c @@ -28,6 +28,7 @@ */ #include "libavutil/intreadwrite.h" +#include "libavutil/intfloat_readwrite.h" #include "avformat.h" #define RIFF_TAG MKTAG('R', 'I', 'F', 'F') @@ -344,11 +345,11 @@ static int fourxm_read_close(AVFormatContext *s) } AVInputFormat ff_fourxm_demuxer = { - "4xm", - NULL_IF_CONFIG_SMALL("4X Technologies format"), - sizeof(FourxmDemuxContext), - fourxm_probe, - fourxm_read_header, - fourxm_read_packet, - fourxm_read_close, + .name = "4xm", + .long_name = NULL_IF_CONFIG_SMALL("4X Technologies format"), + .priv_data_size = sizeof(FourxmDemuxContext), + .read_probe = fourxm_probe, + .read_header = fourxm_read_header, + .read_packet = fourxm_read_packet, + .read_close = fourxm_read_close, }; diff --git a/libavformat/Makefile b/libavformat/Makefile index 05f524690b..5ced4a6129 100644 --- a/libavformat/Makefile +++ b/libavformat/Makefile @@ -339,8 +339,10 @@ OBJS-$(CONFIG_TCP_PROTOCOL) += tcp.o OBJS-$(CONFIG_UDP_PROTOCOL) += udp.o # libavdevice dependencies +OBJS-$(CONFIG_ALSA_INDEV) += timefilter.o OBJS-$(CONFIG_JACK_INDEV) += timefilter.o -TESTPROGS = timefilter +TESTPROGS = seek timefilter +TOOLS = pktdumper probetest -include $(SUBDIR)../subdir.mak +include $(SRC_PATH)/subdir.mak diff --git a/libavformat/a64.c b/libavformat/a64.c index 3481ee7d03..edab918129 100644 --- a/libavformat/a64.c +++ b/libavformat/a64.c @@ -55,7 +55,6 @@ static int a64_write_header(struct AVFormatContext *s) break; default: return AVERROR(EINVAL); - break; } avio_write(s->pb, header, 2); c->prev_pkt.size = 0; @@ -171,7 +170,7 @@ AVOutputFormat ff_a64_muxer = { .extensions = "a64, A64", .priv_data_size = sizeof (A64Context), .video_codec = CODEC_ID_A64_MULTI, - a64_write_header, - a64_write_packet, - a64_write_trailer + .write_header = a64_write_header, + .write_packet = a64_write_packet, + .write_trailer = a64_write_trailer }; diff --git a/libavformat/aacdec.c b/libavformat/aacdec.c index 4ac2cadb8f..c3a5029260 100644 --- a/libavformat/aacdec.c +++ b/libavformat/aacdec.c @@ -47,6 +47,7 @@ static int adts_aac_probe(AVProbeData *p) fsize = (AV_RB32(buf2 + 3) >> 13) & 0x1FFF; if(fsize < 7) break; + fsize = FFMIN(fsize, end - buf2); buf2 += fsize; } max_frames = FFMAX(max_frames, frames); @@ -82,12 +83,11 @@ static int adts_aac_read_header(AVFormatContext *s, } AVInputFormat ff_aac_demuxer = { - "aac", - 
NULL_IF_CONFIG_SMALL("raw ADTS AAC"), - 0, - adts_aac_probe, - adts_aac_read_header, - ff_raw_read_partial_packet, + .name = "aac", + .long_name = NULL_IF_CONFIG_SMALL("raw ADTS AAC"), + .read_probe = adts_aac_probe, + .read_header = adts_aac_read_header, + .read_packet = ff_raw_read_partial_packet, .flags= AVFMT_GENERIC_INDEX, .extensions = "aac", .value = CODEC_ID_AAC, diff --git a/libavformat/ac3dec.c b/libavformat/ac3dec.c index fcf99363ee..5d4d2f64c5 100644 --- a/libavformat/ac3dec.c +++ b/libavformat/ac3dec.c @@ -71,12 +71,11 @@ static int ac3_probe(AVProbeData *p) } AVInputFormat ff_ac3_demuxer = { - "ac3", - NULL_IF_CONFIG_SMALL("raw AC-3"), - 0, - ac3_probe, - ff_raw_audio_read_header, - ff_raw_read_partial_packet, + .name = "ac3", + .long_name = NULL_IF_CONFIG_SMALL("raw AC-3"), + .read_probe = ac3_probe, + .read_header = ff_raw_audio_read_header, + .read_packet = ff_raw_read_partial_packet, .flags= AVFMT_GENERIC_INDEX, .extensions = "ac3", .value = CODEC_ID_AC3, @@ -90,12 +89,11 @@ static int eac3_probe(AVProbeData *p) } AVInputFormat ff_eac3_demuxer = { - "eac3", - NULL_IF_CONFIG_SMALL("raw E-AC-3"), - 0, - eac3_probe, - ff_raw_audio_read_header, - ff_raw_read_partial_packet, + .name = "eac3", + .long_name = NULL_IF_CONFIG_SMALL("raw E-AC-3"), + .read_probe = eac3_probe, + .read_header = ff_raw_audio_read_header, + .read_packet = ff_raw_read_partial_packet, .flags= AVFMT_GENERIC_INDEX, .extensions = "eac3", .value = CODEC_ID_EAC3, diff --git a/libavformat/adtsenc.c b/libavformat/adtsenc.c index a03e128d96..5387cc855b 100644 --- a/libavformat/adtsenc.c +++ b/libavformat/adtsenc.c @@ -59,6 +59,10 @@ int ff_adts_decode_extradata(AVFormatContext *s, ADTSContext *adts, uint8_t *buf av_log(s, AV_LOG_ERROR, "Scalable configurations are not allowed in ADTS\n"); return -1; } + if (get_bits(&gb, 1)) { + av_log(s, AV_LOG_ERROR, "Extension flag is not allowed in ADTS\n"); + return -1; + } if (!adts->channel_conf) { init_put_bits(&pb, adts->pce_data, MAX_PCE_SIZE); @@ -138,13 +142,13 @@ static int adts_write_packet(AVFormatContext *s, AVPacket *pkt) } AVOutputFormat ff_adts_muxer = { - "adts", - NULL_IF_CONFIG_SMALL("ADTS AAC"), - "audio/aac", - "aac,adts", - sizeof(ADTSContext), - CODEC_ID_AAC, - CODEC_ID_NONE, - adts_write_header, - adts_write_packet, + .name = "adts", + .long_name = NULL_IF_CONFIG_SMALL("ADTS AAC"), + .mime_type = "audio/aac", + .extensions = "aac,adts", + .priv_data_size = sizeof(ADTSContext), + .audio_codec = CODEC_ID_AAC, + .video_codec = CODEC_ID_NONE, + .write_header = adts_write_header, + .write_packet = adts_write_packet, }; diff --git a/libavformat/aea.c b/libavformat/aea.c index 2a1d24d7a2..42889b9b90 100644 --- a/libavformat/aea.c +++ b/libavformat/aea.c @@ -95,14 +95,12 @@ static int aea_read_packet(AVFormatContext *s, AVPacket *pkt) } AVInputFormat ff_aea_demuxer = { - "aea", - NULL_IF_CONFIG_SMALL("MD STUDIO audio"), - 0, - aea_read_probe, - aea_read_header, - aea_read_packet, - 0, - pcm_read_seek, + .name = "aea", + .long_name = NULL_IF_CONFIG_SMALL("MD STUDIO audio"), + .read_probe = aea_read_probe, + .read_header = aea_read_header, + .read_packet = aea_read_packet, + .read_seek = pcm_read_seek, .flags= AVFMT_GENERIC_INDEX, .extensions = "aea", }; diff --git a/libavformat/aiffdec.c b/libavformat/aiffdec.c index 9608910340..97fecf643d 100644 --- a/libavformat/aiffdec.c +++ b/libavformat/aiffdec.c @@ -320,13 +320,12 @@ static int aiff_read_packet(AVFormatContext *s, } AVInputFormat ff_aiff_demuxer = { - "aiff", - NULL_IF_CONFIG_SMALL("Audio IFF"), - 
sizeof(AIFFInputContext), - aiff_probe, - aiff_read_header, - aiff_read_packet, - NULL, - pcm_read_seek, + .name = "aiff", + .long_name = NULL_IF_CONFIG_SMALL("Audio IFF"), + .priv_data_size = sizeof(AIFFInputContext), + .read_probe = aiff_probe, + .read_header = aiff_read_header, + .read_packet = aiff_read_packet, + .read_seek = pcm_read_seek, .codec_tag= (const AVCodecTag* const []){ff_codec_aiff_tags, 0}, }; diff --git a/libavformat/aiffenc.c b/libavformat/aiffenc.c index 34ab0cc246..3b2fc38160 100644 --- a/libavformat/aiffenc.c +++ b/libavformat/aiffenc.c @@ -19,6 +19,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +#include "libavutil/intfloat_readwrite.h" #include "avformat.h" #include "aiff.h" #include "avio_internal.h" @@ -154,15 +155,15 @@ static int aiff_write_trailer(AVFormatContext *s) } AVOutputFormat ff_aiff_muxer = { - "aiff", - NULL_IF_CONFIG_SMALL("Audio IFF"), - "audio/aiff", - "aif,aiff,afc,aifc", - sizeof(AIFFOutputContext), - CODEC_ID_PCM_S16BE, - CODEC_ID_NONE, - aiff_write_header, - aiff_write_packet, - aiff_write_trailer, + .name = "aiff", + .long_name = NULL_IF_CONFIG_SMALL("Audio IFF"), + .mime_type = "audio/aiff", + .extensions = "aif,aiff,afc,aifc", + .priv_data_size = sizeof(AIFFOutputContext), + .audio_codec = CODEC_ID_PCM_S16BE, + .video_codec = CODEC_ID_NONE, + .write_header = aiff_write_header, + .write_packet = aiff_write_packet, + .write_trailer = aiff_write_trailer, .codec_tag= (const AVCodecTag* const []){ff_codec_aiff_tags, 0}, }; diff --git a/libavformat/amr.c b/libavformat/amr.c index 66763f3fa7..373e454519 100644 --- a/libavformat/amr.c +++ b/libavformat/amr.c @@ -174,27 +174,25 @@ static int amr_read_packet(AVFormatContext *s, #if CONFIG_AMR_DEMUXER AVInputFormat ff_amr_demuxer = { - "amr", - NULL_IF_CONFIG_SMALL("3GPP AMR file format"), - 0, /*priv_data_size*/ - amr_probe, - amr_read_header, - amr_read_packet, - NULL, + .name = "amr", + .long_name = NULL_IF_CONFIG_SMALL("3GPP AMR file format"), + .priv_data_size = 0, /*priv_data_size*/ + .read_probe = amr_probe, + .read_header = amr_read_header, + .read_packet = amr_read_packet, .flags = AVFMT_GENERIC_INDEX, }; #endif #if CONFIG_AMR_MUXER AVOutputFormat ff_amr_muxer = { - "amr", - NULL_IF_CONFIG_SMALL("3GPP AMR file format"), - "audio/amr", - "amr", - 0, - CODEC_ID_AMR_NB, - CODEC_ID_NONE, - amr_write_header, - amr_write_packet, + .name = "amr", + .long_name = NULL_IF_CONFIG_SMALL("3GPP AMR file format"), + .mime_type = "audio/amr", + .extensions = "amr", + .audio_codec = CODEC_ID_AMR_NB, + .video_codec = CODEC_ID_NONE, + .write_header = amr_write_header, + .write_packet = amr_write_packet, }; #endif diff --git a/libavformat/anm.c b/libavformat/anm.c index 269e325e42..e35dc1517d 100644 --- a/libavformat/anm.c +++ b/libavformat/anm.c @@ -226,10 +226,10 @@ repeat: } AVInputFormat ff_anm_demuxer = { - "anm", - NULL_IF_CONFIG_SMALL("Deluxe Paint Animation"), - sizeof(AnmDemuxContext), - probe, - read_header, - read_packet, + .name = "anm", + .long_name = NULL_IF_CONFIG_SMALL("Deluxe Paint Animation"), + .priv_data_size = sizeof(AnmDemuxContext), + .read_probe = probe, + .read_header = read_header, + .read_packet = read_packet, }; diff --git a/libavformat/apc.c b/libavformat/apc.c index bf93fc1522..19de7f5c30 100644 --- a/libavformat/apc.c +++ b/libavformat/apc.c @@ -81,10 +81,9 @@ static int apc_read_packet(AVFormatContext *s, AVPacket *pkt) } AVInputFormat ff_apc_demuxer = { - "apc", - NULL_IF_CONFIG_SMALL("CRYO APC format"), - 0, - apc_probe, - apc_read_header, 
- apc_read_packet, + .name = "apc", + .long_name = NULL_IF_CONFIG_SMALL("CRYO APC format"), + .read_probe = apc_probe, + .read_header = apc_read_header, + .read_packet = apc_read_packet, }; diff --git a/libavformat/ape.c b/libavformat/ape.c index bda6b32b1d..61590bdd35 100644 --- a/libavformat/ape.c +++ b/libavformat/ape.c @@ -129,9 +129,11 @@ static void ape_dumpinfo(AVFormatContext * s, APEContext * ape_ctx) } else { for (i = 0; i < ape_ctx->seektablelength / sizeof(uint32_t); i++) { if (i < ape_ctx->totalframes - 1) { - av_log(s, AV_LOG_DEBUG, "%8d %d (%d bytes)\n", i, ape_ctx->seektable[i], ape_ctx->seektable[i + 1] - ape_ctx->seektable[i]); + av_log(s, AV_LOG_DEBUG, "%8d %"PRIu32" (%"PRIu32" bytes)\n", + i, ape_ctx->seektable[i], + ape_ctx->seektable[i + 1] - ape_ctx->seektable[i]); } else { - av_log(s, AV_LOG_DEBUG, "%8d %d\n", i, ape_ctx->seektable[i]); + av_log(s, AV_LOG_DEBUG, "%8d %"PRIu32"\n", i, ape_ctx->seektable[i]); } } } @@ -169,7 +171,7 @@ static int ape_read_header(AVFormatContext * s, AVFormatParameters * ap) ape->fileversion = avio_rl16(pb); if (ape->fileversion < APE_MIN_VERSION || ape->fileversion > APE_MAX_VERSION) { - av_log(s, AV_LOG_ERROR, "Unsupported file version - %"PRId16".%02"PRId16"\n", + av_log(s, AV_LOG_ERROR, "Unsupported file version - %d.%02d\n", ape->fileversion / 1000, (ape->fileversion % 1000) / 10); return -1; } @@ -253,7 +255,8 @@ static int ape_read_header(AVFormatContext * s, AVFormatParameters * ap) return -1; } if (ape->seektablelength && (ape->seektablelength / sizeof(*ape->seektable)) < ape->totalframes) { - av_log(s, AV_LOG_ERROR, "Number of seek entries is less than number of frames: %ld vs. %"PRIu32"\n", + av_log(s, AV_LOG_ERROR, + "Number of seek entries is less than number of frames: %zu vs. %"PRIu32"\n", ape->seektablelength / sizeof(*ape->seektable), ape->totalframes); return AVERROR_INVALIDDATA; } @@ -405,13 +408,13 @@ static int ape_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp } AVInputFormat ff_ape_demuxer = { - "ape", - NULL_IF_CONFIG_SMALL("Monkey's Audio"), - sizeof(APEContext), - ape_probe, - ape_read_header, - ape_read_packet, - ape_read_close, - ape_read_seek, + .name = "ape", + .long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"), + .priv_data_size = sizeof(APEContext), + .read_probe = ape_probe, + .read_header = ape_read_header, + .read_packet = ape_read_packet, + .read_close = ape_read_close, + .read_seek = ape_read_seek, .extensions = "ape,apl,mac" }; diff --git a/libavformat/applehttp.c b/libavformat/applehttp.c index 7c5ebc7f88..5cbbafd110 100644 --- a/libavformat/applehttp.c +++ b/libavformat/applehttp.c @@ -27,6 +27,7 @@ #include "libavutil/avstring.h" #include "libavutil/intreadwrite.h" +#include "libavutil/mathematics.h" #include "libavutil/opt.h" #include "libavutil/dict.h" #include "avformat.h" @@ -413,7 +414,7 @@ reload: c->end_of_segment = 1; c->cur_seq_no = v->cur_seq_no; - if (v->ctx) { + if (v->ctx && v->ctx->nb_streams) { v->needed = 0; for (i = v->stream_offset; i < v->stream_offset + v->ctx->nb_streams; i++) { @@ -669,12 +670,12 @@ static int applehttp_probe(AVProbeData *p) } AVInputFormat ff_applehttp_demuxer = { - "applehttp", - NULL_IF_CONFIG_SMALL("Apple HTTP Live Streaming format"), - sizeof(AppleHTTPContext), - applehttp_probe, - applehttp_read_header, - applehttp_read_packet, - applehttp_close, - applehttp_read_seek, + .name = "applehttp", + .long_name = NULL_IF_CONFIG_SMALL("Apple HTTP Live Streaming format"), + .priv_data_size = sizeof(AppleHTTPContext), + .read_probe = 
applehttp_probe, + .read_header = applehttp_read_header, + .read_packet = applehttp_read_packet, + .read_close = applehttp_close, + .read_seek = applehttp_read_seek, }; diff --git a/libavformat/asfcrypt.c b/libavformat/asfcrypt.c index 59986e0a2c..750758d822 100644 --- a/libavformat/asfcrypt.c +++ b/libavformat/asfcrypt.c @@ -28,9 +28,9 @@ #include "asfcrypt.h" /** - * \brief find multiplicative inverse modulo 2 ^ 32 - * \param v number to invert, must be odd! - * \return number so that result * v = 1 (mod 2^32) + * @brief find multiplicative inverse modulo 2 ^ 32 + * @param v number to invert, must be odd! + * @return number so that result * v = 1 (mod 2^32) */ static uint32_t inverse(uint32_t v) { // v ^ 3 gives the inverse (mod 16), could also be implemented @@ -45,9 +45,9 @@ static uint32_t inverse(uint32_t v) { } /** - * \brief read keys from keybuf into keys - * \param keybuf buffer containing the keys - * \param keys output key array containing the keys for encryption in + * @brief read keys from keybuf into keys + * @param keybuf buffer containing the keys + * @param keys output key array containing the keys for encryption in * native endianness */ static void multiswap_init(const uint8_t keybuf[48], uint32_t keys[12]) { @@ -57,9 +57,9 @@ static void multiswap_init(const uint8_t keybuf[48], uint32_t keys[12]) { } /** - * \brief invert the keys so that encryption become decryption keys and + * @brief invert the keys so that encryption become decryption keys and * the other way round. - * \param keys key array of ints to invert + * @param keys key array of ints to invert */ static void multiswap_invert_keys(uint32_t keys[12]) { int i; @@ -92,12 +92,12 @@ static uint32_t multiswap_inv_step(const uint32_t keys[12], uint32_t v) { } /** - * \brief "MultiSwap" encryption - * \param keys 32 bit numbers in machine endianness, + * @brief "MultiSwap" encryption + * @param keys 32 bit numbers in machine endianness, * 0-4 and 6-10 must be inverted from decryption - * \param key another key, this one must be the same for the decryption - * \param data data to encrypt - * \return encrypted data + * @param key another key, this one must be the same for the decryption + * @param data data to encrypt + * @return encrypted data */ static uint64_t multiswap_enc(const uint32_t keys[12], uint64_t key, uint64_t data) { uint32_t a = data; @@ -114,12 +114,12 @@ static uint64_t multiswap_enc(const uint32_t keys[12], uint64_t key, uint64_t da } /** - * \brief "MultiSwap" decryption - * \param keys 32 bit numbers in machine endianness, + * @brief "MultiSwap" decryption + * @param keys 32 bit numbers in machine endianness, * 0-4 and 6-10 must be inverted from encryption - * \param key another key, this one must be the same as for the encryption - * \param data data to decrypt - * \return decrypted data + * @param key another key, this one must be the same as for the encryption + * @param data data to decrypt + * @return decrypted data */ static uint64_t multiswap_dec(const uint32_t keys[12], uint64_t key, uint64_t data) { uint32_t a; diff --git a/libavformat/asfdec.c b/libavformat/asfdec.c index 725e013b17..633eeeb266 100644 --- a/libavformat/asfdec.c +++ b/libavformat/asfdec.c @@ -25,6 +25,7 @@ #include "libavutil/common.h" #include "libavutil/avstring.h" #include "libavutil/dict.h" +#include "libavutil/mathematics.h" #include "libavcodec/mpegaudio.h" #include "avformat.h" #include "avio_internal.h" @@ -84,13 +85,11 @@ static const ff_asf_guid index_guid = { 0x90, 0x08, 0x00, 0x33, 0xb1, 0xe5, 0xcf, 0x11, 
0x89, 0xf4, 0x00, 0xa0, 0xc9, 0x03, 0x49, 0xcb }; +#ifdef DEBUG static const ff_asf_guid stream_bitrate_guid = { /* (http://get.to/sdp) */ 0xce, 0x75, 0xf8, 0x7b, 0x8d, 0x46, 0xd1, 0x11, 0x8d, 0x82, 0x00, 0x60, 0x97, 0xc9, 0xa2, 0xb2 }; -/**********************************/ -/* decoding */ -#ifdef DEBUG #define PRINT_IF_GUID(g,cmp) \ if (!ff_guidcmp(g, &cmp)) \ av_dlog(NULL, "(GUID: %s) ", #cmp) @@ -1084,8 +1083,6 @@ static int asf_read_packet(AVFormatContext *s, AVPacket *pkt) assert(asf->packet_size_left < FRAME_HEADER_SIZE || asf->packet_segments < 1); asf->packet_time_start = 0; } - - return 0; } // Added to support seeking after packets have been read @@ -1281,14 +1278,14 @@ static int asf_read_seek(AVFormatContext *s, int stream_index, int64_t pts, int } AVInputFormat ff_asf_demuxer = { - "asf", - NULL_IF_CONFIG_SMALL("ASF format"), - sizeof(ASFContext), - asf_probe, - asf_read_header, - asf_read_packet, - asf_read_close, - asf_read_seek, - asf_read_pts, + .name = "asf", + .long_name = NULL_IF_CONFIG_SMALL("ASF format"), + .priv_data_size = sizeof(ASFContext), + .read_probe = asf_probe, + .read_header = asf_read_header, + .read_packet = asf_read_packet, + .read_close = asf_read_close, + .read_seek = asf_read_seek, + .read_timestamp = asf_read_pts, .flags = AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH, }; diff --git a/libavformat/asfenc.c b/libavformat/asfenc.c index 5c929f4547..b23ea785ef 100644 --- a/libavformat/asfenc.c +++ b/libavformat/asfenc.c @@ -882,20 +882,20 @@ static int asf_write_trailer(AVFormatContext *s) #if CONFIG_ASF_MUXER AVOutputFormat ff_asf_muxer = { - "asf", - NULL_IF_CONFIG_SMALL("ASF format"), - "video/x-ms-asf", - "asf,wmv,wma", - sizeof(ASFContext), + .name = "asf", + .long_name = NULL_IF_CONFIG_SMALL("ASF format"), + .mime_type = "video/x-ms-asf", + .extensions = "asf,wmv,wma", + .priv_data_size = sizeof(ASFContext), #if CONFIG_LIBMP3LAME - CODEC_ID_MP3, + .audio_codec = CODEC_ID_MP3, #else - CODEC_ID_MP2, + .audio_codec = CODEC_ID_MP2, #endif - CODEC_ID_MSMPEG4V3, - asf_write_header, - asf_write_packet, - asf_write_trailer, + .video_codec = CODEC_ID_MSMPEG4V3, + .write_header = asf_write_header, + .write_packet = asf_write_packet, + .write_trailer = asf_write_trailer, .flags = AVFMT_GLOBALHEADER, .codec_tag= (const AVCodecTag* const []){codec_asf_bmp_tags, ff_codec_bmp_tags, ff_codec_wav_tags, 0}, }; @@ -903,20 +903,20 @@ AVOutputFormat ff_asf_muxer = { #if CONFIG_ASF_STREAM_MUXER AVOutputFormat ff_asf_stream_muxer = { - "asf_stream", - NULL_IF_CONFIG_SMALL("ASF format"), - "video/x-ms-asf", - "asf,wmv,wma", - sizeof(ASFContext), + .name = "asf_stream", + .long_name = NULL_IF_CONFIG_SMALL("ASF format"), + .mime_type = "video/x-ms-asf", + .extensions = "asf,wmv,wma", + .priv_data_size = sizeof(ASFContext), #if CONFIG_LIBMP3LAME - CODEC_ID_MP3, + .audio_codec = CODEC_ID_MP3, #else - CODEC_ID_MP2, + .audio_codec = CODEC_ID_MP2, #endif - CODEC_ID_MSMPEG4V3, - asf_write_stream_header, - asf_write_packet, - asf_write_trailer, + .video_codec = CODEC_ID_MSMPEG4V3, + .write_header = asf_write_stream_header, + .write_packet = asf_write_packet, + .write_trailer = asf_write_trailer, .flags = AVFMT_GLOBALHEADER, .codec_tag= (const AVCodecTag* const []){codec_asf_bmp_tags, ff_codec_bmp_tags, ff_codec_wav_tags, 0}, }; diff --git a/libavformat/assdec.c b/libavformat/assdec.c index 3dc28f21fe..e2cebf8875 100644 --- a/libavformat/assdec.c +++ b/libavformat/assdec.c @@ -19,6 +19,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +#include 
"libavutil/mathematics.h" #include "avformat.h" #include "internal.h" diff --git a/libavformat/au.c b/libavformat/au.c index 23365c4734..870f46f65d 100644 --- a/libavformat/au.c +++ b/libavformat/au.c @@ -120,7 +120,7 @@ static int au_probe(AVProbeData *p) static int au_read_header(AVFormatContext *s, AVFormatParameters *ap) { - int size; + int size, bps, data_size = 0; unsigned int tag; AVIOContext *pb = s->pb; unsigned int id, channels, rate; @@ -132,7 +132,12 @@ static int au_read_header(AVFormatContext *s, if (tag != MKTAG('.', 's', 'n', 'd')) return -1; size = avio_rb32(pb); /* header size */ - avio_rb32(pb); /* data size */ + data_size = avio_rb32(pb); /* data size in bytes */ + + if (data_size < 0) { + av_log(s, AV_LOG_ERROR, "Invalid negative data size '%d' found\n", data_size); + return AVERROR_INVALIDDATA; + } id = avio_rb32(pb); rate = avio_rb32(pb); @@ -140,7 +145,7 @@ static int au_read_header(AVFormatContext *s, codec = ff_codec_get_id(codec_au_tags, id); - if (!av_get_bits_per_sample(codec)) { + if (!(bps = av_get_bits_per_sample(codec))) { av_log_ask_for_sample(s, "could not determine bits per sample\n"); return AVERROR_INVALIDDATA; } @@ -159,6 +164,7 @@ static int au_read_header(AVFormatContext *s, st->codec->codec_id = codec; st->codec->channels = channels; st->codec->sample_rate = rate; + st->duration = (((int64_t)data_size)<<3) / (st->codec->channels * bps); av_set_pts_info(st, 64, 1, rate); return 0; } @@ -185,30 +191,27 @@ static int au_read_packet(AVFormatContext *s, #if CONFIG_AU_DEMUXER AVInputFormat ff_au_demuxer = { - "au", - NULL_IF_CONFIG_SMALL("SUN AU format"), - 0, - au_probe, - au_read_header, - au_read_packet, - NULL, - pcm_read_seek, + .name = "au", + .long_name = NULL_IF_CONFIG_SMALL("SUN AU format"), + .read_probe = au_probe, + .read_header = au_read_header, + .read_packet = au_read_packet, + .read_seek = pcm_read_seek, .codec_tag= (const AVCodecTag* const []){codec_au_tags, 0}, }; #endif #if CONFIG_AU_MUXER AVOutputFormat ff_au_muxer = { - "au", - NULL_IF_CONFIG_SMALL("SUN AU format"), - "audio/basic", - "au", - 0, - CODEC_ID_PCM_S16BE, - CODEC_ID_NONE, - au_write_header, - au_write_packet, - au_write_trailer, + .name = "au", + .long_name = NULL_IF_CONFIG_SMALL("SUN AU format"), + .mime_type = "audio/basic", + .extensions = "au", + .audio_codec = CODEC_ID_PCM_S16BE, + .video_codec = CODEC_ID_NONE, + .write_header = au_write_header, + .write_packet = au_write_packet, + .write_trailer = au_write_trailer, .codec_tag= (const AVCodecTag* const []){codec_au_tags, 0}, }; #endif //CONFIG_AU_MUXER diff --git a/libavformat/audiointerleave.c b/libavformat/audiointerleave.c index 3c235c069e..844112fcf5 100644 --- a/libavformat/audiointerleave.c +++ b/libavformat/audiointerleave.c @@ -21,6 +21,7 @@ */ #include "libavutil/fifo.h" +#include "libavutil/mathematics.h" #include "avformat.h" #include "audiointerleave.h" #include "internal.h" diff --git a/libavformat/avformat.h b/libavformat/avformat.h index 01dc916610..dd93237a2b 100644 --- a/libavformat/avformat.h +++ b/libavformat/avformat.h @@ -41,6 +41,7 @@ const char *avformat_license(void); #include <stdio.h> /* FILE */ #include "libavcodec/avcodec.h" #include "libavutil/dict.h" +#include "libavutil/log.h" #include "avio.h" #include "libavformat/version.h" @@ -48,63 +49,70 @@ const char *avformat_license(void); struct AVFormatContext; -/* - * Public Metadata API. 
+/** + * @defgroup metadata_api Public Metadata API + * @{ * The metadata API allows libavformat to export metadata tags to a client * application using a sequence of key/value pairs. Like all strings in FFmpeg, * metadata must be stored as UTF-8 encoded Unicode. Note that metadata * exported by demuxers isn't checked to be valid UTF-8 in most cases. * Important concepts to keep in mind: - * 1. Keys are unique; there can never be 2 tags with the same key. This is + * - Keys are unique; there can never be 2 tags with the same key. This is * also meant semantically, i.e., a demuxer should not knowingly produce * several keys that are literally different but semantically identical. * E.g., key=Author5, key=Author6. In this example, all authors must be * placed in the same tag. - * 2. Metadata is flat, not hierarchical; there are no subtags. If you + * - Metadata is flat, not hierarchical; there are no subtags. If you * want to store, e.g., the email address of the child of producer Alice * and actor Bob, that could have key=alice_and_bobs_childs_email_address. - * 3. Several modifiers can be applied to the tag name. This is done by + * - Several modifiers can be applied to the tag name. This is done by * appending a dash character ('-') and the modifier name in the order * they appear in the list below -- e.g. foo-eng-sort, not foo-sort-eng. - * a) language -- a tag whose value is localized for a particular language + * - language -- a tag whose value is localized for a particular language * is appended with the ISO 639-2/B 3-letter language code. * For example: Author-ger=Michael, Author-eng=Mike * The original/default language is in the unqualified "Author" tag. * A demuxer should set a default if it sets any translated tag. - * b) sorting -- a modified version of a tag that should be used for + * - sorting -- a modified version of a tag that should be used for * sorting will have '-sort' appended. E.g. artist="The Beatles", * artist-sort="Beatles, The". * - * 4. Demuxers attempt to export metadata in a generic format, however tags + * - Demuxers attempt to export metadata in a generic format, however tags * with no generic equivalents are left as they are stored in the container. * Follows a list of generic tag names: * - * album -- name of the set this work belongs to - * album_artist -- main creator of the set/album, if different from artist. - * e.g. "Various Artists" for compilation albums. - * artist -- main creator of the work - * comment -- any additional description of the file. - * composer -- who composed the work, if different from artist. - * copyright -- name of copyright holder. - * creation_time-- date when the file was created, preferably in ISO 8601. - * date -- date when the work was created, preferably in ISO 8601. - * disc -- number of a subset, e.g. disc in a multi-disc collection. - * encoder -- name/settings of the software/hardware that produced the file. - * encoded_by -- person/group who created the file. - * filename -- original name of the file. - * genre -- <self-evident>. - * language -- main language in which the work is performed, preferably - * in ISO 639-2 format. Multiple languages can be specified by - * separating them with commas. - * performer -- artist who performed the work, if different from artist. - * E.g for "Also sprach Zarathustra", artist would be "Richard - * Strauss" and performer "London Philharmonic Orchestra". - * publisher -- name of the label/publisher. - * service_name -- name of the service in broadcasting (channel name). 
- * service_provider -- name of the service provider in broadcasting. - * title -- name of the work. - * track -- number of this work in the set, can be in form current/total. - * variant_bitrate -- the total bitrate of the bitrate variant that the current stream is part of + @verbatim + album -- name of the set this work belongs to + album_artist -- main creator of the set/album, if different from artist. + e.g. "Various Artists" for compilation albums. + artist -- main creator of the work + comment -- any additional description of the file. + composer -- who composed the work, if different from artist. + copyright -- name of copyright holder. + creation_time-- date when the file was created, preferably in ISO 8601. + date -- date when the work was created, preferably in ISO 8601. + disc -- number of a subset, e.g. disc in a multi-disc collection. + encoder -- name/settings of the software/hardware that produced the file. + encoded_by -- person/group who created the file. + filename -- original name of the file. + genre -- <self-evident>. + language -- main language in which the work is performed, preferably + in ISO 639-2 format. Multiple languages can be specified by + separating them with commas. + performer -- artist who performed the work, if different from artist. + E.g for "Also sprach Zarathustra", artist would be "Richard + Strauss" and performer "London Philharmonic Orchestra". + publisher -- name of the label/publisher. + service_name -- name of the service in broadcasting (channel name). + service_provider -- name of the service provider in broadcasting. + title -- name of the work. + track -- number of this work in the set, can be in form current/total. + variant_bitrate -- the total bitrate of the bitrate variant that the current stream is part of + @endverbatim + * + * Look in the examples section for an application example how to use the Metadata API. + * + * @} */ #if FF_API_OLD_METADATA2 @@ -334,6 +342,9 @@ typedef struct AVOutputFormat { const AVClass *priv_class; ///< AVClass for the private context + void (*get_output_timestamp)(struct AVFormatContext *s, int stream, + int64_t *dts, int64_t *wall); + /* private fields */ struct AVOutputFormat *next; } AVOutputFormat; @@ -535,12 +546,14 @@ typedef struct AVStream { int stream_copy; /**< If set, just copy stream. */ enum AVDiscard discard; ///< Selects which packets can be discarded at will and do not need to be demuxed. +#if FF_API_AVSTREAM_QUALITY //FIXME move stuff to a flags field? /** * Quality, as it has been removed from AVCodecContext and put in AVVideoFrame. * MN: dunno if that is the right place for it */ - float quality; + attribute_deprecated float quality; +#endif /** * Decoding: pts of the first frame of the stream, in stream time base. @@ -730,7 +743,12 @@ typedef struct AVFormatContext { #endif char filename[1024]; /**< input or output filename */ /* stream info */ - int64_t timestamp; +#if FF_API_TIMESTAMP + /** + * @deprecated use 'creation_time' metadata tag instead + */ + attribute_deprecated int64_t timestamp; +#endif #if FF_API_OLD_METADATA attribute_deprecated char title[512]; attribute_deprecated char author[512]; @@ -797,12 +815,16 @@ typedef struct AVFormatContext { int preload; int max_delay; +#if FF_API_LOOP_OUTPUT #define AVFMT_NOOUTPUTLOOP -1 #define AVFMT_INFINITEOUTPUTLOOP 0 /** * number of times to loop output in formats that support it + * + * @deprecated use the 'loop' private option in the gif muxer. 
*/ - int loop_output; + attribute_deprecated int loop_output; +#endif int flags; #define AVFMT_FLAG_GENPTS 0x0001 ///< Generate missing pts even if it requires parsing future frames. @@ -815,12 +837,18 @@ typedef struct AVFormatContext { #define AVFMT_FLAG_RTP_HINT 0x0040 ///< Deprecated, use the -movflags rtphint muxer specific AVOption instead #endif #define AVFMT_FLAG_CUSTOM_IO 0x0080 ///< The caller has supplied a custom AVIOContext, don't avio_close() it. +#define AVFMT_FLAG_DISCARD_CORRUPT 0x0100 ///< Discard frames marked corrupted #define AVFMT_FLAG_MP4A_LATM 0x8000 ///< Enable RTP MP4A-LATM payload #define AVFMT_FLAG_SORT_DTS 0x10000 ///< try to interleave outputted packets by dts (using this flag can slow demuxing down) #define AVFMT_FLAG_PRIV_OPT 0x20000 ///< Enable use of private options by delaying codec open (this could be made default once all code is converted) #define AVFMT_FLAG_KEEP_SIDE_DATA 0x40000 ///< Dont merge side data but keep it seperate. - int loop_input; +#if FF_API_LOOP_INPUT + /** + * @deprecated, use the 'loop' img2 demuxer private option. + */ + attribute_deprecated int loop_input; +#endif /** * decoding: size of data to probe; encoding: unused. @@ -1231,6 +1259,7 @@ AVFormatContext *avformat_alloc_output_context(const char *format, int avformat_alloc_output_context2(AVFormatContext **ctx, AVOutputFormat *oformat, const char *format_name, const char *filename); +#if FF_API_FORMAT_PARAMETERS /** * Read packets of a media file to get stream information. This * is useful for file formats with no headers such as MPEG. This @@ -1243,8 +1272,35 @@ int avformat_alloc_output_context2(AVFormatContext **ctx, AVOutputFormat *oforma * @return >=0 if OK, AVERROR_xxx on error * @todo Let the user decide somehow what information is needed so that * we do not waste time getting stuff the user does not need. + * + * @deprecated use avformat_find_stream_info. */ +attribute_deprecated int av_find_stream_info(AVFormatContext *ic); +#endif + +/** + * Read packets of a media file to get stream information. This + * is useful for file formats with no headers such as MPEG. This + * function also computes the real framerate in case of MPEG-2 repeat + * frame mode. + * The logical file position is not changed by this function; + * examined packets may be buffered for later processing. + * + * @param ic media file handle + * @param options If non-NULL, an ic.nb_streams long array of pointers to + * dictionaries, where i-th member contains options for + * codec corresponding to i-th stream. + * On return each dictionary will be filled with options that were not found. + * @return >=0 if OK, AVERROR_xxx on error + * + * @note this function isn't guaranteed to open all the codecs, so + * options being non-empty at return is a perfectly normal behavior. + * + * @todo Let the user decide somehow what information is needed so that + * we do not waste time getting stuff the user does not need. + */ +int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options); /** * Find the "best" stream in the file. @@ -1604,6 +1660,24 @@ int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, */ int av_write_trailer(AVFormatContext *s); +/** + * Get timing information for the data currently output. + * The exact meaning of "currently output" depends on the format. + * It is mostly relevant for devices that have an internal buffer and/or + * work in real time. 
+ * @param s media file handle + * @param stream stream in the media file + * @param dts[out] DTS of the last packet output for the stream, in stream + * time_base units + * @param wall[out] absolute time when that packet whas output, + * in microsecond + * @return 0 if OK, AVERROR(ENOSYS) if the format does not support it + * Note: some formats or devices may not allow to measure dts and wall + * atomically. + */ +int av_get_output_timestamp(struct AVFormatContext *s, int stream, + int64_t *dts, int64_t *wall); + #if FF_API_DUMP_FORMAT /** * @deprecated Deprecated in favor of av_dump_format(). diff --git a/libavformat/avidec.c b/libavformat/avidec.c index f71a270239..0588518a87 100644 --- a/libavformat/avidec.c +++ b/libavformat/avidec.c @@ -21,6 +21,7 @@ #include <strings.h> #include "libavutil/intreadwrite.h" +#include "libavutil/mathematics.h" #include "libavutil/bswap.h" #include "libavutil/opt.h" #include "libavutil/dict.h" @@ -72,6 +73,7 @@ typedef struct { int odml_depth; int use_odml; #define MAX_ODML_DEPTH 1000 + int64_t dts_max; } AVIContext; @@ -663,6 +665,7 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap) if (st->codec->stream_codec_tag == AV_RL32("Axan")){ st->codec->codec_id = CODEC_ID_XAN_DPCM; st->codec->codec_tag = 0; + ast->dshow_block_align = 0; } if (amv_file_format){ st->codec->codec_id = CODEC_ID_ADPCM_IMA_AMV; @@ -858,13 +861,141 @@ static int get_stream_idx(int *d){ } } -static int avi_read_packet(AVFormatContext *s, AVPacket *pkt) +static int avi_sync(AVFormatContext *s, int exit_early) { AVIContext *avi = s->priv_data; AVIOContext *pb = s->pb; int n, d[8]; unsigned int size; int64_t i, sync; + +start_sync: + memset(d, -1, sizeof(int)*8); + for(i=sync=avio_tell(pb); !url_feof(pb); i++) { + int j; + + for(j=0; j<7; j++) + d[j]= d[j+1]; + d[7]= avio_r8(pb); + + size= d[4] + (d[5]<<8) + (d[6]<<16) + (d[7]<<24); + + n= get_stream_idx(d+2); +//av_log(s, AV_LOG_DEBUG, "%X %X %X %X %X %X %X %X %"PRId64" %d %d\n", d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], i, size, n); + if(i + (uint64_t)size > avi->fsize || d[0]<0) + continue; + + //parse ix## + if( (d[0] == 'i' && d[1] == 'x' && n < s->nb_streams) + //parse JUNK + ||(d[0] == 'J' && d[1] == 'U' && d[2] == 'N' && d[3] == 'K') + ||(d[0] == 'i' && d[1] == 'd' && d[2] == 'x' && d[3] == '1')){ + avio_skip(pb, size); +//av_log(s, AV_LOG_DEBUG, "SKIP\n"); + goto start_sync; + } + + //parse stray LIST + if(d[0] == 'L' && d[1] == 'I' && d[2] == 'S' && d[3] == 'T'){ + avio_skip(pb, 4); + goto start_sync; + } + + n= get_stream_idx(d); + + if(!((i-avi->last_pkt_pos)&1) && get_stream_idx(d+1) < s->nb_streams) + continue; + + //detect ##ix chunk and skip + if(d[2] == 'i' && d[3] == 'x' && n < s->nb_streams){ + avio_skip(pb, size); + goto start_sync; + } + + //parse ##dc/##wb + if(n < s->nb_streams){ + AVStream *st; + AVIStream *ast; + st = s->streams[n]; + ast = st->priv_data; + + if(s->nb_streams>=2){ + AVStream *st1 = s->streams[1]; + AVIStream *ast1= st1->priv_data; + //workaround for broken small-file-bug402.avi + if( d[2] == 'w' && d[3] == 'b' + && n==0 + && st ->codec->codec_type == AVMEDIA_TYPE_VIDEO + && st1->codec->codec_type == AVMEDIA_TYPE_AUDIO + && ast->prefix == 'd'*256+'c' + && (d[2]*256+d[3] == ast1->prefix || !ast1->prefix_count) + ){ + n=1; + st = st1; + ast = ast1; + av_log(s, AV_LOG_WARNING, "Invalid stream + prefix combination, assuming audio.\n"); + } + } + + + if( (st->discard >= AVDISCARD_DEFAULT && size==0) + /*|| (st->discard >= AVDISCARD_NONKEY && !(pkt->flags & 
AV_PKT_FLAG_KEY))*/ //FIXME needs a little reordering + || st->discard >= AVDISCARD_ALL){ + if (!exit_early) { + ast->frame_offset += get_duration(ast, size); + } + avio_skip(pb, size); + goto start_sync; + } + + if (d[2] == 'p' && d[3] == 'c' && size<=4*256+4) { + int k = avio_r8(pb); + int last = (k + avio_r8(pb) - 1) & 0xFF; + + avio_rl16(pb); //flags + + for (; k <= last; k++) + ast->pal[k] = avio_rb32(pb)>>8;// b + (g << 8) + (r << 16); + ast->has_pal= 1; + goto start_sync; + } else if( ((ast->prefix_count<5 || sync+9 > i) && d[2]<128 && d[3]<128) || + d[2]*256+d[3] == ast->prefix /*|| + (d[2] == 'd' && d[3] == 'c') || + (d[2] == 'w' && d[3] == 'b')*/) { + + if (exit_early) + return 0; +//av_log(s, AV_LOG_DEBUG, "OK\n"); + if(d[2]*256+d[3] == ast->prefix) + ast->prefix_count++; + else{ + ast->prefix= d[2]*256+d[3]; + ast->prefix_count= 0; + } + + avi->stream_index= n; + ast->packet_size= size + 8; + ast->remaining= size; + + if(size || !ast->sample_size){ + uint64_t pos= avio_tell(pb) - 8; + if(!st->index_entries || !st->nb_index_entries || st->index_entries[st->nb_index_entries - 1].pos < pos){ + av_add_index_entry(st, pos, ast->frame_offset, size, 0, AVINDEX_KEYFRAME); + } + } + return 0; + } + } + } + + return AVERROR_EOF; +} + +static int avi_read_packet(AVFormatContext *s, AVPacket *pkt) +{ + AVIContext *avi = s->priv_data; + AVIOContext *pb = s->pb; + int err; void* dstr; if (CONFIG_DV_DEMUXER && avi->dv_demux) { @@ -1035,124 +1166,22 @@ resync: } ast->seek_pos= 0; - return size; - } + if(!avi->non_interleaved && st->nb_index_entries>1){ + int64_t dts= av_rescale_q(pkt->dts, st->time_base, AV_TIME_BASE_Q); - memset(d, -1, sizeof(int)*8); - for(i=sync=avio_tell(pb); !url_feof(pb); i++) { - int j; - - for(j=0; j<7; j++) - d[j]= d[j+1]; - d[7]= avio_r8(pb); - - size= d[4] + (d[5]<<8) + (d[6]<<16) + (d[7]<<24); - - n= get_stream_idx(d+2); -//av_log(s, AV_LOG_DEBUG, "%X %X %X %X %X %X %X %X %"PRId64" %d %d\n", d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], i, size, n); - if(i + (uint64_t)size > avi->fsize || d[0]<0) - continue; - - //parse ix## - if( (d[0] == 'i' && d[1] == 'x' && n < s->nb_streams) - //parse JUNK - ||(d[0] == 'J' && d[1] == 'U' && d[2] == 'N' && d[3] == 'K') - ||(d[0] == 'i' && d[1] == 'd' && d[2] == 'x' && d[3] == '1')){ - avio_skip(pb, size); -//av_log(s, AV_LOG_DEBUG, "SKIP\n"); - goto resync; - } - - //parse stray LIST - if(d[0] == 'L' && d[1] == 'I' && d[2] == 'S' && d[3] == 'T'){ - avio_skip(pb, 4); - goto resync; - } - - n= get_stream_idx(d); - - if(!((i-avi->last_pkt_pos)&1) && get_stream_idx(d+1) < s->nb_streams) - continue; - - //detect ##ix chunk and skip - if(d[2] == 'i' && d[3] == 'x' && n < s->nb_streams){ - avio_skip(pb, size); - goto resync; + if(avi->dts_max - dts > 2*AV_TIME_BASE){ + avi->non_interleaved= 1; + av_log(s, AV_LOG_INFO, "Switching to NI mode, due to poor interleaving\n"); + }else if(avi->dts_max < dts) + avi->dts_max = dts; } - //parse ##dc/##wb - if(n < s->nb_streams){ - AVStream *st; - AVIStream *ast; - st = s->streams[n]; - ast = st->priv_data; - - if(s->nb_streams>=2){ - AVStream *st1 = s->streams[1]; - AVIStream *ast1= st1->priv_data; - //workaround for broken small-file-bug402.avi - if( d[2] == 'w' && d[3] == 'b' - && n==0 - && st ->codec->codec_type == AVMEDIA_TYPE_VIDEO - && st1->codec->codec_type == AVMEDIA_TYPE_AUDIO - && ast->prefix == 'd'*256+'c' - && (d[2]*256+d[3] == ast1->prefix || !ast1->prefix_count) - ){ - n=1; - st = st1; - ast = ast1; - av_log(s, AV_LOG_WARNING, "Invalid stream + prefix combination, assuming 
audio.\n"); - } - } - - - if( (st->discard >= AVDISCARD_DEFAULT && size==0) - /*|| (st->discard >= AVDISCARD_NONKEY && !(pkt->flags & AV_PKT_FLAG_KEY))*/ //FIXME needs a little reordering - || st->discard >= AVDISCARD_ALL){ - ast->frame_offset += get_duration(ast, size); - avio_skip(pb, size); - goto resync; - } - - if (d[2] == 'p' && d[3] == 'c' && size<=4*256+4) { - int k = avio_r8(pb); - int last = (k + avio_r8(pb) - 1) & 0xFF; - - avio_rl16(pb); //flags - - for (; k <= last; k++) - ast->pal[k] = avio_rb32(pb)>>8;// b + (g << 8) + (r << 16); - ast->has_pal= 1; - goto resync; - } else if( ((ast->prefix_count<5 || sync+9 > i) && d[2]<128 && d[3]<128) || - d[2]*256+d[3] == ast->prefix /*|| - (d[2] == 'd' && d[3] == 'c') || - (d[2] == 'w' && d[3] == 'b')*/) { - -//av_log(s, AV_LOG_DEBUG, "OK\n"); - if(d[2]*256+d[3] == ast->prefix) - ast->prefix_count++; - else{ - ast->prefix= d[2]*256+d[3]; - ast->prefix_count= 0; - } - - avi->stream_index= n; - ast->packet_size= size + 8; - ast->remaining= size; - - if(size || !ast->sample_size){ - uint64_t pos= avio_tell(pb) - 8; - if(!st->index_entries || !st->nb_index_entries || st->index_entries[st->nb_index_entries - 1].pos < pos){ - av_add_index_entry(st, pos, ast->frame_offset, size, 0, AVINDEX_KEYFRAME); - } - } - goto resync; - } - } + return size; } - return AVERROR_EOF; + if ((err = avi_sync(s, 0)) < 0) + return err; + goto resync; } /* XXX: We make the implicit supposition that the positions are sorted @@ -1164,13 +1193,22 @@ static int avi_read_idx1(AVFormatContext *s, int size) int nb_index_entries, i; AVStream *st; AVIStream *ast; - unsigned int index, tag, flags, pos, len; + unsigned int index, tag, flags, pos, len, first_packet = 1; unsigned last_pos= -1; + int64_t idx1_pos, first_packet_pos = 0, data_offset = 0; nb_index_entries = size / 16; if (nb_index_entries <= 0) return -1; + idx1_pos = avio_tell(pb); + avio_seek(pb, avi->movi_list+4, SEEK_SET); + if (avi_sync(s, 1) == 0) { + first_packet_pos = avio_tell(pb) - 8; + } + avi->stream_index = -1; + avio_seek(pb, idx1_pos, SEEK_SET); + /* Read the entries and sort them in each stream component. 
*/ for(i = 0; i < nb_index_entries; i++) { tag = avio_rl32(pb); @@ -1179,9 +1217,6 @@ static int avi_read_idx1(AVFormatContext *s, int size) len = avio_rl32(pb); av_dlog(s, "%d: tag=0x%x flags=0x%x pos=0x%x len=%d/", i, tag, flags, pos, len); - if(i==0 && pos > avi->movi_list) - avi->movi_list= 0; //FIXME better check - pos += avi->movi_list; index = ((tag & 0xff) - '0') * 10; index += ((tag >> 8) & 0xff) - '0'; @@ -1190,9 +1225,14 @@ static int avi_read_idx1(AVFormatContext *s, int size) st = s->streams[index]; ast = st->priv_data; -#if defined(DEBUG_SEEK) - av_log(s, AV_LOG_DEBUG, "%d cum_len=%"PRId64"\n", len, ast->cum_len); -#endif + if(first_packet && first_packet_pos && len) { + data_offset = first_packet_pos - pos; + first_packet = 0; + } + pos += data_offset; + + av_dlog(s, "%d cum_len=%"PRId64"\n", len, ast->cum_len); + if(url_feof(pb)) return -1; @@ -1259,20 +1299,16 @@ static int avi_load_index(AVFormatContext *s) (tag >> 16) & 0xff, (tag >> 24) & 0xff, size); - switch(tag) { - case MKTAG('i', 'd', 'x', '1'): - if (avi_read_idx1(s, size) < 0) - goto skip; + + if (tag == MKTAG('i', 'd', 'x', '1') && + avi_read_idx1(s, size) >= 0) { ret = 0; - goto the_end; - break; - default: - skip: - size += (size & 1); - if (avio_skip(pb, size) < 0) - goto the_end; // something is wrong here break; } + + size += (size & 1); + if (avio_skip(pb, size) < 0) + break; // something is wrong here } the_end: avio_seek(pb, pos, SEEK_SET); @@ -1371,7 +1407,7 @@ static int avi_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp flags | AVSEEK_FLAG_BACKWARD | (st2->codec->codec_type != AVMEDIA_TYPE_VIDEO ? AVSEEK_FLAG_ANY : 0)); if(index<0) index=0; - while(index>0 && st2->index_entries[index-1].pos >= pos_min) + while(!avi->non_interleaved && index>0 && st2->index_entries[index-1].pos >= pos_min) index--; ast2->frame_offset = st2->index_entries[index].timestamp; } @@ -1379,6 +1415,7 @@ static int avi_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp /* do the seek */ avio_seek(s->pb, pos_min, SEEK_SET); avi->stream_index= -1; + avi->dts_max= INT_MIN; return 0; } @@ -1420,13 +1457,13 @@ static int avi_probe(AVProbeData *p) } AVInputFormat ff_avi_demuxer = { - "avi", - NULL_IF_CONFIG_SMALL("AVI format"), - sizeof(AVIContext), - avi_probe, - avi_read_header, - avi_read_packet, - avi_read_close, - avi_read_seek, + .name = "avi", + .long_name = NULL_IF_CONFIG_SMALL("AVI format"), + .priv_data_size = sizeof(AVIContext), + .read_probe = avi_probe, + .read_header = avi_read_header, + .read_packet = avi_read_packet, + .read_close = avi_read_close, + .read_seek = avi_read_seek, .priv_class = &demuxer_class, }; diff --git a/libavformat/avienc.c b/libavformat/avienc.c index 8a53bb53d3..20bbfc0375 100644 --- a/libavformat/avienc.c +++ b/libavformat/avienc.c @@ -639,16 +639,16 @@ static int avi_write_trailer(AVFormatContext *s) } AVOutputFormat ff_avi_muxer = { - "avi", - NULL_IF_CONFIG_SMALL("AVI format"), - "video/x-msvideo", - "avi", - sizeof(AVIContext), - CODEC_ID_MP2, - CODEC_ID_MPEG4, - avi_write_header, - avi_write_packet, - avi_write_trailer, + .name = "avi", + .long_name = NULL_IF_CONFIG_SMALL("AVI format"), + .mime_type = "video/x-msvideo", + .extensions = "avi", + .priv_data_size = sizeof(AVIContext), + .audio_codec = CODEC_ID_MP2, + .video_codec = CODEC_ID_MPEG4, + .write_header = avi_write_header, + .write_packet = avi_write_packet, + .write_trailer = avi_write_trailer, .codec_tag= (const AVCodecTag* const []){ff_codec_bmp_tags, ff_codec_wav_tags, 0}, .flags= 
AVFMT_VARIABLE_FPS, }; diff --git a/libavformat/avio.h b/libavformat/avio.h index 5885349ca7..8b7dd91c06 100644 --- a/libavformat/avio.h +++ b/libavformat/avio.h @@ -147,7 +147,7 @@ typedef struct URLPollEntry { attribute_deprecated int url_poll(URLPollEntry *poll_table, int n, int timeout); /** - * @defgroup open_modes URL open modes + * @name URL open modes * The flags argument to url_open and cosins must be one of the following * constants, optionally ORed with other flags. * @{ @@ -178,7 +178,7 @@ extern URLInterruptCB *url_interrupt_cb; /** * @defgroup old_url_funcs Old url_* functions - * @deprecated use the buffered API based on AVIOContext instead + * The following functions are deprecated. Use the buffered API based on #AVIOContext instead. * @{ */ attribute_deprecated int url_open_protocol (URLContext **puc, struct URLProtocol *up, @@ -238,7 +238,7 @@ attribute_deprecated AVIOContext *av_alloc_put_byte( /** * @defgroup old_avio_funcs Old put_/get_*() functions - * @deprecated use the avio_ -prefixed functions instead. + * The following functions are deprecated. Use the "avio_"-prefixed functions instead. * @{ */ attribute_deprecated int get_buffer(AVIOContext *s, unsigned char *buf, int size); @@ -275,7 +275,7 @@ attribute_deprecated int64_t av_url_read_fseek (AVIOContext *h, int stream_in /** * @defgroup old_url_f_funcs Old url_f* functions - * @deprecated use the avio_ -prefixed functions instead. + * The following functions are deprecated, use the "avio_"-prefixed functions instead. * @{ */ attribute_deprecated int url_fopen( AVIOContext **s, const char *url, int flags); @@ -287,11 +287,7 @@ attribute_deprecated int64_t url_fsize(AVIOContext *s); #define URL_EOF (-1) attribute_deprecated int url_fgetc(AVIOContext *s); attribute_deprecated int url_setbufsize(AVIOContext *s, int buf_size); -#ifdef __GNUC__ -attribute_deprecated int url_fprintf(AVIOContext *s, const char *fmt, ...) __attribute__ ((__format__ (__printf__, 2, 3))); -#else -attribute_deprecated int url_fprintf(AVIOContext *s, const char *fmt, ...); -#endif +attribute_deprecated int url_fprintf(AVIOContext *s, const char *fmt, ...) av_printf_format(2, 3); attribute_deprecated void put_flush_packet(AVIOContext *s); attribute_deprecated int url_open_dyn_buf(AVIOContext **s); attribute_deprecated int url_open_dyn_packet_buf(AVIOContext **s, int max_packet_size); @@ -484,11 +480,7 @@ int64_t avio_size(AVIOContext *s); int url_feof(AVIOContext *s); /** @warning currently size is limited */ -#ifdef __GNUC__ -int avio_printf(AVIOContext *s, const char *fmt, ...) __attribute__ ((__format__ (__printf__, 2, 3))); -#else -int avio_printf(AVIOContext *s, const char *fmt, ...); -#endif +int avio_printf(AVIOContext *s, const char *fmt, ...) av_printf_format(2, 3); void avio_flush(AVIOContext *s); @@ -500,7 +492,7 @@ void avio_flush(AVIOContext *s); int avio_read(AVIOContext *s, unsigned char *buf, int size); /** - * @defgroup avio_read Functions for reading from AVIOContext. + * @name Functions for reading from AVIOContext * @{ * * @note return 0 if EOF, so you cannot use it if EOF handling is @@ -552,7 +544,7 @@ int url_resetbuf(AVIOContext *s, int flags); #endif /** - * @defgroup open_modes URL open modes + * @name URL open modes * The flags argument to avio_open must be one of the following * constants, optionally ORed with other flags. 
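The avio.h hunks above (and internal.h further down) replace the open-coded "#ifdef __GNUC__" format attribute with libavutil's av_printf_format() helper. The macro is roughly equivalent to the sketch below; the real definition lives in libavutil/attributes.h and may differ in detail:

/* Annotate printf-like functions so GCC/Clang can type-check the format
 * string; expands to nothing on compilers without the attribute. */
#ifdef __GNUC__
#    define av_printf_format(fmtpos, attrpos) \
         __attribute__((__format__(__printf__, fmtpos, attrpos)))
#else
#    define av_printf_format(fmtpos, attrpos)
#endif

/* Example: argument 2 is the format string, variadic args start at 3. */
int avio_printf_like(void *ctx, const char *fmt, ...) av_printf_format(2, 3);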
* @{ diff --git a/libavformat/aviobuf.c b/libavformat/aviobuf.c index c6cea6adfb..0bdff0f1af 100644 --- a/libavformat/aviobuf.c +++ b/libavformat/aviobuf.c @@ -849,19 +849,13 @@ int ffio_fdopen(AVIOContext **s, URLContext *h) if (!buffer) return AVERROR(ENOMEM); - *s = av_mallocz(sizeof(AVIOContext)); - if(!*s) { + *s = avio_alloc_context(buffer, buffer_size, (h->flags & AVIO_WRONLY || h->flags & AVIO_RDWR), h, + ffurl_read, ffurl_write, ffurl_seek); + if (!*s) { av_free(buffer); return AVERROR(ENOMEM); } - if (ffio_init_context(*s, buffer, buffer_size, - (h->flags & AVIO_WRONLY || h->flags & AVIO_RDWR), h, - (void*)ffurl_read, (void*)ffurl_write, (void*)ffurl_seek) < 0) { - av_free(buffer); - av_freep(s); - return AVERROR(EIO); - } #if FF_API_OLD_AVIO (*s)->is_streamed = h->is_streamed; #endif diff --git a/libavformat/avisynth.c b/libavformat/avisynth.c index 768459a38d..e41e1c0277 100644 --- a/libavformat/avisynth.c +++ b/libavformat/avisynth.c @@ -208,15 +208,12 @@ static int avisynth_read_seek(AVFormatContext *s, int stream_index, int64_t pts, } AVInputFormat ff_avisynth_demuxer = { - "avs", - NULL_IF_CONFIG_SMALL("AVISynth"), - sizeof(AVISynthContext), - NULL, - avisynth_read_header, - avisynth_read_packet, - avisynth_read_close, - avisynth_read_seek, - NULL, - 0, - "avs", + .name = "avs", + .long_name = NULL_IF_CONFIG_SMALL("AVISynth"), + .priv_data_size = sizeof(AVISynthContext), + .read_header = avisynth_read_header, + .read_packet = avisynth_read_packet, + .read_close = avisynth_read_close, + .read_seek = avisynth_read_seek, + .extensions = "avs", }; diff --git a/libavformat/avs.c b/libavformat/avs.c index 355ae31f35..89b2642c36 100644 --- a/libavformat/avs.c +++ b/libavformat/avs.c @@ -216,11 +216,11 @@ static int avs_read_close(AVFormatContext * s) } AVInputFormat ff_avs_demuxer = { - "avs", - NULL_IF_CONFIG_SMALL("AVS format"), - sizeof(AvsFormat), - avs_probe, - avs_read_header, - avs_read_packet, - avs_read_close, + .name = "avs", + .long_name = NULL_IF_CONFIG_SMALL("AVS format"), + .priv_data_size = sizeof(AvsFormat), + .read_probe = avs_probe, + .read_header = avs_read_header, + .read_packet = avs_read_packet, + .read_close = avs_read_close, }; diff --git a/libavformat/bethsoftvid.c b/libavformat/bethsoftvid.c index 399d627121..73d0035d34 100644 --- a/libavformat/bethsoftvid.c +++ b/libavformat/bethsoftvid.c @@ -23,8 +23,8 @@ * @file * @brief Bethesda Softworks VID (.vid) file demuxer * @author Nicholas Tung [ntung (at. 
ntung com] (2007-03) - * @sa http://wiki.multimedia.cx/index.php?title=Bethsoft_VID - * @sa http://www.svatopluk.com/andux/docs/dfvid.html + * @see http://wiki.multimedia.cx/index.php?title=Bethsoft_VID + * @see http://www.svatopluk.com/andux/docs/dfvid.html */ #include "libavutil/intreadwrite.h" @@ -220,15 +220,13 @@ static int vid_read_packet(AVFormatContext *s, av_log(s, AV_LOG_ERROR, "unknown block (character = %c, decimal = %d, hex = %x)!!!\n", block_type, block_type, block_type); return -1; } - - return 0; } AVInputFormat ff_bethsoftvid_demuxer = { - "bethsoftvid", - NULL_IF_CONFIG_SMALL("Bethesda Softworks VID format"), - sizeof(BVID_DemuxContext), - vid_probe, - vid_read_header, - vid_read_packet, + .name = "bethsoftvid", + .long_name = NULL_IF_CONFIG_SMALL("Bethesda Softworks VID format"), + .priv_data_size = sizeof(BVID_DemuxContext), + .read_probe = vid_probe, + .read_header = vid_read_header, + .read_packet = vid_read_packet, }; diff --git a/libavformat/bfi.c b/libavformat/bfi.c index 843ba9972e..05ace473d0 100644 --- a/libavformat/bfi.c +++ b/libavformat/bfi.c @@ -23,7 +23,7 @@ * @file * @brief Brute Force & Ignorance (.bfi) file demuxer * @author Sisir Koppaka ( sisir.koppaka at gmail dot com ) - * @sa http://wiki.multimedia.cx/index.php?title=BFI + * @see http://wiki.multimedia.cx/index.php?title=BFI */ #include "libavutil/intreadwrite.h" @@ -159,10 +159,10 @@ static int bfi_read_packet(AVFormatContext * s, AVPacket * pkt) } AVInputFormat ff_bfi_demuxer = { - "bfi", - NULL_IF_CONFIG_SMALL("Brute Force & Ignorance"), - sizeof(BFIContext), - bfi_probe, - bfi_read_header, - bfi_read_packet, + .name = "bfi", + .long_name = NULL_IF_CONFIG_SMALL("Brute Force & Ignorance"), + .priv_data_size = sizeof(BFIContext), + .read_probe = bfi_probe, + .read_header = bfi_read_header, + .read_packet = bfi_read_packet, }; diff --git a/libavformat/bink.c b/libavformat/bink.c index eed52cdb49..a54765ad18 100644 --- a/libavformat/bink.c +++ b/libavformat/bink.c @@ -134,15 +134,18 @@ static int read_header(AVFormatContext *s, AVFormatParameters *ap) if (!ast) return AVERROR(ENOMEM); ast->codec->codec_type = AVMEDIA_TYPE_AUDIO; + ast->codec->codec_tag = 0; ast->codec->sample_rate = avio_rl16(pb); av_set_pts_info(ast, 64, 1, ast->codec->sample_rate); flags = avio_rl16(pb); ast->codec->codec_id = flags & BINK_AUD_USEDCT ? CODEC_ID_BINKAUDIO_DCT : CODEC_ID_BINKAUDIO_RDFT; ast->codec->channels = flags & BINK_AUD_STEREO ? 
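The aviobuf.c change a little further up rewires ffio_fdopen() to use avio_alloc_context(), which is also the public entry point for plugging custom I/O into libavformat. A hedged, self-contained sketch of that API; only avio_alloc_context() itself is lavf API, the memory-reader callback and context are illustrative:

#include <string.h>
#include <libavformat/avio.h>
#include <libavutil/mem.h>

struct mem_reader { const uint8_t *data; int size, pos; };

/* read_packet callback with the signature avio_alloc_context() expects */
static int mem_read(void *opaque, uint8_t *buf, int buf_size)
{
    struct mem_reader *r = opaque;
    int n = r->size - r->pos;
    if (n <= 0)
        return -1;                       /* no more data */
    if (n > buf_size)
        n = buf_size;
    memcpy(buf, r->data + r->pos, n);
    r->pos += n;
    return n;
}

static AVIOContext *open_mem_reader(struct mem_reader *r)
{
    int buf_size = 4096;
    uint8_t *buf = av_malloc(buf_size);
    if (!buf)
        return NULL;
    /* write_flag = 0 (read only); no write or seek callbacks supplied */
    return avio_alloc_context(buf, buf_size, 0, r, mem_read, NULL, NULL);
}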
2 : 1; - ast->codec->extradata = av_mallocz(1 + FF_INPUT_BUFFER_PADDING_SIZE); - ast->codec->extradata_size = 1; - ast->codec->extradata[0] = vst->codec->codec_tag == MKTAG('B','I','K','b'); + ast->codec->extradata = av_mallocz(4 + FF_INPUT_BUFFER_PADDING_SIZE); + if (!ast->codec->extradata) + return AVERROR(ENOMEM); + ast->codec->extradata_size = 4; + AV_WL32(ast->codec->extradata, vst->codec->codec_tag); } for (i = 0; i < bink->num_audio_tracks; i++) @@ -261,12 +264,11 @@ static int read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, in } AVInputFormat ff_bink_demuxer = { - "bink", - NULL_IF_CONFIG_SMALL("Bink"), - sizeof(BinkDemuxContext), - probe, - read_header, - read_packet, - NULL, - read_seek, + .name = "bink", + .long_name = NULL_IF_CONFIG_SMALL("Bink"), + .priv_data_size = sizeof(BinkDemuxContext), + .read_probe = probe, + .read_header = read_header, + .read_packet = read_packet, + .read_seek = read_seek, }; diff --git a/libavformat/c93.c b/libavformat/c93.c index 270a09bf6b..adfa876e9a 100644 --- a/libavformat/c93.c +++ b/libavformat/c93.c @@ -193,10 +193,10 @@ static int read_packet(AVFormatContext *s, AVPacket *pkt) } AVInputFormat ff_c93_demuxer = { - "c93", - NULL_IF_CONFIG_SMALL("Interplay C93"), - sizeof(C93DemuxContext), - probe, - read_header, - read_packet, + .name = "c93", + .long_name = NULL_IF_CONFIG_SMALL("Interplay C93"), + .priv_data_size = sizeof(C93DemuxContext), + .read_probe = probe, + .read_header = read_header, + .read_packet = read_packet, }; diff --git a/libavformat/caf.c b/libavformat/caf.c index 907562a082..054533038f 100644 --- a/libavformat/caf.c +++ b/libavformat/caf.c @@ -32,30 +32,34 @@ * Known codec tags for CAF */ const AVCodecTag ff_codec_caf_tags[] = { - { CODEC_ID_AAC, MKBETAG('a','a','c',' ') }, - { CODEC_ID_AC3, MKBETAG('a','c','-','3') }, - { CODEC_ID_ALAC, MKBETAG('a','l','a','c') }, + { CODEC_ID_AAC, MKTAG('a','a','c',' ') }, + { CODEC_ID_AC3, MKTAG('a','c','-','3') }, + { CODEC_ID_ADPCM_IMA_QT, MKTAG('i','m','a','4') }, + { CODEC_ID_ADPCM_IMA_WAV, MKTAG('m','s', 0, 17 ) }, + { CODEC_ID_ADPCM_MS, MKTAG('m','s', 0, 2 ) }, + { CODEC_ID_ALAC, MKTAG('a','l','a','c') }, + { CODEC_ID_AMR_NB, MKTAG('s','a','m','r') }, /* FIXME: use DV demuxer, as done in MOV */ - /*{ CODEC_ID_DVAUDIO, MKBETAG('v','d','v','a') },*/ - /*{ CODEC_ID_DVAUDIO, MKBETAG('d','v','c','a') },*/ - { CODEC_ID_ADPCM_IMA_QT, MKBETAG('i','m','a','4') }, - { CODEC_ID_AMR_NB, MKBETAG('s','a','m','r') }, - { CODEC_ID_GSM, MKBETAG('a','g','s','m') }, - { CODEC_ID_MACE3, MKBETAG('M','A','C','3') }, - { CODEC_ID_MACE6, MKBETAG('M','A','C','6') }, - { CODEC_ID_MP3, MKBETAG('.','m','p','3') }, - { CODEC_ID_MP2, MKBETAG('.','m','p','2') }, - { CODEC_ID_MP1, MKBETAG('.','m','p','1') }, - { CODEC_ID_PCM_ALAW, MKBETAG('a','l','a','w') }, - { CODEC_ID_PCM_MULAW, MKBETAG('u','l','a','w') }, - { CODEC_ID_QCELP, MKBETAG('Q','c','l','p') }, - { CODEC_ID_QDM2, MKBETAG('Q','D','M','2') }, - { CODEC_ID_QDM2, MKBETAG('Q','D','M','C') }, + /*{ CODEC_ID_DVAUDIO, MKTAG('v','d','v','a') },*/ + /*{ CODEC_ID_DVAUDIO, MKTAG('d','v','c','a') },*/ + { CODEC_ID_GSM, MKTAG('a','g','s','m') }, + { CODEC_ID_GSM_MS, MKTAG('m','s', 0, '1') }, + { CODEC_ID_MACE3, MKTAG('M','A','C','3') }, + { CODEC_ID_MACE6, MKTAG('M','A','C','6') }, + { CODEC_ID_MP1, MKTAG('.','m','p','1') }, + { CODEC_ID_MP2, MKTAG('.','m','p','2') }, + { CODEC_ID_MP3, MKTAG('.','m','p','3') }, + { CODEC_ID_MP3, MKTAG('m','s', 0 ,'U') }, + { CODEC_ID_PCM_ALAW, MKTAG('a','l','a','w') }, + { CODEC_ID_PCM_MULAW, MKTAG('u','l','a','w') 
}, + { CODEC_ID_QCELP, MKTAG('Q','c','l','p') }, + { CODEC_ID_QDM2, MKTAG('Q','D','M','2') }, + { CODEC_ID_QDM2, MKTAG('Q','D','M','C') }, /* currently unsupported codecs */ - /*{ AC-3 over S/PDIF MKBETAG('c','a','c','3') },*/ - /*{ MPEG4CELP MKBETAG('c','e','l','p') },*/ - /*{ MPEG4HVXC MKBETAG('h','v','x','c') },*/ - /*{ MPEG4TwinVQ MKBETAG('t','w','v','q') },*/ + /*{ AC-3 over S/PDIF MKTAG('c','a','c','3') },*/ + /*{ MPEG4CELP MKTAG('c','e','l','p') },*/ + /*{ MPEG4HVXC MKTAG('h','v','x','c') },*/ + /*{ MPEG4TwinVQ MKTAG('t','w','v','q') },*/ { CODEC_ID_NONE, 0 }, }; diff --git a/libavformat/cafdec.c b/libavformat/cafdec.c index b24ff1d023..8b0daddd58 100644 --- a/libavformat/cafdec.c +++ b/libavformat/cafdec.c @@ -29,6 +29,7 @@ #include "riff.h" #include "isom.h" #include "libavutil/intreadwrite.h" +#include "libavutil/intfloat_readwrite.h" #include "libavutil/dict.h" #include "caf.h" @@ -67,7 +68,7 @@ static int read_desc_chunk(AVFormatContext *s) /* parse format description */ st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->sample_rate = av_int2dbl(avio_rb64(pb)); - st->codec->codec_tag = avio_rb32(pb); + st->codec->codec_tag = avio_rl32(pb); flags = avio_rb32(pb); caf->bytes_per_packet = avio_rb32(pb); st->codec->block_align = caf->bytes_per_packet; @@ -84,7 +85,7 @@ static int read_desc_chunk(AVFormatContext *s) } /* determine codec */ - if (st->codec->codec_tag == MKBETAG('l','p','c','m')) + if (st->codec->codec_tag == MKTAG('l','p','c','m')) st->codec->codec_id = ff_mov_get_lpcm_codec_id(st->codec->bits_per_coded_sample, (flags ^ 0x2) | 0x4); else st->codec->codec_id = ff_codec_get_id(ff_codec_caf_tags, st->codec->codec_tag); @@ -389,13 +390,12 @@ static int read_seek(AVFormatContext *s, int stream_index, } AVInputFormat ff_caf_demuxer = { - "caf", - NULL_IF_CONFIG_SMALL("Apple Core Audio Format"), - sizeof(CaffContext), - probe, - read_header, - read_packet, - NULL, - read_seek, + .name = "caf", + .long_name = NULL_IF_CONFIG_SMALL("Apple Core Audio Format"), + .priv_data_size = sizeof(CaffContext), + .read_probe = probe, + .read_header = read_header, + .read_packet = read_packet, + .read_seek = read_seek, .codec_tag = (const AVCodecTag*[]){ff_codec_caf_tags, 0}, }; diff --git a/libavformat/cafenc.c b/libavformat/cafenc.c index 13636f30ee..1c77251604 100644 --- a/libavformat/cafenc.c +++ b/libavformat/cafenc.c @@ -24,9 +24,14 @@ #include "riff.h" #include "isom.h" #include "avio_internal.h" +#include "libavutil/intfloat_readwrite.h" typedef struct { int64_t data; + uint8_t *pkt_sizes; + int size_buffer_size; + int size_entries_used; + int packets; } CAFContext; static uint32_t codec_flags(enum CodecID codec_id) { @@ -46,7 +51,7 @@ static uint32_t codec_flags(enum CodecID codec_id) { } } -static uint32_t samples_per_packet(enum CodecID codec_id) { +static uint32_t samples_per_packet(enum CodecID codec_id, int channels) { switch (codec_id) { case CODEC_ID_PCM_S8: case CODEC_ID_PCM_S16LE: @@ -71,6 +76,8 @@ static uint32_t samples_per_packet(enum CodecID codec_id) { case CODEC_ID_GSM: case CODEC_ID_QCELP: return 160; + case CODEC_ID_GSM_MS: + return 320; case CODEC_ID_MP1: return 384; case CODEC_ID_MP2: @@ -81,6 +88,10 @@ static uint32_t samples_per_packet(enum CodecID codec_id) { case CODEC_ID_ALAC: case CODEC_ID_QDM2: return 4096; + case CODEC_ID_ADPCM_IMA_WAV: + return (1024 - 4 * channels) * 8 / (4 * channels) + 1; + case CODEC_ID_ADPCM_MS: + return (1024 - 7 * channels) * 2 / channels + 2; default: return 0; } @@ -94,6 +105,17 @@ static int 
caf_write_header(AVFormatContext *s) unsigned int codec_tag = ff_codec_get_tag(ff_codec_caf_tags, enc->codec_id); switch (enc->codec_id) { + case CODEC_ID_AAC: + case CODEC_ID_AC3: + case CODEC_ID_ALAC: + case CODEC_ID_AMR_NB: + case CODEC_ID_QCELP: + case CODEC_ID_QDM2: + av_log(s, AV_LOG_ERROR, "muxing codec currently unsupported\n"); + return AVERROR_PATCHWELCOME; + } + + switch (enc->codec_id) { case CODEC_ID_PCM_S8: case CODEC_ID_PCM_S16LE: case CODEC_ID_PCM_S16BE: @@ -107,7 +129,7 @@ static int caf_write_header(AVFormatContext *s) case CODEC_ID_PCM_F64BE: case CODEC_ID_PCM_ALAW: case CODEC_ID_PCM_MULAW: - codec_tag = MKBETAG('l','p','c','m'); + codec_tag = MKTAG('l','p','c','m'); } if (!codec_tag) { @@ -115,9 +137,9 @@ static int caf_write_header(AVFormatContext *s) return AVERROR_INVALIDDATA; } - if (!enc->block_align) { - av_log(s, AV_LOG_ERROR, "muxing with unknown or variable packet size not yet supported\n"); - return AVERROR_PATCHWELCOME; + if (!enc->block_align && !pb->seekable) { + av_log(s, AV_LOG_ERROR, "Muxing variable packet size not supported on non seekable output\n"); + return AVERROR_INVALIDDATA; } ffio_wfourcc(pb, "caff"); //< mFileType @@ -127,12 +149,12 @@ static int caf_write_header(AVFormatContext *s) ffio_wfourcc(pb, "desc"); //< Audio Description chunk avio_wb64(pb, 32); //< mChunkSize avio_wb64(pb, av_dbl2int(enc->sample_rate)); //< mSampleRate - avio_wb32(pb, codec_tag); //< mFormatID + avio_wl32(pb, codec_tag); //< mFormatID avio_wb32(pb, codec_flags(enc->codec_id)); //< mFormatFlags avio_wb32(pb, enc->block_align); //< mBytesPerPacket - avio_wb32(pb, samples_per_packet(enc->codec_id)); //< mFramesPerPacket + avio_wb32(pb, samples_per_packet(enc->codec_id, enc->channels)); //< mFramesPerPacket avio_wb32(pb, enc->channels); //< mChannelsPerFrame - avio_wb32(pb, enc->bits_per_coded_sample); //< mBitsPerChannel + avio_wb32(pb, av_get_bits_per_sample(enc->codec_id)); //< mBitsPerChannel if (enc->channel_layout) { ffio_wfourcc(pb, "chan"); @@ -151,13 +173,38 @@ static int caf_write_header(AVFormatContext *s) static int caf_write_packet(AVFormatContext *s, AVPacket *pkt) { + CAFContext *caf = s->priv_data; + avio_write(s->pb, pkt->data, pkt->size); + if (!s->streams[0]->codec->block_align) { + void *pkt_sizes = caf->pkt_sizes; + int i, alloc_size = caf->size_entries_used + 5; + if (alloc_size < 0) { + caf->pkt_sizes = NULL; + } else { + caf->pkt_sizes = av_fast_realloc(caf->pkt_sizes, + &caf->size_buffer_size, + alloc_size); + } + if (!caf->pkt_sizes) { + av_free(pkt_sizes); + return AVERROR(ENOMEM); + } + for (i = 4; i > 0; i--) { + unsigned top = pkt->size >> i * 7; + if (top) + caf->pkt_sizes[caf->size_entries_used++] = 128 | top; + } + caf->pkt_sizes[caf->size_entries_used++] = pkt->size & 127; + caf->packets++; + } return 0; } static int caf_write_trailer(AVFormatContext *s) { AVIOContext *pb = s->pb; + AVCodecContext *enc = s->streams[0]->codec; if (pb->seekable) { CAFContext *caf = s->priv_data; @@ -166,6 +213,17 @@ static int caf_write_trailer(AVFormatContext *s) avio_seek(pb, caf->data, SEEK_SET); avio_wb64(pb, file_size - caf->data - 8); avio_seek(pb, file_size, SEEK_SET); + if (!enc->block_align) { + ffio_wfourcc(pb, "pakt"); + avio_wb64(pb, caf->size_entries_used + 24); + avio_wb64(pb, caf->packets); ///< mNumberPackets + avio_wb64(pb, caf->packets * samples_per_packet(enc->codec_id, enc->channels)); ///< mNumberValidFrames + avio_wb32(pb, 0); ///< mPrimingFrames + avio_wb32(pb, 0); ///< mRemainderFrames + avio_write(pb, caf->pkt_sizes, 
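caf_write_packet() above records each packet size as a big-endian base-128 number (continuation bit set on all bytes except the last), and caf_write_trailer() later dumps the collected sizes into a 'pakt' chunk. A cleaned-up, stand-alone sketch of the same scheme with a matching decoder, outside of any lavf structures:

#include <stdint.h>

/* Encode 'size' as big-endian groups of 7 bits, 0x80 continuation bit on
 * every byte except the last.  Returns the number of bytes written (<=5). */
static int caf_varint_encode(uint8_t *out, uint32_t size)
{
    int i, n = 0;
    for (i = 4; i > 0; i--) {
        unsigned top = size >> (i * 7);
        if (top || n)                        /* skip leading zero groups */
            out[n++] = 0x80 | (top & 0x7f);
    }
    out[n++] = size & 0x7f;                  /* final byte, high bit clear */
    return n;
}

static uint32_t caf_varint_decode(const uint8_t *in, int *consumed)
{
    uint32_t v = 0;
    int n = 0;
    do {
        v = (v << 7) | (in[n] & 0x7f);       /* accumulate 7 bits per byte */
    } while (in[n++] & 0x80);
    *consumed = n;
    return v;
}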
caf->size_entries_used); + av_freep(&caf->pkt_sizes); + caf->size_buffer_size = 0; + } avio_flush(pb); } return 0; diff --git a/libavformat/cdg.c b/libavformat/cdg.c index 8fa38f74e9..b547bbeebe 100644 --- a/libavformat/cdg.c +++ b/libavformat/cdg.c @@ -56,11 +56,9 @@ static int read_packet(AVFormatContext *s, AVPacket *pkt) } AVInputFormat ff_cdg_demuxer = { - "cdg", - NULL_IF_CONFIG_SMALL("CD Graphics Format"), - 0, - NULL, - read_header, - read_packet, + .name = "cdg", + .long_name = NULL_IF_CONFIG_SMALL("CD Graphics Format"), + .read_header = read_header, + .read_packet = read_packet, .extensions = "cdg" }; diff --git a/libavformat/crcenc.c b/libavformat/crcenc.c index b5fed3918d..f596e665d1 100644 --- a/libavformat/crcenc.c +++ b/libavformat/crcenc.c @@ -55,14 +55,13 @@ static int crc_write_trailer(struct AVFormatContext *s) } AVOutputFormat ff_crc_muxer = { - "crc", - NULL_IF_CONFIG_SMALL("CRC testing format"), - NULL, - "", - sizeof(CRCState), - CODEC_ID_PCM_S16LE, - CODEC_ID_RAWVIDEO, - crc_write_header, - crc_write_packet, - crc_write_trailer, + .name = "crc", + .long_name = NULL_IF_CONFIG_SMALL("CRC testing format"), + .extensions = "", + .priv_data_size = sizeof(CRCState), + .audio_codec = CODEC_ID_PCM_S16LE, + .video_codec = CODEC_ID_RAWVIDEO, + .write_header = crc_write_header, + .write_packet = crc_write_packet, + .write_trailer = crc_write_trailer, }; diff --git a/libavformat/daud.c b/libavformat/daud.c index 6a36d28acb..66a20c347f 100644 --- a/libavformat/daud.c +++ b/libavformat/daud.c @@ -71,14 +71,10 @@ static int daud_write_packet(struct AVFormatContext *s, AVPacket *pkt) #if CONFIG_DAUD_DEMUXER AVInputFormat ff_daud_demuxer = { - "daud", - NULL_IF_CONFIG_SMALL("D-Cinema audio format"), - 0, - NULL, - daud_header, - daud_packet, - NULL, - NULL, + .name = "daud", + .long_name = NULL_IF_CONFIG_SMALL("D-Cinema audio format"), + .read_header = daud_header, + .read_packet = daud_packet, .extensions = "302", }; #endif diff --git a/libavformat/dfa.c b/libavformat/dfa.c index c1ee55cfb2..12ede0426d 100644 --- a/libavformat/dfa.c +++ b/libavformat/dfa.c @@ -109,11 +109,10 @@ static int dfa_read_packet(AVFormatContext *s, AVPacket *pkt) } AVInputFormat ff_dfa_demuxer = { - "dfa", - NULL_IF_CONFIG_SMALL("Chronomaster DFA"), - 0, - dfa_probe, - dfa_read_header, - dfa_read_packet, + .name = "dfa", + .long_name = NULL_IF_CONFIG_SMALL("Chronomaster DFA"), + .read_probe = dfa_probe, + .read_header = dfa_read_header, + .read_packet = dfa_read_packet, .flags = AVFMT_GENERIC_INDEX, }; diff --git a/libavformat/dsicin.c b/libavformat/dsicin.c index 22233731c5..d11e285113 100644 --- a/libavformat/dsicin.c +++ b/libavformat/dsicin.c @@ -217,10 +217,10 @@ static int cin_read_packet(AVFormatContext *s, AVPacket *pkt) } AVInputFormat ff_dsicin_demuxer = { - "dsicin", - NULL_IF_CONFIG_SMALL("Delphine Software International CIN format"), - sizeof(CinDemuxContext), - cin_probe, - cin_read_header, - cin_read_packet, + .name = "dsicin", + .long_name = NULL_IF_CONFIG_SMALL("Delphine Software International CIN format"), + .priv_data_size = sizeof(CinDemuxContext), + .read_probe = cin_probe, + .read_header = cin_read_header, + .read_packet = cin_read_packet, }; diff --git a/libavformat/dtsdec.c b/libavformat/dtsdec.c index b203597283..e762b85e19 100644 --- a/libavformat/dtsdec.c +++ b/libavformat/dtsdec.c @@ -66,12 +66,11 @@ static int dts_probe(AVProbeData *p) } AVInputFormat ff_dts_demuxer = { - "dts", - NULL_IF_CONFIG_SMALL("raw DTS"), - 0, - dts_probe, - ff_raw_audio_read_header, - 
ff_raw_read_partial_packet, + .name = "dts", + .long_name = NULL_IF_CONFIG_SMALL("raw DTS"), + .read_probe = dts_probe, + .read_header = ff_raw_audio_read_header, + .read_packet = ff_raw_read_partial_packet, .flags= AVFMT_GENERIC_INDEX, .extensions = "dts", .value = CODEC_ID_DTS, diff --git a/libavformat/dv.c b/libavformat/dv.c index 750c950df8..becea54813 100644 --- a/libavformat/dv.c +++ b/libavformat/dv.c @@ -32,6 +32,7 @@ #include "avformat.h" #include "libavcodec/dvdata.h" #include "libavutil/intreadwrite.h" +#include "libavutil/mathematics.h" #include "dv.h" struct DVDemuxContext { @@ -522,14 +523,14 @@ static int dv_probe(AVProbeData *p) #if CONFIG_DV_DEMUXER AVInputFormat ff_dv_demuxer = { - "dv", - NULL_IF_CONFIG_SMALL("DV video format"), - sizeof(RawDVContext), - dv_probe, - dv_read_header, - dv_read_packet, - dv_read_close, - dv_read_seek, + .name = "dv", + .long_name = NULL_IF_CONFIG_SMALL("DV video format"), + .priv_data_size = sizeof(RawDVContext), + .read_probe = dv_probe, + .read_header = dv_read_header, + .read_packet = dv_read_packet, + .read_close = dv_read_close, + .read_seek = dv_read_seek, .extensions = "dv,dif", }; #endif diff --git a/libavformat/dvenc.c b/libavformat/dvenc.c index 0b80835fae..ff816b6c3f 100644 --- a/libavformat/dvenc.c +++ b/libavformat/dvenc.c @@ -35,6 +35,7 @@ #include "libavcodec/dvdata.h" #include "dv.h" #include "libavutil/fifo.h" +#include "libavutil/mathematics.h" struct DVMuxContext { const DVprofile* sys; /* current DV profile, e.g.: 525/60, 625/50 */ @@ -42,7 +43,7 @@ struct DVMuxContext { AVStream *ast[2]; /* stereo audio streams */ AVFifoBuffer *audio_data[2]; /* FIFO for storing excessive amounts of PCM */ int frames; /* current frame number */ - time_t start_time; /* recording start time */ + int64_t start_time; /* recording start time */ int has_audio; /* frame under contruction has audio */ int has_video; /* frame under contruction has video */ uint8_t frame_buf[DV_MAX_FRAME_SIZE]; /* frame under contruction */ @@ -289,6 +290,7 @@ static DVMuxContext* dv_init_mux(AVFormatContext* s) { DVMuxContext *c = s->priv_data; AVStream *vst = NULL; + AVDictionaryEntry *t; int i; /* we support at most 1 video and 2 audio streams */ @@ -336,7 +338,13 @@ static DVMuxContext* dv_init_mux(AVFormatContext* s) c->frames = 0; c->has_audio = 0; c->has_video = 0; - c->start_time = (time_t)s->timestamp; +#if FF_API_TIMESTAMP + if (s->timestamp) + c->start_time = s->timestamp; + else +#endif + if (t = av_dict_get(s->metadata, "creation_time", NULL, 0)) + c->start_time = ff_iso8601_to_unix_time(t->value); for (i=0; i < c->n_ast; i++) { if (c->ast[i] && !(c->audio_data[i]=av_fifo_alloc(100*AVCODEC_MAX_AUDIO_FRAME_SIZE))) { @@ -400,14 +408,13 @@ static int dv_write_trailer(struct AVFormatContext *s) } AVOutputFormat ff_dv_muxer = { - "dv", - NULL_IF_CONFIG_SMALL("DV video format"), - NULL, - "dv", - sizeof(DVMuxContext), - CODEC_ID_PCM_S16LE, - CODEC_ID_DVVIDEO, - dv_write_header, - dv_write_packet, - dv_write_trailer, + .name = "dv", + .long_name = NULL_IF_CONFIG_SMALL("DV video format"), + .extensions = "dv", + .priv_data_size = sizeof(DVMuxContext), + .audio_codec = CODEC_ID_PCM_S16LE, + .video_codec = CODEC_ID_DVVIDEO, + .write_header = dv_write_header, + .write_packet = dv_write_packet, + .write_trailer = dv_write_trailer, }; diff --git a/libavformat/dxa.c b/libavformat/dxa.c index 15147a7a8d..930ac6f44b 100644 --- a/libavformat/dxa.c +++ b/libavformat/dxa.c @@ -213,10 +213,10 @@ static int dxa_read_packet(AVFormatContext *s, AVPacket *pkt) } 
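Most of the per-file churn in this merge is the same mechanical conversion: positional AVInputFormat/AVOutputFormat initializers become C99 designated initializers, so fields that keep their default zero value can simply be omitted and the remaining assignments no longer depend on member order. The pattern, reduced to a toy struct:

/* Toy illustration of the conversion applied to the (de)muxer tables. */
struct demuxer {
    const char *name;
    const char *long_name;
    int         priv_data_size;
    int       (*read_probe)(void);
    int       (*read_header)(void);
    int       (*read_packet)(void);
    const char *extensions;
};

static int probe(void)       { return 0; }
static int read_header(void) { return 0; }
static int read_packet(void) { return 0; }

/* Old style: every field up to the last one used must be spelled out in
 * declaration order, with explicit 0/NULL placeholders. */
static const struct demuxer positional = {
    "toy", "toy demuxer", 0, probe, read_header, read_packet, "toy",
};

/* New style: fields are named, order no longer matters, and unused
 * members default to zero. */
static const struct demuxer designated = {
    .name        = "toy",
    .long_name   = "toy demuxer",
    .read_probe  = probe,
    .read_header = read_header,
    .read_packet = read_packet,
    .extensions  = "toy",
};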
AVInputFormat ff_dxa_demuxer = { - "dxa", - NULL_IF_CONFIG_SMALL("DXA"), - sizeof(DXAContext), - dxa_probe, - dxa_read_header, - dxa_read_packet, + .name = "dxa", + .long_name = NULL_IF_CONFIG_SMALL("DXA"), + .priv_data_size = sizeof(DXAContext), + .read_probe = dxa_probe, + .read_header = dxa_read_header, + .read_packet = dxa_read_packet, }; diff --git a/libavformat/eacdata.c b/libavformat/eacdata.c index ef474b3604..01acaca35d 100644 --- a/libavformat/eacdata.c +++ b/libavformat/eacdata.c @@ -95,11 +95,11 @@ static int cdata_read_packet(AVFormatContext *s, AVPacket *pkt) } AVInputFormat ff_ea_cdata_demuxer = { - "ea_cdata", - NULL_IF_CONFIG_SMALL("Electronic Arts cdata"), - sizeof(CdataDemuxContext), - cdata_probe, - cdata_read_header, - cdata_read_packet, + .name = "ea_cdata", + .long_name = NULL_IF_CONFIG_SMALL("Electronic Arts cdata"), + .priv_data_size = sizeof(CdataDemuxContext), + .read_probe = cdata_probe, + .read_header = cdata_read_header, + .read_packet = cdata_read_packet, .extensions = "cdata", }; diff --git a/libavformat/electronicarts.c b/libavformat/electronicarts.c index 0b882aac87..f561319a2b 100644 --- a/libavformat/electronicarts.c +++ b/libavformat/electronicarts.c @@ -571,10 +571,10 @@ get_video_packet: } AVInputFormat ff_ea_demuxer = { - "ea", - NULL_IF_CONFIG_SMALL("Electronic Arts Multimedia Format"), - sizeof(EaDemuxContext), - ea_probe, - ea_read_header, - ea_read_packet, + .name = "ea", + .long_name = NULL_IF_CONFIG_SMALL("Electronic Arts Multimedia Format"), + .priv_data_size = sizeof(EaDemuxContext), + .read_probe = ea_probe, + .read_header = ea_read_header, + .read_packet = ea_read_packet, }; diff --git a/libavformat/ffmdec.c b/libavformat/ffmdec.c index e776d0c384..2fd154fa25 100644 --- a/libavformat/ffmdec.c +++ b/libavformat/ffmdec.c @@ -20,6 +20,7 @@ */ #include "libavutil/intreadwrite.h" +#include "libavutil/intfloat_readwrite.h" #include "avformat.h" #include "ffm.h" #if CONFIG_FFSERVER @@ -300,7 +301,6 @@ static int ffm_read_header(AVFormatContext *s, AVFormatParameters *ap) codec->codec_id = avio_rb32(pb); codec->codec_type = avio_r8(pb); /* codec_type */ codec->bit_rate = avio_rb32(pb); - st->quality = avio_rb32(pb); codec->flags = avio_rb32(pb); codec->flags2 = avio_rb32(pb); codec->debug = avio_rb32(pb); @@ -508,12 +508,12 @@ static int ffm_probe(AVProbeData *p) } AVInputFormat ff_ffm_demuxer = { - "ffm", - NULL_IF_CONFIG_SMALL("FFM (FFserver live feed) format"), - sizeof(FFMContext), - ffm_probe, - ffm_read_header, - ffm_read_packet, - ffm_close, - ffm_seek, + .name = "ffm", + .long_name = NULL_IF_CONFIG_SMALL("FFM (FFserver live feed) format"), + .priv_data_size = sizeof(FFMContext), + .read_probe = ffm_probe, + .read_header = ffm_read_header, + .read_packet = ffm_read_packet, + .read_close = ffm_close, + .read_seek = ffm_seek, }; diff --git a/libavformat/ffmenc.c b/libavformat/ffmenc.c index 8071045467..99d2e98dbf 100644 --- a/libavformat/ffmenc.c +++ b/libavformat/ffmenc.c @@ -20,6 +20,7 @@ */ #include "libavutil/intreadwrite.h" +#include "libavutil/intfloat_readwrite.h" #include "avformat.h" #include "ffm.h" @@ -113,7 +114,6 @@ static int ffm_write_header(AVFormatContext *s) avio_wb32(pb, codec->codec_id); avio_w8(pb, codec->codec_type); avio_wb32(pb, codec->bit_rate); - avio_wb32(pb, st->quality); avio_wb32(pb, codec->flags); avio_wb32(pb, codec->flags2); avio_wb32(pb, codec->debug); @@ -241,15 +241,14 @@ static int ffm_write_trailer(AVFormatContext *s) } AVOutputFormat ff_ffm_muxer = { - "ffm", - NULL_IF_CONFIG_SMALL("FFM (FFserver live 
feed) format"), - "", - "ffm", - sizeof(FFMContext), - /* not really used */ - CODEC_ID_MP2, - CODEC_ID_MPEG1VIDEO, - ffm_write_header, - ffm_write_packet, - ffm_write_trailer, + .name = "ffm", + .long_name = NULL_IF_CONFIG_SMALL("FFM (FFserver live feed) format"), + .mime_type = "", + .extensions = "ffm", + .priv_data_size = sizeof(FFMContext), + .audio_codec = CODEC_ID_MP2, + .video_codec = CODEC_ID_MPEG1VIDEO, + .write_header = ffm_write_header, + .write_packet = ffm_write_packet, + .write_trailer = ffm_write_trailer, }; diff --git a/libavformat/ffmetadec.c b/libavformat/ffmetadec.c index 0063e6eafa..4da9dfdaa6 100644 --- a/libavformat/ffmetadec.c +++ b/libavformat/ffmetadec.c @@ -19,6 +19,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +#include "libavutil/mathematics.h" #include "avformat.h" #include "ffmeta.h" #include "internal.h" diff --git a/libavformat/filmstripdec.c b/libavformat/filmstripdec.c index 60c17819e8..3963b53c0a 100644 --- a/libavformat/filmstripdec.c +++ b/libavformat/filmstripdec.c @@ -99,13 +99,11 @@ static int read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, in } AVInputFormat ff_filmstrip_demuxer = { - "filmstrip", - NULL_IF_CONFIG_SMALL("Adobe Filmstrip"), - sizeof(FilmstripDemuxContext), - NULL, - read_header, - read_packet, - NULL, - read_seek, + .name = "filmstrip", + .long_name = NULL_IF_CONFIG_SMALL("Adobe Filmstrip"), + .priv_data_size = sizeof(FilmstripDemuxContext), + .read_header = read_header, + .read_packet = read_packet, + .read_seek = read_seek, .extensions = "flm", }; diff --git a/libavformat/filmstripenc.c b/libavformat/filmstripenc.c index 9bbc546eaf..3862cb1dba 100644 --- a/libavformat/filmstripenc.c +++ b/libavformat/filmstripenc.c @@ -72,14 +72,13 @@ static int write_trailer(AVFormatContext *s) } AVOutputFormat ff_filmstrip_muxer = { - "filmstrip", - NULL_IF_CONFIG_SMALL("Adobe Filmstrip"), - NULL, - "flm", - sizeof(FilmstripMuxContext), - CODEC_ID_NONE, - CODEC_ID_RAWVIDEO, - write_header, - write_packet, - write_trailer, + .name = "filmstrip", + .long_name = NULL_IF_CONFIG_SMALL("Adobe Filmstrip"), + .extensions = "flm", + .priv_data_size = sizeof(FilmstripMuxContext), + .audio_codec = CODEC_ID_NONE, + .video_codec = CODEC_ID_RAWVIDEO, + .write_header = write_header, + .write_packet = write_packet, + .write_trailer = write_trailer, }; diff --git a/libavformat/flacdec.c b/libavformat/flacdec.c index 3dd3e1f70f..73308f45f6 100644 --- a/libavformat/flacdec.c +++ b/libavformat/flacdec.c @@ -124,12 +124,11 @@ static int flac_probe(AVProbeData *p) } AVInputFormat ff_flac_demuxer = { - "flac", - NULL_IF_CONFIG_SMALL("raw FLAC"), - 0, - flac_probe, - flac_read_header, - ff_raw_read_partial_packet, + .name = "flac", + .long_name = NULL_IF_CONFIG_SMALL("raw FLAC"), + .read_probe = flac_probe, + .read_header = flac_read_header, + .read_packet = ff_raw_read_partial_packet, .flags= AVFMT_GENERIC_INDEX, .extensions = "flac", .value = CODEC_ID_FLAC, diff --git a/libavformat/flacenc.c b/libavformat/flacenc.c index 24e0287ef8..617b8f849e 100644 --- a/libavformat/flacenc.c +++ b/libavformat/flacenc.c @@ -118,15 +118,14 @@ static int flac_write_packet(struct AVFormatContext *s, AVPacket *pkt) } AVOutputFormat ff_flac_muxer = { - "flac", - NULL_IF_CONFIG_SMALL("raw FLAC"), - "audio/x-flac", - "flac", - 0, - CODEC_ID_FLAC, - CODEC_ID_NONE, - flac_write_header, - flac_write_packet, - flac_write_trailer, + .name = "flac", + .long_name = NULL_IF_CONFIG_SMALL("raw FLAC"), + .mime_type = "audio/x-flac", + 
.extensions = "flac", + .audio_codec = CODEC_ID_FLAC, + .video_codec = CODEC_ID_NONE, + .write_header = flac_write_header, + .write_packet = flac_write_packet, + .write_trailer = flac_write_trailer, .flags= AVFMT_NOTIMESTAMPS, }; diff --git a/libavformat/flic.c b/libavformat/flic.c index d6aadb7730..bcef1fd663 100644 --- a/libavformat/flic.c +++ b/libavformat/flic.c @@ -261,10 +261,10 @@ static int flic_read_packet(AVFormatContext *s, } AVInputFormat ff_flic_demuxer = { - "flic", - NULL_IF_CONFIG_SMALL("FLI/FLC/FLX animation format"), - sizeof(FlicDemuxContext), - flic_probe, - flic_read_header, - flic_read_packet, + .name = "flic", + .long_name = NULL_IF_CONFIG_SMALL("FLI/FLC/FLX animation format"), + .priv_data_size = sizeof(FlicDemuxContext), + .read_probe = flic_probe, + .read_header = flic_read_header, + .read_packet = flic_read_packet, }; diff --git a/libavformat/flv.h b/libavformat/flv.h index 497729adc6..d0731c80a3 100644 --- a/libavformat/flv.h +++ b/libavformat/flv.h @@ -1,5 +1,4 @@ -/** - * @file +/* * FLV common header * * Copyright (c) 2006 The FFmpeg Project @@ -21,6 +20,11 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * FLV common header + */ + #ifndef AVFORMAT_FLV_H #define AVFORMAT_FLV_H diff --git a/libavformat/flvdec.c b/libavformat/flvdec.c index 4fb562f4e2..d3e3d77fce 100644 --- a/libavformat/flvdec.c +++ b/libavformat/flvdec.c @@ -26,6 +26,8 @@ #include "libavutil/avstring.h" #include "libavutil/dict.h" +#include "libavutil/intfloat_readwrite.h" +#include "libavutil/mathematics.h" #include "libavcodec/bytestream.h" #include "libavcodec/mpeg4audio.h" #include "avformat.h" @@ -558,12 +560,12 @@ static int flv_read_seek2(AVFormatContext *s, int stream_index, #endif AVInputFormat ff_flv_demuxer = { - "flv", - NULL_IF_CONFIG_SMALL("FLV format"), - sizeof(FLVContext), - flv_probe, - flv_read_header, - flv_read_packet, + .name = "flv", + .long_name = NULL_IF_CONFIG_SMALL("FLV format"), + .priv_data_size = sizeof(FLVContext), + .read_probe = flv_probe, + .read_header = flv_read_header, + .read_packet = flv_read_packet, .read_seek = flv_read_seek, #if 0 .read_seek2 = flv_read_seek2, diff --git a/libavformat/flvenc.c b/libavformat/flvenc.c index cea0d02e3c..d8d915e269 100644 --- a/libavformat/flvenc.c +++ b/libavformat/flvenc.c @@ -20,6 +20,7 @@ */ #include "libavutil/intreadwrite.h" +#include "libavutil/intfloat_readwrite.h" #include "avformat.h" #include "flv.h" #include "internal.h" @@ -445,20 +446,20 @@ static int flv_write_packet(AVFormatContext *s, AVPacket *pkt) } AVOutputFormat ff_flv_muxer = { - "flv", - NULL_IF_CONFIG_SMALL("FLV format"), - "video/x-flv", - "flv", - sizeof(FLVContext), + .name = "flv", + .long_name = NULL_IF_CONFIG_SMALL("FLV format"), + .mime_type = "video/x-flv", + .extensions = "flv", + .priv_data_size = sizeof(FLVContext), #if CONFIG_LIBMP3LAME - CODEC_ID_MP3, + .audio_codec = CODEC_ID_MP3, #else // CONFIG_LIBMP3LAME - CODEC_ID_ADPCM_SWF, + .audio_codec = CODEC_ID_ADPCM_SWF, #endif // CONFIG_LIBMP3LAME - CODEC_ID_FLV1, - flv_write_header, - flv_write_packet, - flv_write_trailer, + .video_codec = CODEC_ID_FLV1, + .write_header = flv_write_header, + .write_packet = flv_write_packet, + .write_trailer = flv_write_trailer, .codec_tag= (const AVCodecTag* const []){flv_video_codec_ids, flv_audio_codec_ids, 0}, .flags= AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS, }; diff --git a/libavformat/framecrcenc.c b/libavformat/framecrcenc.c index dcdfac882a..65ca670bbd 100644 --- a/libavformat/framecrcenc.c +++ 
b/libavformat/framecrcenc.c @@ -34,14 +34,10 @@ static int framecrc_write_packet(struct AVFormatContext *s, AVPacket *pkt) } AVOutputFormat ff_framecrc_muxer = { - "framecrc", - NULL_IF_CONFIG_SMALL("framecrc testing format"), - NULL, - "", - 0, - CODEC_ID_PCM_S16LE, - CODEC_ID_RAWVIDEO, - NULL, - framecrc_write_packet, - NULL, + .name = "framecrc", + .long_name = NULL_IF_CONFIG_SMALL("framecrc testing format"), + .extensions = "", + .audio_codec = CODEC_ID_PCM_S16LE, + .video_codec = CODEC_ID_RAWVIDEO, + .write_packet = framecrc_write_packet, }; diff --git a/libavformat/gif.c b/libavformat/gif.c index 0960705cf3..280a663690 100644 --- a/libavformat/gif.c +++ b/libavformat/gif.c @@ -40,6 +40,8 @@ */ #include "avformat.h" +#include "libavutil/log.h" +#include "libavutil/opt.h" /* The GIF format uses reversed order for bitstreams... */ /* at least they don't use PDP_ENDIAN :) */ @@ -245,8 +247,10 @@ static int gif_image_write_image(AVIOContext *pb, } typedef struct { + AVClass *class; /** Class for private options. */ int64_t time, file_time; uint8_t buffer[100]; /* data chunks */ + int loop; } GIFContext; static int gif_write_header(AVFormatContext *s) @@ -254,7 +258,7 @@ static int gif_write_header(AVFormatContext *s) GIFContext *gif = s->priv_data; AVIOContext *pb = s->pb; AVCodecContext *enc, *video_enc; - int i, width, height, loop_count /*, rate*/; + int i, width, height /*, rate*/; /* XXX: do we reject audio streams or just ignore them ? if(s->nb_streams > 1) @@ -276,7 +280,6 @@ static int gif_write_header(AVFormatContext *s) } else { width = video_enc->width; height = video_enc->height; - loop_count = s->loop_output; // rate = video_enc->time_base.den; } @@ -285,7 +288,12 @@ static int gif_write_header(AVFormatContext *s) return AVERROR(EIO); } - gif_image_write_header(pb, width, height, loop_count, NULL); +#if FF_API_LOOP_OUTPUT + if (s->loop_output) + gif->loop = s->loop_output; +#endif + + gif_image_write_header(pb, width, height, gif->loop, NULL); avio_flush(s->pb); return 0; @@ -340,15 +348,30 @@ static int gif_write_trailer(AVFormatContext *s) return 0; } +#define OFFSET(x) offsetof(GIFContext, x) +#define ENC AV_OPT_FLAG_ENCODING_PARAM +static const AVOption options[] = { + { "loop", "Number of times to loop the output.", OFFSET(loop), FF_OPT_TYPE_INT, {0}, 0, 65535, ENC }, + { NULL }, +}; + +static const AVClass gif_muxer_class = { + .class_name = "GIF muxer", + .item_name = av_default_item_name, + .version = LIBAVUTIL_VERSION_INT, + .option = options, +}; + AVOutputFormat ff_gif_muxer = { - "gif", - NULL_IF_CONFIG_SMALL("GIF Animation"), - "image/gif", - "gif", - sizeof(GIFContext), - CODEC_ID_NONE, - CODEC_ID_RAWVIDEO, - gif_write_header, - gif_write_packet, - gif_write_trailer, + .name = "gif", + .long_name = NULL_IF_CONFIG_SMALL("GIF Animation"), + .mime_type = "image/gif", + .extensions = "gif", + .priv_data_size = sizeof(GIFContext), + .audio_codec = CODEC_ID_NONE, + .video_codec = CODEC_ID_RAWVIDEO, + .write_header = gif_write_header, + .write_packet = gif_write_packet, + .write_trailer = gif_write_trailer, + .priv_class = &gif_muxer_class, }; diff --git a/libavformat/gxf.c b/libavformat/gxf.c index e278b9b846..898dce7eb5 100644 --- a/libavformat/gxf.c +++ b/libavformat/gxf.c @@ -32,11 +32,11 @@ struct gxf_stream_info { }; /** - * \brief parses a packet header, extracting type and length - * \param pb AVIOContext to read header from - * \param type detected packet type is stored here - * \param length detected packet length, excluding header is stored here - * \return 
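The gif.c hunk above moves the loop count from the deprecated AVFormatContext.loop_output field into a muxer-private AVOption backed by an AVClass, with the field located via offsetof() into the private context. A toy re-implementation of that offset-based option-table idea; this is not the libavutil API, just the pattern it relies on:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct {
    int64_t time;
    int     loop;                       /* exposed as the "loop" option */
} toy_gif_ctx;

typedef struct {
    const char *name;
    size_t      offset;                 /* where the field lives in the ctx */
    int         min, max;
} toy_option;

#define OFFSET(x) offsetof(toy_gif_ctx, x)
static const toy_option options[] = {
    { "loop", OFFSET(loop), 0, 65535 },
    { NULL },
};

static int toy_set_int(void *ctx, const char *name, int val)
{
    const toy_option *o;
    for (o = options; o->name; o++)
        if (!strcmp(o->name, name) && val >= o->min && val <= o->max) {
            *(int *)((char *)ctx + o->offset) = val;  /* poke through offset */
            return 0;
        }
    return -1;                          /* unknown option or out of range */
}

int main(void)
{
    toy_gif_ctx ctx = { 0 };
    toy_set_int(&ctx, "loop", 3);       /* roughly what setting the muxer's
                                           "loop" option does internally */
    printf("loop=%d\n", ctx.loop);      /* prints loop=3 */
    return 0;
}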
0 if header not found or contains invalid data, 1 otherwise + * @brief parses a packet header, extracting type and length + * @param pb AVIOContext to read header from + * @param type detected packet type is stored here + * @param length detected packet length, excluding header is stored here + * @return 0 if header not found or contains invalid data, 1 otherwise */ static int parse_packet_header(AVIOContext *pb, GXFPktType *type, int *length) { if (avio_rb32(pb)) @@ -58,7 +58,7 @@ static int parse_packet_header(AVIOContext *pb, GXFPktType *type, int *length) { } /** - * \brief check if file starts with a PKT_MAP header + * @brief check if file starts with a PKT_MAP header */ static int gxf_probe(AVProbeData *p) { static const uint8_t startcode[] = {0, 0, 0, 0, 1, 0xbc}; // start with map packet @@ -70,10 +70,10 @@ static int gxf_probe(AVProbeData *p) { } /** - * \brief gets the stream index for the track with the specified id, creates new + * @brief gets the stream index for the track with the specified id, creates new * stream if not found - * \param id id of stream to find / add - * \param format stream format identifier + * @param id id of stream to find / add + * @param format stream format identifier */ static int get_sindex(AVFormatContext *s, int id, int format) { int i; @@ -153,9 +153,9 @@ static int get_sindex(AVFormatContext *s, int id, int format) { } /** - * \brief filters out interesting tags from material information. - * \param len length of tag section, will be adjusted to contain remaining bytes - * \param si struct to store collected information into + * @brief filters out interesting tags from material information. + * @param len length of tag section, will be adjusted to contain remaining bytes + * @param si struct to store collected information into */ static void gxf_material_tags(AVIOContext *pb, int *len, struct gxf_stream_info *si) { si->first_field = AV_NOPTS_VALUE; @@ -179,9 +179,9 @@ static void gxf_material_tags(AVIOContext *pb, int *len, struct gxf_stream_info } /** - * \brief convert fps tag value to AVRational fps - * \param fps fps value from tag - * \return fps as AVRational, or 0 / 0 if unknown + * @brief convert fps tag value to AVRational fps + * @param fps fps value from tag + * @return fps as AVRational, or 0 / 0 if unknown */ static AVRational fps_tag2avr(int32_t fps) { extern const AVRational ff_frame_rate_tab[]; @@ -190,9 +190,9 @@ static AVRational fps_tag2avr(int32_t fps) { } /** - * \brief convert UMF attributes flags to AVRational fps - * \param flags UMF flags to convert - * \return fps as AVRational, or 0 / 0 if unknown + * @brief convert UMF attributes flags to AVRational fps + * @param flags UMF flags to convert + * @return fps as AVRational, or 0 / 0 if unknown */ static AVRational fps_umf2avr(uint32_t flags) { static const AVRational map[] = {{50, 1}, {60000, 1001}, {24, 1}, @@ -202,9 +202,9 @@ static AVRational fps_umf2avr(uint32_t flags) { } /** - * \brief filters out interesting tags from track information. - * \param len length of tag section, will be adjusted to contain remaining bytes - * \param si struct to store collected information into + * @brief filters out interesting tags from track information. 
+ * @param len length of tag section, will be adjusted to contain remaining bytes + * @param si struct to store collected information into */ static void gxf_track_tags(AVIOContext *pb, int *len, struct gxf_stream_info *si) { si->frames_per_second = (AVRational){0, 0}; @@ -228,7 +228,7 @@ static void gxf_track_tags(AVIOContext *pb, int *len, struct gxf_stream_info *si } /** - * \brief read index from FLT packet into stream 0 av_index + * @brief read index from FLT packet into stream 0 av_index */ static void gxf_read_index(AVFormatContext *s, int pkt_len) { AVIOContext *pb = s->pb; @@ -264,7 +264,7 @@ static int gxf_header(AVFormatContext *s, AVFormatParameters *ap) { int map_len; int len; AVRational main_timebase = {0, 0}; - struct gxf_stream_info si; + struct gxf_stream_info *si = s->priv_data; int i; if (!parse_packet_header(pb, &pkt_type, &map_len) || pkt_type != PKT_MAP) { av_log(s, AV_LOG_ERROR, "map packet not found\n"); @@ -282,7 +282,7 @@ static int gxf_header(AVFormatContext *s, AVFormatParameters *ap) { return 0; } map_len -= len; - gxf_material_tags(pb, &len, &si); + gxf_material_tags(pb, &len, si); avio_skip(pb, len); map_len -= 2; len = avio_rb16(pb); // length of track description @@ -300,7 +300,7 @@ static int gxf_header(AVFormatContext *s, AVFormatParameters *ap) { track_id = avio_r8(pb); track_len = avio_rb16(pb); len -= track_len; - gxf_track_tags(pb, &track_len, &si); + gxf_track_tags(pb, &track_len, si); avio_skip(pb, track_len); if (!(track_type & 0x80)) { av_log(s, AV_LOG_ERROR, "invalid track type %x\n", track_type); @@ -316,12 +316,12 @@ static int gxf_header(AVFormatContext *s, AVFormatParameters *ap) { if (idx < 0) continue; st = s->streams[idx]; if (!main_timebase.num || !main_timebase.den) { - main_timebase.num = si.frames_per_second.den; - main_timebase.den = si.frames_per_second.num * 2; + main_timebase.num = si->frames_per_second.den; + main_timebase.den = si->frames_per_second.num * 2; } - st->start_time = si.first_field; - if (si.first_field != AV_NOPTS_VALUE && si.last_field != AV_NOPTS_VALUE) - st->duration = si.last_field - si.first_field; + st->start_time = si->first_field; + if (si->first_field != AV_NOPTS_VALUE && si->last_field != AV_NOPTS_VALUE) + st->duration = si->last_field - si->first_field; } if (len < 0) av_log(s, AV_LOG_ERROR, "invalid track description length specified\n"); @@ -346,6 +346,8 @@ static int gxf_header(AVFormatContext *s, AVFormatParameters *ap) { avio_skip(pb, 0x30); // payload description fps = fps_umf2avr(avio_rl32(pb)); if (!main_timebase.num || !main_timebase.den) { + av_log(s, AV_LOG_WARNING, "No FPS track tag, using UMF fps tag." 
+ " This might give wrong results.\n"); // this may not always be correct, but simply the best we can get main_timebase.num = fps.den; main_timebase.den = fps.num * 2; @@ -374,11 +376,11 @@ static int gxf_header(AVFormatContext *s, AVFormatParameters *ap) { } /** - * \brief resync the stream on the next media packet with specified properties - * \param max_interval how many bytes to search for matching packet at most - * \param track track id the media packet must belong to, -1 for any - * \param timestamp minimum timestamp (== field number) the packet must have, -1 for any - * \return timestamp of packet found + * @brief resync the stream on the next media packet with specified properties + * @param max_interval how many bytes to search for matching packet at most + * @param track track id the media packet must belong to, -1 for any + * @param timestamp minimum timestamp (== field number) the packet must have, -1 for any + * @return timestamp of packet found */ static int64_t gxf_resync_media(AVFormatContext *s, uint64_t max_interval, int track, int timestamp) { uint32_t tmp; @@ -422,7 +424,9 @@ static int gxf_packet(AVFormatContext *s, AVPacket *pkt) { AVIOContext *pb = s->pb; GXFPktType pkt_type; int pkt_len; - while (!url_feof(pb)) { + struct gxf_stream_info *si = s->priv_data; + + while (!pb->eof_reached) { AVStream *st; int track_type, track_id, ret; int field_nr, field_info, skip = 0; @@ -473,6 +477,11 @@ static int gxf_packet(AVFormatContext *s, AVPacket *pkt) { avio_skip(pb, skip); pkt->stream_index = stream_index; pkt->dts = field_nr; + + //set duration manually for DV or else lavf misdetects the frame rate + if (st->codec->codec_id == CODEC_ID_DVVIDEO) + pkt->duration = si->fields_per_frame; + return ret; } return AVERROR(EIO); @@ -516,13 +525,12 @@ static int64_t gxf_read_timestamp(AVFormatContext *s, int stream_index, } AVInputFormat ff_gxf_demuxer = { - "gxf", - NULL_IF_CONFIG_SMALL("GXF format"), - 0, - gxf_probe, - gxf_header, - gxf_packet, - NULL, - gxf_seek, - gxf_read_timestamp, + .name = "gxf", + .long_name = NULL_IF_CONFIG_SMALL("GXF format"), + .priv_data_size = sizeof(struct gxf_stream_info), + .read_probe = gxf_probe, + .read_header = gxf_header, + .read_packet = gxf_packet, + .read_seek = gxf_seek, + .read_timestamp = gxf_read_timestamp, }; diff --git a/libavformat/gxfenc.c b/libavformat/gxfenc.c index 3f7d7851f7..8496384a5d 100644 --- a/libavformat/gxfenc.c +++ b/libavformat/gxfenc.c @@ -19,6 +19,8 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +#include "libavutil/intfloat_readwrite.h" +#include "libavutil/mathematics.h" #include "avformat.h" #include "gxf.h" #include "riff.h" @@ -392,9 +394,20 @@ static int gxf_write_umf_material_description(AVFormatContext *s) GXFContext *gxf = s->priv_data; AVIOContext *pb = s->pb; int timecode_base = gxf->time_base.den == 60000 ? 
60 : 50; + int64_t timestamp = 0; + AVDictionaryEntry *t; + uint32_t timecode; + +#if FF_API_TIMESTAMP + if (s->timestamp) + timestamp = s->timestamp; + else +#endif + if (t = av_dict_get(s->metadata, "creation_time", NULL, 0)) + timestamp = ff_iso8601_to_unix_time(t->value); // XXX drop frame - uint32_t timecode = + timecode = gxf->nb_fields / (timecode_base * 3600) % 24 << 24 | // hours gxf->nb_fields / (timecode_base * 60) % 60 << 16 | // minutes gxf->nb_fields / timecode_base % 60 << 8 | // seconds @@ -407,8 +420,8 @@ static int gxf_write_umf_material_description(AVFormatContext *s) avio_wl32(pb, gxf->nb_fields); /* mark out */ avio_wl32(pb, 0); /* timecode mark in */ avio_wl32(pb, timecode); /* timecode mark out */ - avio_wl64(pb, s->timestamp); /* modification time */ - avio_wl64(pb, s->timestamp); /* creation time */ + avio_wl64(pb, timestamp); /* modification time */ + avio_wl64(pb, timestamp); /* creation time */ avio_wl16(pb, 0); /* reserved */ avio_wl16(pb, 0); /* reserved */ avio_wl16(pb, gxf->audio_tracks); @@ -931,17 +944,14 @@ static int gxf_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *pk } AVOutputFormat ff_gxf_muxer = { - "gxf", - NULL_IF_CONFIG_SMALL("GXF format"), - NULL, - "gxf", - sizeof(GXFContext), - CODEC_ID_PCM_S16LE, - CODEC_ID_MPEG2VIDEO, - gxf_write_header, - gxf_write_packet, - gxf_write_trailer, - 0, - NULL, - gxf_interleave_packet, + .name = "gxf", + .long_name = NULL_IF_CONFIG_SMALL("GXF format"), + .extensions = "gxf", + .priv_data_size = sizeof(GXFContext), + .audio_codec = CODEC_ID_PCM_S16LE, + .video_codec = CODEC_ID_MPEG2VIDEO, + .write_header = gxf_write_header, + .write_packet = gxf_write_packet, + .write_trailer = gxf_write_trailer, + .interleave_packet = gxf_interleave_packet, }; diff --git a/libavformat/idcin.c b/libavformat/idcin.c index fb07788655..826b17d5d5 100644 --- a/libavformat/idcin.c +++ b/libavformat/idcin.c @@ -287,10 +287,10 @@ static int idcin_read_packet(AVFormatContext *s, } AVInputFormat ff_idcin_demuxer = { - "idcin", - NULL_IF_CONFIG_SMALL("id Cinematic format"), - sizeof(IdcinDemuxContext), - idcin_probe, - idcin_read_header, - idcin_read_packet, + .name = "idcin", + .long_name = NULL_IF_CONFIG_SMALL("id Cinematic format"), + .priv_data_size = sizeof(IdcinDemuxContext), + .read_probe = idcin_probe, + .read_header = idcin_read_header, + .read_packet = idcin_read_packet, }; diff --git a/libavformat/idroqdec.c b/libavformat/idroqdec.c index cbf3b3ed8d..5cc32d2b47 100644 --- a/libavformat/idroqdec.c +++ b/libavformat/idroqdec.c @@ -209,7 +209,6 @@ static int roq_read_packet(AVFormatContext *s, default: av_log(s, AV_LOG_ERROR, " unknown RoQ chunk (%04X)\n", chunk_type); return AVERROR_INVALIDDATA; - break; } } @@ -217,10 +216,10 @@ static int roq_read_packet(AVFormatContext *s, } AVInputFormat ff_roq_demuxer = { - "RoQ", - NULL_IF_CONFIG_SMALL("id RoQ format"), - sizeof(RoqDemuxContext), - roq_probe, - roq_read_header, - roq_read_packet, + .name = "RoQ", + .long_name = NULL_IF_CONFIG_SMALL("id RoQ format"), + .priv_data_size = sizeof(RoqDemuxContext), + .read_probe = roq_probe, + .read_header = roq_read_header, + .read_packet = roq_read_packet, }; diff --git a/libavformat/iff.c b/libavformat/iff.c index db988a6ecd..9455d96817 100644 --- a/libavformat/iff.c +++ b/libavformat/iff.c @@ -340,10 +340,10 @@ static int iff_read_packet(AVFormatContext *s, } AVInputFormat ff_iff_demuxer = { - "IFF", - NULL_IF_CONFIG_SMALL("IFF format"), - sizeof(IffDemuxContext), - iff_probe, - iff_read_header, - iff_read_packet, + .name = 
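gxf_write_umf_material_description() above packs the material duration into a 32-bit HH:MM:SS:FF timecode, one byte per component. A worked stand-alone version of that packing (drop-frame handling is ignored, as the XXX comment in the source notes):

#include <stdint.h>
#include <stdio.h>

/* hours<<24 | minutes<<16 | seconds<<8 | frames, computed from a field
 * count and the fields-per-second base (50 or 60 in the GXF muxer). */
static uint32_t gxf_pack_timecode(uint64_t nb_fields, int timecode_base)
{
    return (uint32_t)(nb_fields / (timecode_base * 3600) % 24) << 24 |
           (uint32_t)(nb_fields / (timecode_base * 60)   % 60) << 16 |
           (uint32_t)(nb_fields /  timecode_base         % 60) <<  8 |
           (uint32_t)(nb_fields %  timecode_base);
}

int main(void)
{
    /* 50 fields/s (25 fps PAL material), 90000 fields = 30 minutes */
    uint32_t tc = gxf_pack_timecode(90000, 50);
    printf("%02u:%02u:%02u:%02u\n",
           tc >> 24, (tc >> 16) & 0xff, (tc >> 8) & 0xff, tc & 0xff);
    return 0;                            /* prints 00:30:00:00 */
}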
"IFF", + .long_name = NULL_IF_CONFIG_SMALL("IFF format"), + .priv_data_size = sizeof(IffDemuxContext), + .read_probe = iff_probe, + .read_header = iff_read_header, + .read_packet = iff_read_packet, }; diff --git a/libavformat/img2.c b/libavformat/img2.c index 11ebf12d43..052d4d5ea3 100644 --- a/libavformat/img2.c +++ b/libavformat/img2.c @@ -43,6 +43,7 @@ typedef struct { char *pixel_format; /**< Set by a private option. */ char *video_size; /**< Set by a private option. */ char *framerate; /**< Set by a private option. */ + int loop; } VideoData; typedef struct { @@ -54,6 +55,7 @@ static const IdStrMap img_tags[] = { { CODEC_ID_MJPEG , "jpeg"}, { CODEC_ID_MJPEG , "jpg"}, { CODEC_ID_LJPEG , "ljpg"}, + { CODEC_ID_JPEGLS , "jls"}, { CODEC_ID_PNG , "png"}, { CODEC_ID_PNG , "mng"}, { CODEC_ID_PPM , "ppm"}, @@ -246,6 +248,11 @@ static int read_header(AVFormatContext *s1, AVFormatParameters *ap) framerate = (AVRational){ap->time_base.den, ap->time_base.num}; #endif +#if FF_API_LOOP_INPUT + if (s1->loop_input) + s->loop = s1->loop_input; +#endif + av_strlcpy(s->path, s1->filename, sizeof(s->path)); s->img_number = 0; s->img_count = 0; @@ -305,7 +312,7 @@ static int read_packet(AVFormatContext *s1, AVPacket *pkt) if (!s->is_pipe) { /* loop over input */ - if (s1->loop_input && s->img_number > s->img_last) { + if (s->loop && s->img_number > s->img_last) { s->img_number = s->img_first; } if (s->img_number > s->img_last) @@ -466,6 +473,7 @@ static const AVOption options[] = { { "pixel_format", "", OFFSET(pixel_format), FF_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC }, { "video_size", "", OFFSET(video_size), FF_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC }, { "framerate", "", OFFSET(framerate), FF_OPT_TYPE_STRING, {.str = "25"}, 0, 0, DEC }, + { "loop", "", OFFSET(loop), FF_OPT_TYPE_INT, {.dbl = 0}, 0, 1, DEC }, { NULL }, }; @@ -505,7 +513,7 @@ AVInputFormat ff_image2pipe_demuxer = { AVOutputFormat ff_image2_muxer = { .name = "image2", .long_name = NULL_IF_CONFIG_SMALL("image2 sequence"), - .extensions = "bmp,dpx,jpeg,jpg,ljpg,pam,pbm,pcx,pgm,pgmyuv,png," + .extensions = "bmp,dpx,jls,jpeg,jpg,ljpg,pam,pbm,pcx,pgm,pgmyuv,png," "ppm,sgi,tga,tif,tiff,jp2", .priv_data_size = sizeof(VideoData), .video_codec = CODEC_ID_MJPEG, diff --git a/libavformat/ingenientdec.c b/libavformat/ingenientdec.c index 4a7ff09dbe..791e935433 100644 --- a/libavformat/ingenientdec.c +++ b/libavformat/ingenientdec.c @@ -59,12 +59,11 @@ static int ingenient_read_packet(AVFormatContext *s, AVPacket *pkt) } AVInputFormat ff_ingenient_demuxer = { - "ingenient", - NULL_IF_CONFIG_SMALL("raw Ingenient MJPEG"), - sizeof(FFRawVideoDemuxerContext), - NULL, - ff_raw_video_read_header, - ingenient_read_packet, + .name = "ingenient", + .long_name = NULL_IF_CONFIG_SMALL("raw Ingenient MJPEG"), + .priv_data_size = sizeof(FFRawVideoDemuxerContext), + .read_header = ff_raw_video_read_header, + .read_packet = ingenient_read_packet, .flags= AVFMT_GENERIC_INDEX, .extensions = "cgi", // FIXME .value = CODEC_ID_MJPEG, diff --git a/libavformat/internal.h b/libavformat/internal.h index 64992dadfa..0bd9591ba7 100644 --- a/libavformat/internal.h +++ b/libavformat/internal.h @@ -118,7 +118,7 @@ void ff_url_split(char *proto, int proto_size, */ int ff_url_join(char *str, int size, const char *proto, const char *authorization, const char *hostname, - int port, const char *fmt, ...); + int port, const char *fmt, ...) 
av_printf_format(7, 8); /** * Append the media-specific SDP fragment for the media stream c @@ -167,14 +167,14 @@ void ff_put_v(AVIOContext *bc, uint64_t val); /** * Read a whole line of text from AVIOContext. Stop reading after reaching - * either a \n, a \0 or EOF. The returned string is always \0 terminated, + * either a \\n, a \\0 or EOF. The returned string is always \\0-terminated, * and may be truncated if the buffer is too small. * * @param s the read-only AVIOContext * @param buf buffer to store the read line * @param maxlen size of the buffer * @return the length of the string written in the buffer, not including the - * final \0 + * final \\0 */ int ff_get_line(AVIOContext *s, char *buf, int maxlen); @@ -258,4 +258,9 @@ void ff_make_absolute_url(char *buf, int size, const char *base, enum CodecID ff_guess_image2_codec(const char *filename); +/** + * Convert a date string in ISO8601 format to Unix timestamp. + */ +int64_t ff_iso8601_to_unix_time(const char *datestr); + #endif /* AVFORMAT_INTERNAL_H */ diff --git a/libavformat/ipmovie.c b/libavformat/ipmovie.c index 471d6a4bc0..b1db03efe1 100644 --- a/libavformat/ipmovie.c +++ b/libavformat/ipmovie.c @@ -609,10 +609,10 @@ static int ipmovie_read_packet(AVFormatContext *s, } AVInputFormat ff_ipmovie_demuxer = { - "ipmovie", - NULL_IF_CONFIG_SMALL("Interplay MVE format"), - sizeof(IPMVEContext), - ipmovie_probe, - ipmovie_read_header, - ipmovie_read_packet, + .name = "ipmovie", + .long_name = NULL_IF_CONFIG_SMALL("Interplay MVE format"), + .priv_data_size = sizeof(IPMVEContext), + .read_probe = ipmovie_probe, + .read_header = ipmovie_read_header, + .read_packet = ipmovie_read_packet, }; diff --git a/libavformat/isom.c b/libavformat/isom.c index 33a448da34..09ee23bdfd 100644 --- a/libavformat/isom.c +++ b/libavformat/isom.c @@ -246,7 +246,7 @@ const AVCodecTag codec_movaudio_tags[] = { { CODEC_ID_AC3, MKTAG('a', 'c', '-', '3') }, /* ETSI TS 102 366 Annex F */ { CODEC_ID_AC3, MKTAG('s', 'a', 'c', '3') }, /* Nero Recode */ { CODEC_ID_DTS, MKTAG('d', 't', 's', 'c') }, /* mp4ra.org */ - { CODEC_ID_DTS, MKTAG('D', 'T', 'S', ' ') }, /* non standard */ + { CODEC_ID_DTS, MKTAG('D', 'T', 'S', ' ') }, /* non-standard */ { CODEC_ID_AMR_NB, MKTAG('s', 'a', 'm', 'r') }, /* AMR-NB 3gp */ { CODEC_ID_AMR_WB, MKTAG('s', 'a', 'w', 'b') }, /* AMR-WB 3gp */ diff --git a/libavformat/iss.c b/libavformat/iss.c index 8d225108ad..9a034c6f7f 100644 --- a/libavformat/iss.c +++ b/libavformat/iss.c @@ -23,8 +23,7 @@ * @file * Funcom ISS file demuxer * @author Jaikrishnan Menon - * for more information on the .iss file format, visit: - * http://wiki.multimedia.cx/index.php?title=FunCom_ISS + * @see http://wiki.multimedia.cx/index.php?title=FunCom_ISS */ #include "avformat.h" @@ -123,11 +122,11 @@ static int iss_read_packet(AVFormatContext *s, AVPacket *pkt) } AVInputFormat ff_iss_demuxer = { - "ISS", - NULL_IF_CONFIG_SMALL("Funcom ISS format"), - sizeof(IssDemuxContext), - iss_probe, - iss_read_header, - iss_read_packet, + .name = "ISS", + .long_name = NULL_IF_CONFIG_SMALL("Funcom ISS format"), + .priv_data_size = sizeof(IssDemuxContext), + .read_probe = iss_probe, + .read_header = iss_read_header, + .read_packet = iss_read_packet, }; diff --git a/libavformat/iv8.c b/libavformat/iv8.c index df5eb116c1..1f3dfe2f82 100644 --- a/libavformat/iv8.c +++ b/libavformat/iv8.c @@ -85,12 +85,11 @@ retry: } AVInputFormat ff_iv8_demuxer = { - "iv8", - NULL_IF_CONFIG_SMALL("A format generated by IndigoVision 8000 video server"), - 0, - probe, - read_header, - read_packet, + 
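internal.h above gains ff_iso8601_to_unix_time(), which the DV and GXF muxers in this merge use to derive their recording timestamp from the "creation_time" metadata entry once FF_API_TIMESTAMP goes away. A hedged caller-side sketch: av_dict_get() and the metadata key are real, but the wrapper is illustrative and only valid inside libavformat since the helper is internal:

#include <stdint.h>
#include <libavformat/avformat.h>
#include "internal.h"               /* ff_iso8601_to_unix_time(), lavf-internal */

/* Pick the recording start time for a muxer: prefer an ISO 8601
 * "creation_time" metadata entry, fall back to 0 when absent. */
static int64_t muxer_start_time(AVFormatContext *s)
{
    AVDictionaryEntry *t = av_dict_get(s->metadata, "creation_time", NULL, 0);
    return t ? ff_iso8601_to_unix_time(t->value) : 0;
}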
.name = "iv8", + .long_name = NULL_IF_CONFIG_SMALL("A format generated by IndigoVision 8000 video server"), + .read_probe = probe, + .read_header = read_header, + .read_packet = read_packet, .flags= AVFMT_GENERIC_INDEX, .value = CODEC_ID_MPEG4, }; diff --git a/libavformat/ivfdec.c b/libavformat/ivfdec.c index af3fe19e22..58455d02be 100644 --- a/libavformat/ivfdec.c +++ b/libavformat/ivfdec.c @@ -80,12 +80,11 @@ static int read_packet(AVFormatContext *s, AVPacket *pkt) } AVInputFormat ff_ivf_demuxer = { - "ivf", - NULL_IF_CONFIG_SMALL("On2 IVF"), - 0, - probe, - read_header, - read_packet, + .name = "ivf", + .long_name = NULL_IF_CONFIG_SMALL("On2 IVF"), + .read_probe = probe, + .read_header = read_header, + .read_packet = read_packet, .flags= AVFMT_GENERIC_INDEX, .codec_tag = (const AVCodecTag*[]){ff_codec_bmp_tags, 0}, }; diff --git a/libavformat/libnut.c b/libavformat/libnut.c index 5e5831892a..4ed9bb9a17 100644 --- a/libavformat/libnut.c +++ b/libavformat/libnut.c @@ -151,16 +151,16 @@ static int nut_write_trailer(AVFormatContext * avf) { } AVOutputFormat ff_libnut_muxer = { - "libnut", - "nut format", - "video/x-nut", - "nut", - sizeof(NUTContext), - CODEC_ID_VORBIS, - CODEC_ID_MPEG4, - nut_write_header, - nut_write_packet, - nut_write_trailer, + .name = "libnut", + .long_name = "nut format", + .mime_type = "video/x-nut", + .extensions = "nut", + .priv_data_size = sizeof(NUTContext), + .audio_codec = CODEC_ID_VORBIS, + .video_codec = CODEC_ID_MPEG4, + .write_header = nut_write_header, + .write_packet = nut_write_packet, + .write_trailer = nut_write_trailer, .flags = AVFMT_GLOBALHEADER, }; #endif /* CONFIG_LIBNUT_MUXER */ @@ -298,13 +298,13 @@ static int nut_read_close(AVFormatContext *s) { } AVInputFormat ff_libnut_demuxer = { - "libnut", - NULL_IF_CONFIG_SMALL("NUT format"), - sizeof(NUTContext), - nut_probe, - nut_read_header, - nut_read_packet, - nut_read_close, - nut_read_seek, + .name = "libnut", + .long_name = NULL_IF_CONFIG_SMALL("NUT format"), + .priv_data_size = sizeof(NUTContext), + .read_probe = nut_probe, + .read_header = nut_read_header, + .read_packet = nut_read_packet, + .read_close = nut_read_close, + .read_seek = nut_read_seek, .extensions = "nut", }; diff --git a/libavformat/librtmp.c b/libavformat/librtmp.c index 4ada91887f..f93c76662f 100644 --- a/libavformat/librtmp.c +++ b/libavformat/librtmp.c @@ -24,6 +24,7 @@ * RTMP protocol based on http://rtmpdump.mplayerhq.hu/ librtmp */ +#include "libavutil/mathematics.h" #include "avformat.h" #include "url.h" diff --git a/libavformat/lmlm4.c b/libavformat/lmlm4.c index d68e6608ea..b965d606ca 100644 --- a/libavformat/lmlm4.c +++ b/libavformat/lmlm4.c @@ -118,10 +118,9 @@ static int lmlm4_read_packet(AVFormatContext *s, AVPacket *pkt) { } AVInputFormat ff_lmlm4_demuxer = { - "lmlm4", - NULL_IF_CONFIG_SMALL("lmlm4 raw format"), - 0, - lmlm4_probe, - lmlm4_read_header, - lmlm4_read_packet, + .name = "lmlm4", + .long_name = NULL_IF_CONFIG_SMALL("lmlm4 raw format"), + .read_probe = lmlm4_probe, + .read_header = lmlm4_read_header, + .read_packet = lmlm4_read_packet, }; diff --git a/libavformat/matroskadec.c b/libavformat/matroskadec.c index efb80a017d..80358f11a7 100644 --- a/libavformat/matroskadec.c +++ b/libavformat/matroskadec.c @@ -255,6 +255,9 @@ typedef struct { /* What to skip before effectively reading a packet. */ int skip_to_keyframe; uint64_t skip_to_timecode; + + /* File has a CUES element, but we defer parsing until it is needed. 
*/ + int cues_parsing_deferred; } MatroskaDemuxContext; typedef struct { @@ -826,11 +829,15 @@ static int ebml_parse_elem(MatroskaDemuxContext *matroska, uint32_t id = syntax->id; uint64_t length; int res; + void *newelem; data = (char *)data + syntax->data_offset; if (syntax->list_elem_size) { EbmlList *list = data; - list->elem = av_realloc(list->elem, (list->nb_elem+1)*syntax->list_elem_size); + newelem = av_realloc(list->elem, (list->nb_elem+1)*syntax->list_elem_size); + if (!newelem) + return AVERROR(ENOMEM); + list->elem = newelem; data = (char*)list->elem + list->nb_elem*syntax->list_elem_size; memset(data, 0, syntax->list_elem_size); list->nb_elem++; @@ -928,6 +935,8 @@ static int matroska_probe(AVProbeData *p) * Not fully fool-proof, but good enough. */ for (i = 0; i < FF_ARRAY_ELEMS(matroska_doctypes); i++) { int probelen = strlen(matroska_doctypes[i]); + if (total < probelen) + continue; for (n = 4+size; n <= 4+size+total-probelen; n++) if (!memcmp(p->buf+n, matroska_doctypes[i], probelen)) return AVPROBE_SCORE_MAX; @@ -990,7 +999,10 @@ static int matroska_decode_buffer(uint8_t** buf, int* buf_size, pkt_data = av_realloc(pkt_data, pkt_size); zstream.avail_out = pkt_size - zstream.total_out; zstream.next_out = pkt_data + zstream.total_out; - result = inflate(&zstream, Z_NO_FLUSH); + if (pkt_data) { + result = inflate(&zstream, Z_NO_FLUSH); + } else + result = Z_MEM_ERROR; } while (result==Z_OK && pkt_size<10000000); pkt_size = zstream.total_out; inflateEnd(&zstream); @@ -1011,7 +1023,10 @@ static int matroska_decode_buffer(uint8_t** buf, int* buf_size, pkt_data = av_realloc(pkt_data, pkt_size); bzstream.avail_out = pkt_size - bzstream.total_out_lo32; bzstream.next_out = pkt_data + bzstream.total_out_lo32; - result = BZ2_bzDecompress(&bzstream); + if (pkt_data) { + result = BZ2_bzDecompress(&bzstream); + } else + result = BZ_MEM_ERROR; } while (result==BZ_OK && pkt_size<10000000); pkt_size = bzstream.total_out_lo32; BZ2_bzDecompressEnd(&bzstream); @@ -1064,13 +1079,17 @@ static void matroska_fix_ass_packet(MatroskaDemuxContext *matroska, } } -static void matroska_merge_packets(AVPacket *out, AVPacket *in) +static int matroska_merge_packets(AVPacket *out, AVPacket *in) { - out->data = av_realloc(out->data, out->size+in->size); + void *newdata = av_realloc(out->data, out->size+in->size); + if (!newdata) + return AVERROR(ENOMEM); + out->data = newdata; memcpy(out->data+out->size, in->data, in->size); out->size += in->size; av_destruct_packet(in); av_free(in); + return 0; } static void matroska_convert_tag(AVFormatContext *s, EbmlList *list, @@ -1139,7 +1158,7 @@ static void matroska_convert_tags(AVFormatContext *s) } } -static void matroska_execute_seekhead(MatroskaDemuxContext *matroska) +static int matroska_parse_seekhead_entry(MatroskaDemuxContext *matroska, int idx) { EbmlList *seekhead_list = &matroska->seekhead; MatroskaSeekhead *seekhead = seekhead_list->elem; @@ -1147,6 +1166,54 @@ static void matroska_execute_seekhead(MatroskaDemuxContext *matroska) int64_t before_pos = avio_tell(matroska->ctx->pb); uint32_t saved_id = matroska->current_id; MatroskaLevel level; + int64_t offset; + int ret = 0; + + if (idx >= seekhead_list->nb_elem + || seekhead[idx].id == MATROSKA_ID_SEEKHEAD + || seekhead[idx].id == MATROSKA_ID_CLUSTER) + return 0; + + /* seek */ + offset = seekhead[idx].pos + matroska->segment_start; + if (avio_seek(matroska->ctx->pb, offset, SEEK_SET) == offset) { + /* We don't want to lose our seekhead level, so we add + * a dummy. This is a crude hack. 
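
Several matroskadec.c hunks above replace the unchecked `ptr = av_realloc(ptr, size)` pattern with a temporary pointer, so a failed allocation no longer overwrites (and leaks) the old buffer. The same idiom in portable C, with the standard realloc standing in for av_realloc:

#include <errno.h>
#include <stdlib.h>

/* Grow an array of fixed-size elements by one; on failure the old buffer stays valid. */
static int list_grow(void **elems, unsigned *nb_elem, size_t elem_size)
{
    void *tmp = realloc(*elems, (*nb_elem + 1) * elem_size);
    if (!tmp)
        return -ENOMEM;   /* *elems is untouched; the caller can still use or free it */
    *elems = tmp;
    (*nb_elem)++;
    return 0;
}

/* usage:
 *   int *v = NULL; unsigned n = 0;
 *   if (!list_grow((void **)&v, &n, sizeof(*v))) v[n - 1] = 42;
 */
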
*/ + if (matroska->num_levels == EBML_MAX_DEPTH) { + av_log(matroska->ctx, AV_LOG_INFO, + "Max EBML element depth (%d) reached, " + "cannot parse further.\n", EBML_MAX_DEPTH); + ret = AVERROR_INVALIDDATA; + } else { + level.start = 0; + level.length = (uint64_t)-1; + matroska->levels[matroska->num_levels] = level; + matroska->num_levels++; + matroska->current_id = 0; + + ebml_parse(matroska, matroska_segment, matroska); + + /* remove dummy level */ + while (matroska->num_levels) { + uint64_t length = matroska->levels[--matroska->num_levels].length; + if (length == (uint64_t)-1) + break; + } + } + } + /* seek back */ + avio_seek(matroska->ctx->pb, before_pos, SEEK_SET); + matroska->level_up = level_up; + matroska->current_id = saved_id; + + return ret; +} + +static void matroska_execute_seekhead(MatroskaDemuxContext *matroska) +{ + EbmlList *seekhead_list = &matroska->seekhead; + MatroskaSeekhead *seekhead = seekhead_list->elem; + int64_t before_pos = avio_tell(matroska->ctx->pb); int i; // we should not do any seeking in the streaming case @@ -1154,47 +1221,55 @@ static void matroska_execute_seekhead(MatroskaDemuxContext *matroska) (matroska->ctx->flags & AVFMT_FLAG_IGNIDX)) return; - for (i=0; i<seekhead_list->nb_elem; i++) { - int64_t offset = seekhead[i].pos + matroska->segment_start; - - if (seekhead[i].pos <= before_pos - || seekhead[i].id == MATROSKA_ID_SEEKHEAD - || seekhead[i].id == MATROSKA_ID_CLUSTER) + for (i = 0; i < seekhead_list->nb_elem; i++) { + if (seekhead[i].pos <= before_pos) continue; - /* seek */ - if (avio_seek(matroska->ctx->pb, offset, SEEK_SET) != offset) + // defer cues parsing until we actually need cue data. + if (seekhead[i].id == MATROSKA_ID_CUES) { + matroska->cues_parsing_deferred = 1; continue; + } - /* We don't want to lose our seekhead level, so we add - * a dummy. This is a crude hack. 
*/ - if (matroska->num_levels == EBML_MAX_DEPTH) { - av_log(matroska->ctx, AV_LOG_INFO, - "Max EBML element depth (%d) reached, " - "cannot parse further.\n", EBML_MAX_DEPTH); + if (matroska_parse_seekhead_entry(matroska, i) < 0) break; - } + } +} - level.start = 0; - level.length = (uint64_t)-1; - matroska->levels[matroska->num_levels] = level; - matroska->num_levels++; - matroska->current_id = 0; +static void matroska_parse_cues(MatroskaDemuxContext *matroska) { + EbmlList *seekhead_list = &matroska->seekhead; + MatroskaSeekhead *seekhead = seekhead_list->elem; + EbmlList *index_list; + MatroskaIndex *index; + int index_scale = 1; + int i, j; - ebml_parse(matroska, matroska_segment, matroska); + for (i = 0; i < seekhead_list->nb_elem; i++) + if (seekhead[i].id == MATROSKA_ID_CUES) + break; + assert(i <= seekhead_list->nb_elem); - /* remove dummy level */ - while (matroska->num_levels) { - uint64_t length = matroska->levels[--matroska->num_levels].length; - if (length == (uint64_t)-1) - break; + matroska_parse_seekhead_entry(matroska, i); + + index_list = &matroska->index; + index = index_list->elem; + if (index_list->nb_elem + && index[0].time > 1E14/matroska->time_scale) { + av_log(matroska->ctx, AV_LOG_WARNING, "Working around broken index.\n"); + index_scale = matroska->time_scale; + } + for (i = 0; i < index_list->nb_elem; i++) { + EbmlList *pos_list = &index[i].pos; + MatroskaIndexPos *pos = pos_list->elem; + for (j = 0; j < pos_list->nb_elem; j++) { + MatroskaTrack *track = matroska_find_track_by_num(matroska, pos[j].track); + if (track && track->stream) + av_add_index_entry(track->stream, + pos[j].pos + matroska->segment_start, + index[i].time/index_scale, 0, 0, + AVINDEX_KEYFRAME); } } - - /* seek back */ - avio_seek(matroska->ctx->pb, before_pos, SEEK_SET); - matroska->level_up = level_up; - matroska->current_id = saved_id; } static int matroska_aac_profile(char *codec_id) @@ -1226,9 +1301,6 @@ static int matroska_read_header(AVFormatContext *s, AVFormatParameters *ap) EbmlList *chapters_list = &matroska->chapters; MatroskaChapter *chapters; MatroskaTrack *tracks; - EbmlList *index_list; - MatroskaIndex *index; - int index_scale = 1; uint64_t max_start = 0; Ebml ebml = { 0 }; AVStream *st; @@ -1587,27 +1659,6 @@ static int matroska_read_header(AVFormatContext *s, AVFormatParameters *ap) max_start = chapters[i].start; } - index_list = &matroska->index; - index = index_list->elem; - if (index_list->nb_elem - && index[0].time > 100000000000000/matroska->time_scale) { - av_log(matroska->ctx, AV_LOG_WARNING, "Working around broken index.\n"); - index_scale = matroska->time_scale; - } - for (i=0; i<index_list->nb_elem; i++) { - EbmlList *pos_list = &index[i].pos; - MatroskaIndexPos *pos = pos_list->elem; - for (j=0; j<pos_list->nb_elem; j++) { - MatroskaTrack *track = matroska_find_track_by_num(matroska, - pos[j].track); - if (track && track->stream) - av_add_index_entry(track->stream, - pos[j].pos + matroska->segment_start, - index[i].time/index_scale, 0, 0, - AVINDEX_KEYFRAME); - } - } - matroska_convert_tags(s); return 0; @@ -1624,11 +1675,13 @@ static int matroska_deliver_packet(MatroskaDemuxContext *matroska, memcpy(pkt, matroska->packets[0], sizeof(AVPacket)); av_free(matroska->packets[0]); if (matroska->num_packets > 1) { + void *newpackets; memmove(&matroska->packets[0], &matroska->packets[1], (matroska->num_packets - 1) * sizeof(AVPacket *)); - matroska->packets = - av_realloc(matroska->packets, (matroska->num_packets - 1) * - sizeof(AVPacket *)); + newpackets = 
av_realloc(matroska->packets, + (matroska->num_packets - 1) * sizeof(AVPacket *)); + if (newpackets) + matroska->packets = newpackets; } else { av_freep(&matroska->packets); } @@ -1681,7 +1734,7 @@ static int matroska_parse_block(MatroskaDemuxContext *matroska, uint8_t *data, if (size <= 3 || !track || !track->stream) { av_log(matroska->ctx, AV_LOG_INFO, "Invalid stream %"PRIu64" or size %u\n", num, size); - return res; + return AVERROR_INVALIDDATA; } st = track->stream; if (st->discard >= AVDISCARD_ALL) @@ -1918,7 +1971,7 @@ static int matroska_parse_cluster(MatroskaDemuxContext *matroska) res = ebml_parse(matroska, matroska_clusters, &cluster); blocks_list = &cluster.blocks; blocks = blocks_list->elem; - for (i=0; i<blocks_list->nb_elem; i++) + for (i=0; i<blocks_list->nb_elem && !res; i++) if (blocks[i].bin.size > 0 && blocks[i].bin.data) { int is_keyframe = blocks[i].non_simple ? !blocks[i].reference : -1; res=matroska_parse_block(matroska, @@ -1935,14 +1988,15 @@ static int matroska_parse_cluster(MatroskaDemuxContext *matroska) static int matroska_read_packet(AVFormatContext *s, AVPacket *pkt) { MatroskaDemuxContext *matroska = s->priv_data; + int ret = 0; - while (matroska_deliver_packet(matroska, pkt)) { + while (!ret && matroska_deliver_packet(matroska, pkt)) { if (matroska->done) return AVERROR_EOF; - matroska_parse_cluster(matroska); + ret = matroska_parse_cluster(matroska); } - return 0; + return ret; } static int matroska_read_seek(AVFormatContext *s, int stream_index, @@ -1953,12 +2007,19 @@ static int matroska_read_seek(AVFormatContext *s, int stream_index, AVStream *st = s->streams[stream_index]; int i, index, index_sub, index_min; + /* Parse the CUES now since we need the index data to seek. */ + if (matroska->cues_parsing_deferred) { + matroska_parse_cues(matroska); + matroska->cues_parsing_deferred = 0; + } + if (!st->nb_index_entries) return 0; timestamp = FFMAX(timestamp, st->index_entries[0].timestamp); if ((index = av_index_search_timestamp(st, timestamp, flags)) < 0) { avio_seek(s->pb, st->index_entries[st->nb_index_entries-1].pos, SEEK_SET); + matroska->current_id = 0; while ((index = av_index_search_timestamp(st, timestamp, flags)) < 0) { matroska_clear_queue(matroska); if (matroska_parse_cluster(matroska) < 0) @@ -1987,6 +2048,7 @@ static int matroska_read_seek(AVFormatContext *s, int stream_index, } avio_seek(s->pb, st->index_entries[index_min].pos, SEEK_SET); + matroska->current_id = 0; matroska->skip_to_keyframe = !(flags & AVSEEK_FLAG_ANY); matroska->skip_to_timecode = st->index_entries[index].timestamp; matroska->done = 0; @@ -2011,12 +2073,12 @@ static int matroska_read_close(AVFormatContext *s) } AVInputFormat ff_matroska_demuxer = { - "matroska,webm", - NULL_IF_CONFIG_SMALL("Matroska/WebM file format"), - sizeof(MatroskaDemuxContext), - matroska_probe, - matroska_read_header, - matroska_read_packet, - matroska_read_close, - matroska_read_seek, + .name = "matroska,webm", + .long_name = NULL_IF_CONFIG_SMALL("Matroska/WebM file format"), + .priv_data_size = sizeof(MatroskaDemuxContext), + .read_probe = matroska_probe, + .read_header = matroska_read_header, + .read_packet = matroska_read_packet, + .read_close = matroska_read_close, + .read_seek = matroska_read_seek, }; diff --git a/libavformat/matroskaenc.c b/libavformat/matroskaenc.c index e9c977a59f..ecc5e6bbb3 100644 --- a/libavformat/matroskaenc.c +++ b/libavformat/matroskaenc.c @@ -28,6 +28,8 @@ #include "avlanguage.h" #include "libavutil/samplefmt.h" #include "libavutil/intreadwrite.h" +#include 
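
The seekhead rework above makes CUES parsing lazy: matroska_execute_seekhead() only notes that a cues entry exists (cues_parsing_deferred), and matroska_read_seek() builds the index on the first seek via matroska_parse_cues(). Stripped of the EBML details this is an ordinary lazy-initialization pattern; the names below are illustrative, not the demuxer's:

typedef struct Demuxer {
    int cues_parsing_deferred; /* set at open time when an index element is seen */
    int index_built;
    /* ... index entries would live here ... */
} Demuxer;

static void parse_cues(Demuxer *d)
{
    /* expensive part: seek to the cues element, read it, and add one
     * index entry per cue point */
    d->index_built = 1;
}

static int demuxer_seek(Demuxer *d, long long timestamp)
{
    if (d->cues_parsing_deferred) {  /* pay the parsing cost only when a seek happens */
        parse_cues(d);
        d->cues_parsing_deferred = 0;
    }
    /* ... binary-search the index for timestamp ... */
    (void)timestamp;
    return 0;
}
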
"libavutil/intfloat_readwrite.h" +#include "libavutil/mathematics.h" #include "libavutil/random_seed.h" #include "libavutil/lfg.h" #include "libavutil/dict.h" @@ -937,7 +939,7 @@ static int mkv_write_ass_blocks(AVFormatContext *s, AVIOContext *pb, AVPacket *p size -= start - data; sscanf(data, "Dialogue: %d,", &layer); i = snprintf(buffer, sizeof(buffer), "%"PRId64",%d,", - s->streams[pkt->stream_index]->nb_frames++, layer); + s->streams[pkt->stream_index]->nb_frames, layer); size = FFMIN(i+size, sizeof(buffer)); memcpy(buffer+i, start, size-i); @@ -1198,50 +1200,50 @@ static int mkv_write_trailer(AVFormatContext *s) #if CONFIG_MATROSKA_MUXER AVOutputFormat ff_matroska_muxer = { - "matroska", - NULL_IF_CONFIG_SMALL("Matroska file format"), - "video/x-matroska", - "mkv", - sizeof(MatroskaMuxContext), - CODEC_ID_MP2, - CODEC_ID_MPEG4, - mkv_write_header, - mkv_write_packet, - mkv_write_trailer, + .name = "matroska", + .long_name = NULL_IF_CONFIG_SMALL("Matroska file format"), + .mime_type = "video/x-matroska", + .extensions = "mkv", + .priv_data_size = sizeof(MatroskaMuxContext), + .audio_codec = CODEC_ID_MP2, + .video_codec = CODEC_ID_MPEG4, + .write_header = mkv_write_header, + .write_packet = mkv_write_packet, + .write_trailer = mkv_write_trailer, .flags = AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS, .codec_tag = (const AVCodecTag* const []){ff_codec_bmp_tags, ff_codec_wav_tags, 0}, - .subtitle_codec = CODEC_ID_TEXT, + .subtitle_codec = CODEC_ID_SSA, }; #endif #if CONFIG_WEBM_MUXER AVOutputFormat ff_webm_muxer = { - "webm", - NULL_IF_CONFIG_SMALL("WebM file format"), - "video/webm", - "webm", - sizeof(MatroskaMuxContext), - CODEC_ID_VORBIS, - CODEC_ID_VP8, - mkv_write_header, - mkv_write_packet, - mkv_write_trailer, + .name = "webm", + .long_name = NULL_IF_CONFIG_SMALL("WebM file format"), + .mime_type = "video/webm", + .extensions = "webm", + .priv_data_size = sizeof(MatroskaMuxContext), + .audio_codec = CODEC_ID_VORBIS, + .video_codec = CODEC_ID_VP8, + .write_header = mkv_write_header, + .write_packet = mkv_write_packet, + .write_trailer = mkv_write_trailer, .flags = AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS | AVFMT_TS_NONSTRICT, }; #endif #if CONFIG_MATROSKA_AUDIO_MUXER AVOutputFormat ff_matroska_audio_muxer = { - "matroska", - NULL_IF_CONFIG_SMALL("Matroska file format"), - "audio/x-matroska", - "mka", - sizeof(MatroskaMuxContext), - CODEC_ID_MP2, - CODEC_ID_NONE, - mkv_write_header, - mkv_write_packet, - mkv_write_trailer, + .name = "matroska", + .long_name = NULL_IF_CONFIG_SMALL("Matroska file format"), + .mime_type = "audio/x-matroska", + .extensions = "mka", + .priv_data_size = sizeof(MatroskaMuxContext), + .audio_codec = CODEC_ID_MP2, + .video_codec = CODEC_ID_NONE, + .write_header = mkv_write_header, + .write_packet = mkv_write_packet, + .write_trailer = mkv_write_trailer, .flags = AVFMT_GLOBALHEADER, .codec_tag = (const AVCodecTag* const []){ff_codec_wav_tags, 0}, }; diff --git a/libavformat/md5enc.c b/libavformat/md5enc.c index 10b3d59581..512e082778 100644 --- a/libavformat/md5enc.c +++ b/libavformat/md5enc.c @@ -66,16 +66,15 @@ static int write_trailer(struct AVFormatContext *s) } AVOutputFormat ff_md5_muxer = { - "md5", - NULL_IF_CONFIG_SMALL("MD5 testing format"), - NULL, - "", - PRIVSIZE, - CODEC_ID_PCM_S16LE, - CODEC_ID_RAWVIDEO, - write_header, - write_packet, - write_trailer, + .name = "md5", + .long_name = NULL_IF_CONFIG_SMALL("MD5 testing format"), + .extensions = "", + .priv_data_size = PRIVSIZE, + .audio_codec = CODEC_ID_PCM_S16LE, + .video_codec = CODEC_ID_RAWVIDEO, + 
.write_header = write_header, + .write_packet = write_packet, + .write_trailer = write_trailer, }; #endif @@ -96,15 +95,12 @@ static int framemd5_write_packet(struct AVFormatContext *s, AVPacket *pkt) } AVOutputFormat ff_framemd5_muxer = { - "framemd5", - NULL_IF_CONFIG_SMALL("Per-frame MD5 testing format"), - NULL, - "", - PRIVSIZE, - CODEC_ID_PCM_S16LE, - CODEC_ID_RAWVIDEO, - NULL, - framemd5_write_packet, - NULL, + .name = "framemd5", + .long_name = NULL_IF_CONFIG_SMALL("Per-frame MD5 testing format"), + .extensions = "", + .priv_data_size = PRIVSIZE, + .audio_codec = CODEC_ID_PCM_S16LE, + .video_codec = CODEC_ID_RAWVIDEO, + .write_packet = framemd5_write_packet, }; #endif diff --git a/libavformat/mm.c b/libavformat/mm.c index c6264f1cb7..2cca7c965a 100644 --- a/libavformat/mm.c +++ b/libavformat/mm.c @@ -184,15 +184,13 @@ static int read_packet(AVFormatContext *s, avio_skip(pb, length); } } - - return 0; } AVInputFormat ff_mm_demuxer = { - "mm", - NULL_IF_CONFIG_SMALL("American Laser Games MM format"), - sizeof(MmDemuxContext), - probe, - read_header, - read_packet, + .name = "mm", + .long_name = NULL_IF_CONFIG_SMALL("American Laser Games MM format"), + .priv_data_size = sizeof(MmDemuxContext), + .read_probe = probe, + .read_header = read_header, + .read_packet = read_packet, }; diff --git a/libavformat/mmf.c b/libavformat/mmf.c index 89e33b4ce2..0f64b998da 100644 --- a/libavformat/mmf.c +++ b/libavformat/mmf.c @@ -291,27 +291,26 @@ static int mmf_read_packet(AVFormatContext *s, #if CONFIG_MMF_DEMUXER AVInputFormat ff_mmf_demuxer = { - "mmf", - NULL_IF_CONFIG_SMALL("Yamaha SMAF"), - sizeof(MMFContext), - mmf_probe, - mmf_read_header, - mmf_read_packet, - NULL, - pcm_read_seek, + .name = "mmf", + .long_name = NULL_IF_CONFIG_SMALL("Yamaha SMAF"), + .priv_data_size = sizeof(MMFContext), + .read_probe = mmf_probe, + .read_header = mmf_read_header, + .read_packet = mmf_read_packet, + .read_seek = pcm_read_seek, }; #endif #if CONFIG_MMF_MUXER AVOutputFormat ff_mmf_muxer = { - "mmf", - NULL_IF_CONFIG_SMALL("Yamaha SMAF"), - "application/vnd.smaf", - "mmf", - sizeof(MMFContext), - CODEC_ID_ADPCM_YAMAHA, - CODEC_ID_NONE, - mmf_write_header, - mmf_write_packet, - mmf_write_trailer, + .name = "mmf", + .long_name = NULL_IF_CONFIG_SMALL("Yamaha SMAF"), + .mime_type = "application/vnd.smaf", + .extensions = "mmf", + .priv_data_size = sizeof(MMFContext), + .audio_codec = CODEC_ID_ADPCM_YAMAHA, + .video_codec = CODEC_ID_NONE, + .write_header = mmf_write_header, + .write_packet = mmf_write_packet, + .write_trailer = mmf_write_trailer, }; #endif diff --git a/libavformat/mmsh.c b/libavformat/mmsh.c index 4b296e30d9..49ff6bf0fc 100644 --- a/libavformat/mmsh.c +++ b/libavformat/mmsh.c @@ -208,7 +208,6 @@ static int get_http_header_data(MMSHContext *mmsh) } } } - return 0; } static int mmsh_open(URLContext *h, const char *uri, int flags) diff --git a/libavformat/mov.c b/libavformat/mov.c index 63144d15ea..c9352105ab 100644 --- a/libavformat/mov.c +++ b/libavformat/mov.c @@ -26,6 +26,8 @@ //#define MOV_EXPORT_ALL_METADATA #include "libavutil/intreadwrite.h" +#include "libavutil/intfloat_readwrite.h" +#include "libavutil/mathematics.h" #include "libavutil/avstring.h" #include "libavutil/dict.h" #include "avformat.h" @@ -454,7 +456,7 @@ static int mov_read_hdlr(MOVContext *c, AVIOContext *pb, MOVAtom atom) st->codec->codec_type = AVMEDIA_TYPE_AUDIO; else if(type == MKTAG('m','1','a',' ')) st->codec->codec_id = CODEC_ID_MP2; - else if(type == MKTAG('s','u','b','p')) + else if((type == MKTAG('s','u','b','p')) || 
(type == MKTAG('c','l','c','p'))) st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE; avio_rb32(pb); /* component manufacture */ @@ -2328,7 +2330,6 @@ static int mov_probe(AVProbeData *p) return score; } } - return score; } // must be done after parsing all trak because there's no order requirement @@ -2619,12 +2620,12 @@ static int mov_read_close(AVFormatContext *s) } AVInputFormat ff_mov_demuxer = { - "mov,mp4,m4a,3gp,3g2,mj2", - NULL_IF_CONFIG_SMALL("QuickTime/MPEG-4/Motion JPEG 2000 format"), - sizeof(MOVContext), - mov_probe, - mov_read_header, - mov_read_packet, - mov_read_close, - mov_read_seek, + .name = "mov,mp4,m4a,3gp,3g2,mj2", + .long_name = NULL_IF_CONFIG_SMALL("QuickTime/MPEG-4/Motion JPEG 2000 format"), + .priv_data_size = sizeof(MOVContext), + .read_probe = mov_probe, + .read_header = mov_read_header, + .read_packet = mov_read_packet, + .read_close = mov_read_close, + .read_seek = mov_read_seek, }; diff --git a/libavformat/movenc.c b/libavformat/movenc.c index ebfcacb107..bf429c0f48 100644 --- a/libavformat/movenc.c +++ b/libavformat/movenc.c @@ -32,6 +32,8 @@ #include "libavcodec/put_bits.h" #include "internal.h" #include "libavutil/avstring.h" +#include "libavutil/intfloat_readwrite.h" +#include "libavutil/mathematics.h" #include "libavutil/opt.h" #include "libavutil/dict.h" #include "rtpenc.h" @@ -2113,7 +2115,7 @@ static void mov_create_chapter_track(AVFormatContext *s, int tracknum) track->mode = mov->mode; track->tag = MKTAG('t','e','x','t'); track->timescale = MOV_TIMESCALE; - track->enc = avcodec_alloc_context(); + track->enc = avcodec_alloc_context3(NULL); track->enc->codec_type = AVMEDIA_TYPE_SUBTITLE; for (i = 0; i < s->nb_chapters; i++) { @@ -2140,6 +2142,7 @@ static int mov_write_header(AVFormatContext *s) { AVIOContext *pb = s->pb; MOVMuxContext *mov = s->priv_data; + AVDictionaryEntry *t; int i, hint_track = 0; if (!s->pb->seekable) { @@ -2270,7 +2273,15 @@ static int mov_write_header(AVFormatContext *s) } mov_write_mdat_tag(pb, mov); - mov->time = s->timestamp + 0x7C25B080; //1970 based -> 1904 based + +#if FF_API_TIMESTAMP + if (s->timestamp) + mov->time = s->timestamp; + else +#endif + if (t = av_dict_get(s->metadata, "creation_time", NULL, 0)) + mov->time = ff_iso8601_to_unix_time(t->value); + mov->time += 0x7C25B080; //1970 based -> 1904 based if (mov->chapter_track) mov_create_chapter_track(s, mov->chapter_track); @@ -2340,16 +2351,15 @@ static int mov_write_trailer(AVFormatContext *s) #if CONFIG_MOV_MUXER AVOutputFormat ff_mov_muxer = { - "mov", - NULL_IF_CONFIG_SMALL("MOV format"), - NULL, - "mov", - sizeof(MOVMuxContext), - CODEC_ID_AAC, - CODEC_ID_MPEG4, - mov_write_header, - ff_mov_write_packet, - mov_write_trailer, + .name = "mov", + .long_name = NULL_IF_CONFIG_SMALL("MOV format"), + .extensions = "mov", + .priv_data_size = sizeof(MOVMuxContext), + .audio_codec = CODEC_ID_AAC, + .video_codec = CODEC_ID_MPEG4, + .write_header = mov_write_header, + .write_packet = ff_mov_write_packet, + .write_trailer = mov_write_trailer, .flags = AVFMT_GLOBALHEADER, .codec_tag = (const AVCodecTag* const []){codec_movvideo_tags, codec_movaudio_tags, 0}, .priv_class = &mov_muxer_class, @@ -2357,16 +2367,15 @@ AVOutputFormat ff_mov_muxer = { #endif #if CONFIG_TGP_MUXER AVOutputFormat ff_tgp_muxer = { - "3gp", - NULL_IF_CONFIG_SMALL("3GP format"), - NULL, - "3gp", - sizeof(MOVMuxContext), - CODEC_ID_AMR_NB, - CODEC_ID_H263, - mov_write_header, - ff_mov_write_packet, - mov_write_trailer, + .name = "3gp", + .long_name = NULL_IF_CONFIG_SMALL("3GP format"), + .extensions = 
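
In the movenc.c hunk above the muxer now takes the creation time from the creation_time metadata entry (through ff_iso8601_to_unix_time) and then adds 0x7C25B080 to translate from the Unix epoch (1970-01-01) to the QuickTime/MP4 epoch (1904-01-01); 0x7C25B080 equals 2082844800 seconds, i.e. 66 years including 17 leap days. A minimal sketch of that conversion:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* seconds from 1904-01-01 to 1970-01-01, equal to 0x7C25B080 */
#define MOV_EPOCH_OFFSET INT64_C(2082844800)

static int64_t unix_to_mov_time(int64_t unix_seconds)
{
    return unix_seconds + MOV_EPOCH_OFFSET;
}

int main(void)
{
    int64_t t = 1312502400; /* an arbitrary Unix timestamp */
    printf("mvhd creation_time: %" PRId64 "\n", unix_to_mov_time(t));
    return 0;
}
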
"3gp", + .priv_data_size = sizeof(MOVMuxContext), + .audio_codec = CODEC_ID_AMR_NB, + .video_codec = CODEC_ID_H263, + .write_header = mov_write_header, + .write_packet = ff_mov_write_packet, + .write_trailer = mov_write_trailer, .flags = AVFMT_GLOBALHEADER, .codec_tag = (const AVCodecTag* const []){codec_3gp_tags, 0}, .priv_class = &mov_muxer_class, @@ -2374,16 +2383,16 @@ AVOutputFormat ff_tgp_muxer = { #endif #if CONFIG_MP4_MUXER AVOutputFormat ff_mp4_muxer = { - "mp4", - NULL_IF_CONFIG_SMALL("MP4 format"), - "application/mp4", - "mp4", - sizeof(MOVMuxContext), - CODEC_ID_AAC, - CODEC_ID_MPEG4, - mov_write_header, - ff_mov_write_packet, - mov_write_trailer, + .name = "mp4", + .long_name = NULL_IF_CONFIG_SMALL("MP4 format"), + .mime_type = "application/mp4", + .extensions = "mp4", + .priv_data_size = sizeof(MOVMuxContext), + .audio_codec = CODEC_ID_AAC, + .video_codec = CODEC_ID_MPEG4, + .write_header = mov_write_header, + .write_packet = ff_mov_write_packet, + .write_trailer = mov_write_trailer, .flags = AVFMT_GLOBALHEADER, .codec_tag = (const AVCodecTag* const []){ff_mp4_obj_type, 0}, .priv_class = &mov_muxer_class, @@ -2391,16 +2400,15 @@ AVOutputFormat ff_mp4_muxer = { #endif #if CONFIG_PSP_MUXER AVOutputFormat ff_psp_muxer = { - "psp", - NULL_IF_CONFIG_SMALL("PSP MP4 format"), - NULL, - "mp4,psp", - sizeof(MOVMuxContext), - CODEC_ID_AAC, - CODEC_ID_MPEG4, - mov_write_header, - ff_mov_write_packet, - mov_write_trailer, + .name = "psp", + .long_name = NULL_IF_CONFIG_SMALL("PSP MP4 format"), + .extensions = "mp4,psp", + .priv_data_size = sizeof(MOVMuxContext), + .audio_codec = CODEC_ID_AAC, + .video_codec = CODEC_ID_MPEG4, + .write_header = mov_write_header, + .write_packet = ff_mov_write_packet, + .write_trailer = mov_write_trailer, .flags = AVFMT_GLOBALHEADER, .codec_tag = (const AVCodecTag* const []){ff_mp4_obj_type, 0}, .priv_class = &mov_muxer_class, @@ -2408,16 +2416,15 @@ AVOutputFormat ff_psp_muxer = { #endif #if CONFIG_TG2_MUXER AVOutputFormat ff_tg2_muxer = { - "3g2", - NULL_IF_CONFIG_SMALL("3GP2 format"), - NULL, - "3g2", - sizeof(MOVMuxContext), - CODEC_ID_AMR_NB, - CODEC_ID_H263, - mov_write_header, - ff_mov_write_packet, - mov_write_trailer, + .name = "3g2", + .long_name = NULL_IF_CONFIG_SMALL("3GP2 format"), + .extensions = "3g2", + .priv_data_size = sizeof(MOVMuxContext), + .audio_codec = CODEC_ID_AMR_NB, + .video_codec = CODEC_ID_H263, + .write_header = mov_write_header, + .write_packet = ff_mov_write_packet, + .write_trailer = mov_write_trailer, .flags = AVFMT_GLOBALHEADER, .codec_tag = (const AVCodecTag* const []){codec_3gp_tags, 0}, .priv_class = &mov_muxer_class, @@ -2425,16 +2432,16 @@ AVOutputFormat ff_tg2_muxer = { #endif #if CONFIG_IPOD_MUXER AVOutputFormat ff_ipod_muxer = { - "ipod", - NULL_IF_CONFIG_SMALL("iPod H.264 MP4 format"), - "application/mp4", - "m4v,m4a", - sizeof(MOVMuxContext), - CODEC_ID_AAC, - CODEC_ID_H264, - mov_write_header, - ff_mov_write_packet, - mov_write_trailer, + .name = "ipod", + .long_name = NULL_IF_CONFIG_SMALL("iPod H.264 MP4 format"), + .mime_type = "application/mp4", + .extensions = "m4v,m4a", + .priv_data_size = sizeof(MOVMuxContext), + .audio_codec = CODEC_ID_AAC, + .video_codec = CODEC_ID_H264, + .write_header = mov_write_header, + .write_packet = ff_mov_write_packet, + .write_trailer = mov_write_trailer, .flags = AVFMT_GLOBALHEADER, .codec_tag = (const AVCodecTag* const []){codec_ipod_tags, 0}, .priv_class = &mov_muxer_class, diff --git a/libavformat/movenchint.c b/libavformat/movenchint.c index 4e69c73407..8e96355abc 100644 
--- a/libavformat/movenchint.c +++ b/libavformat/movenchint.c @@ -36,7 +36,7 @@ int ff_mov_init_hinting(AVFormatContext *s, int index, int src_index) track->tag = MKTAG('r','t','p',' '); track->src_track = src_index; - track->enc = avcodec_alloc_context(); + track->enc = avcodec_alloc_context3(NULL); if (!track->enc) goto fail; track->enc->codec_type = AVMEDIA_TYPE_DATA; diff --git a/libavformat/mp3dec.c b/libavformat/mp3dec.c index 64f6ea8c0e..73fce71cea 100644 --- a/libavformat/mp3dec.c +++ b/libavformat/mp3dec.c @@ -22,6 +22,7 @@ #include "libavutil/avstring.h" #include "libavutil/intreadwrite.h" #include "libavutil/dict.h" +#include "libavutil/mathematics.h" #include "avformat.h" #include "id3v2.h" #include "id3v1.h" @@ -187,12 +188,11 @@ static int mp3_read_packet(AVFormatContext *s, AVPacket *pkt) } AVInputFormat ff_mp3_demuxer = { - "mp3", - NULL_IF_CONFIG_SMALL("MPEG audio layer 2/3"), - 0, - mp3_read_probe, - mp3_read_header, - mp3_read_packet, + .name = "mp3", + .long_name = NULL_IF_CONFIG_SMALL("MPEG audio layer 2/3"), + .read_probe = mp3_read_probe, + .read_header = mp3_read_header, + .read_packet = mp3_read_packet, .flags= AVFMT_GENERIC_INDEX, .extensions = "mp2,mp3,m2a", /* XXX: use probe */ }; diff --git a/libavformat/mp3enc.c b/libavformat/mp3enc.c index 50342bb950..9caa65282b 100644 --- a/libavformat/mp3enc.c +++ b/libavformat/mp3enc.c @@ -21,10 +21,14 @@ #include <strings.h> #include "avformat.h" +#include "avio_internal.h" #include "id3v1.h" #include "id3v2.h" #include "rawenc.h" #include "libavutil/avstring.h" +#include "libavcodec/mpegaudio.h" +#include "libavcodec/mpegaudiodata.h" +#include "libavcodec/mpegaudiodecheader.h" #include "libavutil/intreadwrite.h" #include "libavutil/opt.h" #include "libavcodec/mpegaudio.h" @@ -132,47 +136,56 @@ static int id3v2_put_ttag(AVFormatContext *s, const char *str1, const char *str2 return len + ID3v2_HEADER_SIZE; } +#define VBR_NUM_BAGS 400 +#define VBR_TOC_SIZE 100 +typedef struct MP3Context { + const AVClass *class; + int id3v2_version; + int64_t frames_offset; + int32_t frames; + int32_t size; + uint32_t want; + uint32_t seen; + uint32_t pos; + uint64_t bag[VBR_NUM_BAGS]; +} MP3Context; + static int mp2_write_trailer(struct AVFormatContext *s) { uint8_t buf[ID3v1_TAG_SIZE]; + MP3Context *mp3 = s->priv_data; /* write the id3v1 tag */ if (id3v1_create_tag(s, buf) > 0) { avio_write(s->pb, buf, ID3v1_TAG_SIZE); - avio_flush(s->pb); } + + /* write number of frames */ + if (mp3 && mp3->frames_offset) { + avio_seek(s->pb, mp3->frames_offset, SEEK_SET); + avio_wb32(s->pb, s->streams[0]->nb_frames); + avio_seek(s->pb, 0, SEEK_END); + } + + avio_flush(s->pb); + return 0; } #if CONFIG_MP2_MUXER AVOutputFormat ff_mp2_muxer = { - "mp2", - NULL_IF_CONFIG_SMALL("MPEG audio layer 2"), - "audio/x-mpeg", - "mp2,m2a", - 0, - CODEC_ID_MP2, - CODEC_ID_NONE, - NULL, - ff_raw_write_packet, - mp2_write_trailer, + .name = "mp2", + .long_name = NULL_IF_CONFIG_SMALL("MPEG audio layer 2"), + .mime_type = "audio/x-mpeg", + .extensions = "mp2,m2a", + .audio_codec = CODEC_ID_MP2, + .video_codec = CODEC_ID_NONE, + .write_packet = ff_raw_write_packet, + .write_trailer = mp2_write_trailer, }; #endif #if CONFIG_MP3_MUXER -#define VBR_NUM_BAGS 400 -#define VBR_TOC_SIZE 100 -typedef struct MP3Context { - const AVClass *class; - int id3v2_version; - int64_t frames_offset; - int32_t frames; - int32_t size; - uint32_t want; - uint32_t seen; - uint32_t pos; - uint64_t bag[VBR_NUM_BAGS]; -} MP3Context; static const AVOption options[] = { { "id3v2_version", "Select 
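
mp3enc.c above remembers the offset of the frame-count field written in the header (frames_offset, presumably inside the Xing/Info tag) and, in the trailer, seeks back and overwrites it with the final stream nb_frames before returning to the end of the file. The same back-patching idea with plain stdio; the function and field handling are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Overwrite a 32-bit big-endian counter at a remembered offset, then return to EOF. */
static int patch_frame_count(FILE *f, long frames_offset, uint32_t nb_frames)
{
    uint8_t be[4] = {
        (uint8_t)(nb_frames >> 24), (uint8_t)(nb_frames >> 16),
        (uint8_t)(nb_frames >>  8), (uint8_t)(nb_frames),
    };
    if (fseek(f, frames_offset, SEEK_SET) != 0)
        return -1;
    if (fwrite(be, 1, sizeof(be), f) != sizeof(be))
        return -1;
    return fseek(f, 0, SEEK_END);   /* keep appending after the patch */
}
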
ID3v2 version to write. Currently 3 and 4 are supported.", @@ -444,17 +457,17 @@ static int mp3_write_trailer(AVFormatContext *s) } AVOutputFormat ff_mp3_muxer = { - "mp3", - NULL_IF_CONFIG_SMALL("MPEG audio layer 3"), - "audio/x-mpeg", - "mp3", - sizeof(MP3Context), - CODEC_ID_MP3, - CODEC_ID_NONE, - mp3_write_header, - mp3_write_packet, - mp3_write_trailer, - AVFMT_NOTIMESTAMPS, + .name = "mp3", + .long_name = NULL_IF_CONFIG_SMALL("MPEG audio layer 3"), + .mime_type = "audio/x-mpeg", + .extensions = "mp3", + .priv_data_size = sizeof(MP3Context), + .audio_codec = CODEC_ID_MP3, + .video_codec = CODEC_ID_NONE, + .write_header = mp3_write_header, + .write_packet = mp3_write_packet, + .write_trailer = mp3_write_trailer, + .flags = AVFMT_NOTIMESTAMPS, .priv_class = &mp3_muxer_class, }; #endif diff --git a/libavformat/mpc.c b/libavformat/mpc.c index 4d6854f13f..8027241b89 100644 --- a/libavformat/mpc.c +++ b/libavformat/mpc.c @@ -70,7 +70,15 @@ static int mpc_read_header(AVFormatContext *s, AVFormatParameters *ap) av_log(s, AV_LOG_ERROR, "Too many frames, seeking is not possible\n"); return -1; } - c->frames = av_malloc(c->fcount * sizeof(MPCFrame)); + if(c->fcount){ + c->frames = av_malloc(c->fcount * sizeof(MPCFrame)); + if(!c->frames){ + av_log(s, AV_LOG_ERROR, "Cannot allocate seektable\n"); + return AVERROR(ENOMEM); + } + }else{ + av_log(s, AV_LOG_WARNING, "Container reports no frames\n"); + } c->curframe = 0; c->lastframe = -1; c->curbits = 8; @@ -111,7 +119,7 @@ static int mpc_read_packet(AVFormatContext *s, AVPacket *pkt) int ret, size, size2, curbits, cur = c->curframe; int64_t tmp, pos; - if (c->curframe >= c->fcount) + if (c->curframe >= c->fcount && c->fcount) return -1; if(c->curframe != c->lastframe + 1){ @@ -133,7 +141,7 @@ static int mpc_read_packet(AVFormatContext *s, AVPacket *pkt) avio_seek(s->pb, pos, SEEK_SET); size = ((size2 + curbits + 31) & ~31) >> 3; - if(cur == c->frames_noted){ + if(cur == c->frames_noted && c->fcount){ c->frames[cur].pos = pos; c->frames[cur].size = size; c->frames[cur].skip = curbits - 20; @@ -146,7 +154,7 @@ static int mpc_read_packet(AVFormatContext *s, AVPacket *pkt) return AVERROR(EIO); pkt->data[0] = curbits; - pkt->data[1] = (c->curframe > c->fcount); + pkt->data[1] = (c->curframe > c->fcount) && c->fcount; pkt->data[2] = 0; pkt->data[3] = 0; @@ -214,13 +222,13 @@ static int mpc_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp AVInputFormat ff_mpc_demuxer = { - "mpc", - NULL_IF_CONFIG_SMALL("Musepack"), - sizeof(MPCContext), - mpc_probe, - mpc_read_header, - mpc_read_packet, - mpc_read_close, - mpc_read_seek, + .name = "mpc", + .long_name = NULL_IF_CONFIG_SMALL("Musepack"), + .priv_data_size = sizeof(MPCContext), + .read_probe = mpc_probe, + .read_header = mpc_read_header, + .read_packet = mpc_read_packet, + .read_close = mpc_read_close, + .read_seek = mpc_read_seek, .extensions = "mpc", }; diff --git a/libavformat/mpc8.c b/libavformat/mpc8.c index b18726c6db..7a4cbbc8b7 100644 --- a/libavformat/mpc8.c +++ b/libavformat/mpc8.c @@ -281,12 +281,11 @@ static int mpc8_read_seek(AVFormatContext *s, int stream_index, int64_t timestam AVInputFormat ff_mpc8_demuxer = { - "mpc8", - NULL_IF_CONFIG_SMALL("Musepack SV8"), - sizeof(MPCContext), - mpc8_probe, - mpc8_read_header, - mpc8_read_packet, - NULL, - mpc8_read_seek, + .name = "mpc8", + .long_name = NULL_IF_CONFIG_SMALL("Musepack SV8"), + .priv_data_size = sizeof(MPCContext), + .read_probe = mpc8_probe, + .read_header = mpc8_read_header, + .read_packet = mpc8_read_packet, + 
.read_seek = mpc8_read_seek, }; diff --git a/libavformat/mpeg.c b/libavformat/mpeg.c index 1bc4480cd8..dc5d264b68 100644 --- a/libavformat/mpeg.c +++ b/libavformat/mpeg.c @@ -606,14 +606,13 @@ static int64_t mpegps_read_dts(AVFormatContext *s, int stream_index, } AVInputFormat ff_mpegps_demuxer = { - "mpeg", - NULL_IF_CONFIG_SMALL("MPEG-PS format"), - sizeof(MpegDemuxContext), - mpegps_probe, - mpegps_read_header, - mpegps_read_packet, - NULL, - NULL, //mpegps_read_seek, - mpegps_read_dts, + .name = "mpeg", + .long_name = NULL_IF_CONFIG_SMALL("MPEG-PS format"), + .priv_data_size = sizeof(MpegDemuxContext), + .read_probe = mpegps_probe, + .read_header = mpegps_read_header, + .read_packet = mpegps_read_packet, + .read_seek = NULL, //mpegps_read_seek, + .read_timestamp = mpegps_read_dts, .flags = AVFMT_SHOW_IDS|AVFMT_TS_DISCONT, }; diff --git a/libavformat/mpegenc.c b/libavformat/mpegenc.c index 9c808d0f1e..2adba49e38 100644 --- a/libavformat/mpegenc.c +++ b/libavformat/mpegenc.c @@ -20,6 +20,7 @@ */ #include "libavutil/fifo.h" +#include "libavutil/mathematics.h" #include "libavcodec/put_bits.h" #include "avformat.h" #include "mpeg.h" @@ -1231,75 +1232,74 @@ static int mpeg_mux_end(AVFormatContext *ctx) #if CONFIG_MPEG1SYSTEM_MUXER AVOutputFormat ff_mpeg1system_muxer = { - "mpeg", - NULL_IF_CONFIG_SMALL("MPEG-1 System format"), - "video/mpeg", - "mpg,mpeg", - sizeof(MpegMuxContext), - CODEC_ID_MP2, - CODEC_ID_MPEG1VIDEO, - mpeg_mux_init, - mpeg_mux_write_packet, - mpeg_mux_end, + .name = "mpeg", + .long_name = NULL_IF_CONFIG_SMALL("MPEG-1 System format"), + .mime_type = "video/mpeg", + .extensions = "mpg,mpeg", + .priv_data_size = sizeof(MpegMuxContext), + .audio_codec = CODEC_ID_MP2, + .video_codec = CODEC_ID_MPEG1VIDEO, + .write_header = mpeg_mux_init, + .write_packet = mpeg_mux_write_packet, + .write_trailer = mpeg_mux_end, }; #endif #if CONFIG_MPEG1VCD_MUXER AVOutputFormat ff_mpeg1vcd_muxer = { - "vcd", - NULL_IF_CONFIG_SMALL("MPEG-1 System format (VCD)"), - "video/mpeg", - NULL, - sizeof(MpegMuxContext), - CODEC_ID_MP2, - CODEC_ID_MPEG1VIDEO, - mpeg_mux_init, - mpeg_mux_write_packet, - mpeg_mux_end, + .name = "vcd", + .long_name = NULL_IF_CONFIG_SMALL("MPEG-1 System format (VCD)"), + .mime_type = "video/mpeg", + .priv_data_size = sizeof(MpegMuxContext), + .audio_codec = CODEC_ID_MP2, + .video_codec = CODEC_ID_MPEG1VIDEO, + .write_header = mpeg_mux_init, + .write_packet = mpeg_mux_write_packet, + .write_trailer = mpeg_mux_end, }; #endif #if CONFIG_MPEG2VOB_MUXER AVOutputFormat ff_mpeg2vob_muxer = { - "vob", - NULL_IF_CONFIG_SMALL("MPEG-2 PS format (VOB)"), - "video/mpeg", - "vob", - sizeof(MpegMuxContext), - CODEC_ID_MP2, - CODEC_ID_MPEG2VIDEO, - mpeg_mux_init, - mpeg_mux_write_packet, - mpeg_mux_end, + .name = "vob", + .long_name = NULL_IF_CONFIG_SMALL("MPEG-2 PS format (VOB)"), + .mime_type = "video/mpeg", + .extensions = "vob", + .priv_data_size = sizeof(MpegMuxContext), + .audio_codec = CODEC_ID_MP2, + .video_codec = CODEC_ID_MPEG2VIDEO, + .write_header = mpeg_mux_init, + .write_packet = mpeg_mux_write_packet, + .write_trailer = mpeg_mux_end, }; #endif /* Same as mpeg2vob_mux except that the pack size is 2324 */ #if CONFIG_MPEG2SVCD_MUXER AVOutputFormat ff_mpeg2svcd_muxer = { - "svcd", - NULL_IF_CONFIG_SMALL("MPEG-2 PS format (VOB)"), - "video/mpeg", - "vob", - sizeof(MpegMuxContext), - CODEC_ID_MP2, - CODEC_ID_MPEG2VIDEO, - mpeg_mux_init, - mpeg_mux_write_packet, - mpeg_mux_end, + .name = "svcd", + .long_name = NULL_IF_CONFIG_SMALL("MPEG-2 PS format (VOB)"), + .mime_type = 
"video/mpeg", + .extensions = "vob", + .priv_data_size = sizeof(MpegMuxContext), + .audio_codec = CODEC_ID_MP2, + .video_codec = CODEC_ID_MPEG2VIDEO, + .write_header = mpeg_mux_init, + .write_packet = mpeg_mux_write_packet, + .write_trailer = mpeg_mux_end, }; #endif /* Same as mpeg2vob_mux except the 'is_dvd' flag is set to produce NAV pkts */ #if CONFIG_MPEG2DVD_MUXER AVOutputFormat ff_mpeg2dvd_muxer = { - "dvd", - NULL_IF_CONFIG_SMALL("MPEG-2 PS format (DVD VOB)"), - "video/mpeg", - "dvd", - sizeof(MpegMuxContext), - CODEC_ID_MP2, - CODEC_ID_MPEG2VIDEO, - mpeg_mux_init, - mpeg_mux_write_packet, - mpeg_mux_end, + .name = "dvd", + .long_name = NULL_IF_CONFIG_SMALL("MPEG-2 PS format (DVD VOB)"), + .mime_type = "video/mpeg", + .extensions = "dvd", + .priv_data_size = sizeof(MpegMuxContext), + .audio_codec = CODEC_ID_MP2, + .video_codec = CODEC_ID_MPEG2VIDEO, + .write_header = mpeg_mux_init, + .write_packet = mpeg_mux_write_packet, + .write_trailer = mpeg_mux_end, }; #endif diff --git a/libavformat/mpegts.c b/libavformat/mpegts.c index 89109c0689..a0c549644a 100644 --- a/libavformat/mpegts.c +++ b/libavformat/mpegts.c @@ -25,6 +25,7 @@ #include "libavutil/intreadwrite.h" #include "libavutil/log.h" #include "libavutil/dict.h" +#include "libavutil/mathematics.h" #include "libavutil/opt.h" #include "libavcodec/bytestream.h" #include "avformat.h" @@ -163,6 +164,7 @@ typedef struct PESContext { enum MpegTSState state; /* used to get the format */ int data_index; + int flags; /**< copied to the AVPacket flags */ int total_size; int pes_header_size; int extended_stream_id; @@ -232,11 +234,11 @@ static void set_pcr_pid(AVFormatContext *s, unsigned int programid, unsigned int } /** - * \brief discard_pid() decides if the pid is to be discarded according + * @brief discard_pid() decides if the pid is to be discarded according * to caller's programs selection - * \param ts : - TS context - * \param pid : - pid - * \return 1 if the pid is only comprised in programs that have .discard=AVDISCARD_ALL + * @param ts : - TS context + * @param pid : - pid + * @return 1 if the pid is only comprised in programs that have .discard=AVDISCARD_ALL * 0 otherwise */ static int discard_pid(MpegTSContext *ts, unsigned int pid) @@ -635,6 +637,12 @@ static void new_pes_packet(PESContext *pes, AVPacket *pkt) pkt->destruct = av_destruct_packet; pkt->data = pes->buffer; pkt->size = pes->data_index; + + if(pes->total_size != MAX_PES_PAYLOAD && + pes->pes_header_size + pes->data_index != pes->total_size + 6) { + av_log(pes->ts, AV_LOG_WARNING, "PES packet size mismatch\n"); + pes->flags |= AV_PKT_FLAG_CORRUPT; + } memset(pkt->data+pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE); // Separate out the AC3 substream from an HDMV combined TrueHD/AC3 PID @@ -646,12 +654,14 @@ static void new_pes_packet(PESContext *pes, AVPacket *pkt) pkt->dts = pes->dts; /* store position of first TS packet of this PES packet */ pkt->pos = pes->ts_packet_pos; + pkt->flags = pes->flags; /* reset pts values */ pes->pts = AV_NOPTS_VALUE; pes->dts = AV_NOPTS_VALUE; pes->buffer = NULL; pes->data_index = 0; + pes->flags = 0; } /* return non zero if a packet could be constructed */ @@ -1273,7 +1283,8 @@ static int handle_packet(MpegTSContext *ts, const uint8_t *packet) { AVFormatContext *s = ts->stream; MpegTSFilter *tss; - int len, pid, cc, cc_ok, afc, is_start; + int len, pid, cc, expected_cc, cc_ok, afc, is_start, is_discontinuity, + has_adaptation, has_payload; const uint8_t *p, *p_end; int64_t pos; @@ -1289,19 +1300,36 @@ static int 
handle_packet(MpegTSContext *ts, const uint8_t *packet) if (!tss) return 0; + afc = (packet[3] >> 4) & 3; + if (afc == 0) /* reserved value */ + return 0; + has_adaptation = afc & 2; + has_payload = afc & 1; + is_discontinuity = has_adaptation + && packet[4] != 0 /* with length > 0 */ + && (packet[5] & 0x80); /* and discontinuity indicated */ + /* continuity check (currently not used) */ cc = (packet[3] & 0xf); - cc_ok = (tss->last_cc < 0) || ((((tss->last_cc + 1) & 0x0f) == cc)); + expected_cc = has_payload ? (tss->last_cc + 1) & 0x0f : tss->last_cc; + cc_ok = pid == 0x1FFF // null packet PID + || is_discontinuity + || tss->last_cc < 0 + || expected_cc == cc; + tss->last_cc = cc; + if (!cc_ok) { + av_log(ts, AV_LOG_WARNING, "Continuity Check Failed\n"); + if(tss->type == MPEGTS_PES) { + PESContext *pc = tss->u.pes_filter.opaque; + pc->flags |= AV_PKT_FLAG_CORRUPT; + } + } - /* skip adaptation field */ - afc = (packet[3] >> 4) & 3; - p = packet + 4; - if (afc == 0) /* reserved value */ - return 0; - if (afc == 2) /* adaptation field only */ + if (!has_payload) return 0; - if (afc == 3) { + p = packet + 4; + if (has_adaptation) { /* skip adapation field */ p += p[0] + 1; } @@ -1402,7 +1430,22 @@ static int handle_packets(MpegTSContext *ts, int nb_packets) { AVFormatContext *s = ts->stream; uint8_t packet[TS_PACKET_SIZE]; - int packet_num, ret; + int packet_num, ret = 0; + + if (avio_tell(s->pb) != ts->last_pos) { + int i; +// av_dlog("Skipping after seek\n"); + /* seek detected, flush pes buffer */ + for (i = 0; i < NB_PID_MAX; i++) { + if (ts->pids[i] && ts->pids[i]->type == MPEGTS_PES) { + PESContext *pes = ts->pids[i]->u.pes_filter.opaque; + av_freep(&pes->buffer); + ts->pids[i]->last_cc = -1; + pes->data_index = 0; + pes->state = MPEGTS_SKIP; /* skip until pes header */ + } + } + } ts->stop_parse = 0; packet_num = 0; @@ -1414,12 +1457,13 @@ static int handle_packets(MpegTSContext *ts, int nb_packets) break; ret = read_packet(s, packet, ts->raw_packet_size); if (ret != 0) - return ret; + break; ret = handle_packet(ts, packet); if (ret != 0) - return ret; + break; } - return 0; + ts->last_pos = avio_tell(s->pb); + return ret; } static int mpegts_probe(AVProbeData *p) @@ -1494,10 +1538,6 @@ static int mpegts_read_header(AVFormatContext *s, if (ap) { if (ap->mpeg2ts_compute_pcr) ts->mpeg2ts_compute_pcr = ap->mpeg2ts_compute_pcr; - if(ap->mpeg2ts_raw){ - av_log(s, AV_LOG_ERROR, "use mpegtsraw_demuxer!\n"); - return -1; - } } #endif @@ -1518,7 +1558,7 @@ static int mpegts_read_header(AVFormatContext *s, /* normal demux */ /* first do a scaning to get all the services */ - if (avio_seek(pb, pos, SEEK_SET) < 0) + if (pb->seekable && avio_seek(pb, pos, SEEK_SET) < 0) av_log(s, AV_LOG_ERROR, "Unable to seek back to the start\n"); mpegts_open_section_filter(ts, SDT_PID, sdt_cb, ts, 1); @@ -1639,18 +1679,6 @@ static int mpegts_read_packet(AVFormatContext *s, MpegTSContext *ts = s->priv_data; int ret, i; - if (avio_tell(s->pb) != ts->last_pos) { - /* seek detected, flush pes buffer */ - for (i = 0; i < NB_PID_MAX; i++) { - if (ts->pids[i] && ts->pids[i]->type == MPEGTS_PES) { - PESContext *pes = ts->pids[i]->u.pes_filter.opaque; - av_freep(&pes->buffer); - pes->data_index = 0; - pes->state = MPEGTS_SKIP; /* skip until pes header */ - } - } - } - ts->pkt = pkt; ret = handle_packets(ts, 0); if (ret < 0) { @@ -1668,8 +1696,6 @@ static int mpegts_read_packet(AVFormatContext *s, } } - ts->last_pos = avio_tell(s->pb); - return ret; } @@ -1890,15 +1916,15 @@ void ff_mpegts_parse_close(MpegTSContext *ts) } 
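
The mpegts.c changes above tighten continuity_counter handling: the counter is only expected to advance on packets that carry payload, a discontinuity signalled in the adaptation field resets the check, and a failure now marks the resulting packet with AV_PKT_FLAG_CORRUPT instead of being ignored. The core rule, pulled out into a small helper for readability (illustrative, not the demuxer's code):

/* last_cc < 0 means "no packet seen yet on this PID". */
static int cc_is_ok(int last_cc, int cc, int has_payload, int discontinuity, int pid)
{
    int expected = has_payload ? (last_cc + 1) & 0x0F : last_cc;
    return pid == 0x1FFF        /* null packets are exempt */
        || discontinuity
        || last_cc < 0
        || cc == expected;
}
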
AVInputFormat ff_mpegts_demuxer = { - "mpegts", - NULL_IF_CONFIG_SMALL("MPEG-2 transport stream format"), - sizeof(MpegTSContext), - mpegts_probe, - mpegts_read_header, - mpegts_read_packet, - mpegts_read_close, - read_seek, - mpegts_get_pcr, + .name = "mpegts", + .long_name = NULL_IF_CONFIG_SMALL("MPEG-2 transport stream format"), + .priv_data_size = sizeof(MpegTSContext), + .read_probe = mpegts_probe, + .read_header = mpegts_read_header, + .read_packet = mpegts_read_packet, + .read_close = mpegts_read_close, + .read_seek = read_seek, + .read_timestamp = mpegts_get_pcr, .flags = AVFMT_SHOW_IDS|AVFMT_TS_DISCONT, #ifdef USE_SYNCPOINT_SEARCH .read_seek2 = read_seek2, @@ -1906,15 +1932,14 @@ AVInputFormat ff_mpegts_demuxer = { }; AVInputFormat ff_mpegtsraw_demuxer = { - "mpegtsraw", - NULL_IF_CONFIG_SMALL("MPEG-2 raw transport stream format"), - sizeof(MpegTSContext), - NULL, - mpegts_read_header, - mpegts_raw_read_packet, - mpegts_read_close, - read_seek, - mpegts_get_pcr, + .name = "mpegtsraw", + .long_name = NULL_IF_CONFIG_SMALL("MPEG-2 raw transport stream format"), + .priv_data_size = sizeof(MpegTSContext), + .read_header = mpegts_read_header, + .read_packet = mpegts_raw_read_packet, + .read_close = mpegts_read_close, + .read_seek = read_seek, + .read_timestamp = mpegts_get_pcr, .flags = AVFMT_SHOW_IDS|AVFMT_TS_DISCONT, #ifdef USE_SYNCPOINT_SEARCH .read_seek2 = read_seek2, diff --git a/libavformat/mpegtsenc.c b/libavformat/mpegtsenc.c index 7e9647257f..5daacaf3f6 100644 --- a/libavformat/mpegtsenc.c +++ b/libavformat/mpegtsenc.c @@ -22,6 +22,7 @@ #include "libavutil/bswap.h" #include "libavutil/crc.h" #include "libavutil/dict.h" +#include "libavutil/mathematics.h" #include "libavutil/opt.h" #include "libavcodec/mpegvideo.h" #include "avformat.h" @@ -203,6 +204,7 @@ typedef struct MpegTSWriteStream { int first_pts_check; ///< first pts check needed int64_t payload_pts; int64_t payload_dts; + int payload_flags; uint8_t payload[DEFAULT_PES_PAYLOAD_SIZE]; ADTSContext *adts; } MpegTSWriteStream; @@ -620,7 +622,7 @@ static int64_t get_pcr(const MpegTSWrite *ts, AVIOContext *pb) ts->first_pcr; } -static uint8_t* write_pcr_bits(uint8_t *buf, int64_t pcr) +static int write_pcr_bits(uint8_t *buf, int64_t pcr) { int64_t pcr_low = pcr % 300, pcr_high = pcr / 300; @@ -631,7 +633,7 @@ static uint8_t* write_pcr_bits(uint8_t *buf, int64_t pcr) *buf++ = pcr_high << 7 | pcr_low >> 8 | 0x7e; *buf++ = pcr_low; - return buf; + return 6; } /* Write a single null transport stream packet */ @@ -667,7 +669,7 @@ static void mpegts_insert_pcr_only(AVFormatContext *s, AVStream *st) *q++ = 0x10; /* Adaptation flags: PCR present */ /* PCR coded into 6 bytes */ - q = write_pcr_bits(q, get_pcr(ts, s->pb)); + q += write_pcr_bits(q, get_pcr(ts, s->pb)); /* stuffing bytes */ memset(q, 0xFF, TS_PACKET_SIZE - (q - buf)); @@ -688,6 +690,39 @@ static void write_pts(uint8_t *q, int fourbits, int64_t pts) *q++ = val; } +/* Set an adaptation field flag in an MPEG-TS packet*/ +static void set_af_flag(uint8_t *pkt, int flag) +{ + // expect at least one flag to set + assert(flag); + + if ((pkt[3] & 0x20) == 0) { + // no AF yet, set adaptation field flag + pkt[3] |= 0x20; + // 1 byte length, no flags + pkt[4] = 1; + pkt[5] = 0; + } + pkt[5] |= flag; +} + +/* Extend the adaptation field by size bytes */ +static void extend_af(uint8_t *pkt, int size) +{ + // expect already existing adaptation field + assert(pkt[3] & 0x20); + pkt[4] += size; +} + +/* Get a pointer to MPEG-TS payload (right after TS packet header) */ +static uint8_t 
*get_ts_payload_start(uint8_t *pkt) +{ + if (pkt[3] & 0x20) + return pkt + 5 + pkt[4]; + else + return pkt + 4; +} + /* Add a pes header to the front of payload, and segment into an integer number of * ts packets. The final ts packet is padded using an over-sized adaptation header * to exactly fill the last ts packet. @@ -695,7 +730,7 @@ static void write_pts(uint8_t *q, int fourbits, int64_t pts) */ static void mpegts_write_pes(AVFormatContext *s, AVStream *st, const uint8_t *payload, int payload_size, - int64_t pts, int64_t dts) + int64_t pts, int64_t dts, int key) { MpegTSWriteStream *ts_st = st->priv_data; MpegTSWrite *ts = s->priv_data; @@ -740,8 +775,17 @@ static void mpegts_write_pes(AVFormatContext *s, AVStream *st, *q++ = val; *q++ = ts_st->pid; ts_st->cc = (ts_st->cc + 1) & 0xf; - *q++ = 0x10 | ts_st->cc | (write_pcr ? 0x20 : 0); + *q++ = 0x10 | ts_st->cc; // payload indicator + CC + if (key && is_start && pts != AV_NOPTS_VALUE) { + // set Random Access for key frames + if (ts_st->pid == ts_st->service->pcr_pid) + write_pcr = 1; + set_af_flag(buf, 0x40); + q = get_ts_payload_start(buf); + } if (write_pcr) { + set_af_flag(buf, 0x10); + q = get_ts_payload_start(buf); // add 11, pcr references the last byte of program clock reference base if (ts->mux_rate > 1) pcr = get_pcr(ts, s->pb); @@ -749,9 +793,8 @@ static void mpegts_write_pes(AVFormatContext *s, AVStream *st, pcr = (dts - delay)*300; if (dts != AV_NOPTS_VALUE && dts < pcr / 300) av_log(s, AV_LOG_WARNING, "dts < pcr, TS is invalid\n"); - *q++ = 7; /* AFC length */ - *q++ = 0x10; /* flags: PCR present */ - q = write_pcr_bits(q, pcr); + extend_af(buf, write_pcr_bits(q, pcr)); + q = get_ts_payload_start(buf); } if (is_start) { int pes_extension = 0; @@ -949,20 +992,22 @@ static int mpegts_write_packet(AVFormatContext *s, AVPacket *pkt) if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO) { // for video and subtitle, write a single pes packet - mpegts_write_pes(s, st, buf, size, pts, dts); + mpegts_write_pes(s, st, buf, size, pts, dts, pkt->flags & AV_PKT_FLAG_KEY); av_free(data); return 0; } if (ts_st->payload_index + size > DEFAULT_PES_PAYLOAD_SIZE) { mpegts_write_pes(s, st, ts_st->payload, ts_st->payload_index, - ts_st->payload_pts, ts_st->payload_dts); + ts_st->payload_pts, ts_st->payload_dts, + ts_st->payload_flags & AV_PKT_FLAG_KEY); ts_st->payload_index = 0; } if (!ts_st->payload_index) { ts_st->payload_pts = pts; ts_st->payload_dts = dts; + ts_st->payload_flags = pkt->flags; } memcpy(ts_st->payload + ts_st->payload_index, buf, size); @@ -987,7 +1032,8 @@ static int mpegts_write_end(AVFormatContext *s) ts_st = st->priv_data; if (ts_st->payload_index > 0) { mpegts_write_pes(s, st, ts_st->payload, ts_st->payload_index, - ts_st->payload_pts, ts_st->payload_dts); + ts_st->payload_pts, ts_st->payload_dts, + ts_st->payload_flags & AV_PKT_FLAG_KEY); } av_freep(&ts_st->adts); } @@ -1005,15 +1051,15 @@ static int mpegts_write_end(AVFormatContext *s) } AVOutputFormat ff_mpegts_muxer = { - "mpegts", - NULL_IF_CONFIG_SMALL("MPEG-2 transport stream format"), - "video/x-mpegts", - "ts,m2t", - sizeof(MpegTSWrite), - CODEC_ID_MP2, - CODEC_ID_MPEG2VIDEO, - mpegts_write_header, - mpegts_write_packet, - mpegts_write_end, + .name = "mpegts", + .long_name = NULL_IF_CONFIG_SMALL("MPEG-2 transport stream format"), + .mime_type = "video/x-mpegts", + .extensions = "ts,m2t", + .priv_data_size = sizeof(MpegTSWrite), + .audio_codec = CODEC_ID_MP2, + .video_codec = CODEC_ID_MPEG2VIDEO, + .write_header = mpegts_write_header, + .write_packet = 
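
mpegtsenc.c above adds set_af_flag(), extend_af() and get_ts_payload_start() so the muxer can set the adaptation field's random_access_indicator (0x40) on key frames and append the PCR (flag 0x10) without tracking offsets by hand. A self-contained sketch that builds the first bytes of such a TS packet, following the ISO/IEC 13818-1 header layout; the function itself is an illustration, not the muxer's code:

#include <stdint.h>
#include <string.h>

#define TS_PACKET_SIZE 188

/* Fill in a TS header, optionally with a one-byte adaptation field carrying only
 * the random_access_indicator, and return where payload bytes may be written. */
static uint8_t *start_ts_packet(uint8_t pkt[TS_PACKET_SIZE], int pid, int cc, int keyframe)
{
    memset(pkt, 0xFF, TS_PACKET_SIZE);       /* stuffing by default */
    pkt[0] = 0x47;                           /* sync byte */
    pkt[1] = 0x40 | ((pid >> 8) & 0x1F);     /* payload_unit_start_indicator + PID high bits */
    pkt[2] = pid & 0xFF;
    pkt[3] = 0x10 | (cc & 0x0F);             /* payload present + continuity counter */
    if (keyframe) {
        pkt[3] |= 0x20;                      /* adaptation field present */
        pkt[4]  = 1;                         /* adaptation_field_length */
        pkt[5]  = 0x40;                      /* random_access_indicator */
    }
    return (pkt[3] & 0x20) ? pkt + 5 + pkt[4] : pkt + 4;
}
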
mpegts_write_packet, + .write_trailer = mpegts_write_end, .priv_class = &mpegts_muxer_class, }; diff --git a/libavformat/mpjpeg.c b/libavformat/mpjpeg.c index e6f6bcc5f6..46a98b3d44 100644 --- a/libavformat/mpjpeg.c +++ b/libavformat/mpjpeg.c @@ -54,14 +54,13 @@ static int mpjpeg_write_trailer(AVFormatContext *s) } AVOutputFormat ff_mpjpeg_muxer = { - "mpjpeg", - NULL_IF_CONFIG_SMALL("MIME multipart JPEG format"), - "multipart/x-mixed-replace;boundary=" BOUNDARY_TAG, - "mjpg", - 0, - CODEC_ID_NONE, - CODEC_ID_MJPEG, - mpjpeg_write_header, - mpjpeg_write_packet, - mpjpeg_write_trailer, + .name = "mpjpeg", + .long_name = NULL_IF_CONFIG_SMALL("MIME multipart JPEG format"), + .mime_type = "multipart/x-mixed-replace;boundary=" BOUNDARY_TAG, + .extensions = "mjpg", + .audio_codec = CODEC_ID_NONE, + .video_codec = CODEC_ID_MJPEG, + .write_header = mpjpeg_write_header, + .write_packet = mpjpeg_write_packet, + .write_trailer = mpjpeg_write_trailer, }; diff --git a/libavformat/msnwc_tcp.c b/libavformat/msnwc_tcp.c index 252ce34f6b..a08938a9df 100644 --- a/libavformat/msnwc_tcp.c +++ b/libavformat/msnwc_tcp.c @@ -131,10 +131,9 @@ static int msnwc_tcp_read_packet(AVFormatContext *ctx, AVPacket *pkt) } AVInputFormat ff_msnwc_tcp_demuxer = { - "msnwctcp", - NULL_IF_CONFIG_SMALL("MSN TCP Webcam stream"), - 0, - msnwc_tcp_probe, - msnwc_tcp_read_header, - msnwc_tcp_read_packet, + .name = "msnwctcp", + .long_name = NULL_IF_CONFIG_SMALL("MSN TCP Webcam stream"), + .read_probe = msnwc_tcp_probe, + .read_header = msnwc_tcp_read_header, + .read_packet = msnwc_tcp_read_packet, }; diff --git a/libavformat/mtv.c b/libavformat/mtv.c index 76e0862cf4..2fa48cc705 100644 --- a/libavformat/mtv.c +++ b/libavformat/mtv.c @@ -197,10 +197,10 @@ static int mtv_read_packet(AVFormatContext *s, AVPacket *pkt) } AVInputFormat ff_mtv_demuxer = { - "MTV", - NULL_IF_CONFIG_SMALL("MTV format"), - sizeof(MTVDemuxContext), - mtv_probe, - mtv_read_header, - mtv_read_packet, + .name = "MTV", + .long_name = NULL_IF_CONFIG_SMALL("MTV format"), + .priv_data_size = sizeof(MTVDemuxContext), + .read_probe = mtv_probe, + .read_header = mtv_read_header, + .read_packet = mtv_read_packet, }; diff --git a/libavformat/mvi.c b/libavformat/mvi.c index 4784efae0d..846af8d911 100644 --- a/libavformat/mvi.c +++ b/libavformat/mvi.c @@ -124,11 +124,10 @@ static int read_packet(AVFormatContext *s, AVPacket *pkt) } AVInputFormat ff_mvi_demuxer = { - "mvi", - NULL_IF_CONFIG_SMALL("Motion Pixels MVI format"), - sizeof(MviDemuxContext), - NULL, - read_header, - read_packet, + .name = "mvi", + .long_name = NULL_IF_CONFIG_SMALL("Motion Pixels MVI format"), + .priv_data_size = sizeof(MviDemuxContext), + .read_header = read_header, + .read_packet = read_packet, .extensions = "mvi" }; diff --git a/libavformat/mxfdec.c b/libavformat/mxfdec.c index 8548c792f6..c27fbfcd62 100644 --- a/libavformat/mxfdec.c +++ b/libavformat/mxfdec.c @@ -46,6 +46,7 @@ //#define DEBUG #include "libavutil/aes.h" +#include "libavutil/mathematics.h" #include "libavcodec/bytestream.h" #include "avformat.h" #include "mxf.h" @@ -599,7 +600,7 @@ static int mxf_read_generic_descriptor(void *arg, AVIOContext *pb, int tag, int default: /* Private uid used by SONY C0023S01.mxf */ if (IS_KLV_KEY(uid, mxf_sony_mpeg4_extradata)) { - descriptor->extradata = av_malloc(size); + descriptor->extradata = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE); if (!descriptor->extradata) return -1; descriptor->extradata_size = size; @@ -1015,12 +1016,12 @@ static int mxf_read_seek(AVFormatContext *s, int 
stream_index, int64_t sample_ti } AVInputFormat ff_mxf_demuxer = { - "mxf", - NULL_IF_CONFIG_SMALL("Material eXchange Format"), - sizeof(MXFContext), - mxf_probe, - mxf_read_header, - mxf_read_packet, - mxf_read_close, - mxf_read_seek, + .name = "mxf", + .long_name = NULL_IF_CONFIG_SMALL("Material eXchange Format"), + .priv_data_size = sizeof(MXFContext), + .read_probe = mxf_probe, + .read_header = mxf_read_header, + .read_packet = mxf_read_packet, + .read_close = mxf_read_close, + .read_seek = mxf_read_seek, }; diff --git a/libavformat/mxfenc.c b/libavformat/mxfenc.c index c6532a3427..b84504cf5c 100644 --- a/libavformat/mxfenc.c +++ b/libavformat/mxfenc.c @@ -39,6 +39,7 @@ #include "libavcodec/bytestream.h" #include "audiointerleave.h" #include "avformat.h" +#include "internal.h" #include "mxf.h" static const int NTSC_samples_per_frame[] = { 1602, 1601, 1602, 1601, 1602, 0 }; @@ -1407,6 +1408,8 @@ static int mxf_write_header(AVFormatContext *s) int i; uint8_t present[FF_ARRAY_ELEMS(mxf_essence_container_uls)] = {0}; const int *samples_per_frame = NULL; + AVDictionaryEntry *t; + int64_t timestamp = 0; if (!s->nb_streams) return -1; @@ -1512,8 +1515,15 @@ static int mxf_write_header(AVFormatContext *s) sc->order = AV_RB32(sc->track_essence_element_key+12); } +#if FF_API_TIMESTAMP if (s->timestamp) - mxf->timestamp = mxf_parse_timestamp(s->timestamp); + timestamp = s->timestamp; + else +#endif + if (t = av_dict_get(s->metadata, "creation_time", NULL, 0)) + timestamp = ff_iso8601_to_unix_time(t->value); + if (timestamp) + mxf->timestamp = mxf_parse_timestamp(timestamp); mxf->duration = -1; mxf->timecode_track = av_mallocz(sizeof(*mxf->timecode_track)); @@ -1539,7 +1549,7 @@ static const uint8_t system_metadata_package_set_key[] = { 0x06,0x0E,0x2B,0x34,0 static uint32_t ff_framenum_to_12m_time_code(unsigned frame, int drop, int fps) { return (0 << 31) | // color frame flag - (0 << 30) | // drop frame flag + (drop << 30) | // drop frame flag ( ((frame % fps) / 10) << 28) | // tens of frames ( ((frame % fps) % 10) << 24) | // units of frames (0 << 23) | // field phase (NTSC), b0 (PAL) @@ -1549,7 +1559,7 @@ static uint32_t ff_framenum_to_12m_time_code(unsigned frame, int drop, int fps) ((((frame / (fps * 60)) % 60) / 10) << 12) | // tens of minutes ((((frame / (fps * 60)) % 60) % 10) << 8) | // units of minutes (0 << 7) | // b1 - (0 << 6) | // b2 (NSC), field phase (PAL) + (0 << 6) | // b2 (NTSC), field phase (PAL) ((((frame / (fps * 3600) % 24)) / 10) << 4) | // tens of hours ( (frame / (fps * 3600) % 24)) % 10; // units of hours } @@ -1880,33 +1890,30 @@ static int mxf_interleave(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int } AVOutputFormat ff_mxf_muxer = { - "mxf", - NULL_IF_CONFIG_SMALL("Material eXchange Format"), - "application/mxf", - "mxf", - sizeof(MXFContext), - CODEC_ID_PCM_S16LE, - CODEC_ID_MPEG2VIDEO, - mxf_write_header, - mxf_write_packet, - mxf_write_footer, - AVFMT_NOTIMESTAMPS, - NULL, - mxf_interleave, + .name = "mxf", + .long_name = NULL_IF_CONFIG_SMALL("Material eXchange Format"), + .mime_type = "application/mxf", + .extensions = "mxf", + .priv_data_size = sizeof(MXFContext), + .audio_codec = CODEC_ID_PCM_S16LE, + .video_codec = CODEC_ID_MPEG2VIDEO, + .write_header = mxf_write_header, + .write_packet = mxf_write_packet, + .write_trailer = mxf_write_footer, + .flags = AVFMT_NOTIMESTAMPS, + .interleave_packet = mxf_interleave, }; AVOutputFormat ff_mxf_d10_muxer = { - "mxf_d10", - NULL_IF_CONFIG_SMALL("Material eXchange Format, D-10 Mapping"), - "application/mxf", - 
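Two functional fixes hide in the mxfenc.c hunks above: the muxer now derives its start timestamp from the creation_time metadata entry via ff_iso8601_to_unix_time() when the deprecated AVFormatContext.timestamp field is unset, and ff_framenum_to_12m_time_code() finally honours the drop-frame argument instead of hardcoding 0 (the b2/NTSC comment typo is fixed as well). Ignoring the always-zero colour-frame, field-phase and binary-group bits, the SMPTE 12M packing that function performs amounts to this sketch:

#include <stdint.h>

static uint32_t frames_to_12m(unsigned frame, int drop, int fps)
{
    unsigned ff = frame % fps;                 /* frames  */
    unsigned ss = (frame / fps) % 60;          /* seconds */
    unsigned mm = (frame / (fps * 60)) % 60;   /* minutes */
    unsigned hh = (frame / (fps * 3600)) % 24; /* hours   */

    return ((uint32_t)drop << 30) |            /* drop-frame flag, now passed through */
           ((ff / 10) << 28) | ((ff % 10) << 24) |
           ((ss / 10) << 20) | ((ss % 10) << 16) |
           ((mm / 10) << 12) | ((mm % 10) <<  8) |
           ((hh / 10) <<  4) |  (hh % 10);     /* BCD hh:mm:ss:ff */
}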
NULL, - sizeof(MXFContext), - CODEC_ID_PCM_S16LE, - CODEC_ID_MPEG2VIDEO, - mxf_write_header, - mxf_write_packet, - mxf_write_footer, - AVFMT_NOTIMESTAMPS, - NULL, - mxf_interleave, + .name = "mxf_d10", + .long_name = NULL_IF_CONFIG_SMALL("Material eXchange Format, D-10 Mapping"), + .mime_type = "application/mxf", + .priv_data_size = sizeof(MXFContext), + .audio_codec = CODEC_ID_PCM_S16LE, + .video_codec = CODEC_ID_MPEG2VIDEO, + .write_header = mxf_write_header, + .write_packet = mxf_write_packet, + .write_trailer = mxf_write_footer, + .flags = AVFMT_NOTIMESTAMPS, + .interleave_packet = mxf_interleave, }; diff --git a/libavformat/ncdec.c b/libavformat/ncdec.c index c95b4bde9d..e04a6aae31 100644 --- a/libavformat/ncdec.c +++ b/libavformat/ncdec.c @@ -91,11 +91,10 @@ static int nc_read_packet(AVFormatContext *s, AVPacket *pkt) } AVInputFormat ff_nc_demuxer = { - "nc", - NULL_IF_CONFIG_SMALL("NC camera feed format"), - 0, - nc_probe, - nc_read_header, - nc_read_packet, + .name = "nc", + .long_name = NULL_IF_CONFIG_SMALL("NC camera feed format"), + .read_probe = nc_probe, + .read_header = nc_read_header, + .read_packet = nc_read_packet, .extensions = "v", }; diff --git a/libavformat/nsvdec.c b/libavformat/nsvdec.c index 4898187f3a..bf03cecc0d 100644 --- a/libavformat/nsvdec.c +++ b/libavformat/nsvdec.c @@ -18,6 +18,8 @@ * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ + +#include "libavutil/mathematics.h" #include "avformat.h" #include "riff.h" #include "libavutil/dict.h" @@ -768,12 +770,12 @@ static int nsv_probe(AVProbeData *p) } AVInputFormat ff_nsv_demuxer = { - "nsv", - NULL_IF_CONFIG_SMALL("Nullsoft Streaming Video"), - sizeof(NSVContext), - nsv_probe, - nsv_read_header, - nsv_read_packet, - nsv_read_close, - nsv_read_seek, + .name = "nsv", + .long_name = NULL_IF_CONFIG_SMALL("Nullsoft Streaming Video"), + .priv_data_size = sizeof(NSVContext), + .read_probe = nsv_probe, + .read_header = nsv_read_header, + .read_packet = nsv_read_packet, + .read_close = nsv_read_close, + .read_seek = nsv_read_seek, }; diff --git a/libavformat/nullenc.c b/libavformat/nullenc.c index 1a7d42e924..2ab92dff75 100644 --- a/libavformat/nullenc.c +++ b/libavformat/nullenc.c @@ -27,14 +27,10 @@ static int null_write_packet(struct AVFormatContext *s, AVPacket *pkt) } AVOutputFormat ff_null_muxer = { - "null", - NULL_IF_CONFIG_SMALL("raw null video format"), - NULL, - NULL, - 0, - AV_NE(CODEC_ID_PCM_S16BE, CODEC_ID_PCM_S16LE), - CODEC_ID_RAWVIDEO, - NULL, - null_write_packet, + .name = "null", + .long_name = NULL_IF_CONFIG_SMALL("raw null video format"), + .audio_codec = AV_NE(CODEC_ID_PCM_S16BE, CODEC_ID_PCM_S16LE), + .video_codec = CODEC_ID_RAWVIDEO, + .write_packet = null_write_packet, .flags = AVFMT_NOFILE | AVFMT_RAWPICTURE | AVFMT_NOTIMESTAMPS, }; diff --git a/libavformat/nut.c b/libavformat/nut.c index c31f53a189..2a5e6fe567 100644 --- a/libavformat/nut.c +++ b/libavformat/nut.c @@ -19,6 +19,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +#include "libavutil/mathematics.h" #include "libavutil/tree.h" #include "nut.h" #include "internal.h" diff --git a/libavformat/nutdec.c b/libavformat/nutdec.c index 8af0c48bfc..a6be493a62 100644 --- a/libavformat/nutdec.c +++ b/libavformat/nutdec.c @@ -24,6 +24,7 @@ #include "libavutil/avstring.h" #include "libavutil/bswap.h" #include "libavutil/dict.h" +#include "libavutil/mathematics.h" #include "libavutil/tree.h" #include 
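nullenc.c above keeps selecting its default audio codec with AV_NE(), i.e. the PCM variant that matches the build machine's byte order. FFmpeg resolves this at configure time through its HAVE_BIGENDIAN define; the stand-in below probes the byte order at run time just to show the idea:

#include <stdint.h>

static int host_is_big_endian(void)
{
    const uint16_t probe = 0x0102;
    return *(const uint8_t *)&probe == 0x01;
}

/* Stand-in for AV_NE(be, le): pick the value matching native endianness. */
#define NATIVE_ENDIAN(be, le) (host_is_big_endian() ? (be) : (le))

enum codec_id { PCM_S16BE = 1, PCM_S16LE = 2 };

static enum codec_id default_audio_codec(void)
{
    return NATIVE_ENDIAN(PCM_S16BE, PCM_S16LE);
}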
"avio_internal.h" #include "nut.h" @@ -927,14 +928,14 @@ static int nut_read_close(AVFormatContext *s) #if CONFIG_NUT_DEMUXER AVInputFormat ff_nut_demuxer = { - "nut", - NULL_IF_CONFIG_SMALL("NUT format"), - sizeof(NUTContext), - nut_probe, - nut_read_header, - nut_read_packet, - nut_read_close, - read_seek, + .name = "nut", + .long_name = NULL_IF_CONFIG_SMALL("NUT format"), + .priv_data_size = sizeof(NUTContext), + .read_probe = nut_probe, + .read_header = nut_read_header, + .read_packet = nut_read_packet, + .read_close = nut_read_close, + .read_seek = read_seek, .extensions = "nut", .codec_tag = (const AVCodecTag * const []) { ff_codec_bmp_tags, ff_nut_video_tags, ff_codec_wav_tags, ff_nut_subtitle_tags, 0 }, }; diff --git a/libavformat/nutenc.c b/libavformat/nutenc.c index 6ec9bcd8fd..624da98c5d 100644 --- a/libavformat/nutenc.c +++ b/libavformat/nutenc.c @@ -20,6 +20,7 @@ */ #include "libavutil/intreadwrite.h" +#include "libavutil/mathematics.h" #include "libavutil/tree.h" #include "libavutil/dict.h" #include "libavcodec/mpegaudiodata.h" @@ -861,22 +862,22 @@ static int write_trailer(AVFormatContext *s){ } AVOutputFormat ff_nut_muxer = { - "nut", - NULL_IF_CONFIG_SMALL("NUT format"), - "video/x-nut", - "nut", - sizeof(NUTContext), + .name = "nut", + .long_name = NULL_IF_CONFIG_SMALL("NUT format"), + .mime_type = "video/x-nut", + .extensions = "nut", + .priv_data_size = sizeof(NUTContext), #if CONFIG_LIBVORBIS - CODEC_ID_VORBIS, + .audio_codec = CODEC_ID_VORBIS, #elif CONFIG_LIBMP3LAME - CODEC_ID_MP3, + .audio_codec = CODEC_ID_MP3, #else - CODEC_ID_MP2, + .audio_codec = CODEC_ID_MP2, #endif - CODEC_ID_MPEG4, - write_header, - write_packet, - write_trailer, + .video_codec = CODEC_ID_MPEG4, + .write_header = write_header, + .write_packet = write_packet, + .write_trailer = write_trailer, .flags = AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS, .codec_tag = (const AVCodecTag * const []){ ff_codec_bmp_tags, ff_nut_video_tags, ff_codec_wav_tags, ff_nut_subtitle_tags, 0 }, }; diff --git a/libavformat/nuv.c b/libavformat/nuv.c index db31689753..607dcdc201 100644 --- a/libavformat/nuv.c +++ b/libavformat/nuv.c @@ -20,6 +20,7 @@ */ #include "libavutil/intreadwrite.h" +#include "libavutil/intfloat_readwrite.h" #include "avformat.h" #include "riff.h" @@ -49,11 +50,11 @@ static int nuv_probe(AVProbeData *p) { #define PKTSIZE(s) (s & 0xffffff) /** - * \brief read until we found all data needed for decoding - * \param vst video stream of which to change parameters - * \param ast video stream of which to change parameters - * \param myth set if this is a MythTVVideo format file - * \return 1 if all required codec data was found + * @brief read until we found all data needed for decoding + * @param vst video stream of which to change parameters + * @param ast video stream of which to change parameters + * @param myth set if this is a MythTVVideo format file + * @return 1 if all required codec data was found */ static int get_codec_data(AVIOContext *pb, AVStream *vst, AVStream *ast, int myth) { @@ -258,13 +259,11 @@ static int nuv_packet(AVFormatContext *s, AVPacket *pkt) { } AVInputFormat ff_nuv_demuxer = { - "nuv", - NULL_IF_CONFIG_SMALL("NuppelVideo format"), - sizeof(NUVContext), - nuv_probe, - nuv_header, - nuv_packet, - NULL, - NULL, + .name = "nuv", + .long_name = NULL_IF_CONFIG_SMALL("NuppelVideo format"), + .priv_data_size = sizeof(NUVContext), + .read_probe = nuv_probe, + .read_header = nuv_header, + .read_packet = nuv_packet, .flags = AVFMT_GENERIC_INDEX, }; diff --git a/libavformat/oggdec.c 
b/libavformat/oggdec.c index dc9f7b62fd..cd5592208a 100644 --- a/libavformat/oggdec.c +++ b/libavformat/oggdec.c @@ -201,7 +201,7 @@ static int ogg_read_page(AVFormatContext *s, int *str) uint8_t sync[4]; int sp = 0; - ret = avio_read (bc, sync, 4); + ret = avio_read(bc, sync, 4); if (ret < 4) return ret < 0 ? ret : AVERROR_EOF; @@ -259,7 +259,7 @@ static int ogg_read_page(AVFormatContext *s, int *str) if(os->psize > 0) ogg_new_buf(ogg, idx); - ret = avio_read (bc, os->segments, nsegs); + ret = avio_read(bc, os->segments, nsegs); if (ret < nsegs) return ret < 0 ? ret : AVERROR_EOF; @@ -292,7 +292,7 @@ static int ogg_read_page(AVFormatContext *s, int *str) os->buf = nb; } - ret = avio_read (bc, os->buf + os->bufpos, size); + ret = avio_read(bc, os->buf + os->bufpos, size); if (ret < size) return ret < 0 ? ret : AVERROR_EOF; @@ -321,7 +321,7 @@ static int ogg_packet(AVFormatContext *s, int *str, int *dstart, int *dsize, idx = ogg->curidx; while (idx < 0){ - ret = ogg_read_page (s, &idx); + ret = ogg_read_page(s, &idx); if (ret < 0) return ret; } @@ -437,7 +437,7 @@ static int ogg_get_headers(AVFormatContext *s) int ret; do{ - ret = ogg_packet (s, NULL, NULL, NULL, NULL); + ret = ogg_packet(s, NULL, NULL, NULL, NULL); if (ret < 0) return ret; }while (!ogg->headers); @@ -501,10 +501,9 @@ static int ogg_read_header(AVFormatContext *s, AVFormatParameters *ap) int ret, i; ogg->curidx = -1; //linear headers seek from start - ret = ogg_get_headers (s); - if (ret < 0){ + ret = ogg_get_headers(s); + if (ret < 0) return ret; - } for (i = 0; i < ogg->nstreams; i++) if (ogg->streams[i].header < 0) @@ -551,15 +550,16 @@ static int ogg_read_packet(AVFormatContext *s, AVPacket *pkt) { struct ogg *ogg; struct ogg_stream *os; - int idx = -1; + int idx = -1, ret; int pstart, psize; int64_t fpos, pts, dts; //Get an ogg packet retry: do{ - if (ogg_packet (s, &idx, &pstart, &psize, &fpos) < 0) - return AVERROR(EIO); + ret = ogg_packet(s, &idx, &pstart, &psize, &fpos); + if (ret < 0) + return ret; }while (idx < 0 || !s->streams[idx]); ogg = s->priv_data; @@ -573,8 +573,9 @@ retry: os->keyframe_seek = 0; //Alloc a pkt - if (av_new_packet (pkt, psize) < 0) - return AVERROR(EIO); + ret = av_new_packet(pkt, psize); + if (ret < 0) + return ret; pkt->stream_index = idx; memcpy (pkt->data, os->buf + pstart, psize); @@ -604,15 +605,15 @@ static int64_t ogg_read_timestamp(AVFormatContext *s, int stream_index, int64_t *pos_arg, int64_t pos_limit) { struct ogg *ogg = s->priv_data; - struct ogg_stream *os = ogg->streams + stream_index; AVIOContext *bc = s->pb; int64_t pts = AV_NOPTS_VALUE; - int i; + int i = -1; avio_seek(bc, *pos_arg, SEEK_SET); ogg_reset(ogg); while (avio_tell(bc) < pos_limit && !ogg_packet(s, &i, NULL, NULL, pos_arg)) { if (i == stream_index) { + struct ogg_stream *os = ogg->streams + stream_index; pts = ogg_calc_pts(s, i, NULL); if (os->keyframe_seek && !(os->pflags & AV_PKT_FLAG_KEY)) pts = AV_NOPTS_VALUE; @@ -638,6 +639,7 @@ static int ogg_read_seek(AVFormatContext *s, int stream_index, os->keyframe_seek = 1; ret = av_seek_frame_binary(s, stream_index, timestamp, flags); + os = ogg->streams + stream_index; if (ret < 0) os->keyframe_seek = 0; return ret; diff --git a/libavformat/oggenc.c b/libavformat/oggenc.c index 04f536f157..ab35e86293 100644 --- a/libavformat/oggenc.c +++ b/libavformat/oggenc.c @@ -21,6 +21,7 @@ #include "libavutil/crc.h" #include "libavutil/opt.h" +#include "libavutil/mathematics.h" #include "libavutil/random_seed.h" #include "libavcodec/xiph.h" #include "libavcodec/bytestream.h" 
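Two separate cleanups run through the oggdec.c hunks above: ogg_read_packet()/ogg_get_headers() now forward the real error codes coming out of ogg_packet() and av_new_packet() instead of flattening every failure into EIO, and the ogg_stream pointer is looked up from ogg->streams only after calls that can append streams. That array is grown with realloc as new logical streams show up, so a pointer saved before av_seek_frame_binary() or ogg_packet() may dangle afterwards. A generic sketch of the hazard and the fix, with illustrative names rather than the FFmpeg API:

#include <stdlib.h>

struct stream { int keyframe_seek; };
struct demux  { struct stream *streams; int nstreams; };

static int grow(struct demux *d)
{
    void *p = realloc(d->streams, (d->nstreams + 1) * sizeof(*d->streams));
    if (!p)
        return -1;
    d->streams = p;                 /* base pointer may have moved */
    d->streams[d->nstreams++] = (struct stream){0};
    return 0;
}

static void seek_stream(struct demux *d, int idx)
{
    d->streams[idx].keyframe_seek = 1;
    if (grow(d) < 0)                      /* may reallocate d->streams */
        return;
    struct stream *os = d->streams + idx; /* re-derive after the call, */
    os->keyframe_seek = 0;                /* as the patch now does     */
}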
@@ -524,15 +525,15 @@ static int ogg_write_trailer(AVFormatContext *s) } AVOutputFormat ff_ogg_muxer = { - "ogg", - NULL_IF_CONFIG_SMALL("Ogg"), - "application/ogg", - "ogg,ogv,spx", - sizeof(OGGContext), - CODEC_ID_FLAC, - CODEC_ID_THEORA, - ogg_write_header, - ogg_write_packet, - ogg_write_trailer, + .name = "ogg", + .long_name = NULL_IF_CONFIG_SMALL("Ogg"), + .mime_type = "application/ogg", + .extensions = "ogg,ogv,spx", + .priv_data_size = sizeof(OGGContext), + .audio_codec = CODEC_ID_FLAC, + .video_codec = CODEC_ID_THEORA, + .write_header = ogg_write_header, + .write_packet = ogg_write_packet, + .write_trailer = ogg_write_trailer, .priv_class = &ogg_muxer_class, }; diff --git a/libavformat/oma.c b/libavformat/oma.c index aac96d14e5..1ab30688c6 100644 --- a/libavformat/oma.c +++ b/libavformat/oma.c @@ -149,7 +149,6 @@ static int oma_read_header(AVFormatContext *s, default: av_log(s, AV_LOG_ERROR, "Unsupported codec %d!\n",buf[32]); return -1; - break; } st->codec->block_align = framesize; @@ -193,14 +192,12 @@ static int oma_read_probe(AVProbeData *p) AVInputFormat ff_oma_demuxer = { - "oma", - NULL_IF_CONFIG_SMALL("Sony OpenMG audio"), - 0, - oma_read_probe, - oma_read_header, - oma_read_packet, - 0, - pcm_read_seek, + .name = "oma", + .long_name = NULL_IF_CONFIG_SMALL("Sony OpenMG audio"), + .read_probe = oma_read_probe, + .read_header = oma_read_header, + .read_packet = oma_read_packet, + .read_seek = pcm_read_seek, .flags= AVFMT_GENERIC_INDEX, .extensions = "oma,aa3", .codec_tag= (const AVCodecTag* const []){codec_oma_tags, 0}, diff --git a/libavformat/options.c b/libavformat/options.c index ef3261b8b6..4451b5e147 100644 --- a/libavformat/options.c +++ b/libavformat/options.c @@ -79,6 +79,7 @@ static const AVOption options[]={ #if FF_API_FLAG_RTP_HINT {"rtphint", "add rtp hinting (deprecated, use the -movflags rtphint option instead)", 0, FF_OPT_TYPE_CONST, {.dbl = AVFMT_FLAG_RTP_HINT }, INT_MIN, INT_MAX, E, "fflags"}, #endif +{"discardcorrupt", "discard corrupted frames", 0, FF_OPT_TYPE_CONST, {.dbl = AVFMT_FLAG_DISCARD_CORRUPT }, INT_MIN, INT_MAX, D, "fflags"}, {"sortdts", "try to interleave outputted packets by dts", 0, FF_OPT_TYPE_CONST, {.dbl = AVFMT_FLAG_SORT_DTS }, INT_MIN, INT_MAX, D, "fflags"}, {"keepside", "dont merge side data", 0, FF_OPT_TYPE_CONST, {.dbl = AVFMT_FLAG_KEEP_SIDE_DATA }, INT_MIN, INT_MAX, D, "fflags"}, {"latm", "enable RTP MP4A-LATM payload", 0, FF_OPT_TYPE_CONST, {.dbl = AVFMT_FLAG_MP4A_LATM }, INT_MIN, INT_MAX, E, "fflags"}, diff --git a/libavformat/pcm.c b/libavformat/pcm.c index def183c5b2..d66be59ccb 100644 --- a/libavformat/pcm.c +++ b/libavformat/pcm.c @@ -19,6 +19,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +#include "libavutil/mathematics.h" #include "avformat.h" #include "pcm.h" diff --git a/libavformat/psxstr.c b/libavformat/psxstr.c index 744ae94459..4376e743c4 100644 --- a/libavformat/psxstr.c +++ b/libavformat/psxstr.c @@ -234,7 +234,6 @@ static int str_read_packet(AVFormatContext *s, pkt->stream_index = str->channels[channel].audio_stream_index; return 0; - break; default: av_log(s, AV_LOG_WARNING, "Unknown sector type %02X\n", sector[0x12]); /* drop the sector and move on */ @@ -259,11 +258,11 @@ static int str_read_close(AVFormatContext *s) } AVInputFormat ff_str_demuxer = { - "psxstr", - NULL_IF_CONFIG_SMALL("Sony Playstation STR format"), - sizeof(StrDemuxContext), - str_probe, - str_read_header, - str_read_packet, - str_read_close, + .name = "psxstr", + .long_name = NULL_IF_CONFIG_SMALL("Sony 
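options.c above exposes AVFMT_FLAG_DISCARD_CORRUPT as a new "discardcorrupt" constant under -fflags, letting callers ask libavformat to drop packets a demuxer has marked as damaged. The general plumbing looks like the sketch below; the constants and fields are stand-ins, not the real libavformat names:

/* A named option constant sets a bit in the context flags; the demux
 * loop then drops packets carrying a corrupt flag when that bit is on. */
#define FLAG_DISCARD_CORRUPT 0x0100
#define PKT_FLAG_CORRUPT     0x0002

struct ctx    { int flags; };
struct packet { int flags; };

static int keep_packet(const struct ctx *c, const struct packet *p)
{
    if ((c->flags & FLAG_DISCARD_CORRUPT) && (p->flags & PKT_FLAG_CORRUPT))
        return 0;   /* drop it */
    return 1;
}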
Playstation STR format"), + .priv_data_size = sizeof(StrDemuxContext), + .read_probe = str_probe, + .read_header = str_read_header, + .read_packet = str_read_packet, + .read_close = str_read_close, }; diff --git a/libavformat/pva.c b/libavformat/pva.c index 8e8c060a65..fda5fc3867 100644 --- a/libavformat/pva.c +++ b/libavformat/pva.c @@ -201,11 +201,11 @@ static int64_t pva_read_timestamp(struct AVFormatContext *s, int stream_index, } AVInputFormat ff_pva_demuxer = { - "pva", - NULL_IF_CONFIG_SMALL("TechnoTrend PVA file and stream format"), - sizeof(PVAContext), - pva_probe, - pva_read_header, - pva_read_packet, + .name = "pva", + .long_name = NULL_IF_CONFIG_SMALL("TechnoTrend PVA file and stream format"), + .priv_data_size = sizeof(PVAContext), + .read_probe = pva_probe, + .read_header = pva_read_header, + .read_packet = pva_read_packet, .read_timestamp = pva_read_timestamp }; diff --git a/libavformat/qcp.c b/libavformat/qcp.c index b4f559ee11..0f761a8ec1 100644 --- a/libavformat/qcp.c +++ b/libavformat/qcp.c @@ -23,7 +23,7 @@ * @file * QCP format (.qcp) demuxer * @author Kenan Gillet - * @sa RFC 3625: "The QCP File Format and Media Types for Speech Data" + * @see RFC 3625: "The QCP File Format and Media Types for Speech Data" * http://tools.ietf.org/html/rfc3625 */ diff --git a/libavformat/r3d.c b/libavformat/r3d.c index af74a9ee56..4658ea1e5d 100644 --- a/libavformat/r3d.c +++ b/libavformat/r3d.c @@ -23,6 +23,7 @@ #include "libavutil/intreadwrite.h" #include "libavutil/dict.h" +#include "libavutil/mathematics.h" #include "avformat.h" typedef struct { @@ -383,12 +384,12 @@ static int r3d_close(AVFormatContext *s) } AVInputFormat ff_r3d_demuxer = { - "r3d", - NULL_IF_CONFIG_SMALL("REDCODE R3D format"), - sizeof(R3DContext), - r3d_probe, - r3d_read_header, - r3d_read_packet, - r3d_close, - r3d_seek, + .name = "r3d", + .long_name = NULL_IF_CONFIG_SMALL("REDCODE R3D format"), + .priv_data_size = sizeof(R3DContext), + .read_probe = r3d_probe, + .read_header = r3d_read_header, + .read_packet = r3d_read_packet, + .read_close = r3d_close, + .read_seek = r3d_seek, }; diff --git a/libavformat/rawdec.c b/libavformat/rawdec.c index a4e009b7e0..da47b8b14f 100644 --- a/libavformat/rawdec.c +++ b/libavformat/rawdec.c @@ -218,12 +218,11 @@ const AVClass ff_rawvideo_demuxer_class = { #if CONFIG_G722_DEMUXER AVInputFormat ff_g722_demuxer = { - "g722", - NULL_IF_CONFIG_SMALL("raw G.722"), - sizeof(RawAudioDemuxerContext), - NULL, - ff_raw_read_header, - ff_raw_read_partial_packet, + .name = "g722", + .long_name = NULL_IF_CONFIG_SMALL("raw G.722"), + .priv_data_size = sizeof(RawAudioDemuxerContext), + .read_header = ff_raw_read_header, + .read_packet = ff_raw_read_partial_packet, .flags= AVFMT_GENERIC_INDEX, .extensions = "g722,722", .value = CODEC_ID_ADPCM_G722, @@ -233,12 +232,10 @@ AVInputFormat ff_g722_demuxer = { #if CONFIG_GSM_DEMUXER AVInputFormat ff_gsm_demuxer = { - "gsm", - NULL_IF_CONFIG_SMALL("raw GSM"), - 0, - NULL, - ff_raw_audio_read_header, - ff_raw_read_partial_packet, + .name = "gsm", + .long_name = NULL_IF_CONFIG_SMALL("raw GSM"), + .read_header = ff_raw_audio_read_header, + .read_packet = ff_raw_read_partial_packet, .flags= AVFMT_GENERIC_INDEX, .extensions = "gsm", .value = CODEC_ID_GSM, @@ -251,12 +248,10 @@ FF_DEF_RAWVIDEO_DEMUXER(mjpeg, "raw MJPEG video", NULL, "mjpg,mjpeg", CODEC_ID_M #if CONFIG_MLP_DEMUXER AVInputFormat ff_mlp_demuxer = { - "mlp", - NULL_IF_CONFIG_SMALL("raw MLP"), - 0, - NULL, - ff_raw_audio_read_header, - ff_raw_read_partial_packet, + .name = "mlp", + .long_name = 
NULL_IF_CONFIG_SMALL("raw MLP"), + .read_header = ff_raw_audio_read_header, + .read_packet = ff_raw_read_partial_packet, .flags= AVFMT_GENERIC_INDEX, .extensions = "mlp", .value = CODEC_ID_MLP, @@ -265,12 +260,10 @@ AVInputFormat ff_mlp_demuxer = { #if CONFIG_TRUEHD_DEMUXER AVInputFormat ff_truehd_demuxer = { - "truehd", - NULL_IF_CONFIG_SMALL("raw TrueHD"), - 0, - NULL, - ff_raw_audio_read_header, - ff_raw_read_partial_packet, + .name = "truehd", + .long_name = NULL_IF_CONFIG_SMALL("raw TrueHD"), + .read_header = ff_raw_audio_read_header, + .read_packet = ff_raw_read_partial_packet, .flags= AVFMT_GENERIC_INDEX, .extensions = "thd", .value = CODEC_ID_TRUEHD, @@ -279,12 +272,10 @@ AVInputFormat ff_truehd_demuxer = { #if CONFIG_SHORTEN_DEMUXER AVInputFormat ff_shorten_demuxer = { - "shn", - NULL_IF_CONFIG_SMALL("raw Shorten"), - 0, - NULL, - ff_raw_audio_read_header, - ff_raw_read_partial_packet, + .name = "shn", + .long_name = NULL_IF_CONFIG_SMALL("raw Shorten"), + .read_header = ff_raw_audio_read_header, + .read_packet = ff_raw_read_partial_packet, .flags= AVFMT_GENERIC_INDEX, .extensions = "shn", .value = CODEC_ID_SHORTEN, diff --git a/libavformat/rawenc.c b/libavformat/rawenc.c index a43d5f61b2..d0857dea05 100644 --- a/libavformat/rawenc.c +++ b/libavformat/rawenc.c @@ -34,195 +34,163 @@ int ff_raw_write_packet(AVFormatContext *s, AVPacket *pkt) #if CONFIG_AC3_MUXER AVOutputFormat ff_ac3_muxer = { - "ac3", - NULL_IF_CONFIG_SMALL("raw AC-3"), - "audio/x-ac3", - "ac3", - 0, - CODEC_ID_AC3, - CODEC_ID_NONE, - NULL, - ff_raw_write_packet, + .name = "ac3", + .long_name = NULL_IF_CONFIG_SMALL("raw AC-3"), + .mime_type = "audio/x-ac3", + .extensions = "ac3", + .audio_codec = CODEC_ID_AC3, + .video_codec = CODEC_ID_NONE, + .write_packet = ff_raw_write_packet, .flags= AVFMT_NOTIMESTAMPS, }; #endif #if CONFIG_DIRAC_MUXER AVOutputFormat ff_dirac_muxer = { - "dirac", - NULL_IF_CONFIG_SMALL("raw Dirac"), - NULL, - "drc", - 0, - CODEC_ID_NONE, - CODEC_ID_DIRAC, - NULL, - ff_raw_write_packet, + .name = "dirac", + .long_name = NULL_IF_CONFIG_SMALL("raw Dirac"), + .extensions = "drc", + .audio_codec = CODEC_ID_NONE, + .video_codec = CODEC_ID_DIRAC, + .write_packet = ff_raw_write_packet, .flags= AVFMT_NOTIMESTAMPS, }; #endif #if CONFIG_DNXHD_MUXER AVOutputFormat ff_dnxhd_muxer = { - "dnxhd", - NULL_IF_CONFIG_SMALL("raw DNxHD (SMPTE VC-3)"), - NULL, - "dnxhd", - 0, - CODEC_ID_NONE, - CODEC_ID_DNXHD, - NULL, - ff_raw_write_packet, + .name = "dnxhd", + .long_name = NULL_IF_CONFIG_SMALL("raw DNxHD (SMPTE VC-3)"), + .extensions = "dnxhd", + .audio_codec = CODEC_ID_NONE, + .video_codec = CODEC_ID_DNXHD, + .write_packet = ff_raw_write_packet, .flags= AVFMT_NOTIMESTAMPS, }; #endif #if CONFIG_DTS_MUXER AVOutputFormat ff_dts_muxer = { - "dts", - NULL_IF_CONFIG_SMALL("raw DTS"), - "audio/x-dca", - "dts", - 0, - CODEC_ID_DTS, - CODEC_ID_NONE, - NULL, - ff_raw_write_packet, + .name = "dts", + .long_name = NULL_IF_CONFIG_SMALL("raw DTS"), + .mime_type = "audio/x-dca", + .extensions = "dts", + .audio_codec = CODEC_ID_DTS, + .video_codec = CODEC_ID_NONE, + .write_packet = ff_raw_write_packet, .flags= AVFMT_NOTIMESTAMPS, }; #endif #if CONFIG_EAC3_MUXER AVOutputFormat ff_eac3_muxer = { - "eac3", - NULL_IF_CONFIG_SMALL("raw E-AC-3"), - "audio/x-eac3", - "eac3", - 0, - CODEC_ID_EAC3, - CODEC_ID_NONE, - NULL, - ff_raw_write_packet, + .name = "eac3", + .long_name = NULL_IF_CONFIG_SMALL("raw E-AC-3"), + .mime_type = "audio/x-eac3", + .extensions = "eac3", + .audio_codec = CODEC_ID_EAC3, + .video_codec = CODEC_ID_NONE, + 
.write_packet = ff_raw_write_packet, .flags= AVFMT_NOTIMESTAMPS, }; #endif #if CONFIG_G722_MUXER AVOutputFormat ff_g722_muxer = { - "g722", - NULL_IF_CONFIG_SMALL("raw G.722"), - "audio/G722", - "g722", - 0, - CODEC_ID_ADPCM_G722, - CODEC_ID_NONE, - NULL, - ff_raw_write_packet, + .name = "g722", + .long_name = NULL_IF_CONFIG_SMALL("raw G.722"), + .mime_type = "audio/G722", + .extensions = "g722", + .audio_codec = CODEC_ID_ADPCM_G722, + .video_codec = CODEC_ID_NONE, + .write_packet = ff_raw_write_packet, .flags= AVFMT_NOTIMESTAMPS, }; #endif #if CONFIG_H261_MUXER AVOutputFormat ff_h261_muxer = { - "h261", - NULL_IF_CONFIG_SMALL("raw H.261"), - "video/x-h261", - "h261", - 0, - CODEC_ID_NONE, - CODEC_ID_H261, - NULL, - ff_raw_write_packet, + .name = "h261", + .long_name = NULL_IF_CONFIG_SMALL("raw H.261"), + .mime_type = "video/x-h261", + .extensions = "h261", + .audio_codec = CODEC_ID_NONE, + .video_codec = CODEC_ID_H261, + .write_packet = ff_raw_write_packet, .flags= AVFMT_NOTIMESTAMPS, }; #endif #if CONFIG_H263_MUXER AVOutputFormat ff_h263_muxer = { - "h263", - NULL_IF_CONFIG_SMALL("raw H.263"), - "video/x-h263", - "h263", - 0, - CODEC_ID_NONE, - CODEC_ID_H263, - NULL, - ff_raw_write_packet, + .name = "h263", + .long_name = NULL_IF_CONFIG_SMALL("raw H.263"), + .mime_type = "video/x-h263", + .extensions = "h263", + .audio_codec = CODEC_ID_NONE, + .video_codec = CODEC_ID_H263, + .write_packet = ff_raw_write_packet, .flags= AVFMT_NOTIMESTAMPS, }; #endif #if CONFIG_H264_MUXER AVOutputFormat ff_h264_muxer = { - "h264", - NULL_IF_CONFIG_SMALL("raw H.264 video format"), - NULL, - "h264", - 0, - CODEC_ID_NONE, - CODEC_ID_H264, - NULL, - ff_raw_write_packet, + .name = "h264", + .long_name = NULL_IF_CONFIG_SMALL("raw H.264 video format"), + .extensions = "h264", + .audio_codec = CODEC_ID_NONE, + .video_codec = CODEC_ID_H264, + .write_packet = ff_raw_write_packet, .flags= AVFMT_NOTIMESTAMPS, }; #endif #if CONFIG_CAVSVIDEO_MUXER AVOutputFormat ff_cavsvideo_muxer = { - "cavsvideo", - NULL_IF_CONFIG_SMALL("raw Chinese AVS video"), - NULL, - "cavs", - 0, - CODEC_ID_NONE, - CODEC_ID_CAVS, - NULL, - ff_raw_write_packet, + .name = "cavsvideo", + .long_name = NULL_IF_CONFIG_SMALL("raw Chinese AVS video"), + .extensions = "cavs", + .audio_codec = CODEC_ID_NONE, + .video_codec = CODEC_ID_CAVS, + .write_packet = ff_raw_write_packet, .flags= AVFMT_NOTIMESTAMPS, }; #endif #if CONFIG_M4V_MUXER AVOutputFormat ff_m4v_muxer = { - "m4v", - NULL_IF_CONFIG_SMALL("raw MPEG-4 video format"), - NULL, - "m4v", - 0, - CODEC_ID_NONE, - CODEC_ID_MPEG4, - NULL, - ff_raw_write_packet, + .name = "m4v", + .long_name = NULL_IF_CONFIG_SMALL("raw MPEG-4 video format"), + .extensions = "m4v", + .audio_codec = CODEC_ID_NONE, + .video_codec = CODEC_ID_MPEG4, + .write_packet = ff_raw_write_packet, .flags= AVFMT_NOTIMESTAMPS, }; #endif #if CONFIG_MJPEG_MUXER AVOutputFormat ff_mjpeg_muxer = { - "mjpeg", - NULL_IF_CONFIG_SMALL("raw MJPEG video"), - "video/x-mjpeg", - "mjpg,mjpeg", - 0, - CODEC_ID_NONE, - CODEC_ID_MJPEG, - NULL, - ff_raw_write_packet, + .name = "mjpeg", + .long_name = NULL_IF_CONFIG_SMALL("raw MJPEG video"), + .mime_type = "video/x-mjpeg", + .extensions = "mjpg,mjpeg", + .audio_codec = CODEC_ID_NONE, + .video_codec = CODEC_ID_MJPEG, + .write_packet = ff_raw_write_packet, .flags= AVFMT_NOTIMESTAMPS, }; #endif #if CONFIG_MLP_MUXER AVOutputFormat ff_mlp_muxer = { - "mlp", - NULL_IF_CONFIG_SMALL("raw MLP"), - NULL, - "mlp", - 0, - CODEC_ID_MLP, - CODEC_ID_NONE, - NULL, - ff_raw_write_packet, + .name = "mlp", + .long_name = 
NULL_IF_CONFIG_SMALL("raw MLP"), + .extensions = "mlp", + .audio_codec = CODEC_ID_MLP, + .video_codec = CODEC_ID_NONE, + .write_packet = ff_raw_write_packet, .flags= AVFMT_NOTIMESTAMPS, }; #endif @@ -241,60 +209,49 @@ AVOutputFormat ff_srt_muxer = { #if CONFIG_TRUEHD_MUXER AVOutputFormat ff_truehd_muxer = { - "truehd", - NULL_IF_CONFIG_SMALL("raw TrueHD"), - NULL, - "thd", - 0, - CODEC_ID_TRUEHD, - CODEC_ID_NONE, - NULL, - ff_raw_write_packet, + .name = "truehd", + .long_name = NULL_IF_CONFIG_SMALL("raw TrueHD"), + .extensions = "thd", + .audio_codec = CODEC_ID_TRUEHD, + .video_codec = CODEC_ID_NONE, + .write_packet = ff_raw_write_packet, .flags= AVFMT_NOTIMESTAMPS, }; #endif #if CONFIG_MPEG1VIDEO_MUXER AVOutputFormat ff_mpeg1video_muxer = { - "mpeg1video", - NULL_IF_CONFIG_SMALL("raw MPEG-1 video"), - "video/x-mpeg", - "mpg,mpeg,m1v", - 0, - CODEC_ID_NONE, - CODEC_ID_MPEG1VIDEO, - NULL, - ff_raw_write_packet, + .name = "mpeg1video", + .long_name = NULL_IF_CONFIG_SMALL("raw MPEG-1 video"), + .mime_type = "video/x-mpeg", + .extensions = "mpg,mpeg,m1v", + .audio_codec = CODEC_ID_NONE, + .video_codec = CODEC_ID_MPEG1VIDEO, + .write_packet = ff_raw_write_packet, .flags= AVFMT_NOTIMESTAMPS, }; #endif #if CONFIG_MPEG2VIDEO_MUXER AVOutputFormat ff_mpeg2video_muxer = { - "mpeg2video", - NULL_IF_CONFIG_SMALL("raw MPEG-2 video"), - NULL, - "m2v", - 0, - CODEC_ID_NONE, - CODEC_ID_MPEG2VIDEO, - NULL, - ff_raw_write_packet, + .name = "mpeg2video", + .long_name = NULL_IF_CONFIG_SMALL("raw MPEG-2 video"), + .extensions = "m2v", + .audio_codec = CODEC_ID_NONE, + .video_codec = CODEC_ID_MPEG2VIDEO, + .write_packet = ff_raw_write_packet, .flags= AVFMT_NOTIMESTAMPS, }; #endif #if CONFIG_RAWVIDEO_MUXER AVOutputFormat ff_rawvideo_muxer = { - "rawvideo", - NULL_IF_CONFIG_SMALL("raw video format"), - NULL, - "yuv,rgb", - 0, - CODEC_ID_NONE, - CODEC_ID_RAWVIDEO, - NULL, - ff_raw_write_packet, + .name = "rawvideo", + .long_name = NULL_IF_CONFIG_SMALL("raw video format"), + .extensions = "yuv,rgb", + .audio_codec = CODEC_ID_NONE, + .video_codec = CODEC_ID_RAWVIDEO, + .write_packet = ff_raw_write_packet, .flags= AVFMT_NOTIMESTAMPS, }; #endif diff --git a/libavformat/rawvideodec.c b/libavformat/rawvideodec.c index 36f5d954ae..5609575401 100644 --- a/libavformat/rawvideodec.c +++ b/libavformat/rawvideodec.c @@ -45,12 +45,11 @@ static int rawvideo_read_packet(AVFormatContext *s, AVPacket *pkt) } AVInputFormat ff_rawvideo_demuxer = { - "rawvideo", - NULL_IF_CONFIG_SMALL("raw video format"), - sizeof(FFRawVideoDemuxerContext), - NULL, - ff_raw_read_header, - rawvideo_read_packet, + .name = "rawvideo", + .long_name = NULL_IF_CONFIG_SMALL("raw video format"), + .priv_data_size = sizeof(FFRawVideoDemuxerContext), + .read_header = ff_raw_read_header, + .read_packet = rawvideo_read_packet, .flags= AVFMT_GENERIC_INDEX, .extensions = "yuv,cif,qcif,rgb", .value = CODEC_ID_RAWVIDEO, diff --git a/libavformat/rdt.h b/libavformat/rdt.h index 19a4a7bc1f..c2ec94b8b4 100644 --- a/libavformat/rdt.h +++ b/libavformat/rdt.h @@ -80,16 +80,16 @@ void ff_rdt_subscribe_rule(char *cmd, int size, * * @param buf input buffer * @param len length of input buffer - * @param set_id will be set to the set ID this packet belongs to - * @param seq_no will be set to the sequence number of the packet - * @param stream_id will be set to the stream ID this packet belongs to - * @param is_keyframe will be whether this packet belongs to a keyframe - * @param timestamp will be set to the timestamp of the packet + * @param pset_id will be set to the set ID 
this packet belongs to + * @param pseq_no will be set to the sequence number of the packet + * @param pstream_id will be set to the stream ID this packet belongs to + * @param pis_keyframe will be whether this packet belongs to a keyframe + * @param ptimestamp will be set to the timestamp of the packet * @return the amount of bytes consumed, or negative on error */ int ff_rdt_parse_header(const uint8_t *buf, int len, - int *set_id, int *seq_no, int *stream_id, - int *is_keyframe, uint32_t *timestamp); + int *pset_id, int *pseq_no, int *pstream_id, + int *pis_keyframe, uint32_t *ptimestamp); /** * Parse RDT-style packet data (header + media data). diff --git a/libavformat/riff.c b/libavformat/riff.c index 544c29f116..636d05c636 100644 --- a/libavformat/riff.c +++ b/libavformat/riff.c @@ -19,6 +19,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +#include "libavutil/mathematics.h" #include "libavcodec/avcodec.h" #include "avformat.h" #include "avio_internal.h" @@ -86,6 +87,7 @@ const AVCodecTag ff_codec_bmp_tags[] = { { CODEC_ID_MPEG4, MKTAG('G', 'E', 'O', 'V') }, { CODEC_ID_MPEG4, MKTAG('S', 'I', 'P', 'P') }, /* Samsung SHR-6040 */ { CODEC_ID_MPEG4, MKTAG('X', 'V', 'I', 'X') }, + { CODEC_ID_MPEG4, MKTAG('D', 'r', 'e', 'X') }, { CODEC_ID_MSMPEG4V3, MKTAG('M', 'P', '4', '3') }, { CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '3') }, { CODEC_ID_MSMPEG4V3, MKTAG('M', 'P', 'G', '3') }, @@ -130,6 +132,7 @@ const AVCodecTag ff_codec_bmp_tags[] = { { CODEC_ID_MPEG2VIDEO, MKTAG('s', 'l', 'i', 'f') }, { CODEC_ID_MPEG2VIDEO, MKTAG('E', 'M', '2', 'V') }, { CODEC_ID_MPEG2VIDEO, MKTAG('M', '7', '0', '1') }, /* Matrox MPEG2 intra-only */ + { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'p', 'g', 'v') }, { CODEC_ID_MJPEG, MKTAG('M', 'J', 'P', 'G') }, { CODEC_ID_MJPEG, MKTAG('L', 'J', 'P', 'G') }, { CODEC_ID_MJPEG, MKTAG('d', 'm', 'b', '1') }, @@ -171,9 +174,12 @@ const AVCodecTag ff_codec_bmp_tags[] = { { CODEC_ID_RAWVIDEO, MKTAG('y', 'u', 'v', 's') }, { CODEC_ID_RAWVIDEO, MKTAG('P', '4', '2', '2') }, { CODEC_ID_RAWVIDEO, MKTAG('Y', 'V', '1', '2') }, + { CODEC_ID_RAWVIDEO, MKTAG('Y', 'V', '1', '6') }, + { CODEC_ID_RAWVIDEO, MKTAG('Y', 'V', '2', '4') }, { CODEC_ID_RAWVIDEO, MKTAG('U', 'Y', 'V', 'Y') }, { CODEC_ID_RAWVIDEO, MKTAG('V', 'Y', 'U', 'Y') }, { CODEC_ID_RAWVIDEO, MKTAG('I', 'Y', 'U', 'V') }, + { CODEC_ID_RAWVIDEO, MKTAG('Y', '8', ' ', ' ') }, { CODEC_ID_RAWVIDEO, MKTAG('Y', '8', '0', '0') }, { CODEC_ID_RAWVIDEO, MKTAG('H', 'D', 'Y', 'C') }, { CODEC_ID_RAWVIDEO, MKTAG('Y', 'V', 'U', '9') }, diff --git a/libavformat/rl2.c b/libavformat/rl2.c index e41406791e..b9f38be390 100644 --- a/libavformat/rl2.c +++ b/libavformat/rl2.c @@ -23,8 +23,7 @@ * RL2 file demuxer * @file * @author Sascha Sommer (saschasommer@freenet.de) - * For more information regarding the RL2 file format, visit: - * http://wiki.multimedia.cx/index.php?title=RL2 + * @see http://wiki.multimedia.cx/index.php?title=RL2 * * extradata: * 2 byte le initial drawing offset within 320x200 viewport @@ -34,6 +33,7 @@ */ #include "libavutil/intreadwrite.h" +#include "libavutil/mathematics.h" #include "avformat.h" #define EXTRADATA1_SIZE (6 + 256 * 3) ///< video base, clr, palette @@ -286,13 +286,12 @@ static int rl2_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp } AVInputFormat ff_rl2_demuxer = { - "rl2", - NULL_IF_CONFIG_SMALL("RL2 format"), - sizeof(Rl2DemuxContext), - rl2_probe, - rl2_read_header, - rl2_read_packet, - NULL, - rl2_read_seek, + .name = "rl2", + .long_name = NULL_IF_CONFIG_SMALL("RL2 
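riff.c above registers a few more fourccs (DreX for MPEG-4, mpgv for MPEG-2, and the YV16/YV24/'Y8  ' raw-video tags). Conceptually the table maps a tag packed little-endian, as FFmpeg's MKTAG() does, to a codec id; a self-contained sketch of that lookup, with only the entries added in this merge:

#include <stdint.h>

#define FOURCC(a, b, c, d) \
    ((uint32_t)(a) | ((uint32_t)(b) << 8) | ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

enum codec { CODEC_NONE, CODEC_MPEG4, CODEC_MPEG2VIDEO, CODEC_RAWVIDEO };

static const struct { enum codec id; uint32_t tag; } tags[] = {
    { CODEC_MPEG4,      FOURCC('D', 'r', 'e', 'X') },
    { CODEC_MPEG2VIDEO, FOURCC('m', 'p', 'g', 'v') },
    { CODEC_RAWVIDEO,   FOURCC('Y', 'V', '1', '6') },
    { CODEC_RAWVIDEO,   FOURCC('Y', 'V', '2', '4') },
    { CODEC_RAWVIDEO,   FOURCC('Y', '8', ' ', ' ') },
    { CODEC_NONE,       0 },
};

static enum codec codec_for_tag(uint32_t tag)
{
    for (int i = 0; tags[i].tag; i++)
        if (tags[i].tag == tag)
            return tags[i].id;
    return CODEC_NONE;
}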
format"), + .priv_data_size = sizeof(Rl2DemuxContext), + .read_probe = rl2_probe, + .read_header = rl2_read_header, + .read_packet = rl2_read_packet, + .read_seek = rl2_read_seek, }; diff --git a/libavformat/rmdec.c b/libavformat/rmdec.c index c2eee815c2..effb7d3a00 100644 --- a/libavformat/rmdec.c +++ b/libavformat/rmdec.c @@ -371,7 +371,7 @@ skip: return 0; } -static int rm_read_header_old(AVFormatContext *s, AVFormatParameters *ap) +static int rm_read_header_old(AVFormatContext *s) { RMDemuxContext *rm = s->priv_data; AVStream *st; @@ -399,7 +399,7 @@ static int rm_read_header(AVFormatContext *s, AVFormatParameters *ap) tag = avio_rl32(pb); if (tag == MKTAG('.', 'r', 'a', 0xfd)) { /* very old .ra format */ - return rm_read_header_old(s, ap); + return rm_read_header_old(s); } else if (tag != MKTAG('.', 'R', 'M', 'F')) { return AVERROR(EIO); } @@ -937,23 +937,19 @@ static int64_t rm_read_dts(AVFormatContext *s, int stream_index, } AVInputFormat ff_rm_demuxer = { - "rm", - NULL_IF_CONFIG_SMALL("RealMedia format"), - sizeof(RMDemuxContext), - rm_probe, - rm_read_header, - rm_read_packet, - rm_read_close, - NULL, - rm_read_dts, + .name = "rm", + .long_name = NULL_IF_CONFIG_SMALL("RealMedia format"), + .priv_data_size = sizeof(RMDemuxContext), + .read_probe = rm_probe, + .read_header = rm_read_header, + .read_packet = rm_read_packet, + .read_close = rm_read_close, + .read_timestamp = rm_read_dts, }; AVInputFormat ff_rdt_demuxer = { - "rdt", - NULL_IF_CONFIG_SMALL("RDT demuxer"), - sizeof(RMDemuxContext), - NULL, - NULL, - NULL, - rm_read_close, + .name = "rdt", + .long_name = NULL_IF_CONFIG_SMALL("RDT demuxer"), + .priv_data_size = sizeof(RMDemuxContext), + .read_close = rm_read_close, }; diff --git a/libavformat/rmenc.c b/libavformat/rmenc.c index 575d895a6a..a601331e2e 100644 --- a/libavformat/rmenc.c +++ b/libavformat/rmenc.c @@ -461,15 +461,15 @@ static int rm_write_trailer(AVFormatContext *s) AVOutputFormat ff_rm_muxer = { - "rm", - NULL_IF_CONFIG_SMALL("RealMedia format"), - "application/vnd.rn-realmedia", - "rm,ra", - sizeof(RMMuxContext), - CODEC_ID_AC3, - CODEC_ID_RV10, - rm_write_header, - rm_write_packet, - rm_write_trailer, + .name = "rm", + .long_name = NULL_IF_CONFIG_SMALL("RealMedia format"), + .mime_type = "application/vnd.rn-realmedia", + .extensions = "rm,ra", + .priv_data_size = sizeof(RMMuxContext), + .audio_codec = CODEC_ID_AC3, + .video_codec = CODEC_ID_RV10, + .write_header = rm_write_header, + .write_packet = rm_write_packet, + .write_trailer = rm_write_trailer, .codec_tag= (const AVCodecTag* const []){ff_rm_codec_tags, 0}, }; diff --git a/libavformat/rpl.c b/libavformat/rpl.c index 151893cdd7..009a67f31a 100644 --- a/libavformat/rpl.c +++ b/libavformat/rpl.c @@ -351,10 +351,10 @@ static int rpl_read_packet(AVFormatContext *s, AVPacket *pkt) } AVInputFormat ff_rpl_demuxer = { - "rpl", - NULL_IF_CONFIG_SMALL("RPL/ARMovie format"), - sizeof(RPLContext), - rpl_probe, - rpl_read_header, - rpl_read_packet, + .name = "rpl", + .long_name = NULL_IF_CONFIG_SMALL("RPL/ARMovie format"), + .priv_data_size = sizeof(RPLContext), + .read_probe = rpl_probe, + .read_header = rpl_read_header, + .read_packet = rpl_read_packet, }; diff --git a/libavformat/rtmppkt.c b/libavformat/rtmppkt.c index 4b6d549f74..7e2ccdc6ac 100644 --- a/libavformat/rtmppkt.c +++ b/libavformat/rtmppkt.c @@ -21,6 +21,7 @@ #include "libavcodec/bytestream.h" #include "libavutil/avstring.h" +#include "libavutil/intfloat_readwrite.h" #include "avformat.h" #include "rtmppkt.h" diff --git a/libavformat/rtmppkt.h 
b/libavformat/rtmppkt.h index 4c28cd351e..8acbfc116b 100644 --- a/libavformat/rtmppkt.h +++ b/libavformat/rtmppkt.h @@ -138,7 +138,7 @@ int ff_rtmp_packet_write(URLContext *h, RTMPPacket *p, void ff_rtmp_packet_dump(void *ctx, RTMPPacket *p); /** - * @defgroup amffuncs functions used to work with AMF format (which is also used in .flv) + * @name Functions used to work with the AMF format (which is also used in .flv) * @see amf_* funcs in libavformat/flvdec.c * @{ */ diff --git a/libavformat/rtmpproto.c b/libavformat/rtmpproto.c index 3701a77176..fba293eb2c 100644 --- a/libavformat/rtmpproto.c +++ b/libavformat/rtmpproto.c @@ -26,6 +26,7 @@ #include "libavcodec/bytestream.h" #include "libavutil/avstring.h" +#include "libavutil/intfloat_readwrite.h" #include "libavutil/lfg.h" #include "libavutil/sha.h" #include "avformat.h" @@ -768,7 +769,6 @@ static int get_packet(URLContext *s, int for_header) } ff_rtmp_packet_destroy(&rpkt); } - return 0; } static int rtmp_close(URLContext *h) diff --git a/libavformat/rtpdec.c b/libavformat/rtpdec.c index 9fc30d7b66..f9252a9bb3 100644 --- a/libavformat/rtpdec.c +++ b/libavformat/rtpdec.c @@ -19,6 +19,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +#include "libavutil/mathematics.h" #include "libavcodec/get_bits.h" #include "avformat.h" #include "mpegts.h" @@ -217,22 +218,6 @@ static int rtp_valid_packet_in_sequence(RTPStatistics *s, uint16_t seq) return 1; } -#if 0 -/** -* This function is currently unused; without a valid local ntp time, I don't see how we could calculate the -* difference between the arrival and sent timestamp. As a result, the jitter and transit statistics values -* never change. I left this in in case someone else can see a way. (rdm) -*/ -static void rtcp_update_jitter(RTPStatistics *s, uint32_t sent_timestamp, uint32_t arrival_timestamp) -{ - uint32_t transit= arrival_timestamp - sent_timestamp; - int d; - s->transit= transit; - d= FFABS(transit - s->transit); - s->jitter += d - ((s->jitter + 8)>>4); -} -#endif - int rtp_check_and_send_back_rr(RTPDemuxContext *s, int count) { AVIOContext *pb; diff --git a/libavformat/rtpdec_svq3.c b/libavformat/rtpdec_svq3.c index cb5f74d222..7800766ecf 100644 --- a/libavformat/rtpdec_svq3.c +++ b/libavformat/rtpdec_svq3.c @@ -22,8 +22,8 @@ /** * @file * @brief RTP support for the SV3V (SVQ3) payload - * (http://wiki.multimedia.cx/index.php?title=Sorenson_Video_3#Packetization) * @author Ronald S. 
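rtpdec.c above also deletes the long-disabled rtcp_update_jitter() block. What it sketched was the RFC 3550 interarrival-jitter filter, J += (|D| - J) / 16, kept in fixed point by scaling J by 16, although the removed code updated s->transit before taking the difference and so could never produce a non-zero value. The conventional ordering is shown below for reference only; nothing like it is being added back by this merge:

#include <stdint.h>
#include <stdlib.h>

struct rtp_stats { uint32_t transit; uint32_t jitter; };

static void update_jitter(struct rtp_stats *s,
                          uint32_t sent_ts, uint32_t arrival_ts)
{
    uint32_t transit = arrival_ts - sent_ts;   /* relative transit time   */
    int d = abs((int)(transit - s->transit));  /* difference vs. previous */
    s->transit = transit;
    s->jitter += d - ((s->jitter + 8) >> 4);   /* jitter scaled by 16     */
}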
Bultje <rbultje@ronald.bitfreak.net> + * @see http://wiki.multimedia.cx/index.php?title=Sorenson_Video_3#Packetization */ #include <string.h> diff --git a/libavformat/rtpdec_vp8.c b/libavformat/rtpdec_vp8.c index 862a55e4e9..bfc96570d1 100644 --- a/libavformat/rtpdec_vp8.c +++ b/libavformat/rtpdec_vp8.c @@ -23,7 +23,7 @@ * @file * @brief RTP support for the VP8 payload * @author Josh Allmann <joshua.allmann@gmail.com> - * ( http://www.webmproject.org/code/specs/rtp/ ) + * @see http://www.webmproject.org/code/specs/rtp/ */ #include "libavcodec/bytestream.h" diff --git a/libavformat/rtpenc.c b/libavformat/rtpenc.c index 60629b098b..8664f46b66 100644 --- a/libavformat/rtpenc.c +++ b/libavformat/rtpenc.c @@ -22,6 +22,7 @@ #include "avformat.h" #include "mpegts.h" #include "internal.h" +#include "libavutil/mathematics.h" #include "libavutil/random_seed.h" #include "libavutil/opt.h" @@ -461,15 +462,13 @@ static int rtp_write_trailer(AVFormatContext *s1) } AVOutputFormat ff_rtp_muxer = { - "rtp", - NULL_IF_CONFIG_SMALL("RTP output format"), - NULL, - NULL, - sizeof(RTPMuxContext), - CODEC_ID_PCM_MULAW, - CODEC_ID_NONE, - rtp_write_header, - rtp_write_packet, - rtp_write_trailer, + .name = "rtp", + .long_name = NULL_IF_CONFIG_SMALL("RTP output format"), + .priv_data_size = sizeof(RTPMuxContext), + .audio_codec = CODEC_ID_PCM_MULAW, + .video_codec = CODEC_ID_NONE, + .write_header = rtp_write_header, + .write_packet = rtp_write_packet, + .write_trailer = rtp_write_trailer, .priv_class = &rtp_muxer_class, }; diff --git a/libavformat/rtpproto.c b/libavformat/rtpproto.c index 8b23f25c46..d2ce53c670 100644 --- a/libavformat/rtpproto.c +++ b/libavformat/rtpproto.c @@ -86,7 +86,7 @@ int rtp_set_remote_url(URLContext *h, const char *uri) * "http://host:port/path?option1=val1&option2=val2... */ -static void url_add_option(char *buf, int buf_size, const char *fmt, ...) +static av_printf_format(3, 4) void url_add_option(char *buf, int buf_size, const char *fmt, ...) 
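rtpproto.c above tags url_add_option() with av_printf_format(3, 4), FFmpeg's wrapper around GCC/Clang's format attribute, so mismatched format strings and arguments become compile-time warnings (and, just below, build_udp_url() starts appending fifo_size=0 to the UDP URL it assembles). A self-contained sketch of what the attribute buys:

#include <stdarg.h>
#include <stdio.h>

#ifdef __GNUC__
#define PRINTF_FMT(f, a) __attribute__((format(printf, f, a)))
#else
#define PRINTF_FMT(f, a)
#endif

/* A misuse such as log_opt("pkt_size=%d", "1316") now triggers a
 * compile-time warning instead of undefined behaviour at run time. */
static void PRINTF_FMT(1, 2) log_opt(const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}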
{ char buf1[1024]; va_list ap; @@ -115,6 +115,7 @@ static void build_udp_url(char *buf, int buf_size, url_add_option(buf, buf_size, "pkt_size=%d", max_packet_size); if (connect) url_add_option(buf, buf_size, "connect=1"); + url_add_option(buf, buf_size, "fifo_size=0"); } /** @@ -225,20 +226,6 @@ static int rtp_read(URLContext *h, uint8_t *buf, int size) int len, n; struct pollfd p[2] = {{s->rtp_fd, POLLIN, 0}, {s->rtcp_fd, POLLIN, 0}}; -#if 0 - for(;;) { - from_len = sizeof(from); - len = recvfrom (s->rtp_fd, buf, size, 0, - (struct sockaddr *)&from, &from_len); - if (len < 0) { - if (ff_neterrno() == AVERROR(EAGAIN) || - ff_neterrno() == AVERROR(EINTR)) - continue; - return AVERROR(EIO); - } - break; - } -#else for(;;) { if (url_interrupt_cb()) return AVERROR_EXIT; @@ -277,7 +264,6 @@ static int rtp_read(URLContext *h, uint8_t *buf, int size) return AVERROR(EIO); } } -#endif return len; } @@ -296,14 +282,6 @@ static int rtp_write(URLContext *h, const uint8_t *buf, int size) } ret = ffurl_write(hd, buf, size); -#if 0 - { - struct timespec ts; - ts.tv_sec = 0; - ts.tv_nsec = 10 * 1000000; - nanosleep(&ts, NULL); - } -#endif return ret; } diff --git a/libavformat/rtsp.c b/libavformat/rtsp.c index 8d1cd327cc..a8dd41c7da 100644 --- a/libavformat/rtsp.c +++ b/libavformat/rtsp.c @@ -22,6 +22,7 @@ #include "libavutil/base64.h" #include "libavutil/avstring.h" #include "libavutil/intreadwrite.h" +#include "libavutil/mathematics.h" #include "libavutil/parseutils.h" #include "libavutil/random_seed.h" #include "libavutil/dict.h" @@ -428,11 +429,6 @@ static void sdp_parse_line(AVFormatContext *s, SDPParseState *s1, } } -/** - * Parse the sdp description and allocate the rtp streams and the - * pollfd array used for udp ones. - */ - int ff_sdp_parse(AVFormatContext *s, const char *content) { RTSPState *rt = s->priv_data; @@ -1050,9 +1046,6 @@ retry: return 0; } -/** - * @return 0 on success, <0 on error, 1 if protocol is unavailable. - */ int ff_rtsp_make_setup_request(AVFormatContext *s, const char *host, int port, int lower_transport, const char *real_challenge) { @@ -1078,7 +1071,7 @@ int ff_rtsp_make_setup_request(AVFormatContext *s, const char *host, int port, for (j = RTSP_RTP_PORT_MIN, i = 0; i < rt->nb_rtsp_streams; ++i) { char transport[2048]; - /** + /* * WMS serves all UDP data over a single connection, the RTX, which * isn't necessarily the first in the SDP but has to be the first * to be set up, else the second/third SETUP will fail with a 461. @@ -1123,17 +1116,9 @@ int ff_rtsp_make_setup_request(AVFormatContext *s, const char *host, int port, } } -#if 0 - /* then try on any port */ - if (ffurl_open(&rtsp_st->rtp_handle, "rtp://", AVIO_RDONLY) < 0) { - err = AVERROR_INVALIDDATA; - goto fail; - } -#else av_log(s, AV_LOG_ERROR, "Unable to open an input RTP port\n"); err = AVERROR(EIO); goto fail; -#endif rtp_opened: port = rtp_get_local_rtp_port(rtsp_st->rtp_handle); @@ -1151,7 +1136,7 @@ int ff_rtsp_make_setup_request(AVFormatContext *s, const char *host, int port, /* RTP/TCP */ else if (lower_transport == RTSP_LOWER_TRANSPORT_TCP) { - /** For WMS streams, the application streams are only used for + /* For WMS streams, the application streams are only used for * UDP. When trying to set it up for TCP streams, the server * will return an error. Therefore, we skip those streams. 
*/ if (rt->server_type == RTSP_SERVER_WMS && @@ -1482,14 +1467,14 @@ redirect: cmd[0] = 0; if (rt->server_type == RTSP_SERVER_REAL) av_strlcat(cmd, - /** + /* * The following entries are required for proper * streaming from a Realmedia server. They are * interdependent in some way although we currently * don't quite understand how. Values were copied * from mplayer SVN r23589. - * @param CompanyID is a 16-byte ID in base64 - * @param ClientChallenge is a 16-byte ID in hex + * ClientChallenge is a 16-byte ID in hex + * CompanyID is a 16-byte ID in base64 */ "ClientChallenge: 9e26d33f2984236010ef6253fb1887f7\r\n" "PlayerStarttime: [28/03/2003:22:50:23 00:00]\r\n" @@ -1831,13 +1816,13 @@ static int sdp_read_close(AVFormatContext *s) } AVInputFormat ff_sdp_demuxer = { - "sdp", - NULL_IF_CONFIG_SMALL("SDP"), - sizeof(RTSPState), - sdp_probe, - sdp_read_header, - ff_rtsp_fetch_packet, - sdp_read_close, + .name = "sdp", + .long_name = NULL_IF_CONFIG_SMALL("SDP"), + .priv_data_size = sizeof(RTSPState), + .read_probe = sdp_probe, + .read_header = sdp_read_header, + .read_packet = ff_rtsp_fetch_packet, + .read_close = sdp_read_close, }; #endif /* CONFIG_SDP_DEMUXER */ @@ -1935,13 +1920,13 @@ fail: } AVInputFormat ff_rtp_demuxer = { - "rtp", - NULL_IF_CONFIG_SMALL("RTP input format"), - sizeof(RTSPState), - rtp_probe, - rtp_read_header, - ff_rtsp_fetch_packet, - sdp_read_close, + .name = "rtp", + .long_name = NULL_IF_CONFIG_SMALL("RTP input format"), + .priv_data_size = sizeof(RTSPState), + .read_probe = rtp_probe, + .read_header = rtp_read_header, + .read_packet = ff_rtsp_fetch_packet, + .read_close = sdp_read_close, .flags = AVFMT_NOFILE, }; #endif /* CONFIG_RTP_DEMUXER */ diff --git a/libavformat/rtsp.h b/libavformat/rtsp.h index 6b060923e4..3d0345d35c 100644 --- a/libavformat/rtsp.h +++ b/libavformat/rtsp.h @@ -220,9 +220,6 @@ typedef struct RTSPState { * see rtsp_read_play() and rtsp_read_seek(). */ int64_t seek_timestamp; - /* XXX: currently we use unbuffered input */ - // AVIOContext rtsp_gb; - int seq; /**< RTSP command sequence number */ /** copy of RTSPMessageHeader->session_id, i.e. the server-provided session @@ -488,9 +485,9 @@ void ff_rtsp_close_streams(AVFormatContext *s); /** * Close all connection handles within the RTSP (de)muxer * - * @param rt RTSP (de)muxer context + * @param s RTSP (de)muxer context */ -void ff_rtsp_close_connections(AVFormatContext *rt); +void ff_rtsp_close_connections(AVFormatContext *s); /** * Get the description of the stream and set up the RTSPStream child @@ -505,8 +502,9 @@ int ff_rtsp_setup_input_streams(AVFormatContext *s, RTSPMessageHeader *reply); int ff_rtsp_setup_output_streams(AVFormatContext *s, const char *addr); /** - * Parse a SDP description of streams by populating an RTSPState struct - * within the AVFormatContext. + * Parse an SDP description of streams by populating an RTSPState struct + * within the AVFormatContext; also allocate the RTP streams and the + * pollfd array used for UDP streams. */ int ff_sdp_parse(AVFormatContext *s, const char *content); @@ -525,6 +523,7 @@ int ff_rtsp_fetch_packet(AVFormatContext *s, AVPacket *pkt); /** * Do the SETUP requests for each stream for the chosen * lower transport mode. 
+ * @return 0 on success, <0 on error, 1 if protocol is unavailable */ int ff_rtsp_make_setup_request(AVFormatContext *s, const char *host, int port, int lower_transport, const char *real_challenge); diff --git a/libavformat/rtspdec.c b/libavformat/rtspdec.c index cf95915110..8fe58e7351 100644 --- a/libavformat/rtspdec.c +++ b/libavformat/rtspdec.c @@ -21,6 +21,7 @@ #include "libavutil/avstring.h" #include "libavutil/intreadwrite.h" +#include "libavutil/mathematics.h" #include "libavutil/opt.h" #include "avformat.h" @@ -382,12 +383,6 @@ static int rtsp_read_close(AVFormatContext *s) { RTSPState *rt = s->priv_data; -#if 0 - /* NOTE: it is valid to flush the buffer here */ - if (rt->lower_transport == RTSP_LOWER_TRANSPORT_TCP) { - avio_close(&rt->rtsp_gb); - } -#endif ff_rtsp_send_cmd_async(s, "TEARDOWN", rt->control_uri, NULL); ff_rtsp_close_streams(s); @@ -411,14 +406,14 @@ const AVClass rtsp_demuxer_class = { }; AVInputFormat ff_rtsp_demuxer = { - "rtsp", - NULL_IF_CONFIG_SMALL("RTSP input format"), - sizeof(RTSPState), - rtsp_probe, - rtsp_read_header, - rtsp_read_packet, - rtsp_read_close, - rtsp_read_seek, + .name = "rtsp", + .long_name = NULL_IF_CONFIG_SMALL("RTSP input format"), + .priv_data_size = sizeof(RTSPState), + .read_probe = rtsp_probe, + .read_header = rtsp_read_header, + .read_packet = rtsp_read_packet, + .read_close = rtsp_read_close, + .read_seek = rtsp_read_seek, .flags = AVFMT_NOFILE, .read_play = rtsp_read_play, .read_pause = rtsp_read_pause, diff --git a/libavformat/rtspenc.c b/libavformat/rtspenc.c index 9120d187b4..5196bf4fcd 100644 --- a/libavformat/rtspenc.c +++ b/libavformat/rtspenc.c @@ -241,16 +241,14 @@ static int rtsp_write_close(AVFormatContext *s) } AVOutputFormat ff_rtsp_muxer = { - "rtsp", - NULL_IF_CONFIG_SMALL("RTSP output format"), - NULL, - NULL, - sizeof(RTSPState), - CODEC_ID_AAC, - CODEC_ID_MPEG4, - rtsp_write_header, - rtsp_write_packet, - rtsp_write_close, + .name = "rtsp", + .long_name = NULL_IF_CONFIG_SMALL("RTSP output format"), + .priv_data_size = sizeof(RTSPState), + .audio_codec = CODEC_ID_AAC, + .video_codec = CODEC_ID_MPEG4, + .write_header = rtsp_write_header, + .write_packet = rtsp_write_packet, + .write_trailer = rtsp_write_close, .flags = AVFMT_NOFILE | AVFMT_GLOBALHEADER, .priv_class = &rtsp_muxer_class, }; diff --git a/libavformat/sapdec.c b/libavformat/sapdec.c index e4d3deb60f..ccc6a172e8 100644 --- a/libavformat/sapdec.c +++ b/libavformat/sapdec.c @@ -224,13 +224,13 @@ static int sap_fetch_packet(AVFormatContext *s, AVPacket *pkt) } AVInputFormat ff_sap_demuxer = { - "sap", - NULL_IF_CONFIG_SMALL("SAP input format"), - sizeof(struct SAPState), - sap_probe, - sap_read_header, - sap_fetch_packet, - sap_read_close, + .name = "sap", + .long_name = NULL_IF_CONFIG_SMALL("SAP input format"), + .priv_data_size = sizeof(struct SAPState), + .read_probe = sap_probe, + .read_header = sap_read_header, + .read_packet = sap_fetch_packet, + .read_close = sap_read_close, .flags = AVFMT_NOFILE, }; diff --git a/libavformat/sapenc.c b/libavformat/sapenc.c index 4ca14ad964..5fd17a56c6 100644 --- a/libavformat/sapenc.c +++ b/libavformat/sapenc.c @@ -250,16 +250,14 @@ static int sap_write_packet(AVFormatContext *s, AVPacket *pkt) } AVOutputFormat ff_sap_muxer = { - "sap", - NULL_IF_CONFIG_SMALL("SAP output format"), - NULL, - NULL, - sizeof(struct SAPState), - CODEC_ID_AAC, - CODEC_ID_MPEG4, - sap_write_header, - sap_write_packet, - sap_write_close, + .name = "sap", + .long_name = NULL_IF_CONFIG_SMALL("SAP output format"), + .priv_data_size = 
sizeof(struct SAPState), + .audio_codec = CODEC_ID_AAC, + .video_codec = CODEC_ID_MPEG4, + .write_header = sap_write_header, + .write_packet = sap_write_packet, + .write_trailer = sap_write_close, .flags = AVFMT_NOFILE | AVFMT_GLOBALHEADER, }; diff --git a/tests/seek_test.c b/libavformat/seek-test.c index 81a7821261..76a3e8ccff 100644 --- a/tests/seek_test.c +++ b/libavformat/seek-test.c @@ -25,6 +25,7 @@ #include <string.h> #include "libavutil/common.h" +#include "libavutil/mathematics.h" #include "libavformat/avformat.h" #undef exit diff --git a/libavformat/seek.c b/libavformat/seek.c index dd6109b68c..65211bfacf 100644 --- a/libavformat/seek.c +++ b/libavformat/seek.c @@ -21,6 +21,7 @@ */ #include "seek.h" +#include "libavutil/mathematics.h" #include "libavutil/mem.h" #include "internal.h" diff --git a/libavformat/segafilm.c b/libavformat/segafilm.c index ae194d496b..7a84daf2ef 100644 --- a/libavformat/segafilm.c +++ b/libavformat/segafilm.c @@ -111,7 +111,9 @@ static int film_read_header(AVFormatContext *s, film->audio_samplerate = AV_RB16(&scratch[24]); film->audio_channels = scratch[21]; film->audio_bits = scratch[22]; - if (film->audio_bits == 8) + if (scratch[23] == 2) + film->audio_type = CODEC_ID_ADPCM_ADX; + else if (film->audio_bits == 8) film->audio_type = CODEC_ID_PCM_S8; else if (film->audio_bits == 16) film->audio_type = CODEC_ID_PCM_S16BE; @@ -149,12 +151,19 @@ static int film_read_header(AVFormatContext *s, st->codec->codec_id = film->audio_type; st->codec->codec_tag = 1; st->codec->channels = film->audio_channels; - st->codec->bits_per_coded_sample = film->audio_bits; st->codec->sample_rate = film->audio_samplerate; + + if (film->audio_type == CODEC_ID_ADPCM_ADX) { + st->codec->bits_per_coded_sample = 18 * 8 / 32; + st->codec->block_align = st->codec->channels * 18; + } else { + st->codec->bits_per_coded_sample = film->audio_bits; + st->codec->block_align = st->codec->channels * + st->codec->bits_per_coded_sample / 8; + } + st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * st->codec->bits_per_coded_sample; - st->codec->block_align = st->codec->channels * - st->codec->bits_per_coded_sample / 8; } /* load the sample table */ @@ -187,8 +196,12 @@ static int film_read_header(AVFormatContext *s, film->sample_table[i].pts *= film->base_clock; film->sample_table[i].pts /= film->audio_samplerate; - audio_frame_counter += (film->sample_table[i].sample_size / - (film->audio_channels * film->audio_bits / 8)); + if (film->audio_type == CODEC_ID_ADPCM_ADX) + audio_frame_counter += (film->sample_table[i].sample_size * 32 / + (18 * film->audio_channels)); + else + audio_frame_counter += (film->sample_table[i].sample_size / + (film->audio_channels * film->audio_bits / 8)); } else { film->sample_table[i].stream = film->video_stream_index; film->sample_table[i].pts = AV_RB32(&scratch[8]) & 0x7FFFFFFF; @@ -227,7 +240,8 @@ static int film_read_packet(AVFormatContext *s, return AVERROR(ENOMEM); avio_read(pb, pkt->data, sample->sample_size); } else if ((sample->stream == film->audio_stream_index) && - (film->audio_channels == 2)) { + (film->audio_channels == 2) && + (film->audio_type != CODEC_ID_ADPCM_ADX)) { /* stereo PCM needs to be interleaved */ if (av_new_packet(pkt, sample->sample_size)) @@ -283,11 +297,11 @@ static int film_read_close(AVFormatContext *s) } AVInputFormat ff_segafilm_demuxer = { - "film_cpk", - NULL_IF_CONFIG_SMALL("Sega FILM/CPK format"), - sizeof(FilmDemuxContext), - film_probe, - film_read_header, - film_read_packet, - film_read_close, + .name = 
"film_cpk", + .long_name = NULL_IF_CONFIG_SMALL("Sega FILM/CPK format"), + .priv_data_size = sizeof(FilmDemuxContext), + .read_probe = film_probe, + .read_header = film_read_header, + .read_packet = film_read_packet, + .read_close = film_read_close, }; diff --git a/libavformat/sierravmd.c b/libavformat/sierravmd.c index 64836e214b..3b9a9a163d 100644 --- a/libavformat/sierravmd.c +++ b/libavformat/sierravmd.c @@ -281,11 +281,11 @@ static int vmd_read_close(AVFormatContext *s) } AVInputFormat ff_vmd_demuxer = { - "vmd", - NULL_IF_CONFIG_SMALL("Sierra VMD format"), - sizeof(VmdDemuxContext), - vmd_probe, - vmd_read_header, - vmd_read_packet, - vmd_read_close, + .name = "vmd", + .long_name = NULL_IF_CONFIG_SMALL("Sierra VMD format"), + .priv_data_size = sizeof(VmdDemuxContext), + .read_probe = vmd_probe, + .read_header = vmd_read_header, + .read_packet = vmd_read_packet, + .read_close = vmd_read_close, }; diff --git a/libavformat/siff.c b/libavformat/siff.c index d0f682b0cf..d39655f648 100644 --- a/libavformat/siff.c +++ b/libavformat/siff.c @@ -228,11 +228,11 @@ static int siff_read_packet(AVFormatContext *s, AVPacket *pkt) } AVInputFormat ff_siff_demuxer = { - "siff", - NULL_IF_CONFIG_SMALL("Beam Software SIFF"), - sizeof(SIFFContext), - siff_probe, - siff_read_header, - siff_read_packet, + .name = "siff", + .long_name = NULL_IF_CONFIG_SMALL("Beam Software SIFF"), + .priv_data_size = sizeof(SIFFContext), + .read_probe = siff_probe, + .read_header = siff_read_header, + .read_packet = siff_read_packet, .extensions = "vb,son" }; diff --git a/libavformat/smacker.c b/libavformat/smacker.c index 29a66e79fa..02e1e7b985 100644 --- a/libavformat/smacker.c +++ b/libavformat/smacker.c @@ -340,11 +340,11 @@ static int smacker_read_close(AVFormatContext *s) } AVInputFormat ff_smacker_demuxer = { - "smk", - NULL_IF_CONFIG_SMALL("Smacker video"), - sizeof(SmackerContext), - smacker_probe, - smacker_read_header, - smacker_read_packet, - smacker_read_close, + .name = "smk", + .long_name = NULL_IF_CONFIG_SMALL("Smacker video"), + .priv_data_size = sizeof(SmackerContext), + .read_probe = smacker_probe, + .read_header = smacker_read_header, + .read_packet = smacker_read_packet, + .read_close = smacker_read_close, }; diff --git a/libavformat/sol.c b/libavformat/sol.c index c0d2c5d5a2..895623392c 100644 --- a/libavformat/sol.c +++ b/libavformat/sol.c @@ -141,12 +141,10 @@ static int sol_read_packet(AVFormatContext *s, } AVInputFormat ff_sol_demuxer = { - "sol", - NULL_IF_CONFIG_SMALL("Sierra SOL format"), - 0, - sol_probe, - sol_read_header, - sol_read_packet, - NULL, - pcm_read_seek, + .name = "sol", + .long_name = NULL_IF_CONFIG_SMALL("Sierra SOL format"), + .read_probe = sol_probe, + .read_header = sol_read_header, + .read_packet = sol_read_packet, + .read_seek = pcm_read_seek, }; diff --git a/libavformat/soxdec.c b/libavformat/soxdec.c index 16d26ab4a7..af8cfef1ba 100644 --- a/libavformat/soxdec.c +++ b/libavformat/soxdec.c @@ -23,13 +23,14 @@ */ /** - * SoX native format demuxer * @file + * SoX native format demuxer * @author Daniel Verkamp - * @sa http://wiki.multimedia.cx/index.php?title=SoX_native_intermediate_format + * @see http://wiki.multimedia.cx/index.php?title=SoX_native_intermediate_format */ #include "libavutil/intreadwrite.h" +#include "libavutil/intfloat_readwrite.h" #include "libavutil/dict.h" #include "avformat.h" #include "pcm.h" @@ -142,12 +143,10 @@ static int sox_read_packet(AVFormatContext *s, } AVInputFormat ff_sox_demuxer = { - "sox", - NULL_IF_CONFIG_SMALL("SoX native format"), - 0, - 
sox_probe, - sox_read_header, - sox_read_packet, - NULL, - pcm_read_seek, + .name = "sox", + .long_name = NULL_IF_CONFIG_SMALL("SoX native format"), + .read_probe = sox_probe, + .read_header = sox_read_header, + .read_packet = sox_read_packet, + .read_seek = pcm_read_seek, }; diff --git a/libavformat/soxenc.c b/libavformat/soxenc.c index a07a2068c3..a8549b0ffa 100644 --- a/libavformat/soxenc.c +++ b/libavformat/soxenc.c @@ -23,13 +23,14 @@ */ /** - * SoX native format muxer * @file + * SoX native format muxer * @author Daniel Verkamp - * @sa http://wiki.multimedia.cx/index.php?title=SoX_native_intermediate_format + * @see http://wiki.multimedia.cx/index.php?title=SoX_native_intermediate_format */ #include "libavutil/intreadwrite.h" +#include "libavutil/intfloat_readwrite.h" #include "libavutil/dict.h" #include "avformat.h" #include "avio_internal.h" @@ -115,14 +116,13 @@ static int sox_write_trailer(AVFormatContext *s) } AVOutputFormat ff_sox_muxer = { - "sox", - NULL_IF_CONFIG_SMALL("SoX native format"), - NULL, - "sox", - sizeof(SoXContext), - CODEC_ID_PCM_S32LE, - CODEC_ID_NONE, - sox_write_header, - sox_write_packet, - sox_write_trailer, + .name = "sox", + .long_name = NULL_IF_CONFIG_SMALL("SoX native format"), + .extensions = "sox", + .priv_data_size = sizeof(SoXContext), + .audio_codec = CODEC_ID_PCM_S32LE, + .video_codec = CODEC_ID_NONE, + .write_header = sox_write_header, + .write_packet = sox_write_packet, + .write_trailer = sox_write_trailer, }; diff --git a/libavformat/spdifdec.c b/libavformat/spdifdec.c index dd29a5fecc..64960ffbdd 100644 --- a/libavformat/spdifdec.c +++ b/libavformat/spdifdec.c @@ -226,11 +226,10 @@ static int spdif_read_packet(AVFormatContext *s, AVPacket *pkt) } AVInputFormat ff_spdif_demuxer = { - "spdif", - NULL_IF_CONFIG_SMALL("IEC 61937 (compressed data in S/PDIF)"), - 0, - spdif_probe, - spdif_read_header, - spdif_read_packet, + .name = "spdif", + .long_name = NULL_IF_CONFIG_SMALL("IEC 61937 (compressed data in S/PDIF)"), + .read_probe = spdif_probe, + .read_header = spdif_read_header, + .read_packet = spdif_read_packet, .flags = AVFMT_GENERIC_INDEX, }; diff --git a/libavformat/spdifenc.c b/libavformat/spdifenc.c index 3c170bd7cc..84372f82da 100644 --- a/libavformat/spdifenc.c +++ b/libavformat/spdifenc.c @@ -518,13 +518,13 @@ static int spdif_write_packet(struct AVFormatContext *s, AVPacket *pkt) } if (ctx->extra_bswap ^ (ctx->spdif_flags & SPDIF_FLAG_BIGENDIAN)) { - avio_write(s->pb, ctx->out_buf, ctx->out_bytes & ~1); + avio_write(s->pb, ctx->out_buf, ctx->out_bytes & ~1); } else { - av_fast_malloc(&ctx->buffer, &ctx->buffer_size, ctx->out_bytes + FF_INPUT_BUFFER_PADDING_SIZE); - if (!ctx->buffer) - return AVERROR(ENOMEM); - ff_spdif_bswap_buf16((uint16_t *)ctx->buffer, (uint16_t *)ctx->out_buf, ctx->out_bytes >> 1); - avio_write(s->pb, ctx->buffer, ctx->out_bytes & ~1); + av_fast_malloc(&ctx->buffer, &ctx->buffer_size, ctx->out_bytes + FF_INPUT_BUFFER_PADDING_SIZE); + if (!ctx->buffer) + return AVERROR(ENOMEM); + ff_spdif_bswap_buf16((uint16_t *)ctx->buffer, (uint16_t *)ctx->out_buf, ctx->out_bytes >> 1); + avio_write(s->pb, ctx->buffer, ctx->out_bytes & ~1); } /* a final lone byte has to be MSB aligned */ @@ -541,16 +541,15 @@ static int spdif_write_packet(struct AVFormatContext *s, AVPacket *pkt) } AVOutputFormat ff_spdif_muxer = { - "spdif", - NULL_IF_CONFIG_SMALL("IEC 61937 (used on S/PDIF - IEC958)"), - NULL, - "spdif", - sizeof(IEC61937Context), - CODEC_ID_AC3, - CODEC_ID_NONE, - spdif_write_header, - spdif_write_packet, - spdif_write_trailer, 
+ .name = "spdif", + .long_name = NULL_IF_CONFIG_SMALL("IEC 61937 (used on S/PDIF - IEC958)"), + .extensions = "spdif", + .priv_data_size = sizeof(IEC61937Context), + .audio_codec = CODEC_ID_AC3, + .video_codec = CODEC_ID_NONE, + .write_header = spdif_write_header, + .write_packet = spdif_write_packet, + .write_trailer = spdif_write_trailer, .flags = AVFMT_NOTIMESTAMPS, .priv_class = &class, }; diff --git a/libavformat/swfdec.c b/libavformat/swfdec.c index f90564f3db..d399cc3a5d 100644 --- a/libavformat/swfdec.c +++ b/libavformat/swfdec.c @@ -204,14 +204,13 @@ static int swf_read_packet(AVFormatContext *s, AVPacket *pkt) skip: avio_skip(pb, len); } - return 0; } AVInputFormat ff_swf_demuxer = { - "swf", - NULL_IF_CONFIG_SMALL("Flash format"), - sizeof(SWFContext), - swf_probe, - swf_read_header, - swf_read_packet, + .name = "swf", + .long_name = NULL_IF_CONFIG_SMALL("Flash format"), + .priv_data_size = sizeof(SWFContext), + .read_probe = swf_probe, + .read_header = swf_read_header, + .read_packet = swf_read_packet, }; diff --git a/libavformat/swfenc.c b/libavformat/swfenc.c index 09c2d61f28..af812d09eb 100644 --- a/libavformat/swfenc.c +++ b/libavformat/swfenc.c @@ -507,29 +507,28 @@ static int swf_write_trailer(AVFormatContext *s) #if CONFIG_SWF_MUXER AVOutputFormat ff_swf_muxer = { - "swf", - NULL_IF_CONFIG_SMALL("Flash format"), - "application/x-shockwave-flash", - "swf", - sizeof(SWFContext), - CODEC_ID_MP3, - CODEC_ID_FLV1, - swf_write_header, - swf_write_packet, - swf_write_trailer, + .name = "swf", + .long_name = NULL_IF_CONFIG_SMALL("Flash format"), + .mime_type = "application/x-shockwave-flash", + .extensions = "swf", + .priv_data_size = sizeof(SWFContext), + .audio_codec = CODEC_ID_MP3, + .video_codec = CODEC_ID_FLV1, + .write_header = swf_write_header, + .write_packet = swf_write_packet, + .write_trailer = swf_write_trailer, }; #endif #if CONFIG_AVM2_MUXER AVOutputFormat ff_avm2_muxer = { - "avm2", - NULL_IF_CONFIG_SMALL("Flash 9 (AVM2) format"), - "application/x-shockwave-flash", - NULL, - sizeof(SWFContext), - CODEC_ID_MP3, - CODEC_ID_FLV1, - swf_write_header, - swf_write_packet, - swf_write_trailer, + .name = "avm2", + .long_name = NULL_IF_CONFIG_SMALL("Flash 9 (AVM2) format"), + .mime_type = "application/x-shockwave-flash", + .priv_data_size = sizeof(SWFContext), + .audio_codec = CODEC_ID_MP3, + .video_codec = CODEC_ID_FLV1, + .write_header = swf_write_header, + .write_packet = swf_write_packet, + .write_trailer = swf_write_trailer, }; #endif diff --git a/libavformat/thp.c b/libavformat/thp.c index 2d1f74e38b..17054df78c 100644 --- a/libavformat/thp.c +++ b/libavformat/thp.c @@ -20,6 +20,7 @@ */ #include "libavutil/intreadwrite.h" +#include "libavutil/intfloat_readwrite.h" #include "avformat.h" typedef struct ThpDemuxContext { @@ -188,10 +189,10 @@ static int thp_read_packet(AVFormatContext *s, } AVInputFormat ff_thp_demuxer = { - "thp", - NULL_IF_CONFIG_SMALL("THP"), - sizeof(ThpDemuxContext), - thp_probe, - thp_read_header, - thp_read_packet + .name = "thp", + .long_name = NULL_IF_CONFIG_SMALL("THP"), + .priv_data_size = sizeof(ThpDemuxContext), + .read_probe = thp_probe, + .read_header = thp_read_header, + .read_packet = thp_read_packet }; diff --git a/libavformat/tiertexseq.c b/libavformat/tiertexseq.c index f8a5db1813..d8bdf2a97c 100644 --- a/libavformat/tiertexseq.c +++ b/libavformat/tiertexseq.c @@ -303,11 +303,11 @@ static int seq_read_close(AVFormatContext *s) } AVInputFormat ff_tiertexseq_demuxer = { - "tiertexseq", - NULL_IF_CONFIG_SMALL("Tiertex Limited SEQ 
format"), - sizeof(SeqDemuxContext), - seq_probe, - seq_read_header, - seq_read_packet, - seq_read_close, + .name = "tiertexseq", + .long_name = NULL_IF_CONFIG_SMALL("Tiertex Limited SEQ format"), + .priv_data_size = sizeof(SeqDemuxContext), + .read_probe = seq_probe, + .read_header = seq_read_header, + .read_packet = seq_read_packet, + .read_close = seq_read_close, }; diff --git a/libavformat/tmv.c b/libavformat/tmv.c index 0a16ae111a..9df11a4d11 100644 --- a/libavformat/tmv.c +++ b/libavformat/tmv.c @@ -20,10 +20,10 @@ */ /** - * 8088flex TMV file demuxer * @file + * 8088flex TMV file demuxer * @author Daniel Verkamp - * @sa http://www.oldskool.org/pc/8088_Corruption + * @see http://www.oldskool.org/pc/8088_Corruption */ #include "libavutil/intreadwrite.h" @@ -179,13 +179,12 @@ static int tmv_read_seek(AVFormatContext *s, int stream_index, } AVInputFormat ff_tmv_demuxer = { - "tmv", - NULL_IF_CONFIG_SMALL("8088flex TMV"), - sizeof(TMVContext), - tmv_probe, - tmv_read_header, - tmv_read_packet, - NULL, - tmv_read_seek, + .name = "tmv", + .long_name = NULL_IF_CONFIG_SMALL("8088flex TMV"), + .priv_data_size = sizeof(TMVContext), + .read_probe = tmv_probe, + .read_header = tmv_read_header, + .read_packet = tmv_read_packet, + .read_seek = tmv_read_seek, .flags = AVFMT_GENERIC_INDEX, }; diff --git a/libavformat/tta.c b/libavformat/tta.c index c37039d0da..5b07b09b65 100644 --- a/libavformat/tta.c +++ b/libavformat/tta.c @@ -145,13 +145,12 @@ static int tta_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp } AVInputFormat ff_tta_demuxer = { - "tta", - NULL_IF_CONFIG_SMALL("True Audio"), - sizeof(TTAContext), - tta_probe, - tta_read_header, - tta_read_packet, - NULL, - tta_read_seek, + .name = "tta", + .long_name = NULL_IF_CONFIG_SMALL("True Audio"), + .priv_data_size = sizeof(TTAContext), + .read_probe = tta_probe, + .read_header = tta_read_header, + .read_packet = tta_read_packet, + .read_seek = tta_read_seek, .extensions = "tta", }; diff --git a/libavformat/udp.c b/libavformat/udp.c index 78077e6595..8f7246de8c 100644 --- a/libavformat/udp.c +++ b/libavformat/udp.c @@ -407,7 +407,7 @@ static int udp_open(URLContext *h, const char *uri, int flags) p = strchr(uri, '?'); if (p) { if (av_find_info_tag(buf, sizeof(buf), "reuse", p)) { - char *endptr=NULL; + char *endptr = NULL; s->reuse_socket = strtol(buf, &endptr, 10); /* assume if no digits were found it is a request to enable it */ if (buf == endptr) @@ -429,7 +429,7 @@ static int udp_open(URLContext *h, const char *uri, int flags) if (av_find_info_tag(buf, sizeof(buf), "connect", p)) { s->is_connected = strtol(buf, NULL, 10); } - if (av_find_info_tag(buf, sizeof(buf), "buf_size", p)) { + if (av_find_info_tag(buf, sizeof(buf), "fifo_size", p)) { s->circular_buffer_size = strtol(buf, NULL, 10)*188; } } @@ -447,7 +447,7 @@ static int udp_open(URLContext *h, const char *uri, int flags) goto fail; } - if (s->is_multicast && !(h->flags & AVIO_WRONLY)) + if ((s->is_multicast || !s->local_port) && !(h->flags & AVIO_WRONLY)) s->local_port = port; udp_fd = udp_socket_create(s, &my_addr, &len); if (udp_fd < 0) diff --git a/libavformat/url.h b/libavformat/url.h index 5ef6a21d7b..103f7b6c00 100644 --- a/libavformat/url.h +++ b/libavformat/url.h @@ -74,12 +74,12 @@ typedef struct URLProtocol { * @return 0 in case of success, a negative value corresponding to an * AVERROR code in case of failure */ -int ffurl_alloc(URLContext **h, const char *url, int flags); +int ffurl_alloc(URLContext **puc, const char *filename, int flags); /** * Connect 
an URLContext that has been allocated by ffurl_alloc */ -int ffurl_connect(URLContext *h); +int ffurl_connect(URLContext *uc); /** * Create an URLContext for accessing to the resource indicated by @@ -92,7 +92,7 @@ int ffurl_connect(URLContext *h); * @return 0 in case of success, a negative value corresponding to an * AVERROR code in case of failure */ -int ffurl_open(URLContext **h, const char *url, int flags); +int ffurl_open(URLContext **puc, const char *filename, int flags); /** * Read up to size bytes from the resource accessed by h, and store diff --git a/libavformat/utils.c b/libavformat/utils.c index b610276844..8dd76f3e25 100644 --- a/libavformat/utils.c +++ b/libavformat/utils.c @@ -32,6 +32,7 @@ #include "metadata.h" #include "id3v2.h" #include "libavutil/avstring.h" +#include "libavutil/mathematics.h" #include "riff.h" #include "audiointerleave.h" #include "url.h" @@ -79,7 +80,7 @@ const char *avformat_license(void) * @param num must be >= 0 * @param den must be >= 1 */ -static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den) +static void frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den) { num += (den >> 1); if (num >= den) { @@ -97,7 +98,7 @@ static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den) * @param f fractional number * @param incr increment, can be positive or negative */ -static void av_frac_add(AVFrac *f, int64_t incr) +static void frac_add(AVFrac *f, int64_t incr) { int64_t num, den; @@ -524,20 +525,27 @@ int av_open_input_stream(AVFormatContext **ic_ptr, opts = convert_format_parameters(ap); if(!ap->prealloced_context) - ic = avformat_alloc_context(); + *ic_ptr = ic = avformat_alloc_context(); else ic = *ic_ptr; if (!ic) { err = AVERROR(ENOMEM); goto fail; } - ic->pb = pb; + if (pb && fmt && fmt->flags & AVFMT_NOFILE) + av_log(ic, AV_LOG_WARNING, "Custom AVIOContext makes no sense and " + "will be ignored with AVFMT_NOFILE format.\n"); + else + ic->pb = pb; - err = avformat_open_input(ic_ptr, filename, fmt, &opts); + if ((err = avformat_open_input(&ic, filename, fmt, &opts)) < 0) + goto fail; + ic->pb = ic->pb ? ic->pb : pb; // don't leak custom pb if it wasn't set above #if FF_API_OLD_METADATA ff_metadata_demux_compat(ic); #endif + *ic_ptr = ic; fail: av_dict_free(&opts); return err; @@ -584,9 +592,9 @@ int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt, return AVERROR(EINVAL); } - for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt && ret >= 0; + for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt; probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) { - int ret, score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0; + int score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0; int buf_offset = (probe_size == PROBE_BUF_MIN) ? 
0 : probe_size>>1; if (probe_size < offset) { @@ -681,7 +689,7 @@ int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputForma { AVFormatContext *s = *ps; int ret = 0; - AVFormatParameters ap = { 0 }; + AVFormatParameters ap = { { 0 } }; AVDictionary *tmp = NULL; if (!s && !(s = avformat_alloc_context())) @@ -800,6 +808,14 @@ int av_read_packet(AVFormatContext *s, AVPacket *pkt) continue; } + if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) && + (pkt->flags & AV_PKT_FLAG_CORRUPT)) { + av_log(s, AV_LOG_WARNING, + "Dropped corrupted packet (stream = %d)\n", + pkt->stream_index); + continue; + } + st= s->streams[pkt->stream_index]; switch(st->codec->codec_type){ @@ -1176,7 +1192,7 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st, } -static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt) +static int read_frame_internal(AVFormatContext *s, AVPacket *pkt) { AVStream *st; int len, ret, i; @@ -1307,7 +1323,7 @@ static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt) } } if(s->debug & FF_FDEBUG_TS) - av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n", + av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n", pkt->stream_index, pkt->pts, pkt->dts, @@ -1353,7 +1369,7 @@ int av_read_frame(AVFormatContext *s, AVPacket *pkt) } } if(genpts){ - int ret= av_read_frame_internal(s, pkt); + int ret= read_frame_internal(s, pkt); if(ret<0){ if(pktl && ret != AVERROR(EAGAIN)){ eof=1; @@ -1367,7 +1383,7 @@ int av_read_frame(AVFormatContext *s, AVPacket *pkt) return AVERROR(ENOMEM); }else{ assert(!s->packet_buffer); - return av_read_frame_internal(s, pkt); + return read_frame_internal(s, pkt); } } } @@ -1739,7 +1755,7 @@ int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, i return pos; } -static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){ +static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){ int64_t pos_min, pos_max; #if 0 AVStream *st; @@ -1764,7 +1780,7 @@ static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, return 0; } -static int av_seek_frame_generic(AVFormatContext *s, +static int seek_frame_generic(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) { int index; @@ -1832,7 +1848,7 @@ int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int f ff_read_frame_flush(s); if(flags & AVSEEK_FLAG_BYTE) - return av_seek_frame_byte(s, stream_index, timestamp, flags); + return seek_frame_byte(s, stream_index, timestamp, flags); if(stream_index < 0){ stream_index= av_find_default_stream_index(s); @@ -1856,7 +1872,7 @@ int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int f if(s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) return av_seek_frame_binary(s, stream_index, timestamp, flags); else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) - return av_seek_frame_generic(s, stream_index, timestamp, flags); + return seek_frame_generic(s, stream_index, timestamp, flags); else return -1; } @@ -1880,7 +1896,7 @@ int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int if(s->iformat->read_seek || 1) return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? 
AVSEEK_FLAG_BACKWARD : 0)); - // try some generic seek like av_seek_frame_generic() but with new ts semantics + // try some generic seek like seek_frame_generic() but with new ts semantics } /*******************************************************/ @@ -1890,7 +1906,7 @@ int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int * * @return TRUE if the stream has accurate duration for at least one component. */ -static int av_has_duration(AVFormatContext *ic) +static int has_duration(AVFormatContext *ic) { int i; AVStream *st; @@ -1908,7 +1924,7 @@ static int av_has_duration(AVFormatContext *ic) * * Also computes the global bitrate if possible. */ -static void av_update_stream_timings(AVFormatContext *ic) +static void update_stream_timings(AVFormatContext *ic) { int64_t start_time, start_time1, start_time_text, end_time, end_time1; int64_t duration, duration1; @@ -1966,7 +1982,7 @@ static void fill_all_stream_timings(AVFormatContext *ic) int i; AVStream *st; - av_update_stream_timings(ic); + update_stream_timings(ic); for(i = 0;i < ic->nb_streams; i++) { st = ic->streams[i]; if (st->start_time == AV_NOPTS_VALUE) { @@ -1978,7 +1994,7 @@ static void fill_all_stream_timings(AVFormatContext *ic) } } -static void av_estimate_timings_from_bit_rate(AVFormatContext *ic) +static void estimate_timings_from_bit_rate(AVFormatContext *ic) { int64_t filesize, duration; int bit_rate, i; @@ -2015,7 +2031,7 @@ static void av_estimate_timings_from_bit_rate(AVFormatContext *ic) #define DURATION_MAX_RETRY 3 /* only usable for MPEG-PS streams */ -static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset) +static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset) { AVPacket pkt1, *pkt = &pkt1; AVStream *st; @@ -2032,7 +2048,7 @@ static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset for (i=0; i<ic->nb_streams; i++) { st = ic->streams[i]; if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE) - av_log(st->codec, AV_LOG_WARNING, "start time is not set in av_estimate_timings_from_pts\n"); + av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n"); if (st->parser) { av_parser_close(st->parser); @@ -2094,7 +2110,7 @@ static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset } } -static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset) +static void estimate_timings(AVFormatContext *ic, int64_t old_offset) { int64_t file_size; @@ -2112,17 +2128,17 @@ static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset) !strcmp(ic->iformat->name, "mpegts")) && file_size && ic->pb->seekable) { /* get accurate estimate from the PTSes */ - av_estimate_timings_from_pts(ic, old_offset); - } else if (av_has_duration(ic)) { + estimate_timings_from_pts(ic, old_offset); + } else if (has_duration(ic)) { /* at least one component has timings - we use them for all the components */ fill_all_stream_timings(ic); } else { av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n"); /* less precise: use bitrate info */ - av_estimate_timings_from_bit_rate(ic); + estimate_timings_from_bit_rate(ic); } - av_update_stream_timings(ic); + update_stream_timings(ic); #if 0 { @@ -2142,30 +2158,30 @@ static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset) #endif } -static int has_codec_parameters(AVCodecContext *enc) +static int has_codec_parameters(AVCodecContext *avctx) { int val; - switch(enc->codec_type) { + switch 
(avctx->codec_type) { case AVMEDIA_TYPE_AUDIO: - val = enc->sample_rate && enc->channels && enc->sample_fmt != AV_SAMPLE_FMT_NONE; - if(!enc->frame_size && - (enc->codec_id == CODEC_ID_VORBIS || - enc->codec_id == CODEC_ID_AAC || - enc->codec_id == CODEC_ID_MP1 || - enc->codec_id == CODEC_ID_MP2 || - enc->codec_id == CODEC_ID_MP3 || - enc->codec_id == CODEC_ID_SPEEX || - enc->codec_id == CODEC_ID_CELT)) + val = avctx->sample_rate && avctx->channels && avctx->sample_fmt != AV_SAMPLE_FMT_NONE; + if(!avctx->frame_size && + (avctx->codec_id == CODEC_ID_VORBIS || + avctx->codec_id == CODEC_ID_AAC || + avctx->codec_id == CODEC_ID_MP1 || + avctx->codec_id == CODEC_ID_MP2 || + avctx->codec_id == CODEC_ID_MP3 || + avctx->codec_id == CODEC_ID_SPEEX || + avctx->codec_id == CODEC_ID_CELT)) return 0; break; case AVMEDIA_TYPE_VIDEO: - val = enc->width && enc->pix_fmt != PIX_FMT_NONE; + val = avctx->width && avctx->pix_fmt != PIX_FMT_NONE; break; default: val = 1; break; } - return enc->codec_id != CODEC_ID_NONE && val != 0; + return avctx->codec_id != CODEC_ID_NONE && val != 0; } static int has_decode_delay_been_guessed(AVStream *st) @@ -2174,7 +2190,7 @@ static int has_decode_delay_been_guessed(AVStream *st) st->codec_info_nb_frames >= 6 + st->codec->has_b_frames; } -static int try_decode_frame(AVStream *st, AVPacket *avpkt) +static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options) { int16_t *samples; AVCodec *codec; @@ -2185,12 +2201,13 @@ static int try_decode_frame(AVStream *st, AVPacket *avpkt) codec = avcodec_find_decoder(st->codec->codec_id); if (!codec) return -1; - ret = avcodec_open(st->codec, codec); + ret = avcodec_open2(st->codec, codec, options); if (ret < 0) return ret; } - if(!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st)){ + if(!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st) || + (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF)) { switch(st->codec->codec_type) { case AVMEDIA_TYPE_VIDEO: avcodec_get_frame_defaults(&picture); @@ -2304,21 +2321,34 @@ static int tb_unreliable(AVCodecContext *c){ return 0; } +#if FF_API_FORMAT_PARAMETERS int av_find_stream_info(AVFormatContext *ic) { + return avformat_find_stream_info(ic, NULL); +} +#endif + +int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) +{ int i, count, ret, read_size, j; AVStream *st; AVPacket pkt1, *pkt; int64_t old_offset = avio_tell(ic->pb); + int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those for(i=0;i<ic->nb_streams;i++) { AVCodec *codec; st = ic->streams[i]; - if (st->codec->codec_id == CODEC_ID_AAC) { + if (st->codec->codec_id == CODEC_ID_AAC && st->codec->extradata_size) { + // We need to discard these since they can be plain wrong for + // backwards compatible HE-AAC signaling. + // But when we have no extradata we need to keep them or we can't + // play anything at all. st->codec->sample_rate = 0; st->codec->frame_size = 0; st->codec->channels = 0; } + if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO || st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) { /* if(!st->time_base.num) @@ -2336,22 +2366,15 @@ int av_find_stream_info(AVFormatContext *ic) assert(!st->codec->codec); codec = avcodec_find_decoder(st->codec->codec_id); - /* Force decoding of at least one frame of codec data - * this makes sure the codec initializes the channel configuration - * and does not trust the values from the container. 
- */ - if (codec && codec->capabilities & CODEC_CAP_CHANNEL_CONF) - st->codec->channels = 0; - /* Ensure that subtitle_header is properly set. */ if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE && codec && !st->codec->codec) - avcodec_open(st->codec, codec); + avcodec_open2(st->codec, codec, options ? &options[i] : NULL); //try to just open decoders, in case this is enough to get parameters if(!has_codec_parameters(st->codec)){ if (codec && !st->codec->codec) - avcodec_open(st->codec, codec); + avcodec_open2(st->codec, codec, options ? &options[i] : NULL); } } @@ -2412,8 +2435,11 @@ int av_find_stream_info(AVFormatContext *ic) /* NOTE: a new stream can be added there if no header in file (AVFMTCTX_NOHEADER) */ - ret = av_read_frame_internal(ic, &pkt1); - if (ret < 0 && ret != AVERROR(EAGAIN)) { + ret = read_frame_internal(ic, &pkt1); + if (ret == AVERROR(EAGAIN)) + continue; + + if (ret < 0) { /* EOF or error */ ret = -1; /* we could not have all the codec parameters before EOF */ for(i=0;i<ic->nb_streams;i++) { @@ -2429,9 +2455,6 @@ int av_find_stream_info(AVFormatContext *ic) break; } - if (ret == AVERROR(EAGAIN)) - continue; - pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end); if ((ret = av_dup_packet(pkt)) < 0) goto find_stream_info_err; @@ -2485,9 +2508,13 @@ int av_find_stream_info(AVFormatContext *ic) /* if still no information, we try to open the codec and to decompress the frame. We try to avoid that in most cases as it takes longer and uses more memory. For MPEG-4, we need to - decompress for QuickTime. */ - if (!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st)) - try_decode_frame(st, pkt); + decompress for QuickTime. + + If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at + least one frame of codec data, this makes sure the codec initializes + the channel configuration and does not only trust the values from the container. + */ + try_decode_frame(st, pkt, (options && i < orig_nb_streams )? 
&options[i] : NULL); st->codec_info_nb_frames++; count++; @@ -2568,7 +2595,7 @@ int av_find_stream_info(AVFormatContext *ic) } } - av_estimate_timings(ic, old_offset); + estimate_timings(ic, old_offset); compute_chapters_end(ic); @@ -2785,7 +2812,7 @@ AVStream *av_new_stream(AVFormatContext *s, int id) return NULL; } - st->codec= avcodec_alloc_context(); + st->codec = avcodec_alloc_context3(NULL); if (s->iformat) { /* no default bitrate if decoding */ st->codec->bit_rate = 0; @@ -2999,6 +3026,9 @@ int avformat_write_header(AVFormatContext *s, AVDictionary **options) av_dict_copy(&tmp, *options, 0); if ((ret = av_opt_set_dict(s, &tmp)) < 0) goto fail; + if (s->priv_data && s->oformat->priv_class && *(const AVClass**)s->priv_data==s->oformat->priv_class && + (ret = av_opt_set_dict(s->priv_data, &tmp)) < 0) + goto fail; // some sanity checks if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) { @@ -3113,7 +3143,7 @@ int avformat_write_header(AVFormatContext *s, AVDictionary **options) ret = AVERROR_INVALIDDATA; goto fail; } - av_frac_init(&st->pts, 0, 0, den); + frac_init(&st->pts, 0, 0, den); } } @@ -3191,11 +3221,11 @@ static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){ likely equal to the encoder delay, but it would be better if we had the real timestamps from the encoder */ if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) { - av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size); + frac_add(&st->pts, (int64_t)st->time_base.den * frame_size); } break; case AVMEDIA_TYPE_VIDEO: - av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num); + frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num); break; default: break; @@ -3211,8 +3241,9 @@ int av_write_frame(AVFormatContext *s, AVPacket *pkt) return ret; ret= s->oformat->write_packet(s, pkt); - if(!ret) - ret= url_ferror(s->pb); + + if (ret >= 0) + s->streams[pkt->stream_index]->nb_frames++; return ret; } @@ -3303,7 +3334,7 @@ int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pk * @return 1 if a packet was output, 0 if no packet could be output, * < 0 if an error occurred */ -static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){ +static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){ if(s->oformat->interleave_packet) return s->oformat->interleave_packet(s, out, in, flush); else @@ -3328,11 +3359,13 @@ int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){ for(;;){ AVPacket opkt; - int ret= av_interleave_packet(s, &opkt, pkt, 0); + int ret= interleave_packet(s, &opkt, pkt, 0); if(ret<=0) //FIXME cleanup needed for ret<0 ? return ret; ret= s->oformat->write_packet(s, &opkt); + if (ret >= 0) + s->streams[opkt.stream_index]->nb_frames++; av_free_packet(&opkt); pkt= NULL; @@ -3350,13 +3383,15 @@ int av_write_trailer(AVFormatContext *s) for(;;){ AVPacket pkt; - ret= av_interleave_packet(s, &pkt, NULL, 1); + ret= interleave_packet(s, &pkt, NULL, 1); if(ret<0) //FIXME cleanup needed for ret<0 ? 
goto fail; if(!ret) break; ret= s->oformat->write_packet(s, &pkt); + if (ret >= 0) + s->streams[pkt.stream_index]->nb_frames++; av_free_packet(&pkt); @@ -3381,6 +3416,15 @@ fail: return ret; } +int av_get_output_timestamp(struct AVFormatContext *s, int stream, + int64_t *dts, int64_t *wall) +{ + if (!s->oformat || !s->oformat->get_output_timestamp) + return AVERROR(ENOSYS); + s->oformat->get_output_timestamp(s, stream, dts, wall); + return 0; +} + void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx) { int i, j; @@ -3459,7 +3503,7 @@ static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_out st->codec->width*st->sample_aspect_ratio.num, st->codec->height*st->sample_aspect_ratio.den, 1024*1024); - av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d", + av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d", st->sample_aspect_ratio.num, st->sample_aspect_ratio.den, display_aspect_ratio.num, display_aspect_ratio.den); } @@ -4106,3 +4150,16 @@ void ff_make_absolute_url(char *buf, int size, const char *base, } av_strlcat(buf, rel, size); } + +int64_t ff_iso8601_to_unix_time(const char *datestr) +{ +#if HAVE_STRPTIME + struct tm time = {0}; + strptime(datestr, "%Y - %m - %dT%T", &time); + return mktime(&time); +#else + av_log(NULL, AV_LOG_WARNING, "strptime() unavailable on this system, cannot convert " + "the date string.\n"); + return 0; +#endif +} diff --git a/libavformat/vc1test.c b/libavformat/vc1test.c index 4ab9bca1af..5742155a25 100644 --- a/libavformat/vc1test.c +++ b/libavformat/vc1test.c @@ -110,11 +110,10 @@ static int vc1t_read_packet(AVFormatContext *s, } AVInputFormat ff_vc1t_demuxer = { - "vc1test", - NULL_IF_CONFIG_SMALL("VC-1 test bitstream format"), - 0, - vc1t_probe, - vc1t_read_header, - vc1t_read_packet, + .name = "vc1test", + .long_name = NULL_IF_CONFIG_SMALL("VC-1 test bitstream format"), + .read_probe = vc1t_probe, + .read_header = vc1t_read_header, + .read_packet = vc1t_read_packet, .flags = AVFMT_GENERIC_INDEX, }; diff --git a/libavformat/vc1testenc.c b/libavformat/vc1testenc.c index 9a77e924a1..20580fb3cf 100644 --- a/libavformat/vc1testenc.c +++ b/libavformat/vc1testenc.c @@ -82,14 +82,14 @@ static int vc1test_write_trailer(AVFormatContext *s) } AVOutputFormat ff_vc1t_muxer = { - "rcv", - NULL_IF_CONFIG_SMALL("VC-1 test bitstream"), - "", - "rcv", - sizeof(RCVContext), - CODEC_ID_NONE, - CODEC_ID_WMV3, - vc1test_write_header, - vc1test_write_packet, - vc1test_write_trailer, + .name = "rcv", + .long_name = NULL_IF_CONFIG_SMALL("VC-1 test bitstream"), + .mime_type = "", + .extensions = "rcv", + .priv_data_size = sizeof(RCVContext), + .audio_codec = CODEC_ID_NONE, + .video_codec = CODEC_ID_WMV3, + .write_header = vc1test_write_header, + .write_packet = vc1test_write_packet, + .write_trailer = vc1test_write_trailer, }; diff --git a/libavformat/version.h b/libavformat/version.h index 65a3fd298e..993bbeb58c 100644 --- a/libavformat/version.h +++ b/libavformat/version.h @@ -24,7 +24,7 @@ #include "libavutil/avutil.h" #define LIBAVFORMAT_VERSION_MAJOR 52 -#define LIBAVFORMAT_VERSION_MINOR 110 +#define LIBAVFORMAT_VERSION_MINOR 111 #define LIBAVFORMAT_VERSION_MICRO 0 #define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \ @@ -122,5 +122,17 @@ #ifndef FF_API_FLAG_RTP_HINT #define FF_API_FLAG_RTP_HINT (LIBAVFORMAT_VERSION_MAJOR < 54) #endif +#ifndef FF_API_AVSTREAM_QUALITY +#define FF_API_AVSTREAM_QUALITY (LIBAVFORMAT_VERSION_MAJOR < 54) +#endif +#ifndef FF_API_LOOP_INPUT +#define FF_API_LOOP_INPUT 
(LIBAVFORMAT_VERSION_MAJOR < 54) +#endif +#ifndef FF_API_LOOP_OUTPUT +#define FF_API_LOOP_OUTPUT (LIBAVFORMAT_VERSION_MAJOR < 54) +#endif +#ifndef FF_API_TIMESTAMP +#define FF_API_TIMESTAMP (LIBAVFORMAT_VERSION_MAJOR < 54) +#endif #endif /* AVFORMAT_VERSION_H */ diff --git a/libavformat/vocdec.c b/libavformat/vocdec.c index b0195e90c3..fd03c5bec3 100644 --- a/libavformat/vocdec.c +++ b/libavformat/vocdec.c @@ -157,11 +157,11 @@ static int voc_read_packet(AVFormatContext *s, AVPacket *pkt) } AVInputFormat ff_voc_demuxer = { - "voc", - NULL_IF_CONFIG_SMALL("Creative Voice file format"), - sizeof(VocDecContext), - voc_probe, - voc_read_header, - voc_read_packet, + .name = "voc", + .long_name = NULL_IF_CONFIG_SMALL("Creative Voice file format"), + .priv_data_size = sizeof(VocDecContext), + .read_probe = voc_probe, + .read_header = voc_read_header, + .read_packet = voc_read_packet, .codec_tag=(const AVCodecTag* const []){ff_voc_codec_tags, 0}, }; diff --git a/libavformat/vocenc.c b/libavformat/vocenc.c index 74cd4790e4..0a9f24bdc2 100644 --- a/libavformat/vocenc.c +++ b/libavformat/vocenc.c @@ -90,15 +90,15 @@ static int voc_write_trailer(AVFormatContext *s) } AVOutputFormat ff_voc_muxer = { - "voc", - NULL_IF_CONFIG_SMALL("Creative Voice file format"), - "audio/x-voc", - "voc", - sizeof(VocEncContext), - CODEC_ID_PCM_U8, - CODEC_ID_NONE, - voc_write_header, - voc_write_packet, - voc_write_trailer, + .name = "voc", + .long_name = NULL_IF_CONFIG_SMALL("Creative Voice file format"), + .mime_type = "audio/x-voc", + .extensions = "voc", + .priv_data_size = sizeof(VocEncContext), + .audio_codec = CODEC_ID_PCM_U8, + .video_codec = CODEC_ID_NONE, + .write_header = voc_write_header, + .write_packet = voc_write_packet, + .write_trailer = voc_write_trailer, .codec_tag=(const AVCodecTag* const []){ff_voc_codec_tags, 0}, }; diff --git a/libavformat/vqf.c b/libavformat/vqf.c index e06f39349b..1be5931ad9 100644 --- a/libavformat/vqf.c +++ b/libavformat/vqf.c @@ -22,6 +22,7 @@ #include "avformat.h" #include "libavutil/intreadwrite.h" #include "libavutil/dict.h" +#include "libavutil/mathematics.h" typedef struct VqfContext { int frame_bit_len; @@ -249,13 +250,12 @@ static int vqf_read_seek(AVFormatContext *s, } AVInputFormat ff_vqf_demuxer = { - "vqf", - NULL_IF_CONFIG_SMALL("Nippon Telegraph and Telephone Corporation (NTT) TwinVQ"), - sizeof(VqfContext), - vqf_probe, - vqf_read_header, - vqf_read_packet, - NULL, - vqf_read_seek, + .name = "vqf", + .long_name = NULL_IF_CONFIG_SMALL("Nippon Telegraph and Telephone Corporation (NTT) TwinVQ"), + .priv_data_size = sizeof(VqfContext), + .read_probe = vqf_probe, + .read_header = vqf_read_header, + .read_packet = vqf_read_packet, + .read_seek = vqf_read_seek, .extensions = "vqf", }; diff --git a/libavformat/wav.c b/libavformat/wav.c index c5dbd631b4..1ae9413aed 100644 --- a/libavformat/wav.c +++ b/libavformat/wav.c @@ -22,22 +22,86 @@ * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ + +#include "libavutil/avassert.h" +#include "libavutil/dict.h" +#include "libavutil/log.h" +#include "libavutil/mathematics.h" +#include "libavutil/opt.h" #include "avformat.h" #include "avio_internal.h" #include "pcm.h" #include "riff.h" +#include "avio.h" +#include "avio_internal.h" #include "metadata.h" typedef struct { + const AVClass *class; int64_t data; int64_t data_end; int64_t minpts; int64_t maxpts; int last_duration; int w64; + int write_bext; } WAVContext; #if CONFIG_WAV_MUXER 
+static inline void bwf_write_bext_string(AVFormatContext *s, const char *key, int maxlen) +{ + AVDictionaryEntry *tag; + int len = 0; + + if (tag = av_dict_get(s->metadata, key, NULL, 0)) { + len = strlen(tag->value); + len = FFMIN(len, maxlen); + avio_write(s->pb, tag->value, len); + } + + ffio_fill(s->pb, 0, maxlen - len); +} + +static void bwf_write_bext_chunk(AVFormatContext *s) +{ + AVDictionaryEntry *tmp_tag; + uint64_t time_reference = 0; + int64_t bext = ff_start_tag(s->pb, "bext"); + + bwf_write_bext_string(s, "description", 256); + bwf_write_bext_string(s, "originator", 32); + bwf_write_bext_string(s, "originator_reference", 32); + bwf_write_bext_string(s, "origination_date", 10); + bwf_write_bext_string(s, "origination_time", 8); + + if (tmp_tag = av_dict_get(s->metadata, "time_reference", NULL, 0)) + time_reference = strtoll(tmp_tag->value, NULL, 10); + avio_wl64(s->pb, time_reference); + avio_wl16(s->pb, 1); // set version to 1 + + if (tmp_tag = av_dict_get(s->metadata, "umid", NULL, 0)) { + unsigned char umidpart_str[17] = {0}; + int i; + uint64_t umidpart; + int len = strlen(tmp_tag->value+2); + + for (i = 0; i < len/16; i++) { + memcpy(umidpart_str, tmp_tag->value + 2 + (i*16), 16); + umidpart = strtoll(umidpart_str, NULL, 16); + avio_wb64(s->pb, umidpart); + } + ffio_fill(s->pb, 0, 64 - i*8); + } else + ffio_fill(s->pb, 0, 64); // zero UMID + + ffio_fill(s->pb, 0, 190); // Reserved + + if (tmp_tag = av_dict_get(s->metadata, "coding_history", NULL, 0)) + avio_put_str(s->pb, tmp_tag->value); + + ff_end_tag(s->pb, bext); +} + static int wav_write_header(AVFormatContext *s) { WAVContext *wav = s->priv_data; @@ -64,6 +128,9 @@ static int wav_write_header(AVFormatContext *s) ff_end_tag(pb, fact); } + if (wav->write_bext) + bwf_write_bext_chunk(s); + av_set_pts_info(s->streams[0], 64, 1, s->streams[0]->codec->sample_rate); wav->maxpts = wav->last_duration = 0; wav->minpts = INT64_MAX; @@ -124,18 +191,33 @@ static int wav_write_trailer(AVFormatContext *s) return 0; } +#define OFFSET(x) offsetof(WAVContext, x) +#define ENC AV_OPT_FLAG_ENCODING_PARAM +static const AVOption options[] = { + { "write_bext", "Write BEXT chunk.", OFFSET(write_bext), FF_OPT_TYPE_INT, { 0 }, 0, 1, ENC }, + { NULL }, +}; + +static const AVClass wav_muxer_class = { + .class_name = "WAV muxer", + .item_name = av_default_item_name, + .option = options, + .version = LIBAVUTIL_VERSION_INT, +}; + AVOutputFormat ff_wav_muxer = { - "wav", - NULL_IF_CONFIG_SMALL("WAV format"), - "audio/x-wav", - "wav", - sizeof(WAVContext), - CODEC_ID_PCM_S16LE, - CODEC_ID_NONE, - wav_write_header, - wav_write_packet, - wav_write_trailer, + .name = "wav", + .long_name = NULL_IF_CONFIG_SMALL("WAV format"), + .mime_type = "audio/x-wav", + .extensions = "wav", + .priv_data_size = sizeof(WAVContext), + .audio_codec = CODEC_ID_PCM_S16LE, + .video_codec = CODEC_ID_NONE, + .write_header = wav_write_header, + .write_packet = wav_write_packet, + .write_trailer = wav_write_trailer, .codec_tag= (const AVCodecTag* const []){ff_codec_wav_tags, 0}, + .priv_class = &wav_muxer_class, }; #endif /* CONFIG_WAV_MUXER */ @@ -205,11 +287,13 @@ static int wav_parse_fmt_tag(AVFormatContext *s, int64_t size, AVStream **st) return 0; } -static inline int wav_parse_bext_string(AVFormatContext *s, const char *key, int length) +static inline int wav_parse_bext_string(AVFormatContext *s, const char *key, + int length) { char temp[257]; int ret; + av_assert0(length <= sizeof(temp)); if ((ret = avio_read(s->pb, temp, length)) < 0) return ret; @@ -279,7 +363,7 @@ 
static int wav_parse_bext_tag(AVFormatContext *s, int64_t size) coding_history[size] = 0; if ((ret = av_dict_set(&s->metadata, "coding_history", coding_history, - AV_METADATA_DONT_STRDUP_VAL)) < 0) + AV_DICT_DONT_STRDUP_VAL)) < 0) return ret; } @@ -335,6 +419,7 @@ static int wav_read_header(AVFormatContext *s, return AVERROR_INVALIDDATA; } avio_skip(pb, size - 24); /* skip rest of ds64 chunk */ + } for (;;) { @@ -376,7 +461,7 @@ static int wav_read_header(AVFormatContext *s, goto break_loop; break; case MKTAG('f','a','c','t'): - if(!sample_count) + if (!sample_count) sample_count = avio_rl32(pb); break; case MKTAG('b','e','x','t'): @@ -490,14 +575,13 @@ static int wav_read_seek(AVFormatContext *s, } AVInputFormat ff_wav_demuxer = { - "wav", - NULL_IF_CONFIG_SMALL("WAV format"), - sizeof(WAVContext), - wav_probe, - wav_read_header, - wav_read_packet, - NULL, - wav_read_seek, + .name = "wav", + .long_name = NULL_IF_CONFIG_SMALL("WAV format"), + .priv_data_size = sizeof(WAVContext), + .read_probe = wav_probe, + .read_header = wav_read_header, + .read_packet = wav_read_packet, + .read_seek = wav_read_seek, .flags= AVFMT_GENERIC_INDEX, .codec_tag= (const AVCodecTag* const []){ff_codec_wav_tags, 0}, }; @@ -579,14 +663,13 @@ static int w64_read_header(AVFormatContext *s, AVFormatParameters *ap) } AVInputFormat ff_w64_demuxer = { - "w64", - NULL_IF_CONFIG_SMALL("Sony Wave64 format"), - sizeof(WAVContext), - w64_probe, - w64_read_header, - wav_read_packet, - NULL, - wav_read_seek, + .name = "w64", + .long_name = NULL_IF_CONFIG_SMALL("Sony Wave64 format"), + .priv_data_size = sizeof(WAVContext), + .read_probe = w64_probe, + .read_header = w64_read_header, + .read_packet = wav_read_packet, + .read_seek = wav_read_seek, .flags = AVFMT_GENERIC_INDEX, .codec_tag = (const AVCodecTag* const []){ff_codec_wav_tags, 0}, }; diff --git a/libavformat/wc3movie.c b/libavformat/wc3movie.c index eb2eae1c9c..8bba572f4e 100644 --- a/libavformat/wc3movie.c +++ b/libavformat/wc3movie.c @@ -152,7 +152,6 @@ static int wc3_read_header(AVFormatContext *s, (uint8_t)fourcc_tag, (uint8_t)(fourcc_tag >> 8), (uint8_t)(fourcc_tag >> 16), (uint8_t)(fourcc_tag >> 24), (uint8_t)fourcc_tag, (uint8_t)(fourcc_tag >> 8), (uint8_t)(fourcc_tag >> 16), (uint8_t)(fourcc_tag >> 24)); return AVERROR_INVALIDDATA; - break; } fourcc_tag = avio_rl32(pb); @@ -293,11 +292,11 @@ static int wc3_read_close(AVFormatContext *s) } AVInputFormat ff_wc3_demuxer = { - "wc3movie", - NULL_IF_CONFIG_SMALL("Wing Commander III movie format"), - sizeof(Wc3DemuxContext), - wc3_probe, - wc3_read_header, - wc3_read_packet, - wc3_read_close, + .name = "wc3movie", + .long_name = NULL_IF_CONFIG_SMALL("Wing Commander III movie format"), + .priv_data_size = sizeof(Wc3DemuxContext), + .read_probe = wc3_probe, + .read_header = wc3_read_header, + .read_packet = wc3_read_packet, + .read_close = wc3_read_close, }; diff --git a/libavformat/westwood.c b/libavformat/westwood.c index 818fe2d8d3..7712865e36 100644 --- a/libavformat/westwood.c +++ b/libavformat/westwood.c @@ -368,21 +368,21 @@ static int wsvqa_read_packet(AVFormatContext *s, #if CONFIG_WSAUD_DEMUXER AVInputFormat ff_wsaud_demuxer = { - "wsaud", - NULL_IF_CONFIG_SMALL("Westwood Studios audio format"), - sizeof(WsAudDemuxContext), - wsaud_probe, - wsaud_read_header, - wsaud_read_packet, + .name = "wsaud", + .long_name = NULL_IF_CONFIG_SMALL("Westwood Studios audio format"), + .priv_data_size = sizeof(WsAudDemuxContext), + .read_probe = wsaud_probe, + .read_header = wsaud_read_header, + .read_packet = 
wsaud_read_packet, }; #endif #if CONFIG_WSVQA_DEMUXER AVInputFormat ff_wsvqa_demuxer = { - "wsvqa", - NULL_IF_CONFIG_SMALL("Westwood Studios VQA format"), - sizeof(WsVqaDemuxContext), - wsvqa_probe, - wsvqa_read_header, - wsvqa_read_packet, + .name = "wsvqa", + .long_name = NULL_IF_CONFIG_SMALL("Westwood Studios VQA format"), + .priv_data_size = sizeof(WsVqaDemuxContext), + .read_probe = wsvqa_probe, + .read_header = wsvqa_read_header, + .read_packet = wsvqa_read_packet, }; #endif diff --git a/libavformat/wtvdec.c b/libavformat/wtvdec.c index 5fe7e9fe12..f13dc74372 100644 --- a/libavformat/wtvdec.c +++ b/libavformat/wtvdec.c @@ -164,7 +164,6 @@ static AVIOContext * wtvfile_open_sector(int first_sector, uint64_t length, int } wf->sectors[0] = first_sector; wf->nb_sectors = 1; - wf->sector_bits = WTV_SECTOR_BITS; } else if (depth == 1) { wf->sectors = av_malloc(WTV_SECTOR_SIZE); if (!wf->sectors) { @@ -172,7 +171,6 @@ static AVIOContext * wtvfile_open_sector(int first_sector, uint64_t length, int return NULL; } wf->nb_sectors = read_ints(s->pb, wf->sectors, WTV_SECTOR_SIZE / 4); - wf->sector_bits = length & (1ULL<<63) ? WTV_SECTOR_BITS : WTV_BIGSECTOR_BITS; } else if (depth == 2) { uint32_t sectors1[WTV_SECTOR_SIZE / 4]; int nb_sectors1 = read_ints(s->pb, sectors1, WTV_SECTOR_SIZE / 4); @@ -189,12 +187,12 @@ static AVIOContext * wtvfile_open_sector(int first_sector, uint64_t length, int break; wf->nb_sectors += read_ints(s->pb, wf->sectors + i * WTV_SECTOR_SIZE / 4, WTV_SECTOR_SIZE / 4); } - wf->sector_bits = length & (1ULL<<63) ? WTV_SECTOR_BITS : WTV_BIGSECTOR_BITS; } else { av_log(s, AV_LOG_ERROR, "unsupported file allocation table depth (0x%x)\n", depth); av_free(wf); return NULL; } + wf->sector_bits = length & (1ULL<<63) ? WTV_SECTOR_BITS : WTV_BIGSECTOR_BITS; if (!wf->nb_sectors) { av_free(wf->sectors); @@ -716,7 +714,7 @@ enum { * Parse WTV chunks * @param mode SEEK_TO_DATA or SEEK_TO_PTS * @param seekts timestamp - * @param[out] len Length of data chunk + * @param[out] len_ptr Length of data chunk * @return stream index of data chunk, or <0 on error */ static int parse_chunks(AVFormatContext *s, int mode, int64_t seekts, int *len_ptr) diff --git a/libavformat/wv.c b/libavformat/wv.c index 9da416e580..ec59c26521 100644 --- a/libavformat/wv.c +++ b/libavformat/wv.c @@ -110,6 +110,9 @@ static int wv_read_block_header(AVFormatContext *ctx, AVIOContext *pb, int appen size = wc->blksize; } wc->flags = AV_RL32(wc->extra + 4); + // blocks with zero samples don't contain actual audio information and should be ignored + if (!AV_RN32(wc->extra)) + return 0; //parse flags bpp = ((wc->flags & 3) + 1) << 3; chan = 1 + !(wc->flags & WV_MONO); @@ -207,8 +210,14 @@ static int wv_read_header(AVFormatContext *s, AVStream *st; wc->block_parsed = 0; - if(wv_read_block_header(s, pb, 0) < 0) - return -1; + for(;;){ + if(wv_read_block_header(s, pb, 0) < 0) + return -1; + if(!AV_RN32(wc->extra)) + avio_skip(pb, wc->blksize - 24); + else + break; + } /* now we are ready: build format streams */ st = av_new_stream(s, 0); @@ -342,12 +351,11 @@ static int wv_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, } AVInputFormat ff_wv_demuxer = { - "wv", - NULL_IF_CONFIG_SMALL("WavPack"), - sizeof(WVContext), - wv_probe, - wv_read_header, - wv_read_packet, - NULL, - wv_read_seek, + .name = "wv", + .long_name = NULL_IF_CONFIG_SMALL("WavPack"), + .priv_data_size = sizeof(WVContext), + .read_probe = wv_probe, + .read_header = wv_read_header, + .read_packet = wv_read_packet, + .read_seek = wv_read_seek, 
}; diff --git a/libavformat/xa.c b/libavformat/xa.c index 3b6a77f4af..de94447622 100644 --- a/libavformat/xa.c +++ b/libavformat/xa.c @@ -119,10 +119,10 @@ static int xa_read_packet(AVFormatContext *s, } AVInputFormat ff_xa_demuxer = { - "xa", - NULL_IF_CONFIG_SMALL("Maxis XA File Format"), - sizeof(MaxisXADemuxContext), - xa_probe, - xa_read_header, - xa_read_packet, + .name = "xa", + .long_name = NULL_IF_CONFIG_SMALL("Maxis XA File Format"), + .priv_data_size = sizeof(MaxisXADemuxContext), + .read_probe = xa_probe, + .read_header = xa_read_header, + .read_packet = xa_read_packet, }; diff --git a/libavformat/xwma.c b/libavformat/xwma.c index d068b8259a..9e100b1b0b 100644 --- a/libavformat/xwma.c +++ b/libavformat/xwma.c @@ -252,10 +252,10 @@ static int xwma_read_packet(AVFormatContext *s, AVPacket *pkt) } AVInputFormat ff_xwma_demuxer = { - "xwma", - NULL_IF_CONFIG_SMALL("Microsoft xWMA"), - sizeof(XWMAContext), - xwma_probe, - xwma_read_header, - xwma_read_packet, + .name = "xwma", + .long_name = NULL_IF_CONFIG_SMALL("Microsoft xWMA"), + .priv_data_size = sizeof(XWMAContext), + .read_probe = xwma_probe, + .read_header = xwma_read_header, + .read_packet = xwma_read_packet, }; diff --git a/libavformat/yop.c b/libavformat/yop.c index 486fdc5616..da4f050ec0 100644 --- a/libavformat/yop.c +++ b/libavformat/yop.c @@ -1,5 +1,4 @@ -/** - * @file +/* * Psygnosis YOP demuxer * * Copyright (C) 2010 Mohamed Naufal Basheer <naufal11@gmail.com> @@ -203,14 +202,14 @@ static int yop_read_seek(AVFormatContext *s, int stream_index, } AVInputFormat ff_yop_demuxer = { - "yop", - NULL_IF_CONFIG_SMALL("Psygnosis YOP Format"), - sizeof(YopDecContext), - yop_probe, - yop_read_header, - yop_read_packet, - yop_read_close, - yop_read_seek, + .name = "yop", + .long_name = NULL_IF_CONFIG_SMALL("Psygnosis YOP Format"), + .priv_data_size = sizeof(YopDecContext), + .read_probe = yop_probe, + .read_header = yop_read_header, + .read_packet = yop_read_packet, + .read_close = yop_read_close, + .read_seek = yop_read_seek, .extensions = "yop", .flags = AVFMT_GENERIC_INDEX, }; diff --git a/libavformat/yuv4mpeg.c b/libavformat/yuv4mpeg.c index 90b222d1d4..a7f55b414a 100644 --- a/libavformat/yuv4mpeg.c +++ b/libavformat/yuv4mpeg.c @@ -176,15 +176,15 @@ static int yuv4_write_header(AVFormatContext *s) } AVOutputFormat ff_yuv4mpegpipe_muxer = { - "yuv4mpegpipe", - NULL_IF_CONFIG_SMALL("YUV4MPEG pipe format"), - "", - "y4m", - sizeof(int), - CODEC_ID_NONE, - CODEC_ID_RAWVIDEO, - yuv4_write_header, - yuv4_write_packet, + .name = "yuv4mpegpipe", + .long_name = NULL_IF_CONFIG_SMALL("YUV4MPEG pipe format"), + .mime_type = "", + .extensions = "y4m", + .priv_data_size = sizeof(int), + .audio_codec = CODEC_ID_NONE, + .video_codec = CODEC_ID_RAWVIDEO, + .write_header = yuv4_write_header, + .write_packet = yuv4_write_packet, .flags = AVFMT_RAWPICTURE, }; #endif @@ -397,12 +397,12 @@ static int yuv4_probe(AVProbeData *pd) #if CONFIG_YUV4MPEGPIPE_DEMUXER AVInputFormat ff_yuv4mpegpipe_demuxer = { - "yuv4mpegpipe", - NULL_IF_CONFIG_SMALL("YUV4MPEG pipe format"), - sizeof(struct frame_attributes), - yuv4_probe, - yuv4_read_header, - yuv4_read_packet, + .name = "yuv4mpegpipe", + .long_name = NULL_IF_CONFIG_SMALL("YUV4MPEG pipe format"), + .priv_data_size = sizeof(struct frame_attributes), + .read_probe = yuv4_probe, + .read_header = yuv4_read_header, + .read_packet = yuv4_read_packet, .extensions = "y4m" }; #endif diff --git a/libavutil/Makefile b/libavutil/Makefile index f6f5fd3b91..1025588657 100644 --- a/libavutil/Makefile +++ 
b/libavutil/Makefile @@ -78,13 +78,14 @@ OBJS-$(ARCH_ARM) += arm/cpu.o OBJS-$(ARCH_PPC) += ppc/cpu.o OBJS-$(ARCH_X86) += x86/cpu.o -TESTPROGS = adler32 aes base64 cpu crc des eval lls md5 pca sha tree +TESTPROGS = adler32 aes avstring base64 cpu crc des eval file lfg lls \ + md5 opt pca parseutils rational sha tree TESTPROGS-$(HAVE_LZO1X_999_COMPRESS) += lzo DIRS = arm bfin sh4 x86 ARCH_HEADERS = bswap.h intmath.h intreadwrite.h timer.h -include $(SUBDIR)../subdir.mak +include $(SRC_PATH)/subdir.mak $(SUBDIR)lzo-test$(EXESUF): ELIBS = -llzo2 diff --git a/libavutil/adler32.c b/libavutil/adler32.c index 4f2001025b..9d3d896506 100644 --- a/libavutil/adler32.c +++ b/libavutil/adler32.c @@ -26,24 +26,28 @@ #define BASE 65521L /* largest prime smaller than 65536 */ -#define DO1(buf) {s1 += *buf++; s2 += s1;} +#define DO1(buf) { s1 += *buf++; s2 += s1; } #define DO4(buf) DO1(buf); DO1(buf); DO1(buf); DO1(buf); #define DO16(buf) DO4(buf); DO4(buf); DO4(buf); DO4(buf); -unsigned long av_adler32_update(unsigned long adler, const uint8_t *buf, unsigned int len) +unsigned long av_adler32_update(unsigned long adler, const uint8_t * buf, + unsigned int len) { unsigned long s1 = adler & 0xffff; unsigned long s2 = adler >> 16; - while (len>0) { + while (len > 0) { #if CONFIG_SMALL - while(len>4 && s2 < (1U<<31)){ - DO4(buf); len-=4; + while (len > 4 && s2 < (1U << 31)) { + DO4(buf); + len -= 4; + } #else - while(len>16 && s2 < (1U<<31)){ - DO16(buf); len-=16; -#endif + while (len > 16 && s2 < (1U << 31)) { + DO16(buf); + len -= 16; } +#endif DO1(buf); len--; s1 %= BASE; s2 %= BASE; @@ -52,22 +56,32 @@ unsigned long av_adler32_update(unsigned long adler, const uint8_t *buf, unsigne } #ifdef TEST +#include <string.h> #include "log.h" #include "timer.h" #define LEN 7001 volatile int checksum; -int main(void){ +int main(int argc, char **argv) +{ int i; char data[LEN]; + av_log_set_level(AV_LOG_DEBUG); - for(i=0; i<LEN; i++) - data[i]= ((i*i)>>3) + 123*i; - for(i=0; i<1000; i++){ - START_TIMER - checksum= av_adler32_update(1, data, LEN); - STOP_TIMER("adler") + + for (i = 0; i < LEN; i++) + data[i] = ((i * i) >> 3) + 123 * i; + + if (argc > 1 && !strcmp(argv[1], "-t")) { + for (i = 0; i < 1000; i++) { + START_TIMER; + checksum = av_adler32_update(1, data, LEN); + STOP_TIMER("adler"); + } + } else { + checksum = av_adler32_update(1, data, LEN); } - av_log(NULL, AV_LOG_DEBUG, "%X == 50E6E508\n", checksum); - return 0; + + av_log(NULL, AV_LOG_DEBUG, "%X (expected 50E6E508)\n", checksum); + return checksum == 0x50e6e508 ? 0 : 1; } #endif diff --git a/libavutil/aes.c b/libavutil/aes.c index 59f1cf34e8..49093efc53 100644 --- a/libavutil/aes.c +++ b/libavutil/aes.c @@ -22,6 +22,7 @@ #include "common.h" #include "aes.h" +#include "intreadwrite.h" typedef union { uint64_t u64[2]; @@ -30,13 +31,13 @@ typedef union { uint8_t u8[16]; } av_aes_block; -typedef struct AVAES{ +typedef struct AVAES { // Note: round_key[16] is accessed in the init code, but this only // overwrites state, which does not matter (see also r7471). 
av_aes_block round_key[15]; av_aes_block state[2]; int rounds; -}AVAES; +} AVAES; const int av_aes_size= sizeof(AVAES); @@ -54,23 +55,58 @@ static uint32_t enc_multbl[4][256]; static uint32_t dec_multbl[4][256]; #endif -static inline void addkey(av_aes_block *dst, const av_aes_block *src, const av_aes_block *round_key){ +#if HAVE_BIGENDIAN +# define ROT(x, s) ((x >> s) | (x << (32-s))) +#else +# define ROT(x, s) ((x << s) | (x >> (32-s))) +#endif + +static inline void addkey(av_aes_block *dst, const av_aes_block *src, + const av_aes_block *round_key) +{ dst->u64[0] = src->u64[0] ^ round_key->u64[0]; dst->u64[1] = src->u64[1] ^ round_key->u64[1]; } -static void subshift(av_aes_block s0[2], int s, const uint8_t *box){ - av_aes_block *s1= (av_aes_block *)(s0[0].u8 - s); - av_aes_block *s3= (av_aes_block *)(s0[0].u8 + s); - s0[0].u8[0]=box[s0[1].u8[ 0]]; s0[0].u8[ 4]=box[s0[1].u8[ 4]]; s0[0].u8[ 8]=box[s0[1].u8[ 8]]; s0[0].u8[12]=box[s0[1].u8[12]]; - s1[0].u8[3]=box[s1[1].u8[ 7]]; s1[0].u8[ 7]=box[s1[1].u8[11]]; s1[0].u8[11]=box[s1[1].u8[15]]; s1[0].u8[15]=box[s1[1].u8[ 3]]; - s0[0].u8[2]=box[s0[1].u8[10]]; s0[0].u8[10]=box[s0[1].u8[ 2]]; s0[0].u8[ 6]=box[s0[1].u8[14]]; s0[0].u8[14]=box[s0[1].u8[ 6]]; - s3[0].u8[1]=box[s3[1].u8[13]]; s3[0].u8[13]=box[s3[1].u8[ 9]]; s3[0].u8[ 9]=box[s3[1].u8[ 5]]; s3[0].u8[ 5]=box[s3[1].u8[ 1]]; +static inline void addkey_s(av_aes_block *dst, const uint8_t *src, + const av_aes_block *round_key) +{ + dst->u64[0] = AV_RN64(src) ^ round_key->u64[0]; + dst->u64[1] = AV_RN64(src + 8) ^ round_key->u64[1]; +} + +static inline void addkey_d(uint8_t *dst, const av_aes_block *src, + const av_aes_block *round_key) +{ + AV_WN64(dst, src->u64[0] ^ round_key->u64[0]); + AV_WN64(dst + 8, src->u64[1] ^ round_key->u64[1]); +} + +static void subshift(av_aes_block s0[2], int s, const uint8_t *box) +{ + av_aes_block *s1 = (av_aes_block *) (s0[0].u8 - s); + av_aes_block *s3 = (av_aes_block *) (s0[0].u8 + s); + + s0[0].u8[ 0] = box[s0[1].u8[ 0]]; + s0[0].u8[ 4] = box[s0[1].u8[ 4]]; + s0[0].u8[ 8] = box[s0[1].u8[ 8]]; + s0[0].u8[12] = box[s0[1].u8[12]]; + s1[0].u8[ 3] = box[s1[1].u8[ 7]]; + s1[0].u8[ 7] = box[s1[1].u8[11]]; + s1[0].u8[11] = box[s1[1].u8[15]]; + s1[0].u8[15] = box[s1[1].u8[ 3]]; + s0[0].u8[ 2] = box[s0[1].u8[10]]; + s0[0].u8[10] = box[s0[1].u8[ 2]]; + s0[0].u8[ 6] = box[s0[1].u8[14]]; + s0[0].u8[14] = box[s0[1].u8[ 6]]; + s3[0].u8[ 1] = box[s3[1].u8[13]]; + s3[0].u8[13] = box[s3[1].u8[ 9]]; + s3[0].u8[ 9] = box[s3[1].u8[ 5]]; + s3[0].u8[ 5] = box[s3[1].u8[ 1]]; } static inline int mix_core(uint32_t multbl[][256], int a, int b, int c, int d){ #if CONFIG_SMALL -#define ROT(x,s) ((x<<s)|(x>>(32-s))) return multbl[0][a] ^ ROT(multbl[0][b], 8) ^ ROT(multbl[0][c], 16) ^ ROT(multbl[0][d], 24); #else return multbl[0][a] ^ multbl[1][b] ^ multbl[2][c] ^ multbl[3][d]; @@ -85,117 +121,137 @@ static inline void mix(av_aes_block state[2], uint32_t multbl[][256], int s1, in state[0].u32[3] = mix_core(multbl, src[3][0], src[s1-1][1], src[1][2], src[s3-1][3]); } -static inline void crypt(AVAES *a, int s, const uint8_t *sbox, uint32_t multbl[][256]){ +static inline void crypt(AVAES *a, int s, const uint8_t *sbox, + uint32_t multbl[][256]) +{ int r; - for(r=a->rounds-1; r>0; r--){ - mix(a->state, multbl, 3-s, 1+s); + for (r = a->rounds - 1; r > 0; r--) { + mix(a->state, multbl, 3 - s, 1 + s); addkey(&a->state[1], &a->state[0], &a->round_key[r]); } + subshift(&a->state[0], s, sbox); } -void av_aes_crypt(AVAES *a, uint8_t *dst_, const uint8_t *src_, int count, uint8_t *iv_, int decrypt){ 
- av_aes_block *dst = (av_aes_block *)dst_; - const av_aes_block *src = (const av_aes_block *)src_; - av_aes_block *iv = (av_aes_block *)iv_; - while(count--){ - addkey(&a->state[1], src, &a->round_key[a->rounds]); - if(decrypt) { +void av_aes_crypt(AVAES *a, uint8_t *dst, const uint8_t *src, + int count, uint8_t *iv, int decrypt) +{ + while (count--) { + addkey_s(&a->state[1], src, &a->round_key[a->rounds]); + if (decrypt) { crypt(a, 0, inv_sbox, dec_multbl); - if(iv){ - addkey(&a->state[0], &a->state[0], iv); + if (iv) { + addkey_s(&a->state[0], iv, &a->state[0]); memcpy(iv, src, 16); } - addkey(dst, &a->state[0], &a->round_key[0]); - }else{ - if(iv) addkey(&a->state[1], &a->state[1], iv); - crypt(a, 2, sbox, enc_multbl); - addkey(dst, &a->state[0], &a->round_key[0]); - if(iv) memcpy(iv, dst, 16); + addkey_d(dst, &a->state[0], &a->round_key[0]); + } else { + if (iv) + addkey_s(&a->state[1], iv, &a->state[1]); + crypt(a, 2, sbox, enc_multbl); + addkey_d(dst, &a->state[0], &a->round_key[0]); + if (iv) + memcpy(iv, dst, 16); } - src++; - dst++; + src += 16; + dst += 16; } } -static void init_multbl2(uint8_t tbl[1024], const int c[4], const uint8_t *log8, const uint8_t *alog8, const uint8_t *sbox){ - int i, j; - for(i=0; i<1024; i++){ - int x= sbox[i>>2]; - if(x) tbl[i]= alog8[ log8[x] + log8[c[i&3]] ]; - } +static void init_multbl2(uint32_t tbl[][256], const int c[4], + const uint8_t *log8, const uint8_t *alog8, + const uint8_t *sbox) +{ + int i; + + for (i = 0; i < 256; i++) { + int x = sbox[i]; + if (x) { + int k, l, m, n; + x = log8[x]; + k = alog8[x + log8[c[0]]]; + l = alog8[x + log8[c[1]]]; + m = alog8[x + log8[c[2]]]; + n = alog8[x + log8[c[3]]]; + tbl[0][i] = AV_NE(MKBETAG(k,l,m,n), MKTAG(k,l,m,n)); #if !CONFIG_SMALL - for(j=256; j<1024; j++) - for(i=0; i<4; i++) - tbl[4*j+i]= tbl[4*j + ((i-1)&3) - 1024]; + tbl[1][i] = ROT(tbl[0][i], 8); + tbl[2][i] = ROT(tbl[0][i], 16); + tbl[3][i] = ROT(tbl[0][i], 24); #endif + } + } } // this is based on the reference AES code by Paulo Barreto and Vincent Rijmen -int av_aes_init(AVAES *a, const uint8_t *key, int key_bits, int decrypt) { +int av_aes_init(AVAES *a, const uint8_t *key, int key_bits, int decrypt) +{ int i, j, t, rconpointer = 0; uint8_t tk[8][4]; - int KC= key_bits>>5; - int rounds= KC + 6; - uint8_t log8[256]; + int KC = key_bits >> 5; + int rounds = KC + 6; + uint8_t log8[256]; uint8_t alog8[512]; - if(!enc_multbl[FF_ARRAY_ELEMS(enc_multbl)-1][FF_ARRAY_ELEMS(enc_multbl[0])-1]){ - j=1; - for(i=0; i<255; i++){ - alog8[i]= - alog8[i+255]= j; - log8[j]= i; - j^= j+j; - if(j>255) j^= 0x11B; + if (!enc_multbl[FF_ARRAY_ELEMS(enc_multbl)-1][FF_ARRAY_ELEMS(enc_multbl[0])-1]) { + j = 1; + for (i = 0; i < 255; i++) { + alog8[i] = alog8[i + 255] = j; + log8[j] = i; + j ^= j + j; + if (j > 255) + j ^= 0x11B; } - for(i=0; i<256; i++){ - j= i ? alog8[255-log8[i]] : 0; - j ^= (j<<1) ^ (j<<2) ^ (j<<3) ^ (j<<4); - j = (j ^ (j>>8) ^ 99) & 255; - inv_sbox[j]= i; - sbox [i]= j; + for (i = 0; i < 256; i++) { + j = i ? 
alog8[255 - log8[i]] : 0; + j ^= (j << 1) ^ (j << 2) ^ (j << 3) ^ (j << 4); + j = (j ^ (j >> 8) ^ 99) & 255; + inv_sbox[j] = i; + sbox[i] = j; } - init_multbl2(dec_multbl[0], (const int[4]){0xe, 0x9, 0xd, 0xb}, log8, alog8, inv_sbox); - init_multbl2(enc_multbl[0], (const int[4]){0x2, 0x1, 0x1, 0x3}, log8, alog8, sbox); + init_multbl2(dec_multbl, (const int[4]) { 0xe, 0x9, 0xd, 0xb }, + log8, alog8, inv_sbox); + init_multbl2(enc_multbl, (const int[4]) { 0x2, 0x1, 0x1, 0x3 }, + log8, alog8, sbox); } - if(key_bits!=128 && key_bits!=192 && key_bits!=256) + if (key_bits != 128 && key_bits != 192 && key_bits != 256) return -1; - a->rounds= rounds; + a->rounds = rounds; - memcpy(tk, key, KC*4); + memcpy(tk, key, KC * 4); - for(t= 0; t < (rounds+1)*16;) { - memcpy(a->round_key[0].u8+t, tk, KC*4); - t+= KC*4; + for (t = 0; t < (rounds + 1) * 16;) { + memcpy(a->round_key[0].u8 + t, tk, KC * 4); + t += KC * 4; - for(i = 0; i < 4; i++) - tk[0][i] ^= sbox[tk[KC-1][(i+1)&3]]; + for (i = 0; i < 4; i++) + tk[0][i] ^= sbox[tk[KC - 1][(i + 1) & 3]]; tk[0][0] ^= rcon[rconpointer++]; - for(j = 1; j < KC; j++){ - if(KC != 8 || j != KC>>1) - for(i = 0; i < 4; i++) tk[j][i] ^= tk[j-1][i]; + for (j = 1; j < KC; j++) { + if (KC != 8 || j != KC >> 1) + for (i = 0; i < 4; i++) + tk[j][i] ^= tk[j - 1][i]; else - for(i = 0; i < 4; i++) tk[j][i] ^= sbox[tk[j-1][i]]; + for (i = 0; i < 4; i++) + tk[j][i] ^= sbox[tk[j - 1][i]]; } } - if(decrypt){ - for(i=1; i<rounds; i++){ + if (decrypt) { + for (i = 1; i < rounds; i++) { av_aes_block tmp[3]; - memcpy(&tmp[2], &a->round_key[i], 16); + tmp[2] = a->round_key[i]; subshift(&tmp[1], 0, sbox); mix(tmp, dec_multbl, 1, 3); - memcpy(&a->round_key[i], &tmp[0], 16); + a->round_key[i] = tmp[0]; } - }else{ - for(i=0; i<(rounds+1)>>1; i++){ - for(j=0; j<16; j++) - FFSWAP(int, a->round_key[i].u8[j], a->round_key[rounds-i].u8[j]); + } else { + for (i = 0; i < (rounds + 1) >> 1; i++) { + FFSWAP(av_aes_block, a->round_key[i], a->round_key[rounds-i]); } } @@ -203,53 +259,76 @@ int av_aes_init(AVAES *a, const uint8_t *key, int key_bits, int decrypt) { } #ifdef TEST +#include <string.h> #include "lfg.h" #include "log.h" -int main(void){ - int i,j; - AVAES ae, ad, b; - uint8_t rkey[2][16]= { - {0}, - {0x10, 0xa5, 0x88, 0x69, 0xd7, 0x4b, 0xe5, 0xa3, 0x74, 0xcf, 0x86, 0x7c, 0xfb, 0x47, 0x38, 0x59}}; +int main(int argc, char **argv) +{ + int i, j; + AVAES b; + uint8_t rkey[2][16] = { + { 0 }, + { 0x10, 0xa5, 0x88, 0x69, 0xd7, 0x4b, 0xe5, 0xa3, + 0x74, 0xcf, 0x86, 0x7c, 0xfb, 0x47, 0x38, 0x59 } + }; uint8_t pt[16], rpt[2][16]= { - {0x6a, 0x84, 0x86, 0x7c, 0xd7, 0x7e, 0x12, 0xad, 0x07, 0xea, 0x1b, 0xe8, 0x95, 0xc5, 0x3f, 0xa3}, - {0}}; + { 0x6a, 0x84, 0x86, 0x7c, 0xd7, 0x7e, 0x12, 0xad, + 0x07, 0xea, 0x1b, 0xe8, 0x95, 0xc5, 0x3f, 0xa3 }, + { 0 } + }; uint8_t rct[2][16]= { - {0x73, 0x22, 0x81, 0xc0, 0xa0, 0xaa, 0xb8, 0xf7, 0xa5, 0x4a, 0x0c, 0x67, 0xa0, 0xc4, 0x5e, 0xcf}, - {0x6d, 0x25, 0x1e, 0x69, 0x44, 0xb0, 0x51, 0xe0, 0x4e, 0xaa, 0x6f, 0xb4, 0xdb, 0xf7, 0x84, 0x65}}; + { 0x73, 0x22, 0x81, 0xc0, 0xa0, 0xaa, 0xb8, 0xf7, + 0xa5, 0x4a, 0x0c, 0x67, 0xa0, 0xc4, 0x5e, 0xcf }, + { 0x6d, 0x25, 0x1e, 0x69, 0x44, 0xb0, 0x51, 0xe0, + 0x4e, 0xaa, 0x6f, 0xb4, 0xdb, 0xf7, 0x84, 0x65 } + }; uint8_t temp[16]; - AVLFG prng; + int err = 0; - av_aes_init(&ae, "PI=3.141592654..", 128, 0); - av_aes_init(&ad, "PI=3.141592654..", 128, 1); av_log_set_level(AV_LOG_DEBUG); - av_lfg_init(&prng, 1); - for(i=0; i<2; i++){ + for (i = 0; i < 2; i++) { av_aes_init(&b, rkey[i], 128, 1); av_aes_crypt(&b, temp, rct[i], 1, NULL, 1); 
- for(j=0; j<16; j++) - if(rpt[i][j] != temp[j]) - av_log(NULL, AV_LOG_ERROR, "%d %02X %02X\n", j, rpt[i][j], temp[j]); + for (j = 0; j < 16; j++) { + if (rpt[i][j] != temp[j]) { + av_log(NULL, AV_LOG_ERROR, "%d %02X %02X\n", + j, rpt[i][j], temp[j]); + err = 1; + } + } } - for(i=0; i<10000; i++){ - for(j=0; j<16; j++){ - pt[j] = av_lfg_get(&prng); - } -{START_TIMER - av_aes_crypt(&ae, temp, pt, 1, NULL, 0); - if(!(i&(i-1))) - av_log(NULL, AV_LOG_ERROR, "%02X %02X %02X %02X\n", temp[0], temp[5], temp[10], temp[15]); - av_aes_crypt(&ad, temp, temp, 1, NULL, 1); -STOP_TIMER("aes")} - for(j=0; j<16; j++){ - if(pt[j] != temp[j]){ - av_log(NULL, AV_LOG_ERROR, "%d %d %02X %02X\n", i,j, pt[j], temp[j]); + if (argc > 1 && !strcmp(argv[1], "-t")) { + AVAES ae, ad; + AVLFG prng; + + av_aes_init(&ae, "PI=3.141592654..", 128, 0); + av_aes_init(&ad, "PI=3.141592654..", 128, 1); + av_lfg_init(&prng, 1); + + for (i = 0; i < 10000; i++) { + for (j = 0; j < 16; j++) { + pt[j] = av_lfg_get(&prng); + } + { + START_TIMER; + av_aes_crypt(&ae, temp, pt, 1, NULL, 0); + if (!(i & (i - 1))) + av_log(NULL, AV_LOG_ERROR, "%02X %02X %02X %02X\n", + temp[0], temp[5], temp[10], temp[15]); + av_aes_crypt(&ad, temp, temp, 1, NULL, 1); + STOP_TIMER("aes"); + } + for (j = 0; j < 16; j++) { + if (pt[j] != temp[j]) { + av_log(NULL, AV_LOG_ERROR, "%d %d %02X %02X\n", + i, j, pt[j], temp[j]); + } } } } - return 0; + return err; } #endif diff --git a/libavutil/arm/intmath.h b/libavutil/arm/intmath.h index 4130177549..efe3915350 100644 --- a/libavutil/arm/intmath.h +++ b/libavutil/arm/intmath.h @@ -36,6 +36,7 @@ static av_always_inline av_const int FASTDIV(int a, int b) int r; __asm__ ("cmp %2, #2 \n\t" "ldr %0, [%3, %2, lsl #2] \n\t" + "ite le \n\t" "lsrle %0, %1, #1 \n\t" "smmulgt %0, %0, %1 \n\t" : "=&r"(r) : "r"(a), "r"(b), "r"(ff_inverse) : "cc"); @@ -101,6 +102,7 @@ static av_always_inline av_const int32_t av_clipl_int32_arm(int64_t a) { int x, y; __asm__ ("adds %1, %R2, %Q2, lsr #31 \n\t" + "itet ne \n\t" "mvnne %1, #1<<31 \n\t" "moveq %0, %Q2 \n\t" "eorne %0, %1, %R2, asr #31 \n\t" diff --git a/libavutil/attributes.h b/libavutil/attributes.h index 517b129f37..e97fdfd466 100644 --- a/libavutil/attributes.h +++ b/libavutil/attributes.h @@ -127,8 +127,10 @@ #ifdef __GNUC__ # define av_builtin_constant_p __builtin_constant_p +# define av_printf_format(fmtpos, attrpos) __attribute__((__format__(__printf__, fmtpos, attrpos))) #else # define av_builtin_constant_p(x) 0 +# define av_printf_format(fmtpos, attrpos) #endif #endif /* AVUTIL_ATTRIBUTES_H */ diff --git a/libavutil/audioconvert.c b/libavutil/audioconvert.c index 60743de22a..6e8649d56c 100644 --- a/libavutil/audioconvert.c +++ b/libavutil/audioconvert.c @@ -52,9 +52,9 @@ static const struct { { "4.0", 4, AV_CH_LAYOUT_4POINT0 }, { "quad", 4, AV_CH_LAYOUT_QUAD }, { "5.0", 5, AV_CH_LAYOUT_5POINT0 }, - { "5.0", 5, AV_CH_LAYOUT_5POINT0_BACK }, + { "5.0(back)", 5, AV_CH_LAYOUT_5POINT0_BACK }, { "5.1", 6, AV_CH_LAYOUT_5POINT1 }, - { "5.1", 6, AV_CH_LAYOUT_5POINT1_BACK }, + { "5.1(back)", 6, AV_CH_LAYOUT_5POINT1_BACK }, { "5.1+downmix", 8, AV_CH_LAYOUT_5POINT1|AV_CH_LAYOUT_STEREO_DOWNMIX, }, { "7.1", 8, AV_CH_LAYOUT_7POINT1 }, { "7.1(wide)", 8, AV_CH_LAYOUT_7POINT1_WIDE }, diff --git a/libavutil/avstring.h b/libavutil/avstring.h index 04d1197386..662af6b70d 100644 --- a/libavutil/avstring.h +++ b/libavutil/avstring.h @@ -22,6 +22,7 @@ #define AVUTIL_AVSTRING_H #include <stddef.h> +#include "attributes.h" /** * Return non-zero if pfx is a prefix of str. 
If it is, *ptr is set to @@ -107,7 +108,7 @@ size_t av_strlcat(char *dst, const char *src, size_t size); * @return the length of the string that would have been generated * if enough space had been available */ -size_t av_strlcatf(char *dst, size_t size, const char *fmt, ...); +size_t av_strlcatf(char *dst, size_t size, const char *fmt, ...) av_printf_format(3, 4); /** * Convert a number to a av_malloced string. diff --git a/libavutil/avutil.h b/libavutil/avutil.h index 05b45ce8bf..e73b5bae97 100644 --- a/libavutil/avutil.h +++ b/libavutil/avutil.h @@ -40,7 +40,7 @@ #define AV_VERSION(a, b, c) AV_VERSION_DOT(a, b, c) #define LIBAVUTIL_VERSION_MAJOR 50 -#define LIBAVUTIL_VERSION_MINOR 43 +#define LIBAVUTIL_VERSION_MINOR 44 #define LIBAVUTIL_VERSION_MICRO 0 #define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \ @@ -112,7 +112,8 @@ enum AVMediaType { #define FF_API_OLD_IMAGE_NAMES (LIBAVUTIL_VERSION_MAJOR < 51) #endif enum AVPictureType { - AV_PICTURE_TYPE_I = 1, ///< Intra + AV_PICTURE_TYPE_NONE = 0, ///< Undefined + AV_PICTURE_TYPE_I, ///< Intra AV_PICTURE_TYPE_P, ///< Predicted AV_PICTURE_TYPE_B, ///< Bi-dir predicted AV_PICTURE_TYPE_S, ///< S(GMC)-VOP MPEG4 diff --git a/libavutil/common.h b/libavutil/common.h index 1cd2de2909..d60e8638a5 100644 --- a/libavutil/common.h +++ b/libavutil/common.h @@ -270,16 +270,16 @@ static av_always_inline av_const int av_popcount_c(uint32_t x) }\ }\ -/*! - * \def PUT_UTF8(val, tmp, PUT_BYTE) +/** + * @def PUT_UTF8(val, tmp, PUT_BYTE) * Convert a 32-bit Unicode character to its UTF-8 encoded form (up to 4 bytes long). - * \param val is an input-only argument and should be of type uint32_t. It holds + * @param val is an input-only argument and should be of type uint32_t. It holds * a UCS-4 encoded Unicode character that is to be converted to UTF-8. If * val is given as a function it is executed only once. - * \param tmp is a temporary variable and should be of type uint8_t. It + * @param tmp is a temporary variable and should be of type uint8_t. It * represents an intermediate value during conversion that is to be * output by PUT_BYTE. - * \param PUT_BYTE writes the converted UTF-8 bytes to any proper destination. + * @param PUT_BYTE writes the converted UTF-8 bytes to any proper destination. * It could be a function or a statement, and uses tmp as the input byte. * For example, PUT_BYTE could be "*output++ = tmp;" PUT_BYTE will be * executed up to 4 times for values in the valid UTF-8 range and up to @@ -306,16 +306,16 @@ static av_always_inline av_const int av_popcount_c(uint32_t x) }\ } -/*! - * \def PUT_UTF16(val, tmp, PUT_16BIT) +/** + * @def PUT_UTF16(val, tmp, PUT_16BIT) * Convert a 32-bit Unicode character to its UTF-16 encoded form (2 or 4 bytes). - * \param val is an input-only argument and should be of type uint32_t. It holds + * @param val is an input-only argument and should be of type uint32_t. It holds * a UCS-4 encoded Unicode character that is to be converted to UTF-16. If * val is given as a function it is executed only once. - * \param tmp is a temporary variable and should be of type uint16_t. It + * @param tmp is a temporary variable and should be of type uint16_t. It * represents an intermediate value during conversion that is to be * output by PUT_16BIT. - * \param PUT_16BIT writes the converted UTF-16 data to any proper destination + * @param PUT_16BIT writes the converted UTF-16 data to any proper destination * in desired endianness. It could be a function or a statement, and uses tmp * as the input byte. 
For example, PUT_BYTE could be "*output++ = tmp;" * PUT_BYTE will be executed 1 or 2 times depending on input character. diff --git a/libavutil/cpu.c b/libavutil/cpu.c index c439a830c5..32a2eb4ed4 100644 --- a/libavutil/cpu.c +++ b/libavutil/cpu.c @@ -44,32 +44,45 @@ int av_get_cpu_flags(void) #undef printf #include <stdio.h> +static const struct { + int flag; + const char *name; +} cpu_flag_tab[] = { +#if ARCH_ARM + { AV_CPU_FLAG_IWMMXT, "iwmmxt" }, +#elif ARCH_PPC + { AV_CPU_FLAG_ALTIVEC, "altivec" }, +#elif ARCH_X86 + { AV_CPU_FLAG_MMX, "mmx" }, + { AV_CPU_FLAG_MMX2, "mmx2" }, + { AV_CPU_FLAG_SSE, "sse" }, + { AV_CPU_FLAG_SSE2, "sse2" }, + { AV_CPU_FLAG_SSE2SLOW, "sse2(slow)" }, + { AV_CPU_FLAG_SSE3, "sse3" }, + { AV_CPU_FLAG_SSE3SLOW, "sse3(slow)" }, + { AV_CPU_FLAG_SSSE3, "ssse3" }, + { AV_CPU_FLAG_ATOM, "atom" }, + { AV_CPU_FLAG_SSE4, "sse4.1" }, + { AV_CPU_FLAG_SSE42, "sse4.2" }, + { AV_CPU_FLAG_AVX, "avx" }, + { AV_CPU_FLAG_3DNOW, "3dnow" }, + { AV_CPU_FLAG_3DNOWEXT, "3dnowext" }, +#endif + { 0 } +}; + int main(void) { int cpu_flags = av_get_cpu_flags(); + int i; printf("cpu_flags = 0x%08X\n", cpu_flags); - printf("cpu_flags = %s%s%s%s%s%s%s%s%s%s%s%s%s\n", -#if ARCH_ARM - cpu_flags & AV_CPU_FLAG_IWMMXT ? "IWMMXT " : "", -#elif ARCH_PPC - cpu_flags & AV_CPU_FLAG_ALTIVEC ? "ALTIVEC " : "", -#elif ARCH_X86 - cpu_flags & AV_CPU_FLAG_MMX ? "MMX " : "", - cpu_flags & AV_CPU_FLAG_MMX2 ? "MMX2 " : "", - cpu_flags & AV_CPU_FLAG_SSE ? "SSE " : "", - cpu_flags & AV_CPU_FLAG_SSE2 ? "SSE2 " : "", - cpu_flags & AV_CPU_FLAG_SSE2SLOW ? "SSE2(slow) " : "", - cpu_flags & AV_CPU_FLAG_SSE3 ? "SSE3 " : "", - cpu_flags & AV_CPU_FLAG_SSE3SLOW ? "SSE3(slow) " : "", - cpu_flags & AV_CPU_FLAG_SSSE3 ? "SSSE3 " : "", - cpu_flags & AV_CPU_FLAG_ATOM ? "Atom " : "", - cpu_flags & AV_CPU_FLAG_SSE4 ? "SSE4.1 " : "", - cpu_flags & AV_CPU_FLAG_SSE42 ? "SSE4.2 " : "", - cpu_flags & AV_CPU_FLAG_AVX ? "AVX " : "", - cpu_flags & AV_CPU_FLAG_3DNOW ? "3DNow " : "", - cpu_flags & AV_CPU_FLAG_3DNOWEXT ? "3DNowExt " : ""); -#endif + printf("cpu_flags ="); + for (i = 0; cpu_flag_tab[i].flag; i++) + if (cpu_flags & cpu_flag_tab[i].flag) + printf(" %s", cpu_flag_tab[i].name); + printf("\n"); + return 0; } diff --git a/libavutil/des.c b/libavutil/des.c index 9c1a530666..f6643696d6 100644 --- a/libavutil/des.c +++ b/libavutil/des.c @@ -39,6 +39,7 @@ static const uint8_t IP_shuffle[] = { }; #undef T +#if CONFIG_SMALL || defined(GENTABLES) #define T(a, b, c, d) 32-a,32-b,32-c,32-d static const uint8_t P_shuffle[] = { T(16, 7, 20, 21), @@ -51,6 +52,7 @@ static const uint8_t P_shuffle[] = { T(22, 11, 4, 25) }; #undef T +#endif #define T(a, b, c, d, e, f, g) 64-a,64-b,64-c,64-d,64-e,64-f,64-g static const uint8_t PC1_shuffle[] = { @@ -240,7 +242,7 @@ static uint32_t f_func(uint32_t r, uint64_t k) { } /** - * \brief rotate the two halves of the expanded 56 bit key each 1 bit left + * @brief rotate the two halves of the expanded 56 bit key each 1 bit left * * Note: the specification calls this "shift", so I kept it although * it is confusing. @@ -297,10 +299,10 @@ int av_des_init(AVDES *d, const uint8_t *key, int key_bits, int decrypt) { } void av_des_crypt(AVDES *d, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt) { - uint64_t iv_val = iv ? av_be2ne64(*(uint64_t *)iv) : 0; + uint64_t iv_val = iv ? AV_RB64(iv) : 0; while (count-- > 0) { uint64_t dst_val; - uint64_t src_val = src ? av_be2ne64(*(const uint64_t *)src) : 0; + uint64_t src_val = src ? 
AV_RB64(src) : 0; if (decrypt) { uint64_t tmp = src_val; if (d->triple_des) { @@ -317,12 +319,12 @@ void av_des_crypt(AVDES *d, uint8_t *dst, const uint8_t *src, int count, uint8_t } iv_val = iv ? dst_val : 0; } - *(uint64_t *)dst = av_be2ne64(dst_val); + AV_WB64(dst, dst_val); src += 8; dst += 8; } if (iv) - *(uint64_t *)iv = av_be2ne64(iv_val); + AV_WB64(iv, iv_val); } #ifdef TEST @@ -402,7 +404,7 @@ int main(void) { printf("Partial Monte-Carlo test failed\n"); return 1; } - for (i = 0; i < 1000000; i++) { + for (i = 0; i < 1000; i++) { key[0] = rand64(); key[1] = rand64(); key[2] = rand64(); data = rand64(); av_des_init(&d, key, 192, 0); diff --git a/libavutil/des.h b/libavutil/des.h index e80bdd3e69..dd670869b2 100644 --- a/libavutil/des.h +++ b/libavutil/des.h @@ -30,22 +30,22 @@ struct AVDES { }; /** - * \brief Initializes an AVDES context. + * @brief Initializes an AVDES context. * - * \param key_bits must be 64 or 192 - * \param decrypt 0 for encryption, 1 for decryption + * @param key_bits must be 64 or 192 + * @param decrypt 0 for encryption, 1 for decryption */ int av_des_init(struct AVDES *d, const uint8_t *key, int key_bits, int decrypt); /** - * \brief Encrypts / decrypts using the DES algorithm. + * @brief Encrypts / decrypts using the DES algorithm. * - * \param count number of 8 byte blocks - * \param dst destination array, can be equal to src, must be 8-byte aligned - * \param src source array, can be equal to dst, must be 8-byte aligned, may be NULL - * \param iv initialization vector for CBC mode, if NULL then ECB will be used, + * @param count number of 8 byte blocks + * @param dst destination array, can be equal to src, must be 8-byte aligned + * @param src source array, can be equal to dst, must be 8-byte aligned, may be NULL + * @param iv initialization vector for CBC mode, if NULL then ECB will be used, * must be 8-byte aligned - * \param decrypt 0 for encryption, 1 for decryption + * @param decrypt 0 for encryption, 1 for decryption */ void av_des_crypt(struct AVDES *d, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt); diff --git a/libavutil/dict.h b/libavutil/dict.h index 421be32244..84f58ec473 100644 --- a/libavutil/dict.h +++ b/libavutil/dict.h @@ -18,16 +18,55 @@ */ /** - * @file Public dictionary API. + * @file + * Public dictionary API. + * @deprecated + * AVDictionary is provided for compatibility with libav. It is both in + * implementation as well as API inefficient. It does not scale and is + * extremely slow with large dictionaries. + * It is recommended that new code uses our tree container from tree.c/h + * where applicable, which uses AVL trees to achieve O(log n) performance. */ #ifndef AVUTIL_DICT_H #define AVUTIL_DICT_H +/** + * @defgroup dict_api Public Dictionary API + * @{ + * Dictionaries are used for storing key:value pairs. To create + * an AVDictionary, simply pass an address of a NULL pointer to + * av_dict_set(). NULL can be used as an empty dictionary wherever + * a pointer to an AVDictionary is required. + * Use av_dict_get() to retrieve an entry or iterate over all + * entries and finally av_dict_free() to free the dictionary + * and all its contents. 
+ * + * @code + * AVDictionary *d = NULL; // "create" an empty dictionary + * av_dict_set(&d, "foo", "bar", 0); // add an entry + * + * char *k = av_strdup("key"); // if your strings are already allocated, + * char *v = av_strdup("value"); // you can avoid copying them like this + * av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL); + * + * AVDictionaryEntry *t = NULL; + * while (t = av_dict_get(d, "", t, AV_DICT_IGNORE_SUFFIX)) { + * <....> // iterate over all entries in d + * } + * + * av_dict_free(&d); + * @endcode + * + * @} + */ + #define AV_DICT_MATCH_CASE 1 #define AV_DICT_IGNORE_SUFFIX 2 -#define AV_DICT_DONT_STRDUP_KEY 4 -#define AV_DICT_DONT_STRDUP_VAL 8 +#define AV_DICT_DONT_STRDUP_KEY 4 /**< Take ownership of a key that's been + allocated with av_malloc() and children. */ +#define AV_DICT_DONT_STRDUP_VAL 8 /**< Take ownership of a value that's been + allocated with av_malloc() and chilren. */ #define AV_DICT_DONT_OVERWRITE 16 ///< Don't overwrite existing entries. #define AV_DICT_APPEND 32 /**< If the entry already exists, append to it. Note that no delimiter is added, the strings are simply concatenated. */ @@ -73,7 +112,8 @@ int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags void av_dict_copy(AVDictionary **dst, AVDictionary *src, int flags); /** - * Free all the memory allocated for an AVDictionary struct. + * Free all the memory allocated for an AVDictionary struct + * and all keys and values. */ void av_dict_free(AVDictionary **m); diff --git a/libavutil/eval.c b/libavutil/eval.c index fa2999b84c..4e2cb1095c 100644 --- a/libavutil/eval.c +++ b/libavutil/eval.c @@ -28,6 +28,7 @@ #include "avutil.h" #include "eval.h" +#include "log.h" typedef struct Parser { const AVClass *class; @@ -471,7 +472,7 @@ int av_expr_parse(AVExpr **expr, const char *s, const char * const *func2_names, double (* const *funcs2)(void *, double, double), int log_offset, void *log_ctx) { - Parser p; + Parser p = { 0 }; AVExpr *e = NULL; char *w = av_malloc(strlen(s) + 1); char *wp = w; @@ -499,6 +500,7 @@ int av_expr_parse(AVExpr **expr, const char *s, if ((ret = parse_expr(&e, &p)) < 0) goto end; if (*p.s) { + av_expr_free(e); av_log(&p, AV_LOG_ERROR, "Invalid chars '%s' at the end of expression '%s'\n", p.s, s0); ret = AVERROR(EINVAL); goto end; @@ -516,7 +518,7 @@ end: double av_expr_eval(AVExpr *e, const double *const_values, void *opaque) { - Parser p; + Parser p = { 0 }; p.const_values = const_values; p.opaque = opaque; @@ -575,6 +577,8 @@ void av_free_expr(AVExpr *e) #ifdef TEST #undef printf +#include <string.h> + static double const_values[] = { M_PI, M_E, @@ -587,7 +591,7 @@ static const char *const_names[] = { 0 }; -int main(void) +int main(int argc, char **argv) { int i; double d; @@ -598,7 +602,7 @@ int main(void) "-PI", "+PI", "1+(5-2)^(3-1)+1/2+sin(PI)-max(-2.2,-3.1)", - "80G/80Gi" + "80G/80Gi", "1k", "1Gi", "1gi", @@ -656,7 +660,11 @@ int main(void) av_expr_parse_and_eval(&d, *expr, const_names, const_values, NULL, NULL, NULL, NULL, NULL, 0, NULL); - printf("'%s' -> %f\n\n", *expr, d); + if(isnan(d)){ + printf("'%s' -> nan\n\n", *expr); + }else{ + printf("'%s' -> %f\n\n", *expr, d); + } } av_expr_parse_and_eval(&d, "1+(5-2)^(3-1)+1/2+sin(PI)-max(-2.2,-3.1)", @@ -668,13 +676,16 @@ int main(void) NULL, NULL, NULL, NULL, NULL, 0, NULL); printf("%f == 0.931322575\n", d); - for (i=0; i<1050; i++) { - START_TIMER + if (argc > 1 && !strcmp(argv[1], "-t")) { + for (i = 0; i < 1050; i++) { + START_TIMER; av_expr_parse_and_eval(&d, 
"1+(5-2)^(3-1)+1/2+sin(PI)-max(-2.2,-3.1)", const_names, const_values, NULL, NULL, NULL, NULL, NULL, 0, NULL); - STOP_TIMER("av_expr_parse_and_eval") + STOP_TIMER("av_expr_parse_and_eval"); + } } + return 0; } #endif diff --git a/libavutil/file.c b/libavutil/file.c index 0704080c5b..6998c2946f 100644 --- a/libavutil/file.c +++ b/libavutil/file.c @@ -17,6 +17,7 @@ */ #include "file.h" +#include "log.h" #include <fcntl.h> #include <sys/stat.h> #include <unistd.h> diff --git a/libavutil/file.h b/libavutil/file.h index f94d7803f1..f28627c9d8 100644 --- a/libavutil/file.h +++ b/libavutil/file.h @@ -22,7 +22,8 @@ #include "avutil.h" /** - * @file misc file utilities + * @file + * Misc file utilities. */ /** diff --git a/libavutil/imgutils.c b/libavutil/imgutils.c index 57b3103d6a..64b0b6fd5e 100644 --- a/libavutil/imgutils.c +++ b/libavutil/imgutils.c @@ -23,6 +23,7 @@ #include "imgutils.h" #include "internal.h" +#include "log.h" #include "pixdesc.h" void av_image_fill_max_pixsteps(int max_pixsteps[4], int max_pixstep_comps[4], diff --git a/libavutil/imgutils.h b/libavutil/imgutils.h index 150f2ca3af..b3582e5879 100644 --- a/libavutil/imgutils.h +++ b/libavutil/imgutils.h @@ -69,7 +69,7 @@ int av_image_fill_linesizes(int linesizes[4], enum PixelFormat pix_fmt, int widt * * @param data pointers array to be filled with the pointer for each image plane * @param ptr the pointer to a buffer which will contain the image - * @param linesizes[4] the array containing the linesize for each + * @param linesizes the array containing the linesize for each * plane, should be filled by av_image_fill_linesizes() * @return the size in bytes required for the image buffer, a negative * error code in case of failure @@ -106,8 +106,8 @@ void av_image_copy_plane(uint8_t *dst, int dst_linesize, /** * Copy image in src_data to dst_data. * - * @param dst_linesize linesizes for the image in dst_data - * @param src_linesize linesizes for the image in src_data + * @param dst_linesizes linesizes for the image in dst_data + * @param src_linesizes linesizes for the image in src_data */ void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], diff --git a/libavutil/lfg.h b/libavutil/lfg.h index 0e89ea308d..854ffce737 100644 --- a/libavutil/lfg.h +++ b/libavutil/lfg.h @@ -55,7 +55,7 @@ static inline unsigned int av_mlfg_get(AVLFG *c){ * Get the next two numbers generated by a Box-Muller Gaussian * generator using the random numbers issued by lfg. 
* - * @param out[2] array where the two generated numbers are placed + * @param out array where the two generated numbers are placed */ void av_bmg_get(AVLFG *lfg, double out[2]); diff --git a/libavutil/lls.c b/libavutil/lls.c index 3855792760..dcefc2cbad 100644 --- a/libavutil/lls.c +++ b/libavutil/lls.c @@ -30,105 +30,123 @@ #include "lls.h" -void av_init_lls(LLSModel *m, int indep_count){ +void av_init_lls(LLSModel *m, int indep_count) +{ memset(m, 0, sizeof(LLSModel)); - - m->indep_count= indep_count; + m->indep_count = indep_count; } -void av_update_lls(LLSModel *m, double *var, double decay){ - int i,j; +void av_update_lls(LLSModel *m, double *var, double decay) +{ + int i, j; - for(i=0; i<=m->indep_count; i++){ - for(j=i; j<=m->indep_count; j++){ + for (i = 0; i <= m->indep_count; i++) { + for (j = i; j <= m->indep_count; j++) { m->covariance[i][j] *= decay; - m->covariance[i][j] += var[i]*var[j]; + m->covariance[i][j] += var[i] * var[j]; } } } -void av_solve_lls(LLSModel *m, double threshold, int min_order){ - int i,j,k; - double (*factor)[MAX_VARS+1]= (void*)&m->covariance[1][0]; - double (*covar )[MAX_VARS+1]= (void*)&m->covariance[1][1]; - double *covar_y = m->covariance[0]; - int count= m->indep_count; - - for(i=0; i<count; i++){ - for(j=i; j<count; j++){ - double sum= covar[i][j]; - - for(k=i-1; k>=0; k--) - sum -= factor[i][k]*factor[j][k]; - - if(i==j){ - if(sum < threshold) - sum= 1.0; - factor[i][i]= sqrt(sum); - }else - factor[j][i]= sum / factor[i][i]; +void av_solve_lls(LLSModel *m, double threshold, int min_order) +{ + int i, j, k; + double (*factor)[MAX_VARS + 1] = (void *) &m->covariance[1][0]; + double (*covar) [MAX_VARS + 1] = (void *) &m->covariance[1][1]; + double *covar_y = m->covariance[0]; + int count = m->indep_count; + + for (i = 0; i < count; i++) { + for (j = i; j < count; j++) { + double sum = covar[i][j]; + + for (k = i - 1; k >= 0; k--) + sum -= factor[i][k] * factor[j][k]; + + if (i == j) { + if (sum < threshold) + sum = 1.0; + factor[i][i] = sqrt(sum); + } else { + factor[j][i] = sum / factor[i][i]; + } } } - for(i=0; i<count; i++){ - double sum= covar_y[i+1]; - for(k=i-1; k>=0; k--) - sum -= factor[i][k]*m->coeff[0][k]; - m->coeff[0][i]= sum / factor[i][i]; + + for (i = 0; i < count; i++) { + double sum = covar_y[i + 1]; + + for (k = i - 1; k >= 0; k--) + sum -= factor[i][k] * m->coeff[0][k]; + + m->coeff[0][i] = sum / factor[i][i]; } - for(j=count-1; j>=min_order; j--){ - for(i=j; i>=0; i--){ - double sum= m->coeff[0][i]; - for(k=i+1; k<=j; k++) - sum -= factor[k][i]*m->coeff[j][k]; - m->coeff[j][i]= sum / factor[i][i]; + for (j = count - 1; j >= min_order; j--) { + for (i = j; i >= 0; i--) { + double sum = m->coeff[0][i]; + + for (k = i + 1; k <= j; k++) + sum -= factor[k][i] * m->coeff[j][k]; + + m->coeff[j][i] = sum / factor[i][i]; } - m->variance[j]= covar_y[0]; - for(i=0; i<=j; i++){ - double sum= m->coeff[j][i]*covar[i][i] - 2*covar_y[i+1]; - for(k=0; k<i; k++) - sum += 2*m->coeff[j][k]*covar[k][i]; - m->variance[j] += m->coeff[j][i]*sum; + m->variance[j] = covar_y[0]; + + for (i = 0; i <= j; i++) { + double sum = m->coeff[j][i] * covar[i][i] - 2 * covar_y[i + 1]; + + for (k = 0; k < i; k++) + sum += 2 * m->coeff[j][k] * covar[k][i]; + + m->variance[j] += m->coeff[j][i] * sum; } } } -double av_evaluate_lls(LLSModel *m, double *param, int order){ +double av_evaluate_lls(LLSModel *m, double *param, int order) +{ int i; - double out= 0; + double out = 0; - for(i=0; i<=order; i++) - out+= param[i]*m->coeff[order][i]; + for (i = 0; i <= order; i++) 
+ out += param[i] * m->coeff[order][i]; return out; } #ifdef TEST -#include <stdlib.h> #include <stdio.h> +#include <limits.h> +#include "lfg.h" -int main(void){ +int main(void) +{ LLSModel m; int i, order; + AVLFG lfg; + av_lfg_init(&lfg, 1); av_init_lls(&m, 3); - for(i=0; i<100; i++){ + for (i = 0; i < 100; i++) { double var[4]; double eval; - var[0] = (rand() / (double)RAND_MAX - 0.5)*2; - var[1] = var[0] + rand() / (double)RAND_MAX - 0.5; - var[2] = var[1] + rand() / (double)RAND_MAX - 0.5; - var[3] = var[2] + rand() / (double)RAND_MAX - 0.5; + + var[0] = (av_lfg_get(&lfg) / (double) UINT_MAX - 0.5) * 2; + var[1] = var[0] + av_lfg_get(&lfg) / (double) UINT_MAX - 0.5; + var[2] = var[1] + av_lfg_get(&lfg) / (double) UINT_MAX - 0.5; + var[3] = var[2] + av_lfg_get(&lfg) / (double) UINT_MAX - 0.5; av_update_lls(&m, var, 0.99); av_solve_lls(&m, 0.001, 0); - for(order=0; order<3; order++){ - eval= av_evaluate_lls(&m, var+1, order); + for (order = 0; order < 3; order++) { + eval = av_evaluate_lls(&m, var + 1, order); printf("real:%9f order:%d pred:%9f var:%f coeffs:%f %9f %9f\n", - var[0], order, eval, sqrt(m.variance[order] / (i+1)), - m.coeff[order][0], m.coeff[order][1], m.coeff[order][2]); + var[0], order, eval, sqrt(m.variance[order] / (i + 1)), + m.coeff[order][0], m.coeff[order][1], + m.coeff[order][2]); } } return 0; diff --git a/libavutil/log.h b/libavutil/log.h index 53c8aa0f67..046d19920e 100644 --- a/libavutil/log.h +++ b/libavutil/log.h @@ -23,6 +23,7 @@ #include <stdarg.h> #include "avutil.h" +#include "attributes.h" /** * Describe the class of an AVClass context structure. That is an @@ -129,11 +130,7 @@ typedef struct { * subsequent arguments are converted to output. * @see av_vlog */ -#ifdef __GNUC__ -void av_log(void *avcl, int level, const char *fmt, ...) __attribute__ ((__format__ (__printf__, 3, 4))); -#else -void av_log(void *avcl, int level, const char *fmt, ...); -#endif +void av_log(void *avcl, int level, const char *fmt, ...) av_printf_format(3, 4); void av_vlog(void *avcl, int level, const char *fmt, va_list); int av_log_get_level(void); diff --git a/libavutil/lzo.c b/libavutil/lzo.c index 40a41a424d..bac762ecc3 100644 --- a/libavutil/lzo.c +++ b/libavutil/lzo.c @@ -37,8 +37,8 @@ typedef struct LZOContext { } LZOContext; /** - * \brief Reads one byte from the input buffer, avoiding an overrun. - * \return byte read + * @brief Reads one byte from the input buffer, avoiding an overrun. + * @return byte read */ static inline int get_byte(LZOContext *c) { if (c->in < c->in_end) @@ -54,10 +54,10 @@ static inline int get_byte(LZOContext *c) { #endif /** - * \brief Decodes a length value in the coding used by lzo. - * \param x previous byte value - * \param mask bits used from x - * \return decoded length value + * @brief Decodes a length value in the coding used by lzo. + * @param x previous byte value + * @param mask bits used from x + * @return decoded length value */ static inline int get_len(LZOContext *c, int x, int mask) { int cnt = x & mask; @@ -82,8 +82,8 @@ static inline int get_len(LZOContext *c, int x, int mask) { #endif /** - * \brief Copies bytes from input to output buffer with checking. - * \param cnt number of bytes to copy, must be >= 0 + * @brief Copies bytes from input to output buffer with checking. 
+ * @param cnt number of bytes to copy, must be >= 0 */ static inline void copy(LZOContext *c, int cnt) { register const uint8_t *src = c->in; @@ -111,9 +111,9 @@ static inline void copy(LZOContext *c, int cnt) { static inline void memcpy_backptr(uint8_t *dst, int back, int cnt); /** - * \brief Copies previously decoded bytes to current position. - * \param back how many bytes back we start - * \param cnt number of bytes to copy, must be >= 0 + * @brief Copies previously decoded bytes to current position. + * @param back how many bytes back we start + * @param cnt number of bytes to copy, must be >= 0 * * cnt > back is valid, this will copy the bytes we just copied, * thus creating a repeating pattern with a period length of back. diff --git a/libavutil/lzo.h b/libavutil/lzo.h index 6788054bff..ae5fc53c61 100644 --- a/libavutil/lzo.h +++ b/libavutil/lzo.h @@ -24,7 +24,7 @@ #include <stdint.h> -/** \defgroup errflags Error flags returned by av_lzo1x_decode +/** @name Error flags returned by av_lzo1x_decode * \{ */ //! end of the input buffer reached before decoding finished #define AV_LZO_INPUT_DEPLETED 1 @@ -40,12 +40,12 @@ #define AV_LZO_OUTPUT_PADDING 12 /** - * \brief Decodes LZO 1x compressed data. - * \param out output buffer - * \param outlen size of output buffer, number of bytes left are returned here - * \param in input buffer - * \param inlen size of input buffer, number of bytes left are returned here - * \return 0 on success, otherwise a combination of the error flags above + * @brief Decodes LZO 1x compressed data. + * @param out output buffer + * @param outlen size of output buffer, number of bytes left are returned here + * @param in input buffer + * @param inlen size of input buffer, number of bytes left are returned here + * @return 0 on success, otherwise a combination of the error flags above * * Make sure all buffers are appropriately padded, in must provide * AV_LZO_INPUT_PADDING, out must provide AV_LZO_OUTPUT_PADDING additional bytes. @@ -53,10 +53,10 @@ int av_lzo1x_decode(void *out, int *outlen, const void *in, int *inlen); /** - * \brief deliberately overlapping memcpy implementation - * \param dst destination buffer; must be padded with 12 additional bytes - * \param back how many bytes back we start (the initial size of the overlapping window) - * \param cnt number of bytes to copy, must be >= 0 + * @brief deliberately overlapping memcpy implementation + * @param dst destination buffer; must be padded with 12 additional bytes + * @param back how many bytes back we start (the initial size of the overlapping window) + * @param cnt number of bytes to copy, must be >= 0 * * cnt > back is valid, this will copy the bytes we just copied, * thus creating a repeating pattern with a period length of back. 
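The attributes.h hunk above adds av_printf_format(), which log.h and avstring.h then apply to av_log() and av_strlcatf() so GCC can check printf-style arguments at compile time. A minimal caller-side sketch of reusing the same macro — my_log() and its body are hypothetical, only the macro and its (format-position, first-vararg-position) arguments come from the diff:

    #include <stdarg.h>
    #include <stdio.h>
    #include "libavutil/attributes.h"

    /* Argument 2 is the format string, varargs start at argument 3;
     * on non-GNU compilers av_printf_format() expands to nothing. */
    static void my_log(void *ctx, const char *fmt, ...) av_printf_format(2, 3);

    static void my_log(void *ctx, const char *fmt, ...)
    {
        va_list vl;
        va_start(vl, fmt);
        vfprintf(stderr, fmt, vl);
        va_end(vl);
    }

With such a declaration, a call like my_log(NULL, "%s", 42) is flagged by -Wformat at compile time instead of misbehaving at run time, which is the point of annotating av_log() and av_strlcatf() in this commit.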
diff --git a/libavutil/mathematics.c b/libavutil/mathematics.c index cfe8fbc9eb..180f72e3f0 100644 --- a/libavutil/mathematics.c +++ b/libavutil/mathematics.c @@ -153,32 +153,3 @@ int64_t av_compare_mod(uint64_t a, uint64_t b, uint64_t mod){ c-= mod; return c; } - -#ifdef TEST -#include "integer.h" -#undef printf -int main(void){ - int64_t a,b,c,d,e; - - for(a=7; a<(1LL<<62); a+=a/3+1){ - for(b=3; b<(1LL<<62); b+=b/4+1){ - for(c=9; c<(1LL<<62); c+=(c*2)/5+3){ - int64_t r= c/2; - AVInteger ai; - ai= av_mul_i(av_int2i(a), av_int2i(b)); - ai= av_add_i(ai, av_int2i(r)); - - d= av_i2int(av_div_i(ai, av_int2i(c))); - - e= av_rescale(a,b,c); - - if((double)a * (double)b / (double)c > (1LL<<63)) - continue; - - if(d!=e) printf("%"PRId64"*%"PRId64"/%"PRId64"= %"PRId64"=%"PRId64"\n", a, b, c, d, e); - } - } - } - return 0; -} -#endif diff --git a/libavutil/md5.c b/libavutil/md5.c index 173ed0623b..471a510a73 100644 --- a/libavutil/md5.c +++ b/libavutil/md5.c @@ -30,8 +30,9 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ -#include <string.h> +#include <stdint.h> #include "bswap.h" +#include "intreadwrite.h" #include "md5.h" typedef struct AVMD5{ @@ -40,7 +41,7 @@ typedef struct AVMD5{ uint32_t ABCD[4]; } AVMD5; -const int av_md5_size= sizeof(AVMD5); +const int av_md5_size = sizeof(AVMD5); static const uint8_t S[4][4] = { { 7, 12, 17, 22 }, /* round 1 */ @@ -71,42 +72,49 @@ static const uint32_t T[64] = { // T[i]= fabs(sin(i+1)<<32) 0xf7537e82, 0xbd3af235, 0x2ad7d2bb, 0xeb86d391, }; -#define CORE(i, a, b, c, d) \ - t = S[i>>4][i&3];\ - a += T[i];\ -\ - if(i<32){\ - if(i<16) a += (d ^ (b&(c^d))) + X[ i &15 ];\ - else a += (c ^ (d&(c^b))) + X[ (1+5*i)&15 ];\ - }else{\ - if(i<48) a += (b^c^d) + X[ (5+3*i)&15 ];\ - else a += (c^(b|~d)) + X[ ( 7*i)&15 ];\ - }\ - a = b + (( a << t ) | ( a >> (32 - t) )); - -static void body(uint32_t ABCD[4], uint32_t X[16]){ - +#define CORE(i, a, b, c, d) do { \ + t = S[i >> 4][i & 3]; \ + a += T[i]; \ + \ + if (i < 32) { \ + if (i < 16) a += (d ^ (b & (c ^ d))) + X[ i & 15]; \ + else a += (c ^ (d & (c ^ b))) + X[(1 + 5*i) & 15]; \ + } else { \ + if (i < 48) a += (b ^ c ^ d) + X[(5 + 3*i) & 15]; \ + else a += (c ^ (b | ~d)) + X[( 7*i) & 15]; \ + } \ + a = b + (a << t | a >> (32 - t)); \ + } while (0) + +static void body(uint32_t ABCD[4], uint32_t X[16]) +{ int t; int i av_unused; - unsigned int a= ABCD[3]; - unsigned int b= ABCD[2]; - unsigned int c= ABCD[1]; - unsigned int d= ABCD[0]; + unsigned int a = ABCD[3]; + unsigned int b = ABCD[2]; + unsigned int c = ABCD[1]; + unsigned int d = ABCD[0]; #if HAVE_BIGENDIAN - for(i=0; i<16; i++) - X[i]= av_bswap32(X[i]); + for (i = 0; i < 16; i++) + X[i] = av_bswap32(X[i]); #endif #if CONFIG_SMALL - for( i = 0; i < 64; i++ ){ - CORE(i,a,b,c,d) - t=d; d=c; c=b; b=a; a=t; + for (i = 0; i < 64; i++) { + CORE(i, a, b, c, d); + t = d; + d = c; + c = b; + b = a; + a = t; } #else -#define CORE2(i) CORE(i,a,b,c,d) CORE((i+1),d,a,b,c) CORE((i+2),c,d,a,b) CORE((i+3),b,c,d,a) -#define CORE4(i) CORE2(i) CORE2((i+4)) CORE2((i+8)) CORE2((i+12)) -CORE4(0) CORE4(16) CORE4(32) CORE4(48) +#define CORE2(i) \ + CORE( i, a,b,c,d); CORE((i+1),d,a,b,c); \ + CORE((i+2),c,d,a,b); CORE((i+3),b,c,d,a) +#define CORE4(i) CORE2(i); CORE2((i+4)); CORE2((i+8)); CORE2((i+12)) + CORE4(0); CORE4(16); CORE4(32); CORE4(48); #endif ABCD[0] += d; @@ -115,8 +123,9 @@ CORE4(0) CORE4(16) CORE4(32) CORE4(48) ABCD[3] += a; } -void av_md5_init(AVMD5 *ctx){ - ctx->len = 0; +void av_md5_init(AVMD5 *ctx) +{ + ctx->len = 0; ctx->ABCD[0] = 
0x10325476; ctx->ABCD[1] = 0x98badcfe; @@ -124,59 +133,72 @@ void av_md5_init(AVMD5 *ctx){ ctx->ABCD[3] = 0x67452301; } -void av_md5_update(AVMD5 *ctx, const uint8_t *src, const int len){ +void av_md5_update(AVMD5 *ctx, const uint8_t *src, const int len) +{ int i, j; - j= ctx->len & 63; + j = ctx->len & 63; ctx->len += len; - for( i = 0; i < len; i++ ){ + for (i = 0; i < len; i++) { ctx->block[j++] = src[i]; - if( 64 == j ){ - body(ctx->ABCD, (uint32_t*) ctx->block); + if (j == 64) { + body(ctx->ABCD, (uint32_t *) ctx->block); j = 0; } } } -void av_md5_final(AVMD5 *ctx, uint8_t *dst){ +void av_md5_final(AVMD5 *ctx, uint8_t *dst) +{ int i; - uint64_t finalcount= av_le2ne64(ctx->len<<3); + uint64_t finalcount = av_le2ne64(ctx->len << 3); av_md5_update(ctx, "\200", 1); - while((ctx->len & 63)!=56) + while ((ctx->len & 63) != 56) av_md5_update(ctx, "", 1); - av_md5_update(ctx, (uint8_t*)&finalcount, 8); + av_md5_update(ctx, (uint8_t *)&finalcount, 8); - for(i=0; i<4; i++) - ((uint32_t*)dst)[i]= av_le2ne32(ctx->ABCD[3-i]); + for (i = 0; i < 4; i++) + AV_WL32(dst + 4*i, ctx->ABCD[3 - i]); } -void av_md5_sum(uint8_t *dst, const uint8_t *src, const int len){ - AVMD5 ctx[1]; +void av_md5_sum(uint8_t *dst, const uint8_t *src, const int len) +{ + AVMD5 ctx; - av_md5_init(ctx); - av_md5_update(ctx, src, len); - av_md5_final(ctx, dst); + av_md5_init(&ctx); + av_md5_update(&ctx, src, len); + av_md5_final(&ctx, dst); } #ifdef TEST -#include <stdio.h> -#include <inttypes.h> #undef printf +#include <stdio.h> + +static void print_md5(uint8_t *md5) +{ + int i; + for (i = 0; i < 16; i++) + printf("%02x", md5[i]); + printf("\n"); +} + int main(void){ - uint64_t md5val; + uint8_t md5val[16]; int i; uint8_t in[1000]; - for(i=0; i<1000; i++) in[i]= i*i; - av_md5_sum( (uint8_t*)&md5val, in, 1000); printf("%"PRId64"\n", md5val); - av_md5_sum( (uint8_t*)&md5val, in, 63); printf("%"PRId64"\n", md5val); - av_md5_sum( (uint8_t*)&md5val, in, 64); printf("%"PRId64"\n", md5val); - av_md5_sum( (uint8_t*)&md5val, in, 65); printf("%"PRId64"\n", md5val); - for(i=0; i<1000; i++) in[i]= i % 127; - av_md5_sum( (uint8_t*)&md5val, in, 999); printf("%"PRId64"\n", md5val); + for (i = 0; i < 1000; i++) + in[i] = i * i; + av_md5_sum(md5val, in, 1000); print_md5(md5val); + av_md5_sum(md5val, in, 63); print_md5(md5val); + av_md5_sum(md5val, in, 64); print_md5(md5val); + av_md5_sum(md5val, in, 65); print_md5(md5val); + for (i = 0; i < 1000; i++) + in[i] = i % 127; + av_md5_sum(md5val, in, 999); print_md5(md5val); return 0; } diff --git a/libavutil/mem.h b/libavutil/mem.h index b4059dc32c..eefa375086 100644 --- a/libavutil/mem.h +++ b/libavutil/mem.h @@ -84,10 +84,10 @@ void *av_malloc(FF_INTERNAL_MEM_TYPE size) av_malloc_attrib av_alloc_size(1); * Allocate or reallocate a block of memory. * If ptr is NULL and size > 0, allocate a new block. If * size is zero, free the memory block pointed to by ptr. - * @param size Size in bytes for the memory block to be allocated or - * reallocated. * @param ptr Pointer to a memory block already allocated with * av_malloc(z)() or av_realloc() or NULL. + * @param size Size in bytes for the memory block to be allocated or + * reallocated. * @return Pointer to a newly reallocated block or NULL if the block * cannot be reallocated or the function is used to free the memory block. 
* @see av_fast_realloc() diff --git a/libavutil/opt.c b/libavutil/opt.c index c0b529b8ea..609167ef62 100644 --- a/libavutil/opt.c +++ b/libavutil/opt.c @@ -30,6 +30,7 @@ #include "opt.h" #include "eval.h" #include "dict.h" +#include "log.h" #if FF_API_FIND_OPT //FIXME order them and do a bin search @@ -195,7 +196,6 @@ int av_set_string3(void *obj, const char *name, const char *val, int alloc, cons return 0; notfirst=1; } - return AVERROR(EINVAL); } if (alloc) { @@ -575,8 +575,9 @@ const AVOption *av_opt_find(void *obj, const char *name, const char *unit, return o; while (o = av_next_option(obj, o)) { - if (!strcmp(o->name, name) && (!unit || (o->unit && !strcmp(o->unit, unit))) && - (o->flags & opt_flags) == opt_flags) + if (!strcmp(o->name, name) && (o->flags & opt_flags) == opt_flags && + ((!unit && o->type != FF_OPT_TYPE_CONST) || + (unit && o->unit && !strcmp(o->unit, unit)))) return o; } return NULL; @@ -603,14 +604,14 @@ typedef struct TestContext #define TEST_FLAG_MU 04 static const AVOption test_options[]= { -{"num", "set num", OFFSET(num), FF_OPT_TYPE_INT, 0, 0, 100 }, -{"toggle", "set toggle", OFFSET(toggle), FF_OPT_TYPE_INT, 0, 0, 1 }, -{"rational", "set rational", OFFSET(rational), FF_OPT_TYPE_RATIONAL, 0, 0, 10 }, -{"string", "set string", OFFSET(string), FF_OPT_TYPE_STRING, 0, CHAR_MIN, CHAR_MAX }, -{"flags", "set flags", OFFSET(flags), FF_OPT_TYPE_FLAGS, 0, 0, INT_MAX, 0, "flags" }, -{"cool", "set cool flag ", 0, FF_OPT_TYPE_CONST, TEST_FLAG_COOL, INT_MIN, INT_MAX, 0, "flags" }, -{"lame", "set lame flag ", 0, FF_OPT_TYPE_CONST, TEST_FLAG_LAME, INT_MIN, INT_MAX, 0, "flags" }, -{"mu", "set mu flag ", 0, FF_OPT_TYPE_CONST, TEST_FLAG_MU, INT_MIN, INT_MAX, 0, "flags" }, +{"num", "set num", OFFSET(num), FF_OPT_TYPE_INT, {0}, 0, 100 }, +{"toggle", "set toggle", OFFSET(toggle), FF_OPT_TYPE_INT, {0}, 0, 1 }, +{"rational", "set rational", OFFSET(rational), FF_OPT_TYPE_RATIONAL, {0}, 0, 10 }, +{"string", "set string", OFFSET(string), FF_OPT_TYPE_STRING, {0}, CHAR_MIN, CHAR_MAX }, +{"flags", "set flags", OFFSET(flags), FF_OPT_TYPE_FLAGS, {0}, 0, INT_MAX, 0, "flags" }, +{"cool", "set cool flag ", 0, FF_OPT_TYPE_CONST, {TEST_FLAG_COOL}, INT_MIN, INT_MAX, 0, "flags" }, +{"lame", "set lame flag ", 0, FF_OPT_TYPE_CONST, {TEST_FLAG_LAME}, INT_MIN, INT_MAX, 0, "flags" }, +{"mu", "set mu flag ", 0, FF_OPT_TYPE_CONST, {TEST_FLAG_MU}, INT_MIN, INT_MAX, 0, "flags" }, {NULL}, }; diff --git a/libavutil/parseutils.c b/libavutil/parseutils.c index b2404b5c0f..2000778a32 100644 --- a/libavutil/parseutils.c +++ b/libavutil/parseutils.c @@ -28,6 +28,7 @@ #include "avstring.h" #include "avutil.h" #include "eval.h" +#include "log.h" #include "random_seed.h" #include "parseutils.h" @@ -400,7 +401,16 @@ static int date_get_num(const char **pp, return val; } -/* small strptime for ffmpeg */ +/** + * Parse the input string p according to the format string fmt and + * store its results in the structure dt. + * This implementation supports only a subset of the formats supported + * by the standard strptime(). 
+ * + * @return a pointer to the first character not processed in this + * function call, or NULL in case the function fails to match all of + * the fmt string and therefore an error occurred + */ static const char *small_strptime(const char *p, const char *fmt, struct tm *dt) @@ -462,7 +472,6 @@ const char *small_strptime(const char *p, const char *fmt, p++; } } - return p; } static time_t mktimegm(struct tm *tm) @@ -484,7 +493,7 @@ static time_t mktimegm(struct tm *tm) return t; } -int av_parse_time(int64_t *timeval, const char *datestr, int duration) +int av_parse_time(int64_t *timeval, const char *timestr, int duration) { const char *p; int64_t t; @@ -506,19 +515,19 @@ int av_parse_time(int64_t *timeval, const char *datestr, int duration) #undef time time_t now = time(0); - len = strlen(datestr); + len = strlen(timestr); if (len > 0) - lastch = datestr[len - 1]; + lastch = timestr[len - 1]; else lastch = '\0'; is_utc = (lastch == 'z' || lastch == 'Z'); memset(&dt, 0, sizeof(dt)); - p = datestr; + p = timestr; q = NULL; if (!duration) { - if (!strncasecmp(datestr, "now", len)) { + if (!strncasecmp(timestr, "now", len)) { *timeval = (int64_t) now * 1000000; return 0; } @@ -555,15 +564,15 @@ int av_parse_time(int64_t *timeval, const char *datestr, int duration) } } } else { - /* parse datestr as a duration */ + /* parse timestr as a duration */ if (p[0] == '-') { negative = 1; ++p; } - /* parse datestr as HH:MM:SS */ + /* parse timestr as HH:MM:SS */ q = small_strptime(p, time_fmt[0], &dt); if (!q) { - /* parse datestr as S+ */ + /* parse timestr as S+ */ dt.tm_sec = strtol(p, (char **)&q, 10); if (q == p) { /* the parsing didn't succeed */ diff --git a/libavutil/parseutils.h b/libavutil/parseutils.h index d31212e257..dfaec5eb9b 100644 --- a/libavutil/parseutils.h +++ b/libavutil/parseutils.h @@ -83,7 +83,7 @@ int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, * January, 1970 up to the time of the parsed date. If timestr cannot * be successfully parsed, set *time to INT64_MIN. - * @param datestr a string representing a date or a duration. + * @param timestr a string representing a date or a duration. 
* - If a date the syntax is: * @code * [{YYYY-MM-DD|YYYYMMDD}[T|t| ]]{{HH[:MM[:SS[.m...]]]}|{HH[MM[SS[.m...]]]}}[Z] diff --git a/libavutil/pixdesc.c b/libavutil/pixdesc.c index 491955a020..8a19be1702 100644 --- a/libavutil/pixdesc.c +++ b/libavutil/pixdesc.c @@ -918,9 +918,9 @@ const AVPixFmtDescriptor av_pix_fmt_descriptors[PIX_FMT_NB] = { .log2_chroma_w= 0, .log2_chroma_h= 0, .comp = { - {0,1,1,0,9}, /* Y */ - {1,1,1,0,9}, /* U */ - {2,1,1,0,9}, /* V */ + {0,1,1,0,8}, /* Y */ + {1,1,1,0,8}, /* U */ + {2,1,1,0,8}, /* V */ }, .flags = PIX_FMT_BE, }, diff --git a/libavutil/pixfmt.h b/libavutil/pixfmt.h index 0f170d2094..bac7b45a60 100644 --- a/libavutil/pixfmt.h +++ b/libavutil/pixfmt.h @@ -143,13 +143,12 @@ enum PixelFormat { PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian - PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian - PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian - PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian - PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian - PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian - PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian - + PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions }; diff --git a/libavutil/random_seed.c b/libavutil/random_seed.c index db1ba39cf2..678cb8dfb4 100644 --- a/libavutil/random_seed.c +++ b/libavutil/random_seed.c @@ -20,10 +20,11 @@ #include <unistd.h> #include <fcntl.h> +#include <math.h> +#include <time.h> #include "timer.h" -#include "time.h" -#include "random_seed.h" #include "avutil.h" +#include "random_seed.h" static int read_random(uint32_t *dst, const char *file) { diff --git a/libavutil/random_seed.h b/libavutil/random_seed.h index 7f75063233..bb957879be 100644 --- a/libavutil/random_seed.h +++ b/libavutil/random_seed.h @@ -25,6 +25,10 @@ /** * Get a seed to use in conjunction with random functions. + * This function tries to provide a good seed at a best effort bases. + * Its possible to call this function multiple times if more bits are needed. + * It can be quite slow, which is why it should only be used as seed for a faster + * PRNG. The quality of the seed depends on the platform. 
*/ uint32_t av_get_random_seed(void); diff --git a/libavutil/rational.c b/libavutil/rational.c index 701f06717e..b1bd655158 100644 --- a/libavutil/rational.c +++ b/libavutil/rational.c @@ -135,7 +135,8 @@ int av_find_nearest_q_idx(AVRational q, const AVRational* q_list) } #ifdef TEST -main(){ +int main(void) +{ AVRational a,b; for(a.num=-2; a.num<=2; a.num++){ for(a.den=-2; a.den<=2; a.den++){ @@ -151,5 +152,6 @@ main(){ } } } + return 0; } #endif diff --git a/libavutil/rc4.h b/libavutil/rc4.h index 07223a5c9e..9362fd8880 100644 --- a/libavutil/rc4.h +++ b/libavutil/rc4.h @@ -29,21 +29,21 @@ struct AVRC4 { }; /** - * \brief Initializes an AVRC4 context. + * @brief Initializes an AVRC4 context. * - * \param key_bits must be a multiple of 8 - * \param decrypt 0 for encryption, 1 for decryption, currently has no effect + * @param key_bits must be a multiple of 8 + * @param decrypt 0 for encryption, 1 for decryption, currently has no effect */ int av_rc4_init(struct AVRC4 *d, const uint8_t *key, int key_bits, int decrypt); /** - * \brief Encrypts / decrypts using the RC4 algorithm. + * @brief Encrypts / decrypts using the RC4 algorithm. * - * \param count number of bytes - * \param dst destination array, can be equal to src - * \param src source array, can be equal to dst, may be NULL - * \param iv not (yet) used for RC4, should be NULL - * \param decrypt 0 for encryption, 1 for decryption, not (yet) used + * @param count number of bytes + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst, may be NULL + * @param iv not (yet) used for RC4, should be NULL + * @param decrypt 0 for encryption, 1 for decryption, not (yet) used */ void av_rc4_crypt(struct AVRC4 *d, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt); diff --git a/libavutil/samplefmt.c b/libavutil/samplefmt.c index 2df06a619e..6f7070e43c 100644 --- a/libavutil/samplefmt.c +++ b/libavutil/samplefmt.c @@ -85,7 +85,7 @@ int av_samples_fill_arrays(uint8_t *pointers[8], int linesizes[8], enum AVSampleFormat sample_fmt, int planar, int align) { int i, linesize; - int sample_size = av_get_bits_per_sample_fmt(sample_fmt) >> 3; + int sample_size = av_get_bytes_per_sample(sample_fmt); if (nb_channels * (uint64_t)nb_samples * sample_size >= INT_MAX - align*(uint64_t)nb_channels) return AVERROR(EINVAL); diff --git a/libavutil/sha.c b/libavutil/sha.c index 2657f7eb90..a734d90498 100644 --- a/libavutil/sha.c +++ b/libavutil/sha.c @@ -43,7 +43,7 @@ const int av_sha_size = sizeof(AVSHA); #define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits)))) /* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */ -#define blk0(i) (block[i] = av_be2ne32(((const uint32_t*)buffer)[i])) +#define blk0(i) (block[i] = AV_RB32(buffer + 4 * (i))) #define blk(i) (block[i] = rol(block[i-3] ^ block[i-8] ^ block[i-14] ^ block[i-16], 1)) #define R0(v,w,x,y,z,i) z += ((w&(x^y))^y) + blk0(i) + 0x5A827999 + rol(v, 5); w = rol(w, 30); @@ -68,7 +68,7 @@ static void sha1_transform(uint32_t state[5], const uint8_t buffer[64]) for (i = 0; i < 80; i++) { int t; if (i < 16) - t = av_be2ne32(((uint32_t*)buffer)[i]); + t = AV_RB32(buffer + 4 * i); else t = rol(block[i-3] ^ block[i-8] ^ block[i-14] ^ block[i-16], 1); block[i] = t; diff --git a/libavutil/timer.h b/libavutil/timer.h index cd8fba8ded..8a0cad56ca 100644 --- a/libavutil/timer.h +++ b/libavutil/timer.h @@ -1,7 +1,4 @@ -/** - * @file - * high precision timer, useful to profile code - * +/* * copyright (c) 2006 Michael Niedermayer 
<michaelni@gmx.at> * * This file is part of FFmpeg. @@ -21,6 +18,11 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * high precision timer, useful to profile code + */ + #ifndef AVUTIL_TIMER_H #define AVUTIL_TIMER_H diff --git a/libpostproc/Makefile b/libpostproc/Makefile index bd0b71e3d4..02257420e9 100644 --- a/libpostproc/Makefile +++ b/libpostproc/Makefile @@ -7,4 +7,4 @@ HEADERS = postprocess.h OBJS = postprocess.o -include $(SUBDIR)../subdir.mak +include $(SRC_PATH)/subdir.mak diff --git a/libpostproc/postprocess.c b/libpostproc/postprocess.c index 589c2cead6..944d581a0f 100644 --- a/libpostproc/postprocess.c +++ b/libpostproc/postprocess.c @@ -246,7 +246,6 @@ static inline int isVertDC_C(uint8_t src[], int stride, PPContext *c) static inline int isHorizMinMaxOk_C(uint8_t src[], int stride, int QP) { int i; -#if 1 for(i=0; i<2; i++){ if((unsigned)(src[0] - src[5] + 2*QP) > 4*QP) return 0; src += stride; @@ -257,19 +256,11 @@ static inline int isHorizMinMaxOk_C(uint8_t src[], int stride, int QP) if((unsigned)(src[6] - src[3] + 2*QP) > 4*QP) return 0; src += stride; } -#else - for(i=0; i<8; i++){ - if((unsigned)(src[0] - src[7] + 2*QP) > 4*QP) return 0; - src += stride; - } -#endif return 1; } static inline int isVertMinMaxOk_C(uint8_t src[], int stride, int QP) { -#if 1 -#if 1 int x; src+= stride*4; for(x=0; x<BLOCK_SIZE; x+=4){ @@ -278,30 +269,7 @@ static inline int isVertMinMaxOk_C(uint8_t src[], int stride, int QP) if((unsigned)(src[2+x + 4*stride] - src[2+x + 1*stride] + 2*QP) > 4*QP) return 0; if((unsigned)(src[3+x + 6*stride] - src[3+x + 3*stride] + 2*QP) > 4*QP) return 0; } -#else - int x; - src+= stride*3; - for(x=0; x<BLOCK_SIZE; x++){ - if((unsigned)(src[x + stride] - src[x + (stride<<3)] + 2*QP) > 4*QP) return 0; - } -#endif - return 1; -#else - int x; - src+= stride*4; - for(x=0; x<BLOCK_SIZE; x++){ - int min=255; - int max=0; - int y; - for(y=0; y<8; y++){ - int v= src[x + y*stride]; - if(v>max) max=v; - if(v<min) min=v; - } - if(max-min > 2*QP) return 0; - } return 1; -#endif } static inline int horizClassify_C(uint8_t src[], int stride, PPContext *c) diff --git a/libpostproc/postprocess_internal.h b/libpostproc/postprocess_internal.h index 3bc51c49a4..fd0c7c1374 100644 --- a/libpostproc/postprocess_internal.h +++ b/libpostproc/postprocess_internal.h @@ -28,6 +28,7 @@ #include <string.h> #include "libavutil/avutil.h" +#include "libavutil/log.h" #include "postprocess.h" #define V_DEBLOCK 0x01 diff --git a/libswscale/Makefile b/libswscale/Makefile index 8bb06baae2..97206200fb 100644 --- a/libswscale/Makefile +++ b/libswscale/Makefile @@ -24,4 +24,4 @@ TESTPROGS = colorspace swscale DIRS = bfin mlib ppc sparc x86 -include $(SUBDIR)../subdir.mak +include $(SRC_PATH)/subdir.mak diff --git a/libswscale/bfin/swscale_bfin.c b/libswscale/bfin/swscale_bfin.c index 4b26ba67c2..870636ea05 100644 --- a/libswscale/bfin/swscale_bfin.c +++ b/libswscale/bfin/swscale_bfin.c @@ -78,8 +78,6 @@ static int yuyvtoyv12_unscaled(SwsContext *c, uint8_t* src[], int srcStride[], i void ff_bfin_get_unscaled_swscale(SwsContext *c) { - SwsFunc swScale = c->swScale; - if (c->dstFormat == PIX_FMT_YUV420P && c->srcFormat == PIX_FMT_UYVY422) { av_log (NULL, AV_LOG_VERBOSE, "selecting Blackfin optimized uyvytoyv12_unscaled\n"); c->swScale = uyvytoyv12_unscaled; diff --git a/libswscale/ppc/swscale_altivec.c b/libswscale/ppc/swscale_altivec.c index 197000beb9..8bc0ddd9d8 100644 --- a/libswscale/ppc/swscale_altivec.c +++ b/libswscale/ppc/swscale_altivec.c 
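The libswscale/ppc hunks below rework the AltiVec output path: yuv2yuvX_altivec_real now receives its destination planes as a single dest[4] array, hScale_altivec_real gains the SwsContext argument, and the single ff_yuv2packedX_altivec entry point becomes a static av_always_inline template that takes the pixel format as a parameter, with a YUV2PACKEDX_WRAPPER macro stamping out one named wrapper per format. What follows is a minimal, self-contained sketch of that wrapper-macro idiom; the names pack_template, PACK_WRAPPER and EX_FMT_* are hypothetical and the signatures are simplified for illustration, not the real libswscale prototypes.

/* Sketch only: a shared always-inline template takes the format as an
 * ordinary parameter, and a macro generates one thin wrapper per format. */
#include <stdio.h>

enum ExampleFormat { EX_FMT_RGB24, EX_FMT_BGR24 };

static inline void pack_template(const unsigned char *src, unsigned char *dst,
                                 int n, enum ExampleFormat target)
{
    int i;
    /* 'target' is a literal constant in every wrapper below, so after
     * inlining the compiler drops the untaken branch of each ternary. */
    for (i = 0; i < n; i++) {
        dst[3 * i + 0] = target == EX_FMT_RGB24 ? src[3 * i + 0] : src[3 * i + 2];
        dst[3 * i + 1] = src[3 * i + 1];
        dst[3 * i + 2] = target == EX_FMT_RGB24 ? src[3 * i + 2] : src[3 * i + 0];
    }
}

#define PACK_WRAPPER(suffix, fmt)                                          \
static void pack_ ## suffix(const unsigned char *src, unsigned char *dst, \
                            int n)                                        \
{                                                                          \
    pack_template(src, dst, n, fmt);                                       \
}

PACK_WRAPPER(rgb24, EX_FMT_RGB24)
PACK_WRAPPER(bgr24, EX_FMT_BGR24)

int main(void)
{
    const unsigned char px[3] = { 10, 20, 30 };
    unsigned char out[3];

    pack_bgr24(px, out, 1);
    printf("%d %d %d\n", out[0], out[1], out[2]);  /* 30 20 10 */
    pack_rgb24(px, out, 1);
    printf("%d %d %d\n", out[0], out[1], out[2]);  /* 10 20 30 */
    return 0;
}

Because each wrapper passes its format as a compile-time constant, the inlined template's per-format branches fold away; this is why the patch below registers ff_yuv2abgr_X_altivec, ff_yuv2bgra_X_altivec and friends from a single switch on c->dstFormat at init time instead of switching on it for every call.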
@@ -98,10 +98,9 @@ yuv2yuvX_altivec_real(SwsContext *c, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, - uint8_t *dest, uint8_t *uDest, - uint8_t *vDest, uint8_t *aDest, - int dstW, int chrDstW) + uint8_t *dest[4], int dstW, int chrDstW) { + uint8_t *yDest = dest[0], *uDest = dest[1], *vDest = dest[2]; const vector signed int vini = {(1 << 18), (1 << 18), (1 << 18), (1 << 18)}; register int i, j; { @@ -150,7 +149,7 @@ yuv2yuvX_altivec_real(SwsContext *c, val[i] += lumSrc[j][i] * lumFilter[j]; } } - altivec_packIntArrayToCharArray(val, dest, dstW); + altivec_packIntArrayToCharArray(val, yDest, dstW); } if (uDest != 0) { DECLARE_ALIGNED(16, int, u)[chrDstW]; @@ -221,9 +220,8 @@ yuv2yuvX_altivec_real(SwsContext *c, } } -static void hScale_altivec_real(int16_t *dst, int dstW, - const uint8_t *src, int srcW, - int xInc, const int16_t *filter, +static void hScale_altivec_real(SwsContext *c, int16_t *dst, int dstW, + const uint8_t *src, const int16_t *filter, const int16_t *filterPos, int filterSize) { register int i; @@ -408,17 +406,25 @@ void ff_sws_init_swScale_altivec(SwsContext *c) if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC)) return; - c->hScale = hScale_altivec_real; - if (!is16BPS(dstFormat) && !is9_OR_10BPS(dstFormat)) { + if (c->scalingBpp == 8) { + c->hScale = hScale_altivec_real; + } + if (!is16BPS(dstFormat) && !is9_OR_10BPS(dstFormat) && + dstFormat != PIX_FMT_NV12 && dstFormat != PIX_FMT_NV21 && + !c->alpPixBuf) { c->yuv2yuvX = yuv2yuvX_altivec_real; } /* The following list of supported dstFormat values should * match what's found in the body of ff_yuv2packedX_altivec() */ - if (!(c->flags & (SWS_BITEXACT | SWS_FULL_CHR_H_INT)) && !c->alpPixBuf && - (c->dstFormat==PIX_FMT_ABGR || c->dstFormat==PIX_FMT_BGRA || - c->dstFormat==PIX_FMT_BGR24 || c->dstFormat==PIX_FMT_RGB24 || - c->dstFormat==PIX_FMT_RGBA || c->dstFormat==PIX_FMT_ARGB)) { - c->yuv2packedX = ff_yuv2packedX_altivec; + if (!(c->flags & (SWS_BITEXACT | SWS_FULL_CHR_H_INT)) && !c->alpPixBuf) { + switch (c->dstFormat) { + case PIX_FMT_ABGR: c->yuv2packedX = ff_yuv2abgr_X_altivec; break; + case PIX_FMT_BGRA: c->yuv2packedX = ff_yuv2bgra_X_altivec; break; + case PIX_FMT_ARGB: c->yuv2packedX = ff_yuv2argb_X_altivec; break; + case PIX_FMT_RGBA: c->yuv2packedX = ff_yuv2rgba_X_altivec; break; + case PIX_FMT_BGR24: c->yuv2packedX = ff_yuv2bgr24_X_altivec; break; + case PIX_FMT_RGB24: c->yuv2packedX = ff_yuv2rgb24_X_altivec; break; } + } } diff --git a/libswscale/ppc/yuv2rgb_altivec.c b/libswscale/ppc/yuv2rgb_altivec.c index e13702b100..8e84c26382 100644 --- a/libswscale/ppc/yuv2rgb_altivec.c +++ b/libswscale/ppc/yuv2rgb_altivec.c @@ -627,13 +627,13 @@ void ff_yuv2rgb_init_tables_altivec(SwsContext *c, const int inv_table[4], int b } -void +static av_always_inline void ff_yuv2packedX_altivec(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, - int dstW, int dstY) + int dstW, int dstY, enum PixelFormat target) { int i,j; vector signed short X,X0,X1,Y0,U0,V0,Y1,U1,V1,U,V; @@ -707,7 +707,7 @@ ff_yuv2packedX_altivec(SwsContext *c, const int16_t *lumFilter, G = vec_packclp (G0,G1); B = vec_packclp (B0,B1); - switch(c->dstFormat) { + switch(target) { case PIX_FMT_ABGR: out_abgr (R,G,B,out); break; case PIX_FMT_BGRA: out_bgra (R,G,B,out); break; case PIX_FMT_RGBA: out_rgba 
(R,G,B,out); break; @@ -786,7 +786,7 @@ ff_yuv2packedX_altivec(SwsContext *c, const int16_t *lumFilter, B = vec_packclp (B0,B1); nout = (vector unsigned char *)scratch; - switch(c->dstFormat) { + switch(target) { case PIX_FMT_ABGR: out_abgr (R,G,B,nout); break; case PIX_FMT_BGRA: out_bgra (R,G,B,nout); break; case PIX_FMT_RGBA: out_rgba (R,G,B,nout); break; @@ -804,3 +804,23 @@ ff_yuv2packedX_altivec(SwsContext *c, const int16_t *lumFilter, } } + +#define YUV2PACKEDX_WRAPPER(suffix, pixfmt) \ +void ff_yuv2 ## suffix ## _X_altivec(SwsContext *c, const int16_t *lumFilter, \ + const int16_t **lumSrc, int lumFilterSize, \ + const int16_t *chrFilter, const int16_t **chrUSrc, \ + const int16_t **chrVSrc, int chrFilterSize, \ + const int16_t **alpSrc, uint8_t *dest, \ + int dstW, int dstY) \ +{ \ + ff_yuv2packedX_altivec(c, lumFilter, lumSrc, lumFilterSize, \ + chrFilter, chrUSrc, chrVSrc, chrFilterSize, \ + alpSrc, dest, dstW, dstY, pixfmt); \ +} + +YUV2PACKEDX_WRAPPER(abgr, PIX_FMT_ABGR); +YUV2PACKEDX_WRAPPER(bgra, PIX_FMT_BGRA); +YUV2PACKEDX_WRAPPER(argb, PIX_FMT_ARGB); +YUV2PACKEDX_WRAPPER(rgba, PIX_FMT_RGBA); +YUV2PACKEDX_WRAPPER(rgb24, PIX_FMT_RGB24); +YUV2PACKEDX_WRAPPER(bgr24, PIX_FMT_BGR24); diff --git a/libswscale/ppc/yuv2rgb_altivec.h b/libswscale/ppc/yuv2rgb_altivec.h index 15385b1d3b..163eba6eb7 100644 --- a/libswscale/ppc/yuv2rgb_altivec.h +++ b/libswscale/ppc/yuv2rgb_altivec.h @@ -24,11 +24,19 @@ #ifndef PPC_YUV2RGB_ALTIVEC_H #define PPC_YUV2RGB_ALTIVEC_H 1 -void ff_yuv2packedX_altivec(SwsContext *c, const int16_t *lumFilter, - const int16_t **lumSrc, int lumFilterSize, - const int16_t *chrFilter, const int16_t **chrUSrc, - const int16_t **chrVSrc, int chrFilterSize, - const int16_t **alpSrc, uint8_t *dest, +#define YUV2PACKEDX_HEADER(suffix) \ +void ff_yuv2 ## suffix ## _X_altivec(SwsContext *c, const int16_t *lumFilter, \ + const int16_t **lumSrc, int lumFilterSize, \ + const int16_t *chrFilter, const int16_t **chrUSrc, \ + const int16_t **chrVSrc, int chrFilterSize, \ + const int16_t **alpSrc, uint8_t *dest, \ int dstW, int dstY); +YUV2PACKEDX_HEADER(abgr); +YUV2PACKEDX_HEADER(bgra); +YUV2PACKEDX_HEADER(argb); +YUV2PACKEDX_HEADER(rgba); +YUV2PACKEDX_HEADER(rgb24); +YUV2PACKEDX_HEADER(bgr24); + #endif /* PPC_YUV2RGB_ALTIVEC_H */ diff --git a/libswscale/swscale.c b/libswscale/swscale.c index abbe375685..9d17868019 100644 --- a/libswscale/swscale.c +++ b/libswscale/swscale.c @@ -80,17 +80,6 @@ untested special converters #define RV ( (int)(0.500*224/255*(1<<RGB2YUV_SHIFT)+0.5)) #define RU (-(int)(0.169*224/255*(1<<RGB2YUV_SHIFT)+0.5)) -static const double rgb2yuv_table[8][9]={ - {0.7152, 0.0722, 0.2126, -0.386, 0.5, -0.115, -0.454, -0.046, 0.5}, //ITU709 - {0.7152, 0.0722, 0.2126, -0.386, 0.5, -0.115, -0.454, -0.046, 0.5}, //ITU709 - {0.587 , 0.114 , 0.299 , -0.331, 0.5, -0.169, -0.419, -0.081, 0.5}, //DEFAULT / ITU601 / ITU624 / SMPTE 170M - {0.587 , 0.114 , 0.299 , -0.331, 0.5, -0.169, -0.419, -0.081, 0.5}, //DEFAULT / ITU601 / ITU624 / SMPTE 170M - {0.59 , 0.11 , 0.30 , -0.331, 0.5, -0.169, -0.421, -0.079, 0.5}, //FCC - {0.587 , 0.114 , 0.299 , -0.331, 0.5, -0.169, -0.419, -0.081, 0.5}, //DEFAULT / ITU601 / ITU624 / SMPTE 170M - {0.587 , 0.114 , 0.299 , -0.331, 0.5, -0.169, -0.419, -0.081, 0.5}, //DEFAULT / ITU601 / ITU624 / SMPTE 170M - {0.701 , 0.087 , 0.212 , -0.384, 0.5, -0.116, -0.445, -0.055, 0.5}, //SMPTE 240M -}; - /* NOTES Special versions: fast Y 1:1 scaling (no interpolation in y direction) @@ -193,6 +182,18 @@ DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[8][8]={ 
{ 77, 23, 60, 15, 72, 21, 56, 14, }, }; #endif +DECLARE_ALIGNED(8, const uint8_t, dither_8x8_128)[8][8] = { +{ 36, 68, 60, 92, 34, 66, 58, 90,}, +{ 100, 4,124, 28, 98, 2,122, 26,}, +{ 52, 84, 44, 76, 50, 82, 42, 74,}, +{ 116, 20,108, 12,114, 18,106, 10,}, +{ 32, 64, 56, 88, 38, 70, 62, 94,}, +{ 96, 0,120, 24,102, 6,126, 30,}, +{ 48, 80, 40, 72, 54, 86, 46, 78,}, +{ 112, 16,104, 8,118, 22,110, 14,}, +}; +DECLARE_ALIGNED(8, const uint8_t, ff_sws_pb_64)[8] = +{ 64, 64, 64, 64, 64, 64, 64, 64 }; DECLARE_ALIGNED(8, const uint8_t, dithers)[8][8][8]={ { @@ -290,17 +291,19 @@ const uint16_t dither_scale[15][16]={ }; static av_always_inline void -yuv2yuvX16_c_template(const int16_t *lumFilter, const int16_t **lumSrc, +yuv2yuvX16_c_template(const int16_t *lumFilter, const int32_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, - const int16_t **chrUSrc, const int16_t **chrVSrc, - int chrFilterSize, const int16_t **alpSrc, - uint16_t *dest, uint16_t *uDest, uint16_t *vDest, - uint16_t *aDest, int dstW, int chrDstW, + const int32_t **chrUSrc, const int32_t **chrVSrc, + int chrFilterSize, const int32_t **alpSrc, + uint16_t *dest[4], int dstW, int chrDstW, int big_endian, int output_bits) { //FIXME Optimize (just quickly written not optimized..) int i; - int shift = 11 + 16 - output_bits; + int dword= output_bits == 16; + uint16_t *yDest = dest[0], *uDest = dest[1], *vDest = dest[2], + *aDest = CONFIG_SWSCALE_ALPHA ? dest[3] : NULL; + int shift = 11 + 4*dword + 16 - output_bits - 1; #define output_pixel(pos, val) \ if (big_endian) { \ @@ -317,24 +320,24 @@ yuv2yuvX16_c_template(const int16_t *lumFilter, const int16_t **lumSrc, } \ } for (i = 0; i < dstW; i++) { - int val = 1 << (26-output_bits); + int val = 1 << (26-output_bits + 4*dword - 1); int j; for (j = 0; j < lumFilterSize; j++) - val += lumSrc[j][i] * lumFilter[j]; + val += ((dword ? lumSrc[j][i] : ((int16_t**)lumSrc)[j][i]) * lumFilter[j])>>1; - output_pixel(&dest[i], val); + output_pixel(&yDest[i], val); } if (uDest) { for (i = 0; i < chrDstW; i++) { - int u = 1 << (26-output_bits); - int v = 1 << (26-output_bits); + int u = 1 << (26-output_bits + 4*dword - 1); + int v = 1 << (26-output_bits + 4*dword - 1); int j; for (j = 0; j < chrFilterSize; j++) { - u += chrUSrc[j][i] * chrFilter[j]; - v += chrVSrc[j][i] * chrFilter[j]; + u += ((dword ? chrUSrc[j][i] : ((int16_t**)chrUSrc)[j][i]) * chrFilter[j]) >> 1; + v += ((dword ? chrVSrc[j][i] : ((int16_t**)chrVSrc)[j][i]) * chrFilter[j]) >> 1; } output_pixel(&uDest[i], u); @@ -344,11 +347,11 @@ yuv2yuvX16_c_template(const int16_t *lumFilter, const int16_t **lumSrc, if (CONFIG_SWSCALE_ALPHA && aDest) { for (i = 0; i < dstW; i++) { - int val = 1 << (26-output_bits); + int val = 1 << (26-output_bits + 4*dword - 1); int j; for (j = 0; j < lumFilterSize; j++) - val += alpSrc[j][i] * lumFilter[j]; + val += ((dword ? 
alpSrc[j][i] : ((int16_t**)alpSrc)[j][i]) * lumFilter[j]) >> 1; output_pixel(&aDest[i], val); } @@ -358,19 +361,19 @@ yuv2yuvX16_c_template(const int16_t *lumFilter, const int16_t **lumSrc, #define yuv2NBPS(bits, BE_LE, is_be) \ static void yuv2yuvX ## bits ## BE_LE ## _c(SwsContext *c, const int16_t *lumFilter, \ - const int16_t **lumSrc, int lumFilterSize, \ - const int16_t *chrFilter, const int16_t **chrUSrc, \ - const int16_t **chrVSrc, \ - int chrFilterSize, const int16_t **alpSrc, \ - uint8_t *_dest, uint8_t *_uDest, uint8_t *_vDest, \ - uint8_t *_aDest, int dstW, int chrDstW) \ + const int16_t **_lumSrc, int lumFilterSize, \ + const int16_t *chrFilter, const int16_t **_chrUSrc, \ + const int16_t **_chrVSrc, \ + int chrFilterSize, const int16_t **_alpSrc, \ + uint8_t *_dest[4], int dstW, int chrDstW) \ { \ - uint16_t *dest = (uint16_t *) _dest, *uDest = (uint16_t *) _uDest, \ - *vDest = (uint16_t *) _vDest, *aDest = (uint16_t *) _aDest; \ + const int32_t **lumSrc = (const int32_t **) _lumSrc, \ + **chrUSrc = (const int32_t **) _chrUSrc, \ + **chrVSrc = (const int32_t **) _chrVSrc, \ + **alpSrc = (const int32_t **) _alpSrc; \ yuv2yuvX16_c_template(lumFilter, lumSrc, lumFilterSize, \ chrFilter, chrUSrc, chrVSrc, chrFilterSize, \ - alpSrc, \ - dest, uDest, vDest, aDest, \ + alpSrc, (uint16_t **) _dest, \ dstW, chrDstW, is_be, bits); \ } yuv2NBPS( 9, BE, 1); @@ -385,25 +388,27 @@ static void yuv2yuvX_c(SwsContext *c, const int16_t *lumFilter, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, - uint8_t *dest, uint8_t *uDest, uint8_t *vDest, - uint8_t *aDest, int dstW, int chrDstW, - const uint8_t *lumDither, const uint8_t *chrDither) + uint8_t *dest[4], int dstW, int chrDstW) { - //FIXME Optimize (just quickly written not optimized..) + uint8_t *yDest = dest[0], *uDest = dest[1], *vDest = dest[2], + *aDest = CONFIG_SWSCALE_ALPHA ? dest[3] : NULL; int i; + const uint8_t *lumDither = c->lumDither8, *chrDither = c->chrDither8; + + //FIXME Optimize (just quickly written not optimized..) for (i=0; i<dstW; i++) { - int val = lumDither[i&7] << 12; + int val = lumDither[i & 7] << 12; int j; for (j=0; j<lumFilterSize; j++) val += lumSrc[j][i] * lumFilter[j]; - dest[i]= av_clip_uint8(val>>19); + yDest[i]= av_clip_uint8(val>>19); } if (uDest) for (i=0; i<chrDstW; i++) { - int u = chrDither[i&7] << 12; - int v = chrDither[(i+3)&7] << 12; + int u = chrDither[i & 7] << 12; + int v = chrDither[(i + 3) & 7] << 12; int j; for (j=0; j<chrFilterSize; j++) { u += chrUSrc[j][i] * chrFilter[j]; @@ -416,7 +421,7 @@ static void yuv2yuvX_c(SwsContext *c, const int16_t *lumFilter, if (CONFIG_SWSCALE_ALPHA && aDest) for (i=0; i<dstW; i++) { - int val = lumDither[i&7] << 12; + int val = lumDither[i & 7] << 12; int j; for (j=0; j<lumFilterSize; j++) val += alpSrc[j][i] * lumFilter[j]; @@ -426,29 +431,31 @@ static void yuv2yuvX_c(SwsContext *c, const int16_t *lumFilter, } static void yuv2yuv1_c(SwsContext *c, const int16_t *lumSrc, - const int16_t *chrUSrc, const int16_t *chrVSrc, - const int16_t *alpSrc, - uint8_t *dest, uint8_t *uDest, uint8_t *vDest, - uint8_t *aDest, int dstW, int chrDstW, const uint8_t *lumDither, const uint8_t *chrDither) + const int16_t *chrUSrc, const int16_t *chrVSrc, + const int16_t *alpSrc, + uint8_t *dest[4], int dstW, int chrDstW) { + uint8_t *yDest = dest[0], *uDest = dest[1], *vDest = dest[2], + *aDest = CONFIG_SWSCALE_ALPHA ? 
dest[3] : NULL; int i; + const uint8_t *lumDither = c->lumDither8, *chrDither = c->chrDither8; for (i=0; i<dstW; i++) { - int val= (lumSrc[i]+lumDither[i&7])>>7; - dest[i]= av_clip_uint8(val); + int val = (lumSrc[i]+ lumDither[i & 7]) >> 7; + yDest[i]= av_clip_uint8(val); } if (uDest) for (i=0; i<chrDstW; i++) { - int u=(chrUSrc[i]+chrDither[i&7])>>7; - int v=(chrVSrc[i]+chrDither[(i+3)&7])>>7; + int u = (chrUSrc[i] + chrDither[i & 7]) >> 7; + int v = (chrVSrc[i] + chrDither[(i + 3) & 7]) >> 7; uDest[i]= av_clip_uint8(u); vDest[i]= av_clip_uint8(v); } if (CONFIG_SWSCALE_ALPHA && aDest) for (i=0; i<dstW; i++) { - int val= (alpSrc[i]+lumDither[i&7])>>7; + int val = (alpSrc[i] + lumDither[i & 7]) >> 7; aDest[i]= av_clip_uint8(val); } } @@ -457,22 +464,22 @@ static void yuv2nv12X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, - const int16_t **alpSrc, uint8_t *dest, uint8_t *uDest, - uint8_t *vDest, uint8_t *aDest, - int dstW, int chrDstW, - const uint8_t *lumDither, const uint8_t *chrDither) + const int16_t **alpSrc, uint8_t *dest[4], + int dstW, int chrDstW) { + uint8_t *yDest = dest[0], *uDest = dest[1]; enum PixelFormat dstFormat = c->dstFormat; + const uint8_t *lumDither = c->lumDither8, *chrDither = c->chrDither8; //FIXME Optimize (just quickly written not optimized..) int i; for (i=0; i<dstW; i++) { - int val = lumDither[i&7]<<12; + int val = lumDither[i & 7] << 12; int j; for (j=0; j<lumFilterSize; j++) val += lumSrc[j][i] * lumFilter[j]; - dest[i]= av_clip_uint8(val>>19); + yDest[i]= av_clip_uint8(val>>19); } if (!uDest) @@ -480,8 +487,8 @@ static void yuv2nv12X_c(SwsContext *c, const int16_t *lumFilter, if (dstFormat == PIX_FMT_NV12) for (i=0; i<chrDstW; i++) { - int u = chrDither[i&7]<<12; - int v = chrDither[(i+3)&7]<<12; + int u = chrDither[i & 7] << 12; + int v = chrDither[(i + 3) & 7] << 12; int j; for (j=0; j<chrFilterSize; j++) { u += chrUSrc[j][i] * chrFilter[j]; @@ -493,8 +500,8 @@ static void yuv2nv12X_c(SwsContext *c, const int16_t *lumFilter, } else for (i=0; i<chrDstW; i++) { - int u = chrDither[i&7]<<12; - int v = chrDither[(i+3)&7]<<12; + int u = chrDither[i & 7] << 12; + int v = chrDither[(i + 3) & 7] << 12; int j; for (j=0; j<chrFilterSize; j++) { u += chrUSrc[j][i] * chrFilter[j]; @@ -515,118 +522,121 @@ static void yuv2nv12X_c(SwsContext *c, const int16_t *lumFilter, static av_always_inline void yuv2gray16_X_c_template(SwsContext *c, const int16_t *lumFilter, - const int16_t **lumSrc, int lumFilterSize, - const int16_t *chrFilter, const int16_t **chrUSrc, - const int16_t **chrVSrc, int chrFilterSize, - const int16_t **alpSrc, uint8_t *dest, int dstW, + const int32_t **lumSrc, int lumFilterSize, + const int16_t *chrFilter, const int32_t **chrUSrc, + const int32_t **chrVSrc, int chrFilterSize, + const int32_t **alpSrc, uint16_t *dest, int dstW, int y, enum PixelFormat target) { int i; for (i = 0; i < (dstW >> 1); i++) { int j; - int Y1 = 1 << 18; - int Y2 = 1 << 18; - const int i2 = 2 * i; + int Y1 = 1 << 14; + int Y2 = 1 << 14; for (j = 0; j < lumFilterSize; j++) { - Y1 += lumSrc[j][i2] * lumFilter[j]; - Y2 += lumSrc[j][i2+1] * lumFilter[j]; + Y1 += lumSrc[j][i * 2] * lumFilter[j]; + Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j]; } - Y1 >>= 11; - Y2 >>= 11; + Y1 >>= 15; + Y2 >>= 15; if ((Y1 | Y2) & 0x10000) { Y1 = av_clip_uint16(Y1); Y2 = av_clip_uint16(Y2); } - output_pixel(&dest[2 * i2 + 0], Y1); - output_pixel(&dest[2 * i2 + 2], Y2); + 
output_pixel(&dest[i * 2 + 0], Y1); + output_pixel(&dest[i * 2 + 1], Y2); } } static av_always_inline void -yuv2gray16_2_c_template(SwsContext *c, const uint16_t *buf0, - const uint16_t *buf1, const uint16_t *ubuf0, - const uint16_t *ubuf1, const uint16_t *vbuf0, - const uint16_t *vbuf1, const uint16_t *abuf0, - const uint16_t *abuf1, uint8_t *dest, int dstW, +yuv2gray16_2_c_template(SwsContext *c, const int32_t *buf[2], + const int32_t *ubuf[2], const int32_t *vbuf[2], + const int32_t *abuf[2], uint16_t *dest, int dstW, int yalpha, int uvalpha, int y, enum PixelFormat target) { - int yalpha1 = 4095 - yalpha; \ + int yalpha1 = 4095 - yalpha; int i; + const int32_t *buf0 = buf[0], *buf1 = buf[1]; for (i = 0; i < (dstW >> 1); i++) { - const int i2 = 2 * i; - int Y1 = (buf0[i2 ] * yalpha1 + buf1[i2 ] * yalpha) >> 11; - int Y2 = (buf0[i2+1] * yalpha1 + buf1[i2+1] * yalpha) >> 11; + int Y1 = (buf0[i * 2 ] * yalpha1 + buf1[i * 2 ] * yalpha) >> 15; + int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 15; - output_pixel(&dest[2 * i2 + 0], Y1); - output_pixel(&dest[2 * i2 + 2], Y2); + output_pixel(&dest[i * 2 + 0], Y1); + output_pixel(&dest[i * 2 + 1], Y2); } } static av_always_inline void -yuv2gray16_1_c_template(SwsContext *c, const uint16_t *buf0, - const uint16_t *ubuf0, const uint16_t *ubuf1, - const uint16_t *vbuf0, const uint16_t *vbuf1, - const uint16_t *abuf0, uint8_t *dest, int dstW, - int uvalpha, enum PixelFormat dstFormat, - int flags, int y, enum PixelFormat target) +yuv2gray16_1_c_template(SwsContext *c, const int32_t *buf0, + const int32_t *ubuf[2], const int32_t *vbuf[2], + const int32_t *abuf0, uint16_t *dest, int dstW, + int uvalpha, int y, enum PixelFormat target) { int i; for (i = 0; i < (dstW >> 1); i++) { - const int i2 = 2 * i; - int Y1 = buf0[i2 ] << 1; - int Y2 = buf0[i2+1] << 1; + int Y1 = (buf0[i * 2 ]+4)>>3; + int Y2 = (buf0[i * 2 + 1]+4)>>3; - output_pixel(&dest[2 * i2 + 0], Y1); - output_pixel(&dest[2 * i2 + 2], Y2); + output_pixel(&dest[i * 2 + 0], Y1); + output_pixel(&dest[i * 2 + 1], Y2); } } #undef output_pixel -#define YUV2PACKEDWRAPPER(name, base, ext, fmt) \ +#define YUV2PACKED16WRAPPER(name, base, ext, fmt) \ static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \ - const int16_t **lumSrc, int lumFilterSize, \ - const int16_t *chrFilter, const int16_t **chrUSrc, \ - const int16_t **chrVSrc, int chrFilterSize, \ - const int16_t **alpSrc, uint8_t *dest, int dstW, \ + const int16_t **_lumSrc, int lumFilterSize, \ + const int16_t *chrFilter, const int16_t **_chrUSrc, \ + const int16_t **_chrVSrc, int chrFilterSize, \ + const int16_t **_alpSrc, uint8_t *_dest, int dstW, \ int y) \ { \ + const int32_t **lumSrc = (const int32_t **) _lumSrc, \ + **chrUSrc = (const int32_t **) _chrUSrc, \ + **chrVSrc = (const int32_t **) _chrVSrc, \ + **alpSrc = (const int32_t **) _alpSrc; \ + uint16_t *dest = (uint16_t *) _dest; \ name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \ chrFilter, chrUSrc, chrVSrc, chrFilterSize, \ alpSrc, dest, dstW, y, fmt); \ } \ \ -static void name ## ext ## _2_c(SwsContext *c, const uint16_t *buf0, \ - const uint16_t *buf1, const uint16_t *ubuf0, \ - const uint16_t *ubuf1, const uint16_t *vbuf0, \ - const uint16_t *vbuf1, const uint16_t *abuf0, \ - const uint16_t *abuf1, uint8_t *dest, int dstW, \ +static void name ## ext ## _2_c(SwsContext *c, const int16_t *_buf[2], \ + const int16_t *_ubuf[2], const int16_t *_vbuf[2], \ + const int16_t *_abuf[2], uint8_t *_dest, int dstW, \ int yalpha, int 
uvalpha, int y) \ { \ - name ## base ## _2_c_template(c, buf0, buf1, ubuf0, ubuf1, \ - vbuf0, vbuf1, abuf0, abuf1, \ + const int32_t **buf = (const int32_t **) _buf, \ + **ubuf = (const int32_t **) _ubuf, \ + **vbuf = (const int32_t **) _vbuf, \ + **abuf = (const int32_t **) _abuf; \ + uint16_t *dest = (uint16_t *) _dest; \ + name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \ dest, dstW, yalpha, uvalpha, y, fmt); \ } \ \ -static void name ## ext ## _1_c(SwsContext *c, const uint16_t *buf0, \ - const uint16_t *ubuf0, const uint16_t *ubuf1, \ - const uint16_t *vbuf0, const uint16_t *vbuf1, \ - const uint16_t *abuf0, uint8_t *dest, int dstW, \ - int uvalpha, enum PixelFormat dstFormat, \ - int flags, int y) \ +static void name ## ext ## _1_c(SwsContext *c, const int16_t *_buf0, \ + const int16_t *_ubuf[2], const int16_t *_vbuf[2], \ + const int16_t *_abuf0, uint8_t *_dest, int dstW, \ + int uvalpha, int y) \ { \ - name ## base ## _1_c_template(c, buf0, ubuf0, ubuf1, vbuf0, \ - vbuf1, abuf0, dest, dstW, uvalpha, \ - dstFormat, flags, y, fmt); \ + const int32_t *buf0 = (const int32_t *) _buf0, \ + **ubuf = (const int32_t **) _ubuf, \ + **vbuf = (const int32_t **) _vbuf, \ + *abuf0 = (const int32_t *) _abuf0; \ + uint16_t *dest = (uint16_t *) _dest; \ + name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \ + dstW, uvalpha, y, fmt); \ } -YUV2PACKEDWRAPPER(yuv2gray16,, LE, PIX_FMT_GRAY16LE); -YUV2PACKEDWRAPPER(yuv2gray16,, BE, PIX_FMT_GRAY16BE); +YUV2PACKED16WRAPPER(yuv2gray16,, LE, PIX_FMT_GRAY16LE); +YUV2PACKED16WRAPPER(yuv2gray16,, BE, PIX_FMT_GRAY16BE); #define output_pixel(pos, acc) \ if (target == PIX_FMT_MONOBLACK) { \ @@ -672,14 +682,13 @@ yuv2mono_X_c_template(SwsContext *c, const int16_t *lumFilter, } static av_always_inline void -yuv2mono_2_c_template(SwsContext *c, const uint16_t *buf0, - const uint16_t *buf1, const uint16_t *ubuf0, - const uint16_t *ubuf1, const uint16_t *vbuf0, - const uint16_t *vbuf1, const uint16_t *abuf0, - const uint16_t *abuf1, uint8_t *dest, int dstW, +yuv2mono_2_c_template(SwsContext *c, const int16_t *buf[2], + const int16_t *ubuf[2], const int16_t *vbuf[2], + const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum PixelFormat target) { + const int16_t *buf0 = buf[0], *buf1 = buf[1]; const uint8_t * const d128 = dither_8x8_220[y & 7]; uint8_t *g = c->table_gU[128] + c->table_gV[128]; int yalpha1 = 4095 - yalpha; @@ -699,12 +708,10 @@ yuv2mono_2_c_template(SwsContext *c, const uint16_t *buf0, } static av_always_inline void -yuv2mono_1_c_template(SwsContext *c, const uint16_t *buf0, - const uint16_t *ubuf0, const uint16_t *ubuf1, - const uint16_t *vbuf0, const uint16_t *vbuf1, - const uint16_t *abuf0, uint8_t *dest, int dstW, - int uvalpha, enum PixelFormat dstFormat, - int flags, int y, enum PixelFormat target) +yuv2mono_1_c_template(SwsContext *c, const int16_t *buf0, + const int16_t *ubuf[2], const int16_t *vbuf[2], + const int16_t *abuf0, uint8_t *dest, int dstW, + int uvalpha, int y, enum PixelFormat target) { const uint8_t * const d128 = dither_8x8_220[y & 7]; uint8_t *g = c->table_gU[128] + c->table_gV[128]; @@ -725,6 +732,38 @@ yuv2mono_1_c_template(SwsContext *c, const uint16_t *buf0, #undef output_pixel +#define YUV2PACKEDWRAPPER(name, base, ext, fmt) \ +static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \ + const int16_t **lumSrc, int lumFilterSize, \ + const int16_t *chrFilter, const int16_t **chrUSrc, \ + const int16_t **chrVSrc, int chrFilterSize, \ + const int16_t **alpSrc, 
uint8_t *dest, int dstW, \ + int y) \ +{ \ + name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \ + chrFilter, chrUSrc, chrVSrc, chrFilterSize, \ + alpSrc, dest, dstW, y, fmt); \ +} \ + \ +static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \ + const int16_t *ubuf[2], const int16_t *vbuf[2], \ + const int16_t *abuf[2], uint8_t *dest, int dstW, \ + int yalpha, int uvalpha, int y) \ +{ \ + name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \ + dest, dstW, yalpha, uvalpha, y, fmt); \ +} \ + \ +static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \ + const int16_t *ubuf[2], const int16_t *vbuf[2], \ + const int16_t *abuf0, uint8_t *dest, int dstW, \ + int uvalpha, int y) \ +{ \ + name ## base ## _1_c_template(c, buf0, ubuf, vbuf, \ + abuf0, dest, dstW, uvalpha, \ + y, fmt); \ +} + YUV2PACKEDWRAPPER(yuv2mono,, white, PIX_FMT_MONOWHITE); YUV2PACKEDWRAPPER(yuv2mono,, black, PIX_FMT_MONOBLACK); @@ -781,14 +820,15 @@ yuv2422_X_c_template(SwsContext *c, const int16_t *lumFilter, } static av_always_inline void -yuv2422_2_c_template(SwsContext *c, const uint16_t *buf0, - const uint16_t *buf1, const uint16_t *ubuf0, - const uint16_t *ubuf1, const uint16_t *vbuf0, - const uint16_t *vbuf1, const uint16_t *abuf0, - const uint16_t *abuf1, uint8_t *dest, int dstW, +yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2], + const int16_t *ubuf[2], const int16_t *vbuf[2], + const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum PixelFormat target) { + const int16_t *buf0 = buf[0], *buf1 = buf[1], + *ubuf0 = ubuf[0], *ubuf1 = ubuf[1], + *vbuf0 = vbuf[0], *vbuf1 = vbuf[1]; int yalpha1 = 4095 - yalpha; int uvalpha1 = 4095 - uvalpha; int i; @@ -804,13 +844,13 @@ yuv2422_2_c_template(SwsContext *c, const uint16_t *buf0, } static av_always_inline void -yuv2422_1_c_template(SwsContext *c, const uint16_t *buf0, - const uint16_t *ubuf0, const uint16_t *ubuf1, - const uint16_t *vbuf0, const uint16_t *vbuf1, - const uint16_t *abuf0, uint8_t *dest, int dstW, - int uvalpha, enum PixelFormat dstFormat, - int flags, int y, enum PixelFormat target) +yuv2422_1_c_template(SwsContext *c, const int16_t *buf0, + const int16_t *ubuf[2], const int16_t *vbuf[2], + const int16_t *abuf0, uint8_t *dest, int dstW, + int uvalpha, int y, enum PixelFormat target) { + const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1], + *vbuf0 = vbuf[0], *vbuf1 = vbuf[1]; int i; if (uvalpha < 2048) { @@ -839,26 +879,321 @@ yuv2422_1_c_template(SwsContext *c, const uint16_t *buf0, YUV2PACKEDWRAPPER(yuv2, 422, yuyv422, PIX_FMT_YUYV422); YUV2PACKEDWRAPPER(yuv2, 422, uyvy422, PIX_FMT_UYVY422); -#define r_b ((target == PIX_FMT_RGB48LE || target == PIX_FMT_RGB48BE) ? r : b) -#define b_r ((target == PIX_FMT_RGB48LE || target == PIX_FMT_RGB48BE) ? b : r) +#define R_B ((target == PIX_FMT_RGB48LE || target == PIX_FMT_RGB48BE) ? R : B) +#define B_R ((target == PIX_FMT_RGB48LE || target == PIX_FMT_RGB48BE) ? 
B : R) +#define output_pixel(pos, val) \ + if (isBE(target)) { \ + AV_WB16(pos, val); \ + } else { \ + AV_WL16(pos, val); \ + } static av_always_inline void yuv2rgb48_X_c_template(SwsContext *c, const int16_t *lumFilter, - const int16_t **lumSrc, int lumFilterSize, - const int16_t *chrFilter, const int16_t **chrUSrc, - const int16_t **chrVSrc, int chrFilterSize, - const int16_t **alpSrc, uint8_t *dest, int dstW, + const int32_t **lumSrc, int lumFilterSize, + const int16_t *chrFilter, const int32_t **chrUSrc, + const int32_t **chrVSrc, int chrFilterSize, + const int32_t **alpSrc, uint16_t *dest, int dstW, int y, enum PixelFormat target) { int i; for (i = 0; i < (dstW >> 1); i++) { int j; + int Y1 = 0; + int Y2 = 0; + int U = -128 << 23; // 19 + int V = -128 << 23; + int R, G, B; + + for (j = 0; j < lumFilterSize; j++) { + Y1 += lumSrc[j][i * 2] * lumFilter[j]; + Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j]; + } + for (j = 0; j < chrFilterSize; j++) { + U += chrUSrc[j][i] * chrFilter[j]; + V += chrVSrc[j][i] * chrFilter[j]; + } + + // 8bit: 12+15=27; 16-bit: 12+19=31 + Y1 >>= 14; // 10 + Y2 >>= 14; + U >>= 14; + V >>= 14; + + // 8bit: 27 -> 17bit, 16bit: 31 - 14 = 17bit + Y1 -= c->yuv2rgb_y_offset; + Y2 -= c->yuv2rgb_y_offset; + Y1 *= c->yuv2rgb_y_coeff; + Y2 *= c->yuv2rgb_y_coeff; + Y1 += 1 << 13; // 21 + Y2 += 1 << 13; + // 8bit: 17 + 13bit = 30bit, 16bit: 17 + 13bit = 30bit + + R = V * c->yuv2rgb_v2r_coeff; + G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff; + B = U * c->yuv2rgb_u2b_coeff; + + // 8bit: 30 - 22 = 8bit, 16bit: 30bit - 14 = 16bit + output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14); + output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14); + output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14); + output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14); + output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14); + output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14); + dest += 6; + } +} + +static av_always_inline void +yuv2rgb48_2_c_template(SwsContext *c, const int32_t *buf[2], + const int32_t *ubuf[2], const int32_t *vbuf[2], + const int32_t *abuf[2], uint16_t *dest, int dstW, + int yalpha, int uvalpha, int y, + enum PixelFormat target) +{ + const int32_t *buf0 = buf[0], *buf1 = buf[1], + *ubuf0 = ubuf[0], *ubuf1 = ubuf[1], + *vbuf0 = vbuf[0], *vbuf1 = vbuf[1]; + int yalpha1 = 4095 - yalpha; + int uvalpha1 = 4095 - uvalpha; + int i; + + for (i = 0; i < (dstW >> 1); i++) { + int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 14; + int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 14; + int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha + (-128 << 23)) >> 14; + int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha + (-128 << 23)) >> 14; + int R, G, B; + + Y1 -= c->yuv2rgb_y_offset; + Y2 -= c->yuv2rgb_y_offset; + Y1 *= c->yuv2rgb_y_coeff; + Y2 *= c->yuv2rgb_y_coeff; + Y1 += 1 << 13; + Y2 += 1 << 13; + + R = V * c->yuv2rgb_v2r_coeff; + G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff; + B = U * c->yuv2rgb_u2b_coeff; + + output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14); + output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14); + output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14); + output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14); + output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14); + output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14); + dest += 6; + } +} + +static av_always_inline void +yuv2rgb48_1_c_template(SwsContext *c, const int32_t *buf0, + const int32_t *ubuf[2], 
const int32_t *vbuf[2], + const int32_t *abuf0, uint16_t *dest, int dstW, + int uvalpha, int y, enum PixelFormat target) +{ + const int32_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1], + *vbuf0 = vbuf[0], *vbuf1 = vbuf[1]; + int i; + + if (uvalpha < 2048) { + for (i = 0; i < (dstW >> 1); i++) { + int Y1 = (buf0[i * 2] ) >> 2; + int Y2 = (buf0[i * 2 + 1]) >> 2; + int U = (ubuf0[i] + (-128 << 11)) >> 2; + int V = (vbuf0[i] + (-128 << 11)) >> 2; + int R, G, B; + + Y1 -= c->yuv2rgb_y_offset; + Y2 -= c->yuv2rgb_y_offset; + Y1 *= c->yuv2rgb_y_coeff; + Y2 *= c->yuv2rgb_y_coeff; + Y1 += 1 << 13; + Y2 += 1 << 13; + + R = V * c->yuv2rgb_v2r_coeff; + G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff; + B = U * c->yuv2rgb_u2b_coeff; + + output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14); + output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14); + output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14); + output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14); + output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14); + output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14); + dest += 6; + } + } else { + for (i = 0; i < (dstW >> 1); i++) { + int Y1 = (buf0[i * 2] ) >> 2; + int Y2 = (buf0[i * 2 + 1]) >> 2; + int U = (ubuf0[i] + ubuf1[i] + (-128 << 12)) >> 3; + int V = (vbuf0[i] + vbuf1[i] + (-128 << 12)) >> 3; + int R, G, B; + + Y1 -= c->yuv2rgb_y_offset; + Y2 -= c->yuv2rgb_y_offset; + Y1 *= c->yuv2rgb_y_coeff; + Y2 *= c->yuv2rgb_y_coeff; + Y1 += 1 << 13; + Y2 += 1 << 13; + + R = V * c->yuv2rgb_v2r_coeff; + G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff; + B = U * c->yuv2rgb_u2b_coeff; + + output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14); + output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14); + output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14); + output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14); + output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14); + output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14); + dest += 6; + } + } +} + +#undef output_pixel +#undef r_b +#undef b_r + +YUV2PACKED16WRAPPER(yuv2, rgb48, rgb48be, PIX_FMT_RGB48BE); +YUV2PACKED16WRAPPER(yuv2, rgb48, rgb48le, PIX_FMT_RGB48LE); +YUV2PACKED16WRAPPER(yuv2, rgb48, bgr48be, PIX_FMT_BGR48BE); +YUV2PACKED16WRAPPER(yuv2, rgb48, bgr48le, PIX_FMT_BGR48LE); + +static av_always_inline void +yuv2rgb_write(uint8_t *_dest, int i, int Y1, int Y2, + int U, int V, int A1, int A2, + const void *_r, const void *_g, const void *_b, int y, + enum PixelFormat target, int hasAlpha) +{ + if (target == PIX_FMT_ARGB || target == PIX_FMT_RGBA || + target == PIX_FMT_ABGR || target == PIX_FMT_BGRA) { + uint32_t *dest = (uint32_t *) _dest; + const uint32_t *r = (const uint32_t *) _r; + const uint32_t *g = (const uint32_t *) _g; + const uint32_t *b = (const uint32_t *) _b; + +#if CONFIG_SMALL + int sh = hasAlpha ? ((target == PIX_FMT_RGB32_1 || target == PIX_FMT_BGR32_1) ? 0 : 24) : 0; + + dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (hasAlpha ? A1 << sh : 0); + dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (hasAlpha ? A2 << sh : 0); +#else + if (hasAlpha) { + int sh = (target == PIX_FMT_RGB32_1 || target == PIX_FMT_BGR32_1) ? 
0 : 24; + + dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (A1 << sh); + dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (A2 << sh); + } else { + dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1]; + dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2]; + } +#endif + } else if (target == PIX_FMT_RGB24 || target == PIX_FMT_BGR24) { + uint8_t *dest = (uint8_t *) _dest; + const uint8_t *r = (const uint8_t *) _r; + const uint8_t *g = (const uint8_t *) _g; + const uint8_t *b = (const uint8_t *) _b; + +#define r_b ((target == PIX_FMT_RGB24) ? r : b) +#define b_r ((target == PIX_FMT_RGB24) ? b : r) + + dest[i * 6 + 0] = r_b[Y1]; + dest[i * 6 + 1] = g[Y1]; + dest[i * 6 + 2] = b_r[Y1]; + dest[i * 6 + 3] = r_b[Y2]; + dest[i * 6 + 4] = g[Y2]; + dest[i * 6 + 5] = b_r[Y2]; +#undef r_b +#undef b_r + } else if (target == PIX_FMT_RGB565 || target == PIX_FMT_BGR565 || + target == PIX_FMT_RGB555 || target == PIX_FMT_BGR555 || + target == PIX_FMT_RGB444 || target == PIX_FMT_BGR444) { + uint16_t *dest = (uint16_t *) _dest; + const uint16_t *r = (const uint16_t *) _r; + const uint16_t *g = (const uint16_t *) _g; + const uint16_t *b = (const uint16_t *) _b; + int dr1, dg1, db1, dr2, dg2, db2; + + if (target == PIX_FMT_RGB565 || target == PIX_FMT_BGR565) { + dr1 = dither_2x2_8[ y & 1 ][0]; + dg1 = dither_2x2_4[ y & 1 ][0]; + db1 = dither_2x2_8[(y & 1) ^ 1][0]; + dr2 = dither_2x2_8[ y & 1 ][1]; + dg2 = dither_2x2_4[ y & 1 ][1]; + db2 = dither_2x2_8[(y & 1) ^ 1][1]; + } else if (target == PIX_FMT_RGB555 || target == PIX_FMT_BGR555) { + dr1 = dither_2x2_8[ y & 1 ][0]; + dg1 = dither_2x2_8[ y & 1 ][1]; + db1 = dither_2x2_8[(y & 1) ^ 1][0]; + dr2 = dither_2x2_8[ y & 1 ][1]; + dg2 = dither_2x2_8[ y & 1 ][0]; + db2 = dither_2x2_8[(y & 1) ^ 1][1]; + } else { + dr1 = dither_4x4_16[ y & 3 ][0]; + dg1 = dither_4x4_16[ y & 3 ][1]; + db1 = dither_4x4_16[(y & 3) ^ 3][0]; + dr2 = dither_4x4_16[ y & 3 ][1]; + dg2 = dither_4x4_16[ y & 3 ][0]; + db2 = dither_4x4_16[(y & 3) ^ 3][1]; + } + + dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1]; + dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]; + } else /* 8/4-bit */ { + uint8_t *dest = (uint8_t *) _dest; + const uint8_t *r = (const uint8_t *) _r; + const uint8_t *g = (const uint8_t *) _g; + const uint8_t *b = (const uint8_t *) _b; + int dr1, dg1, db1, dr2, dg2, db2; + + if (target == PIX_FMT_RGB8 || target == PIX_FMT_BGR8) { + const uint8_t * const d64 = dither_8x8_73[y & 7]; + const uint8_t * const d32 = dither_8x8_32[y & 7]; + dr1 = dg1 = d32[(i * 2 + 0) & 7]; + db1 = d64[(i * 2 + 0) & 7]; + dr2 = dg2 = d32[(i * 2 + 1) & 7]; + db2 = d64[(i * 2 + 1) & 7]; + } else { + const uint8_t * const d64 = dither_8x8_73 [y & 7]; + const uint8_t * const d128 = dither_8x8_220[y & 7]; + dr1 = db1 = d128[(i * 2 + 0) & 7]; + dg1 = d64[(i * 2 + 0) & 7]; + dr2 = db2 = d128[(i * 2 + 1) & 7]; + dg2 = d64[(i * 2 + 1) & 7]; + } + + if (target == PIX_FMT_RGB4 || target == PIX_FMT_BGR4) { + dest[i] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1] + + ((r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]) << 4); + } else { + dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1]; + dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]; + } + } +} + +static av_always_inline void +yuv2rgb_X_c_template(SwsContext *c, const int16_t *lumFilter, + const int16_t **lumSrc, int lumFilterSize, + const int16_t *chrFilter, const int16_t **chrUSrc, + const int16_t **chrVSrc, int chrFilterSize, + const int16_t **alpSrc, uint8_t *dest, int dstW, + int y, enum PixelFormat target, int hasAlpha) +{ + int i; + + for (i = 0; i < (dstW >> 1); i++) { + int 
j; int Y1 = 1 << 18; int Y2 = 1 << 18; int U = 1 << 18; int V = 1 << 18; - const uint8_t *r, *g, *b; + int av_unused A1, A2; + const void *r, *g, *b; for (j = 0; j < lumFilterSize; j++) { Y1 += lumSrc[j][i * 2] * lumFilter[j]; @@ -878,31 +1213,43 @@ yuv2rgb48_X_c_template(SwsContext *c, const int16_t *lumFilter, U = av_clip_uint8(U); V = av_clip_uint8(V); } + if (hasAlpha) { + A1 = 1 << 18; + A2 = 1 << 18; + for (j = 0; j < lumFilterSize; j++) { + A1 += alpSrc[j][i * 2 ] * lumFilter[j]; + A2 += alpSrc[j][i * 2 + 1] * lumFilter[j]; + } + A1 >>= 19; + A2 >>= 19; + if ((A1 | A2) & 0x100) { + A1 = av_clip_uint8(A1); + A2 = av_clip_uint8(A2); + } + } /* FIXME fix tables so that clipping is not needed and then use _NOCLIP*/ - r = (const uint8_t *) c->table_rV[V]; - g = (const uint8_t *)(c->table_gU[U] + c->table_gV[V]); - b = (const uint8_t *) c->table_bU[U]; + r = c->table_rV[V]; + g = (c->table_gU[U] + c->table_gV[V]); + b = c->table_bU[U]; - dest[ 0] = dest[ 1] = r_b[Y1]; - dest[ 2] = dest[ 3] = g[Y1]; - dest[ 4] = dest[ 5] = b_r[Y1]; - dest[ 6] = dest[ 7] = r_b[Y2]; - dest[ 8] = dest[ 9] = g[Y2]; - dest[10] = dest[11] = b_r[Y2]; - dest += 12; + yuv2rgb_write(dest, i, Y1, Y2, U, V, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0, + r, g, b, y, target, hasAlpha); } } static av_always_inline void -yuv2rgb48_2_c_template(SwsContext *c, const uint16_t *buf0, - const uint16_t *buf1, const uint16_t *ubuf0, - const uint16_t *ubuf1, const uint16_t *vbuf0, - const uint16_t *vbuf1, const uint16_t *abuf0, - const uint16_t *abuf1, uint8_t *dest, int dstW, - int yalpha, int uvalpha, int y, - enum PixelFormat target) +yuv2rgb_2_c_template(SwsContext *c, const int16_t *buf[2], + const int16_t *ubuf[2], const int16_t *vbuf[2], + const int16_t *abuf[2], uint8_t *dest, int dstW, + int yalpha, int uvalpha, int y, + enum PixelFormat target, int hasAlpha) { + const int16_t *buf0 = buf[0], *buf1 = buf[1], + *ubuf0 = ubuf[0], *ubuf1 = ubuf[1], + *vbuf0 = vbuf[0], *vbuf1 = vbuf[1], + *abuf0 = hasAlpha ? abuf[0] : NULL, + *abuf1 = hasAlpha ? abuf[1] : NULL; int yalpha1 = 4095 - yalpha; int uvalpha1 = 4095 - uvalpha; int i; @@ -912,28 +1259,30 @@ yuv2rgb48_2_c_template(SwsContext *c, const uint16_t *buf0, int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19; int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19; int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19; - const uint8_t *r = (const uint8_t *) c->table_rV[V], - *g = (const uint8_t *)(c->table_gU[U] + c->table_gV[V]), - *b = (const uint8_t *) c->table_bU[U]; + int A1, A2; + const void *r = c->table_rV[V], + *g = (c->table_gU[U] + c->table_gV[V]), + *b = c->table_bU[U]; + + if (hasAlpha) { + A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 19; + A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 19; + } - dest[ 0] = dest[ 1] = r_b[Y1]; - dest[ 2] = dest[ 3] = g[Y1]; - dest[ 4] = dest[ 5] = b_r[Y1]; - dest[ 6] = dest[ 7] = r_b[Y2]; - dest[ 8] = dest[ 9] = g[Y2]; - dest[10] = dest[11] = b_r[Y2]; - dest += 12; + yuv2rgb_write(dest, i, Y1, Y2, U, V, hasAlpha ? A1 : 0, hasAlpha ? 
A2 : 0, + r, g, b, y, target, hasAlpha); } } static av_always_inline void -yuv2rgb48_1_c_template(SwsContext *c, const uint16_t *buf0, - const uint16_t *ubuf0, const uint16_t *ubuf1, - const uint16_t *vbuf0, const uint16_t *vbuf1, - const uint16_t *abuf0, uint8_t *dest, int dstW, - int uvalpha, enum PixelFormat dstFormat, - int flags, int y, enum PixelFormat target) -{ +yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0, + const int16_t *ubuf[2], const int16_t *vbuf[2], + const int16_t *abuf0, uint8_t *dest, int dstW, + int uvalpha, int y, enum PixelFormat target, + int hasAlpha) +{ + const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1], + *vbuf0 = vbuf[0], *vbuf1 = vbuf[1]; int i; if (uvalpha < 2048) { @@ -942,17 +1291,18 @@ yuv2rgb48_1_c_template(SwsContext *c, const uint16_t *buf0, int Y2 = buf0[i * 2 + 1] >> 7; int U = ubuf1[i] >> 7; int V = vbuf1[i] >> 7; - const uint8_t *r = (const uint8_t *) c->table_rV[V], - *g = (const uint8_t *)(c->table_gU[U] + c->table_gV[V]), - *b = (const uint8_t *) c->table_bU[U]; - - dest[ 0] = dest[ 1] = r_b[Y1]; - dest[ 2] = dest[ 3] = g[Y1]; - dest[ 4] = dest[ 5] = b_r[Y1]; - dest[ 6] = dest[ 7] = r_b[Y2]; - dest[ 8] = dest[ 9] = g[Y2]; - dest[10] = dest[11] = b_r[Y2]; - dest += 12; + int A1, A2; + const void *r = c->table_rV[V], + *g = (c->table_gU[U] + c->table_gV[V]), + *b = c->table_bU[U]; + + if (hasAlpha) { + A1 = abuf0[i * 2 ] >> 7; + A2 = abuf0[i * 2 + 1] >> 7; + } + + yuv2rgb_write(dest, i, Y1, Y2, U, V, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0, + r, g, b, y, target, hasAlpha); } } else { for (i = 0; i < (dstW >> 1); i++) { @@ -960,447 +1310,184 @@ yuv2rgb48_1_c_template(SwsContext *c, const uint16_t *buf0, int Y2 = buf0[i * 2 + 1] >> 7; int U = (ubuf0[i] + ubuf1[i]) >> 8; int V = (vbuf0[i] + vbuf1[i]) >> 8; - const uint8_t *r = (const uint8_t *) c->table_rV[V], - *g = (const uint8_t *)(c->table_gU[U] + c->table_gV[V]), - *b = (const uint8_t *) c->table_bU[U]; - - dest[ 0] = dest[ 1] = r_b[Y1]; - dest[ 2] = dest[ 3] = g[Y1]; - dest[ 4] = dest[ 5] = b_r[Y1]; - dest[ 6] = dest[ 7] = r_b[Y2]; - dest[ 8] = dest[ 9] = g[Y2]; - dest[10] = dest[11] = b_r[Y2]; - dest += 12; + int A1, A2; + const void *r = c->table_rV[V], + *g = (c->table_gU[U] + c->table_gV[V]), + *b = c->table_bU[U]; + + if (hasAlpha) { + A1 = abuf0[i * 2 ] >> 7; + A2 = abuf0[i * 2 + 1] >> 7; + } + + yuv2rgb_write(dest, i, Y1, Y2, U, V, hasAlpha ? A1 : 0, hasAlpha ? 
A2 : 0, + r, g, b, y, target, hasAlpha); } } } -#undef r_b -#undef b_r +#define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \ +static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \ + const int16_t **lumSrc, int lumFilterSize, \ + const int16_t *chrFilter, const int16_t **chrUSrc, \ + const int16_t **chrVSrc, int chrFilterSize, \ + const int16_t **alpSrc, uint8_t *dest, int dstW, \ + int y) \ +{ \ + name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \ + chrFilter, chrUSrc, chrVSrc, chrFilterSize, \ + alpSrc, dest, dstW, y, fmt, hasAlpha); \ +} +#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \ +YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \ +static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \ + const int16_t *ubuf[2], const int16_t *vbuf[2], \ + const int16_t *abuf[2], uint8_t *dest, int dstW, \ + int yalpha, int uvalpha, int y) \ +{ \ + name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \ + dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha); \ +} \ + \ +static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \ + const int16_t *ubuf[2], const int16_t *vbuf[2], \ + const int16_t *abuf0, uint8_t *dest, int dstW, \ + int uvalpha, int y) \ +{ \ + name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \ + dstW, uvalpha, y, fmt, hasAlpha); \ +} -YUV2PACKEDWRAPPER(yuv2, rgb48, rgb48be, PIX_FMT_RGB48BE); -//YUV2PACKEDWRAPPER(yuv2, rgb48, rgb48le, PIX_FMT_RGB48LE); -YUV2PACKEDWRAPPER(yuv2, rgb48, bgr48be, PIX_FMT_BGR48BE); -//YUV2PACKEDWRAPPER(yuv2, rgb48, bgr48le, PIX_FMT_BGR48LE); - -#define YSCALE_YUV_2_RGBX_C(type,alpha) \ - for (i=0; i<(dstW>>1); i++) {\ - int j;\ - int Y1 = 1<<18;\ - int Y2 = 1<<18;\ - int U = 1<<18;\ - int V = 1<<18;\ - int av_unused A1, A2;\ - type av_unused *r, *b, *g;\ - const int i2= 2*i;\ - \ - for (j=0; j<lumFilterSize; j++) {\ - Y1 += lumSrc[j][i2] * lumFilter[j];\ - Y2 += lumSrc[j][i2+1] * lumFilter[j];\ - }\ - for (j=0; j<chrFilterSize; j++) {\ - U += chrUSrc[j][i] * chrFilter[j];\ - V += chrVSrc[j][i] * chrFilter[j];\ - }\ - Y1>>=19;\ - Y2>>=19;\ - U >>=19;\ - V >>=19;\ - if ((Y1|Y2|U|V)&0x100) {\ - Y1 = av_clip_uint8(Y1); \ - Y2 = av_clip_uint8(Y2); \ - U = av_clip_uint8(U); \ - V = av_clip_uint8(V); \ - }\ - if (alpha) {\ - A1 = 1<<18;\ - A2 = 1<<18;\ - for (j=0; j<lumFilterSize; j++) {\ - A1 += alpSrc[j][i2 ] * lumFilter[j];\ - A2 += alpSrc[j][i2+1] * lumFilter[j];\ - }\ - A1>>=19;\ - A2>>=19;\ - if ((A1|A2)&0x100) {\ - A1 = av_clip_uint8(A1); \ - A2 = av_clip_uint8(A2); \ - }\ - }\ - /* FIXME fix tables so that clipping is not needed and then use _NOCLIP*/\ - r = (type *)c->table_rV[V]; \ - g = (type *)(c->table_gU[U] + c->table_gV[V]); \ - b = (type *)c->table_bU[U]; - -#define YSCALE_YUV_2_RGBX_FULL_C(rnd,alpha) \ - for (i=0; i<dstW; i++) {\ - int j;\ - int Y = 1<<9;\ - int U = (1<<9)-(128<<19);\ - int V = (1<<9)-(128<<19);\ - int av_unused A;\ - int R,G,B;\ - \ - for (j=0; j<lumFilterSize; j++) {\ - Y += lumSrc[j][i ] * lumFilter[j];\ - }\ - for (j=0; j<chrFilterSize; j++) {\ - U += chrUSrc[j][i] * chrFilter[j];\ - V += chrVSrc[j][i] * chrFilter[j];\ - }\ - Y >>=10;\ - U >>=10;\ - V >>=10;\ - if (alpha) {\ - A = rnd>>3;\ - for (j=0; j<lumFilterSize; j++)\ - A += alpSrc[j][i ] * lumFilter[j];\ - A >>=19;\ - if (A&0x100)\ - A = av_clip_uint8(A);\ - }\ - Y-= c->yuv2rgb_y_offset;\ - Y*= c->yuv2rgb_y_coeff;\ - Y+= rnd;\ - R= Y + V*c->yuv2rgb_v2r_coeff;\ - G= Y + V*c->yuv2rgb_v2g_coeff + U*c->yuv2rgb_u2g_coeff;\ - B= Y + U*c->yuv2rgb_u2b_coeff;\ - if ((R|G|B)&(0xC0000000)) 
{\ - R = av_clip_uintp2(R, 30); \ - G = av_clip_uintp2(G, 30); \ - B = av_clip_uintp2(B, 30); \ - } +#if CONFIG_SMALL +YUV2RGBWRAPPER(yuv2rgb,, 32_1, PIX_FMT_RGB32_1, CONFIG_SWSCALE_ALPHA && c->alpPixBuf); +YUV2RGBWRAPPER(yuv2rgb,, 32, PIX_FMT_RGB32, CONFIG_SWSCALE_ALPHA && c->alpPixBuf); +#else +#if CONFIG_SWSCALE_ALPHA +YUV2RGBWRAPPER(yuv2rgb,, a32_1, PIX_FMT_RGB32_1, 1); +YUV2RGBWRAPPER(yuv2rgb,, a32, PIX_FMT_RGB32, 1); +#endif +YUV2RGBWRAPPER(yuv2rgb,, x32_1, PIX_FMT_RGB32_1, 0); +YUV2RGBWRAPPER(yuv2rgb,, x32, PIX_FMT_RGB32, 0); +#endif +YUV2RGBWRAPPER(yuv2, rgb, rgb24, PIX_FMT_RGB24, 0); +YUV2RGBWRAPPER(yuv2, rgb, bgr24, PIX_FMT_BGR24, 0); +YUV2RGBWRAPPER(yuv2rgb,, 16, PIX_FMT_RGB565, 0); +YUV2RGBWRAPPER(yuv2rgb,, 15, PIX_FMT_RGB555, 0); +YUV2RGBWRAPPER(yuv2rgb,, 12, PIX_FMT_RGB444, 0); +YUV2RGBWRAPPER(yuv2rgb,, 8, PIX_FMT_RGB8, 0); +YUV2RGBWRAPPER(yuv2rgb,, 4, PIX_FMT_RGB4, 0); +YUV2RGBWRAPPER(yuv2rgb,, 4b, PIX_FMT_RGB4_BYTE, 0); -#define YSCALE_YUV_2_RGB2_C(type,alpha) \ - for (i=0; i<(dstW>>1); i++) { \ - const int i2= 2*i; \ - int Y1= (buf0[i2 ]*yalpha1+buf1[i2 ]*yalpha)>>19; \ - int Y2= (buf0[i2+1]*yalpha1+buf1[i2+1]*yalpha)>>19; \ - int U= (ubuf0[i]*uvalpha1+ubuf1[i]*uvalpha)>>19; \ - int V= (vbuf0[i]*uvalpha1+vbuf1[i]*uvalpha)>>19; \ - type av_unused *r, *b, *g; \ - int av_unused A1, A2; \ - if (alpha) {\ - A1= (abuf0[i2 ]*yalpha1+abuf1[i2 ]*yalpha)>>19; \ - A2= (abuf0[i2+1]*yalpha1+abuf1[i2+1]*yalpha)>>19; \ - }\ - r = (type *)c->table_rV[V];\ - g = (type *)(c->table_gU[U] + c->table_gV[V]);\ - b = (type *)c->table_bU[U]; - -#define YSCALE_YUV_2_RGB1_C(type,alpha) \ - for (i=0; i<(dstW>>1); i++) {\ - const int i2= 2*i;\ - int Y1= buf0[i2 ]>>7;\ - int Y2= buf0[i2+1]>>7;\ - int U= (ubuf1[i])>>7;\ - int V= (vbuf1[i])>>7;\ - type av_unused *r, *b, *g;\ - int av_unused A1, A2;\ - if (alpha) {\ - A1= abuf0[i2 ]>>7;\ - A2= abuf0[i2+1]>>7;\ - }\ - r = (type *)c->table_rV[V];\ - g = (type *)(c->table_gU[U] + c->table_gV[V]);\ - b = (type *)c->table_bU[U]; - -#define YSCALE_YUV_2_RGB1B_C(type,alpha) \ - for (i=0; i<(dstW>>1); i++) {\ - const int i2= 2*i;\ - int Y1= buf0[i2 ]>>7;\ - int Y2= buf0[i2+1]>>7;\ - int U= (ubuf0[i] + ubuf1[i])>>8;\ - int V= (vbuf0[i] + vbuf1[i])>>8;\ - type av_unused *r, *b, *g;\ - int av_unused A1, A2;\ - if (alpha) {\ - A1= abuf0[i2 ]>>7;\ - A2= abuf0[i2+1]>>7;\ - }\ - r = (type *)c->table_rV[V];\ - g = (type *)(c->table_gU[U] + c->table_gV[V]);\ - b = (type *)c->table_bU[U]; - -#define YSCALE_YUV_2_ANYRGB_C(func)\ - switch(c->dstFormat) {\ - case PIX_FMT_RGBA:\ - case PIX_FMT_BGRA:\ - if (CONFIG_SMALL) {\ - int needAlpha = CONFIG_SWSCALE_ALPHA && c->alpPixBuf;\ - func(uint32_t,needAlpha)\ - ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1] + (needAlpha ? (A1<<24) : 0);\ - ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2] + (needAlpha ? (A2<<24) : 0);\ - }\ - } else {\ - if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {\ - func(uint32_t,1)\ - ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1] + (A1<<24);\ - ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2] + (A2<<24);\ - }\ - } else {\ - func(uint32_t,0)\ - ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1];\ - ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2];\ - }\ - }\ - }\ - break;\ - case PIX_FMT_ARGB:\ - case PIX_FMT_ABGR:\ - if (CONFIG_SMALL) {\ - int needAlpha = CONFIG_SWSCALE_ALPHA && c->alpPixBuf;\ - func(uint32_t,needAlpha)\ - ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1] + (needAlpha ? A1 : 0);\ - ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2] + (needAlpha ? 
A2 : 0);\ - }\ - } else {\ - if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {\ - func(uint32_t,1)\ - ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1] + A1;\ - ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2] + A2;\ - }\ - } else {\ - func(uint32_t,0)\ - ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1];\ - ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2];\ - }\ - }\ - } \ - break;\ - case PIX_FMT_RGB24:\ - func(uint8_t,0)\ - ((uint8_t*)dest)[0]= r[Y1];\ - ((uint8_t*)dest)[1]= g[Y1];\ - ((uint8_t*)dest)[2]= b[Y1];\ - ((uint8_t*)dest)[3]= r[Y2];\ - ((uint8_t*)dest)[4]= g[Y2];\ - ((uint8_t*)dest)[5]= b[Y2];\ - dest+=6;\ - }\ - break;\ - case PIX_FMT_BGR24:\ - func(uint8_t,0)\ - ((uint8_t*)dest)[0]= b[Y1];\ - ((uint8_t*)dest)[1]= g[Y1];\ - ((uint8_t*)dest)[2]= r[Y1];\ - ((uint8_t*)dest)[3]= b[Y2];\ - ((uint8_t*)dest)[4]= g[Y2];\ - ((uint8_t*)dest)[5]= r[Y2];\ - dest+=6;\ - }\ - break;\ - case PIX_FMT_RGB565:\ - case PIX_FMT_BGR565:\ - {\ - const int dr1= dither_2x2_8[y&1 ][0];\ - const int dg1= dither_2x2_4[y&1 ][0];\ - const int db1= dither_2x2_8[(y&1)^1][0];\ - const int dr2= dither_2x2_8[y&1 ][1];\ - const int dg2= dither_2x2_4[y&1 ][1];\ - const int db2= dither_2x2_8[(y&1)^1][1];\ - func(uint16_t,0)\ - ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];\ - ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];\ - }\ - }\ - break;\ - case PIX_FMT_RGB555:\ - case PIX_FMT_BGR555:\ - {\ - const int dr1= dither_2x2_8[y&1 ][0];\ - const int dg1= dither_2x2_8[y&1 ][1];\ - const int db1= dither_2x2_8[(y&1)^1][0];\ - const int dr2= dither_2x2_8[y&1 ][1];\ - const int dg2= dither_2x2_8[y&1 ][0];\ - const int db2= dither_2x2_8[(y&1)^1][1];\ - func(uint16_t,0)\ - ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];\ - ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];\ - }\ - }\ - break;\ - case PIX_FMT_RGB444:\ - case PIX_FMT_BGR444:\ - {\ - const int dr1= dither_4x4_16[y&3 ][0];\ - const int dg1= dither_4x4_16[y&3 ][1];\ - const int db1= dither_4x4_16[(y&3)^3][0];\ - const int dr2= dither_4x4_16[y&3 ][1];\ - const int dg2= dither_4x4_16[y&3 ][0];\ - const int db2= dither_4x4_16[(y&3)^3][1];\ - func(uint16_t,0)\ - ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];\ - ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];\ - }\ - }\ - break;\ - case PIX_FMT_RGB8:\ - case PIX_FMT_BGR8:\ - {\ - const uint8_t * const d64= dither_8x8_73[y&7];\ - const uint8_t * const d32= dither_8x8_32[y&7];\ - func(uint8_t,0)\ - ((uint8_t*)dest)[i2+0]= r[Y1+d32[(i2+0)&7]] + g[Y1+d32[(i2+0)&7]] + b[Y1+d64[(i2+0)&7]];\ - ((uint8_t*)dest)[i2+1]= r[Y2+d32[(i2+1)&7]] + g[Y2+d32[(i2+1)&7]] + b[Y2+d64[(i2+1)&7]];\ - }\ - }\ - break;\ - case PIX_FMT_RGB4:\ - case PIX_FMT_BGR4:\ - {\ - const uint8_t * const d64= dither_8x8_73 [y&7];\ - const uint8_t * const d128=dither_8x8_220[y&7];\ - func(uint8_t,0)\ - ((uint8_t*)dest)[i]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]]\ - + ((r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]])<<4);\ - }\ - }\ - break;\ - case PIX_FMT_RGB4_BYTE:\ - case PIX_FMT_BGR4_BYTE:\ - {\ - const uint8_t * const d64= dither_8x8_73 [y&7];\ - const uint8_t * const d128=dither_8x8_220[y&7];\ - func(uint8_t,0)\ - ((uint8_t*)dest)[i2+0]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]];\ - ((uint8_t*)dest)[i2+1]= r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]];\ - }\ - }\ - break;\ - } - -static void yuv2packedX_c(SwsContext *c, const int16_t *lumFilter, +static av_always_inline void 
+yuv2rgb_full_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, - const int16_t **alpSrc, uint8_t *dest, int dstW, int y) + const int16_t **alpSrc, uint8_t *dest, + int dstW, int y, enum PixelFormat target, int hasAlpha) { int i; - YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGBX_C) -} + int step = (target == PIX_FMT_RGB24 || target == PIX_FMT_BGR24) ? 3 : 4; -static void yuv2rgbX_c_full(SwsContext *c, const int16_t *lumFilter, - const int16_t **lumSrc, int lumFilterSize, - const int16_t *chrFilter, const int16_t **chrUSrc, - const int16_t **chrVSrc, int chrFilterSize, - const int16_t **alpSrc, uint8_t *dest, int dstW, int y) -{ - int i; - int step= c->dstFormatBpp/8; - int aidx= 3; - - switch(c->dstFormat) { - case PIX_FMT_ARGB: - dest++; - aidx= 0; - case PIX_FMT_RGB24: - aidx--; - case PIX_FMT_RGBA: - if (CONFIG_SMALL) { - int needAlpha = CONFIG_SWSCALE_ALPHA && c->alpPixBuf; - YSCALE_YUV_2_RGBX_FULL_C(1<<21, needAlpha) - dest[aidx]= needAlpha ? A : 255; - dest[0]= R>>22; - dest[1]= G>>22; - dest[2]= B>>22; - dest+= step; - } - } else { - if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) { - YSCALE_YUV_2_RGBX_FULL_C(1<<21, 1) - dest[aidx]= A; - dest[0]= R>>22; - dest[1]= G>>22; - dest[2]= B>>22; - dest+= step; - } - } else { - YSCALE_YUV_2_RGBX_FULL_C(1<<21, 0) - dest[aidx]= 255; - dest[0]= R>>22; - dest[1]= G>>22; - dest[2]= B>>22; - dest+= step; - } - } + for (i = 0; i < dstW; i++) { + int j; + int Y = 1<<9; + int U = (1<<9)-(128 << 19); + int V = (1<<9)-(128 << 19); + int av_unused A; + int R, G, B; + + for (j = 0; j < lumFilterSize; j++) { + Y += lumSrc[j][i] * lumFilter[j]; } - break; - case PIX_FMT_ABGR: - dest++; - aidx= 0; - case PIX_FMT_BGR24: - aidx--; - case PIX_FMT_BGRA: - if (CONFIG_SMALL) { - int needAlpha = CONFIG_SWSCALE_ALPHA && c->alpPixBuf; - YSCALE_YUV_2_RGBX_FULL_C(1<<21, needAlpha) - dest[aidx]= needAlpha ? 
A : 255; - dest[0]= B>>22; - dest[1]= G>>22; - dest[2]= R>>22; - dest+= step; - } - } else { - if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) { - YSCALE_YUV_2_RGBX_FULL_C(1<<21, 1) - dest[aidx]= A; - dest[0]= B>>22; - dest[1]= G>>22; - dest[2]= R>>22; - dest+= step; - } - } else { - YSCALE_YUV_2_RGBX_FULL_C(1<<21, 0) - dest[aidx]= 255; - dest[0]= B>>22; - dest[1]= G>>22; - dest[2]= R>>22; - dest+= step; - } + for (j = 0; j < chrFilterSize; j++) { + U += chrUSrc[j][i] * chrFilter[j]; + V += chrVSrc[j][i] * chrFilter[j]; + } + Y >>= 10; + U >>= 10; + V >>= 10; + if (hasAlpha) { + A = 1 << 18; + for (j = 0; j < lumFilterSize; j++) { + A += alpSrc[j][i] * lumFilter[j]; } + A >>= 19; + if (A & 0x100) + A = av_clip_uint8(A); + } + Y -= c->yuv2rgb_y_offset; + Y *= c->yuv2rgb_y_coeff; + Y += 1 << 21; + R = Y + V*c->yuv2rgb_v2r_coeff; + G = Y + V*c->yuv2rgb_v2g_coeff + U*c->yuv2rgb_u2g_coeff; + B = Y + U*c->yuv2rgb_u2b_coeff; + if ((R | G | B) & 0xC0000000) { + R = av_clip_uintp2(R, 30); + G = av_clip_uintp2(G, 30); + B = av_clip_uintp2(B, 30); } - break; - default: - assert(0); - } -} - -/** - * vertical bilinear scale YV12 to RGB - */ -static void yuv2packed2_c(SwsContext *c, const uint16_t *buf0, - const uint16_t *buf1, const uint16_t *ubuf0, - const uint16_t *ubuf1, const uint16_t *vbuf0, - const uint16_t *vbuf1, const uint16_t *abuf0, - const uint16_t *abuf1, uint8_t *dest, int dstW, - int yalpha, int uvalpha, int y) -{ - int yalpha1=4095- yalpha; - int uvalpha1=4095-uvalpha; - int i; - - YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C) -} - -/** - * YV12 to RGB without scaling or interpolating - */ -static void yuv2packed1_c(SwsContext *c, const uint16_t *buf0, - const uint16_t *ubuf0, const uint16_t *ubuf1, - const uint16_t *vbuf0, const uint16_t *vbuf1, - const uint16_t *abuf0, uint8_t *dest, int dstW, - int uvalpha, enum PixelFormat dstFormat, - int flags, int y) -{ - int i; - if (uvalpha < 2048) { - YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C) - } else { - YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C) + switch(target) { + case PIX_FMT_ARGB: + dest[0] = hasAlpha ? A : 255; + dest[1] = R >> 22; + dest[2] = G >> 22; + dest[3] = B >> 22; + break; + case PIX_FMT_RGB24: + dest[0] = R >> 22; + dest[1] = G >> 22; + dest[2] = B >> 22; + break; + case PIX_FMT_RGBA: + dest[0] = R >> 22; + dest[1] = G >> 22; + dest[2] = B >> 22; + dest[3] = hasAlpha ? A : 255; + break; + case PIX_FMT_ABGR: + dest[0] = hasAlpha ? A : 255; + dest[1] = B >> 22; + dest[2] = G >> 22; + dest[3] = R >> 22; + break; + case PIX_FMT_BGR24: + dest[0] = B >> 22; + dest[1] = G >> 22; + dest[2] = R >> 22; + break; + case PIX_FMT_BGRA: + dest[0] = B >> 22; + dest[1] = G >> 22; + dest[2] = R >> 22; + dest[3] = hasAlpha ? 
A : 255; + break; + } + dest += step; } } +#if CONFIG_SMALL +YUV2RGBWRAPPERX(yuv2, rgb_full, bgra32_full, PIX_FMT_BGRA, CONFIG_SWSCALE_ALPHA && c->alpPixBuf); +YUV2RGBWRAPPERX(yuv2, rgb_full, abgr32_full, PIX_FMT_ABGR, CONFIG_SWSCALE_ALPHA && c->alpPixBuf); +YUV2RGBWRAPPERX(yuv2, rgb_full, rgba32_full, PIX_FMT_RGBA, CONFIG_SWSCALE_ALPHA && c->alpPixBuf); +YUV2RGBWRAPPERX(yuv2, rgb_full, argb32_full, PIX_FMT_ARGB, CONFIG_SWSCALE_ALPHA && c->alpPixBuf); +#else +#if CONFIG_SWSCALE_ALPHA +YUV2RGBWRAPPERX(yuv2, rgb_full, bgra32_full, PIX_FMT_BGRA, 1); +YUV2RGBWRAPPERX(yuv2, rgb_full, abgr32_full, PIX_FMT_ABGR, 1); +YUV2RGBWRAPPERX(yuv2, rgb_full, rgba32_full, PIX_FMT_RGBA, 1); +YUV2RGBWRAPPERX(yuv2, rgb_full, argb32_full, PIX_FMT_ARGB, 1); +#endif +YUV2RGBWRAPPERX(yuv2, rgb_full, bgrx32_full, PIX_FMT_BGRA, 0); +YUV2RGBWRAPPERX(yuv2, rgb_full, xbgr32_full, PIX_FMT_ABGR, 0); +YUV2RGBWRAPPERX(yuv2, rgb_full, rgbx32_full, PIX_FMT_RGBA, 0); +YUV2RGBWRAPPERX(yuv2, rgb_full, xrgb32_full, PIX_FMT_ARGB, 0); +#endif +YUV2RGBWRAPPERX(yuv2, rgb_full, bgr24_full, PIX_FMT_BGR24, 0); +YUV2RGBWRAPPERX(yuv2, rgb_full, rgb24_full, PIX_FMT_RGB24, 0); + static av_always_inline void fillPlane(uint8_t* plane, int stride, int width, int height, int y, uint8_t val) @@ -1419,21 +1506,21 @@ static av_always_inline void fillPlane(uint8_t* plane, int stride, #define b ((origin == PIX_FMT_BGR48BE || origin == PIX_FMT_BGR48LE) ? r_b : b_r) static av_always_inline void -rgb48ToY_c_template(int16_t *dst, const uint16_t *src, int width, +rgb48ToY_c_template(uint16_t *dst, const uint16_t *src, int width, enum PixelFormat origin) { int i; for (i = 0; i < width; i++) { - int r_b = input_pixel(&src[i*3+0]); - int g = input_pixel(&src[i*3+1]); - int b_r = input_pixel(&src[i*3+2]); + unsigned int r_b = input_pixel(&src[i*3+0]); + unsigned int g = input_pixel(&src[i*3+1]); + unsigned int b_r = input_pixel(&src[i*3+2]); - dst[i] = (RY*r + GY*g + BY*b + (32<<(RGB2YUV_SHIFT-1+8)) + (1<<(RGB2YUV_SHIFT-7+8))) >> (RGB2YUV_SHIFT-6+8); + dst[i] = (RY*r + GY*g + BY*b + (0x2001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT; } } static av_always_inline void -rgb48ToUV_c_template(int16_t *dstU, int16_t *dstV, +rgb48ToUV_c_template(uint16_t *dstU, uint16_t *dstV, const uint16_t *src1, const uint16_t *src2, int width, enum PixelFormat origin) { @@ -1444,25 +1531,25 @@ rgb48ToUV_c_template(int16_t *dstU, int16_t *dstV, int g = input_pixel(&src1[i*3+1]); int b_r = input_pixel(&src1[i*3+2]); - dstU[i] = (RU*r + GU*g + BU*b + (256<<(RGB2YUV_SHIFT-1+8)) + (1<<(RGB2YUV_SHIFT-7+8))) >> (RGB2YUV_SHIFT-6+8); - dstV[i] = (RV*r + GV*g + BV*b + (256<<(RGB2YUV_SHIFT-1+8)) + (1<<(RGB2YUV_SHIFT-7+8))) >> (RGB2YUV_SHIFT-6+8); + dstU[i] = (RU*r + GU*g + BU*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT; + dstV[i] = (RV*r + GV*g + BV*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT; } } static av_always_inline void -rgb48ToUV_half_c_template(int16_t *dstU, int16_t *dstV, +rgb48ToUV_half_c_template(uint16_t *dstU, uint16_t *dstV, const uint16_t *src1, const uint16_t *src2, int width, enum PixelFormat origin) { int i; assert(src1==src2); for (i = 0; i < width; i++) { - int r_b = (input_pixel(&src1[6*i + 0])) + (input_pixel(&src1[6*i + 3])); - int g = (input_pixel(&src1[6*i + 1])) + (input_pixel(&src1[6*i + 4])); - int b_r = (input_pixel(&src1[6*i + 2])) + (input_pixel(&src1[6*i + 5])); + int r_b = (input_pixel(&src1[6 * i + 0]) + input_pixel(&src1[6 * i + 3]) + 1) >> 1; + int g = (input_pixel(&src1[6 * i + 1]) + input_pixel(&src1[6 * i + 4]) + 1) >> 1; + int b_r = 
(input_pixel(&src1[6 * i + 2]) + input_pixel(&src1[6 * i + 5]) + 1) >> 1; - dstU[i]= (RU*r + GU*g + BU*b + (256U<<(RGB2YUV_SHIFT+8)) + (1<<(RGB2YUV_SHIFT-6+8))) >> (RGB2YUV_SHIFT-5+8); - dstV[i]= (RV*r + GV*g + BV*b + (256U<<(RGB2YUV_SHIFT+8)) + (1<<(RGB2YUV_SHIFT-6+8))) >> (RGB2YUV_SHIFT-5+8); + dstU[i]= (RU*r + GU*g + BU*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT; + dstV[i]= (RV*r + GV*g + BV*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT; } } @@ -1471,23 +1558,31 @@ rgb48ToUV_half_c_template(int16_t *dstU, int16_t *dstV, #undef input_pixel #define rgb48funcs(pattern, BE_LE, origin) \ -static void pattern ## 48 ## BE_LE ## ToY_c(uint8_t *dst, const uint8_t *src, \ +static void pattern ## 48 ## BE_LE ## ToY_c(uint8_t *_dst, const uint8_t *_src, \ int width, uint32_t *unused) \ { \ + const uint16_t *src = (const uint16_t *) _src; \ + uint16_t *dst = (uint16_t *) _dst; \ rgb48ToY_c_template(dst, src, width, origin); \ } \ \ -static void pattern ## 48 ## BE_LE ## ToUV_c(uint8_t *dstU, uint8_t *dstV, \ - const uint8_t *src1, const uint8_t *src2, \ +static void pattern ## 48 ## BE_LE ## ToUV_c(uint8_t *_dstU, uint8_t *_dstV, \ + const uint8_t *_src1, const uint8_t *_src2, \ int width, uint32_t *unused) \ { \ + const uint16_t *src1 = (const uint16_t *) _src1, \ + *src2 = (const uint16_t *) _src2; \ + uint16_t *dstU = (uint16_t *) _dstU, *dstV = (uint16_t *) _dstV; \ rgb48ToUV_c_template(dstU, dstV, src1, src2, width, origin); \ } \ \ -static void pattern ## 48 ## BE_LE ## ToUV_half_c(uint8_t *dstU, uint8_t *dstV, \ - const uint8_t *src1, const uint8_t *src2, \ +static void pattern ## 48 ## BE_LE ## ToUV_half_c(uint8_t *_dstU, uint8_t *_dstV, \ + const uint8_t *_src1, const uint8_t *_src2, \ int width, uint32_t *unused) \ { \ + const uint16_t *src1 = (const uint16_t *) _src1, \ + *src2 = (const uint16_t *) _src2; \ + uint16_t *dstU = (uint16_t *) _dstU, *dstV = (uint16_t *) _dstV; \ rgb48ToUV_half_c_template(dstU, dstV, src1, src2, width, origin); \ } @@ -1720,13 +1815,26 @@ static void yuy2ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, assert(src1 == src2); } -static void LEToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, - const uint8_t *src2, int width, uint32_t *unused) +static void bswap16Y_c(uint8_t *_dst, const uint8_t *_src, int width, uint32_t *unused) { int i; + const uint16_t *src = (const uint16_t *) _src; + uint16_t *dst = (uint16_t *) _dst; for (i=0; i<width; i++) { - dstU[i]= src1[2*i + 1]; - dstV[i]= src2[2*i + 1]; + dst[i] = av_bswap16(src[i]); + } +} + +static void bswap16UV_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *_src1, + const uint8_t *_src2, int width, uint32_t *unused) +{ + int i; + const uint16_t *src1 = (const uint16_t *) _src1, + *src2 = (const uint16_t *) _src2; + uint16_t *dstU = (uint16_t *) _dstU, *dstV = (uint16_t *) _dstV; + for (i=0; i<width; i++) { + dstU[i] = av_bswap16(src1[i]); + dstV[i] = av_bswap16(src2[i]); } } @@ -1751,16 +1859,6 @@ static void uyvyToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, assert(src1 == src2); } -static void BEToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, - const uint8_t *src2, int width, uint32_t *unused) -{ - int i; - for (i=0; i<width; i++) { - dstU[i]= src1[2*i]; - dstV[i]= src2[2*i]; - } -} - static av_always_inline void nvXXtoUV_c(uint8_t *dst1, uint8_t *dst2, const uint8_t *src, int width) { @@ -1787,53 +1885,6 @@ static void nv21ToUV_c(uint8_t *dstU, uint8_t *dstV, #define input_pixel(pos) (isBE(origin) ? 
AV_RB16(pos) : AV_RL16(pos)) -// FIXME Maybe dither instead. -static av_always_inline void -yuv9_OR_10ToUV_c_template(uint8_t *dstU, uint8_t *dstV, - const uint8_t *_srcU, const uint8_t *_srcV, - int width, enum PixelFormat origin, int depth) -{ - int i; - const uint16_t *srcU = (const uint16_t *) _srcU; - const uint16_t *srcV = (const uint16_t *) _srcV; - - for (i = 0; i < width; i++) { - dstU[i] = input_pixel(&srcU[i]) >> (depth - 8); - dstV[i] = input_pixel(&srcV[i]) >> (depth - 8); - } -} - -static av_always_inline void -yuv9_or_10ToY_c_template(uint8_t *dstY, const uint8_t *_srcY, - int width, enum PixelFormat origin, int depth) -{ - int i; - const uint16_t *srcY = (const uint16_t*)_srcY; - - for (i = 0; i < width; i++) - dstY[i] = input_pixel(&srcY[i]) >> (depth - 8); -} - -#undef input_pixel - -#define YUV_NBPS(depth, BE_LE, origin) \ -static void BE_LE ## depth ## ToUV_c(uint8_t *dstU, uint8_t *dstV, \ - const uint8_t *srcU, const uint8_t *srcV, \ - int width, uint32_t *unused) \ -{ \ - yuv9_OR_10ToUV_c_template(dstU, dstV, srcU, srcV, width, origin, depth); \ -} \ -static void BE_LE ## depth ## ToY_c(uint8_t *dstY, const uint8_t *srcY, \ - int width, uint32_t *unused) \ -{ \ - yuv9_or_10ToY_c_template(dstY, srcY, width, origin, depth); \ -} - -YUV_NBPS( 9, LE, PIX_FMT_YUV420P9LE); -YUV_NBPS( 9, BE, PIX_FMT_YUV420P9BE); -YUV_NBPS(10, LE, PIX_FMT_YUV420P10LE); -YUV_NBPS(10, BE, PIX_FMT_YUV420P10BE); - static void bgr24ToY_c(int16_t *dst, const uint8_t *src, int width, uint32_t *unused) { @@ -1920,10 +1971,34 @@ static void rgb24ToUV_half_c(int16_t *dstU, int16_t *dstV, const uint8_t *src1, } } +static void hScale16_c(SwsContext *c, int16_t *_dst, int dstW, const uint8_t *_src, + const int16_t *filter, + const int16_t *filterPos, int filterSize) +{ + int i; + int32_t *dst = (int32_t *) _dst; + const uint16_t *src = (const uint16_t *) _src; + int bits = av_pix_fmt_descriptors[c->srcFormat].comp[0].depth_minus1; + int sh = (bits <= 7) ? 
11 : (bits - 4); + + if((isAnyRGB(c->srcFormat) || c->srcFormat==PIX_FMT_PAL8) && av_pix_fmt_descriptors[c->srcFormat].comp[0].depth_minus1<15) + sh= 9; + + for (i = 0; i < dstW; i++) { + int j; + int srcPos = filterPos[i]; + int val = 0; + + for (j = 0; j < filterSize; j++) { + val += src[srcPos + j] * filter[filterSize * i + j]; + } + // filter=14 bit, input=16 bit, output=30 bit, >> 11 makes 19 bit + dst[i] = FFMIN(val >> sh, (1 << 19) - 1); + } +} // bilinear / bicubic scaling -static void hScale_c(int16_t *dst, int dstW, const uint8_t *src, - int srcW, int xInc, +static void hScale_c(SwsContext *c, int16_t *dst, int dstW, const uint8_t *src, const int16_t *filter, const int16_t *filterPos, int filterSize) { @@ -1941,7 +2016,7 @@ static void hScale_c(int16_t *dst, int dstW, const uint8_t *src, } } -static inline void hScale16_c(int16_t *dst, int dstW, const uint16_t *src, int srcW, int xInc, +static inline void hScale16N_c(int16_t *dst, int dstW, const uint16_t *src, int srcW, int xInc, const int16_t *filter, const int16_t *filterPos, long filterSize, int shift) { int i, j; @@ -1956,7 +2031,7 @@ static inline void hScale16_c(int16_t *dst, int dstW, const uint16_t *src, int s } } -static inline void hScale16X_c(int16_t *dst, int dstW, const uint16_t *src, int srcW, int xInc, +static inline void hScale16NX_c(int16_t *dst, int dstW, const uint16_t *src, int srcW, int xInc, const int16_t *filter, const int16_t *filterPos, long filterSize, int shift) { int i, j; @@ -2001,6 +2076,41 @@ static void lumRangeFromJpeg_c(int16_t *dst, int width) dst[i] = (dst[i]*14071 + 33561947)>>14; } +static void chrRangeToJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width) +{ + int i; + int32_t *dstU = (int32_t *) _dstU; + int32_t *dstV = (int32_t *) _dstV; + for (i = 0; i < width; i++) { + dstU[i] = (FFMIN(dstU[i],30775<<4)*4663 - (9289992<<4))>>12; //-264 + dstV[i] = (FFMIN(dstV[i],30775<<4)*4663 - (9289992<<4))>>12; //-264 + } +} +static void chrRangeFromJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width) +{ + int i; + int32_t *dstU = (int32_t *) _dstU; + int32_t *dstV = (int32_t *) _dstV; + for (i = 0; i < width; i++) { + dstU[i] = (dstU[i]*1799 + (4081085<<4))>>11; //1469 + dstV[i] = (dstV[i]*1799 + (4081085<<4))>>11; //1469 + } +} +static void lumRangeToJpeg16_c(int16_t *_dst, int width) +{ + int i; + int32_t *dst = (int32_t *) _dst; + for (i = 0; i < width; i++) + dst[i] = (FFMIN(dst[i],30189<<4)*4769 - (39057361<<2))>>12; +} +static void lumRangeFromJpeg16_c(int16_t *_dst, int width) +{ + int i; + int32_t *dst = (int32_t *) _dst; + for (i = 0; i < width; i++) + dst[i] = (dst[i]*(14071/4) + (33561947<<4)/4)>>12; +} + static void hyscale_fast_c(SwsContext *c, int16_t *dst, int dstWidth, const uint8_t *src, int srcW, int xInc) { @@ -2016,8 +2126,25 @@ static void hyscale_fast_c(SwsContext *c, int16_t *dst, int dstWidth, dst[i] = src[srcW-1]*128; } +static void scale8To16Rv_c(uint16_t *_dst, const uint8_t *src, int len) +{ + int i; + uint8_t *dst = (uint8_t *) _dst; + for (i = len - 1; i >= 0; i--) { + dst[i * 2] = dst[i * 2 + 1] = src[i]; + } +} + +static void scale19To15Fw_c(int16_t *dst, const int32_t *src, int len) +{ + int i; + for (i = 0; i < len; i++) { + dst[i] = src[i] >> 4; + } +} + // *** horizontal scale Y line to temp buffer -static av_always_inline void hyscale(SwsContext *c, uint16_t *dst, int dstWidth, +static av_always_inline void hyscale(SwsContext *c, int16_t *dst, int dstWidth, const uint8_t *src, int srcW, int xInc, const int16_t *hLumFilter, const int16_t *hLumFilterPos, int 
hLumFilterSize, @@ -2032,17 +2159,26 @@ static av_always_inline void hyscale(SwsContext *c, uint16_t *dst, int dstWidth, src= formatConvBuffer; } + if (av_pix_fmt_descriptors[c->srcFormat].comp[0].depth_minus1 < 8 && c->scalingBpp == 16 && !isAnyRGB(c->srcFormat)) { + c->scale8To16Rv((uint16_t *) formatConvBuffer, src, srcW); + src = formatConvBuffer; + } + if (c->hScale16) { int shift= isAnyRGB(c->srcFormat) || c->srcFormat==PIX_FMT_PAL8 ? 13 : av_pix_fmt_descriptors[c->srcFormat].comp[0].depth_minus1; c->hScale16(dst, dstWidth, (const uint16_t*)src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize, shift); } else if (!c->hyscale_fast) { - c->hScale(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize); + c->hScale(c, dst, dstWidth, src, hLumFilter, hLumFilterPos, hLumFilterSize); } else { // fast bilinear upscale / crap downscale c->hyscale_fast(c, dst, dstWidth, src, srcW, xInc); } if (convertRange) convertRange(dst, dstWidth); + + if (av_pix_fmt_descriptors[c->dstFormat].comp[0].depth_minus1 < 15 && c->scalingBpp == 16) { + c->scale19To15Fw(dst, (int32_t *) dst, dstWidth); + } } static void hcscale_fast_c(SwsContext *c, int16_t *dst1, int16_t *dst2, @@ -2064,7 +2200,7 @@ static void hcscale_fast_c(SwsContext *c, int16_t *dst1, int16_t *dst2, } } -static av_always_inline void hcscale(SwsContext *c, uint16_t *dst1, uint16_t *dst2, int dstWidth, +static av_always_inline void hcscale(SwsContext *c, int16_t *dst1, int16_t *dst2, int dstWidth, const uint8_t *src1, const uint8_t *src2, int srcW, int xInc, const int16_t *hChrFilter, const int16_t *hChrFilterPos, int hChrFilterSize, @@ -2077,19 +2213,32 @@ static av_always_inline void hcscale(SwsContext *c, uint16_t *dst1, uint16_t *ds src2= buf2; } + if (av_pix_fmt_descriptors[c->srcFormat].comp[0].depth_minus1 < 8 && c->scalingBpp == 16 && !isAnyRGB(c->srcFormat)) { + uint8_t *buf2 = (formatConvBuffer + FFALIGN(srcW * 2+78, 16)); + c->scale8To16Rv((uint16_t *) formatConvBuffer, src1, srcW); + c->scale8To16Rv((uint16_t *) buf2, src2, srcW); + src1 = formatConvBuffer; + src2 = buf2; + } + if (c->hScale16) { int shift= isAnyRGB(c->srcFormat) || c->srcFormat==PIX_FMT_PAL8 ? 
13 : av_pix_fmt_descriptors[c->srcFormat].comp[0].depth_minus1; c->hScale16(dst1, dstWidth, (const uint16_t*)src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize, shift); c->hScale16(dst2, dstWidth, (const uint16_t*)src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize, shift); } else if (!c->hcscale_fast) { - c->hScale(dst1, dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize); - c->hScale(dst2, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize); + c->hScale(c, dst1, dstWidth, src1, hChrFilter, hChrFilterPos, hChrFilterSize); + c->hScale(c, dst2, dstWidth, src2, hChrFilter, hChrFilterPos, hChrFilterSize); } else { // fast bilinear upscale / crap downscale c->hcscale_fast(c, dst1, dst2, dstWidth, src1, src2, srcW, xInc); } if (c->chrConvertRange) c->chrConvertRange(dst1, dst2, dstWidth); + + if (av_pix_fmt_descriptors[c->dstFormat].comp[0].depth_minus1 < 15 && c->scalingBpp == 16) { + c->scale19To15Fw(dst1, (int32_t *) dst1, dstWidth); + c->scale19To15Fw(dst2, (int32_t *) dst2, dstWidth); + } } static av_always_inline void @@ -2115,8 +2264,74 @@ find_c_packed_planar_out_funcs(SwsContext *c, *yuv2yuvX = yuv2yuvX_c; } if(c->flags & SWS_FULL_CHR_H_INT) { - *yuv2packedX = yuv2rgbX_c_full; + switch (dstFormat) { + case PIX_FMT_RGBA: +#if CONFIG_SMALL + *yuv2packedX = yuv2rgba32_full_X_c; +#else +#if CONFIG_SWSCALE_ALPHA + if (c->alpPixBuf) { + *yuv2packedX = yuv2rgba32_full_X_c; + } else +#endif /* CONFIG_SWSCALE_ALPHA */ + { + *yuv2packedX = yuv2rgbx32_full_X_c; + } +#endif /* !CONFIG_SMALL */ + break; + case PIX_FMT_ARGB: +#if CONFIG_SMALL + *yuv2packedX = yuv2argb32_full_X_c; +#else +#if CONFIG_SWSCALE_ALPHA + if (c->alpPixBuf) { + *yuv2packedX = yuv2argb32_full_X_c; + } else +#endif /* CONFIG_SWSCALE_ALPHA */ + { + *yuv2packedX = yuv2xrgb32_full_X_c; + } +#endif /* !CONFIG_SMALL */ + break; + case PIX_FMT_BGRA: +#if CONFIG_SMALL + *yuv2packedX = yuv2bgra32_full_X_c; +#else +#if CONFIG_SWSCALE_ALPHA + if (c->alpPixBuf) { + *yuv2packedX = yuv2bgra32_full_X_c; + } else +#endif /* CONFIG_SWSCALE_ALPHA */ + { + *yuv2packedX = yuv2bgrx32_full_X_c; + } +#endif /* !CONFIG_SMALL */ + break; + case PIX_FMT_ABGR: +#if CONFIG_SMALL + *yuv2packedX = yuv2abgr32_full_X_c; +#else +#if CONFIG_SWSCALE_ALPHA + if (c->alpPixBuf) { + *yuv2packedX = yuv2abgr32_full_X_c; + } else +#endif /* CONFIG_SWSCALE_ALPHA */ + { + *yuv2packedX = yuv2xbgr32_full_X_c; + } +#endif /* !CONFIG_SMALL */ + break; + case PIX_FMT_RGB24: + *yuv2packedX = yuv2rgb24_full_X_c; + break; + case PIX_FMT_BGR24: + *yuv2packedX = yuv2bgr24_full_X_c; + break; + } + if(!*yuv2packedX) + goto YUV_PACKED; } else { + YUV_PACKED: switch (dstFormat) { case PIX_FMT_GRAY16BE: *yuv2packed1 = yuv2gray16BE_1_c; @@ -2149,29 +2364,118 @@ find_c_packed_planar_out_funcs(SwsContext *c, *yuv2packedX = yuv2uyvy422_X_c; break; case PIX_FMT_RGB48LE: - //*yuv2packed1 = yuv2rgb48le_1_c; - //*yuv2packed2 = yuv2rgb48le_2_c; - //*yuv2packedX = yuv2rgb48le_X_c; - //break; + *yuv2packed1 = yuv2rgb48le_1_c; + *yuv2packed2 = yuv2rgb48le_2_c; + *yuv2packedX = yuv2rgb48le_X_c; + break; case PIX_FMT_RGB48BE: *yuv2packed1 = yuv2rgb48be_1_c; *yuv2packed2 = yuv2rgb48be_2_c; *yuv2packedX = yuv2rgb48be_X_c; break; case PIX_FMT_BGR48LE: - //*yuv2packed1 = yuv2bgr48le_1_c; - //*yuv2packed2 = yuv2bgr48le_2_c; - //*yuv2packedX = yuv2bgr48le_X_c; - //break; + *yuv2packed1 = yuv2bgr48le_1_c; + *yuv2packed2 = yuv2bgr48le_2_c; + *yuv2packedX = yuv2bgr48le_X_c; + break; case PIX_FMT_BGR48BE: *yuv2packed1 = yuv2bgr48be_1_c; *yuv2packed2 = 
yuv2bgr48be_2_c; *yuv2packedX = yuv2bgr48be_X_c; break; - default: - *yuv2packed1 = yuv2packed1_c; - *yuv2packed2 = yuv2packed2_c; - *yuv2packedX = yuv2packedX_c; + case PIX_FMT_RGB32: + case PIX_FMT_BGR32: +#if CONFIG_SMALL + *yuv2packed1 = yuv2rgb32_1_c; + *yuv2packed2 = yuv2rgb32_2_c; + *yuv2packedX = yuv2rgb32_X_c; +#else +#if CONFIG_SWSCALE_ALPHA + if (c->alpPixBuf) { + *yuv2packed1 = yuv2rgba32_1_c; + *yuv2packed2 = yuv2rgba32_2_c; + *yuv2packedX = yuv2rgba32_X_c; + } else +#endif /* CONFIG_SWSCALE_ALPHA */ + { + *yuv2packed1 = yuv2rgbx32_1_c; + *yuv2packed2 = yuv2rgbx32_2_c; + *yuv2packedX = yuv2rgbx32_X_c; + } +#endif /* !CONFIG_SMALL */ + break; + case PIX_FMT_RGB32_1: + case PIX_FMT_BGR32_1: +#if CONFIG_SMALL + *yuv2packed1 = yuv2rgb32_1_1_c; + *yuv2packed2 = yuv2rgb32_1_2_c; + *yuv2packedX = yuv2rgb32_1_X_c; +#else +#if CONFIG_SWSCALE_ALPHA + if (c->alpPixBuf) { + *yuv2packed1 = yuv2rgba32_1_1_c; + *yuv2packed2 = yuv2rgba32_1_2_c; + *yuv2packedX = yuv2rgba32_1_X_c; + } else +#endif /* CONFIG_SWSCALE_ALPHA */ + { + *yuv2packed1 = yuv2rgbx32_1_1_c; + *yuv2packed2 = yuv2rgbx32_1_2_c; + *yuv2packedX = yuv2rgbx32_1_X_c; + } +#endif /* !CONFIG_SMALL */ + break; + case PIX_FMT_RGB24: + *yuv2packed1 = yuv2rgb24_1_c; + *yuv2packed2 = yuv2rgb24_2_c; + *yuv2packedX = yuv2rgb24_X_c; + break; + case PIX_FMT_BGR24: + *yuv2packed1 = yuv2bgr24_1_c; + *yuv2packed2 = yuv2bgr24_2_c; + *yuv2packedX = yuv2bgr24_X_c; + break; + case PIX_FMT_RGB565LE: + case PIX_FMT_RGB565BE: + case PIX_FMT_BGR565LE: + case PIX_FMT_BGR565BE: + *yuv2packed1 = yuv2rgb16_1_c; + *yuv2packed2 = yuv2rgb16_2_c; + *yuv2packedX = yuv2rgb16_X_c; + break; + case PIX_FMT_RGB555LE: + case PIX_FMT_RGB555BE: + case PIX_FMT_BGR555LE: + case PIX_FMT_BGR555BE: + *yuv2packed1 = yuv2rgb15_1_c; + *yuv2packed2 = yuv2rgb15_2_c; + *yuv2packedX = yuv2rgb15_X_c; + break; + case PIX_FMT_RGB444LE: + case PIX_FMT_RGB444BE: + case PIX_FMT_BGR444LE: + case PIX_FMT_BGR444BE: + *yuv2packed1 = yuv2rgb12_1_c; + *yuv2packed2 = yuv2rgb12_2_c; + *yuv2packedX = yuv2rgb12_X_c; + break; + case PIX_FMT_RGB8: + case PIX_FMT_BGR8: + *yuv2packed1 = yuv2rgb8_1_c; + *yuv2packed2 = yuv2rgb8_2_c; + *yuv2packedX = yuv2rgb8_X_c; + break; + case PIX_FMT_RGB4: + case PIX_FMT_BGR4: + *yuv2packed1 = yuv2rgb4_1_c; + *yuv2packed2 = yuv2rgb4_2_c; + *yuv2packedX = yuv2rgb4_X_c; + break; + case PIX_FMT_RGB4_BYTE: + case PIX_FMT_BGR4_BYTE: + *yuv2packed1 = yuv2rgb4b_1_c; + *yuv2packed2 = yuv2rgb4b_2_c; + *yuv2packedX = yuv2rgb4b_X_c; break; } } @@ -2220,6 +2524,7 @@ static int swScale(SwsContext *c, const uint8_t* src[], const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample); int lastDstY; uint32_t *pal=c->pal_yuv; + int should_dither= isNBPS(c->srcFormat) || is16BPS(c->srcFormat); yuv2planar1_fn yuv2yuv1 = c->yuv2yuv1; yuv2planarX_fn yuv2yuvX = c->yuv2yuvX; @@ -2275,16 +2580,19 @@ static int swScale(SwsContext *c, const uint8_t* src[], lastInChrBuf= -1; } + if (!should_dither) { + c->chrDither8 = c->lumDither8 = ff_sws_pb_64; + } lastDstY= dstY; for (;dstY < dstH; dstY++) { - unsigned char *dest =dst[0]+dstStride[0]*dstY; const int chrDstY= dstY>>c->chrDstVSubSample; - unsigned char *uDest=dst[1]+dstStride[1]*chrDstY; - unsigned char *vDest=dst[2]+dstStride[2]*chrDstY; - unsigned char *aDest=(CONFIG_SWSCALE_ALPHA && alpPixBuf) ? dst[3]+dstStride[3]*dstY : NULL; - const uint8_t *lumDither= should_dither ? dithers[7][dstY &7] : flat64; - const uint8_t *chrDither= should_dither ? 
dithers[7][chrDstY&7] : flat64; + uint8_t *dest[4] = { + dst[0] + dstStride[0] * dstY, + dst[1] + dstStride[1] * chrDstY, + dst[2] + dstStride[2] * chrDstY, + (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? dst[3] + dstStride[3] * dstY : NULL, + }; const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input const int firstLumSrcY2= vLumFilterPos[FFMIN(dstY | ((1<<c->chrDstVSubSample) - 1), dstH-1)]; @@ -2364,6 +2672,10 @@ static int swScale(SwsContext *c, const uint8_t* src[], #if HAVE_MMX updateMMXDitherTables(c, dstY, lumBufIndex, chrBufIndex, lastInLumBuf, lastInChrBuf); #endif + if (should_dither) { + c->chrDither8 = dither_8x8_128[chrDstY & 7]; + c->lumDither8 = dither_8x8_128[dstY & 7]; + } if (dstY >= dstH-2) { // hmm looks like we can't use MMX here without overwriting this array's tail find_c_packed_planar_out_funcs(c, &yuv2yuv1, &yuv2yuvX, @@ -2379,46 +2691,43 @@ static int swScale(SwsContext *c, const uint8_t* src[], if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) { //YV12 like const int chrSkipMask= (1<<c->chrDstVSubSample)-1; - if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi + if ((dstY&chrSkipMask) || isGray(dstFormat)) + dest[1] = dest[2] = NULL; //FIXME split functions in lumi / chromi if (c->yuv2yuv1 && vLumFilterSize == 1 && vChrFilterSize == 1) { // unscaled YV12 - const int16_t *lumBuf = lumSrcPtr[0]; - const int16_t *chrUBuf= chrUSrcPtr[0]; - const int16_t *chrVBuf= chrVSrcPtr[0]; const int16_t *alpBuf= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? alpSrcPtr[0] : NULL; - yuv2yuv1(c, lumBuf, chrUBuf, chrVBuf, alpBuf, dest, - uDest, vDest, aDest, dstW, chrDstW, lumDither, chrDither); + yuv2yuv1(c, lumSrcPtr[0], chrUSrcPtr[0], chrVSrcPtr[0], alpBuf, + dest, dstW, chrDstW); } else { //General YV12 - yuv2yuvX(c, - vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, - vChrFilter+chrDstY*vChrFilterSize, chrUSrcPtr, - chrVSrcPtr, vChrFilterSize, - alpSrcPtr, dest, uDest, vDest, aDest, dstW, chrDstW, lumDither, chrDither); + yuv2yuvX(c, vLumFilter + dstY * vLumFilterSize, + lumSrcPtr, vLumFilterSize, + vChrFilter + chrDstY * vChrFilterSize, + chrUSrcPtr, chrVSrcPtr, vChrFilterSize, + alpSrcPtr, dest, dstW, chrDstW); } } else { assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2); assert(chrUSrcPtr + vChrFilterSize - 1 < chrUPixBuf + vChrBufSize*2); if (c->yuv2packed1 && vLumFilterSize == 1 && vChrFilterSize == 2) { //unscaled RGB - int chrAlpha= vChrFilter[2*dstY+1]; - yuv2packed1(c, *lumSrcPtr, *chrUSrcPtr, *(chrUSrcPtr+1), - *chrVSrcPtr, *(chrVSrcPtr+1), - alpPixBuf ? *alpSrcPtr : NULL, - dest, dstW, chrAlpha, dstFormat, flags, dstY); + int chrAlpha = vChrFilter[2 * dstY + 1]; + yuv2packed1(c, *lumSrcPtr, chrUSrcPtr, chrVSrcPtr, + alpPixBuf ? *alpSrcPtr : NULL, + dest[0], dstW, chrAlpha, dstY); } else if (c->yuv2packed2 && vLumFilterSize == 2 && vChrFilterSize == 2) { //bilinear upscale RGB - int lumAlpha= vLumFilter[2*dstY+1]; - int chrAlpha= vChrFilter[2*dstY+1]; - lumMmxFilter[2]= - lumMmxFilter[3]= vLumFilter[2*dstY ]*0x10001; - chrMmxFilter[2]= - chrMmxFilter[3]= vChrFilter[2*chrDstY]*0x10001; - yuv2packed2(c, *lumSrcPtr, *(lumSrcPtr+1), *chrUSrcPtr, *(chrUSrcPtr+1), - *chrVSrcPtr, *(chrVSrcPtr+1), - alpPixBuf ? *alpSrcPtr : NULL, alpPixBuf ? 
*(alpSrcPtr+1) : NULL, - dest, dstW, lumAlpha, chrAlpha, dstY); + int lumAlpha = vLumFilter[2 * dstY + 1]; + int chrAlpha = vChrFilter[2 * dstY + 1]; + lumMmxFilter[2] = + lumMmxFilter[3] = vLumFilter[2 * dstY ] * 0x10001; + chrMmxFilter[2] = + chrMmxFilter[3] = vChrFilter[2 * chrDstY] * 0x10001; + yuv2packed2(c, lumSrcPtr, chrUSrcPtr, chrVSrcPtr, + alpPixBuf ? alpSrcPtr : NULL, + dest[0], dstW, lumAlpha, chrAlpha, dstY); } else { //general RGB - yuv2packedX(c, - vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, - vChrFilter+dstY*vChrFilterSize, chrUSrcPtr, chrVSrcPtr, vChrFilterSize, - alpSrcPtr, dest, dstW, dstY); + yuv2packedX(c, vLumFilter + dstY * vLumFilterSize, + lumSrcPtr, vLumFilterSize, + vChrFilter + dstY * vChrFilterSize, + chrUSrcPtr, chrVSrcPtr, vChrFilterSize, + alpSrcPtr, dest[0], dstW, dstY); } } } @@ -2451,13 +2760,6 @@ static av_cold void sws_init_swScale_c(SwsContext *c) &c->yuv2packed1, &c->yuv2packed2, &c->yuv2packedX); - c->hScale = hScale_c; - - if (c->flags & SWS_FAST_BILINEAR) { - c->hyscale_fast = hyscale_fast_c; - c->hcscale_fast = hcscale_fast_c; - } - c->chrToYV12 = NULL; switch(srcFormat) { case PIX_FMT_YUYV422 : c->chrToYV12 = yuy2ToUV_c; break; @@ -2469,24 +2771,25 @@ static av_cold void sws_init_swScale_c(SwsContext *c) case PIX_FMT_PAL8 : case PIX_FMT_BGR4_BYTE: case PIX_FMT_RGB4_BYTE: c->chrToYV12 = palToUV_c; break; - case PIX_FMT_GRAY16BE : case PIX_FMT_YUV444P9BE: case PIX_FMT_YUV420P9BE: case PIX_FMT_YUV444P10BE: case PIX_FMT_YUV422P10BE: - case PIX_FMT_YUV420P10BE: - case PIX_FMT_YUV420P16BE: - case PIX_FMT_YUV422P16BE: - case PIX_FMT_YUV444P16BE: c->hScale16= HAVE_BIGENDIAN ? hScale16_c : hScale16X_c; break; - case PIX_FMT_GRAY16LE : + case PIX_FMT_YUV420P10BE: c->hScale16= HAVE_BIGENDIAN ? hScale16N_c : hScale16NX_c; break; case PIX_FMT_YUV444P9LE: case PIX_FMT_YUV420P9LE: case PIX_FMT_YUV422P10LE: case PIX_FMT_YUV420P10LE: - case PIX_FMT_YUV444P10LE: + case PIX_FMT_YUV444P10LE: c->hScale16= HAVE_BIGENDIAN ? hScale16NX_c : hScale16N_c; break; +#if HAVE_BIGENDIAN case PIX_FMT_YUV420P16LE: case PIX_FMT_YUV422P16LE: - case PIX_FMT_YUV444P16LE: c->hScale16= HAVE_BIGENDIAN ? 
hScale16X_c : hScale16_c; break; + case PIX_FMT_YUV444P16LE: c->chrToYV12 = bswap16UV_c; break; +#else + case PIX_FMT_YUV420P16BE: + case PIX_FMT_YUV422P16BE: + case PIX_FMT_YUV444P16BE: c->chrToYV12 = bswap16UV_c; break; +#endif } if (c->chrSrcHSubSample) { switch(srcFormat) { @@ -2535,11 +2838,20 @@ static av_cold void sws_init_swScale_c(SwsContext *c) c->lumToYV12 = NULL; c->alpToYV12 = NULL; switch (srcFormat) { +#if HAVE_BIGENDIAN + case PIX_FMT_YUV420P16LE: + case PIX_FMT_YUV422P16LE: + case PIX_FMT_YUV444P16LE: + case PIX_FMT_GRAY16LE: c->lumToYV12 = bswap16Y_c; break; +#else + case PIX_FMT_YUV420P16BE: + case PIX_FMT_YUV422P16BE: + case PIX_FMT_YUV444P16BE: + case PIX_FMT_GRAY16BE: c->lumToYV12 = bswap16Y_c; break; +#endif case PIX_FMT_YUYV422 : - case PIX_FMT_GRAY8A : - c->lumToYV12 = yuy2ToY_c; break; - case PIX_FMT_UYVY422 : - c->lumToYV12 = uyvyToY_c; break; + case PIX_FMT_Y400A : c->lumToYV12 = yuy2ToY_c; break; + case PIX_FMT_UYVY422 : c->lumToYV12 = uyvyToY_c; break; case PIX_FMT_BGR24 : c->lumToYV12 = bgr24ToY_c; break; case PIX_FMT_BGR565LE : c->lumToYV12 = bgr16leToY_c; break; case PIX_FMT_BGR565BE : c->lumToYV12 = bgr16beToY_c; break; @@ -2577,8 +2889,16 @@ static av_cold void sws_init_swScale_c(SwsContext *c) } } - if(isAnyRGB(c->srcFormat) || c->srcFormat == PIX_FMT_PAL8) - c->hScale16= hScale16_c; + if((isAnyRGB(c->srcFormat) && av_pix_fmt_descriptors[c->srcFormat].comp[0].depth_minus1<15) + || c->srcFormat == PIX_FMT_PAL8) + c->hScale16= hScale16N_c; + + if (c->scalingBpp == 8) { + c->hScale = hScale_c; + if (c->flags & SWS_FAST_BILINEAR) { + c->hyscale_fast = hyscale_fast_c; + c->hcscale_fast = hcscale_fast_c; + } if (c->srcRange != c->dstRange && !isAnyRGB(c->dstFormat)) { if (c->srcRange) { @@ -2589,6 +2909,26 @@ static av_cold void sws_init_swScale_c(SwsContext *c) c->chrConvertRange = chrRangeToJpeg_c; } } + } else { + if(c->hScale16 == hScale16NX_c && !isAnyRGB(c->srcFormat)){ + c->chrToYV12 = bswap16UV_c; + c->lumToYV12 = bswap16Y_c; + } + c->hScale16 = NULL; + c->hScale = hScale16_c; + c->scale19To15Fw = scale19To15Fw_c; + c->scale8To16Rv = scale8To16Rv_c; + + if (c->srcRange != c->dstRange && !isAnyRGB(c->dstFormat)) { + if (c->srcRange) { + c->lumConvertRange = lumRangeFromJpeg16_c; + c->chrConvertRange = chrRangeFromJpeg16_c; + } else { + c->lumConvertRange = lumRangeToJpeg16_c; + c->chrConvertRange = chrRangeToJpeg16_c; + } + } + } if (!(isGray(srcFormat) || isGray(c->dstFormat) || srcFormat == PIX_FMT_MONOBLACK || srcFormat == PIX_FMT_MONOWHITE)) diff --git a/libswscale/swscale.h b/libswscale/swscale.h index 006118a5a1..bf39f81425 100644 --- a/libswscale/swscale.h +++ b/libswscale/swscale.h @@ -28,6 +28,8 @@ */ #include "libavutil/avutil.h" +#include "libavutil/log.h" +#include "libavutil/pixfmt.h" #define LIBSWSCALE_VERSION_MAJOR 0 #define LIBSWSCALE_VERSION_MINOR 14 @@ -216,7 +218,7 @@ struct SwsContext *sws_getContext(int srcW, int srcH, enum PixelFormat srcFormat * top-bottom or bottom-top order. If slices are provided in * non-sequential order the behavior of the function is undefined. 
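The sws_scale() documentation touched in this hunk is easiest to read next to a short caller. The following is an illustrative sketch, not part of the patch; the pixel formats, dimensions and the SWS_BILINEAR flag are assumptions chosen for the example:

#include "libswscale/swscale.h"

/* Hypothetical caller: convert one YUV420P frame to RGB24, handing the
 * whole frame to sws_scale() as a single slice (srcSliceY = 0,
 * srcSliceH = srcH). If several slices were used instead, they would
 * have to arrive in sequential top-to-bottom (or bottom-to-top) order,
 * as the documentation above states. */
static void convert_frame_sketch(const uint8_t *const src[4],
                                 const int srcStride[4],
                                 uint8_t *const dst[4],
                                 const int dstStride[4],
                                 int srcW, int srcH, int dstW, int dstH)
{
    struct SwsContext *ctx = sws_getContext(srcW, srcH, PIX_FMT_YUV420P,
                                            dstW, dstH, PIX_FMT_RGB24,
                                            SWS_BILINEAR, NULL, NULL, NULL);
    if (!ctx)
        return;
    sws_scale(ctx, src, srcStride, 0, srcH, dst, dstStride);
    sws_freeContext(ctx);
}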
* - * @param context the scaling context previously created with + * @param c the scaling context previously created with * sws_getContext() * @param srcSlice the array containing the pointers to the planes of * the source slice diff --git a/libswscale/swscale_internal.h b/libswscale/swscale_internal.h index c0f8e64d70..30dec99130 100644 --- a/libswscale/swscale_internal.h +++ b/libswscale/swscale_internal.h @@ -28,6 +28,8 @@ #endif #include "libavutil/avutil.h" +#include "libavutil/log.h" +#include "libavutil/pixfmt.h" #define STR(s) AV_TOSTRING(s) //AV_STRINGIFY is too long @@ -59,40 +61,128 @@ typedef int (*SwsFunc)(struct SwsContext *context, const uint8_t* src[], int srcStride[], int srcSliceY, int srcSliceH, uint8_t* dst[], int dstStride[]); +/** + * Write one line of horizontally scaled Y/U/V/A to planar output + * without any additional vertical scaling (or point-scaling). + * + * @param c SWS scaling context + * @param lumSrc scaled luma (Y) source data, 15bit for 8bit output + * @param chrUSrc scaled chroma (U) source data, 15bit for 8bit output + * @param chrVSrc scaled chroma (V) source data, 15bit for 8bit output + * @param alpSrc scaled alpha (A) source data, 15bit for 8bit output + * @param dest pointer to the 4 output planes (Y/U/V/A) + * @param dstW width of dest[0], dest[3], lumSrc and alpSrc in pixels + * @param chrDstW width of dest[1], dest[2], chrUSrc and chrVSrc + */ typedef void (*yuv2planar1_fn) (struct SwsContext *c, const int16_t *lumSrc, const int16_t *chrUSrc, const int16_t *chrVSrc, const int16_t *alpSrc, - uint8_t *dest, - uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, - int dstW, int chrDstW, const uint8_t *lumDither, const uint8_t *chrDither); -typedef void (*yuv2planarX_fn) (struct SwsContext *c, - const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, + uint8_t *dest[4], int dstW, int chrDstW); +/** + * Write one line of horizontally scaled Y/U/V/A to planar output + * with multi-point vertical scaling between input pixels. 
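Per output pixel, the multi-point vertical scale these planar-output callbacks describe is a dot product over lumFilterSize input lines. A minimal sketch for the 8-bit luma case follows; it is illustrative and not part of the patch: it uses a plain rounding offset where the in-tree code applies the per-line dither tables, and assumes av_clip_uint8() from libavutil is available.

static void yuv2planarX_luma_sketch(const int16_t *lumFilter,
                                    const int16_t **lumSrc, int lumFilterSize,
                                    uint8_t *dstY, int dstW)
{
    int i, j;
    for (i = 0; i < dstW; i++) {
        int val = 1 << 18;                      /* rounding offset (0.5 in 19-bit) */
        for (j = 0; j < lumFilterSize; j++)
            val += lumSrc[j][i] * lumFilter[j]; /* 15-bit data * 12-bit coefficient */
        dstY[i] = av_clip_uint8(val >> 19);     /* reduce back to 8 bits */
    }
}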
+ * + * @param c SWS scaling context + * @param lumFilter vertical luma/alpha scaling coefficients, 12bit [0,4096] + * @param lumSrc scaled luma (Y) source data, 15bit for 8bit output + * @param lumFilterSize number of vertical luma/alpha input lines to scale + * @param chrFilter vertical chroma scaling coefficients, 12bit [0,4096] + * @param chrUSrc scaled chroma (U) source data, 15bit for 8bit output + * @param chrVSrc scaled chroma (V) source data, 15bit for 8bit output + * @param chrFilterSize number of vertical chroma input lines to scale + * @param alpSrc scaled alpha (A) source data, 15bit for 8bit output + * @param dest pointer to the 4 output planes (Y/U/V/A) + * @param dstW width of dest[0], dest[3], lumSrc and alpSrc in pixels + * @param chrDstW width of dest[1], dest[2], chrUSrc and chrVSrc + */ +typedef void (*yuv2planarX_fn) (struct SwsContext *c, const int16_t *lumFilter, + const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, - const int16_t **chrVSrc, int chrFilterSize, - const int16_t **alpSrc, - uint8_t *dest, - uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, - int dstW, int chrDstW, const uint8_t *lumDither, const uint8_t *chrDither); -typedef void (*yuv2packed1_fn) (struct SwsContext *c, - const uint16_t *buf0, - const uint16_t *ubuf0, const uint16_t *ubuf1, - const uint16_t *vbuf0, const uint16_t *vbuf1, - const uint16_t *abuf0, - uint8_t *dest, - int dstW, int uvalpha, int dstFormat, int flags, int y); -typedef void (*yuv2packed2_fn) (struct SwsContext *c, - const uint16_t *buf0, const uint16_t *buf1, - const uint16_t *ubuf0, const uint16_t *ubuf1, - const uint16_t *vbuf0, const uint16_t *vbuf1, - const uint16_t *abuf0, const uint16_t *abuf1, - uint8_t *dest, + const int16_t **chrVSrc, int chrFilterSize, + const int16_t **alpSrc, uint8_t *dest[4], + int dstW, int chrDstW); +/** + * Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB + * output without any additional vertical scaling (or point-scaling). Note + * that this function may do chroma scaling, see the "uvalpha" argument. + * + * @param c SWS scaling context + * @param lumSrc scaled luma (Y) source data, 15bit for 8bit output + * @param chrUSrc scaled chroma (U) source data, 15bit for 8bit output + * @param chrVSrc scaled chroma (V) source data, 15bit for 8bit output + * @param alpSrc scaled alpha (A) source data, 15bit for 8bit output + * @param dest pointer to the output plane + * @param dstW width of lumSrc and alpSrc in pixels, number of pixels + * to write into dest[] + * @param uvalpha chroma scaling coefficient for the second line of chroma + * pixels, either 2048 or 0. If 0, one chroma input is used + * for 2 output pixels (or if the SWS_FLAG_FULL_CHR_INT flag + * is set, it generates 1 output pixel). If 2048, two chroma + * input pixels should be averaged for 2 output pixels (this + * only happens if SWS_FLAG_FULL_CHR_INT is not set) + * @param y vertical line number for this output. This does not need + * to be used to calculate the offset in the destination, + * but can be used to generate comfort noise using dithering + * for some output formats. + */ +typedef void (*yuv2packed1_fn) (struct SwsContext *c, const int16_t *lumSrc, + const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], + const int16_t *alpSrc, uint8_t *dest, + int dstW, int uvalpha, int y); +/** + * Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB + * output by doing bilinear scaling between two input lines. 
+ * + * @param c SWS scaling context + * @param lumSrc scaled luma (Y) source data, 15bit for 8bit output + * @param chrUSrc scaled chroma (U) source data, 15bit for 8bit output + * @param chrVSrc scaled chroma (V) source data, 15bit for 8bit output + * @param alpSrc scaled alpha (A) source data, 15bit for 8bit output + * @param dest pointer to the output plane + * @param dstW width of lumSrc and alpSrc in pixels, number of pixels + * to write into dest[] + * @param yalpha luma/alpha scaling coefficients for the second input line. + * The first line's coefficients can be calculated by using + * 4096 - yalpha + * @param uvalpha chroma scaling coefficient for the second input line. The + * first line's coefficients can be calculated by using + * 4096 - uvalpha + * @param y vertical line number for this output. This does not need + * to be used to calculate the offset in the destination, + * but can be used to generate comfort noise using dithering + * for some output formats. + */ +typedef void (*yuv2packed2_fn) (struct SwsContext *c, const int16_t *lumSrc[2], + const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], + const int16_t *alpSrc[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y); -typedef void (*yuv2packedX_fn) (struct SwsContext *c, - const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, +/** + * Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB + * output by doing multi-point vertical scaling between input pixels. + * + * @param c SWS scaling context + * @param lumFilter vertical luma/alpha scaling coefficients, 12bit [0,4096] + * @param lumSrc scaled luma (Y) source data, 15bit for 8bit output + * @param lumFilterSize number of vertical luma/alpha input lines to scale + * @param chrFilter vertical chroma scaling coefficients, 12bit [0,4096] + * @param chrUSrc scaled chroma (U) source data, 15bit for 8bit output + * @param chrVSrc scaled chroma (V) source data, 15bit for 8bit output + * @param chrFilterSize number of vertical chroma input lines to scale + * @param alpSrc scaled alpha (A) source data, 15bit for 8bit output + * @param dest pointer to the output plane + * @param dstW width of lumSrc and alpSrc in pixels, number of pixels + * to write into dest[] + * @param y vertical line number for this output. This does not need + * to be used to calculate the offset in the destination, + * but can be used to generate comfort noise using dithering + * or some output formats. + */ +typedef void (*yuv2packedX_fn) (struct SwsContext *c, const int16_t *lumFilter, + const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, - int dstW, int dstY); + int dstW, int y); /* This struct should be aligned on at least a 32-byte boundary. */ typedef struct SwsContext { @@ -119,6 +209,7 @@ typedef struct SwsContext { enum PixelFormat srcFormat; ///< Source pixel format. int dstFormatBpp; ///< Number of bits per pixel of the destination pixel format. int srcFormatBpp; ///< Number of bits per pixel of the source pixel format. + int scalingBpp; int chrSrcHSubSample; ///< Binary logarithm of horizontal subsampling factor between luma/alpha and chroma planes in source image. int chrSrcVSubSample; ///< Binary logarithm of vertical subsampling factor between luma/alpha and chroma planes in source image. int chrDstHSubSample; ///< Binary logarithm of horizontal subsampling factor between luma/alpha and chroma planes in destination image. 
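A small sketch of the two-line vertical blend implied by the yuv2packed2_fn documentation above (illustrative, not part of the patch): the second input line carries the 12-bit weight yalpha/uvalpha, the first line the complementary 4096 - alpha, and the 15-bit samples are reduced back to 8 bits.

static inline int blend_two_lines(int line0, int line1, int alpha)
{
    int alpha0 = 4096 - alpha;                     /* weight of the first line */
    return (line0 * alpha0 + line1 * alpha) >> 19; /* 15 + 12 bits -> 8-bit result */
}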
@@ -233,7 +324,7 @@ typedef struct SwsContext { #define UV_OFF "11*8+4*4*256*3+48" #define UV_OFFx2 "11*8+4*4*256*3+56" #define DITHER16 "11*8+4*4*256*3+64" -#define DITHER32 "11*8+4*4*256*3+64+16" +#define DITHER32 "11*8+4*4*256*3+80" DECLARE_ALIGNED(8, uint64_t, redDither); DECLARE_ALIGNED(8, uint64_t, greenDither); @@ -256,10 +347,15 @@ typedef struct SwsContext { DECLARE_ALIGNED(8, uint64_t, v_temp); DECLARE_ALIGNED(8, uint64_t, y_temp); int32_t alpMmxFilter[4*MAX_FILTER_SIZE]; + // alignment of these values is not necessary, but merely here + // to maintain the same offset across x8632 and x86-64. Once we + // use proper offset macros in the asm, they can be removed. DECLARE_ALIGNED(8, ptrdiff_t, uv_off); ///< offset (in pixels) between u and v planes DECLARE_ALIGNED(8, ptrdiff_t, uv_offx2); ///< offset (in bytes) between u and v planes - uint16_t dither16[8]; - uint32_t dither32[8]; + DECLARE_ALIGNED(8, uint16_t, dither16)[8]; + DECLARE_ALIGNED(8, uint32_t, dither32)[8]; + + const uint8_t *chrDither8, *lumDither8; #if HAVE_ALTIVEC vector signed short CY; @@ -304,6 +400,25 @@ typedef struct SwsContext { void (*chrToYV12)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, int width, uint32_t *pal); ///< Unscaled conversion of chroma planes to YV12 for horizontal scaler. + /** + * Scale one horizontal line of input data using a bilinear filter + * to produce one line of output data. Compared to SwsContext->hScale(), + * please take note of the following caveats when using these: + * - Scaling is done using only 7bit instead of 14bit coefficients. + * - You can use no more than 5 input pixels to produce 4 output + * pixels. Therefore, this filter should not be used for downscaling + * by more than ~20% in width (because that equals more than 5/4th + * downscaling and thus more than 5 pixels input per 4 pixels output). + * - In general, bilinear filters create artifacts during downscaling + * (even when <20%), because one output pixel will span more than one + * input pixel, and thus some pixels will need edges of both neighbor + * pixels to interpolate the output pixel. Since you can use at most + * two input pixels per output pixel in bilinear scaling, this is + * impossible and thus downscaling by any size will create artifacts. + * To enable this type of scaling, set SWS_FLAG_FAST_BILINEAR + * in SwsContext->flags. + */ + /** @{ */ void (*hyscale_fast)(struct SwsContext *c, int16_t *dst, int dstWidth, const uint8_t *src, int srcW, int xInc); @@ -311,9 +426,38 @@ typedef struct SwsContext { int16_t *dst1, int16_t *dst2, int dstWidth, const uint8_t *src1, const uint8_t *src2, int srcW, int xInc); + /** @} */ - void (*hScale)(int16_t *dst, int dstW, const uint8_t *src, int srcW, - int xInc, const int16_t *filter, const int16_t *filterPos, + /** + * Scale one horizontal line of input data using a filter over the input + * lines, to produce one (differently sized) line of output data. + * + * @param dst pointer to destination buffer for horizontally scaled + * data. If the scaling depth (SwsContext->scalingBpp) is + * 8, data will be 15bpp in 16bits (int16_t) width. If + * scaling depth is 16, data will be 19bpp in 32bpp + * (int32_t) width. + * @param dstW width of destination image + * @param src pointer to source data to be scaled. If scaling depth + * is 8, this is 8bpp in 8bpp (uint8_t) width. If scaling + * depth is 16, this is native depth in 16bbp (uint16_t) + * width. 
In other words, for 9-bit YUV input, this is + * 9bpp, for 10-bit YUV input, this is 10bpp, and for + * 16-bit RGB or YUV, this is 16bpp. + * @param filter filter coefficients to be used per output pixel for + * scaling. This contains 14bpp filtering coefficients. + * Guaranteed to contain dstW * filterSize entries. + * @param filterPos position of the first input pixel to be used for + * each output pixel during scaling. Guaranteed to + * contain dstW entries. + * @param filterSize the number of input coefficients to be used (and + * thus the number of input pixels to be used) for + * creating a single output pixel. Is aligned to 4 + * (and input coefficients thus padded with zeroes) + * to simplify creating SIMD code. + */ + void (*hScale)(struct SwsContext *c, int16_t *dst, int dstW, const uint8_t *src, + const int16_t *filter, const int16_t *filterPos, int filterSize); void (*hScale16)(int16_t *dst, int dstW, const uint16_t *src, int srcW, @@ -323,6 +467,15 @@ typedef struct SwsContext { void (*lumConvertRange)(int16_t *dst, int width); ///< Color range conversion function for luma plane if needed. void (*chrConvertRange)(int16_t *dst1, int16_t *dst2, int width); ///< Color range conversion function for chroma planes if needed. + /** + * dst[..] = (src[..] << 8) | src[..]; + */ + void (*scale8To16Rv)(uint16_t *dst, const uint8_t *src, int len); + /** + * dst[..] = src[..] >> 4; + */ + void (*scale19To15Fw)(int16_t *dst, const int32_t *src, int len); + int needs_hcscale; ///< Set if there are chroma planes to be converted. } SwsContext; diff --git a/libswscale/swscale_unscaled.c b/libswscale/swscale_unscaled.c index e17de2b206..4c4dbacebc 100644 --- a/libswscale/swscale_unscaled.c +++ b/libswscale/swscale_unscaled.c @@ -340,7 +340,7 @@ static int rgbToRgbWrapper(SwsContext *c, const uint8_t* src[], int srcStride[], if ((dstFormat == PIX_FMT_RGB32_1 || dstFormat == PIX_FMT_BGR32_1) && !isRGBA32(srcFormat)) dstPtr += ALT32_CORR; - if (dstStride[0]*srcBpp == srcStride[0]*dstBpp && srcStride[0] > 0 && !(srcStride[0]%srcBpp)) + if (dstStride[0]*srcBpp == srcStride[0]*dstBpp && srcStride[0] > 0 && !(srcStride[0] % srcBpp)) conv(srcPtr, dstPtr + dstStride[0]*srcSliceY, srcSliceH*srcStride[0]); else { int i; @@ -686,18 +686,19 @@ static int check_image_pointers(uint8_t *data[4], enum PixelFormat pix_fmt, * swscale wrapper, so we don't need to export the SwsContext. * Assumes planar YUV to be in YUV order instead of YVU. 
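For reference, the 8-bit contract of the hScale() callback documented in the swscale_internal.h hunk above reduces to the loop below. This is a sketch, not part of the patch; FFMIN comes from libavutil, and the 14-bit filter coefficients are what make the >> 7 yield the documented 15-bit intermediate in int16_t.

static void hScale8_sketch(int16_t *dst, int dstW, const uint8_t *src,
                           const int16_t *filter, const int16_t *filterPos,
                           int filterSize)
{
    int i, j;
    for (i = 0; i < dstW; i++) {
        int val = 0;
        for (j = 0; j < filterSize; j++)
            val += src[filterPos[i] + j] * filter[filterSize * i + j];
        dst[i] = FFMIN(val >> 7, (1 << 15) - 1); /* clip into the 15-bit range */
    }
}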
*/ -int sws_scale(SwsContext *c, const uint8_t* const src[], const int srcStride[], int srcSliceY, - int srcSliceH, uint8_t* const dst[], const int dstStride[]) +int sws_scale(struct SwsContext *c, const uint8_t* const srcSlice[], + const int srcStride[], int srcSliceY, int srcSliceH, + uint8_t* const dst[], const int dstStride[]) { int i; - const uint8_t* src2[4]= {src[0], src[1], src[2], src[3]}; + const uint8_t* src2[4]= {srcSlice[0], srcSlice[1], srcSlice[2], srcSlice[3]}; uint8_t* dst2[4]= {dst[0], dst[1], dst[2], dst[3]}; // do not mess up sliceDir if we have a "trailing" 0-size slice if (srcSliceH == 0) return 0; - if (!check_image_pointers(src, c->srcFormat, srcStride)) { + if (!check_image_pointers(srcSlice, c->srcFormat, srcStride)) { av_log(c, AV_LOG_ERROR, "bad src image pointers\n"); return 0; } @@ -718,7 +719,7 @@ int sws_scale(SwsContext *c, const uint8_t* const src[], const int srcStride[], for (i=0; i<256; i++) { int p, r, g, b, y, u, v, a = 0xff; if(c->srcFormat == PIX_FMT_PAL8) { - p=((const uint32_t*)(src[1]))[i]; + p=((const uint32_t*)(srcSlice[1]))[i]; a= (p>>24)&0xFF; r= (p>>16)&0xFF; g= (p>> 8)&0xFF; diff --git a/libswscale/utils.c b/libswscale/utils.c index 984f2c52fa..afae13388c 100644 --- a/libswscale/utils.c +++ b/libswscale/utils.c @@ -46,6 +46,7 @@ #include "libavutil/bswap.h" #include "libavutil/opt.h" #include "libavutil/pixdesc.h" +#include "libavutil/avassert.h" unsigned swscale_version(void) { @@ -65,127 +66,88 @@ const char *swscale_license(void) #define RET 0xC3 //near return opcode for x86 -#define isSupportedIn(x) ( \ - (x)==PIX_FMT_YUV420P \ - || (x)==PIX_FMT_YUVA420P \ - || (x)==PIX_FMT_YUYV422 \ - || (x)==PIX_FMT_UYVY422 \ - || (x)==PIX_FMT_RGB48BE \ - || (x)==PIX_FMT_RGB48LE \ - || (x)==PIX_FMT_RGB32 \ - || (x)==PIX_FMT_RGB32_1 \ - || (x)==PIX_FMT_BGR48BE \ - || (x)==PIX_FMT_BGR48LE \ - || (x)==PIX_FMT_BGR24 \ - || (x)==PIX_FMT_BGR565LE \ - || (x)==PIX_FMT_BGR565BE \ - || (x)==PIX_FMT_BGR555LE \ - || (x)==PIX_FMT_BGR555BE \ - || (x)==PIX_FMT_BGR32 \ - || (x)==PIX_FMT_BGR32_1 \ - || (x)==PIX_FMT_RGB24 \ - || (x)==PIX_FMT_RGB565LE \ - || (x)==PIX_FMT_RGB565BE \ - || (x)==PIX_FMT_RGB555LE \ - || (x)==PIX_FMT_RGB555BE \ - || (x)==PIX_FMT_GRAY8 \ - || (x)==PIX_FMT_GRAY8A \ - || (x)==PIX_FMT_YUV410P \ - || (x)==PIX_FMT_YUV440P \ - || (x)==PIX_FMT_NV12 \ - || (x)==PIX_FMT_NV21 \ - || (x)==PIX_FMT_GRAY16BE \ - || (x)==PIX_FMT_GRAY16LE \ - || (x)==PIX_FMT_YUV444P \ - || (x)==PIX_FMT_YUV422P \ - || (x)==PIX_FMT_YUV411P \ - || (x)==PIX_FMT_YUVJ420P \ - || (x)==PIX_FMT_YUVJ422P \ - || (x)==PIX_FMT_YUVJ440P \ - || (x)==PIX_FMT_YUVJ444P \ - || (x)==PIX_FMT_PAL8 \ - || (x)==PIX_FMT_BGR8 \ - || (x)==PIX_FMT_RGB8 \ - || (x)==PIX_FMT_BGR4_BYTE \ - || (x)==PIX_FMT_RGB4_BYTE \ - || (x)==PIX_FMT_YUV440P \ - || (x)==PIX_FMT_MONOWHITE \ - || (x)==PIX_FMT_MONOBLACK \ - || (x)==PIX_FMT_YUV420P9LE \ - || (x)==PIX_FMT_YUV444P9LE \ - || (x)==PIX_FMT_YUV420P10LE \ - || (x)==PIX_FMT_YUV422P10LE \ - || (x)==PIX_FMT_YUV444P10LE \ - || (x)==PIX_FMT_YUV420P16LE \ - || (x)==PIX_FMT_YUV422P16LE \ - || (x)==PIX_FMT_YUV444P16LE \ - || (x)==PIX_FMT_YUV420P9BE \ - || (x)==PIX_FMT_YUV444P9BE \ - || (x)==PIX_FMT_YUV420P10BE \ - || (x)==PIX_FMT_YUV444P10BE \ - || (x)==PIX_FMT_YUV422P10BE \ - || (x)==PIX_FMT_YUV420P16BE \ - || (x)==PIX_FMT_YUV422P16BE \ - || (x)==PIX_FMT_YUV444P16BE \ - || (x)==PIX_FMT_YUV422P10 \ - ) +typedef struct FormatEntry { + int is_supported_in, is_supported_out; +} FormatEntry; + +const static FormatEntry format_entries[PIX_FMT_NB] = { + [PIX_FMT_YUV420P] = { 1 , 1 
}, + [PIX_FMT_YUYV422] = { 1 , 1 }, + [PIX_FMT_RGB24] = { 1 , 1 }, + [PIX_FMT_BGR24] = { 1 , 1 }, + [PIX_FMT_YUV422P] = { 1 , 1 }, + [PIX_FMT_YUV444P] = { 1 , 1 }, + [PIX_FMT_YUV410P] = { 1 , 1 }, + [PIX_FMT_YUV411P] = { 1 , 1 }, + [PIX_FMT_GRAY8] = { 1 , 1 }, + [PIX_FMT_MONOWHITE] = { 1 , 1 }, + [PIX_FMT_MONOBLACK] = { 1 , 1 }, + [PIX_FMT_PAL8] = { 1 , 0 }, + [PIX_FMT_YUVJ420P] = { 1 , 1 }, + [PIX_FMT_YUVJ422P] = { 1 , 1 }, + [PIX_FMT_YUVJ444P] = { 1 , 1 }, + [PIX_FMT_UYVY422] = { 1 , 1 }, + [PIX_FMT_UYYVYY411] = { 0 , 0 }, + [PIX_FMT_BGR8] = { 1 , 1 }, + [PIX_FMT_BGR4] = { 0 , 1 }, + [PIX_FMT_BGR4_BYTE] = { 1 , 1 }, + [PIX_FMT_RGB8] = { 1 , 1 }, + [PIX_FMT_RGB4] = { 0 , 1 }, + [PIX_FMT_RGB4_BYTE] = { 1 , 1 }, + [PIX_FMT_NV12] = { 1 , 1 }, + [PIX_FMT_NV21] = { 1 , 1 }, + [PIX_FMT_ARGB] = { 1 , 1 }, + [PIX_FMT_RGBA] = { 1 , 1 }, + [PIX_FMT_ABGR] = { 1 , 1 }, + [PIX_FMT_BGRA] = { 1 , 1 }, + [PIX_FMT_GRAY16BE] = { 1 , 1 }, + [PIX_FMT_GRAY16LE] = { 1 , 1 }, + [PIX_FMT_YUV440P] = { 1 , 1 }, + [PIX_FMT_YUVJ440P] = { 1 , 1 }, + [PIX_FMT_YUVA420P] = { 1 , 1 }, + [PIX_FMT_RGB48BE] = { 1 , 1 }, + [PIX_FMT_RGB48LE] = { 1 , 1 }, + [PIX_FMT_RGB565BE] = { 1 , 1 }, + [PIX_FMT_RGB565LE] = { 1 , 1 }, + [PIX_FMT_RGB555BE] = { 1 , 1 }, + [PIX_FMT_RGB555LE] = { 1 , 1 }, + [PIX_FMT_BGR565BE] = { 1 , 1 }, + [PIX_FMT_BGR565LE] = { 1 , 1 }, + [PIX_FMT_BGR555BE] = { 1 , 1 }, + [PIX_FMT_BGR555LE] = { 1 , 1 }, + [PIX_FMT_YUV420P16LE] = { 1 , 1 }, + [PIX_FMT_YUV420P16BE] = { 1 , 1 }, + [PIX_FMT_YUV422P16LE] = { 1 , 1 }, + [PIX_FMT_YUV422P16BE] = { 1 , 1 }, + [PIX_FMT_YUV444P16LE] = { 1 , 1 }, + [PIX_FMT_YUV444P16BE] = { 1 , 1 }, + [PIX_FMT_RGB444LE] = { 0 , 1 }, + [PIX_FMT_RGB444BE] = { 0 , 1 }, + [PIX_FMT_BGR444LE] = { 0 , 1 }, + [PIX_FMT_BGR444BE] = { 0 , 1 }, + [PIX_FMT_GRAY8A] = { 1 , 0 }, + [PIX_FMT_BGR48BE] = { 1 , 1 }, + [PIX_FMT_BGR48LE] = { 1 , 1 }, + [PIX_FMT_YUV420P9BE] = { 1 , 1 }, + [PIX_FMT_YUV420P9LE] = { 1 , 1 }, + [PIX_FMT_YUV420P10BE] = { 1 , 1 }, + [PIX_FMT_YUV420P10LE] = { 1 , 1 }, + [PIX_FMT_YUV422P10BE] = { 1 , 1 }, + [PIX_FMT_YUV422P10LE] = { 1 , 1 }, + [PIX_FMT_YUV444P9BE] = { 1 , 0 }, + [PIX_FMT_YUV444P9LE] = { 1 , 0 }, + [PIX_FMT_YUV444P10BE] = { 1 , 0 }, + [PIX_FMT_YUV444P10LE] = { 1 , 0 }, +}; int sws_isSupportedInput(enum PixelFormat pix_fmt) { - return isSupportedIn(pix_fmt); + return format_entries[pix_fmt].is_supported_in; } -#define isSupportedOut(x) ( \ - (x)==PIX_FMT_YUV420P \ - || (x)==PIX_FMT_YUVA420P \ - || (x)==PIX_FMT_YUYV422 \ - || (x)==PIX_FMT_UYVY422 \ - || (x)==PIX_FMT_YUV444P \ - || (x)==PIX_FMT_YUV422P \ - || (x)==PIX_FMT_YUV411P \ - || (x)==PIX_FMT_YUVJ420P \ - || (x)==PIX_FMT_YUVJ422P \ - || (x)==PIX_FMT_YUVJ440P \ - || (x)==PIX_FMT_YUVJ444P \ - || isRGBinBytes(x) \ - || isBGRinBytes(x) \ - || (x)==PIX_FMT_RGB565 \ - || (x)==PIX_FMT_RGB555 \ - || (x)==PIX_FMT_RGB444 \ - || (x)==PIX_FMT_BGR565 \ - || (x)==PIX_FMT_BGR555 \ - || (x)==PIX_FMT_BGR444 \ - || (x)==PIX_FMT_RGB8 \ - || (x)==PIX_FMT_BGR8 \ - || (x)==PIX_FMT_RGB4_BYTE \ - || (x)==PIX_FMT_BGR4_BYTE \ - || (x)==PIX_FMT_RGB4 \ - || (x)==PIX_FMT_BGR4 \ - || (x)==PIX_FMT_MONOBLACK \ - || (x)==PIX_FMT_MONOWHITE \ - || (x)==PIX_FMT_NV12 \ - || (x)==PIX_FMT_NV21 \ - || (x)==PIX_FMT_GRAY16BE \ - || (x)==PIX_FMT_GRAY16LE \ - || (x)==PIX_FMT_GRAY8 \ - || (x)==PIX_FMT_YUV410P \ - || (x)==PIX_FMT_YUV440P \ - || (x)==PIX_FMT_YUV422P10 \ - || (x)==PIX_FMT_YUV420P9LE \ - || (x)==PIX_FMT_YUV420P10LE \ - || (x)==PIX_FMT_YUV420P16LE \ - || (x)==PIX_FMT_YUV422P16LE \ - || (x)==PIX_FMT_YUV444P16LE \ - || (x)==PIX_FMT_YUV420P9BE \ - 
|| (x)==PIX_FMT_YUV420P10BE \ - || (x)==PIX_FMT_YUV420P16BE \ - || (x)==PIX_FMT_YUV422P16BE \ - || (x)==PIX_FMT_YUV444P16BE \ - ) - int sws_isSupportedOutput(enum PixelFormat pix_fmt) { - return isSupportedOut(pix_fmt); + return format_entries[pix_fmt].is_supported_out; } extern const int32_t ff_yuv2rgb_coeffs[8][4]; @@ -708,7 +670,9 @@ static void getSubSampleFactors(int *h, int *v, enum PixelFormat format) *v = av_pix_fmt_descriptors[format].log2_chroma_h; } -int sws_setColorspaceDetails(SwsContext *c, const int inv_table[4], int srcRange, const int table[4], int dstRange, int brightness, int contrast, int saturation) +int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4], + int srcRange, const int table[4], int dstRange, + int brightness, int contrast, int saturation) { memcpy(c->srcColorspaceTable, inv_table, sizeof(int)*4); memcpy(c->dstColorspaceTable, table, sizeof(int)*4); @@ -731,7 +695,9 @@ int sws_setColorspaceDetails(SwsContext *c, const int inv_table[4], int srcRange return 0; } -int sws_getColorspaceDetails(SwsContext *c, int **inv_table, int *srcRange, int **table, int *dstRange, int *brightness, int *contrast, int *saturation) +int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table, + int *srcRange, int **table, int *dstRange, + int *brightness, int *contrast, int *saturation) { if (!c || isYUV(c->dstFormat) || isGray(c->dstFormat)) return -1; @@ -769,7 +735,7 @@ SwsContext *sws_alloc_context(void) int sws_init_context(SwsContext *c, SwsFilter *srcFilter, SwsFilter *dstFilter) { - int i; + int i, j; int usesVFilter, usesHFilter; int unscaled; SwsFilter dummyFilter= {NULL, NULL, NULL, NULL}; @@ -777,7 +743,7 @@ int sws_init_context(SwsContext *c, SwsFilter *srcFilter, SwsFilter *dstFilter) int srcH= c->srcH; int dstW= c->dstW; int dstH= c->dstH; - int dst_stride = FFALIGN(dstW * sizeof(int16_t)+66, 16), dst_stride_px = dst_stride >> 1; + int dst_stride = FFALIGN(dstW * sizeof(int16_t)+66, 16); int flags, cpu_flags; enum PixelFormat srcFormat= c->srcFormat; enum PixelFormat dstFormat= c->dstFormat; @@ -789,11 +755,11 @@ int sws_init_context(SwsContext *c, SwsFilter *srcFilter, SwsFilter *dstFilter) unscaled = (srcW == dstW && srcH == dstH); - if (!isSupportedIn(srcFormat)) { + if (!sws_isSupportedInput(srcFormat)) { av_log(c, AV_LOG_ERROR, "%s is not supported as input pixel format\n", av_get_pix_fmt_name(srcFormat)); return AVERROR(EINVAL); } - if (!isSupportedOut(dstFormat)) { + if (!sws_isSupportedOutput(dstFormat)) { av_log(c, AV_LOG_ERROR, "%s is not supported as output pixel format\n", av_get_pix_fmt_name(dstFormat)); return AVERROR(EINVAL); } @@ -874,8 +840,14 @@ int sws_init_context(SwsContext *c, SwsFilter *srcFilter, SwsFilter *dstFilter) } } + c->scalingBpp = FFMAX(av_pix_fmt_descriptors[srcFormat].comp[0].depth_minus1, + av_pix_fmt_descriptors[dstFormat].comp[0].depth_minus1) >= 15 ? 16 : 8; + + if (c->scalingBpp == 16) + dst_stride <<= 1; + av_assert0(c->scalingBpp<=16); FF_ALLOC_OR_GOTO(c, c->formatConvBuffer, FFALIGN(srcW*2+78, 16) * 2, fail); - if (HAVE_MMX2 && cpu_flags & AV_CPU_FLAG_MMX2) { + if (HAVE_MMX2 && cpu_flags & AV_CPU_FLAG_MMX2 && c->scalingBpp == 8) { c->canMMX2BeUsed= (dstW >=srcW && (dstW&31)==0 && (srcW&15)==0) ? 
1 : 0; if (!c->canMMX2BeUsed && dstW >=srcW && (srcW&15)==0 && (flags&SWS_FAST_BILINEAR)) { if (flags&SWS_PRINT_INFO) @@ -901,7 +873,7 @@ int sws_init_context(SwsContext *c, SwsFilter *srcFilter, SwsFilter *dstFilter) c->chrXInc+= 20; } //we don't use the x86 asm scaler if MMX is available - else if (HAVE_MMX && cpu_flags & AV_CPU_FLAG_MMX) { + else if (HAVE_MMX && cpu_flags & AV_CPU_FLAG_MMX && c->scalingBpp == 8) { c->lumXInc = ((srcW-2)<<16)/(dstW-2) - 20; c->chrXInc = ((c->chrSrcW-2)<<16)/(c->chrDstW-2) - 20; } @@ -1029,25 +1001,32 @@ int sws_init_context(SwsContext *c, SwsFilter *srcFilter, SwsFilter *dstFilter) //Note we need at least one pixel more at the end because of the MMX code (just in case someone wanna replace the 4000/8000) /* align at 16 bytes for AltiVec */ for (i=0; i<c->vLumBufSize; i++) { - FF_ALLOCZ_OR_GOTO(c, c->lumPixBuf[i+c->vLumBufSize], dst_stride+1, fail); + FF_ALLOCZ_OR_GOTO(c, c->lumPixBuf[i+c->vLumBufSize], dst_stride+16, fail); c->lumPixBuf[i] = c->lumPixBuf[i+c->vLumBufSize]; } - c->uv_off = dst_stride_px; - c->uv_offx2 = dst_stride; + // 64 / c->scalingBpp is the same as 16 / sizeof(scaling_intermediate) + c->uv_off = (dst_stride>>1) + 64 / c->scalingBpp; + c->uv_offx2 = dst_stride + 16; for (i=0; i<c->vChrBufSize; i++) { - FF_ALLOC_OR_GOTO(c, c->chrUPixBuf[i+c->vChrBufSize], dst_stride*2+1, fail); + FF_ALLOC_OR_GOTO(c, c->chrUPixBuf[i+c->vChrBufSize], dst_stride*2+32, fail); c->chrUPixBuf[i] = c->chrUPixBuf[i+c->vChrBufSize]; - c->chrVPixBuf[i] = c->chrVPixBuf[i+c->vChrBufSize] = c->chrUPixBuf[i] + dst_stride_px; + c->chrVPixBuf[i] = c->chrVPixBuf[i+c->vChrBufSize] = c->chrUPixBuf[i] + (dst_stride >> 1) + 8; } if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) for (i=0; i<c->vLumBufSize; i++) { - FF_ALLOCZ_OR_GOTO(c, c->alpPixBuf[i+c->vLumBufSize], dst_stride+1, fail); + FF_ALLOCZ_OR_GOTO(c, c->alpPixBuf[i+c->vLumBufSize], dst_stride+16, fail); c->alpPixBuf[i] = c->alpPixBuf[i+c->vLumBufSize]; } //try to avoid drawing green stuff between the right end and the stride end for (i=0; i<c->vChrBufSize; i++) - memset(c->chrUPixBuf[i], 64, dst_stride*2+1); + if(av_pix_fmt_descriptors[c->dstFormat].comp[0].depth_minus1 == 15){ + av_assert0(c->scalingBpp == 16); + for(j=0; j<dst_stride/2+1; j++) + ((int32_t*)(c->chrUPixBuf[i]))[j] = 1<<18; + } else + for(j=0; j<dst_stride+1; j++) + ((int16_t*)(c->chrUPixBuf[i]))[j] = 1<<14; assert(c->chrDstH <= dstH); @@ -1086,37 +1065,22 @@ int sws_init_context(SwsContext *c, SwsFilter *srcFilter, SwsFilter *dstFilter) if (c->canMMX2BeUsed && (flags&SWS_FAST_BILINEAR)) av_log(c, AV_LOG_VERBOSE, "using FAST_BILINEAR MMX2 scaler for horizontal scaling\n"); else { - if (c->hLumFilterSize==4) - av_log(c, AV_LOG_VERBOSE, "using 4-tap MMX scaler for horizontal luminance scaling\n"); - else if (c->hLumFilterSize==8) - av_log(c, AV_LOG_VERBOSE, "using 8-tap MMX scaler for horizontal luminance scaling\n"); - else - av_log(c, AV_LOG_VERBOSE, "using n-tap MMX scaler for horizontal luminance scaling\n"); - - if (c->hChrFilterSize==4) - av_log(c, AV_LOG_VERBOSE, "using 4-tap MMX scaler for horizontal chrominance scaling\n"); - else if (c->hChrFilterSize==8) - av_log(c, AV_LOG_VERBOSE, "using 8-tap MMX scaler for horizontal chrominance scaling\n"); - else - av_log(c, AV_LOG_VERBOSE, "using n-tap MMX scaler for horizontal chrominance scaling\n"); + av_log(c, AV_LOG_VERBOSE, "using %s-tap MMX scaler for horizontal luminance scaling\n", + c->hLumFilterSize == 4 ? "4" : + c->hLumFilterSize == 8 ? 
"8" : "n"); + av_log(c, AV_LOG_VERBOSE, "using %s-tap MMX scaler for horizontal chrominance scaling\n", + c->hChrFilterSize == 4 ? "4" : + c->hChrFilterSize == 8 ? "8" : "n"); } } else { -#if HAVE_MMX - av_log(c, AV_LOG_VERBOSE, "using x86 asm scaler for horizontal scaling\n"); -#else - if (flags & SWS_FAST_BILINEAR) - av_log(c, AV_LOG_VERBOSE, "using FAST_BILINEAR C scaler for horizontal scaling\n"); - else - av_log(c, AV_LOG_VERBOSE, "using C scaler for horizontal scaling\n"); -#endif + av_log(c, AV_LOG_VERBOSE, "using %s scaler for horizontal scaling\n", + HAVE_MMX ? "x86 asm" : + flags & SWS_FAST_BILINEAR ? "FAST_BILINEAR C" : "C"); } if (isPlanarYUV(dstFormat)) { - if (c->vLumFilterSize==1) - av_log(c, AV_LOG_VERBOSE, "using 1-tap %s \"scaler\" for vertical scaling (YV12 like)\n", - (HAVE_MMX && cpu_flags & AV_CPU_FLAG_MMX) ? "MMX" : "C"); - else - av_log(c, AV_LOG_VERBOSE, "using n-tap %s scaler for vertical scaling (YV12 like)\n", - (HAVE_MMX && cpu_flags & AV_CPU_FLAG_MMX) ? "MMX" : "C"); + av_log(c, AV_LOG_VERBOSE, "using %s-tap %s \"scaler\" for vertical scaling (YV12 like)\n", + c->vLumFilterSize == 1 ? "1" : "n", + HAVE_MMX && cpu_flags & AV_CPU_FLAG_MMX ? "MMX" : "C"); } else { if (c->vLumFilterSize==1 && c->vChrFilterSize==2) av_log(c, AV_LOG_VERBOSE, "using 1-tap %s \"scaler\" for vertical luminance scaling (BGR)\n" diff --git a/libswscale/x86/swscale_template.c b/libswscale/x86/swscale_template.c index 25399fadef..87248e76fa 100644 --- a/libswscale/x86/swscale_template.c +++ b/libswscale/x86/swscale_template.c @@ -70,29 +70,67 @@ : "%"REG_d, "%"REG_S\ ); +#if !COMPILE_TEMPLATE_MMX2 +static av_always_inline void +dither_8to16(SwsContext *c, const uint8_t *srcDither, int rot) +{ + if (rot) { + __asm__ volatile("pxor %%mm0, %%mm0\n\t" + "movq (%0), %%mm3\n\t" + "movq %%mm3, %%mm4\n\t" + "psrlq $24, %%mm3\n\t" + "psllq $40, %%mm4\n\t" + "por %%mm4, %%mm3\n\t" + "movq %%mm3, %%mm4\n\t" + "punpcklbw %%mm0, %%mm3\n\t" + "punpckhbw %%mm0, %%mm4\n\t" + "psraw $4, %%mm3\n\t" + "psraw $4, %%mm4\n\t" + "movq %%mm3, "DITHER16"+0(%1)\n\t" + "movq %%mm4, "DITHER16"+8(%1)\n\t" + :: "r"(srcDither), "r"(&c->redDither) + ); + } else { + __asm__ volatile("pxor %%mm0, %%mm0\n\t" + "movq (%0), %%mm3\n\t" + "movq %%mm3, %%mm4\n\t" + "punpcklbw %%mm0, %%mm3\n\t" + "punpckhbw %%mm0, %%mm4\n\t" + "psraw $4, %%mm3\n\t" + "psraw $4, %%mm4\n\t" + "movq %%mm3, "DITHER16"+0(%1)\n\t" + "movq %%mm4, "DITHER16"+8(%1)\n\t" + :: "r"(srcDither), "r"(&c->redDither) + ); + } +} +#endif + static void RENAME(yuv2yuvX)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, - uint8_t *dest, uint8_t *uDest, uint8_t *vDest, - uint8_t *aDest, int dstW, int chrDstW, - const uint8_t *lumDither, const uint8_t *chrDither) + uint8_t *dest[4], int dstW, int chrDstW) { int i; + uint8_t *yDest = dest[0], *uDest = dest[1], *vDest = dest[2], + *aDest = CONFIG_SWSCALE_ALPHA ? 
dest[3] : NULL; + const uint8_t *lumDither = c->lumDither8, *chrDither = c->chrDither8; + if (uDest) { - x86_reg uv_off = c->uv_off; - for(i=0; i<8; i++) c->dither16[i] = chrDither[i]>>4; + x86_reg uv_off = c->uv_offx2 >> 1; + dither_8to16(c, chrDither, 0); YSCALEYUV2YV12X(CHR_MMX_FILTER_OFFSET, uDest, chrDstW, 0) - for(i=0; i<8; i++) c->dither16[i] = chrDither[(i+3)&7]>>4; + dither_8to16(c, chrDither, 1); YSCALEYUV2YV12X(CHR_MMX_FILTER_OFFSET, vDest - uv_off, chrDstW + uv_off, uv_off) } - for(i=0; i<8; i++) c->dither16[i] = lumDither[i]>>4; + dither_8to16(c, lumDither, 0); if (CONFIG_SWSCALE_ALPHA && aDest) { YSCALEYUV2YV12X(ALP_MMX_FILTER_OFFSET, aDest, dstW, 0) } - YSCALEYUV2YV12X(LUM_MMX_FILTER_OFFSET, dest, dstW, 0) + YSCALEYUV2YV12X(LUM_MMX_FILTER_OFFSET, yDest, dstW, 0) } #define YSCALEYUV2YV12X_ACCURATE(offset, dest, end, pos) \ @@ -102,10 +140,6 @@ static void RENAME(yuv2yuvX)(SwsContext *c, const int16_t *lumFilter, "movq "DITHER32"+8(%0), %%mm5 \n\t"\ "movq "DITHER32"+16(%0), %%mm6 \n\t"\ "movq "DITHER32"+24(%0), %%mm7 \n\t"\ - "pxor %%mm4, %%mm4 \n\t"\ - "pxor %%mm5, %%mm5 \n\t"\ - "pxor %%mm6, %%mm6 \n\t"\ - "pxor %%mm7, %%mm7 \n\t"\ "mov (%%"REG_d"), %%"REG_S" \n\t"\ ".p2align 4 \n\t"\ "1: \n\t"\ @@ -155,42 +189,105 @@ static void RENAME(yuv2yuvX)(SwsContext *c, const int16_t *lumFilter, : "%"REG_a, "%"REG_d, "%"REG_S\ ); +#if !COMPILE_TEMPLATE_MMX2 +static av_always_inline void +dither_8to32(SwsContext *c, const uint8_t *srcDither, int rot) +{ +int i; +if(rot) for(i=0; i<8; i++) c->dither32[i] = srcDither[(i+3)&7]<<12; +else for(i=0; i<8; i++) c->dither32[i] = srcDither[i&7]<<12; +return; + + if (rot) { + __asm__ volatile("pxor %%mm0, %%mm0\n\t" + "movq (%0), %%mm4\n\t" + "movq %%mm4, %%mm5\n\t" + "psrlq $24, %%mm4\n\t" + "psllq $40, %%mm5\n\t" + "por %%mm5, %%mm4\n\t" + "movq %%mm4, %%mm6\n\t" + "punpcklbw %%mm0, %%mm4\n\t" + "punpckhbw %%mm0, %%mm6\n\t" + "movq %%mm4, %%mm5\n\t" + "movq %%mm6, %%mm7\n\t" + "punpcklwd %%mm0, %%mm4\n\t" + "punpckhwd %%mm0, %%mm5\n\t" + "punpcklwd %%mm0, %%mm6\n\t" + "punpckhwd %%mm0, %%mm7\n\t" + "psllw $12, %%mm4\n\t" + "psllw $12, %%mm5\n\t" + "psllw $12, %%mm6\n\t" + "psllw $12, %%mm7\n\t" + "movq %%mm4, "DITHER32"+0(%1)\n\t" + "movq %%mm5, "DITHER32"+8(%1)\n\t" + "movq %%mm6, "DITHER32"+16(%1)\n\t" + "movq %%mm7, "DITHER32"+24(%1)\n\t" + :: "r"(srcDither), "r"(&c->redDither) + ); + } else { + __asm__ volatile("pxor %%mm0, %%mm0\n\t" + "movq (%0), %%mm4\n\t" + "movq %%mm4, %%mm6\n\t" + "punpcklbw %%mm0, %%mm4\n\t" + "punpckhbw %%mm0, %%mm6\n\t" + "movq %%mm4, %%mm5\n\t" + "movq %%mm6, %%mm7\n\t" + "punpcklwd %%mm0, %%mm4\n\t" + "punpckhwd %%mm0, %%mm5\n\t" + "punpcklwd %%mm0, %%mm6\n\t" + "punpckhwd %%mm0, %%mm7\n\t" + "psllw $12, %%mm4\n\t" + "psllw $12, %%mm5\n\t" + "psllw $12, %%mm6\n\t" + "psllw $12, %%mm7\n\t" + "movq %%mm4, "DITHER32"+0(%1)\n\t" + "movq %%mm5, "DITHER32"+8(%1)\n\t" + "movq %%mm6, "DITHER32"+16(%1)\n\t" + "movq %%mm7, "DITHER32"+24(%1)\n\t" + :: "r"(srcDither), "r"(&c->redDither) + ); + } +} +#endif + static void RENAME(yuv2yuvX_ar)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, - uint8_t *dest, uint8_t *uDest, uint8_t *vDest, - uint8_t *aDest, int dstW, int chrDstW, - const uint8_t *lumDither, const uint8_t *chrDither) + uint8_t *dest[4], int dstW, int chrDstW) { int i; + uint8_t *yDest = dest[0], *uDest = dest[1], *vDest = dest[2], + *aDest = 
CONFIG_SWSCALE_ALPHA ? dest[3] : NULL; + const uint8_t *lumDither = c->lumDither8, *chrDither = c->chrDither8; + if (uDest) { - x86_reg uv_off = c->uv_off; - for(i=0; i<8; i++) c->dither32[i] = chrDither[i]<<12; + x86_reg uv_off = c->uv_offx2 >> 1; + dither_8to32(c, chrDither, 0); YSCALEYUV2YV12X_ACCURATE(CHR_MMX_FILTER_OFFSET, uDest, chrDstW, 0) - for(i=0; i<8; i++) c->dither32[i] = chrDither[(i+3)&7]<<12; + dither_8to32(c, chrDither, 1); YSCALEYUV2YV12X_ACCURATE(CHR_MMX_FILTER_OFFSET, vDest - uv_off, chrDstW + uv_off, uv_off) } - for(i=0; i<8; i++) c->dither32[i] = lumDither[i]<<12; + dither_8to32(c, lumDither, 0); if (CONFIG_SWSCALE_ALPHA && aDest) { YSCALEYUV2YV12X_ACCURATE(ALP_MMX_FILTER_OFFSET, aDest, dstW, 0) } - YSCALEYUV2YV12X_ACCURATE(LUM_MMX_FILTER_OFFSET, dest, dstW, 0) + YSCALEYUV2YV12X_ACCURATE(LUM_MMX_FILTER_OFFSET, yDest, dstW, 0) } static void RENAME(yuv2yuv1)(SwsContext *c, const int16_t *lumSrc, const int16_t *chrUSrc, const int16_t *chrVSrc, const int16_t *alpSrc, - uint8_t *dest, uint8_t *uDest, uint8_t *vDest, - uint8_t *aDest, int dstW, int chrDstW, - const uint8_t *lumDither, const uint8_t *chrDither) + uint8_t *dst[4], int dstW, int chrDstW) { int p= 4; - const int16_t *src[4]= { alpSrc + dstW, lumSrc + dstW, chrUSrc + chrDstW, chrVSrc + chrDstW }; - uint8_t *dst[4]= { aDest, dest, uDest, vDest }; - x86_reg counter[4]= { dstW, dstW, chrDstW, chrDstW }; + const int16_t *src[4]= { + lumSrc + dstW, chrUSrc + chrDstW, + chrVSrc + chrDstW, alpSrc + dstW + }; + x86_reg counter[4]= { dstW, chrDstW, chrDstW, dstW }; while (p--) { if (dst[p]) { @@ -217,23 +314,24 @@ static void RENAME(yuv2yuv1)(SwsContext *c, const int16_t *lumSrc, static void RENAME(yuv2yuv1_ar)(SwsContext *c, const int16_t *lumSrc, const int16_t *chrUSrc, const int16_t *chrVSrc, const int16_t *alpSrc, - uint8_t *dest, uint8_t *uDest, uint8_t *vDest, - uint8_t *aDest, int dstW, int chrDstW, - const uint8_t *lumDither, const uint8_t *chrDither) + uint8_t *dst[4], int dstW, int chrDstW) { int p= 4; - const int16_t *src[4]= { alpSrc + dstW, lumSrc + dstW, chrUSrc + chrDstW, chrVSrc + chrDstW }; - uint8_t *dst[4]= { aDest, dest, uDest, vDest }; - x86_reg counter[4]= { dstW, dstW, chrDstW, chrDstW }; + const int16_t *src[4]= { + lumSrc + dstW, chrUSrc + chrDstW, + chrVSrc + chrDstW, alpSrc + dstW + }; + x86_reg counter[4]= { dstW, chrDstW, chrDstW, dstW }; + const uint8_t *lumDither = c->lumDither8, *chrDither = c->chrDither8; while (p--) { if (dst[p]) { int i; - for(i=0; i<8; i++) c->dither16[i] = i<2 ? lumDither[i] : chrDither[i]; + for(i=0; i<8; i++) c->dither16[i] = (p == 2 || p == 3) ? lumDither[i] : chrDither[i]; __asm__ volatile( "mov %2, %%"REG_a" \n\t" - "movq 0(%3), %%mm6 \n\t" - "movq 8(%3), %%mm7 \n\t" + "movq "DITHER16"+0(%3), %%mm6 \n\t" + "movq "DITHER16"+8(%3), %%mm7 \n\t" ".p2align 4 \n\t" /* FIXME Unroll? 
*/ "1: \n\t" "movq (%0, %%"REG_a", 2), %%mm0 \n\t" @@ -247,7 +345,7 @@ static void RENAME(yuv2yuv1_ar)(SwsContext *c, const int16_t *lumSrc, "add $8, %%"REG_a" \n\t" "jnc 1b \n\t" :: "r" (src[p]), "r" (dst[p] + counter[p]), - "g" (-counter[p]), "r"(c->dither16) + "g" (-counter[p]), "r"(&c->redDither) : "%"REG_a ); } @@ -479,7 +577,7 @@ static void RENAME(yuv2rgb32_X_ar)(SwsContext *c, const int16_t *lumFilter, { x86_reg dummy=0; x86_reg dstW_reg = dstW; - x86_reg uv_off = c->uv_off << 1; + x86_reg uv_off = c->uv_offx2; if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) { YSCALEYUV2PACKEDX_ACCURATE @@ -512,7 +610,7 @@ static void RENAME(yuv2rgb32_X)(SwsContext *c, const int16_t *lumFilter, { x86_reg dummy=0; x86_reg dstW_reg = dstW; - x86_reg uv_off = c->uv_off << 1; + x86_reg uv_off = c->uv_offx2; if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) { YSCALEYUV2PACKEDX @@ -569,7 +667,7 @@ static void RENAME(yuv2rgb565_X_ar)(SwsContext *c, const int16_t *lumFilter, { x86_reg dummy=0; x86_reg dstW_reg = dstW; - x86_reg uv_off = c->uv_off << 1; + x86_reg uv_off = c->uv_offx2; YSCALEYUV2PACKEDX_ACCURATE YSCALEYUV2RGBX @@ -593,7 +691,7 @@ static void RENAME(yuv2rgb565_X)(SwsContext *c, const int16_t *lumFilter, { x86_reg dummy=0; x86_reg dstW_reg = dstW; - x86_reg uv_off = c->uv_off << 1; + x86_reg uv_off = c->uv_offx2; YSCALEYUV2PACKEDX YSCALEYUV2RGBX @@ -646,7 +744,7 @@ static void RENAME(yuv2rgb555_X_ar)(SwsContext *c, const int16_t *lumFilter, { x86_reg dummy=0; x86_reg dstW_reg = dstW; - x86_reg uv_off = c->uv_off << 1; + x86_reg uv_off = c->uv_offx2; YSCALEYUV2PACKEDX_ACCURATE YSCALEYUV2RGBX @@ -670,7 +768,7 @@ static void RENAME(yuv2rgb555_X)(SwsContext *c, const int16_t *lumFilter, { x86_reg dummy=0; x86_reg dstW_reg = dstW; - x86_reg uv_off = c->uv_off << 1; + x86_reg uv_off = c->uv_offx2; YSCALEYUV2PACKEDX YSCALEYUV2RGBX @@ -803,7 +901,7 @@ static void RENAME(yuv2bgr24_X_ar)(SwsContext *c, const int16_t *lumFilter, { x86_reg dummy=0; x86_reg dstW_reg = dstW; - x86_reg uv_off = c->uv_off << 1; + x86_reg uv_off = c->uv_offx2; YSCALEYUV2PACKEDX_ACCURATE YSCALEYUV2RGBX @@ -827,7 +925,7 @@ static void RENAME(yuv2bgr24_X)(SwsContext *c, const int16_t *lumFilter, { x86_reg dummy=0; x86_reg dstW_reg = dstW; - x86_reg uv_off = c->uv_off << 1; + x86_reg uv_off = c->uv_offx2; YSCALEYUV2PACKEDX YSCALEYUV2RGBX @@ -868,7 +966,7 @@ static void RENAME(yuv2yuyv422_X_ar)(SwsContext *c, const int16_t *lumFilter, { x86_reg dummy=0; x86_reg dstW_reg = dstW; - x86_reg uv_off = c->uv_off << 1; + x86_reg uv_off = c->uv_offx2; YSCALEYUV2PACKEDX_ACCURATE /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ @@ -889,7 +987,7 @@ static void RENAME(yuv2yuyv422_X)(SwsContext *c, const int16_t *lumFilter, { x86_reg dummy=0; x86_reg dstW_reg = dstW; - x86_reg uv_off = c->uv_off << 1; + x86_reg uv_off = c->uv_offx2; YSCALEYUV2PACKEDX /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ @@ -981,14 +1079,16 @@ static void RENAME(yuv2yuyv422_X)(SwsContext *c, const int16_t *lumFilter, /** * vertical bilinear scale YV12 to RGB */ -static void RENAME(yuv2rgb32_2)(SwsContext *c, const uint16_t *buf0, - const uint16_t *buf1, const uint16_t *ubuf0, - const uint16_t *ubuf1, const uint16_t *vbuf0, - const uint16_t *vbuf1, const uint16_t *abuf0, - const uint16_t *abuf1, uint8_t *dest, +static void RENAME(yuv2rgb32_2)(SwsContext *c, const int16_t *buf[2], + const int16_t *ubuf[2], const int16_t *vbuf[2], + const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y) { + const int16_t *buf0 = buf[0], *buf1 = buf[1], + *ubuf0 = ubuf[0], *ubuf1 = ubuf[1]; + if 
(CONFIG_SWSCALE_ALPHA && c->alpPixBuf) { + const int16_t *abuf0 = abuf[0], *abuf1 = abuf[1]; #if ARCH_X86_64 __asm__ volatile( YSCALEYUV2RGB(%%r8, %5) @@ -1043,13 +1143,14 @@ static void RENAME(yuv2rgb32_2)(SwsContext *c, const uint16_t *buf0, } } -static void RENAME(yuv2bgr24_2)(SwsContext *c, const uint16_t *buf0, - const uint16_t *buf1, const uint16_t *ubuf0, - const uint16_t *ubuf1, const uint16_t *vbuf0, - const uint16_t *vbuf1, const uint16_t *abuf0, - const uint16_t *abuf1, uint8_t *dest, +static void RENAME(yuv2bgr24_2)(SwsContext *c, const int16_t *buf[2], + const int16_t *ubuf[2], const int16_t *vbuf[2], + const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y) { + const int16_t *buf0 = buf[0], *buf1 = buf[1], + *ubuf0 = ubuf[0], *ubuf1 = ubuf[1]; + //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :( __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" @@ -1065,13 +1166,14 @@ static void RENAME(yuv2bgr24_2)(SwsContext *c, const uint16_t *buf0, ); } -static void RENAME(yuv2rgb555_2)(SwsContext *c, const uint16_t *buf0, - const uint16_t *buf1, const uint16_t *ubuf0, - const uint16_t *ubuf1, const uint16_t *vbuf0, - const uint16_t *vbuf1, const uint16_t *abuf0, - const uint16_t *abuf1, uint8_t *dest, +static void RENAME(yuv2rgb555_2)(SwsContext *c, const int16_t *buf[2], + const int16_t *ubuf[2], const int16_t *vbuf[2], + const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y) { + const int16_t *buf0 = buf[0], *buf1 = buf[1], + *ubuf0 = ubuf[0], *ubuf1 = ubuf[1]; + //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :( __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" @@ -1093,13 +1195,14 @@ static void RENAME(yuv2rgb555_2)(SwsContext *c, const uint16_t *buf0, ); } -static void RENAME(yuv2rgb565_2)(SwsContext *c, const uint16_t *buf0, - const uint16_t *buf1, const uint16_t *ubuf0, - const uint16_t *ubuf1, const uint16_t *vbuf0, - const uint16_t *vbuf1, const uint16_t *abuf0, - const uint16_t *abuf1, uint8_t *dest, +static void RENAME(yuv2rgb565_2)(SwsContext *c, const int16_t *buf[2], + const int16_t *ubuf[2], const int16_t *vbuf[2], + const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y) { + const int16_t *buf0 = buf[0], *buf1 = buf[1], + *ubuf0 = ubuf[0], *ubuf1 = ubuf[1]; + //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :( __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" @@ -1161,13 +1264,14 @@ static void RENAME(yuv2rgb565_2)(SwsContext *c, const uint16_t *buf0, #define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c) -static void RENAME(yuv2yuyv422_2)(SwsContext *c, const uint16_t *buf0, - const uint16_t *buf1, const uint16_t *ubuf0, - const uint16_t *ubuf1, const uint16_t *vbuf0, - const uint16_t *vbuf1, const uint16_t *abuf0, - const uint16_t *abuf1, uint8_t *dest, +static void RENAME(yuv2yuyv422_2)(SwsContext *c, const int16_t *buf[2], + const int16_t *ubuf[2], const int16_t *vbuf[2], + const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y) { + const int16_t *buf0 = buf[0], *buf1 = buf[1], + *ubuf0 = ubuf[0], *ubuf1 = ubuf[1]; + //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :( __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" @@ -1300,14 +1404,13 @@ static void RENAME(yuv2yuyv422_2)(SwsContext *c, const uint16_t *buf0, /** * YV12 to RGB without scaling or interpolating */ -static void RENAME(yuv2rgb32_1)(SwsContext *c, const uint16_t *buf0, - 
const uint16_t *ubuf0, const uint16_t *ubuf1, - const uint16_t *vbuf0, const uint16_t *vbuf1, - const uint16_t *abuf0, uint8_t *dest, - int dstW, int uvalpha, enum PixelFormat dstFormat, - int flags, int y) +static void RENAME(yuv2rgb32_1)(SwsContext *c, const int16_t *buf0, + const int16_t *ubuf[2], const int16_t *bguf[2], + const int16_t *abuf0, uint8_t *dest, + int dstW, int uvalpha, int y) { - const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1 + const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1]; + const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) { @@ -1368,14 +1471,13 @@ static void RENAME(yuv2rgb32_1)(SwsContext *c, const uint16_t *buf0, } } -static void RENAME(yuv2bgr24_1)(SwsContext *c, const uint16_t *buf0, - const uint16_t *ubuf0, const uint16_t *ubuf1, - const uint16_t *vbuf0, const uint16_t *vbuf1, - const uint16_t *abuf0, uint8_t *dest, - int dstW, int uvalpha, enum PixelFormat dstFormat, - int flags, int y) +static void RENAME(yuv2bgr24_1)(SwsContext *c, const int16_t *buf0, + const int16_t *ubuf[2], const int16_t *bguf[2], + const int16_t *abuf0, uint8_t *dest, + int dstW, int uvalpha, int y) { - const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1 + const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1]; + const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster __asm__ volatile( @@ -1406,14 +1508,13 @@ static void RENAME(yuv2bgr24_1)(SwsContext *c, const uint16_t *buf0, } } -static void RENAME(yuv2rgb555_1)(SwsContext *c, const uint16_t *buf0, - const uint16_t *ubuf0, const uint16_t *ubuf1, - const uint16_t *vbuf0, const uint16_t *vbuf1, - const uint16_t *abuf0, uint8_t *dest, - int dstW, int uvalpha, enum PixelFormat dstFormat, - int flags, int y) +static void RENAME(yuv2rgb555_1)(SwsContext *c, const int16_t *buf0, + const int16_t *ubuf[2], const int16_t *bguf[2], + const int16_t *abuf0, uint8_t *dest, + int dstW, int uvalpha, int y) { - const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1 + const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1]; + const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster __asm__ volatile( @@ -1456,14 +1557,13 @@ static void RENAME(yuv2rgb555_1)(SwsContext *c, const uint16_t *buf0, } } -static void RENAME(yuv2rgb565_1)(SwsContext *c, const uint16_t *buf0, - const uint16_t *ubuf0, const uint16_t *ubuf1, - const uint16_t *vbuf0, const uint16_t *vbuf1, - const uint16_t *abuf0, uint8_t *dest, - int dstW, int uvalpha, enum PixelFormat dstFormat, - int flags, int y) +static void RENAME(yuv2rgb565_1)(SwsContext *c, const int16_t *buf0, + const int16_t *ubuf[2], const int16_t *bguf[2], + const int16_t *abuf0, uint8_t *dest, + int dstW, int uvalpha, int y) { - const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1 + const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1]; + const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster __asm__ volatile( @@ -1543,14 +1643,13 @@ static void RENAME(yuv2rgb565_1)(SwsContext *c, const uint16_t *buf0, "psraw $7, %%mm7 \n\t" #define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c) -static void RENAME(yuv2yuyv422_1)(SwsContext *c, 
const uint16_t *buf0, - const uint16_t *ubuf0, const uint16_t *ubuf1, - const uint16_t *vbuf0, const uint16_t *vbuf1, - const uint16_t *abuf0, uint8_t *dest, - int dstW, int uvalpha, enum PixelFormat dstFormat, - int flags, int y) +static void RENAME(yuv2yuyv422_1)(SwsContext *c, const int16_t *buf0, + const int16_t *ubuf[2], const int16_t *bguf[2], + const int16_t *abuf0, uint8_t *dest, + int dstW, int uvalpha, int y) { - const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1 + const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1]; + const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster __asm__ volatile( @@ -1630,32 +1729,6 @@ static void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, assert(src1 == src2); } -static void RENAME(LEToUV)(uint8_t *dstU, uint8_t *dstV, - const uint8_t *src1, const uint8_t *src2, - int width, uint32_t *unused) -{ - __asm__ volatile( - "mov %0, %%"REG_a" \n\t" - "1: \n\t" - "movq (%1, %%"REG_a",2), %%mm0 \n\t" - "movq 8(%1, %%"REG_a",2), %%mm1 \n\t" - "movq (%2, %%"REG_a",2), %%mm2 \n\t" - "movq 8(%2, %%"REG_a",2), %%mm3 \n\t" - "psrlw $8, %%mm0 \n\t" - "psrlw $8, %%mm1 \n\t" - "psrlw $8, %%mm2 \n\t" - "psrlw $8, %%mm3 \n\t" - "packuswb %%mm1, %%mm0 \n\t" - "packuswb %%mm3, %%mm2 \n\t" - "movq %%mm0, (%3, %%"REG_a") \n\t" - "movq %%mm2, (%4, %%"REG_a") \n\t" - "add $8, %%"REG_a" \n\t" - " js 1b \n\t" - : : "g" ((x86_reg)-width), "r" (src1+width*2), "r" (src2+width*2), "r" (dstU+width), "r" (dstV+width) - : "%"REG_a - ); -} - /* This is almost identical to the previous, end exists only because * yuy2ToY/UV)(dst, src+1, ...) would have 100% unaligned accesses. */ static void RENAME(uyvyToY)(uint8_t *dst, const uint8_t *src, @@ -1705,33 +1778,6 @@ static void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, assert(src1 == src2); } -static void RENAME(BEToUV)(uint8_t *dstU, uint8_t *dstV, - const uint8_t *src1, const uint8_t *src2, - int width, uint32_t *unused) -{ - __asm__ volatile( - "movq "MANGLE(bm01010101)", %%mm4 \n\t" - "mov %0, %%"REG_a" \n\t" - "1: \n\t" - "movq (%1, %%"REG_a",2), %%mm0 \n\t" - "movq 8(%1, %%"REG_a",2), %%mm1 \n\t" - "movq (%2, %%"REG_a",2), %%mm2 \n\t" - "movq 8(%2, %%"REG_a",2), %%mm3 \n\t" - "pand %%mm4, %%mm0 \n\t" - "pand %%mm4, %%mm1 \n\t" - "pand %%mm4, %%mm2 \n\t" - "pand %%mm4, %%mm3 \n\t" - "packuswb %%mm1, %%mm0 \n\t" - "packuswb %%mm3, %%mm2 \n\t" - "movq %%mm0, (%3, %%"REG_a") \n\t" - "movq %%mm2, (%4, %%"REG_a") \n\t" - "add $8, %%"REG_a" \n\t" - " js 1b \n\t" - : : "g" ((x86_reg)-width), "r" (src1+width*2), "r" (src2+width*2), "r" (dstU+width), "r" (dstV+width) - : "%"REG_a - ); -} - static av_always_inline void RENAME(nvXXtoUV)(uint8_t *dst1, uint8_t *dst2, const uint8_t *src, int width) { @@ -1914,9 +1960,8 @@ static void RENAME(rgb24ToUV)(int16_t *dstU, int16_t *dstV, #if !COMPILE_TEMPLATE_MMX2 // bilinear / bicubic scaling -static void RENAME(hScale)(int16_t *dst, int dstW, - const uint8_t *src, int srcW, - int xInc, const int16_t *filter, +static void RENAME(hScale)(SwsContext *c, int16_t *dst, int dstW, + const uint8_t *src, const int16_t *filter, const int16_t *filterPos, int filterSize) { assert(filterSize % 4 == 0 && filterSize>0); @@ -2427,6 +2472,7 @@ static av_cold void RENAME(sws_init_swScale)(SwsContext *c) } } + if (c->scalingBpp == 8) { #if !COMPILE_TEMPLATE_MMX2 c->hScale = RENAME(hScale ); #endif /* !COMPILE_TEMPLATE_MMX2 */ @@ -2444,6 +2490,7 @@ static av_cold void RENAME(sws_init_swScale)(SwsContext 
*c) #if COMPILE_TEMPLATE_MMX2 } #endif /* COMPILE_TEMPLATE_MMX2 */ + } #if !COMPILE_TEMPLATE_MMX2 switch(srcFormat) { @@ -2451,13 +2498,10 @@ static av_cold void RENAME(sws_init_swScale)(SwsContext *c) case PIX_FMT_UYVY422 : c->chrToYV12 = RENAME(uyvyToUV); break; case PIX_FMT_NV12 : c->chrToYV12 = RENAME(nv12ToUV); break; case PIX_FMT_NV21 : c->chrToYV12 = RENAME(nv21ToUV); break; - case PIX_FMT_GRAY16LE : case PIX_FMT_YUV420P9LE: case PIX_FMT_YUV422P10LE: - case PIX_FMT_YUV420P10LE: - case PIX_FMT_YUV420P16LE: - case PIX_FMT_YUV422P16LE: - case PIX_FMT_YUV444P16LE: c->hScale16= RENAME(hScale16); break; + case PIX_FMT_YUV420P10LE: c->hScale16= RENAME(hScale16); break; + default: break; } #endif /* !COMPILE_TEMPLATE_MMX2 */ if (!c->chrSrcHSubSample) { @@ -2471,10 +2515,8 @@ static av_cold void RENAME(sws_init_swScale)(SwsContext *c) switch (srcFormat) { #if !COMPILE_TEMPLATE_MMX2 case PIX_FMT_YUYV422 : - case PIX_FMT_Y400A : - c->lumToYV12 = RENAME(yuy2ToY); break; - case PIX_FMT_UYVY422 : - c->lumToYV12 = RENAME(uyvyToY); break; + case PIX_FMT_Y400A : c->lumToYV12 = RENAME(yuy2ToY); break; + case PIX_FMT_UYVY422 : c->lumToYV12 = RENAME(uyvyToY); break; #endif /* !COMPILE_TEMPLATE_MMX2 */ case PIX_FMT_BGR24 : c->lumToYV12 = RENAME(bgr24ToY); break; case PIX_FMT_RGB24 : c->lumToYV12 = RENAME(rgb24ToY); break; @@ -2488,6 +2530,8 @@ static av_cold void RENAME(sws_init_swScale)(SwsContext *c) } } #endif /* !COMPILE_TEMPLATE_MMX2 */ - if(isAnyRGB(c->srcFormat)) + if(isAnyRGB(c->srcFormat) && av_pix_fmt_descriptors[c->srcFormat].comp[0].depth_minus1<15) c->hScale16= RENAME(hScale16); + if(c->scalingBpp != 8) + c->hScale16 = NULL; } diff --git a/subdir.mak b/subdir.mak index 092662fc14..9ac824fd0a 100644 --- a/subdir.mak +++ b/subdir.mak @@ -1,6 +1,6 @@ -SRC_DIR := $(SRC_PATH_BARE)/lib$(NAME) +SRC_DIR := $(SRC_PATH)/lib$(NAME) -include $(SUBDIR)../common.mak +include $(SRC_PATH)/common.mak LIBVERSION := $(lib$(NAME)_VERSION) LIBMAJOR := $(lib$(NAME)_VERSION_MAJOR) @@ -11,16 +11,17 @@ all-$(CONFIG_STATIC): $(SUBDIR)$(LIBNAME) all-$(CONFIG_SHARED): $(SUBDIR)$(SLIBNAME) $(SUBDIR)%-test.o: $(SUBDIR)%-test.c - $(CC) $(CPPFLAGS) $(CFLAGS) -DTEST -c $(CC_O) $^ + $(COMPILE_C) $(SUBDIR)%-test.o: $(SUBDIR)%.c - $(CC) $(CPPFLAGS) $(CFLAGS) -DTEST -c $(CC_O) $^ + $(COMPILE_C) $(SUBDIR)x86/%.o: $(SUBDIR)x86/%.asm $(YASMDEP) $(YASMFLAGS) -I $(<D)/ -M -o $@ $< > $(@:.o=.d) $(YASM) $(YASMFLAGS) -I $(<D)/ -o $@ $< -$(OBJS) $(SUBDIR)%.ho $(SUBDIR)%-test.o $(TESTOBJS): CPPFLAGS += -DHAVE_AV_CONFIG_H +$(OBJS) $(SUBDIR)%.ho $(TESTOBJS): CPPFLAGS += -DHAVE_AV_CONFIG_H +$(TESTOBJS): CPPFLAGS += -DTEST $(SUBDIR)$(LIBNAME): $(OBJS) $(RM) $@ @@ -33,15 +34,15 @@ install-libs-$(CONFIG_STATIC): install-lib$(NAME)-static install-libs-$(CONFIG_SHARED): install-lib$(NAME)-shared define RULES -$(SUBDIR)%$(EXESUF): $(SUBDIR)%.o - $$(LD) $(FFLDFLAGS) -o $$@ $$^ -l$(FULLNAME) $(FFEXTRALIBS) $$(ELIBS) +$(EXAMPLES) $(TESTPROGS) $(TOOLS): %$(EXESUF): %.o + $$(LD) $(LDFLAGS) -o $$@ $$^ -l$(FULLNAME) $(FFEXTRALIBS) $$(ELIBS) $(SUBDIR)$(SLIBNAME): $(SUBDIR)$(SLIBNAME_WITH_MAJOR) $(Q)cd ./$(SUBDIR) && $(LN_S) $(SLIBNAME_WITH_MAJOR) $(SLIBNAME) $(SUBDIR)$(SLIBNAME_WITH_MAJOR): $(OBJS) $(SUBDIR)lib$(NAME).ver $(SLIB_CREATE_DEF_CMD) - $$(LD) $(SHFLAGS) $(FFLDFLAGS) -o $$@ $$(filter %.o,$$^) $(FFEXTRALIBS) $(EXTRAOBJS) + $$(LD) $(SHFLAGS) $(LDFLAGS) -o $$@ $$(filter %.o,$$^) $(FFEXTRALIBS) $(EXTRAOBJS) $(SLIB_EXTRA_CMD) ifdef SUBDIR @@ -59,13 +60,12 @@ distclean:: clean install-lib$(NAME)-shared: $(SUBDIR)$(SLIBNAME) $(Q)mkdir -p 
"$(SHLIBDIR)" - $$(INSTALL) -m 755 $$< "$(SHLIBDIR)/$(SLIBNAME_WITH_VERSION)" - $$(STRIP) "$(SHLIBDIR)/$(SLIBNAME_WITH_VERSION)" - $(Q)cd "$(SHLIBDIR)" && \ - $(LN_S) $(SLIBNAME_WITH_VERSION) $(SLIBNAME_WITH_MAJOR) - $(Q)cd "$(SHLIBDIR)" && \ - $(LN_S) $(SLIBNAME_WITH_VERSION) $(SLIBNAME) - $(SLIB_INSTALL_EXTRA_CMD) + $$(INSTALL) -m 755 $$< "$(SHLIBDIR)/$(SLIB_INSTALL_NAME)" + $$(STRIP) "$(SHLIBDIR)/$(SLIB_INSTALL_NAME)" + $(Q)$(foreach F,$(SLIB_INSTALL_LINKS),cd "$(SHLIBDIR)" && $(LN_S) $(SLIB_INSTALL_NAME) $(F);) + $(if $(SLIB_INSTALL_EXTRA_SHLIB),$$(INSTALL) -m 644 $(SLIB_INSTALL_EXTRA_SHLIB:%=$(SUBDIR)%) "$(SHLIBDIR)") + $(if $(SLIB_INSTALL_EXTRA_LIB),$(Q)mkdir -p "$(LIBDIR)") + $(if $(SLIB_INSTALL_EXTRA_LIB),$$(INSTALL) -m 644 $(SLIB_INSTALL_EXTRA_LIB:%=$(SUBDIR)%) "$(LIBDIR)") install-lib$(NAME)-static: $(SUBDIR)$(LIBNAME) $(Q)mkdir -p "$(LIBDIR)" @@ -84,7 +84,8 @@ uninstall-libs:: -$(RM) "$(SHLIBDIR)/$(SLIBNAME_WITH_MAJOR)" \ "$(SHLIBDIR)/$(SLIBNAME)" \ "$(SHLIBDIR)/$(SLIBNAME_WITH_VERSION)" - -$(SLIB_UNINSTALL_EXTRA_CMD) + -$(RM) $(SLIB_INSTALL_EXTRA_SHLIB:%="$(SHLIBDIR)"%) + -$(RM) $(SLIB_INSTALL_EXTRA_LIB:%="$(LIBDIR)"%) -$(RM) "$(LIBDIR)/$(LIBNAME)" uninstall-headers:: @@ -95,7 +96,7 @@ endef $(eval $(RULES)) -$(EXAMPLES) $(TESTPROGS): $(THIS_LIB) $(DEP_LIBS) +$(EXAMPLES) $(TESTPROGS) $(TOOLS): $(THIS_LIB) $(DEP_LIBS) examples: $(EXAMPLES) testprogs: $(TESTPROGS) diff --git a/tests/Makefile b/tests/Makefile new file mode 100644 index 0000000000..34094da2b7 --- /dev/null +++ b/tests/Makefile @@ -0,0 +1,112 @@ +FFSERVER_REFFILE = $(SRC_PATH)/tests/ffserver.regression.ref + +AREF = fate-acodec-aref +VREF = fate-vsynth1-vref fate-vsynth2-vref +REFS = $(AREF) $(VREF) + +$(VREF): ffmpeg$(EXESUF) tests/vsynth1/00.pgm tests/vsynth2/00.pgm +$(AREF): ffmpeg$(EXESUF) tests/data/asynth1.sw + +ffservertest: ffserver$(EXESUF) tests/vsynth1/00.pgm tests/data/asynth1.sw + @echo + @echo "Unfortunately ffserver is broken and therefore its regression" + @echo "test fails randomly. Treat the results accordingly." 
+ @echo + $(SRC_PATH)/tests/ffserver-regression.sh $(FFSERVER_REFFILE) $(SRC_PATH)/tests/ffserver.conf + +tests/vsynth1/00.pgm: tests/videogen$(HOSTEXESUF) + @mkdir -p tests/vsynth1 + $(M)./$< 'tests/vsynth1/' + +tests/vsynth2/00.pgm: tests/rotozoom$(HOSTEXESUF) + @mkdir -p tests/vsynth2 + $(M)./$< 'tests/vsynth2/' $(SRC_PATH)/tests/lena.pnm + +tests/data/asynth1.sw: tests/audiogen$(HOSTEXESUF) + @mkdir -p tests/data + $(M)./$< $@ + +tests/data/asynth1.sw tests/vsynth%/00.pgm: TAG = GEN + +include $(SRC_PATH)/tests/fate.mak +include $(SRC_PATH)/tests/fate2.mak + +include $(SRC_PATH)/tests/fate/aac.mak +include $(SRC_PATH)/tests/fate/als.mak +include $(SRC_PATH)/tests/fate/amrnb.mak +include $(SRC_PATH)/tests/fate/amrwb.mak +include $(SRC_PATH)/tests/fate/dct.mak +include $(SRC_PATH)/tests/fate/fft.mak +include $(SRC_PATH)/tests/fate/h264.mak +include $(SRC_PATH)/tests/fate/libavutil.mak +include $(SRC_PATH)/tests/fate/mp3.mak +include $(SRC_PATH)/tests/fate/vorbis.mak +include $(SRC_PATH)/tests/fate/vp8.mak + +FATE_ACODEC = $(ACODEC_TESTS:%=fate-acodec-%) +FATE_VSYNTH1 = $(VCODEC_TESTS:%=fate-vsynth1-%) +FATE_VSYNTH2 = $(VCODEC_TESTS:%=fate-vsynth2-%) +FATE_VCODEC = $(FATE_VSYNTH1) $(FATE_VSYNTH2) +FATE_LAVF = $(LAVF_TESTS:%=fate-lavf-%) +FATE_LAVFI = $(LAVFI_TESTS:%=fate-lavfi-%) +FATE_SEEK = $(SEEK_TESTS:seek_%=fate-seek-%) + +FATE = $(FATE_ACODEC) \ + $(FATE_VCODEC) \ + $(FATE_LAVF) \ + $(FATE_LAVFI) \ + $(FATE_SEEK) \ + +$(filter-out %-aref,$(FATE_ACODEC)): $(AREF) +$(filter-out %-vref,$(FATE_VSYNTH1)): fate-vsynth1-vref +$(filter-out %-vref,$(FATE_VSYNTH2)): fate-vsynth2-vref +$(FATE_LAVF): $(REFS) +$(FATE_LAVFI): $(REFS) tools/lavfi-showfiltfmts$(EXESUF) +$(FATE_SEEK): fate-codec fate-lavf libavformat/seek-test$(EXESUF) + +$(FATE_ACODEC): CMD = codectest acodec +$(FATE_VSYNTH1): CMD = codectest vsynth1 +$(FATE_VSYNTH2): CMD = codectest vsynth2 +$(FATE_LAVF): CMD = lavftest +$(FATE_LAVFI): CMD = lavfitest +$(FATE_SEEK): CMD = seektest + +fate-codec: fate-acodec fate-vcodec +fate-acodec: $(FATE_ACODEC) +fate-vcodec: $(FATE_VCODEC) +fate-lavf: $(FATE_LAVF) +fate-lavfi: $(FATE_LAVFI) +fate-seek: $(FATE_SEEK) + +ifdef SAMPLES +FATE += $(FATE_TESTS) +fate-rsync: + rsync -vaLW rsync://fate-suite.libav.org/fate-suite/ $(SAMPLES) +else +fate-rsync: + @echo "use 'make fate-rsync SAMPLES=/path/to/samples' to sync the fate suite" +$(FATE_TESTS): + @echo "SAMPLES not specified, cannot run FATE" +endif + +FATE_UTILS = base64 tiny_psnr + +fate: $(FATE) + +$(FATE): ffmpeg$(EXESUF) $(FATE_UTILS:%=tests/%$(HOSTEXESUF)) + @echo "TEST $(@:fate-%=%)" + $(Q)$(SRC_PATH)/tests/fate-run.sh $@ "$(SAMPLES)" "$(TARGET_EXEC)" "$(TARGET_PATH)" '$(CMD)' '$(CMP)' '$(REF)' '$(FUZZ)' '$(THREADS)' '$(THREAD_TYPE)' + +fate-list: + @printf '%s\n' $(sort $(FATE)) + +clean:: testclean + +testclean: + $(RM) -r tests/vsynth1 tests/vsynth2 tests/data tools/lavfi-showfiltfmts$(EXESUF) + $(RM) $(CLEANSUFFIXES:%=tests/%) + $(RM) $(TESTTOOLS:%=tests/%$(HOSTEXESUF)) + +-include $(wildcard tests/*.d) + +.PHONY: fate* diff --git a/tests/codec-regression.sh b/tests/codec-regression.sh index b210231aea..7e9d88662e 100755 --- a/tests/codec-regression.sh +++ b/tests/codec-regression.sh @@ -11,8 +11,6 @@ set -e eval do_$test=y -rm -f "$logfile" - # generate reference for quality check if [ -n "$do_vref" ]; then do_ffmpeg $raw_ref -f image2 -vcodec pgmyuv -i $raw_src -an -f rawvideo @@ -237,6 +235,11 @@ do_video_encoding dnxhd-720p-rd.dnxhd "-threads 4 -mbd rd -s hd720 -b 90M -pix_f do_video_decoding "" "-s cif -pix_fmt yuv420p" fi +if [ 
-n "$do_dnxhd_720p_10bit" ] ; then +do_video_encoding dnxhd-720p-10bit.dnxhd "-s hd720 -b 90M -pix_fmt yuv422p10 -vframes 5 -an" +do_video_decoding "" "-s cif -pix_fmt yuv420p" +fi + if [ -n "$do_svq1" ] ; then do_video_encoding svq1.mov "-an -vcodec svq1 -qscale 3 -pix_fmt yuv410p" do_video_decoding "" "-pix_fmt yuv420p" @@ -280,14 +283,14 @@ fi if [ -n "$do_mp2" ] ; then do_audio_encoding mp2.mp2 do_audio_decoding -$tiny_psnr $pcm_dst $pcm_ref 2 1924 >> $logfile +$tiny_psnr $pcm_dst $pcm_ref 2 1924 fi if [ -n "$do_ac3_fixed" ] ; then do_audio_encoding ac3.rm "-vn -acodec ac3_fixed" # binaries configured with --disable-sse decode ac3 differently #do_audio_decoding -#$tiny_psnr $pcm_dst $pcm_ref 2 1024 >> $logfile +#$tiny_psnr $pcm_dst $pcm_ref 2 1024 fi if [ -n "$do_g726" ] ; then @@ -333,12 +336,12 @@ fi if [ -n "$do_wmav1" ] ; then do_audio_encoding wmav1.asf "-acodec wmav1" do_ffmpeg_nomd5 $pcm_dst $DEC_OPTS -i $target_path/$file -f wav -$tiny_psnr $pcm_dst $pcm_ref 2 8192 >> $logfile +$tiny_psnr $pcm_dst $pcm_ref 2 8192 fi if [ -n "$do_wmav2" ] ; then do_audio_encoding wmav2.asf "-acodec wmav2" do_ffmpeg_nomd5 $pcm_dst $DEC_OPTS -i $target_path/$file -f wav -$tiny_psnr $pcm_dst $pcm_ref 2 8192 >> $logfile +$tiny_psnr $pcm_dst $pcm_ref 2 8192 fi #if [ -n "$do_vorbis" ] ; then diff --git a/tests/copy.regression.ref b/tests/copy.regression.ref deleted file mode 100644 index 0f62a7be75..0000000000 --- a/tests/copy.regression.ref +++ /dev/null @@ -1,465 +0,0 @@ ----------------- -./tests/data/a-ac3.rm -first.nut second.nut differ: char 34, line 1 -1dd5a62b7edb3a1bcf77626af0a85bc1 first.nut ----------------- -./tests/data/a-adpcm_ima.wav -first.nut second.nut differ: char 34, line 1 -c95390143078f08db8a3bfba5789c2da first.nut ----------------- -./tests/data/a-adpcm_ms.wav -first.nut second.nut differ: char 34, line 1 -05e4d8842f4001fed506423e1a8ef963 first.nut ----------------- -./tests/data/a-adpcm_qt.aiff -first.nut second.nut differ: char 34, line 1 -7455d87f626f05e20030f4c93ec91e69 first.nut ----------------- -./tests/data/a-adpcm_swf.flv -c0402ee010a483403a655f353e184df1 first.nut ----------------- -./tests/data/a-adpcm_yam.wav -first.nut second.nut differ: char 34, line 1 -f861047f6c6f75cdf3ce7bb78a4003ad first.nut ----------------- -./tests/data/a-alac.m4a -first.nut second.nut differ: char 34, line 1 -ab152b0b01e540e74b04a807e3882083 first.nut ----------------- -./tests/data/a-asv1.avi -636fc0dfef1830cc51cf2c182bd4a7b2 first.nut ----------------- -./tests/data/a-asv2.avi -bbfc299390378c7bdbd7463434d8fcbe first.nut ----------------- -./tests/data/a-dnxhd-1080i.mov -first.nut second.nut differ: char 113, line 1 -037e31900e6cdf7161c2a0df23d9dc9d first.nut ----------------- -./tests/data/a-dnxhd-720p-rd.dnxhd -first.nut second.nut differ: char 113, line 1 -1237abac554ea9adb2a926641eec0de0 first.nut ----------------- -./tests/data/a-dnxhd-720p.dnxhd -first.nut second.nut differ: char 113, line 1 -6694322cefa2f482bc3dac8be22eb5d5 first.nut ----------------- -./tests/data/a-dv.dv -1aa367a56d31bb45f98d820121820909 first.nut ----------------- -./tests/data/a-dv411.dv -7ef296512960e00d96850f2606b4b683 first.nut ----------------- -./tests/data/a-dv50.dv -6424dd39e22a1789a8182d7e8da224a9 first.nut ----------------- -./tests/data/a-error-mpeg4-adv.avi -715b262e3e7c9be2b59525ba0289f30e first.nut ----------------- -./tests/data/a-ffv1.avi -edada4da2170ffd3386636cff67a90f0 first.nut ----------------- -./tests/data/a-flac.flac -d5e0a6d87034c21627afb2a904412a21 first.nut ----------------- 
-./tests/data/a-flashsv.flv -985076a8a87df1f91b34cbb81ce96217 first.nut ----------------- -./tests/data/a-flv.flv -6d01a0eb07c15ec3d0a70bfad0615bec first.nut ----------------- -./tests/data/a-g726.wav -first.nut second.nut differ: char 34, line 1 -59540b44c97b8e1eafc53ebdaeaf3eb8 first.nut ----------------- -./tests/data/a-h261.avi -18d47cc50e05e5c855a8aec1a5d8d9ec first.nut ----------------- -./tests/data/a-h263.avi -91b67a478420a30cf10c3d872f7e799b first.nut ----------------- -./tests/data/a-h263p.avi -1e9f108181dca2dd3bb621bb45fc5834 first.nut ----------------- -./tests/data/a-huffyuv.avi -62dccc2a428b561c08497f8378ea1567 first.nut ----------------- -./tests/data/a-jpegls.avi -35f1bb0f9b14bf3eb29134784f278c4f first.nut ----------------- -./tests/data/a-ljpeg.avi -45ec1072d8e55d6cfa784cc732830f3c first.nut ----------------- -./tests/data/a-mjpeg.avi -4e6d42fdda880661de8308cfa45652ee first.nut ----------------- -./tests/data/a-mp2.mp2 -6c8d1a33dd994d63c68e5c9953b5cb8c first.nut ----------------- -./tests/data/a-mpeg1.mpg -first.nut second.nut differ: char 34, line 1 -9d444c67713ef70c06d35fd355200ed5 first.nut ----------------- -./tests/data/a-mpeg1b.mpg -first.nut second.nut differ: char 34, line 1 -9d444c67713ef70c06d35fd355200ed5 first.nut ----------------- -./tests/data/a-mpeg2.mpg -first.nut second.nut differ: char 34, line 1 -328f6a0069b76397c5ed0dcea8b69b50 first.nut ----------------- -./tests/data/a-mpeg2.mpg -first.nut second.nut differ: char 34, line 1 -328f6a0069b76397c5ed0dcea8b69b50 first.nut ----------------- -./tests/data/a-mpeg2_422.mpg -first.nut second.nut differ: char 34, line 1 -d27035bcf30801cd1bee6ac59e8f5e3e first.nut ----------------- -./tests/data/a-mpeg2i.mpg -first.nut second.nut differ: char 34, line 1 -c3351b79649825a6b9f62a2a1db633c1 first.nut ----------------- -./tests/data/a-mpeg2ivlc-qprd.mpg -first.nut second.nut differ: char 34, line 1 -d910da52fa10eb1deca10fa9443132d2 first.nut ----------------- -./tests/data/a-mpeg2reuse.mpg -first.nut second.nut differ: char 34, line 1 -c3351b79649825a6b9f62a2a1db633c1 first.nut ----------------- -./tests/data/a-mpeg2thread.mpg -first.nut second.nut differ: char 34, line 1 -c3351b79649825a6b9f62a2a1db633c1 first.nut ----------------- -./tests/data/a-mpeg2threadivlc.mpg -first.nut second.nut differ: char 34, line 1 -c3351b79649825a6b9f62a2a1db633c1 first.nut ----------------- -./tests/data/a-mpeg4-Q.avi -first.nut second.nut differ: char 34, line 1 -305bab90451e2c3b741e3aef51bc2a4c first.nut ----------------- -./tests/data/a-mpeg4-adap.avi -first.nut second.nut differ: char 34, line 1 -5d9315ec49c4122f6f23cf84cab5fc53 first.nut ----------------- -./tests/data/a-mpeg4-adv.avi -5d672bf4c2e879d6a20e349cb4dc09a6 first.nut ----------------- -./tests/data/a-mpeg4-nr.avi -0243b2e03115fe948f99da1ee10ae588 first.nut ----------------- -./tests/data/a-mpeg4-qprd.avi -first.nut second.nut differ: char 34, line 1 -5d9315ec49c4122f6f23cf84cab5fc53 first.nut ----------------- -./tests/data/a-mpeg4-rc.avi -first.nut second.nut differ: char 34, line 1 -5d9315ec49c4122f6f23cf84cab5fc53 first.nut ----------------- -./tests/data/a-mpeg4-thread.avi -first.nut second.nut differ: char 34, line 1 -6aa94d589e9e7626e51575d8a2aec6e7 first.nut ----------------- -./tests/data/a-msmpeg4.avi -836d432509ff22fd363237ef1dced5f3 first.nut ----------------- -./tests/data/a-msmpeg4v2.avi -37f253da3666fb057edecb86ed2dba39 first.nut ----------------- -./tests/data/a-odivx.mp4 -e3bd9d8a3417abc749c489e64119dbf3 first.nut ----------------- 
-./tests/data/a-pcm_alaw.wav -first.nut second.nut differ: char 34, line 1 -22853e7806b0f0162fd5e2573e34b03c first.nut ----------------- -./tests/data/a-pcm_f32be.au -first.nut second.nut differ: char 34, line 1 -94cb60c3107ec509af79191e86099a0e first.nut ----------------- -./tests/data/a-pcm_f32le.wav -first.nut second.nut differ: char 34, line 1 -8d887b27a8531390af5b682557631986 first.nut ----------------- -./tests/data/a-pcm_f64be.au -first.nut second.nut differ: char 34, line 1 -e0c7b64e13bb9398a57dac60806515fb first.nut ----------------- -./tests/data/a-pcm_f64le.wav -first.nut second.nut differ: char 34, line 1 -9dbb9bda0c990502e910e082a008433f first.nut ----------------- -./tests/data/a-pcm_mulaw.wav -first.nut second.nut differ: char 34, line 1 -78c4aae32fdddaba4f9caa5683018c94 first.nut ----------------- -./tests/data/a-pcm_s16be.mkv -first.nut second.nut differ: char 34, line 1 -279810a0c30a06c4ab7de154e3de140d first.nut ----------------- -./tests/data/a-pcm_s16be.mov -first.nut second.nut differ: char 42, line 1 -0a8ede3d121f17a98e9038771eb98e1a first.nut ----------------- -./tests/data/a-pcm_s16le.mkv -47942f5188f8d081bcbe7fb82550b135 first.nut ----------------- -./tests/data/a-pcm_s16le.wav -first.nut second.nut differ: char 34, line 1 -9f868acb99ba107750f165431f95c382 first.nut ----------------- -./tests/data/a-pcm_s24be.mov -first.nut second.nut differ: char 34, line 1 -9c96762f631851014dec14b506091cc1 first.nut ----------------- -./tests/data/a-pcm_s24daud.302 -60ecb7037b205e2013490fdadab9697b first.nut ----------------- -./tests/data/a-pcm_s24le.wav -first.nut second.nut differ: char 34, line 1 -5805a2e6e2eddede4757fd488d0d6adb first.nut ----------------- -./tests/data/a-pcm_s32be.mov -first.nut second.nut differ: char 34, line 1 -d6c868a1130be573bbe0cfc88913a60c first.nut ----------------- -./tests/data/a-pcm_s32le.wav -first.nut second.nut differ: char 34, line 1 -3e0a81669647739c490f12521f897527 first.nut ----------------- -./tests/data/a-pcm_s8.mov -first.nut second.nut differ: char 34, line 1 -a6fe0827966ee4515f27c7053d579229 first.nut ----------------- -./tests/data/a-pcm_u8.wav -first.nut second.nut differ: char 34, line 1 -f0d398fd651cdedfd7b4c5433c08fd79 first.nut ----------------- -./tests/data/a-pcm_zork.wav -first.nut second.nut differ: char 34, line 1 -69e40cc9266836a7101000677ee14a87 first.nut ----------------- -./tests/data/a-roqav.roq -first.nut second.nut differ: char 34, line 1 -0e7a57bb28054b7e319eac2ba0a4be23 first.nut ----------------- -./tests/data/a-rv10.rm -first.nut second.nut differ: char 34, line 1 -80f982c6bffea91ff45a9b320cb93c14 first.nut ----------------- -./tests/data/a-rv20.rm -first.nut second.nut differ: char 34, line 1 -5b02113c0941578ca6918215eed8a728 first.nut ----------------- -./tests/data/a-snow.avi -e73b88690aa491491ede5970641134ad first.nut ----------------- -./tests/data/a-snow53.avi -18a6b061252c8c74bd22b42a7d5b2bae first.nut ----------------- -./tests/data/a-svq1.mov -first.nut second.nut differ: char 197, line 1 -6bbe90d47c1763654e8388ce51ab911e first.nut ----------------- -./tests/data/a-wmav1.asf -first.nut second.nut differ: char 34, line 1 -c3f7bc239ff166d738b29252b47bd437 first.nut ----------------- -./tests/data/a-wmav2.asf -first.nut second.nut differ: char 34, line 1 -930f1824b9677f0b6b714f1c6ddcf825 first.nut ----------------- -./tests/data/a-wmv1.avi -206bd9985b575f61a8a580656af39beb first.nut ----------------- -./tests/data/a-wmv2.avi -09178a3c2b99d4f7ad1f7a761a2b803a first.nut ----------------- 
-./tests/data/b-lavf-bgr24.yuv -09178a3c2b99d4f7ad1f7a761a2b803a first.nut ----------------- -./tests/data/b-lavf-gray.yuv -09178a3c2b99d4f7ad1f7a761a2b803a first.nut ----------------- -./tests/data/b-lavf-monob.yuv -09178a3c2b99d4f7ad1f7a761a2b803a first.nut ----------------- -./tests/data/b-lavf-monow.yuv -09178a3c2b99d4f7ad1f7a761a2b803a first.nut ----------------- -./tests/data/b-lavf-rgb24.yuv -09178a3c2b99d4f7ad1f7a761a2b803a first.nut ----------------- -./tests/data/b-lavf-rgb32.yuv -09178a3c2b99d4f7ad1f7a761a2b803a first.nut ----------------- -./tests/data/b-lavf-rgb555.yuv -09178a3c2b99d4f7ad1f7a761a2b803a first.nut ----------------- -./tests/data/b-lavf-rgb565.yuv -09178a3c2b99d4f7ad1f7a761a2b803a first.nut ----------------- -./tests/data/b-lavf-yuv410p.yuv -09178a3c2b99d4f7ad1f7a761a2b803a first.nut ----------------- -./tests/data/b-lavf-yuv411p.yuv -09178a3c2b99d4f7ad1f7a761a2b803a first.nut ----------------- -./tests/data/b-lavf-yuv420p.yuv -09178a3c2b99d4f7ad1f7a761a2b803a first.nut ----------------- -./tests/data/b-lavf-yuv422p.yuv -09178a3c2b99d4f7ad1f7a761a2b803a first.nut ----------------- -./tests/data/b-lavf-yuv440p.yuv -09178a3c2b99d4f7ad1f7a761a2b803a first.nut ----------------- -./tests/data/b-lavf-yuv444p.yuv -09178a3c2b99d4f7ad1f7a761a2b803a first.nut ----------------- -./tests/data/b-lavf-yuvj420p.yuv -09178a3c2b99d4f7ad1f7a761a2b803a first.nut ----------------- -./tests/data/b-lavf-yuvj422p.yuv -09178a3c2b99d4f7ad1f7a761a2b803a first.nut ----------------- -./tests/data/b-lavf-yuvj440p.yuv -09178a3c2b99d4f7ad1f7a761a2b803a first.nut ----------------- -./tests/data/b-lavf-yuvj444p.yuv -09178a3c2b99d4f7ad1f7a761a2b803a first.nut ----------------- -./tests/data/b-lavf-yuyv422.yuv -09178a3c2b99d4f7ad1f7a761a2b803a first.nut ----------------- -./tests/data/b-lavf.aif -first.nut second.nut differ: char 34, line 1 -3f1d3faae1671f1cf862ddb66a5c59d1 first.nut ----------------- -./tests/data/b-lavf.al -e6d4b977e74a535b039a6a1dfed2dbc1 first.nut ----------------- -./tests/data/b-lavf.asf -first.nut second.nut differ: char 34, line 1 -57727c41b3974697c0a79cfd08515ddd first.nut ----------------- -./tests/data/b-lavf.au -first.nut second.nut differ: char 34, line 1 -1da12f41bc5ea1fd851e8a48b222c204 first.nut ----------------- -./tests/data/b-lavf.avi -a88edf9fb8e02e658ba3cae9313a3cdc first.nut ----------------- -./tests/data/b-lavf.dv -first.nut second.nut differ: char 34, line 1 -819018a5d91c55312ffe784e8712ac4b first.nut ----------------- -./tests/data/b-lavf.ffm -first.nut second.nut differ: char 34, line 1 -17f8894a05c71adb51c9a0ff1b9040bb first.nut ----------------- -./tests/data/b-lavf.flv -d74edb56e74e0eea748863f3aeeafa61 first.nut ----------------- -./tests/data/b-lavf.gif -first.nut second.nut differ: char 34, line 1 -ef9ba6bf88f44d9d326049ef2872a4d3 first.nut ----------------- -./tests/data/b-lavf.gxf -first.nut second.nut differ: char 44, line 1 -522957f46ba46051fd03a0868c905e54 first.nut ----------------- -./tests/data/b-lavf.mkv -8c9427bb75c96210d6580d9b881d9e4d first.nut ----------------- -./tests/data/b-lavf.mmf -first.nut second.nut differ: char 42, line 1 -298136aef02389fc5b0844995fe6ac72 first.nut ----------------- -./tests/data/b-lavf.mov -ce895b33ff206fafbae89fd5a8f959d2 first.nut ----------------- -./tests/data/b-lavf.mpg -d279e3343993267241c2fac4f4563cdb first.nut ----------------- -./tests/data/b-lavf.mxf -first.nut second.nut differ: char 34, line 1 -3e98a90d40986b8ea4305be06175927a first.nut ----------------- -./tests/data/b-lavf.mxf_d10 
-1ee69644165344a096ddfaaac951a0e9 first.nut ----------------- -./tests/data/b-lavf.nut -1426bca4c65796516a3e94b6bebc5a58 first.nut ----------------- -./tests/data/b-lavf.ogg -c986ce79045f2068ae1bedc2b8702884 first.nut ----------------- -./tests/data/b-lavf.rm -first.nut second.nut differ: char 34, line 1 -a3b2c9d3ec2c86b6d4c3bf0ed91391c3 first.nut ----------------- -./tests/data/b-lavf.swf -first.nut second.nut differ: char 34, line 1 -d4a5c5e6343dc17bed49397d889e0799 first.nut ----------------- -./tests/data/b-lavf.ts -40fd2ece0c8386d3a250943eab023795 first.nut ----------------- -./tests/data/b-lavf.ul -1c4c747e2e9c0fd195656359341eef76 first.nut ----------------- -./tests/data/b-lavf.voc -first.nut second.nut differ: char 42, line 1 -500ef42830c5bc2af849dbdcc4380f1b first.nut ----------------- -./tests/data/b-lavf.wav -first.nut second.nut differ: char 42, line 1 -8d4c6a79af442610ad912625c9b85d02 first.nut ----------------- -./tests/data/b-lavf.y4m -f42a6ff4488de306925b057ecee75b0e first.nut ----------------- -./tests/data/b-lavf02.bmp -first.nut second.nut differ: char 113, line 1 -02e3c782ef3a0c96e820201d4d4b8268 first.nut ----------------- -./tests/data/b-lavf02.jpg -61a19c3012a5aa056d8e9a589e29de2e first.nut ----------------- -./tests/data/b-lavf02.pcx -first.nut second.nut differ: char 113, line 1 -3c4e1b9c8d5dd2bedb8eebd1edc7a2f5 first.nut ----------------- -./tests/data/b-lavf02.pgm -first.nut second.nut differ: char 113, line 1 -cc36bdadd7aef501a6d5d588dec2188b first.nut ----------------- -./tests/data/b-lavf02.ppm -first.nut second.nut differ: char 113, line 1 -453ec690bea6c3668e2b65e0b7ad14c8 first.nut ----------------- -./tests/data/b-lavf02.sgi -first.nut second.nut differ: char 113, line 1 -6cdadd58aaa5ad196697352e96723e52 first.nut ----------------- -./tests/data/b-lavf02.tga -4144d2b4ee2948c1a16f7fc31b381be3 first.nut ----------------- -./tests/data/b-lavf02.tiff -first.nut second.nut differ: char 113, line 1 -237fa2da2d5f4930dae9825c9cf928a6 first.nut ----------------- -./tests/data/b-pbmpipe.pbm -first.nut second.nut differ: char 113, line 1 -2c65ef7188398da8a5f107b9dd5fb998 first.nut ----------------- -./tests/data/b-pgmpipe.pgm -first.nut second.nut differ: char 113, line 1 -b7e98248ada1e6f7170bc7fedee3825c first.nut ----------------- -./tests/data/b-ppmpipe.ppm -first.nut second.nut differ: char 113, line 1 -869fcefe430c35a9a8e46fd5f040b62e first.nut diff --git a/tests/fate-run.sh b/tests/fate-run.sh index 366145d22f..4121035853 100755 --- a/tests/fate-run.sh +++ b/tests/fate-run.sh @@ -76,9 +76,6 @@ pcm(){ regtest(){ t="${test#$2-}" ref=${base}/ref/$2/$t - cleanfiles="$cleanfiles $outfile $errfile" - outfile=tests/data/regression/$2/$t - errfile=tests/data/$t.$2.err ${base}/${1}-regression.sh $t $2 $3 "$target_exec" "$target_path" "$threads" "$thread_type" } @@ -107,7 +104,7 @@ seektest(){ file=$(echo tests/data/$d/$file) ;; esac - $target_exec $target_path/tests/seek_test $target_path/$file + $target_exec $target_path/libavformat/seek-test $target_path/$file } mkdir -p "$outdir" @@ -126,6 +123,7 @@ if test -e "$ref"; then diff) diff -u -w "$ref" "$outfile" >$cmpfile ;; oneoff) oneoff "$ref" "$outfile" "$fuzz" >$cmpfile ;; stddev) stddev "$ref" "$outfile" "$fuzz" >$cmpfile ;; + null) cat "$outfile" >$cmpfile ;; esac cmperr=$? 
test $err = 0 && err=$cmperr diff --git a/tests/fate/aac.mak b/tests/fate/aac.mak index 6701e149d2..8c805575f3 100644 --- a/tests/fate/aac.mak +++ b/tests/fate/aac.mak @@ -2,10 +2,22 @@ FATE_AAC += fate-aac-al04_44 fate-aac-al04_44: CMD = pcm -i $(SAMPLES)/aac/al04_44.mp4 fate-aac-al04_44: REF = $(SAMPLES)/aac/al04_44.s16 +FATE_AAC += fate-aac-al05_44 +fate-aac-al05_44: CMD = pcm -i $(SAMPLES)/aac/al05_44.mp4 +fate-aac-al05_44: REF = $(SAMPLES)/aac/al05_44.s16 + +FATE_AAC += fate-aac-al06_44 +fate-aac-al06_44: CMD = pcm -i $(SAMPLES)/aac/al06_44.mp4 +fate-aac-al06_44: REF = $(SAMPLES)/aac/al06_44.s16 + FATE_AAC += fate-aac-al07_96 fate-aac-al07_96: CMD = pcm -i $(SAMPLES)/aac/al07_96.mp4 fate-aac-al07_96: REF = $(SAMPLES)/aac/al07_96.s16 +FATE_AAC += fate-aac-al17_44 +fate-aac-al17_44: CMD = pcm -i $(SAMPLES)/aac/al17_44.mp4 +fate-aac-al17_44: REF = $(SAMPLES)/aac/al17_44.s16 + FATE_AAC += fate-aac-am00_88 fate-aac-am00_88: CMD = pcm -i $(SAMPLES)/aac/am00_88.mp4 fate-aac-am00_88: REF = $(SAMPLES)/aac/am00_88.s16 diff --git a/tests/fate/amrnb.mak b/tests/fate/amrnb.mak new file mode 100644 index 0000000000..e633fec65b --- /dev/null +++ b/tests/fate/amrnb.mak @@ -0,0 +1,47 @@ +FATE_TESTS += fate-amrnb-4k75 +fate-amrnb-4k75: CMD = pcm -i $(SAMPLES)/amrnb/4.75k.amr +fate-amrnb-4k75: CMP = stddev +fate-amrnb-4k75: REF = $(SAMPLES)/amrnb/4.75k.pcm +fate-amrnb-4k75: FUZZ = 1 + +FATE_TESTS += fate-amrnb-5k15 +fate-amrnb-5k15: CMD = pcm -i $(SAMPLES)/amrnb/5.15k.amr +fate-amrnb-5k15: CMP = stddev +fate-amrnb-5k15: REF = $(SAMPLES)/amrnb/5.15k.pcm +fate-amrnb-5k15: FUZZ = 1 + +FATE_TESTS += fate-amrnb-5k9 +fate-amrnb-5k9: CMD = pcm -i $(SAMPLES)/amrnb/5.9k.amr +fate-amrnb-5k9: CMP = stddev +fate-amrnb-5k9: REF = $(SAMPLES)/amrnb/5.9k.pcm +fate-amrnb-5k9: FUZZ = 1 + +FATE_TESTS += fate-amrnb-6k7 +fate-amrnb-6k7: CMD = pcm -i $(SAMPLES)/amrnb/6.7k.amr +fate-amrnb-6k7: CMP = stddev +fate-amrnb-6k7: REF = $(SAMPLES)/amrnb/6.7k.pcm +fate-amrnb-6k7: FUZZ = 1 + +FATE_TESTS += fate-amrnb-7k4 +fate-amrnb-7k4: CMD = pcm -i $(SAMPLES)/amrnb/7.4k.amr +fate-amrnb-7k4: CMP = stddev +fate-amrnb-7k4: REF = $(SAMPLES)/amrnb/7.4k.pcm +fate-amrnb-7k4: FUZZ = 1 + +FATE_TESTS += fate-amrnb-7k95 +fate-amrnb-7k95: CMD = pcm -i $(SAMPLES)/amrnb/7.95k.amr +fate-amrnb-7k95: CMP = stddev +fate-amrnb-7k95: REF = $(SAMPLES)/amrnb/7.95k.pcm +fate-amrnb-7k95: FUZZ = 1 + +FATE_TESTS += fate-amrnb-10k2 +fate-amrnb-10k2: CMD = pcm -i $(SAMPLES)/amrnb/10.2k.amr +fate-amrnb-10k2: CMP = stddev +fate-amrnb-10k2: REF = $(SAMPLES)/amrnb/10.2k.pcm +fate-amrnb-10k2: FUZZ = 1 + +FATE_TESTS += fate-amrnb-12k2 +fate-amrnb-12k2: CMD = pcm -i $(SAMPLES)/amrnb/12.2k.amr +fate-amrnb-12k2: CMP = stddev +fate-amrnb-12k2: REF = $(SAMPLES)/amrnb/12.2k.pcm +fate-amrnb-12k2: FUZZ = 1 diff --git a/tests/fate/amrwb.mak b/tests/fate/amrwb.mak new file mode 100644 index 0000000000..d3931c7ccb --- /dev/null +++ b/tests/fate/amrwb.mak @@ -0,0 +1,59 @@ +FATE_TESTS += fate-amrwb-6k60 +fate-amrwb-6k60: CMD = pcm -i $(SAMPLES)/amrwb/seed-6k60.awb +fate-amrwb-6k60: CMP = stddev +fate-amrwb-6k60: REF = $(SAMPLES)/amrwb/seed-6k60.pcm +fate-amrwb-6k60: FUZZ = 1 + +FATE_TESTS += fate-amrwb-8k85 +fate-amrwb-8k85: CMD = pcm -i $(SAMPLES)/amrwb/seed-8k85.awb +fate-amrwb-8k85: CMP = stddev +fate-amrwb-8k85: REF = $(SAMPLES)/amrwb/seed-8k85.pcm +fate-amrwb-8k85: FUZZ = 1 + +FATE_TESTS += fate-amrwb-12k65 +fate-amrwb-12k65: CMD = pcm -i $(SAMPLES)/amrwb/seed-12k65.awb +fate-amrwb-12k65: CMP = stddev +fate-amrwb-12k65: REF = $(SAMPLES)/amrwb/seed-12k65.pcm 
+fate-amrwb-12k65: FUZZ = 1 + +FATE_TESTS += fate-amrwb-14k25 +fate-amrwb-14k25: CMD = pcm -i $(SAMPLES)/amrwb/seed-14k25.awb +fate-amrwb-14k25: CMP = stddev +fate-amrwb-14k25: REF = $(SAMPLES)/amrwb/seed-14k25.pcm +fate-amrwb-14k25: FUZZ = 2.6 + +FATE_TESTS += fate-amrwb-15k85 +fate-amrwb-15k85: CMD = pcm -i $(SAMPLES)/amrwb/seed-15k85.awb +fate-amrwb-15k85: CMP = stddev +fate-amrwb-15k85: REF = $(SAMPLES)/amrwb/seed-15k85.pcm +fate-amrwb-15k85: FUZZ = 1 + +FATE_TESTS += fate-amrwb-18k25 +fate-amrwb-18k25: CMD = pcm -i $(SAMPLES)/amrwb/seed-18k25.awb +fate-amrwb-18k25: CMP = stddev +fate-amrwb-18k25: REF = $(SAMPLES)/amrwb/seed-18k25.pcm +fate-amrwb-18k25: FUZZ = 1 + +FATE_TESTS += fate-amrwb-19k85 +fate-amrwb-19k85: CMD = pcm -i $(SAMPLES)/amrwb/seed-19k85.awb +fate-amrwb-19k85: CMP = stddev +fate-amrwb-19k85: REF = $(SAMPLES)/amrwb/seed-19k85.pcm +fate-amrwb-19k85: FUZZ = 1 + +FATE_TESTS += fate-amrwb-23k05 +fate-amrwb-23k05: CMD = pcm -i $(SAMPLES)/amrwb/seed-23k05.awb +fate-amrwb-23k05: CMP = stddev +fate-amrwb-23k05: REF = $(SAMPLES)/amrwb/seed-23k05.pcm +fate-amrwb-23k05: FUZZ = 2 + +FATE_TESTS += fate-amrwb-23k85 +fate-amrwb-23k85: CMD = pcm -i $(SAMPLES)/amrwb/seed-23k85.awb +fate-amrwb-23k85: CMP = stddev +fate-amrwb-23k85: REF = $(SAMPLES)/amrwb/seed-23k85.pcm +fate-amrwb-23k85: FUZZ = 2 + +FATE_TESTS += fate-amrwb-23k85-2 +fate-amrwb-23k85-2: CMD = pcm -i $(SAMPLES)/amrwb/deus-23k85.awb +fate-amrwb-23k85-2: CMP = stddev +fate-amrwb-23k85-2: REF = $(SAMPLES)/amrwb/deus-23k85.pcm +fate-amrwb-23k85-2: FUZZ = 1 diff --git a/tests/fate/dct.mak b/tests/fate/dct.mak new file mode 100644 index 0000000000..8f2ab7a8e0 --- /dev/null +++ b/tests/fate/dct.mak @@ -0,0 +1,5 @@ +FATE_TESTS += fate-idct8x8 +fate-idct8x8: libavcodec/dct-test$(EXESUF) +fate-idct8x8: CMD = run libavcodec/dct-test -i +fate-idct8x8: REF = /dev/null +fate-idct8x8: CMP = null diff --git a/tests/fate/fft.mak b/tests/fate/fft.mak index 042a7bf322..feb47afe00 100644 --- a/tests/fate/fft.mak +++ b/tests/fate/fft.mak @@ -1,28 +1,36 @@ -FATE_FFT = fate-fft fate-ifft \ - fate-mdct fate-imdct \ - fate-rdft fate-irdft \ - fate-dct1d fate-idct1d - -fate-fft: CMD = run libavcodec/fft-test -fate-ifft: CMD = run libavcodec/fft-test -i -fate-mdct: CMD = run libavcodec/fft-test -m -fate-imdct: CMD = run libavcodec/fft-test -m -i -fate-rdft: CMD = run libavcodec/fft-test -r -fate-irdft: CMD = run libavcodec/fft-test -r -i -fate-dct1d: CMD = run libavcodec/fft-test -d -fate-idct1d: CMD = run libavcodec/fft-test -d -i +define DEF_FFT +FATE_FFT += fate-fft-$(1) fate-ifft-$(1) \ + fate-mdct-$(1) fate-imdct-$(1) \ + fate-rdft-$(1) fate-irdft-$(1) \ + fate-dct1d-$(1) fate-idct1d-$(1) + +fate-fft-$(N): CMD = run libavcodec/fft-test -n$(1) +fate-ifft-$(N): CMD = run libavcodec/fft-test -n$(1) -i +fate-mdct-$(N): CMD = run libavcodec/fft-test -n$(1) -m +fate-imdct-$(N): CMD = run libavcodec/fft-test -n$(1) -m -i +fate-rdft-$(N): CMD = run libavcodec/fft-test -n$(1) -r +fate-irdft-$(N): CMD = run libavcodec/fft-test -n$(1) -r -i +fate-dct1d-$(N): CMD = run libavcodec/fft-test -n$(1) -d +fate-idct1d-$(N): CMD = run libavcodec/fft-test -n$(1) -d -i +endef + +$(foreach N, 4 5 6 7 8 9 10 11 12, $(eval $(call DEF_FFT,$(N)))) fate-fft-test: $(FATE_FFT) $(FATE_FFT): libavcodec/fft-test$(EXESUF) $(FATE_FFT): REF = /dev/null -FATE_FFT_FIXED = fate-fft-fixed fate-ifft-fixed \ - fate-mdct-fixed fate-imdct-fixed +define DEF_FFT_FIXED +FATE_FFT_FIXED += fate-fft-fixed-$(1) fate-ifft-fixed-$(1) \ + fate-mdct-fixed-$(1) fate-imdct-fixed-$(1) + 
+fate-fft-fixed-$(1): CMD = run libavcodec/fft-fixed-test -n$(1) +fate-ifft-fixed-$(1): CMD = run libavcodec/fft-fixed-test -n$(1) -i +fate-mdct-fixed-$(1): CMD = run libavcodec/fft-fixed-test -n$(1) -m +fate-imdct-fixed-$(1): CMD = run libavcodec/fft-fixed-test -n$(1) -m -i +endef -fate-fft-fixed: CMD = run libavcodec/fft-fixed-test -fate-ifft-fixed: CMD = run libavcodec/fft-fixed-test -i -fate-mdct-fixed: CMD = run libavcodec/fft-fixed-test -m -fate-imdct-fixed: CMD = run libavcodec/fft-fixed-test -m -i +$(foreach N, 4 5 6 7 8 9 10 11 12, $(eval $(call DEF_FFT_FIXED,$(N)))) fate-fft-fixed-test: $(FATE_FFT_FIXED) $(FATE_FFT_FIXED): libavcodec/fft-fixed-test$(EXESUF) diff --git a/tests/fate/libavutil.mak b/tests/fate/libavutil.mak new file mode 100644 index 0000000000..4299f081ac --- /dev/null +++ b/tests/fate/libavutil.mak @@ -0,0 +1,34 @@ +FATE_TESTS += fate-adler32 +fate-adler32: libavutil/adler32-test$(EXESUF) +fate-adler32: CMD = run libavutil/adler32-test +fate-adler32: REF = /dev/null + +FATE_TESTS += fate-aes +fate-aes: libavutil/aes-test$(EXESUF) +fate-aes: CMD = run libavutil/aes-test +fate-aes: REF = /dev/null + +FATE_TESTS += fate-base64 +fate-base64: libavutil/base64-test$(EXESUF) +fate-base64: CMD = run libavutil/base64-test + +FATE_TESTS += fate-crc +fate-crc: libavutil/crc-test$(EXESUF) +fate-crc: CMD = run libavutil/crc-test + +FATE_TESTS += fate-des +fate-des: libavutil/des-test$(EXESUF) +fate-des: CMD = run libavutil/des-test +fate-des: REF = /dev/null + +FATE_TESTS += fate-eval +fate-eval: libavutil/eval-test$(EXESUF) +fate-eval: CMD = run libavutil/eval-test + +FATE_TESTS += fate-md5 +fate-md5: libavutil/md5-test$(EXESUF) +fate-md5: CMD = run libavutil/md5-test + +FATE_TESTS += fate-sha +fate-sha: libavutil/sha-test$(EXESUF) +fate-sha: CMD = run libavutil/sha-test diff --git a/tests/fate/vp8.mak b/tests/fate/vp8.mak index 2b171305cc..1f442af4ac 100644 --- a/tests/fate/vp8.mak +++ b/tests/fate/vp8.mak @@ -3,7 +3,7 @@ VP8_SUITE = 001 002 003 004 005 006 007 008 009 010 011 012 013 014 015 016 017 define FATE_VP8_SUITE FATE_VP8 += fate-vp8-test-vector$(2)-$(1) fate-vp8-test-vector$(2)-$(1): CMD = framemd5 $(3) -i $(SAMPLES)/vp8-test-vectors-r1/vp80-00-comprehensive-$(1).ivf -fate-vp8-test-vector$(2)-$(1): REF = $(SRC_PATH_BARE)/tests/ref/fate/vp8-test-vector-$(1) +fate-vp8-test-vector$(2)-$(1): REF = $(SRC_PATH)/tests/ref/fate/vp8-test-vector-$(1) endef define FATE_VP8_FULL @@ -11,7 +11,7 @@ $(foreach N,$(VP8_SUITE),$(eval $(call FATE_VP8_SUITE,$(N),$(1),$(2)))) FATE_VP8 += fate-vp8-sign-bias$(1) fate-vp8-sign-bias$(1): CMD = framemd5 $(2) -i $(SAMPLES)/vp8/sintel-signbias.ivf -fate-vp8-sign-bias$(1): REF = $(SRC_PATH_BARE)/tests/ref/fate/vp8-sign-bias +fate-vp8-sign-bias$(1): REF = $(SRC_PATH)/tests/ref/fate/vp8-sign-bias endef $(eval $(call FATE_VP8_FULL)) diff --git a/tests/fate2.mak b/tests/fate2.mak index 066f9ef583..a743f0cd59 100644 --- a/tests/fate2.mak +++ b/tests/fate2.mak @@ -165,7 +165,7 @@ fate-wmapro-2ch: CMP = oneoff fate-wmapro-2ch: REF = $(SAMPLES)/wmapro/Beethovens_9th-1_small.pcm FATE_TESTS += fate-ansi -fate-ansi: CMD = framecrc -ar 44100 -i $(SAMPLES)/ansi/TRE-IOM5.ANS -pix_fmt rgb24 +fate-ansi: CMD = framecrc -chars_per_frame 44100 -i $(SAMPLES)/ansi/TRE-IOM5.ANS -pix_fmt rgb24 FATE_TESTS += fate-wmv8-drm # discard last packet to avoid fails due to overread of VC-1 decoder @@ -213,120 +213,12 @@ fate-mjpegb: CMD = framecrc -idct simple -flags +bitexact -i $(SAMPLES)/mjpegb/m FATE_TESTS += fate-rv30 fate-rv30: CMD = framecrc -flags +bitexact -dct 
fastint -idct simple -i $(SAMPLES)/real/rv30.rm -an -FATE_TESTS += fate-sha -fate-sha: libavutil/sha-test$(EXESUF) -fate-sha: CMD = run libavutil/sha-test - FATE_TESTS += fate-musepack7 fate-musepack7: CMD = pcm -i $(SAMPLES)/musepack/inside-mp7.mpc fate-musepack7: CMP = oneoff fate-musepack7: REF = $(SAMPLES)/musepack/inside-mp7.pcm fate-musepack7: FUZZ = 1 -FATE_TESTS += fate-amrnb-4k75 -fate-amrnb-4k75: CMD = pcm -i $(SAMPLES)/amrnb/4.75k.amr -fate-amrnb-4k75: CMP = stddev -fate-amrnb-4k75: REF = $(SAMPLES)/amrnb/4.75k.pcm -fate-amrnb-4k75: FUZZ = 1 - -FATE_TESTS += fate-amrnb-5k15 -fate-amrnb-5k15: CMD = pcm -i $(SAMPLES)/amrnb/5.15k.amr -fate-amrnb-5k15: CMP = stddev -fate-amrnb-5k15: REF = $(SAMPLES)/amrnb/5.15k.pcm -fate-amrnb-5k15: FUZZ = 1 - -FATE_TESTS += fate-amrnb-5k9 -fate-amrnb-5k9: CMD = pcm -i $(SAMPLES)/amrnb/5.9k.amr -fate-amrnb-5k9: CMP = stddev -fate-amrnb-5k9: REF = $(SAMPLES)/amrnb/5.9k.pcm -fate-amrnb-5k9: FUZZ = 1 - -FATE_TESTS += fate-amrnb-6k7 -fate-amrnb-6k7: CMD = pcm -i $(SAMPLES)/amrnb/6.7k.amr -fate-amrnb-6k7: CMP = stddev -fate-amrnb-6k7: REF = $(SAMPLES)/amrnb/6.7k.pcm -fate-amrnb-6k7: FUZZ = 1 - -FATE_TESTS += fate-amrnb-7k4 -fate-amrnb-7k4: CMD = pcm -i $(SAMPLES)/amrnb/7.4k.amr -fate-amrnb-7k4: CMP = stddev -fate-amrnb-7k4: REF = $(SAMPLES)/amrnb/7.4k.pcm -fate-amrnb-7k4: FUZZ = 1 - -FATE_TESTS += fate-amrnb-7k95 -fate-amrnb-7k95: CMD = pcm -i $(SAMPLES)/amrnb/7.95k.amr -fate-amrnb-7k95: CMP = stddev -fate-amrnb-7k95: REF = $(SAMPLES)/amrnb/7.95k.pcm -fate-amrnb-7k95: FUZZ = 1 - -FATE_TESTS += fate-amrnb-10k2 -fate-amrnb-10k2: CMD = pcm -i $(SAMPLES)/amrnb/10.2k.amr -fate-amrnb-10k2: CMP = stddev -fate-amrnb-10k2: REF = $(SAMPLES)/amrnb/10.2k.pcm -fate-amrnb-10k2: FUZZ = 1 - -FATE_TESTS += fate-amrnb-12k2 -fate-amrnb-12k2: CMD = pcm -i $(SAMPLES)/amrnb/12.2k.amr -fate-amrnb-12k2: CMP = stddev -fate-amrnb-12k2: REF = $(SAMPLES)/amrnb/12.2k.pcm -fate-amrnb-12k2: FUZZ = 1 - -FATE_TESTS += fate-amrwb-6k60 -fate-amrwb-6k60: CMD = pcm -i $(SAMPLES)/amrwb/seed-6k60.awb -fate-amrwb-6k60: CMP = stddev -fate-amrwb-6k60: REF = $(SAMPLES)/amrwb/seed-6k60.pcm -fate-amrwb-6k60: FUZZ = 1 - -FATE_TESTS += fate-amrwb-8k85 -fate-amrwb-8k85: CMD = pcm -i $(SAMPLES)/amrwb/seed-8k85.awb -fate-amrwb-8k85: CMP = stddev -fate-amrwb-8k85: REF = $(SAMPLES)/amrwb/seed-8k85.pcm -fate-amrwb-8k85: FUZZ = 1 - -FATE_TESTS += fate-amrwb-12k65 -fate-amrwb-12k65: CMD = pcm -i $(SAMPLES)/amrwb/seed-12k65.awb -fate-amrwb-12k65: CMP = stddev -fate-amrwb-12k65: REF = $(SAMPLES)/amrwb/seed-12k65.pcm -fate-amrwb-12k65: FUZZ = 1 - -FATE_TESTS += fate-amrwb-14k25 -fate-amrwb-14k25: CMD = pcm -i $(SAMPLES)/amrwb/seed-14k25.awb -fate-amrwb-14k25: CMP = stddev -fate-amrwb-14k25: REF = $(SAMPLES)/amrwb/seed-14k25.pcm -fate-amrwb-14k25: FUZZ = 2.6 - -FATE_TESTS += fate-amrwb-15k85 -fate-amrwb-15k85: CMD = pcm -i $(SAMPLES)/amrwb/seed-15k85.awb -fate-amrwb-15k85: CMP = stddev -fate-amrwb-15k85: REF = $(SAMPLES)/amrwb/seed-15k85.pcm -fate-amrwb-15k85: FUZZ = 1 - -FATE_TESTS += fate-amrwb-18k25 -fate-amrwb-18k25: CMD = pcm -i $(SAMPLES)/amrwb/seed-18k25.awb -fate-amrwb-18k25: CMP = stddev -fate-amrwb-18k25: REF = $(SAMPLES)/amrwb/seed-18k25.pcm -fate-amrwb-18k25: FUZZ = 1 - -FATE_TESTS += fate-amrwb-19k85 -fate-amrwb-19k85: CMD = pcm -i $(SAMPLES)/amrwb/seed-19k85.awb -fate-amrwb-19k85: CMP = stddev -fate-amrwb-19k85: REF = $(SAMPLES)/amrwb/seed-19k85.pcm -fate-amrwb-19k85: FUZZ = 1 - -FATE_TESTS += fate-amrwb-23k05 -fate-amrwb-23k05: CMD = pcm -i $(SAMPLES)/amrwb/seed-23k05.awb -fate-amrwb-23k05: CMP = 
stddev -fate-amrwb-23k05: REF = $(SAMPLES)/amrwb/seed-23k05.pcm -fate-amrwb-23k05: FUZZ = 2 - -FATE_TESTS += fate-amrwb-23k85 -fate-amrwb-23k85: CMD = pcm -i $(SAMPLES)/amrwb/seed-23k85.awb -fate-amrwb-23k85: CMP = stddev -fate-amrwb-23k85: REF = $(SAMPLES)/amrwb/seed-23k85.pcm -fate-amrwb-23k85: FUZZ = 2 - -FATE_TESTS += fate-amrwb-23k85-2 -fate-amrwb-23k85-2: CMD = pcm -i $(SAMPLES)/amrwb/deus-23k85.awb -fate-amrwb-23k85-2: CMP = stddev -fate-amrwb-23k85-2: REF = $(SAMPLES)/amrwb/deus-23k85.pcm -fate-amrwb-23k85-2: FUZZ = 1 +FATE_TESTS += fate-iirfilter +fate-iirfilter: libavcodec/iirfilter-test$(EXESUF) +fate-iirfilter: CMD = run libavcodec/iirfilter-test diff --git a/tests/lavf-regression.sh b/tests/lavf-regression.sh index 39e752b3c6..2dc4116ba3 100755 --- a/tests/lavf-regression.sh +++ b/tests/lavf-regression.sh @@ -31,9 +31,9 @@ do_image_formats() mkdir -p "$outfile" file=${outfile}%02d.$1 run_ffmpeg $DEC_OPTS -f image2 -vcodec pgmyuv -i $raw_src $2 $ENC_OPTS $3 -t 0.5 -y -qscale 10 $target_path/$file - do_md5sum ${outfile}02.$1 >> $logfile + do_md5sum ${outfile}02.$1 do_ffmpeg_crc $file $DEC_OPTS $3 -i $target_path/$file - wc -c ${outfile}02.$1 >> $logfile + wc -c ${outfile}02.$1 } do_audio_only() @@ -43,8 +43,6 @@ do_audio_only() do_ffmpeg_crc $file $DEC_OPTS $4 -i $target_path/$file } -rm -f "$logfile" - if [ -n "$do_avi" ] ; then do_lavf avi fi @@ -66,6 +64,9 @@ fi if [ -n "$do_mxf" ] ; then do_lavf mxf "-ar 48000 -bf 2 -timecode_frame_start 264363" +fi + +if [ -n "$do_mxf_d10" ]; then do_lavf mxf_d10 "-ar 48000 -ac 2 -r 25 -s 720x576 -vf pad=720:608:0:32 -vcodec mpeg2video -intra -flags +ildct+low_delay -dc 10 -flags2 +ivlc+non_linear_q -qscale 1 -ps 1 -qmin 1 -rc_max_vbv_use 1 -rc_min_vbv_use 1 -pix_fmt yuv422p -minrate 30000k -maxrate 30000k -b 30000k -bufsize 1200000 -top 1 -rc_init_occupancy 1200000 -qmax 12 -f mxf_d10" fi diff --git a/tests/lavfi-regression.sh b/tests/lavfi-regression.sh index 0322134163..e1666c263c 100755 --- a/tests/lavfi-regression.sh +++ b/tests/lavfi-regression.sh @@ -11,15 +11,13 @@ set -e eval do_$test=y -rm -f "$logfile" - do_video_filter() { label=$1 filters=$2 shift 2 - printf '%-20s' $label >>$logfile + printf '%-20s' $label run_ffmpeg $DEC_OPTS -f image2 -vcodec pgmyuv -i $raw_src \ - $ENC_OPTS -vf "$filters" -vcodec rawvideo $* -f nut md5: >>$logfile + $ENC_OPTS -vf "$filters" -vcodec rawvideo $* -f nut md5: } do_lavfi() { @@ -52,9 +50,9 @@ do_lavfi_pixfmts(){ # exclude pixel formats which are not supported as input $ffmpeg -pix_fmts list 2>/dev/null | sed -ne '9,$p' | grep '^\..\.' 
| cut -d' ' -f2 | sort >$exclude_fmts - $showfiltfmts scale | awk -F '[ \r]' '/^OUTPUT/{ print $3 }' | sort | comm -23 - $exclude_fmts >$out_fmts + $showfiltfmts scale | awk -F '[ \r]' '/^OUTPUT/{ fmt=substr($3, 5); print fmt }' | sort | comm -23 - $exclude_fmts >$out_fmts - pix_fmts=$($showfiltfmts $filter $filter_args | awk -F '[ \r]' '/^INPUT/{ print $3 }' | sort | comm -12 - $out_fmts) + pix_fmts=$($showfiltfmts $filter $filter_args | awk -F '[ \r]' '/^INPUT/{ fmt=substr($3, 5); print fmt }' | sort | comm -12 - $out_fmts) for pix_fmt in $pix_fmts; do do_video_filter $pix_fmt "slicify=random,format=$pix_fmt,$filter=$filter_args" -pix_fmt $pix_fmt done @@ -71,7 +69,7 @@ do_lavfi_pixfmts "pad" "500:400:20:20" do_lavfi_pixfmts "scale" "200:100" do_lavfi_pixfmts "vflip" "" -if [ -n "$do_pixdesc_be" ] || [ -n "$do_pixdesc_le" ]; then +if [ -n "$do_pixdesc" ]; then pix_fmts="$($ffmpeg -pix_fmts list 2>/dev/null | sed -ne '9,$p' | grep '^IO' | cut -d' ' -f2 | sort)" for pix_fmt in $pix_fmts; do do_video_filter $pix_fmt "slicify=random,format=$pix_fmt,pixdesctest" -pix_fmt $pix_fmt diff --git a/tests/ref/fate/base64 b/tests/ref/fate/base64 new file mode 100644 index 0000000000..24fa9ad909 --- /dev/null +++ b/tests/ref/fate/base64 @@ -0,0 +1,9 @@ +Encoding/decoding tests +Passed! +Passed! +Passed! +Passed! +Passed! +Passed! +Passed! +Passed! diff --git a/tests/ref/fate/crc b/tests/ref/fate/crc new file mode 100644 index 0000000000..4a82680490 --- /dev/null +++ b/tests/ref/fate/crc @@ -0,0 +1,4 @@ +crc EDB88320 =3D5CDD04 +crc 04C11DB7 =E0BAF5C0 +crc 00008005 =BB1F +crc 00000007 =E3 diff --git a/tests/ref/fate/eval b/tests/ref/fate/eval new file mode 100644 index 0000000000..3bc35db633 --- /dev/null +++ b/tests/ref/fate/eval @@ -0,0 +1,164 @@ +Evaluating '' +'' -> nan + +Evaluating '1;2' +'1;2' -> 2.000000 + +Evaluating '-20' +'-20' -> -20.000000 + +Evaluating '-PI' +'-PI' -> -3.141593 + +Evaluating '+PI' +'+PI' -> 3.141593 + +Evaluating '1+(5-2)^(3-1)+1/2+sin(PI)-max(-2.2,-3.1)' +'1+(5-2)^(3-1)+1/2+sin(PI)-max(-2.2,-3.1)' -> 12.700000 + +Evaluating '80G/80Gi' +'80G/80Gi' -> 0.931323 + +Evaluating '1k' +'1k' -> 1000.000000 + +Evaluating '1Gi' +'1Gi' -> 1073741824.000000 + +Evaluating '1gi' +'1gi' -> nan + +Evaluating '1GiFoo' +'1GiFoo' -> nan + +Evaluating '1k+1k' +'1k+1k' -> 2000.000000 + +Evaluating '1Gi*3foo' +'1Gi*3foo' -> nan + +Evaluating 'foo' +'foo' -> nan + +Evaluating 'foo(' +'foo(' -> nan + +Evaluating 'foo()' +'foo()' -> nan + +Evaluating 'foo)' +'foo)' -> nan + +Evaluating 'sin' +'sin' -> nan + +Evaluating 'sin(' +'sin(' -> nan + +Evaluating 'sin()' +'sin()' -> nan + +Evaluating 'sin)' +'sin)' -> nan + +Evaluating 'sin 10' +'sin 10' -> nan + +Evaluating 'sin(1,2,3)' +'sin(1,2,3)' -> nan + +Evaluating 'sin(1 )' +'sin(1 )' -> 0.841471 + +Evaluating '1' +'1' -> 1.000000 + +Evaluating '1foo' +'1foo' -> nan + +Evaluating 'bar + PI + E + 100f*2 + foo' +'bar + PI + E + 100f*2 + foo' -> nan + +Evaluating '13k + 12f - foo(1, 2)' +'13k + 12f - foo(1, 2)' -> nan + +Evaluating '1gi' +'1gi' -> nan + +Evaluating '1Gi' +'1Gi' -> 1073741824.000000 + +Evaluating 'st(0, 123)' +'st(0, 123)' -> 123.000000 + +Evaluating 'st(1, 123); ld(1)' +'st(1, 123); ld(1)' -> 123.000000 + +Evaluating 'st(0, 1); while(lte(ld(0), 100), st(1, ld(1)+ld(0));st(0, ld(0)+1)); ld(1)' +'st(0, 1); while(lte(ld(0), 100), st(1, ld(1)+ld(0));st(0, ld(0)+1)); ld(1)' -> 4950.000000 + +Evaluating 'st(1, 1); st(2, 2); st(0, 1); while(lte(ld(0),10), st(3, ld(1)+ld(2)); st(1, ld(2)); st(2, ld(3)); st(0, ld(0)+1)); ld(3)' +'st(1, 1); 
st(2, 2); st(0, 1); while(lte(ld(0),10), st(3, ld(1)+ld(2)); st(1, ld(2)); st(2, ld(3)); st(0, ld(0)+1)); ld(3)' -> 144.000000 + +Evaluating 'while(0, 10)' +'while(0, 10)' -> nan + +Evaluating 'st(0, 1); while(lte(ld(0),100), st(1, ld(1)+ld(0)); st(0, ld(0)+1))' +'st(0, 1); while(lte(ld(0),100), st(1, ld(1)+ld(0)); st(0, ld(0)+1))' -> 100.000000 + +Evaluating 'isnan(1)' +'isnan(1)' -> 0.000000 + +Evaluating 'isnan(NAN)' +'isnan(NAN)' -> 1.000000 + +Evaluating 'floor(NAN)' +'floor(NAN)' -> nan + +Evaluating 'floor(123.123)' +'floor(123.123)' -> 123.000000 + +Evaluating 'floor(-123.123)' +'floor(-123.123)' -> -124.000000 + +Evaluating 'trunc(123.123)' +'trunc(123.123)' -> 123.000000 + +Evaluating 'trunc(-123.123)' +'trunc(-123.123)' -> -123.000000 + +Evaluating 'ceil(123.123)' +'ceil(123.123)' -> 124.000000 + +Evaluating 'ceil(-123.123)' +'ceil(-123.123)' -> -123.000000 + +Evaluating 'sqrt(1764)' +'sqrt(1764)' -> 42.000000 + +Evaluating 'sqrt(-1)' +'sqrt(-1)' -> nan + +Evaluating 'not(1)' +'not(1)' -> 0.000000 + +Evaluating 'not(NAN)' +'not(NAN)' -> 0.000000 + +Evaluating 'not(0)' +'not(0)' -> 1.000000 + +Evaluating 'pow(0,1.23)' +'pow(0,1.23)' -> 0.000000 + +Evaluating 'pow(PI,1.23)' +'pow(PI,1.23)' -> 4.087844 + +Evaluating 'PI^1.23' +'PI^1.23' -> 4.087844 + +Evaluating 'pow(-1,1.23)' +'pow(-1,1.23)' -> nan + +12.700000 == 12.7 +0.931323 == 0.931322575 diff --git a/tests/ref/fate/iirfilter b/tests/ref/fate/iirfilter new file mode 100644 index 0000000000..2e5902d766 --- /dev/null +++ b/tests/ref/fate/iirfilter @@ -0,0 +1,1024 @@ + 0 0 + 38 2 + 151 15 + 339 65 + 603 182 + 942 381 + 1356 664 + 1845 1021 + 2409 1450 + 3046 1953 + 3755 2530 + 4535 3182 + 5384 3907 + 6300 4700 + 7278 5563 + 8315 6491 + 9405 7481 + 10541 8529 + 11717 9629 + 12924 10773 + 14151 11956 + 15385 13167 + 16615 14396 + 17825 15630 + 18997 16857 + 20114 18060 + 21156 19222 + 22102 20325 + 22929 21349 + 23613 22273 + 24132 23073 + 24461 23726 + 24575 24208 + 24453 24495 + 24073 24564 + 23416 24392 + 22467 23959 + 21213 23245 + 19649 22236 + 17773 20922 + 15590 19296 + 13116 17360 + 10371 15119 + 7386 12591 + 4201 9797 + 867 6771 + -2559 3554 + -6008 199 + -9405 -3235 +-12667 -6678 +-15707 -10053 +-18435 -13277 +-20762 -16261 +-22602 -18916 +-23875 -21153 +-24511 -22887 +-24457 -24040 +-23675 -24546 +-22151 -24352 +-19895 -23428 +-16946 -21762 +-13370 -19370 + -9265 -16296 + -4757 -12613 + 0 -8423 + 4831 -3858 + 9544 923 + 13934 5743 + 17799 10406 + 20942 14708 + 23189 18447 + 24395 21430 + 24457 23488 + 23323 24483 + 21001 24321 + 17563 22963 + 13148 20426 + 7959 16795 + 2259 12223 + -3643 6922 + -9405 1166 +-14670 -4731 +-19092 -10421 +-22359 -15550 +-24213 -19777 +-24481 -22797 +-23087 -24368 +-20071 -24334 +-15590 -22639 + -9924 -19343 + -3457 -14629 + 3345 -8793 + 9959 -2236 + 15851 4563 + 20517 11078 + 23528 16779 + 24575 21171 + 23506 23846 + 20349 24522 + 15327 23076 + 8845 19572 + 1469 14264 + -6117 7589 +-13180 135 +-18997 -7403 +-22942 -14289 +-24553 -19814 +-23592 -23377 +-20092 -24551 +-14366 -23145 + -6989 -19239 + 1244 -13192 + 9405 -5620 + 16532 2656 + 21744 10697 + 24357 17548 + 23978 22356 + 20579 24483 + 14518 23593 + 6518 19723 + -2409 13293 +-11083 5078 +-18310 -3876 +-23048 -12378 +-24568 -19252 +-22573 -23500 +-17270 -24458 + -9370 -21908 + 0 -16140 + 9439 -7935 + 17484 1526 + 22832 10824 + 24568 18508 + 22327 23330 + 16392 24452 + 7673 21608 + -2409 15181 +-12146 6168 +-19828 -3955 +-24050 -13466 +-23978 -20689 +-19535 -24292 +-11451 -23552 + -1168 -18512 + 9405 -10015 + 18234 416 + 23560 10836 + 
24257 19234 + 20092 23929 + 11817 23916 + 1055 19105 + -9993 10379 +-18997 -540 +-23986 -11413 +-23802 -19939 +-18385 -24246 + -8845 -23318 + 2746 -17260 + 13778 -7325 + 21691 4319 + 24575 15045 + 21656 22357 + 13528 24482 + 2071 20823 + -9959 12152 +-19581 484 +-24331 -11367 +-22915 -20460 +-15590 -24459 + -4164 -22257 + 8421 -14315 + 18828 -2603 + 24213 9857 + 23022 19756 + 15474 24383 + 3569 22388 + -9405 14211 +-19761 2031 +-24471 -10785 +-22069 -20591 +-13148 -24512 + -264 -21311 + 12763 -11818 + 21968 1241 + 24457 13990 + 19351 22545 + 8137 24211 + -5715 18362 +-17799 6720 +-24167 -7108 +-22646 -18722 +-13622 -24326 + 0 -21995 + 13685 -12382 + 22762 1409 + 24035 14788 + 16946 23188 + 3867 23644 +-10643 15884 +-21401 2514 +-24457 -11806 +-18584 -21960 + -5825 -24220 + 9160 -17649 + 20762 -4546 + 24527 10303 + 18901 21343 + 5935 24337 + -9405 18028 +-21098 4727 +-24442 -10470 +-17979 -21608 + -4201 -24206 + 11351 -17110 + 22280 -3064 + 23970 12287 + 15590 22636 + 565 23615 +-14760 14693 +-23773 -479 +-22467 -15504 +-11284 -23907 + 4942 -21954 + 19021 -10373 + 24575 5837 + 18973 19502 + 4646 24445 +-11883 18316 +-22929 3786 +-23226 -12541 +-12505 -23090 + 4239 -22841 + 18997 -11784 + 24567 4932 + 18107 19331 + 2671 24403 +-14151 17558 +-23919 2036 +-21602 -14549 + -8244 -23897 + 9405 -21206 + 22232 -7710 + 23473 9785 + 12342 22272 + -5384 23186 +-20286 11920 +-24287 -5693 +-15090 -20319 + 2409 -24060 + 18633 -14771 + 24538 2613 + 16698 18608 + -603 24329 +-17616 16471 +-24575 -682 +-17351 -17495 + 0 -24355 + 17404 -17211 + 24575 -65 + 17136 17163 + -603 24334 +-18031 17094 +-24538 -374 +-16023 -17660 + 2409 -24287 + 19397 -16108 + 24287 1992 + 13872 18902 + -5384 24066 +-21251 14131 +-23473 -4761 +-10473 -20664 + 9405 -23351 + 23151 -10967 + 21602 8573 + 5642 22543 +-14151 21682 +-24430 6431 +-18107 -13147 + 641 -23920 + 18997 -18514 + 24207 -475 + 12505 17922 + -8030 23970 +-22929 13357 +-21511 -6625 + -4646 -21972 + 15619 -21758 + 24575 -6009 + 15561 14083 + -4942 24019 +-21831 16485 +-22467 -3138 + -6227 -20474 + 14760 -22641 + 24569 -7904 + 15590 12791 + -5421 23839 +-22280 16760 +-21797 -3160 + -4201 -20666 + 16754 -22169 + 24442 -6381 + 12602 14430 + -9405 23865 +-23848 14348 +-18901 -6644 + 1545 -22308 + 20762 -19937 + 22804 -1339 + 5825 18365 +-16080 22954 +-24457 8529 +-12080 -13009 + 10643 -23650 + 24269 -14353 + 16946 7119 + -5127 22521 +-22762 18602 +-20413 -1370 + 0 -20152 + 20454 -21342 + 22646 -3797 + 4461 17096 +-17799 22804 +-23902 8148 + -8137 -13817 + 15149 -23296 + 24457 -11618 + 11016 10660 +-12763 23135 +-24574 14243 +-13148 -7861 + 10813 -22608 + 24471 -16124 + 14609 5565 + -9405 21949 +-24315 17379 +-15474 -3849 + 8598 -21336 + 24213 -18120 + 15793 2745 + -8421 20885 +-24220 18429 +-15590 -2263 + 8880 -20663 + 24331 -18359 + 14851 2398 + -9959 20685 +-24488 17917 +-13528 -3137 + 11618 -20918 + 24575 -17075 + 11551 4460 +-13778 21286 +-24421 15770 + -8845 -6328 + 16307 -21661 + 23802 -13916 + 5348 8671 +-18997 21868 +-22452 11421 + -1055 -11371 + 21548 -21685 + 20092 -8212 + -3941 14242 +-23560 20853 +-16476 4267 + 9405 -17009 + 24547 -19106 + 11451 343 +-14911 19309 +-23978 16208 + -5053 -5409 + 19828 -20699 + 21364 -12016 + -2409 10553 +-23347 20700 +-16392 6559 + 10268 -15211 + 24568 -18879 + 9090 -118 +-17484 18664 +-22690 14969 + 0 -6714 + 22719 -20134 + 17270 -9014 + -9717 13022 +-24568 18943 + -8527 1506 + 18310 -17662 + 21934 -14749 + -2409 6538 +-23695 19463 +-14518 7789 + 13433 -13633 + 23978 -17566 + 3270 935 +-21744 18075 +-18184 11812 + 9405 -9654 + 
24544 -18394 + 6989 -3082 +-19939 16117 +-20092 13945 + 6881 -6613 + 24553 -18196 + 8809 -5437 +-18997 14458 +-20742 14731 + 6117 -4859 + 24531 -17657 + 8845 -6307 +-19210 13526 +-20349 14588 + 7170 -4444 + 24575 -17109 + 7098 -5874 +-20517 13412 +-18780 13691 + 9959 -5272 + 24347 -16558 + 3457 -4241 +-22482 13949 +-15590 11979 + 14181 -7142 + 23087 -15723 + -2146 -1459 +-24213 14726 +-10200 9228 + 19092 -9692 + 19717 -14108 + -9405 2343 +-24304 15085 + -2259 5221 + 23251 -12286 + 13148 -11127 +-17190 6746 +-21001 14161 + 7745 2 + 24457 -13938 + 2971 -6372 +-23189 10842 +-12860 11094 + 17799 -5795 + 20243 -13421 + -9544 -30 +-24096 13184 + 0 5498 + 24110 -10713 + 9265 -9718 +-20620 6673 +-16946 12155 + 14427 -1883 + 22151 -12653 + -6591 -2861 +-24457 11378 + -1770 6908 + 23875 -8726 + 9648 -9817 +-20762 5209 +-16251 11385 + 15707 -1358 + 21059 -11610 + -9405 -2353 +-23830 10656 + 2559 5560 + 24560 -8789 + 4201 -8028 +-23439 6314 +-10371 9643 + 20783 -3542 + 15590 -10404 +-16973 742 +-19649 10386 + 12407 1870 + 22467 -9720 + -7458 -4144 +-24073 8562 + 2446 5991 + 24575 -7072 + 2371 -7375 +-24132 5398 + -6808 8306 + 22929 -3667 + 10745 -8824 +-21156 1980 +-14120 8987 + 18997 -408 + 16918 -8863 +-16615 -1001 +-19163 8521 + 14151 2220 + 20902 -8027 +-11717 -3241 +-22200 7441 + 9405 4071 + 23126 -6811 + -7278 -4722 +-23754 6177 + 5384 5213 + 24153 -5571 + -3755 -5566 +-24386 5013 + 2409 5801 + 24506 -4521 + -1356 -5939 +-24557 4104 + 603 5999 + 24573 -3765 + -151 -5994 +-24575 3508 + 0 5937 + 24575 -3331 + -151 -5835 +-24573 3232 + 603 5694 + 24557 -3205 + -1356 -5517 +-24506 3244 + 2409 5303 + 24386 -3343 + -3755 -5049 +-24153 3494 + 5384 4752 + 23754 -3685 + -7278 -4407 +-23126 3906 + 9405 4007 + 22200 -4143 +-11717 -3547 +-20902 4380 + 14151 3025 + 19163 -4598 +-16615 -2434 +-16918 4778 + 18997 1780 + 14120 -4898 +-21156 -1066 +-10745 4934 + 22929 304 + 6808 -4862 +-24132 489 + -2371 4664 + 24575 -1288 + -2446 -4320 +-24073 2060 + 7458 3820 + 22467 -2767 +-12407 -3162 +-19649 3365 + 16973 2357 + 15590 -3808 +-20783 -1429 +-10371 4050 + 23439 419 + 4201 -4055 +-24560 616 + 2559 3795 + 23830 -1607 + -9405 -3266 +-21059 2473 + 15707 2486 + 16251 -3130 +-20762 -1499 + -9648 3505 + 23875 386 + 1770 -3539 +-24457 754 + 6591 3205 + 22151 -1798 +-14427 -2518 +-16946 2618 + 20620 1540 + 9265 -3101 +-24110 -381 + 0 3162 + 24096 -809 + -9544 -2775 +-20243 1859 + 17799 1978 + 12860 -2598 +-23189 -879 + -2971 2893 + 24457 -344 + -7745 -2674 +-21001 1478 + 17190 1966 + 13148 -2304 +-23251 -890 + -2259 2647 + 24304 -341 + -9405 -2421 +-19717 1467 + 19092 1662 + 10200 -2229 +-24213 -535 + 2146 2434 + 23087 -692 +-14181 -2022 +-15590 1706 + 22482 1090 + 3457 -2230 +-24347 115 + 9959 2111 + 18780 -1251 +-20517 -1374 + -7098 1975 + 24575 238 + -7170 -2057 +-20349 938 + 19210 1464 + 8845 -1763 +-24531 -392 + 6117 1949 + 20742 -783 +-18997 -1425 + -8809 1630 + 24553 385 + -6881 -1825 +-20092 770 + 19939 1291 + 6989 -1572 +-24544 -244 + 9405 1688 + 18184 -869 +-21744 -1069 + -3270 1559 + 23978 -7 +-13433 -1511 +-14518 1041 + 23695 753 + -2409 -1536 +-21934 344 + 18310 1251 + 8527 -1225 +-24568 -337 + 9717 1436 + 17270 -719 +-22719 -869 + 0 1342 + 22690 -157 +-17484 -1188 + -9090 1051 + 24568 353 +-10268 -1293 +-16392 658 + 23347 745 + -2409 -1224 +-21364 244 + 19828 996 + 5053 -1036 +-23978 -132 + 14911 1114 + 11451 -783 +-24547 -437 + 9405 1124 + 16476 -513 +-23560 -661 + 3941 1058 + 20092 -257 +-21548 -807 + -1055 945 + 22452 -37 +-18997 -887 + -5348 815 + 23802 142 +-16307 -917 + -8845 682 + 24421 276 +-13778 
-913 +-11551 563 + 24575 371 +-11618 -888 +-13528 463 + 24488 431 + -9959 -854 +-14851 386 + 24331 462 + -8880 -815 +-15590 333 + 24220 471 + -8421 -779 +-15793 302 + 24213 460 + -8598 -746 +-15474 292 + 24315 433 + -9405 -717 +-14609 300 + 24471 391 +-10813 -689 +-13148 324 + 24574 336 +-12763 -660 +-11016 359 + 24457 267 +-15149 -627 + -8137 400 + 23902 184 +-17799 -584 + -4461 444 + 22646 90 +-20454 -527 + 0 483 + 20413 -15 +-22762 -452 + 5127 511 + 16946 -124 +-24269 -357 + 10643 517 + 12080 -232 +-24457 -241 + 16080 495 + 5825 -328 +-22804 -107 + 20762 440 + -1545 -400 +-18901 35 + 23848 347 + -9405 -437 +-12602 173 + 24442 220 +-16754 -426 + -4201 290 + 21797 69 +-22280 -362 + 5421 366 + 15590 -89 +-24569 -247 + 14760 384 + 6227 -228 +-22467 -95 + 21831 335 + -4942 -321 +-15561 71 + 24575 222 +-15619 -344 + -4646 214 + 21511 65 +-22929 -288 + 8030 300 + 12505 -102 +-24207 -162 + 18997 304 + -641 -232 +-18107 4 + 24430 218 +-14151 -286 + -5642 160 + 21602 68 +-23151 -244 + 9405 253 + 10473 -97 +-23473 -115 + 21251 247 + -5384 -216 +-13872 49 + 24287 142 +-19397 -238 + 2409 183 + 16023 -17 +-24538 -153 + 18031 224 + -603 -158 +-17136 0 + 24575 153 +-17404 -209 + 0 142 + 17351 6 +-24575 -144 + 17616 196 + -603 -133 +-16698 -1 + 24538 131 +-18633 -182 + 2409 132 + 15090 -11 +-24287 -111 + 20286 169 + -5384 -134 +-12342 30 + 23473 86 +-22232 -152 + 9405 138 + 8244 -53 +-21602 -56 + 23919 132 +-14151 -138 + -2671 76 + 18107 21 +-24567 -104 + 18997 134 + -4239 -98 +-12505 16 + 23226 69 +-22929 -119 + 11883 111 + 4646 -53 +-18973 -28 + 24575 92 +-19021 -113 + 4942 82 + 11284 -16 +-22467 -54 + 23773 97 +-14760 -97 + -565 56 + 15590 8 +-23970 -65 + 22280 93 +-11351 -82 + -4201 37 + 17979 21 +-24442 -67 + 21098 85 + -9405 -69 + -5935 26 + 18901 24 +-24527 -64 + 20762 77 + -9160 -61 + -5825 23 + 18584 21 +-24457 -56 + 21401 69 +-10643 -57 + -3867 25 + 16946 15 +-24035 -47 + 22762 62 +-13685 -55 + 0 29 + 13622 4 +-22646 -35 + 24167 53 +-17799 -52 + 5715 36 + 8137 -8 +-19351 -21 + 24457 41 +-21968 -49 + 12763 41 + 264 -21 +-13148 -4 + 22069 27 +-24471 -40 + 19761 42 + -9405 -31 + -3569 12 + 15474 9 +-23022 -27 + 24213 36 +-18828 -35 + 8421 25 + 4164 -9 +-15590 -9 + 22915 23 +-24331 -31 + 19581 31 + -9959 -23 + -2071 10 + 13528 5 +-21656 -18 + 24575 26 +-21691 -27 + 13778 22 + -2746 -12 + -8845 0 + 18385 11 +-23802 -20 + 23986 23 +-18997 -22 + 9993 15 + 1055 -7 +-11817 -3 + 20092 11 +-24257 -17 + 23560 19 +-18234 -17 + 9405 12 + 1168 -5 +-11451 -3 + 19535 10 +-23978 -14 + 24050 16 +-19828 -14 + 12146 11 + -2409 -5 + -7673 0 + 16392 6 +-22327 -10 + 24568 12 +-22832 -12 + 17484 11 + -9439 -7 + 0 3 + 9370 2 +-17270 -5 + 22573 8 +-24568 -9 + 23048 10 +-18310 -8 + 11083 5 + -2409 -3 + -6518 -1 + 14518 3 +-20579 -5 + 23978 7 +-24357 -7 + 21744 7 +-16532 -6 + 9405 3 + -1244 -1 + -6989 -1 + 14366 3 +-20092 -4 + 23592 5 +-24553 -5 + 22942 5 +-18997 -4 + 13180 3 + -6117 -2 + -1469 1 + 8845 2 +-15327 -2 + 20349 3 +-23506 -4 + 24575 3 +-23528 -4 + 20517 3 +-15851 -2 + 9959 1 + -3345 0 + -3457 0 + 9924 1 +-15590 -2 + 20071 2 +-23087 -2 + 24481 3 +-24213 -2 + 22359 2 +-19092 -1 + 14670 1 + -9405 0 + 3643 0 + 2259 0 + -7959 -1 + 13148 1 +-17563 -1 + 21001 1 +-23323 -1 + 24457 1 +-24395 -1 + 23189 1 +-20942 -1 + 17799 1 +-13934 0 + 9544 0 + -4831 0 + 0 0 + 4757 1 + -9265 0 + 13370 0 +-16946 -1 + 19895 0 +-22151 -1 + 23675 0 +-24457 -1 + 24511 0 +-23875 0 + 22602 0 +-20762 0 + 18435 0 +-15707 0 + 12667 0 + -9405 0 + 6008 0 + -2559 0 + -867 0 + 4201 0 + -7386 0 + 10371 0 +-13116 0 + 15590 0 +-17773 0 + 19649 0 
+-21213 0 + 22467 0 +-23416 0 + 24073 0 +-24453 0 + 24575 0 +-24461 0 + 24132 0 +-23613 0 + 22929 0 +-22102 0 + 21156 0 +-20114 0 + 18997 0 +-17825 0 + 16615 0 +-15385 0 + 14151 0 +-12924 0 + 11717 0 +-10541 0 + 9405 0 + -8315 0 + 7278 0 + -6300 0 + 5384 0 + -4535 0 + 3755 0 + -3046 0 + 2409 0 + -1845 0 + 1356 0 + -942 0 + 603 0 + -339 0 + 151 0 + -38 0 diff --git a/tests/ref/fate/md5 b/tests/ref/fate/md5 new file mode 100644 index 0000000000..af08a8477f --- /dev/null +++ b/tests/ref/fate/md5 @@ -0,0 +1,5 @@ +0bf1bcc8a1d72e2cf58d42182b637e56 +993a3eb298e52aca83ecfbb6a766b4d0 +07c01ca7c733475fad38c84c56f305c1 +9fc8404827cac26385f48f4f58fd32ce +a22bfef14302c5ca46e0ae91092bc0e0 diff --git a/tests/ref/fate/rv30 b/tests/ref/fate/rv30 index 5b43588bb4..6c99871eb6 100644 --- a/tests/ref/fate/rv30 +++ b/tests/ref/fate/rv30 @@ -16,31 +16,31 @@ 0, 112500, 126720, 0xe572dfc9 0, 120000, 126720, 0xbc3cc34f 0, 127500, 126720, 0xcf8cb0e2 -0, 135000, 126720, 0x6d1c630d -0, 142500, 126720, 0x4338e469 -0, 150000, 126720, 0x9d82ea38 -0, 157500, 126720, 0x55e0b559 -0, 165000, 126720, 0x5eefb5ef -0, 172500, 126720, 0x4b10b746 -0, 180000, 126720, 0x8b07a1db -0, 187500, 126720, 0x8c639b34 -0, 195000, 126720, 0x63eb0b9f -0, 202500, 126720, 0x31c80c83 -0, 210000, 126720, 0x78495352 -0, 217500, 126720, 0x63d609c4 -0, 225000, 126720, 0xcd2a62d8 -0, 232500, 126720, 0x4aea732d -0, 240000, 126720, 0xe3bb352c -0, 247500, 126720, 0x4b9036ad -0, 255000, 126720, 0x88b66e2d -0, 262500, 126720, 0x4a8a1b16 -0, 270000, 126720, 0x2e014eac -0, 277500, 126720, 0x83212c67 -0, 285000, 126720, 0x4937e897 -0, 292500, 126720, 0x2d38babe -0, 300000, 126720, 0xbcb43c09 -0, 307500, 126720, 0x955ffaf4 -0, 315000, 126720, 0x3337d4a2 -0, 322500, 126720, 0xe8f58c33 -0, 330000, 126720, 0x3a7f771f -0, 337500, 126720, 0xb67c39b9 +0, 135000, 126720, 0x75ae61b6 +0, 142500, 126720, 0x554fe3e4 +0, 150000, 126720, 0x72ecea95 +0, 157500, 126720, 0x5d00b5fe +0, 165000, 126720, 0xe39bba0d +0, 172500, 126720, 0x9c21bad8 +0, 180000, 126720, 0x72f2a47d +0, 187500, 126720, 0x4f639ebe +0, 195000, 126720, 0x534a10cc +0, 202500, 126720, 0xfdca11d3 +0, 210000, 126720, 0x0c735615 +0, 217500, 126720, 0x0eaf0c1b +0, 225000, 126720, 0xce5e6794 +0, 232500, 126720, 0x14cf7974 +0, 240000, 126720, 0xbc513f2a +0, 247500, 126720, 0xbc303fae +0, 255000, 126720, 0xd9f67585 +0, 262500, 126720, 0x3378251f +0, 270000, 126720, 0xb3ed5911 +0, 277500, 126720, 0xc15a3577 +0, 285000, 126720, 0x0a24f256 +0, 292500, 126720, 0xfab9c45d +0, 300000, 126720, 0x45464610 +0, 307500, 126720, 0xfe2e057d +0, 315000, 126720, 0x23efdc35 +0, 322500, 126720, 0x4d888b2e +0, 330000, 126720, 0xdd0d74df +0, 337500, 126720, 0x08382b8e diff --git a/tests/ref/lavf/ffm b/tests/ref/lavf/ffm index b89af6e3e5..b20e132b45 100644 --- a/tests/ref/lavf/ffm +++ b/tests/ref/lavf/ffm @@ -1,3 +1,3 @@ -b6acf782a38d313153b68c4ca204fc90 *./tests/data/lavf/lavf.ffm +f9bee27ea1b6b83a06b5f9efb0a4ac1f *./tests/data/lavf/lavf.ffm 376832 ./tests/data/lavf/lavf.ffm ./tests/data/lavf/lavf.ffm CRC=0xf361ed74 diff --git a/tests/ref/lavf/gif b/tests/ref/lavf/gif index fa55d0e66e..4a4ebfb9dc 100644 --- a/tests/ref/lavf/gif +++ b/tests/ref/lavf/gif @@ -1,3 +1,3 @@ -98968ceb210ab260a6a7af36767b94d3 *./tests/data/lavf/lavf.gif -2906382 ./tests/data/lavf/lavf.gif +e6089fd4ef3b9df44090ab3650bdd810 *./tests/data/lavf/lavf.gif +2906401 ./tests/data/lavf/lavf.gif ./tests/data/lavf/lavf.gif CRC=0xe5605ff6 diff --git a/tests/ref/lavf/mxf b/tests/ref/lavf/mxf index 58e75d17cd..73eb307081 100644 --- a/tests/ref/lavf/mxf +++ 
b/tests/ref/lavf/mxf @@ -1,6 +1,3 @@ 785e38ddd2466046f30aa36399b8f8fa *./tests/data/lavf/lavf.mxf 525881 ./tests/data/lavf/lavf.mxf -./tests/data/lavf/lavf.mxf CRC=0x4ace0849 -b3174e2db508564c1cce0b5e3c1bc1bd *./tests/data/lavf/lavf.mxf_d10 -5330989 ./tests/data/lavf/lavf.mxf_d10 -./tests/data/lavf/lavf.mxf_d10 CRC=0xc3f4f92e +./tests/data/lavf/lavf.mxf CRC=0x4ace0849
\ No newline at end of file diff --git a/tests/ref/lavf/mxf_d10 b/tests/ref/lavf/mxf_d10 new file mode 100644 index 0000000000..2582022d17 --- /dev/null +++ b/tests/ref/lavf/mxf_d10 @@ -0,0 +1,3 @@ +b3174e2db508564c1cce0b5e3c1bc1bd *./tests/data/lavf/lavf.mxf_d10 +5330989 ./tests/data/lavf/lavf.mxf_d10 +./tests/data/lavf/lavf.mxf_d10 CRC=0xc3f4f92e diff --git a/tests/ref/lavf/pixfmt b/tests/ref/lavf/pixfmt index a6618390e4..bf62d9059b 100644 --- a/tests/ref/lavf/pixfmt +++ b/tests/ref/lavf/pixfmt @@ -28,9 +28,9 @@ efa7c0337cc00c796c6df615223716f1 *./tests/data/pixfmt/rgb565.yuv 304128 ./tests/data/pixfmt/rgb555.yuv 6be306b0cce5f8e6c271ea17fef9745b *./tests/data/pixfmt/gray.yuv 304128 ./tests/data/pixfmt/gray.yuv -31398104d2349dd48328a6862bc6711f *./tests/data/pixfmt/monow.yuv +6c719671e39f1bcf67b47eab98fa529b *./tests/data/pixfmt/monow.yuv 304128 ./tests/data/pixfmt/monow.yuv -31398104d2349dd48328a6862bc6711f *./tests/data/pixfmt/monob.yuv +6c719671e39f1bcf67b47eab98fa529b *./tests/data/pixfmt/monob.yuv 304128 ./tests/data/pixfmt/monob.yuv 00b85790df5740bab95e2559d81603a7 *./tests/data/pixfmt/yuv440p.yuv 304128 ./tests/data/pixfmt/yuv440p.yuv diff --git a/tests/ref/lavf/ts b/tests/ref/lavf/ts index efac3ad4ee..3b2dad1b5e 100644 --- a/tests/ref/lavf/ts +++ b/tests/ref/lavf/ts @@ -1,3 +1,3 @@ -178f5094fc874112d21b4a8716121d96 *./tests/data/lavf/lavf.ts +151774afed45b19da9b7e83613a1e72b *./tests/data/lavf/lavf.ts 406644 ./tests/data/lavf/lavf.ts ./tests/data/lavf/lavf.ts CRC=0x133216c1 diff --git a/tests/ref/lavfi/pixfmts_null_le b/tests/ref/lavfi/pixdesc index a96635d508..35e08a6ac7 100644 --- a/tests/ref/lavfi/pixfmts_null_le +++ b/tests/ref/lavfi/pixdesc @@ -1,10 +1,12 @@ abgr 037bf9df6a765520ad6d490066bf4b89 argb c442a8261c2265a07212ef0f72e35f5a bgr24 0d0cb38ab3fa0b2ec0865c14f78b217b -bgr48be 4ba0ff7fc9e011ea264610ad1585bb1f -bgr48le d022bfdd6a07d5dcc693799322a386b4 +bgr48be 74dedaaacae8fd1ef46e05f78cf29d62 +bgr48le 0eb7d30801eac6058814bddd330b3c76 bgr4_byte 50d23cc82d9dcef2fd12adb81fb9b806 +bgr555be 49f01b1f1f0c84fd9e776dd34cc3c280 bgr555le 378d6ac4223651a1adcbf94a3d0d807b +bgr565be 257cf78afa35dc31e9696f139c916715 bgr565le 1dfdd03995c287e3c754b164bf26a355 bgr8 24bd566170343d06fec6fccfff5abc54 bgra 76a18a5151242fa137133f604cd624d2 @@ -16,10 +18,12 @@ monow 9251497f3b0634f1165d12d5a289d943 nv12 e0af357888584d36eec5aa0f673793ef nv21 9a3297f3b34baa038b1f37cb202b512f rgb24 b41eba9651e1b5fe386289b506188105 -rgb48be 460b6de89b156290a12d3941db8bd731 -rgb48le cd93cb34d15996987367dabda3a10128 +rgb48be e3bc84c9af376fb6d0f0293cc7b713a6 +rgb48le f51c0e71638a822458329abb2f4052c7 rgb4_byte c93ba89b74c504e7f5ae9d9ab1546c73 +rgb555be 912a62c5e53bfcbac2a0340e10973cf2 rgb555le a937a0fc764fb57dc1b3af87cba0273c +rgb565be 9cadf742e05ddc23a3b5b270f89aad3c rgb565le d39aa298bb525e9be8860351c6f62dab rgb8 4a9d8e4f2f154e83a7e1735be6300700 rgba 93a5b3712e6eb8c5b9a09ffc7b9fbc12 @@ -34,13 +38,14 @@ yuv420p16le 2d59c4f1d0314a5a957a7cfc4b6fabcc yuv420p9be ce880fa07830e5297c22acf6e20555ce yuv420p9le 16543fda8f87d94a6cf857d2e8d4461a yuv422p c9bba4529821d796a6ab09f6a5fd355a +yuv422p10be bdc13b630fd668b34c6fe1aae28dfc71 yuv422p10le d0607c260a45c973e6639f4e449730ad -yuv422p16be 5499502e1c29534a158a1fe60e889f60 -yuv422p16le e3d61fde6978591596bc36b914386623 +yuv422p16be dc9886f2fccf87cc54b27e071a2c251e +yuv422p16le f181c8d8436f1233ba566d9bc88005ec yuv440p 5a064afe2b453bb52cdb3f176b1aa1cf yuv444p 0a98447b78fd476aa39686da6a74fa2e -yuv444p16be ea602a24b8e6969679265078bd8607b6 -yuv444p16le 1262a0dc57ee147967fc896d04206313 
+yuv444p16be af555dbaa401b142a995566864f47545 +yuv444p16le a803e8016997dad95c5b2a72f54c34d6 yuva420p a29884f3f3dfe1e00b961bc17bef3d47 yuvj420p 32eec78ba51857b16ce9b813a49b7189 yuvj422p 0dfa0ed434f73be51428758c69e082cb diff --git a/tests/ref/lavfi/pixdesc_be b/tests/ref/lavfi/pixfmts_copy index afea41d1a1..35e08a6ac7 100644 --- a/tests/ref/lavfi/pixdesc_be +++ b/tests/ref/lavfi/pixfmts_copy @@ -1,11 +1,13 @@ abgr 037bf9df6a765520ad6d490066bf4b89 argb c442a8261c2265a07212ef0f72e35f5a bgr24 0d0cb38ab3fa0b2ec0865c14f78b217b -bgr48be 4ba0ff7fc9e011ea264610ad1585bb1f -bgr48le d022bfdd6a07d5dcc693799322a386b4 +bgr48be 74dedaaacae8fd1ef46e05f78cf29d62 +bgr48le 0eb7d30801eac6058814bddd330b3c76 bgr4_byte 50d23cc82d9dcef2fd12adb81fb9b806 bgr555be 49f01b1f1f0c84fd9e776dd34cc3c280 +bgr555le 378d6ac4223651a1adcbf94a3d0d807b bgr565be 257cf78afa35dc31e9696f139c916715 +bgr565le 1dfdd03995c287e3c754b164bf26a355 bgr8 24bd566170343d06fec6fccfff5abc54 bgra 76a18a5151242fa137133f604cd624d2 gray db08f7f0751900347e6b8649e4164d21 @@ -16,11 +18,13 @@ monow 9251497f3b0634f1165d12d5a289d943 nv12 e0af357888584d36eec5aa0f673793ef nv21 9a3297f3b34baa038b1f37cb202b512f rgb24 b41eba9651e1b5fe386289b506188105 -rgb48be 460b6de89b156290a12d3941db8bd731 -rgb48le cd93cb34d15996987367dabda3a10128 +rgb48be e3bc84c9af376fb6d0f0293cc7b713a6 +rgb48le f51c0e71638a822458329abb2f4052c7 rgb4_byte c93ba89b74c504e7f5ae9d9ab1546c73 rgb555be 912a62c5e53bfcbac2a0340e10973cf2 +rgb555le a937a0fc764fb57dc1b3af87cba0273c rgb565be 9cadf742e05ddc23a3b5b270f89aad3c +rgb565le d39aa298bb525e9be8860351c6f62dab rgb8 4a9d8e4f2f154e83a7e1735be6300700 rgba 93a5b3712e6eb8c5b9a09ffc7b9fbc12 uyvy422 adcf64516a19fce44df77082bdb16291 @@ -35,12 +39,13 @@ yuv420p9be ce880fa07830e5297c22acf6e20555ce yuv420p9le 16543fda8f87d94a6cf857d2e8d4461a yuv422p c9bba4529821d796a6ab09f6a5fd355a yuv422p10be bdc13b630fd668b34c6fe1aae28dfc71 -yuv422p16be 5499502e1c29534a158a1fe60e889f60 -yuv422p16le e3d61fde6978591596bc36b914386623 +yuv422p10le d0607c260a45c973e6639f4e449730ad +yuv422p16be dc9886f2fccf87cc54b27e071a2c251e +yuv422p16le f181c8d8436f1233ba566d9bc88005ec yuv440p 5a064afe2b453bb52cdb3f176b1aa1cf yuv444p 0a98447b78fd476aa39686da6a74fa2e -yuv444p16be ea602a24b8e6969679265078bd8607b6 -yuv444p16le 1262a0dc57ee147967fc896d04206313 +yuv444p16be af555dbaa401b142a995566864f47545 +yuv444p16le a803e8016997dad95c5b2a72f54c34d6 yuva420p a29884f3f3dfe1e00b961bc17bef3d47 yuvj420p 32eec78ba51857b16ce9b813a49b7189 yuvj422p 0dfa0ed434f73be51428758c69e082cb diff --git a/tests/ref/lavfi/pixfmts_copy_le b/tests/ref/lavfi/pixfmts_copy_le deleted file mode 100644 index a96635d508..0000000000 --- a/tests/ref/lavfi/pixfmts_copy_le +++ /dev/null @@ -1,49 +0,0 @@ -abgr 037bf9df6a765520ad6d490066bf4b89 -argb c442a8261c2265a07212ef0f72e35f5a -bgr24 0d0cb38ab3fa0b2ec0865c14f78b217b -bgr48be 4ba0ff7fc9e011ea264610ad1585bb1f -bgr48le d022bfdd6a07d5dcc693799322a386b4 -bgr4_byte 50d23cc82d9dcef2fd12adb81fb9b806 -bgr555le 378d6ac4223651a1adcbf94a3d0d807b -bgr565le 1dfdd03995c287e3c754b164bf26a355 -bgr8 24bd566170343d06fec6fccfff5abc54 -bgra 76a18a5151242fa137133f604cd624d2 -gray db08f7f0751900347e6b8649e4164d21 -gray16be 7becf34ae825a3df3969bf4c6bfeb5e2 -gray16le 10bd87059b5c189f3caef2837f4f2b5c -monob 668ebe8b8103b9046b251b2fa8a1d88f -monow 9251497f3b0634f1165d12d5a289d943 -nv12 e0af357888584d36eec5aa0f673793ef -nv21 9a3297f3b34baa038b1f37cb202b512f -rgb24 b41eba9651e1b5fe386289b506188105 -rgb48be 460b6de89b156290a12d3941db8bd731 -rgb48le cd93cb34d15996987367dabda3a10128 -rgb4_byte 
c93ba89b74c504e7f5ae9d9ab1546c73 -rgb555le a937a0fc764fb57dc1b3af87cba0273c -rgb565le d39aa298bb525e9be8860351c6f62dab -rgb8 4a9d8e4f2f154e83a7e1735be6300700 -rgba 93a5b3712e6eb8c5b9a09ffc7b9fbc12 -uyvy422 adcf64516a19fce44df77082bdb16291 -yuv410p 2d9225153c83ee1132397d619d94d1b3 -yuv411p 8b298af3e43348ca1b11eb8a3252ac6c -yuv420p eba2f135a08829387e2f698ff72a2939 -yuv420p10be 7605e266c088d0fcf68c7b27c3ceff5f -yuv420p10le 4228ee628c6deec123a13b9784516cc7 -yuv420p16be 16c009a235cd52b74791a895423152a3 -yuv420p16le 2d59c4f1d0314a5a957a7cfc4b6fabcc -yuv420p9be ce880fa07830e5297c22acf6e20555ce -yuv420p9le 16543fda8f87d94a6cf857d2e8d4461a -yuv422p c9bba4529821d796a6ab09f6a5fd355a -yuv422p10le d0607c260a45c973e6639f4e449730ad -yuv422p16be 5499502e1c29534a158a1fe60e889f60 -yuv422p16le e3d61fde6978591596bc36b914386623 -yuv440p 5a064afe2b453bb52cdb3f176b1aa1cf -yuv444p 0a98447b78fd476aa39686da6a74fa2e -yuv444p16be ea602a24b8e6969679265078bd8607b6 -yuv444p16le 1262a0dc57ee147967fc896d04206313 -yuva420p a29884f3f3dfe1e00b961bc17bef3d47 -yuvj420p 32eec78ba51857b16ce9b813a49b7189 -yuvj422p 0dfa0ed434f73be51428758c69e082cb -yuvj440p 657501a28004e27a592757a7509f5189 -yuvj444p 98d3d054f2ec09a75eeed5d328dc75b7 -yuyv422 f2569f2b5069a0ee0cecae33de0455e3 diff --git a/tests/ref/lavfi/pixfmts_crop_le b/tests/ref/lavfi/pixfmts_crop index 01da415947..e3bb88c101 100644 --- a/tests/ref/lavfi/pixfmts_crop_le +++ b/tests/ref/lavfi/pixfmts_crop @@ -1,10 +1,12 @@ abgr cd761690872843d1b7ab0c695393c751 argb 2ec6ef18769bcd651c2e8904d5a3ee67 bgr24 3450fd00cf1493d1ded75544d82ba3ec -bgr48be 90cb5d373a1123432d63c6a10c101afa -bgr48le 9371f54ceda9010f1199e86f4930ac3f +bgr48be a9a7d177cef0914d3f1d266f00dff676 +bgr48le b475d1b529ed80c728ddbacd22d35281 bgr4_byte 2f6ac3cdd4676ab4e2982bdf0664945b +bgr555be d3a7c273604723adeb7e5f5dd1c4272b bgr555le d22442fc13b464f9ba455b08df4e981f +bgr565be fadceef4a64ad6873fcb43ddee0deb3c bgr565le 891664e5a54ae5968901347da92bc5e9 bgr8 4b7159e05765bd4703180072d86423c8 bgra 395c9f706fccda721471acaa5c96c16c @@ -12,10 +14,12 @@ gray 8c4850e66562a587a292dc728a65ea4a gray16be daa5a6b98fb4a280c57c57bff1a2ab5a gray16le 84f5ea7259073edcb893113b42213c8e rgb24 3b90ed64b687d3dc186c6ef521dc71a8 -rgb48be a808128041a1962deaa8620c7448feba -rgb48le ce92d02cc322608d5be377cb1940677b +rgb48be b8f9fd6aaa24d75275ee2f8b8a7b9e55 +rgb48le 3e52e831a040f086c3ae983241172cce rgb4_byte 6958029f73c6cdfed4f71020d816f027 +rgb555be 41a7d1836837bc90f2cae19a9c9df3b3 rgb555le eeb78f8ce6186fba55c941469e60ba67 +rgb565be b2d1cb525f3a0cfe27753c0d479b2fa9 rgb565le 6a49700680be9a0d434411825a769556 rgb8 88b0398c265d1ed7a837dc084fa0917c rgba fd00b24c7597268c32759a84a1de2de4 @@ -25,12 +29,12 @@ yuv420p bfea0188ddd4889787c403caae119cc7 yuv420p16be 8365eff38b8c329aeb95fc605fa229bb yuv420p16le 5e8dd38d973d5854abe1ad4efad20cc1 yuv422p f2f930a91fe00d4252c4720b5ecd8961 -yuv422p16be 167e4338811a7d272925a4c6417d60da -yuv422p16le 3359395d5875d581fa1e975013d30114 +yuv422p16be 93f9b6f33f9529db6de6a9f0ddd70eb5 +yuv422p16le 2e66dcfec54ca6b57aa4bbd9ac234639 yuv440p 2472417d980e395ad6843cbb8b633b29 yuv444p 1f151980486848c96bc5585ced99003e -yuv444p16be d69280c2856865d2ea94bd5292aac1c6 -yuv444p16le 33f43e030bedf9723be4f63c3e9fc80e +yuv444p16be e7d1ecf0c11a41b5db192f761f55bd3c +yuv444p16le 3298a0043d982e7cf1a33a1292fa11f0 yuva420p 7536753dfbc7932560fb50c921369a0e yuvj420p 21f891093006d42d7683b0e1d773a657 yuvj422p 9a43d474c407590ad8f213880586b45e diff --git a/tests/ref/lavfi/pixfmts_hflip_le b/tests/ref/lavfi/pixfmts_hflip index 514eed7b3b..2084d581e1 100644 --- 
a/tests/ref/lavfi/pixfmts_hflip_le +++ b/tests/ref/lavfi/pixfmts_hflip @@ -1,10 +1,12 @@ abgr 49468c6c9ceee5d52b08b1270a909323 argb 50ba9f16c6475530602f2983278b82d0 bgr24 cc53d2011d097972db0d22756c3699e3 -bgr48be 11641cf0f4516a9aed98f7872720f801 -bgr48le b5440734eed128554dd9f83b34ba582f +bgr48be 90374bc92471f1bd4931d71ef8b73f50 +bgr48le 696f628d0dd32121e60a0d61ac47d6e6 bgr4_byte aac987e7d1a6a96477cfc0b48a4285de +bgr555be bc07265898440116772200390d70c092 bgr555le ccee08679bac84a1f960c6c9070c5538 +bgr565be e088789ce46224b87c6e46610ef19add bgr565le 3703466e19e1b52e03a34fd244a8e8e4 bgr8 50b505a889f0428242305acb642da107 bgra 01ca21e7e6a8d1281b4553bde8e8a404 @@ -12,10 +14,12 @@ gray 03efcb4ab52a24c0af0e03cfd26c9377 gray16be 9bcbca979601ddc4869f846f08f3d1dd gray16le c1b8965adcc7f847ee343149ff507073 rgb24 754f1722fc738590cc407ac65749bfe8 -rgb48be 10743e1577dc3198dbbc7c0b3b8f429e -rgb48le dd945a44f39119221407bf7a04f1bc49 +rgb48be 2397b9d3c296ac15f8a2325a703f81c7 +rgb48le 527043c72546d8b4bb1ce2dea4b294c3 rgb4_byte c8a3f995fcf3e0919239ea2c413ddc29 +rgb555be 045ce8607d3910586f4d97481dda8632 rgb555le 8778ee0cf58ce9ad1d99a1eca9f95e87 +rgb565be c8022a1b2470e72f124e4389fad4c372 rgb565le 2cb690eb3fcb72da3771ad6a48931158 rgb8 9e462b811b9b6173397b9cfc1f6b2f17 rgba d3d0dc1ecef3ed72f26a2986d0efc204 @@ -25,12 +29,12 @@ yuv420p 2d5c80f9ba2ddd85b2aeda3564cc7d64 yuv420p16be 758b0c1e2113b15e7afde48da4e4d024 yuv420p16le 480ccd951dcb806bc875d307e02e50a0 yuv422p 6e728f4eb9eae287c224f396d84be6ea -yuv422p16be a05d43cd62b790087bd37083174557de -yuv422p16le 6954abebcbc62d81068d58d0c62bdd5b +yuv422p16be 8657d2c8d443940300fdb4028d555631 +yuv422p16le 4ab27609981e50de5b1150125718ae76 yuv440p a99e2b57ed601f39852715c9d675d0d3 yuv444p 947e47f7bb5fdccc659d19b7df2b6fc3 -yuv444p16be e5ef45bc3d2f5b0b2542d5151340c382 -yuv444p16le 70793e3d66d0c23a0cdedabe9c24c2a7 +yuv444p16be a5154ce329db0d2caf0bd43f1347dba3 +yuv444p16le 1f703308b90feb048191b3bccc695671 yuva420p d83ec0c01498189f179ec574918185f1 yuvj420p df3aaaec3bb157c3bde5f0365af30f4f yuvj422p d113871528d510a192797af59df9c05c diff --git a/tests/ref/lavfi/pixdesc_le b/tests/ref/lavfi/pixfmts_null index a96635d508..35e08a6ac7 100644 --- a/tests/ref/lavfi/pixdesc_le +++ b/tests/ref/lavfi/pixfmts_null @@ -1,10 +1,12 @@ abgr 037bf9df6a765520ad6d490066bf4b89 argb c442a8261c2265a07212ef0f72e35f5a bgr24 0d0cb38ab3fa0b2ec0865c14f78b217b -bgr48be 4ba0ff7fc9e011ea264610ad1585bb1f -bgr48le d022bfdd6a07d5dcc693799322a386b4 +bgr48be 74dedaaacae8fd1ef46e05f78cf29d62 +bgr48le 0eb7d30801eac6058814bddd330b3c76 bgr4_byte 50d23cc82d9dcef2fd12adb81fb9b806 +bgr555be 49f01b1f1f0c84fd9e776dd34cc3c280 bgr555le 378d6ac4223651a1adcbf94a3d0d807b +bgr565be 257cf78afa35dc31e9696f139c916715 bgr565le 1dfdd03995c287e3c754b164bf26a355 bgr8 24bd566170343d06fec6fccfff5abc54 bgra 76a18a5151242fa137133f604cd624d2 @@ -16,10 +18,12 @@ monow 9251497f3b0634f1165d12d5a289d943 nv12 e0af357888584d36eec5aa0f673793ef nv21 9a3297f3b34baa038b1f37cb202b512f rgb24 b41eba9651e1b5fe386289b506188105 -rgb48be 460b6de89b156290a12d3941db8bd731 -rgb48le cd93cb34d15996987367dabda3a10128 +rgb48be e3bc84c9af376fb6d0f0293cc7b713a6 +rgb48le f51c0e71638a822458329abb2f4052c7 rgb4_byte c93ba89b74c504e7f5ae9d9ab1546c73 +rgb555be 912a62c5e53bfcbac2a0340e10973cf2 rgb555le a937a0fc764fb57dc1b3af87cba0273c +rgb565be 9cadf742e05ddc23a3b5b270f89aad3c rgb565le d39aa298bb525e9be8860351c6f62dab rgb8 4a9d8e4f2f154e83a7e1735be6300700 rgba 93a5b3712e6eb8c5b9a09ffc7b9fbc12 @@ -34,13 +38,14 @@ yuv420p16le 2d59c4f1d0314a5a957a7cfc4b6fabcc yuv420p9be 
ce880fa07830e5297c22acf6e20555ce
 yuv420p9le 16543fda8f87d94a6cf857d2e8d4461a
 yuv422p c9bba4529821d796a6ab09f6a5fd355a
+yuv422p10be bdc13b630fd668b34c6fe1aae28dfc71
 yuv422p10le d0607c260a45c973e6639f4e449730ad
-yuv422p16be 5499502e1c29534a158a1fe60e889f60
-yuv422p16le e3d61fde6978591596bc36b914386623
+yuv422p16be dc9886f2fccf87cc54b27e071a2c251e
+yuv422p16le f181c8d8436f1233ba566d9bc88005ec
 yuv440p 5a064afe2b453bb52cdb3f176b1aa1cf
 yuv444p 0a98447b78fd476aa39686da6a74fa2e
-yuv444p16be ea602a24b8e6969679265078bd8607b6
-yuv444p16le 1262a0dc57ee147967fc896d04206313
+yuv444p16be af555dbaa401b142a995566864f47545
+yuv444p16le a803e8016997dad95c5b2a72f54c34d6
 yuva420p a29884f3f3dfe1e00b961bc17bef3d47
 yuvj420p 32eec78ba51857b16ce9b813a49b7189
 yuvj422p 0dfa0ed434f73be51428758c69e082cb
diff --git a/tests/ref/lavfi/pixfmts_pad_le b/tests/ref/lavfi/pixfmts_pad
index 03db5a7efd..03db5a7efd 100644
--- a/tests/ref/lavfi/pixfmts_pad_le
+++ b/tests/ref/lavfi/pixfmts_pad
diff --git a/tests/ref/lavfi/pixfmts_scale_le b/tests/ref/lavfi/pixfmts_scale
index 6e9ab9ae49..15f63b3c4f 100644
--- a/tests/ref/lavfi/pixfmts_scale_le
+++ b/tests/ref/lavfi/pixfmts_scale
@@ -1,25 +1,29 @@
 abgr cff82561a074874027ac1cc896fd2730
 argb 756dd1eaa5baca2238ce23dbdc452684
 bgr24 e44192347a45586c6c157e3059610cd1
-bgr48be 6d01b6ccd2ccf18c12985bcb2fde2218
-bgr48le 4caa6914091ad03b8f67c02d6b050bc0
+bgr48be 07f7a0cc34feb3646434d47c0cec8cee
+bgr48le 9abd2c3a66088e6c9078232064eba61e
 bgr4_byte ee1d35a7baf8e9016891929a2f565c0b
+bgr555be 6a2d335856db12e3ea72173d71610e21
 bgr555le 41e3e0961478dc634bf68a7bbd670cc9
+bgr565be 21077a3744c889b97032414b11232933
 bgr565le 614897eaeb422bd9a972f8ee51909be5
 bgr8 7f007fa6c153a16e808a9c51605a4016
 bgra 01cfdda1f72fcabb6c46424e27f8c519
 gray d7786a7d9d99ac74230cc045cab5632c
-gray16be 5ba22d4802b40ec27e62abb22ad1d1cc
-gray16le 2d5e83aa875a4c3baa6fecf55e3223bf
+gray16be b554d6c1cc8da23967445be4dd3e4a86
+gray16le 715a33aa1c19cb26b14f5cc000e7a3d1
 monob cb62f31b701c6e987b574974d1b31e32
 monow fd5d417ab7728acddffc06870661df61
 nv12 4676d59db43d657dc12841f6bc3ab452
 nv21 69c699510ff1fb777b118ebee1002f14
 rgb24 13ff53ebeab74dc05492836f1cfbd2c1
-rgb48be f82e99f13d5ede2a53cf3bf7178ca350
-rgb48le 3a09d89e4b27ea1a98f762e662e306a7
+rgb48be f18841c19fc6d9c817a3095f557b9bc5
+rgb48le 819e7b8acd8965ba57ba46198a5cc9bf
 rgb4_byte d81ffd3add95842a618eec81024f0b5c
+rgb555be 491dc49ff83258ffe415289bdcfb50b2
 rgb555le bd698d86c03170c4a16607c0fd1f750f
+rgb565be 35682c17c85f307147041f23ac8092aa
 rgb565le bfa0c639d80c3c03fd0c9e5f34296a5e
 rgb8 091d0170b354ef0e97312b95feb5483f
 rgba 16873e3ac914e76116629a5ff8940ac4
@@ -27,20 +31,21 @@ uyvy422 314bd486277111a95d9369b944fa0400
 yuv410p 7df8f6d69b56a8dcb6c7ee908e5018b5
 yuv411p 1143e7c5cc28fe0922b051b17733bc4c
 yuv420p fdad2d8df8985e3d17e73c71f713cb14
-yuv420p10be c143e77e97d2f7d62c3b518857ba9f9b
-yuv420p10le 72d90eccf5c34691ff057dafb7447aa2
-yuv420p16be 01da53e7f4f9882d5189ec1b1165ee05
-yuv420p16le 165f9aaf5332e5d088f44534d8ed2bc9
-yuv420p9be bb87fddca65d1742412c8d2b1caf96c6
-yuv420p9le 828eec50014a41258a5423c1fe56ac97
+yuv420p10be d7695b9117d5b52819c569459e42669b
+yuv420p10le 0ac6d448db2df5f3d1346aa81f2b5f50
+yuv420p16be 9688e33e03b8c8275ab2fb1df0f06bee
+yuv420p16le cba8b390ad5e7b8678e419b8ce79c008
+yuv420p9be 8fa6e007b1a40f34eaa3e2beb73ea8af
+yuv420p9le a7b131a7dd06906a5aef2e36d117b972
 yuv422p 918e37701ee7377d16a8a6c119c56a40
-yuv422p10le a10c4a5837547716f13cd61918b145f9
-yuv422p16be 961860aa4f229e09f1249910c687081c
-yuv422p16le 7695ee42c0581279bbe68de81deb7aee
+yuv422p10be 35206fcd7e00ee582a8c366b37d57d1d
+yuv422p10le 396f930e2da02f149ab9dd5b781cbe8d
+yuv422p16be 2cf502d7d386db1f1b3b946679d897b1
+yuv422p16le 3002a4e47520731dcee5929aff49eb74
 yuv440p 461503fdb9b90451020aa3b25ddf041c
 yuv444p 81b2eba962d12e8d64f003ac56f6faf2
-yuv444p16be 5f924c2b385826106300cecc4ef4d2df
-yuv444p16le 40a55a85858508138b7661c83d95223e
+yuv444p16be b9f051ce7335923fe33efd162e48da1d
+yuv444p16le fa47e317efac988b4a7fa55141c89126
 yuva420p 8673a9131fb47de69788863f93a50eb7
 yuvj420p 30427bd6caf5bda93a173dbebe759e09
 yuvj422p fc8288f64fd149573f73cf8da05d8e6d
diff --git a/tests/ref/lavfi/pixfmts_vflip_le b/tests/ref/lavfi/pixfmts_vflip
index 3029d2d550..44f1c131db 100644
--- a/tests/ref/lavfi/pixfmts_vflip_le
+++ b/tests/ref/lavfi/pixfmts_vflip
@@ -1,10 +1,12 @@
 abgr 25e72e9dbd01ab00727c976d577f7be5
 argb 19869bf1a5ac0b6af4d8bbe2c104533c
 bgr24 89108a4ba00201f79b75b9305c42352d
-bgr48be ed82382da09b64a8e04728fcf76e6814
-bgr48le 0f1f135608c2ff24d26d03e939fc2112
+bgr48be 908b4edb525fd154a95a3744c4ab5420
+bgr48le 796c2072d6fa13a091f5c5b175417ed5
 bgr4_byte 407fcf564ed764c38e1d748f700ab921
+bgr555be f739d2519f7e9d494359bf67a3821537
 bgr555le bd7b3ec4d684dfad075d89a606cb8b74
+bgr565be f19e9a4786395e1ddcd51399c98c9f6c
 bgr565le fdb617533e1e7ff512ea5b6b6233e738
 bgr8 c60f93fd152c6903391d1fe9decd3547
 bgra 7f9b799fb48544e49ce93e91d7f9fca8
@@ -16,10 +18,12 @@ monow ff9869d067ecb94eb9d90c9750c31fea
 nv12 046f00f598ce14d9854a3534a5c99114
 nv21 01ea369dd2d0d3ed7451dc5c8d61497f
 rgb24 eaefabc168d0b14576bab45bc1e56e1e
-rgb48be 4e0c384163ebab06a08e74637beb02bc
-rgb48le a77bfeefcd96750cf0e1917a2e2bf1e7
+rgb48be 8e347deca2902e7dc1ece261322577d8
+rgb48le 2034e485f946e4064b5fb9be09865e55
 rgb4_byte 8c6ff02df0b06dd2d574836c3741b2a2
+rgb555be 40dc33cfb5cf56aac1c5a290ac486c36
 rgb555le 4f8eaad29a17e0f8e9d8ab743e76b999
+rgb565be b57623ad9df74648339311a0edcebc7b
 rgb565le 73f247a3315dceaea3022ac7c197c5ef
 rgb8 13a8d89ef78d8127297d899005456ff0
 rgba 1fc6e920a42ec812aaa3b2aa02f37987
@@ -34,13 +38,14 @@ yuv420p16le 0f609e588e5a258644ef85170d70e030
 yuv420p9be be40ec975fb2873891643cbbbddbc3b0
 yuv420p9le 7e606310d3f5ff12badf911e8f333471
 yuv422p d7f5cb44d9b0210d66d6a8762640ab34
+yuv422p10be 588fe319b96513c32e21d3e32b45447f
 yuv422p10le 11b57f2bd9661024153f3973b9090cdb
-yuv422p16be 9bd8f8c961822b586fa4cf992be54acc
-yuv422p16le 9c4a1239605c7952b736ac3130163f14
+yuv422p16be 51d9aa4e78d121c226d919ce97976fe4
+yuv422p16le 12965c54bda8932ca72da194419a9908
 yuv440p 876385e96165acf51271b20e5d85a416
 yuv444p 9c3c667d1613b72d15bc6d851c5eb8f7
-yuv444p16be 0f4afa4a4aacf4bb6b87641abde71ea9
-yuv444p16le 8f31557bc52adfe00ae8b40a9b8c23f8
+yuv444p16be 6502abd75030d462c58d99a8673ec517
+yuv444p16le cd7e88b6d08425450a57555bc86ab210
 yuva420p c705d1cf061d8c6580ac690b55f92276
 yuvj420p 41fd02b204da0ab62452cd14b595e2e4
 yuvj422p 7f6ca9bc1812cde02036d7d29a7cce43
diff --git a/tests/ref/seek/lavf_gif b/tests/ref/seek/lavf_gif
index 5da803da42..883f18761e 100644
--- a/tests/ref/seek/lavf_gif
+++ b/tests/ref/seek/lavf_gif
@@ -1,4 +1,4 @@
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:2906382
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:2906401
 ret:-EINVAL st:-1 flags:0 ts:-1.000000
 ret:-EINVAL st:-1 flags:1 ts: 1.894167
 ret:-EINVAL st: 0 flags:0 ts: 0.800000
diff --git a/tests/ref/vsynth1/dnxhd_720p_10bit b/tests/ref/vsynth1/dnxhd_720p_10bit
new file mode 100644
index 0000000000..b307050338
--- /dev/null
+++ b/tests/ref/vsynth1/dnxhd_720p_10bit
@@ -0,0 +1,4 @@
+cb29b6ae4e1562d95f9311991fef98df *./tests/data/vsynth1/dnxhd-720p-10bit.dnxhd
+2293760 ./tests/data/vsynth1/dnxhd-720p-10bit.dnxhd
+2f45bb1af7da5dd3dca870ac87237b7d *./tests/data/dnxhd_720p_10bit.vsynth1.out.yuv
+stddev: 6.27 PSNR: 32.18 MAXDIFF: 64 bytes: 760320/ 7603200
diff --git a/tests/ref/vsynth2/dnxhd_720p_10bit b/tests/ref/vsynth2/dnxhd_720p_10bit
new file mode 100644
index 0000000000..df30f67368
--- /dev/null
+++ b/tests/ref/vsynth2/dnxhd_720p_10bit
@@ -0,0 +1,4 @@
+8648511257afb816b5b911706ca391db *./tests/data/vsynth2/dnxhd-720p-10bit.dnxhd
+2293760 ./tests/data/vsynth2/dnxhd-720p-10bit.dnxhd
+391b6f5aa7c7b488b479cb43d420b860 *./tests/data/dnxhd_720p_10bit.vsynth2.out.yuv
+stddev: 1.35 PSNR: 45.46 MAXDIFF: 23 bytes: 760320/ 7603200
diff --git a/tests/regression-funcs.sh b/tests/regression-funcs.sh
index e57cdf111e..979157bcf9 100755
--- a/tests/regression-funcs.sh
+++ b/tests/regression-funcs.sh
@@ -15,10 +15,7 @@ datadir="./tests/data"
 target_datadir="${target_path}/${datadir}"

 this="$test.$test_ref"
-logdir="$datadir/regression/$test_ref"
-logfile="$logdir/$test"
 outfile="$datadir/$test_ref/"
-errfile="$datadir/$this.err"

 # various files
 ffmpeg="$target_exec ${target_path}/ffmpeg"
@@ -37,12 +34,8 @@ trap 'rm -f -- $cleanfiles' EXIT

 mkdir -p "$datadir"
 mkdir -p "$outfile"
-mkdir -p "$logdir"
-
-(exec >&3) 2>/dev/null || exec 3>&2

 [ "${V-0}" -gt 0 ] && echov=echov || echov=:
-[ "${V-0}" -gt 1 ] || exec 2>$errfile

 echov(){
     echo "$@" >&3
@@ -67,13 +60,13 @@ do_ffmpeg()
     shift
     set -- $* ${target_path}/$f
     run_ffmpeg $*
-    do_md5sum $f >> $logfile
+    do_md5sum $f
     if [ $f = $raw_dst ] ; then
-        $tiny_psnr $f $raw_ref >> $logfile
+        $tiny_psnr $f $raw_ref
     elif [ $f = $pcm_dst ] ; then
-        $tiny_psnr $f $pcm_ref 2 >> $logfile
+        $tiny_psnr $f $pcm_ref 2
     else
-        wc -c $f >> $logfile
+        wc -c $f
     fi
 }
@@ -84,11 +77,11 @@ do_ffmpeg_nomd5()
     set -- $* ${target_path}/$f
     run_ffmpeg $*
     if [ $f = $raw_dst ] ; then
-        $tiny_psnr $f $raw_ref >> $logfile
+        $tiny_psnr $f $raw_ref
     elif [ $f = $pcm_dst ] ; then
-        $tiny_psnr $f $pcm_ref 2 >> $logfile
+        $tiny_psnr $f $pcm_ref 2
     else
-        wc -c $f >> $logfile
+        wc -c $f
     fi
 }
@@ -97,7 +90,7 @@ do_ffmpeg_crc()
     f="$1"
     shift
     run_ffmpeg $* -f crc "$target_crcfile"
-    echo "$f $(cat $crcfile)" >> $logfile
+    echo "$f $(cat $crcfile)"
 }

 do_video_decoding()
diff --git a/tools/lavfi-showfiltfmts.c b/tools/lavfi-showfiltfmts.c
index cc19e00a8c..a4541bac82 100644
--- a/tools/lavfi-showfiltfmts.c
+++ b/tools/lavfi-showfiltfmts.c
@@ -20,15 +20,61 @@
 #include "libavformat/avformat.h"
 #include "libavutil/pixdesc.h"
+#include "libavutil/samplefmt.h"
 #include "libavfilter/avfilter.h"

+static void print_formats(AVFilterContext *filter_ctx)
+{
+    int i, j;
+
+#define PRINT_FMTS(inout, outin, INOUT)                                 \
+    for (i = 0; i < filter_ctx->input_count; i++) {                     \
+        if (filter_ctx->inout##puts[i]->type == AVMEDIA_TYPE_VIDEO) {   \
+            AVFilterFormats *fmts =                                     \
+                filter_ctx->inout##puts[i]->outin##_formats;            \
+            for (j = 0; j < fmts->format_count; j++)                    \
+                printf(#INOUT "PUT[%d] %s: fmt:%s\n",                   \
+                       i, filter_ctx->filter->inout##puts[i].name,      \
+                       av_get_pix_fmt_name(fmts->formats[j]));          \
+        } else if (filter_ctx->inout##puts[i]->type == AVMEDIA_TYPE_AUDIO) { \
+            AVFilterFormats *fmts;                                      \
+                                                                        \
+            fmts = filter_ctx->inout##puts[i]->outin##_formats;         \
+            for (j = 0; j < fmts->format_count; j++)                    \
+                printf(#INOUT "PUT[%d] %s: fmt:%s\n",                   \
+                       i, filter_ctx->filter->inout##puts[i].name,      \
+                       av_get_sample_fmt_name(fmts->formats[j]));       \
+                                                                        \
+            fmts = filter_ctx->inout##puts[i]->outin##_chlayouts;       \
+            for (j = 0; j < fmts->format_count; j++) {                  \
+                char buf[256];                                          \
+                av_get_channel_layout_string(buf, sizeof(buf), -1,      \
+                                             fmts->formats[j]);         \
+                printf(#INOUT "PUT[%d] %s: chlayout:%s\n",              \
+                       i, filter_ctx->filter->inout##puts[i].name, buf); \
+            }                                                           \
+                                                                        \
+            fmts = filter_ctx->inout##puts[i]->outin##_packing;         \
+            for (j = 0; j < fmts->format_count; j++) {                  \
+                printf(#INOUT "PUT[%d] %s: packing:%s\n",               \
+                       i, filter_ctx->filter->inout##puts[i].name,      \
+                       fmts->formats[j] == AVFILTER_PACKED ?            \
+                           "packed" : "planar");                        \
+            }                                                           \
+        }                                                               \
+    }                                                                   \
+
+    PRINT_FMTS(in, out, IN);
+    PRINT_FMTS(out, in, OUT);
+}
+
 int main(int argc, char **argv)
 {
     AVFilter *filter;
     AVFilterContext *filter_ctx;
     const char *filter_name;
     const char *filter_args = NULL;
-    int i, j;
+    int i;

     av_log_set_level(AV_LOG_DEBUG);
@@ -75,23 +121,7 @@ int main(int argc, char **argv)
     else
         avfilter_default_query_formats(filter_ctx);

-    /* print the supported formats in input */
-    for (i = 0; i < filter_ctx->input_count; i++) {
-        AVFilterFormats *fmts = filter_ctx->inputs[i]->out_formats;
-        for (j = 0; j < fmts->format_count; j++)
-            printf("INPUT[%d] %s: %s\n",
-                   i, filter_ctx->filter->inputs[i].name,
-                   av_get_pix_fmt_name(fmts->formats[j]));
-    }
-
-    /* print the supported formats in output */
-    for (i = 0; i < filter_ctx->output_count; i++) {
-        AVFilterFormats *fmts = filter_ctx->outputs[i]->in_formats;
-        for (j = 0; j < fmts->format_count; j++)
-            printf("OUTPUT[%d] %s: %s\n",
-                   i, filter_ctx->filter->outputs[i].name,
-                   av_get_pix_fmt_name(fmts->formats[j]));
-    }
+    print_formats(filter_ctx);

     avfilter_free(filter_ctx);
     fflush(stdout);
diff --git a/version.sh b/version.sh
index 41ae520ecc..8d084c2df3 100755
--- a/version.sh
+++ b/version.sh
@@ -5,9 +5,29 @@ if ! test "$revision"; then
     revision=$(cd "$1" && git describe --tags --match N 2> /dev/null)
 fi

+# Shallow Git clones (--depth) do not have the N tag:
+# use 'git-YYYY-MM-DD-hhhhhhh'.
+test "$revision" || revision=$(cd "$1" &&
+    git log -1 --pretty=format:"git-%cd-%h" --date=short 2> /dev/null)
+
+# Snapshots from gitweb are in a directory called ffmpeg-hhhhhhh or
+# ffmpeg-HEAD-hhhhhhh.
+if [ -z "$revision" ]; then
+    srcdir=$(cd "$1" && pwd)
+    case "$srcdir" in
+        */ffmpeg-[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f])
+            git_hash="${srcdir##*-}";;
+        */ffmpeg-HEAD-[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f])
+            git_hash="${srcdir##*-}";;
+    esac
+fi
+
 # no revision number found
 test "$revision" || revision=$(cd "$1" && cat RELEASE 2> /dev/null)

+# Append the Git hash if we have one
+test "$revision" && test "$git_hash" && revision="$revision-$git_hash"
+
 # releases extract the version number from the VERSION file
 version=$(cd "$1" && cat VERSION 2> /dev/null)
 test "$version" || version=$revision