author     Michael Niedermayer <michaelni@gmx.at>  2011-05-15 19:18:02 +0200
committer  Michael Niedermayer <michaelni@gmx.at>  2011-05-15 19:18:02 +0200
commit     d46aada5c2c71b9b0a259e62699cab25837053b2 (patch)
tree       ab474244a6fda04d8a10d25201620cdaee11c3c6
parent     66b1f210c024a08ba00e4a730e64940d248b8717 (diff)
parent     5a153604c930792aa7f00c55cbf3c470f582dfb7 (diff)
download   ffmpeg-d46aada5c2c71b9b0a259e62699cab25837053b2.tar.gz
Merge branch 'master' into oldabi
* master: (403 commits)
  Initial caf muxer.
  Support decoding of amr_nb and gsm in caf.
  Fix decoding of msrle samples with 1bpp.
  udp: remove resource.h inclusion, it breaks mingw compilation.
  ffmpeg: Allow seting and cycling through debug modes.
  Fix FSF address copy paste error in some license headers.
  Add an aac sample which uses LTP to fate-aac.
  ffmpeg: Help for interactive keys.
  UDP: dont use thread_t as truth value.
  swscale: fix compile on mingw32
  [PATCH] Update pixdesc_be fate refs after adding 9/10bit YUV420P formats.
  arm: properly mark external symbol call
  ffmpeg: Interactivity support. Try pressing +-hs.
  swscale: 10l forgot git add this change from ronald.
  AVFrame: only set parameters from AVCodecContext in decode_video*() when no frame reordering is used.
  avcodec_default_get_buffer: init picture parameters.
  swscale: properly inline bits/endianness in yuv2yuvX16inC().
  swscale: fix clipping of 9/10bit YUV420P.
  Add av_clip_uintp2() function
  Support more QT 1bpp rawvideo files.
  ...

Conflicts:
	libavcodec/flacenc.c
	libavcodec/h261dec.c
	libavcodec/h263dec.c
	libavcodec/mpeg12.c
	libavcodec/msrle.c
	libavcodec/options.c
	libavcodec/qpeg.c
	libavcodec/rv34.c
	libavcodec/svq1dec.c
	libavcodec/svq3.c
	libavcodec/vc1dec.c
	libavcodec/version.h
	libavfilter/avfilter.h
	libavformat/file.c
	libavformat/options.c
	libavformat/rtpproto.c
	libavformat/udp.c
	libavutil/avutil.h

Merged-by: Michael Niedermayer <michaelni@gmx.at>
-rw-r--r--Changelog3
-rw-r--r--Makefile6
-rw-r--r--cmdutils.c5
-rw-r--r--cmdutils.h3
-rwxr-xr-xconfigure53
-rw-r--r--doc/APIchanges36
-rw-r--r--doc/avutil.txt1
-rw-r--r--doc/developer.texi24
-rw-r--r--doc/encoders.texi12
-rw-r--r--doc/eval.texi4
-rw-r--r--doc/fate.txt1
-rw-r--r--doc/general.texi3
-rw-r--r--doc/issue_tracker.txt27
-rw-r--r--ffmpeg.c279
-rw-r--r--ffplay.c43
-rw-r--r--ffserver.c2
-rw-r--r--libavcodec/4xm.c10
-rw-r--r--libavcodec/8bps.c1
-rw-r--r--libavcodec/8svx.c13
-rw-r--r--libavcodec/Makefile33
-rw-r--r--libavcodec/a64multienc.c4
-rw-r--r--libavcodec/aaccoder.c3
-rw-r--r--libavcodec/aacdec.c4
-rw-r--r--libavcodec/aacenc.c49
-rw-r--r--libavcodec/aasc.c2
-rw-r--r--libavcodec/ac3.h1
-rw-r--r--libavcodec/ac3dec.c9
-rw-r--r--libavcodec/ac3dsp.c4
-rw-r--r--libavcodec/ac3enc.c378
-rw-r--r--libavcodec/adpcm.c104
-rw-r--r--libavcodec/alacenc.c4
-rw-r--r--libavcodec/allcodecs.c3
-rw-r--r--libavcodec/alpha/dsputil_alpha.c4
-rw-r--r--libavcodec/alpha/simple_idct_alpha.c6
-rw-r--r--libavcodec/anm.c1
-rw-r--r--libavcodec/ansi.c5
-rw-r--r--libavcodec/arm/ac3dsp_armv6.S2
-rw-r--r--libavcodec/arm/dsputil_init_arm.c4
-rw-r--r--libavcodec/arm/dsputil_init_armv6.c4
-rw-r--r--libavcodec/arm/dsputil_init_neon.c6
-rw-r--r--libavcodec/arm/dsputil_iwmmxt.c4
-rw-r--r--libavcodec/arm/h264pred_init_arm.c2
-rw-r--r--libavcodec/arm/vp8_armv6.S2
-rw-r--r--libavcodec/ass.c4
-rw-r--r--libavcodec/asv1.c4
-rw-r--r--libavcodec/aura.c1
-rw-r--r--libavcodec/avcodec.h62
-rw-r--r--libavcodec/avs.c6
-rw-r--r--libavcodec/bethsoftvideo.c1
-rw-r--r--libavcodec/bfi.c5
-rw-r--r--libavcodec/bfin/dsputil_bfin.c6
-rw-r--r--libavcodec/bfin/mathops.h10
-rw-r--r--libavcodec/bmp.c2
-rw-r--r--libavcodec/bmpenc.c2
-rw-r--r--libavcodec/c93.c8
-rw-r--r--libavcodec/cavs.h13
-rw-r--r--libavcodec/cavsdata.h8
-rw-r--r--libavcodec/cavsdec.c30
-rw-r--r--libavcodec/cinepak.c1
-rw-r--r--libavcodec/cljr.c5
-rw-r--r--libavcodec/cscd.c5
-rw-r--r--libavcodec/cyuv.c1
-rw-r--r--libavcodec/dca.c12
-rw-r--r--libavcodec/dct-test.c23
-rw-r--r--libavcodec/dfa.c1
-rw-r--r--libavcodec/dirac_parser.c2
-rw-r--r--libavcodec/dnxhddec.c3
-rw-r--r--libavcodec/dnxhdenc.c4
-rw-r--r--libavcodec/dpxenc.c20
-rw-r--r--libavcodec/dsicinav.c1
-rw-r--r--libavcodec/dsputil.c6
-rw-r--r--libavcodec/dsputil_template.c (renamed from libavcodec/dsputil_internal.h)5
-rw-r--r--libavcodec/dv.c5
-rw-r--r--libavcodec/dvbsubdec.c56
-rw-r--r--libavcodec/dvdsubdec.c14
-rw-r--r--libavcodec/dxa.c11
-rw-r--r--libavcodec/dxva2_h264.c4
-rw-r--r--libavcodec/dxva2_mpeg2.c8
-rw-r--r--libavcodec/dxva2_vc1.c12
-rw-r--r--libavcodec/eacmv.c8
-rw-r--r--libavcodec/eatgq.c2
-rw-r--r--libavcodec/eatgv.c6
-rw-r--r--libavcodec/error_resilience.c18
-rw-r--r--libavcodec/escape124.c4
-rw-r--r--libavcodec/ffv1.c85
-rw-r--r--libavcodec/flacenc.c102
-rw-r--r--libavcodec/flashsv.c5
-rw-r--r--libavcodec/flashsvenc.c4
-rw-r--r--libavcodec/flicvideo.c1
-rw-r--r--libavcodec/flvdec.c8
-rw-r--r--libavcodec/flvenc.c2
-rw-r--r--libavcodec/fraps.c21
-rw-r--r--libavcodec/frwu.c6
-rw-r--r--libavcodec/get_bits.h2
-rw-r--r--libavcodec/gif.c2
-rw-r--r--libavcodec/h261dec.c10
-rw-r--r--libavcodec/h263.c2
-rw-r--r--libavcodec/h263dec.c22
-rw-r--r--libavcodec/h264.c202
-rw-r--r--libavcodec/h264.h23
-rw-r--r--libavcodec/h264_cabac.c46
-rw-r--r--libavcodec/h264_cavlc.c27
-rw-r--r--libavcodec/h264_direct.c2
-rw-r--r--libavcodec/h264_loopfilter.c224
-rw-r--r--libavcodec/h264_parser.c2
-rw-r--r--libavcodec/h264_ps.c2
-rw-r--r--libavcodec/h264_refs.c14
-rw-r--r--libavcodec/h264data.h2
-rw-r--r--libavcodec/h264dsp.c104
-rw-r--r--libavcodec/h264dsp.h4
-rw-r--r--libavcodec/h264dsp_template.c (renamed from libavcodec/h264dsp_internal.h)77
-rw-r--r--libavcodec/h264idct.c6
-rw-r--r--libavcodec/h264idct_template.c (renamed from libavcodec/h264idct_internal.h)2
-rw-r--r--libavcodec/h264pred.c364
-rw-r--r--libavcodec/h264pred_template.c (renamed from libavcodec/h264pred_internal.h)699
-rw-r--r--libavcodec/high_bit_depth.h (renamed from libavcodec/h264_high_depth.h)4
-rw-r--r--libavcodec/huffyuv.c3
-rw-r--r--libavcodec/idcinvideo.c1
-rw-r--r--libavcodec/iff.c84
-rw-r--r--libavcodec/indeo2.c1
-rw-r--r--libavcodec/indeo3.c23
-rw-r--r--libavcodec/indeo5.c2
-rw-r--r--libavcodec/intelh263dec.c2
-rw-r--r--libavcodec/interplayvideo.c3
-rw-r--r--libavcodec/ituh263dec.c26
-rw-r--r--libavcodec/ituh263enc.c10
-rw-r--r--libavcodec/jfdctfst.c18
-rw-r--r--libavcodec/jfdctint.c18
-rw-r--r--libavcodec/jpeglsenc.c2
-rw-r--r--libavcodec/jvdec.c2
-rw-r--r--libavcodec/kgv1dec.c1
-rw-r--r--libavcodec/kmvc.c5
-rw-r--r--libavcodec/lcldec.c1
-rw-r--r--libavcodec/lclenc.c2
-rw-r--r--libavcodec/libopenjpeg.c1
-rw-r--r--libavcodec/libvorbis.c2
-rw-r--r--libavcodec/libvpxenc.c4
-rw-r--r--libavcodec/libx264.c33
-rw-r--r--libavcodec/libxavs.c6
-rw-r--r--libavcodec/libxvid_rc.c2
-rw-r--r--libavcodec/libxvidff.c17
-rw-r--r--libavcodec/ljpegenc.c2
-rw-r--r--libavcodec/loco.c2
-rw-r--r--libavcodec/lpc.c12
-rw-r--r--libavcodec/lpc.h17
-rw-r--r--libavcodec/mdec.c3
-rw-r--r--libavcodec/mimic.c2
-rw-r--r--libavcodec/mjpegdec.c7
-rw-r--r--libavcodec/mlib/dsputil_mlib.c4
-rw-r--r--libavcodec/mlp_parser.c18
-rw-r--r--libavcodec/mlpdec.c6
-rw-r--r--libavcodec/mmvideo.c1
-rw-r--r--libavcodec/motion_est.c6
-rw-r--r--libavcodec/motion_est_template.c2
-rw-r--r--libavcodec/motionpixels.c1
-rw-r--r--libavcodec/mpeg12.c48
-rw-r--r--libavcodec/mpeg12enc.c18
-rw-r--r--libavcodec/mpeg4video.c8
-rw-r--r--libavcodec/mpeg4videodec.c76
-rw-r--r--libavcodec/mpeg4videoenc.c48
-rw-r--r--libavcodec/mpegaudio.h8
-rw-r--r--libavcodec/mpegaudio_tablegen.h2
-rw-r--r--libavcodec/mpegaudiodec.c96
-rw-r--r--libavcodec/mpegaudioenc.c5
-rw-r--r--libavcodec/mpegvideo.c125
-rw-r--r--libavcodec/mpegvideo.h2
-rw-r--r--libavcodec/mpegvideo_common.h6
-rw-r--r--libavcodec/mpegvideo_enc.c74
-rw-r--r--libavcodec/mpegvideo_xvmc.c6
-rw-r--r--libavcodec/msmpeg4.c28
-rw-r--r--libavcodec/msrle.c4
-rw-r--r--libavcodec/msvideo1.c1
-rw-r--r--libavcodec/msvideo1enc.c298
-rw-r--r--libavcodec/mxpegdec.c4
-rw-r--r--libavcodec/nuv.c2
-rw-r--r--libavcodec/options.c787
-rw-r--r--libavcodec/pamenc.c2
-rw-r--r--libavcodec/parser.c4
-rw-r--r--libavcodec/pcx.c2
-rw-r--r--libavcodec/pcxenc.c2
-rw-r--r--libavcodec/pictordec.c12
-rw-r--r--libavcodec/pngdec.c2
-rw-r--r--libavcodec/pngenc.c2
-rw-r--r--libavcodec/pnm.c2
-rw-r--r--libavcodec/pnmdec.c19
-rw-r--r--libavcodec/pnmenc.c2
-rw-r--r--libavcodec/ppc/dsputil_altivec.c6
-rw-r--r--libavcodec/ppc/dsputil_ppc.c4
-rw-r--r--libavcodec/ppc/h264_altivec.c4
-rw-r--r--libavcodec/ppc/vc1dsp_altivec.c63
-rw-r--r--libavcodec/ps2/dsputil_mmi.c4
-rw-r--r--libavcodec/psymodel.c13
-rw-r--r--libavcodec/psymodel.h49
-rw-r--r--libavcodec/ptx.c2
-rw-r--r--libavcodec/qdrw.c5
-rw-r--r--libavcodec/qpeg.c2
-rw-r--r--libavcodec/qtrle.c1
-rw-r--r--libavcodec/qtrleenc.c50
-rw-r--r--libavcodec/r210dec.c2
-rw-r--r--libavcodec/ra144enc.c4
-rw-r--r--libavcodec/ratecontrol.c68
-rw-r--r--libavcodec/raw.c1
-rw-r--r--libavcodec/rawdec.c4
-rw-r--r--libavcodec/rawenc.c2
-rw-r--r--libavcodec/resample.c176
-rw-r--r--libavcodec/rl2.c1
-rw-r--r--libavcodec/roqvideodec.c2
-rw-r--r--libavcodec/rpza.c1
-rw-r--r--libavcodec/rv10.c26
-rw-r--r--libavcodec/rv10enc.c4
-rw-r--r--libavcodec/rv20enc.c2
-rw-r--r--libavcodec/rv30.c2
-rw-r--r--libavcodec/rv34.c20
-rw-r--r--libavcodec/rv40.c2
-rw-r--r--libavcodec/s302m.c151
-rw-r--r--libavcodec/sgidec.c2
-rw-r--r--libavcodec/sgienc.c2
-rw-r--r--libavcodec/sh4/dsputil_align.c8
-rw-r--r--libavcodec/sh4/dsputil_sh4.c4
-rw-r--r--libavcodec/smacker.c5
-rw-r--r--libavcodec/smc.c1
-rw-r--r--libavcodec/snow.c22
-rw-r--r--libavcodec/sonic.c2
-rw-r--r--libavcodec/sparc/dsputil_vis.c4
-rw-r--r--libavcodec/sunrast.c2
-rw-r--r--libavcodec/svq1dec.c16
-rw-r--r--libavcodec/svq1enc.c14
-rw-r--r--libavcodec/svq3.c175
-rw-r--r--libavcodec/targaenc.c2
-rw-r--r--libavcodec/tiertexseqv.c1
-rw-r--r--libavcodec/tiff.c141
-rw-r--r--libavcodec/tiff.h12
-rw-r--r--libavcodec/tiffenc.c12
-rw-r--r--libavcodec/tmv.c11
-rw-r--r--libavcodec/truemotion1.c1
-rw-r--r--libavcodec/truemotion2.c5
-rw-r--r--libavcodec/tscc.c1
-rw-r--r--libavcodec/tta.c20
-rw-r--r--libavcodec/txd.c2
-rw-r--r--libavcodec/ulti.c1
-rw-r--r--libavcodec/utils.c39
-rw-r--r--libavcodec/v210dec.c22
-rw-r--r--libavcodec/v210enc.c24
-rw-r--r--libavcodec/v210x.c2
-rw-r--r--libavcodec/vaapi_h264.c2
-rw-r--r--libavcodec/vaapi_mpeg2.c4
-rw-r--r--libavcodec/vaapi_mpeg4.c8
-rw-r--r--libavcodec/vaapi_vc1.c38
-rw-r--r--libavcodec/vb.c1
-rw-r--r--libavcodec/vc1.c96
-rw-r--r--libavcodec/vc1.h2
-rw-r--r--libavcodec/vc1_parser.c6
-rw-r--r--libavcodec/vc1dec.c406
-rw-r--r--libavcodec/vc1dsp.c108
-rw-r--r--libavcodec/vc1dsp.h12
-rw-r--r--libavcodec/vcr1.c5
-rw-r--r--libavcodec/vdpau.c12
-rw-r--r--libavcodec/version.h5
-rw-r--r--libavcodec/vmdav.c3
-rw-r--r--libavcodec/vmnc.c5
-rw-r--r--libavcodec/vorbis.c8
-rw-r--r--libavcodec/vorbis.h10
-rw-r--r--libavcodec/vorbisdec.c153
-rw-r--r--libavcodec/vorbisenc.c6
-rw-r--r--libavcodec/vp3.c4
-rw-r--r--libavcodec/vp3_parser.c4
-rw-r--r--libavcodec/vp56.c8
-rw-r--r--libavcodec/vp8.c193
-rw-r--r--libavcodec/vp8.h3
-rw-r--r--libavcodec/vp8_parser.c2
-rw-r--r--libavcodec/vqavideo.c1
-rw-r--r--libavcodec/wmavoice.c2
-rw-r--r--libavcodec/wmv2dec.c9
-rw-r--r--libavcodec/wmv2enc.c6
-rw-r--r--libavcodec/wnv1.c1
-rw-r--r--libavcodec/x86/Makefile1
-rw-r--r--libavcodec/x86/ac3dsp.asm2
-rw-r--r--libavcodec/x86/deinterlace.asm2
-rw-r--r--libavcodec/x86/dsputil_mmx.c37
-rw-r--r--libavcodec/x86/dsputil_yasm.asm2
-rw-r--r--libavcodec/x86/dsputilenc_yasm.asm2
-rw-r--r--libavcodec/x86/fft_mmx.asm2
-rw-r--r--libavcodec/x86/fmtconvert.asm2
-rw-r--r--libavcodec/x86/h264_chromamc.asm2
-rw-r--r--libavcodec/x86/h264_deblock.asm356
-rw-r--r--libavcodec/x86/h264_deblock_10bit.asm910
-rw-r--r--libavcodec/x86/h264_idct.asm2
-rw-r--r--libavcodec/x86/h264_intrapred.asm2
-rw-r--r--libavcodec/x86/h264_weight.asm2
-rw-r--r--libavcodec/x86/h264dsp_mmx.c139
-rw-r--r--libavcodec/x86/vc1dsp_yasm.asm2
-rw-r--r--libavcodec/x86/vp3dsp.asm2
-rw-r--r--libavcodec/x86/vp56dsp.asm2
-rw-r--r--libavcodec/x86/vp8dsp.asm2
-rw-r--r--libavcodec/x86/x86util.asm26
-rw-r--r--libavcodec/xan.c2
-rw-r--r--libavcodec/xl.c5
-rw-r--r--libavcodec/xsubdec.c11
-rw-r--r--libavcodec/xsubenc.c2
-rw-r--r--libavcodec/yop.c1
-rw-r--r--libavcodec/zmbv.c5
-rw-r--r--libavcodec/zmbvenc.c2
-rw-r--r--libavdevice/bktr.c4
-rw-r--r--libavdevice/dv1394.c2
-rw-r--r--libavdevice/x11grab.c2
-rw-r--r--libavfilter/Makefile4
-rw-r--r--libavfilter/avcodec.c42
-rw-r--r--libavfilter/avcodec.h40
-rw-r--r--libavfilter/avfilter.c69
-rw-r--r--libavfilter/avfilter.h6
-rw-r--r--libavfilter/defaults.c27
-rw-r--r--libavfilter/internal.h6
-rw-r--r--libavfilter/libmpcodecs/mp_image.h1
-rw-r--r--libavfilter/vf_aspect.c2
-rw-r--r--libavfilter/vf_drawtext.c174
-rw-r--r--libavfilter/vf_frei0r.c2
-rw-r--r--libavfilter/vf_scale.c12
-rw-r--r--libavfilter/vf_showinfo.c2
-rw-r--r--libavfilter/vf_transpose.c8
-rw-r--r--libavfilter/vsrc_buffer.c52
-rw-r--r--libavfilter/vsrc_buffer.h5
-rw-r--r--libavfilter/vsrc_color.c2
-rw-r--r--libavfilter/vsrc_movie.c28
-rw-r--r--libavformat/Makefile4
-rw-r--r--libavformat/aiffdec.c23
-rw-r--r--libavformat/allformats.c2
-rw-r--r--libavformat/ape.c2
-rw-r--r--libavformat/applehttp.c1
-rw-r--r--libavformat/applehttpproto.c1
-rw-r--r--libavformat/asfdec.c14
-rw-r--r--libavformat/avidec.c39
-rw-r--r--libavformat/avienc.c4
-rw-r--r--libavformat/avio.c11
-rw-r--r--libavformat/caf.c2
-rw-r--r--libavformat/cafenc.c182
-rw-r--r--libavformat/crypto.c5
-rw-r--r--libavformat/cutils.c21
-rw-r--r--libavformat/dv.c7
-rw-r--r--libavformat/dv.h2
-rw-r--r--libavformat/file.c28
-rw-r--r--libavformat/gxfenc.c4
-rw-r--r--libavformat/h264dec.c2
-rw-r--r--libavformat/http.c7
-rw-r--r--libavformat/img2.c1
-rw-r--r--libavformat/internal.h6
-rw-r--r--libavformat/isom.c32
-rw-r--r--libavformat/isom.h4
-rw-r--r--libavformat/matroskaenc.c6
-rw-r--r--libavformat/mov.c58
-rw-r--r--libavformat/movenc.c2
-rw-r--r--libavformat/mp3enc.c221
-rw-r--r--libavformat/mpegts.c9
-rw-r--r--libavformat/mpegtsenc.c20
-rw-r--r--libavformat/nsvdec.c20
-rw-r--r--libavformat/nutenc.c6
-rw-r--r--libavformat/oggdec.c106
-rw-r--r--libavformat/oggenc.c25
-rw-r--r--libavformat/options.c48
-rw-r--r--libavformat/os_support.c1
-rw-r--r--libavformat/riff.c4
-rw-r--r--libavformat/rtpdec.c3
-rw-r--r--libavformat/rtpdec_qdm2.c2
-rw-r--r--libavformat/rtpproto.c4
-rw-r--r--libavformat/rtsp.c4
-rw-r--r--libavformat/rtsp.h5
-rw-r--r--libavformat/rtspdec.c4
-rw-r--r--libavformat/spdifenc.c15
-rw-r--r--libavformat/udp.c119
-rw-r--r--libavformat/utils.c30
-rw-r--r--libavformat/wtv.c1058
-rw-r--r--libavformat/wtv.h41
-rw-r--r--libavformat/wtvdec.c1060
-rw-r--r--libavutil/Makefile2
-rw-r--r--libavutil/avutil.h4
-rw-r--r--libavutil/common.h17
-rw-r--r--libavutil/eval.c6
-rw-r--r--libavutil/fifo.c2
-rw-r--r--libavutil/file.c2
-rw-r--r--libavutil/internal.h2
-rw-r--r--libavutil/log.c2
-rw-r--r--libavutil/log.h2
-rw-r--r--libavutil/mathematics.c3
-rw-r--r--libavutil/mem.c38
-rw-r--r--libavutil/mem.h9
-rw-r--r--libavutil/opt.c21
-rw-r--r--libavutil/opt.h55
-rw-r--r--libavutil/pixdesc.c23
-rw-r--r--libavutil/pixfmt.h5
-rw-r--r--libavutil/ppc/cpu.c2
-rw-r--r--libpostproc/postprocess.c4
-rw-r--r--libswscale/options.c66
-rw-r--r--libswscale/ppc/swscale_altivec_template.c8
-rw-r--r--libswscale/ppc/yuv2rgb_altivec.c4
-rw-r--r--libswscale/rgb2rgb_template.c3
-rw-r--r--libswscale/swscale.c314
-rw-r--r--libswscale/swscale_internal.h9
-rw-r--r--libswscale/swscale_template.c54
-rw-r--r--libswscale/utils.c12
-rwxr-xr-xtests/codec-regression.sh144
-rw-r--r--tests/fate/aac.mak4
-rw-r--r--tests/fate/h264.mak12
-rwxr-xr-xtests/lavf-regression.sh35
-rwxr-xr-xtests/lavfi-regression.sh6
-rw-r--r--tests/ref/acodec/adpcm_ima_qt6
-rw-r--r--tests/ref/fate/h264-conformance-frext-pph10i1_panasonic_a10
-rw-r--r--tests/ref/fate/h264-conformance-frext-pph10i2_panasonic_a10
-rw-r--r--tests/ref/fate/h264-conformance-frext-pph10i3_panasonic_a10
-rw-r--r--tests/ref/fate/h264-conformance-frext-pph10i4_panasonic_a19
-rw-r--r--tests/ref/fate/h264-conformance-frext-pph10i5_panasonic_a10
-rw-r--r--tests/ref/fate/h264-conformance-frext-pph10i6_panasonic_a10
-rw-r--r--tests/ref/fate/h264-conformance-frext-pph10i7_panasonic_a10
-rw-r--r--tests/ref/fate/qt-ima4-mono2
-rw-r--r--tests/ref/fate/qt-ima4-stereo2
-rw-r--r--tests/ref/fate/v2102
-rw-r--r--tests/ref/fate/vc130
-rw-r--r--tests/ref/lavfi/pixdesc_be5
-rw-r--r--tests/ref/lavfi/pixdesc_le5
-rw-r--r--tests/ref/lavfi/pixfmts_copy_le5
-rw-r--r--tests/ref/lavfi/pixfmts_null_le5
-rw-r--r--tests/ref/lavfi/pixfmts_scale_le5
-rw-r--r--tests/ref/lavfi/pixfmts_vflip_le5
-rw-r--r--tests/ref/seek/dv411_dv54
-rw-r--r--tests/ref/seek/dv50_dv54
-rw-r--r--tests/ref/seek/dv_dv54
-rw-r--r--tests/ref/seek/lavf_dv54
-rw-r--r--tests/ref/vsynth1/error4
-rw-r--r--tests/ref/vsynth1/msvideo14
-rw-r--r--tests/ref/vsynth1/qtrlegray4
-rw-r--r--tests/ref/vsynth2/error4
-rw-r--r--tests/ref/vsynth2/msvideo14
-rw-r--r--tests/ref/vsynth2/qtrlegray4
-rwxr-xr-xtests/regression-funcs.sh13
-rw-r--r--tests/rotozoom.c232
433 files changed, 9113 insertions, 5716 deletions
diff --git a/Changelog b/Changelog
index 6425a49408..78320b0331 100644
--- a/Changelog
+++ b/Changelog
@@ -12,6 +12,9 @@ version <next>:
- Lots of deprecated API cruft removed
- fft and imdct optimizations for AVX (Sandy Bridge) processors
- showinfo filter added
+- DPX image encoder
+- SMPTE 302M AES3 audio decoder
+- Apple Core Audio Format muxer
version 0.7_beta1:
diff --git a/Makefile b/Makefile
index 6d4db62881..725758eca1 100644
--- a/Makefile
+++ b/Makefile
@@ -280,9 +280,13 @@ fate-seek: $(FATE_SEEK)
ifdef SAMPLES
FATE += $(FATE_TESTS)
+fate-rsync:
+ rsync -vaLW rsync://fate-suite.libav.org/fate-suite/ $(SAMPLES)
else
+fate-rsync:
+ @echo "use 'make fate-rsync SAMPLES=/path/to/samples' to sync the fate suite"
$(FATE_TESTS):
- @echo "SAMPLES not specified, cannot run FATE"
+ @echo "SAMPLES not specified, cannot run FATE. See doc/fate.txt for more information."
endif
FATE_UTILS = base64 tiny_psnr
diff --git a/cmdutils.c b/cmdutils.c
index 5d56977d54..b7eabaf782 100644
--- a/cmdutils.c
+++ b/cmdutils.c
@@ -84,6 +84,7 @@ void uninit_opts(void)
}
av_freep(&opt_names);
av_freep(&opt_values);
+ opt_name_count = 0;
}
void log_callback_help(void* ptr, int level, const char* fmt, va_list vl)
@@ -434,7 +435,7 @@ void set_context_opts(void *ctx, void *opts_ctx, int flags, AVCodec *codec)
const char *str;
if (priv_ctx) {
if (av_find_opt(priv_ctx, opt_names[i], NULL, flags, flags)) {
- if (av_set_string3(priv_ctx, opt_names[i], opt_values[i], 0, NULL) < 0) {
+ if (av_set_string3(priv_ctx, opt_names[i], opt_values[i], 1, NULL) < 0) {
fprintf(stderr, "Invalid value '%s' for option '%s'\n",
opt_names[i], opt_values[i]);
exit(1);
@@ -907,10 +908,12 @@ int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
memcpy(frame->data, picref->data, sizeof(frame->data));
memcpy(frame->linesize, picref->linesize, sizeof(frame->linesize));
+ frame->pkt_pos = picref->pos;
frame->interlaced_frame = picref->video->interlaced;
frame->top_field_first = picref->video->top_field_first;
frame->key_frame = picref->video->key_frame;
frame->pict_type = picref->video->pict_type;
+ frame->sample_aspect_ratio = picref->video->sample_aspect_ratio;
return 1;
}
diff --git a/cmdutils.h b/cmdutils.h
index db84f55f30..d460200f4e 100644
--- a/cmdutils.h
+++ b/cmdutils.h
@@ -122,7 +122,8 @@ typedef struct {
#define OPT_FUNC2 0x0400
#define OPT_INT64 0x0800
#define OPT_EXIT 0x1000
-#define OPT_DUMMY 0x2000
+#define OPT_DATA 0x2000
+#define OPT_DUMMY 0x4000
union {
void (*func_arg)(const char *); //FIXME passing error code as int return would be nicer then exit() in the func
int *int_arg;
diff --git a/configure b/configure
index 0bb97016c9..a7aad19011 100755
--- a/configure
+++ b/configure
@@ -88,13 +88,12 @@ Configuration options:
--disable-avcodec disable libavcodec build
--disable-avformat disable libavformat build
--disable-swscale disable libswscale build
- --enable-postproc enable GPLed postprocessing support [no]
+ --disable-postproc disable libpostproc build
--disable-avfilter disable video filter support [no]
--disable-pthreads disable pthreads [auto]
--enable-w32threads use Win32 threads [no]
--enable-x11grab enable X11 grabbing [no]
--disable-network disable network support [no]
- --disable-mpegaudio-hp faster (but less accurate) MPEG audio decoding [no]
--enable-gray enable full grayscale support (slower color)
--disable-swscale-alpha disable alpha channel support in swscale
--disable-fastdiv disable table-based division
@@ -761,7 +760,7 @@ check_pkg_config(){
headers="$2"
funcs="$3"
shift 3
- $pkg_config --exists $pkg || return
+ $pkg_config --exists $pkg 2>/dev/null || return
pkg_cflags=$($pkg_config --cflags $pkg)
pkg_libs=$($pkg_config --libs $pkg)
check_func_headers "$headers" "$funcs" $pkg_cflags $pkg_libs "$@" &&
@@ -957,7 +956,6 @@ CONFIG_LIST="
mdct
memalign_hack
mlib
- mpegaudio_hp
network
nonfree
pic
@@ -1252,14 +1250,14 @@ aac_encoder_select="mdct sinewin"
aac_latm_decoder_select="aac_decoder aac_latm_parser"
ac3_decoder_select="mdct ac3dsp ac3_parser"
ac3_encoder_select="mdct ac3dsp"
-ac3_fixed_encoder_select="ac3dsp"
+ac3_fixed_encoder_select="mdct ac3dsp"
alac_encoder_select="lpc"
amrnb_decoder_select="lsp"
amrwb_decoder_select="lsp"
atrac1_decoder_select="mdct sinewin"
atrac3_decoder_select="mdct"
-binkaudio_dct_decoder_select="mdct rdft dct"
-binkaudio_rdft_decoder_select="mdct rdft"
+binkaudio_dct_decoder_select="mdct rdft dct sinewin"
+binkaudio_rdft_decoder_select="mdct rdft sinewin"
cavs_decoder_select="golomb"
cook_decoder_select="mdct sinewin"
cscd_decoder_suggest="zlib"
@@ -1291,7 +1289,7 @@ h264_dxva2_hwaccel_deps="dxva2api_h"
h264_dxva2_hwaccel_select="dxva2 h264_decoder"
h264_vaapi_hwaccel_select="vaapi"
h264_vdpau_decoder_select="vdpau h264_decoder"
-imc_decoder_select="fft mdct"
+imc_decoder_select="fft mdct sinewin"
jpegls_decoder_select="golomb"
jpegls_encoder_select="golomb"
ljpeg_encoder_select="aandct"
@@ -1459,7 +1457,7 @@ alsa_outdev_deps="alsa_asoundlib_h"
bktr_indev_deps_any="dev_bktr_ioctl_bt848_h machine_ioctl_bt848_h dev_video_bktr_ioctl_bt848_h dev_ic_bt8xx_h"
dv1394_indev_deps="dv1394 dv_demuxer"
fbdev_indev_deps="linux_fb_h"
-jack_indev_deps="jack_jack_h"
+jack_indev_deps="jack_jack_h sem_timedwait"
libdc1394_indev_deps="libdc1394"
oss_indev_deps_any="soundcard_h sys_soundcard_h"
oss_outdev_deps_any="soundcard_h sys_soundcard_h"
@@ -1481,7 +1479,7 @@ mmst_protocol_deps="network"
rtmp_protocol_select="tcp_protocol"
rtp_protocol_select="udp_protocol"
tcp_protocol_deps="network"
-udp_protocol_deps="network"
+udp_protocol_deps="network pthreads"
# filters
blackframe_filter_deps="gpl"
@@ -1490,7 +1488,8 @@ drawtext_filter_deps="libfreetype"
frei0r_filter_deps="frei0r dlopen strtok_r"
frei0r_src_filter_deps="frei0r dlopen strtok_r"
hqdn3d_filter_deps="gpl"
-mp_filter_deps="gpl"
+movie_filter_deps="avcodec avformat"
+mp_filter_deps="gpl avcodec"
ocv_filter_deps="libopencv"
scale_filter_deps="swscale"
yadif_filter_deps="gpl"
@@ -1498,6 +1497,7 @@ yadif_filter_deps="gpl"
# libraries
avdevice_deps="avcodec avformat"
avformat_deps="avcodec"
+postproc_deps="gpl"
# programs
ffmpeg_deps="avcodec avformat swscale"
@@ -1666,9 +1666,9 @@ enable ffmpeg
enable ffplay
enable ffprobe
enable ffserver
-enable mpegaudio_hp
enable network
enable optimizations
+enable postproc
enable protocols
enable static
enable stripping
@@ -1691,7 +1691,7 @@ LIB_INSTALL_EXTRA_CMD='$$(RANLIB) "$(LIBDIR)/$(LIBNAME)"'
CC_O='-o $@'
-host_cflags='-D_ISOC99_SOURCE -D_POSIX_C_SOURCE=200112 -O3 -g -Wall'
+host_cflags='-D_ISOC99_SOURCE -O3 -g -Wall'
host_libs='-lm'
target_path='$(CURDIR)'
@@ -1856,6 +1856,11 @@ set_default cc nm pkg_config strip sysinclude
enabled cross_compile || host_cc_default=$cc
set_default host_cc
+if ! $pkg_config --version >/dev/null 2>&1; then
+ warn "$pkg_config not found, library detection may fail."
+ pkg_config=false
+fi
+
exesuf() {
case $1 in
mingw32*|cygwin*|*-dos|freedos|opendos|os/2*|symbian) echo .exe ;;
@@ -2339,7 +2344,7 @@ if test "$?" != 0; then
die "C compiler test failed."
fi
-add_cppflags -D_ISOC99_SOURCE -D_POSIX_C_SOURCE=200112
+add_cppflags -D_ISOC99_SOURCE
check_cflags -std=c99
check_cc -D_FILE_OFFSET_BITS=64 <<EOF && add_cppflags -D_FILE_OFFSET_BITS=64
#include <stdlib.h>
@@ -2387,7 +2392,6 @@ case $target_os in
disable symver
oss_indev_extralibs="-lossaudio"
oss_outdev_extralibs="-lossaudio"
- add_cppflags -D_XOPEN_SOURCE=600
;;
openbsd)
enable malloc_aligned
@@ -2481,6 +2485,7 @@ case $target_os in
enable dos_paths
;;
linux)
+ add_cppflags -D_POSIX_C_SOURCE=200112 -D_XOPEN_SOURCE=600
enable dv1394
;;
irix*)
@@ -2556,7 +2561,6 @@ die_license_disabled() {
die_license_disabled gpl libx264
die_license_disabled gpl libxavs
die_license_disabled gpl libxvid
-die_license_disabled gpl postproc
die_license_disabled gpl x11grab
die_license_disabled nonfree libfaac
@@ -2899,7 +2903,7 @@ enabled libdirac && require_pkg_config dirac \
"libdirac_decoder/dirac_parser.h libdirac_encoder/dirac_encoder.h" \
"dirac_decoder_init dirac_encoder_init"
enabled libfaac && require2 libfaac "stdint.h faac.h" faacEncGetVersion -lfaac
-enabled libfreetype && add_cflags $(pkg-config --cflags freetype2) && require libfreetype ft2build.h FT_Init_FreeType -lfreetype
+enabled libfreetype && require_pkg_config freetype2 "ft2build.h freetype/freetype.h" FT_Init_FreeType
enabled libgsm && require libgsm gsm/gsm.h gsm_create -lgsm
enabled libmp3lame && require "libmp3lame >= 3.98.3" lame/lame.h lame_set_VBR_quality -lmp3lame
enabled libnut && require libnut libnut.h nut_demuxer_init -lnut
@@ -2935,10 +2939,20 @@ if enabled libdc1394; then
die "ERROR: No version of libdc1394 found "
fi
-if check_pkg_config sdl SDL_version.h SDL_Linked_Version; then
+SDL_CONFIG="${cross_prefix}sdl-config"
+if "${SDL_CONFIG}" --version > /dev/null 2>&1; then
+ sdl_cflags=$("${SDL_CONFIG}" --cflags)
+ sdl_libs=$("${SDL_CONFIG}" --libs)
+ check_func_headers SDL.h SDL_Init $sdl_cflags $sdl_libs &&
check_cpp_condition SDL.h "(SDL_MAJOR_VERSION<<16 | SDL_MINOR_VERSION<<8 | SDL_PATCHLEVEL) >= 0x010201" $sdl_cflags &&
enable sdl &&
check_struct SDL.h SDL_VideoInfo current_w $sdl_cflags && enable sdl_video_size
+else
+ if check_pkg_config sdl SDL_version.h SDL_Linked_Version; then
+ check_cpp_condition SDL.h "(SDL_MAJOR_VERSION<<16 | SDL_MINOR_VERSION<<8 | SDL_PATCHLEVEL) >= 0x010201" $sdl_cflags &&
+ enable sdl &&
+ check_struct SDL.h SDL_VideoInfo current_w $sdl_cflags && enable sdl_video_size
+ fi
fi
texi2html -version > /dev/null 2>&1 && enable texi2html || disable texi2html
@@ -2975,7 +2989,7 @@ check_header soundcard.h
enabled_any alsa_indev alsa_outdev && check_lib2 alsa/asoundlib.h snd_pcm_htimestamp -lasound
-enabled jack_indev && check_lib2 jack/jack.h jack_client_open -ljack
+enabled jack_indev && check_lib2 jack/jack.h jack_client_open -ljack && check_func sem_timedwait
enabled_any sndio_indev sndio_outdev && check_lib2 sndio.h sio_open -lsndio
@@ -3017,6 +3031,7 @@ check_cflags -Wwrite-strings
check_cflags -Wtype-limits
check_cflags -Wundef
check_cflags -Wmissing-prototypes
+check_cflags -Wno-pointer-to-int-cast
enabled extra_warnings && check_cflags -Winline
# add some linker flags
diff --git a/doc/APIchanges b/doc/APIchanges
index 0a0fe0892b..6635ec1f30 100644
--- a/doc/APIchanges
+++ b/doc/APIchanges
@@ -13,15 +13,45 @@ libavutil: 2011-04-18
API changes, most recent first:
-2011-04-XX - bebe72f - lavu 51.1.0 - avutil.h
+2011-05-10 - 188dea1 - lavc 53.3.0 - avcodec.h
+ Deprecate AVLPCType and the following fields in
+ AVCodecContext: lpc_coeff_precision, prediction_order_method,
+ min_partition_order, max_partition_order, lpc_type, lpc_passes.
+ Corresponding FLAC encoder options should be used instead.
+
+2011-05-07 - xxxxxxx - lavfi 2.5.0 - avcodec.h
+ Add libavfilter/avcodec.h header and avfilter_copy_frame_props()
+ function.
+
+2011-05-07 - xxxxxxx - lavc 53.5.0 - AVFrame
+ Add format field to AVFrame.
+
+2011-05-07 - xxxxxxx - lavc 53.4.0 - AVFrame
+ Add width and height fields to AVFrame.
+
+2011-05-01 - xxxxxxx - lavfi 2.4.0 - avfilter.h
+ Rename AVFilterBufferRefVideoProps.pixel_aspect to
+ sample_aspect_ratio.
+
+2011-05-01 - xxxxxxx - lavc 53.3.0 - AVFrame
+ Add a sample_aspect_ratio field to AVFrame.
+
+2011-05-01 - xxxxxxx - lavc 53.2.0 - AVFrame
+ Add a pkt_pos field to AVFrame.
+
+2011-04-xx - xxxxxxx - lavu 51.2.0 - mem.h
+ Add av_dynarray_add function for adding
+ an element to a dynamic array.
+
+2011-04-26 - bebe72f - lavu 51.1.0 - avutil.h
Add AVPictureType enum and av_get_picture_type_char(), deprecate
FF_*_TYPE defines and av_get_pict_type_char() defined in
libavcodec/avcodec.h.
-2011-04-xx - 10d3940 - lavfi 2.3.0 - avfilter.h
+2011-04-26 - 10d3940 - lavfi 2.3.0 - avfilter.h
Add pict_type and key_frame fields to AVFilterBufferRefVideo.
-2011-04-xx - 7a11c82 - lavfi 2.2.0 - vsrc_buffer
+2011-04-26 - 7a11c82 - lavfi 2.2.0 - vsrc_buffer
Add sample_aspect_ratio fields to vsrc_buffer arguments
2011-04-21 - 94f7451 - lavc 53.1.0 - avcodec.h
diff --git a/doc/avutil.txt b/doc/avutil.txt
index 210bd07264..0847683d1d 100644
--- a/doc/avutil.txt
+++ b/doc/avutil.txt
@@ -19,7 +19,6 @@ integer.c 128bit integer math
lls.c
mathematics.c greatest common divisor, integer sqrt, integer log2, ...
mem.c memory allocation routines with guaranteed alignment
-softfloat.c
Headers:
bswap.h big/little/native-endian conversion code
diff --git a/doc/developer.texi b/doc/developer.texi
index b78db13441..4ff3f0380b 100644
--- a/doc/developer.texi
+++ b/doc/developer.texi
@@ -180,10 +180,13 @@ should also be avoided if they don't make the code easier to understand.
Always fill out the commit log message. Describe in a few lines what you
changed and why. You can refer to mailing list postings if you fix a
particular bug. Comments such as "fixed!" or "Changed it." are unacceptable.
+ Recommanded format:
+ area changed: Short 1 line description
+
+ details describing what and why and giving references.
@item
- If you apply a patch by someone else, include the name and email address in
- the log message. Since the ffmpeg-cvslog mailing list is publicly
- archived you should add some SPAM protection to the email address. Send an
+ Make sure the author of the commit is set correctly. (see git commit --author)
+ If you apply a patch, send an
answer to ffmpeg-devel (or wherever you got the patch from) saying that
you applied the patch.
@item
@@ -241,10 +244,10 @@ Note, these rules are mostly borrowed from the MPlayer project.
@section Submitting patches
-First, (@pxref{Coding Rules}) above if you did not yet.
+First, read the (@pxref{Coding Rules}) above if you did not yet.
-When you submit your patch, try to send a unified diff (diff '-up'
-option). We cannot read other diffs :-)
+When you submit your patch, please use @code{git format-patch} or
+@code{git send-email}. We cannot read other diffs :-)
Also please do not submit a patch which contains several unrelated changes.
Split it into separate, self-contained pieces. This does not mean splitting
@@ -312,9 +315,14 @@ send a reminder by email. Your patch should eventually be dealt with.
If it depends on a parser or a library, did you add that dependency in
configure?
@item
- Did you "git add" the appropriate files before committing?
+ Did you @code{git add} the appropriate files before committing?
+@item
+ Did you make sure it compiles standalone, i.e. with
+ @code{configure --disable-everything --enable-decoder=foo}
+ (or @code{--enable-demuxer} or whatever your component is)?
@end enumerate
+
@section patch submission checklist
@enumerate
@@ -382,6 +390,8 @@ send a reminder by email. Your patch should eventually be dealt with.
@item
Lines with similar content should be aligned vertically when doing so
improves readability.
+@item
+ Consider to add a regression test for your code.
@end enumerate
@section Patch review process
diff --git a/doc/encoders.texi b/doc/encoders.texi
index 2f347f4fb1..2855d89f7a 100644
--- a/doc/encoders.texi
+++ b/doc/encoders.texi
@@ -353,4 +353,16 @@ HDCD A/D Converter
@end table
+@subheading Other AC-3 Encoding Options
+
+@table @option
+
+@item -stereo_rematrixing @var{boolean}
+Stereo Rematrixing. Enables/Disables use of rematrixing for stereo input. This
+is an optional AC-3 feature that increases quality by selectively encoding
+the left/right channels as mid/side. This option is enabled by default, and it
+is highly recommended that it be left as enabled except for testing purposes.
+
+@end table
+
@c man end ENCODERS
diff --git a/doc/eval.texi b/doc/eval.texi
index d0e04c0c3b..e07267bdfa 100644
--- a/doc/eval.texi
+++ b/doc/eval.texi
@@ -72,6 +72,10 @@ integer. For example, "floor(-1.5)" is "-2.0".
@item trunc(expr)
Round the value of expression @var{expr} towards zero to the nearest
integer. For example, "trunc(-1.5)" is "-1.0".
+
+@item sqrt(expr)
+Compute the square root of @var{expr}. This is equivalent to
+"(@var{expr})^.5".
@end table
Note that:
diff --git a/doc/fate.txt b/doc/fate.txt
index a074ed1e5d..f8ce68ea77 100644
--- a/doc/fate.txt
+++ b/doc/fate.txt
@@ -8,6 +8,7 @@ that is provided separately from the actual source distribution.
Use the following command to get the fate test samples
# rsync -aL rsync://rsync.mplayerhq.hu:/samples/fate-suite/ fate/fate-suite
+# rsync -aL rsync://fate-suite.libav.org:/fate-suite/ fate-suite
To inform the build system about the testsuite location, pass
`--samples=<path to the samples>` to configure or set the SAMPLES Make
diff --git a/doc/general.texi b/doc/general.texi
index 676064ce55..f9787139a4 100644
--- a/doc/general.texi
+++ b/doc/general.texi
@@ -72,7 +72,7 @@ library:
@tab Multimedia format used by Delphine Software games.
@item CD+G @tab @tab X
@tab Video format used by CD+G karaoke disks
-@item Core Audio Format @tab @tab X
+@item Core Audio Format @tab X @tab X
@tab Apple Core Audio Format
@item CRC testing format @tab X @tab
@item Creative Voice @tab X @tab X
@@ -677,6 +677,7 @@ following image formats are supported:
@item Sierra VMD audio @tab @tab X
@tab Used in Sierra VMD files.
@item Smacker audio @tab @tab X
+@item SMPTE 302M AES3 audio @tab @tab X
@item Sonic @tab X @tab X
@tab experimental codec
@item Sonic lossless @tab X @tab X
diff --git a/doc/issue_tracker.txt b/doc/issue_tracker.txt
index e5a74db001..a41b8e5044 100644
--- a/doc/issue_tracker.txt
+++ b/doc/issue_tracker.txt
@@ -5,34 +5,17 @@ NOTE: This is a draft.
Overview:
---------
-FFmpeg uses Roundup for tracking issues, new issues and changes to
-existing issues can be done through a web interface and through email.
+FFmpeg uses Trac for tracking issues, new issues and changes to
+existing issues can be done through a web interface.
It is possible to subscribe to individual issues by adding yourself to the
nosy list or to subscribe to the ffmpeg-issues mailing list which receives
a mail for every change to every issue. Replies to such mails will also
be properly added to the respective issue.
(the above does all work already after light testing)
-The subscription URL for the ffmpeg-issues list is:
-http://live.polito/mailman/listinfo/ffmpeg-issues
+The subscription URL for the ffmpeg-trac list is:
+http(s)://ffmpeg.org/mailman/listinfo/ffmpeg-trac
The URL of the webinterface of the tracker is:
-http(s)://roundup.ffmpeg/roundup/ffmpeg/
-Note the URLs in this document are obfuscated, you must append the top level
-domain for non-profit organizations to the tracker, and of Italy to the
-mailing list.
-
-Email Interface:
-----------------
-There is a mailing list to which all new issues and changes to existing issues
-are sent. You can subscribe through
-http://live.polito/mailman/listinfo/ffmpeg-issues
-Replies to messages there will have their text added to the specific issues.
-Attachments will be added as if they had been uploaded via the web interface.
-You can change the status, substatus, topic, ... by changing the subject in
-your reply like:
-Re: [issue94] register_avcodec and allcodecs.h [type=patch;status=open;substatus=approved]
-Roundup will then change things as you requested and remove the [...] from
-the subject before forwarding the mail to the mailing list.
-
+http(s)://ffmpeg.org/trac/ffmpeg
NOTE: issue = (bug report || patch || feature request)
diff --git a/ffmpeg.c b/ffmpeg.c
index 62814d6564..b67ddc0312 100644
--- a/ffmpeg.c
+++ b/ffmpeg.c
@@ -19,9 +19,6 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-/* needed for usleep() */
-#define _XOPEN_SOURCE 600
-
#include "config.h"
#include <ctype.h>
#include <string.h>
@@ -50,6 +47,7 @@
#include "libavformat/ffm.h" // not public API
#if CONFIG_AVFILTER
+# include "libavfilter/avcodec.h"
# include "libavfilter/avfilter.h"
# include "libavfilter/avfiltergraph.h"
# include "libavfilter/vsrc_buffer.h"
@@ -195,6 +193,10 @@ static char *subtitle_codec_name = NULL;
static char *subtitle_language = NULL;
static unsigned int subtitle_codec_tag = 0;
+static int data_disable = 0;
+static char *data_codec_name = NULL;
+static unsigned int data_codec_tag = 0;
+
static float mux_preload= 0.5;
static float mux_max_delay= 0.7;
@@ -213,11 +215,12 @@ static const char *pass_logfilename_prefix;
static int audio_stream_copy = 0;
static int video_stream_copy = 0;
static int subtitle_stream_copy = 0;
+static int data_stream_copy = 0;
static int video_sync_method= -1;
static int audio_sync_method= 0;
static float audio_drift_threshold= 0.1;
static int copy_ts= 0;
-static int copy_tb;
+static int copy_tb= 0;
static int opt_shortest = 0;
static int video_global_header = 0;
static char *vstats_filename;
@@ -281,14 +284,13 @@ typedef struct AVOutputStream {
AVBitStreamFilterContext *bitstream_filters;
/* video only */
int video_resample;
- AVFrame pict_tmp; /* temporary image for resampling */
+ AVFrame resample_frame; /* temporary frame for image resampling */
struct SwsContext *img_resample_ctx; /* for image resampling */
int resample_height;
int resample_width;
int resample_pix_fmt;
float frame_aspect_ratio;
-
/* forced key frames */
int64_t *forced_kf_pts;
int forced_kf_count;
@@ -312,6 +314,8 @@ typedef struct AVOutputStream {
char *avfilter;
AVFilterGraph *graph;
#endif
+
+ int sws_flags;
} AVOutputStream;
static AVOutputStream **output_streams_for_file[MAX_FILES] = { NULL };
@@ -389,7 +393,7 @@ static int configure_video_filters(AVInputStream *ist, AVOutputStream *ost)
snprintf(args, 255, "%d:%d:flags=0x%X",
codec->width,
codec->height,
- (int)av_get_int(sws_opts, "sws_flags", NULL));
+ ost->sws_flags);
if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
NULL, args, NULL, ost->graph)) < 0)
return ret;
@@ -398,7 +402,7 @@ static int configure_video_filters(AVInputStream *ist, AVOutputStream *ost)
last_filter = filter;
}
- snprintf(args, sizeof(args), "flags=0x%X", (int)av_get_int(sws_opts, "sws_flags", NULL));
+ snprintf(args, sizeof(args), "flags=0x%X", ost->sws_flags);
ost->graph->scale_sws_opts = av_strdup(args);
if (ost->avfilter) {
@@ -557,6 +561,7 @@ static int ffmpeg_exit(int ret)
av_free(video_codec_name);
av_free(audio_codec_name);
av_free(subtitle_codec_name);
+ av_free(data_codec_name);
av_free(video_standard);
@@ -657,11 +662,11 @@ static void choose_pixel_fmt(AVStream *st, AVCodec *codec)
}
if (*p == -1) {
if(st->codec->pix_fmt != PIX_FMT_NONE)
- av_log(NULL, AV_LOG_WARNING,
- "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
- av_pix_fmt_descriptors[st->codec->pix_fmt].name,
- codec->name,
- av_pix_fmt_descriptors[codec->pix_fmts[0]].name);
+ av_log(NULL, AV_LOG_WARNING,
+ "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
+ av_pix_fmt_descriptors[st->codec->pix_fmt].name,
+ codec->name,
+ av_pix_fmt_descriptors[codec->pix_fmts[0]].name);
st->codec->pix_fmt = codec->pix_fmts[0];
}
}
@@ -685,6 +690,8 @@ static AVOutputStream *new_output_stream(AVFormatContext *oc, int file_idx)
}
ost->file_index = file_idx;
ost->index = idx;
+
+ ost->sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
return ost;
}
@@ -1141,8 +1148,8 @@ static void do_video_out(AVFormatContext *s,
AVFrame *in_picture,
int *frame_size)
{
- int nb_frames, i, ret, resample_changed;
- AVFrame *final_picture, *formatted_picture, *resampling_dst;
+ int nb_frames, i, ret, av_unused resample_changed;
+ AVFrame *final_picture, *formatted_picture;
AVCodecContext *enc, *dec;
double sync_ipts;
@@ -1187,8 +1194,8 @@ static void do_video_out(AVFormatContext *s,
formatted_picture = in_picture;
final_picture = formatted_picture;
- resampling_dst = &ost->pict_tmp;
+#if !CONFIG_AVFILTER
resample_changed = ost->resample_width != dec->width ||
ost->resample_height != dec->height ||
ost->resample_pix_fmt != dec->pix_fmt;
@@ -1199,32 +1206,39 @@ static void do_video_out(AVFormatContext *s,
ist->file_index, ist->index,
ost->resample_width, ost->resample_height, avcodec_get_pix_fmt_name(ost->resample_pix_fmt),
dec->width , dec->height , avcodec_get_pix_fmt_name(dec->pix_fmt));
- if(!ost->video_resample)
- ffmpeg_exit(1);
+ ost->resample_width = dec->width;
+ ost->resample_height = dec->height;
+ ost->resample_pix_fmt = dec->pix_fmt;
}
-#if !CONFIG_AVFILTER
+ ost->video_resample = dec->width != enc->width ||
+ dec->height != enc->height ||
+ dec->pix_fmt != enc->pix_fmt;
+
if (ost->video_resample) {
- final_picture = &ost->pict_tmp;
- if (resample_changed) {
+ final_picture = &ost->resample_frame;
+ if (!ost->img_resample_ctx || resample_changed) {
+ /* initialize the destination picture */
+ if (!ost->resample_frame.data[0]) {
+ avcodec_get_frame_defaults(&ost->resample_frame);
+ if (avpicture_alloc((AVPicture *)&ost->resample_frame, enc->pix_fmt,
+ enc->width, enc->height)) {
+ fprintf(stderr, "Cannot allocate temp picture, check pix fmt\n");
+ ffmpeg_exit(1);
+ }
+ }
/* initialize a new scaler context */
sws_freeContext(ost->img_resample_ctx);
- sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
- ost->img_resample_ctx = sws_getContext(
- ist->st->codec->width,
- ist->st->codec->height,
- ist->st->codec->pix_fmt,
- ost->st->codec->width,
- ost->st->codec->height,
- ost->st->codec->pix_fmt,
- sws_flags, NULL, NULL, NULL);
+ ost->img_resample_ctx = sws_getContext(dec->width, dec->height, dec->pix_fmt,
+ enc->width, enc->height, enc->pix_fmt,
+ ost->sws_flags, NULL, NULL, NULL);
if (ost->img_resample_ctx == NULL) {
fprintf(stderr, "Cannot get resampling context\n");
ffmpeg_exit(1);
}
}
sws_scale(ost->img_resample_ctx, formatted_picture->data, formatted_picture->linesize,
- 0, ost->resample_height, resampling_dst->data, resampling_dst->linesize);
+ 0, ost->resample_height, ost->resample_frame.data, ost->resample_frame.linesize);
}
#endif
@@ -1254,7 +1268,7 @@ static void do_video_out(AVFormatContext *s,
/* better than nothing: use input picture interlaced
settings */
big_picture.interlaced_frame = in_picture->interlaced_frame;
- if(avcodec_opts[AVMEDIA_TYPE_VIDEO]->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)){
+ if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
if(top_field_first == -1)
big_picture.top_field_first = in_picture->top_field_first;
else
@@ -1272,7 +1286,7 @@ static void do_video_out(AVFormatContext *s,
//av_log(NULL, AV_LOG_DEBUG, "%"PRId64" -> encoder\n", ost->sync_opts);
if (ost->forced_kf_index < ost->forced_kf_count &&
big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
- big_picture.pict_type = FF_I_TYPE;
+ big_picture.pict_type = AV_PICTURE_TYPE_I;
ost->forced_kf_index++;
}
ret = avcodec_encode_video(enc,
@@ -1347,7 +1361,7 @@ static void do_video_stats(AVFormatContext *os, AVOutputStream *ost,
avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
(double)video_size / 1024, ti1, bitrate, avg_bitrate);
- fprintf(vstats_file,"type= %c\n", av_get_pict_type_char(enc->coded_frame->pict_type));
+ fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
}
}
@@ -1389,11 +1403,11 @@ static void print_report(AVFormatContext **output_files,
ti1 = 1e10;
vid = 0;
for(i=0;i<nb_ostreams;i++) {
- float q= -1;
+ float q = -1;
ost = ost_table[i];
enc = ost->st->codec;
- if(!ost->st->stream_copy && enc->coded_frame)
- q= enc->coded_frame->quality/(float)FF_QP2LAMBDA;
+ if (!ost->st->stream_copy && enc->coded_frame)
+ q = enc->coded_frame->quality/(float)FF_QP2LAMBDA;
if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
}
@@ -1407,7 +1421,7 @@ static void print_report(AVFormatContext **output_files,
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
if(qp_hist){
int j;
- int qp= lrintf(q);
+ int qp = lrintf(q);
if(qp>=0 && qp<FF_ARRAY_ELEMS(qp_histogram))
qp_histogram[qp]++;
for(j=0; j<32; j++)
@@ -1489,7 +1503,7 @@ static int output_packet(AVInputStream *ist, int ist_index,
AVFormatContext *os;
AVOutputStream *ost;
int ret, i;
- int got_picture;
+ int got_output;
AVFrame picture;
void *buffer_to_free = NULL;
static unsigned int samples_size= 0;
@@ -1521,7 +1535,7 @@ static int output_packet(AVInputStream *ist, int ist_index,
pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
//while we have more to decode or while the decoder did output something on EOF
- while (avpkt.size > 0 || (!pkt && ist->next_pts != ist->pts)) {
+ while (avpkt.size > 0 || (!pkt && got_output)) {
uint8_t *data_buf, *decoded_data_buf;
int data_size, decoded_data_size;
handle_eof:
@@ -1557,9 +1571,10 @@ static int output_packet(AVInputStream *ist, int ist_index,
avpkt.data += ret;
avpkt.size -= ret;
data_size = ret;
+ got_output = decoded_data_size > 0;
/* Some bug in mpeg audio decoder gives */
/* decoded_data_size < 0, it seems they are overflows */
- if (decoded_data_size <= 0) {
+ if (!got_output) {
/* no audio frame */
continue;
}
@@ -1576,11 +1591,11 @@ static int output_packet(AVInputStream *ist, int ist_index,
pkt_pts = AV_NOPTS_VALUE;
ret = avcodec_decode_video2(ist->st->codec,
- &picture, &got_picture, &avpkt);
+ &picture, &got_output, &avpkt);
ist->st->quality= picture.quality;
if (ret < 0)
goto fail_decode;
- if (!got_picture) {
+ if (!got_output) {
/* no picture yet */
goto discard_packet;
}
@@ -1597,10 +1612,10 @@ static int output_packet(AVInputStream *ist, int ist_index,
break;
case AVMEDIA_TYPE_SUBTITLE:
ret = avcodec_decode_subtitle2(ist->st->codec,
- &subtitle, &got_picture, &avpkt);
+ &subtitle, &got_output, &avpkt);
if (ret < 0)
goto fail_decode;
- if (!got_picture) {
+ if (!got_output) {
goto discard_packet;
}
subtitle_to_free = &subtitle;
@@ -1633,14 +1648,11 @@ static int output_packet(AVInputStream *ist, int ist_index,
for(i=0;i<nb_ostreams;i++) {
ost = ost_table[i];
if (ost->input_video_filter && ost->source_index == ist_index) {
- AVRational sar;
- if (ist->st->sample_aspect_ratio.num) sar = ist->st->sample_aspect_ratio;
- else sar = ist->st->codec->sample_aspect_ratio;
+ if (!picture.sample_aspect_ratio.num)
+ picture.sample_aspect_ratio = ist->st->sample_aspect_ratio;
+ picture.pts = ist->pts;
// add it to be filtered
- av_vsrc_buffer_add_frame2(ost->input_video_filter, &picture,
- ist->pts,
- sar, ist->st->codec->width, ist->st->codec->height,
- ist->st->codec->pix_fmt, ""); //TODO user setable params
+ av_vsrc_buffer_add_frame2(ost->input_video_filter, &picture, ""); //TODO user setable params
}
}
}
@@ -1699,7 +1711,7 @@ static int output_packet(AVInputStream *ist, int ist_index,
case AVMEDIA_TYPE_VIDEO:
#if CONFIG_AVFILTER
if (ost->picref->video && !ost->frame_aspect_ratio)
- ost->st->codec->sample_aspect_ratio = ost->picref->video->pixel_aspect;
+ ost->st->codec->sample_aspect_ratio = ost->picref->video->sample_aspect_ratio;
#endif
do_video_out(os, ost, ist, &picture, &frame_size);
if (vstats_filename && frame_size)
@@ -2257,6 +2269,8 @@ static int transcode(AVFormatContext **output_files,
codec->width = icodec->width;
codec->height = icodec->height;
break;
+ case AVMEDIA_TYPE_DATA:
+ break;
default:
abort();
}
@@ -2284,27 +2298,6 @@ static int transcode(AVFormatContext **output_files,
codec->height != icodec->height ||
codec->pix_fmt != icodec->pix_fmt;
if (ost->video_resample) {
-#if !CONFIG_AVFILTER
- avcodec_get_frame_defaults(&ost->pict_tmp);
- if(avpicture_alloc((AVPicture*)&ost->pict_tmp, codec->pix_fmt,
- codec->width, codec->height)) {
- fprintf(stderr, "Cannot allocate temp picture, check pix fmt\n");
- ffmpeg_exit(1);
- }
- sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
- ost->img_resample_ctx = sws_getContext(
- icodec->width,
- icodec->height,
- icodec->pix_fmt,
- codec->width,
- codec->height,
- codec->pix_fmt,
- sws_flags, NULL, NULL, NULL);
- if (ost->img_resample_ctx == NULL) {
- fprintf(stderr, "Cannot get resampling context\n");
- ffmpeg_exit(1);
- }
-#endif
codec->bits_per_raw_sample= frame_bits_per_raw_sample;
}
ost->resample_height = icodec->height;
@@ -2576,7 +2569,7 @@ static int transcode(AVFormatContext **output_files,
if (!using_stdin) {
if(verbose >= 0)
- fprintf(stderr, "Press [q] to stop encoding\n");
+ fprintf(stderr, "Press [q] to stop, [?] for help\n");
avio_set_interrupt_cb(decode_interrupt_cb);
}
term_init();
@@ -2600,6 +2593,50 @@ static int transcode(AVFormatContext **output_files,
key = read_key();
if (key == 'q')
break;
+ if (key == '+') verbose++;
+ if (key == '-') verbose--;
+ if (key == 's') qp_hist ^= 1;
+ if (key == 'h'){
+ if (do_hex_dump){
+ do_hex_dump = do_pkt_dump = 0;
+ } else if(do_pkt_dump){
+ do_hex_dump = 1;
+ } else
+ do_pkt_dump = 1;
+ av_log_set_level(AV_LOG_DEBUG);
+ }
+ if (key == 'd' || key == 'D'){
+ int debug=0;
+ if(key == 'D') {
+ ist = ist_table[0];
+ debug = ist->st->codec->debug<<1;
+ if(!debug) debug = 1;
+ while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
+ debug += debug;
+ }else
+ scanf("%d", &debug);
+ for(i=0;i<nb_istreams;i++) {
+ ist = ist_table[i];
+ ist->st->codec->debug = debug;
+ }
+ for(i=0;i<nb_ostreams;i++) {
+ ost = ost_table[i];
+ ost->st->codec->debug = debug;
+ }
+ if(debug) av_log_set_level(AV_LOG_DEBUG);
+ fprintf(stderr,"debug=%d\n", debug);
+ }
+ if (key == '?'){
+ fprintf(stderr, "key function\n"
+ "? show this help\n"
+ "+ increase verbosity\n"
+ "- decrease verbosity\n"
+ "D cycle through available debug modes\n"
+ "h dump packets/hex press to cycle through the 3 states\n"
+ "q quit\n"
+ "s Show QP histogram\n"
+ );
+ }
}
/* select the stream that we must read now by looking at the
@@ -2706,7 +2743,11 @@ static int transcode(AVFormatContext **output_files,
/* finish if recording time exhausted */
if (recording_time != INT64_MAX &&
- av_compare_ts(pkt.pts, ist->st->time_base, recording_time + start_time, (AVRational){1, 1000000}) >= 0) {
+ (pkt.pts != AV_NOPTS_VALUE || pkt.dts != AV_NOPTS_VALUE ?
+ av_compare_ts(pkt.pts, ist->st->time_base, recording_time + start_time, (AVRational){1, 1000000})
+ :
+ av_compare_ts(ist->pts, AV_TIME_BASE_Q, recording_time + start_time, (AVRational){1, 1000000})
+ )>= 0) {
ist->is_past_recording_time = 1;
goto discard_packet;
}
@@ -2796,7 +2837,7 @@ static int transcode(AVFormatContext **output_files,
av_fifo_free(ost->fifo); /* works even if fifo is not
initialized but set to zero */
av_freep(&ost->st->codec->subtitle_header);
- av_free(ost->pict_tmp.data[0]);
+ av_free(ost->resample_frame.data[0]);
av_free(ost->forced_kf_pts);
if (ost->video_resample)
sws_freeContext(ost->img_resample_ctx);
@@ -3019,6 +3060,11 @@ static void opt_subtitle_codec(const char *arg)
opt_codec(&subtitle_stream_copy, &subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, arg);
}
+static void opt_data_codec(const char *arg)
+{
+ opt_codec(&data_stream_copy, &data_codec_name, AVMEDIA_TYPE_DATA, arg);
+}
+
static int opt_codec_tag(const char *opt, const char *arg)
{
char *tail;
@@ -3410,17 +3456,23 @@ static void opt_input_file(const char *filename)
av_freep(&video_codec_name);
av_freep(&audio_codec_name);
av_freep(&subtitle_codec_name);
+ uninit_opts();
+ init_opts();
}
-static void check_audio_video_sub_inputs(int *has_video_ptr, int *has_audio_ptr,
- int *has_subtitle_ptr)
+static void check_inputs(int *has_video_ptr,
+ int *has_audio_ptr,
+ int *has_subtitle_ptr,
+ int *has_data_ptr)
{
- int has_video, has_audio, has_subtitle, i, j;
+ int has_video, has_audio, has_subtitle, has_data, i, j;
AVFormatContext *ic;
has_video = 0;
has_audio = 0;
has_subtitle = 0;
+ has_data = 0;
+
for(j=0;j<nb_input_files;j++) {
ic = input_files[j];
for(i=0;i<ic->nb_streams;i++) {
@@ -3438,6 +3490,7 @@ static void check_audio_video_sub_inputs(int *has_video_ptr, int *has_audio_ptr,
case AVMEDIA_TYPE_DATA:
case AVMEDIA_TYPE_ATTACHMENT:
case AVMEDIA_TYPE_UNKNOWN:
+ has_data = 1;
break;
default:
abort();
@@ -3447,6 +3500,7 @@ static void check_audio_video_sub_inputs(int *has_video_ptr, int *has_audio_ptr,
*has_video_ptr = has_video;
*has_audio_ptr = has_audio;
*has_subtitle_ptr = has_subtitle;
+ *has_data_ptr = has_data;
}
static void new_video_stream(AVFormatContext *oc, int file_idx)
@@ -3679,6 +3733,45 @@ static void new_audio_stream(AVFormatContext *oc, int file_idx)
audio_stream_copy = 0;
}
+static void new_data_stream(AVFormatContext *oc, int file_idx)
+{
+ AVStream *st;
+ AVOutputStream *ost;
+ AVCodec *codec=NULL;
+ AVCodecContext *data_enc;
+
+ st = av_new_stream(oc, oc->nb_streams < nb_streamid_map ? streamid_map[oc->nb_streams] : 0);
+ if (!st) {
+ fprintf(stderr, "Could not alloc stream\n");
+ ffmpeg_exit(1);
+ }
+ ost = new_output_stream(oc, file_idx);
+ data_enc = st->codec;
+ output_codecs = grow_array(output_codecs, sizeof(*output_codecs), &nb_output_codecs, nb_output_codecs + 1);
+ if (!data_stream_copy) {
+ fprintf(stderr, "Data stream encoding not supported yet (only streamcopy)\n");
+ ffmpeg_exit(1);
+ }
+ avcodec_get_context_defaults3(st->codec, codec);
+
+ data_enc->codec_type = AVMEDIA_TYPE_DATA;
+
+ if (data_codec_tag)
+ data_enc->codec_tag= data_codec_tag;
+
+ if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
+ data_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
+ avcodec_opts[AVMEDIA_TYPE_DATA]->flags |= CODEC_FLAG_GLOBAL_HEADER;
+ }
+ if (data_stream_copy) {
+ st->stream_copy = 1;
+ }
+
+ data_disable = 0;
+ av_freep(&data_codec_name);
+ data_stream_copy = 0;
+}
+
static void new_subtitle_stream(AVFormatContext *oc, int file_idx)
{
AVStream *st;
@@ -3749,6 +3842,7 @@ static int opt_new_stream(const char *opt, const char *arg)
if (!strcmp(opt, "newvideo" )) new_video_stream (oc, file_idx);
else if (!strcmp(opt, "newaudio" )) new_audio_stream (oc, file_idx);
else if (!strcmp(opt, "newsubtitle")) new_subtitle_stream(oc, file_idx);
+ else if (!strcmp(opt, "newdata" )) new_data_stream (oc, file_idx);
else av_assert0(0);
return 0;
}
@@ -3760,8 +3854,7 @@ static int opt_streamid(const char *opt, const char *arg)
char *p;
char idx_str[16];
- strncpy(idx_str, arg, sizeof(idx_str));
- idx_str[sizeof(idx_str)-1] = '\0';
+ av_strlcpy(idx_str, arg, sizeof(idx_str));
p = strchr(idx_str, ':');
if (!p) {
fprintf(stderr,
@@ -3779,8 +3872,8 @@ static int opt_streamid(const char *opt, const char *arg)
static void opt_output_file(const char *filename)
{
AVFormatContext *oc;
- int err, use_video, use_audio, use_subtitle;
- int input_has_video, input_has_audio, input_has_subtitle;
+ int err, use_video, use_audio, use_subtitle, use_data;
+ int input_has_video, input_has_audio, input_has_subtitle, input_has_data;
AVFormatParameters params, *ap = &params;
AVOutputFormat *file_oformat;
@@ -3808,28 +3901,36 @@ static void opt_output_file(const char *filename)
use_video = file_oformat->video_codec != CODEC_ID_NONE || video_stream_copy || video_codec_name;
use_audio = file_oformat->audio_codec != CODEC_ID_NONE || audio_stream_copy || audio_codec_name;
use_subtitle = file_oformat->subtitle_codec != CODEC_ID_NONE || subtitle_stream_copy || subtitle_codec_name;
+ use_data = data_stream_copy || data_codec_name; /* XXX once generic data codec will be available add a ->data_codec reference and use it here */
/* disable if no corresponding type found and at least one
input file */
if (nb_input_files > 0) {
- check_audio_video_sub_inputs(&input_has_video, &input_has_audio,
- &input_has_subtitle);
+ check_inputs(&input_has_video,
+ &input_has_audio,
+ &input_has_subtitle,
+ &input_has_data);
+
if (!input_has_video)
use_video = 0;
if (!input_has_audio)
use_audio = 0;
if (!input_has_subtitle)
use_subtitle = 0;
+ if (!input_has_data)
+ use_data = 0;
}
/* manual disable */
if (audio_disable) use_audio = 0;
if (video_disable) use_video = 0;
if (subtitle_disable) use_subtitle = 0;
+ if (data_disable) use_data = 0;
if (use_video) new_video_stream(oc, nb_output_files);
if (use_audio) new_audio_stream(oc, nb_output_files);
if (use_subtitle) new_subtitle_stream(oc, nb_output_files);
+ if (use_data) new_data_stream(oc, nb_output_files);
oc->timestamp = recording_timestamp;
@@ -3890,6 +3991,8 @@ static void opt_output_file(const char *filename)
set_context_opts(oc, avformat_opts, AV_OPT_FLAG_ENCODING_PARAM, NULL);
av_freep(&forced_key_frames);
+ uninit_opts();
+ init_opts();
}
/* same option as mencoder */
@@ -4239,6 +4342,8 @@ static int opt_preset(const char *opt, const char *arg)
opt_video_codec(tmp2);
}else if(!strcmp(tmp, "scodec")){
opt_subtitle_codec(tmp2);
+ }else if(!strcmp(tmp, "dcodec")){
+ opt_data_codec(tmp2);
}else if(opt_default(tmp, tmp2) < 0){
fprintf(stderr, "%s: Invalid option or argument: '%s', parsed as '%s' = '%s'\n", filename, line, tmp, tmp2);
ffmpeg_exit(1);
@@ -4392,6 +4497,8 @@ static const OptionDef options[] = {
{ "vpre", OPT_FUNC2 | HAS_ARG | OPT_VIDEO | OPT_EXPERT, {(void*)opt_preset}, "set the video options to the indicated preset", "preset" },
{ "spre", OPT_FUNC2 | HAS_ARG | OPT_SUBTITLE | OPT_EXPERT, {(void*)opt_preset}, "set the subtitle options to the indicated preset", "preset" },
{ "fpre", OPT_FUNC2 | HAS_ARG | OPT_EXPERT, {(void*)opt_preset}, "set options from indicated preset file", "filename" },
+ /* data codec support */
+ { "dcodec", HAS_ARG | OPT_DATA, {(void*)opt_data_codec}, "force data codec ('copy' to copy stream)", "codec" },
{ "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
{ NULL, },
diff --git a/ffplay.c b/ffplay.c
index 30f6a0975c..cb24162ead 100644
--- a/ffplay.c
+++ b/ffplay.c
@@ -19,8 +19,6 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#define _XOPEN_SOURCE 600
-
#include "config.h"
#include <inttypes.h>
#include <math.h>
@@ -40,6 +38,7 @@
#include "libavcodec/avfft.h"
#if CONFIG_AVFILTER
+# include "libavfilter/avcodec.h"
# include "libavfilter/avfilter.h"
# include "libavfilter/avfiltergraph.h"
#endif
@@ -692,10 +691,10 @@ static void video_image_display(VideoState *is)
vp = &is->pictq[is->pictq_rindex];
if (vp->bmp) {
#if CONFIG_AVFILTER
- if (vp->picref->video->pixel_aspect.num == 0)
+ if (vp->picref->video->sample_aspect_ratio.num == 0)
aspect_ratio = 0;
else
- aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
+ aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
#else
/* XXX: use variable in the frame */
@@ -1381,7 +1380,7 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_
#if defined(DEBUG_SYNC) && 0
printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
- av_get_pict_type_char(src_frame->pict_type), pts, pts1);
+ av_get_picture_type_char(src_frame->pict_type), pts, pts1);
#endif
/* wait until we have space to put a new picture */
@@ -1644,7 +1643,7 @@ static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
codec->opaque = ctx;
if((codec->codec->capabilities & CODEC_CAP_DR1)
) {
- codec->flags |= CODEC_FLAG_EMU_EDGE;
+ av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
priv->use_dr1 = 1;
codec->get_buffer = input_get_buffer;
codec->release_buffer = input_release_buffer;
@@ -1686,9 +1685,9 @@ static int input_request_frame(AVFilterLink *link)
}
av_free_packet(&pkt);
+ avfilter_copy_frame_props(picref, priv->frame);
picref->pts = pts;
- picref->pos = pkt.pos;
- picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
+
avfilter_start_frame(link, picref);
avfilter_draw_slice(link, 0, link->h, 1);
avfilter_end_frame(link);
@@ -1831,6 +1830,7 @@ static int video_thread(void *arg)
#else
ret = get_video_frame(is, frame, &pts_int, &pkt);
pos = pkt.pos;
+ av_free_packet(&pkt);
#endif
if (ret < 0) goto the_end;
@@ -1841,9 +1841,7 @@ static int video_thread(void *arg)
pts = pts_int*av_q2d(is->video_st->time_base);
ret = queue_picture(is, frame, pts, pos);
-#if !CONFIG_AVFILTER
- av_free_packet(&pkt);
-#endif
+
if (ret < 0)
goto the_end;
@@ -1937,9 +1935,7 @@ static int subtitle_thread(void *arg)
/* copy samples for viewing in editor window */
static void update_sample_display(VideoState *is, short *samples, int samples_size)
{
- int size, len, channels;
-
- channels = is->audio_st->codec->channels;
+ int size, len;
size = samples_size / sizeof(short);
while (size > 0) {
@@ -2193,6 +2189,9 @@ static int stream_component_open(VideoState *is, int stream_index)
}
codec = avcodec_find_decoder(avctx->codec_id);
+ if (!codec)
+ return -1;
+
avctx->debug_mv = debug_mv;
avctx->debug = debug;
avctx->workaround_bugs = workaround_bugs;
@@ -2209,8 +2208,10 @@ static int stream_component_open(VideoState *is, int stream_index)
set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
- if (!codec ||
- avcodec_open(avctx, codec) < 0)
+ if(codec->capabilities & CODEC_CAP_DR1)
+ avctx->flags |= CODEC_FLAG_EMU_EDGE;
+
+ if (avcodec_open(avctx, codec) < 0)
return -1;
/* prepare audio output */
@@ -2381,10 +2382,18 @@ static int read_thread(void *arg)
ap->height= frame_height;
ap->time_base= (AVRational){1, 25};
ap->pix_fmt = frame_pix_fmt;
+ ic->flags |= AVFMT_FLAG_PRIV_OPT;
- set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
+ if (err >= 0) {
+ set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
+ err = av_demuxer_open(ic, ap);
+ if(err < 0){
+ avformat_free_context(ic);
+ ic= NULL;
+ }
+ }
if (err < 0) {
print_error(is->filename, err);
ret = -1;
diff --git a/ffserver.c b/ffserver.c
index ed4ef54bc3..0e29952071 100644
--- a/ffserver.c
+++ b/ffserver.c
@@ -19,8 +19,6 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#define _XOPEN_SOURCE 600
-
#include "config.h"
#if !HAVE_CLOSESOCKET
#define closesocket close
diff --git a/libavcodec/4xm.c b/libavcodec/4xm.c
index 3a42642514..d89b494b09 100644
--- a/libavcodec/4xm.c
+++ b/libavcodec/4xm.c
@@ -780,11 +780,11 @@ static int decode_frame(AVCodecContext *avctx,
}
if(frame_4cc == AV_RL32("ifr2")){
- p->pict_type= FF_I_TYPE;
+ p->pict_type= AV_PICTURE_TYPE_I;
if(decode_i2_frame(f, buf-4, frame_size) < 0)
return -1;
}else if(frame_4cc == AV_RL32("ifrm")){
- p->pict_type= FF_I_TYPE;
+ p->pict_type= AV_PICTURE_TYPE_I;
if(decode_i_frame(f, buf, frame_size) < 0)
return -1;
}else if(frame_4cc == AV_RL32("pfrm") || frame_4cc == AV_RL32("pfr2")){
@@ -796,7 +796,7 @@ static int decode_frame(AVCodecContext *avctx,
}
}
- p->pict_type= FF_P_TYPE;
+ p->pict_type= AV_PICTURE_TYPE_P;
if(decode_p_frame(f, buf, frame_size) < 0)
return -1;
}else if(frame_4cc == AV_RL32("snd_")){
@@ -805,7 +805,7 @@ static int decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "ignoring unknown chunk length:%d\n", buf_size);
}
- p->key_frame= p->pict_type == FF_I_TYPE;
+ p->key_frame= p->pict_type == AV_PICTURE_TYPE_I;
*picture= *p;
*data_size = sizeof(AVPicture);
@@ -832,6 +832,8 @@ static av_cold int decode_init(AVCodecContext *avctx){
return 1;
}
+ avcodec_get_frame_defaults(&f->current_picture);
+ avcodec_get_frame_defaults(&f->last_picture);
f->version= AV_RL32(avctx->extradata)>>16;
common_init(avctx);
init_vlcs(f);
diff --git a/libavcodec/8bps.c b/libavcodec/8bps.c
index 4757057876..390ce8f72f 100644
--- a/libavcodec/8bps.c
+++ b/libavcodec/8bps.c
@@ -157,6 +157,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
c->avctx = avctx;
+ avcodec_get_frame_defaults(&c->pic);
c->pic.data[0] = NULL;
switch (avctx->bits_per_coded_sample) {
diff --git a/libavcodec/8svx.c b/libavcodec/8svx.c
index 3f7d3efc76..4f95d9034e 100644
--- a/libavcodec/8svx.c
+++ b/libavcodec/8svx.c
@@ -1,5 +1,4 @@
/*
- * 8SVX audio decoder
* Copyright (C) 2008 Jaikrishnan Menon
*
* This file is part of FFmpeg.
@@ -22,9 +21,17 @@
/**
* @file
* 8svx audio decoder
- * @author Jaikrishnan Menon
* supports: fibonacci delta encoding
* : exponential encoding
+ *
+ * For more information about the 8SVX format:
+ * http://netghost.narod.ru/gff/vendspec/iff/iff.txt
+ * http://sox.sourceforge.net/AudioFormats-11.html
+ * http://aminet.net/package/mus/misc/wavepak
+ * http://amigan.1emu.net/reg/8SVX.txt
+ *
+ * Samples can be found here:
+ * http://aminet.net/mods/smpl/
*/
#include "avcodec.h"
@@ -40,7 +47,6 @@ static const int16_t fibonacci[16] = { -34<<8, -21<<8, -13<<8, -8<<8, -5<<8,
static const int16_t exponential[16] = { -128<<8, -64<<8, -32<<8, -16<<8, -8<<8, -4<<8, -2<<8, -1<<8,
0, 1<<8, 2<<8, 4<<8, 8<<8, 16<<8, 32<<8, 64<<8 };
-/** decode a frame */
static int eightsvx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
AVPacket *avpkt)
{
@@ -73,7 +79,6 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data, int *data_si
return consumed;
}
-/** initialize 8svx decoder */
static av_cold int eightsvx_decode_init(AVCodecContext *avctx)
{
EightSvxContext *esc = avctx->priv_data;
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index 4e693cc251..ab72adc9be 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -143,7 +143,7 @@ OBJS-$(CONFIG_FFV1_ENCODER) += ffv1.o rangecoder.o
OBJS-$(CONFIG_FFVHUFF_DECODER) += huffyuv.o
OBJS-$(CONFIG_FFVHUFF_ENCODER) += huffyuv.o
OBJS-$(CONFIG_FLAC_DECODER) += flacdec.o flacdata.o flac.o vorbis_data.o
-OBJS-$(CONFIG_FLAC_ENCODER) += flacenc.o flacdata.o flac.o
+OBJS-$(CONFIG_FLAC_ENCODER) += flacenc.o flacdata.o flac.o vorbis_data.o
OBJS-$(CONFIG_FLASHSV_DECODER) += flashsv.o
OBJS-$(CONFIG_FLASHSV_ENCODER) += flashsvenc.o
OBJS-$(CONFIG_FLASHSV2_ENCODER) += flashsv2enc.o
@@ -276,8 +276,9 @@ OBJS-$(CONFIG_MSMPEG4V3_ENCODER) += msmpeg4.o msmpeg4data.o h263dec.o \
h263.o ituh263dec.o mpeg4videodec.o
OBJS-$(CONFIG_MSRLE_DECODER) += msrle.o msrledec.o
OBJS-$(CONFIG_MSVIDEO1_DECODER) += msvideo1.o
+OBJS-$(CONFIG_MSVIDEO1_ENCODER) += msvideo1enc.o elbg.o
OBJS-$(CONFIG_MSZH_DECODER) += lcldec.o
-OBJS-$(CONFIG_MXPEG_DECODER) += mxpegdec.o
+OBJS-$(CONFIG_MXPEG_DECODER) += mxpegdec.o mjpegdec.o mjpeg.o
OBJS-$(CONFIG_NELLYMOSER_DECODER) += nellymoserdec.o nellymoser.o
OBJS-$(CONFIG_NELLYMOSER_ENCODER) += nellymoserenc.o nellymoser.o
OBJS-$(CONFIG_NUV_DECODER) += nuv.o rtjpeg.o
@@ -329,6 +330,7 @@ OBJS-$(CONFIG_RV30_DECODER) += rv30.o rv34.o rv30dsp.o \
mpegvideo.o error_resilience.o
OBJS-$(CONFIG_RV40_DECODER) += rv40.o rv34.o rv40dsp.o \
mpegvideo.o error_resilience.o
+OBJS-$(CONFIG_S302M_DECODER) += s302m.o
OBJS-$(CONFIG_SGI_DECODER) += sgidec.o
OBJS-$(CONFIG_SGI_ENCODER) += sgienc.o rle.o
OBJS-$(CONFIG_SHORTEN_DECODER) += shorten.o
@@ -531,30 +533,32 @@ OBJS-$(CONFIG_ADTS_MUXER) += mpeg4audio.o
OBJS-$(CONFIG_CAF_DEMUXER) += mpeg4audio.o mpegaudiodata.o
OBJS-$(CONFIG_DV_DEMUXER) += dvdata.o
OBJS-$(CONFIG_DV_MUXER) += dvdata.o
-OBJS-$(CONFIG_FLAC_DEMUXER) += flacdec.o flacdata.o flac.o
-OBJS-$(CONFIG_FLAC_MUXER) += flacdec.o flacdata.o flac.o
+OBJS-$(CONFIG_FLAC_DEMUXER) += flacdec.o flacdata.o flac.o vorbis_data.o
+OBJS-$(CONFIG_FLAC_MUXER) += flacdec.o flacdata.o flac.o vorbis_data.o
OBJS-$(CONFIG_FLV_DEMUXER) += mpeg4audio.o
OBJS-$(CONFIG_GXF_DEMUXER) += mpeg12data.o
OBJS-$(CONFIG_IFF_DEMUXER) += iff.o
-OBJS-$(CONFIG_MATROSKA_AUDIO_MUXER) += xiph.o mpeg4audio.o \
+OBJS-$(CONFIG_MATROSKA_AUDIO_MUXER) += xiph.o mpeg4audio.o vorbis_data.o \
flacdec.o flacdata.o flac.o
OBJS-$(CONFIG_MATROSKA_DEMUXER) += mpeg4audio.o mpegaudiodata.o
OBJS-$(CONFIG_MATROSKA_MUXER) += xiph.o mpeg4audio.o \
flacdec.o flacdata.o flac.o \
- mpegaudiodata.o
+ mpegaudiodata.o vorbis_data.o
OBJS-$(CONFIG_MOV_DEMUXER) += mpeg4audio.o mpegaudiodata.o
OBJS-$(CONFIG_MOV_MUXER) += mpeg4audio.o mpegaudiodata.o
OBJS-$(CONFIG_MPEGTS_MUXER) += mpegvideo.o mpeg4audio.o
OBJS-$(CONFIG_MPEGTS_DEMUXER) += mpeg4audio.o mpegaudiodata.o
OBJS-$(CONFIG_NUT_MUXER) += mpegaudiodata.o
OBJS-$(CONFIG_OGG_DEMUXER) += flacdec.o flacdata.o flac.o \
- dirac.o mpeg12data.o
-OBJS-$(CONFIG_OGG_MUXER) += xiph.o flacdec.o flacdata.o flac.o
+ dirac.o mpeg12data.o vorbis_data.o
+OBJS-$(CONFIG_OGG_MUXER) += xiph.o flacdec.o flacdata.o flac.o \
+ vorbis_data.o
OBJS-$(CONFIG_RTP_MUXER) += mpegvideo.o xiph.o
OBJS-$(CONFIG_SPDIF_DEMUXER) += aacadtsdec.o mpeg4audio.o
OBJS-$(CONFIG_WEBM_MUXER) += xiph.o mpeg4audio.o \
flacdec.o flacdata.o flac.o \
- mpegaudiodata.o
+ mpegaudiodata.o vorbis_data.o
+OBJS-$(CONFIG_WTV_DEMUXER) += mpeg4audio.o mpegaudiodata.o
# external codec libraries
OBJS-$(CONFIG_LIBCELT_DECODER) += libcelt_dec.o
@@ -598,7 +602,8 @@ OBJS-$(CONFIG_DIRAC_PARSER) += dirac_parser.o
OBJS-$(CONFIG_DNXHD_PARSER) += dnxhd_parser.o
OBJS-$(CONFIG_DVBSUB_PARSER) += dvbsub_parser.o
OBJS-$(CONFIG_DVDSUB_PARSER) += dvdsub_parser.o
-OBJS-$(CONFIG_FLAC_PARSER) += flac_parser.o flacdata.o flac.o
+OBJS-$(CONFIG_FLAC_PARSER) += flac_parser.o flacdata.o flac.o \
+ vorbis_data.o
OBJS-$(CONFIG_H261_PARSER) += h261_parser.o
OBJS-$(CONFIG_H263_PARSER) += h263_parser.o
OBJS-$(CONFIG_H264_PARSER) += h264_parser.o h264.o h264_hl_motion.o \
@@ -691,14 +696,6 @@ $(SUBDIR)cos_fixed_tables.c: $(SUBDIR)costablegen$(HOSTEXESUF)
$(SUBDIR)sin_tables.c: $(SUBDIR)costablegen$(HOSTEXESUF)
$(M)./$< sin > $@
-ifdef CONFIG_MPEGAUDIO_HP
-$(SUBDIR)mpegaudio_tablegen$(HOSTEXESUF): HOSTCFLAGS += -DFRAC_BITS=23
-$(SUBDIR)mpegaudio_tablegen.ho: CPPFLAGS += -DFRAC_BITS=23
-else
-$(SUBDIR)mpegaudio_tablegen$(HOSTEXESUF): HOSTCFLAGS += -DFRAC_BITS=15
-$(SUBDIR)mpegaudio_tablegen.ho: CPPFLAGS += -DFRAC_BITS=15
-endif
-
ifdef CONFIG_SMALL
$(SUBDIR)%_tablegen$(HOSTEXESUF): HOSTCFLAGS += -DCONFIG_SMALL=1
else
diff --git a/libavcodec/a64multienc.c b/libavcodec/a64multienc.c
index aed28ad280..e9b3471925 100644
--- a/libavcodec/a64multienc.c
+++ b/libavcodec/a64multienc.c
@@ -216,7 +216,7 @@ static av_cold int a64multi_init_encoder(AVCodecContext *avctx)
avcodec_get_frame_defaults(&c->picture);
avctx->coded_frame = &c->picture;
- avctx->coded_frame->pict_type = FF_I_TYPE;
+ avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->key_frame = 1;
if (!avctx->codec_tag)
avctx->codec_tag = AV_RL32("a64m");
@@ -290,7 +290,7 @@ static int a64multi_encode_frame(AVCodecContext *avctx, unsigned char *buf,
/* fill up mc_meta_charset with data until lifetime exceeds */
if (c->mc_frame_counter < c->mc_lifetime) {
*p = *pict;
- p->pict_type = FF_I_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
to_meta_with_crop(avctx, p, meta + 32000 * c->mc_frame_counter);
c->mc_frame_counter++;
diff --git a/libavcodec/aaccoder.c b/libavcodec/aaccoder.c
index 6d55acbc45..187b4ad972 100644
--- a/libavcodec/aaccoder.c
+++ b/libavcodec/aaccoder.c
@@ -30,6 +30,8 @@
* add sane pulse detection
***********************************/
+#include "libavutil/libm.h" // brought forward to work around cygwin header breakage
+
#include <float.h>
#include <math.h>
#include "avcodec.h"
@@ -37,7 +39,6 @@
#include "aac.h"
#include "aacenc.h"
#include "aactab.h"
-#include "libavutil/libm.h"
/** bits needed to code codebook run value for long windows */
static const uint8_t run_value_bits_long[64] = {
diff --git a/libavcodec/aacdec.c b/libavcodec/aacdec.c
index 76b14a194c..48c0367b06 100644
--- a/libavcodec/aacdec.c
+++ b/libavcodec/aacdec.c
@@ -315,6 +315,10 @@ static int decode_pce(AVCodecContext *avctx, MPEG4AudioConfig *m4ac,
if (get_bits1(gb))
skip_bits(gb, 3); // mixdown_coeff_index and pseudo_surround
+ if (get_bits_left(gb) < 4 * (num_front + num_side + num_back + num_lfe + num_assoc_data + num_cc)) {
+ av_log(avctx, AV_LOG_ERROR, overread_err);
+ return -1;
+ }
decode_channel_map(new_che_pos[TYPE_CPE], new_che_pos[TYPE_SCE], AAC_CHANNEL_FRONT, gb, num_front);
decode_channel_map(new_che_pos[TYPE_CPE], new_che_pos[TYPE_SCE], AAC_CHANNEL_SIDE, gb, num_side );
decode_channel_map(new_che_pos[TYPE_CPE], new_che_pos[TYPE_SCE], AAC_CHANNEL_BACK, gb, num_back );
diff --git a/libavcodec/aacenc.c b/libavcodec/aacenc.c
index 8843cbdb59..b51fccded3 100644
--- a/libavcodec/aacenc.c
+++ b/libavcodec/aacenc.c
@@ -489,7 +489,7 @@ static int aac_encode_frame(AVCodecContext *avctx,
AACEncContext *s = avctx->priv_data;
int16_t *samples = s->samples, *samples2, *la;
ChannelElement *cpe;
- int i, j, chans, tag, start_ch;
+ int i, ch, w, chans, tag, start_ch;
const uint8_t *chan_map = aac_chan_configs[avctx->channels-1];
int chan_el_counter[4];
FFPsyWindowInfo windows[AAC_MAX_CHANNELS];
@@ -524,34 +524,33 @@ static int aac_encode_frame(AVCodecContext *avctx,
tag = chan_map[i+1];
chans = tag == TYPE_CPE ? 2 : 1;
cpe = &s->cpe[i];
- for (j = 0; j < chans; j++) {
- IndividualChannelStream *ics = &cpe->ch[j].ics;
- int k;
- int cur_channel = start_ch + j;
+ for (ch = 0; ch < chans; ch++) {
+ IndividualChannelStream *ics = &cpe->ch[ch].ics;
+ int cur_channel = start_ch + ch;
samples2 = samples + cur_channel;
la = samples2 + (448+64) * avctx->channels;
if (!data)
la = NULL;
if (tag == TYPE_LFE) {
- wi[j].window_type[0] = ONLY_LONG_SEQUENCE;
- wi[j].window_shape = 0;
- wi[j].num_windows = 1;
- wi[j].grouping[0] = 1;
+ wi[ch].window_type[0] = ONLY_LONG_SEQUENCE;
+ wi[ch].window_shape = 0;
+ wi[ch].num_windows = 1;
+ wi[ch].grouping[0] = 1;
} else {
- wi[j] = ff_psy_suggest_window(&s->psy, samples2, la, cur_channel,
+ wi[ch] = s->psy.model->window(&s->psy, samples2, la, cur_channel,
ics->window_sequence[0]);
}
ics->window_sequence[1] = ics->window_sequence[0];
- ics->window_sequence[0] = wi[j].window_type[0];
+ ics->window_sequence[0] = wi[ch].window_type[0];
ics->use_kb_window[1] = ics->use_kb_window[0];
- ics->use_kb_window[0] = wi[j].window_shape;
- ics->num_windows = wi[j].num_windows;
+ ics->use_kb_window[0] = wi[ch].window_shape;
+ ics->num_windows = wi[ch].num_windows;
ics->swb_sizes = s->psy.bands [ics->num_windows == 8];
ics->num_swb = tag == TYPE_LFE ? 12 : s->psy.num_bands[ics->num_windows == 8];
- for (k = 0; k < ics->num_windows; k++)
- ics->group_len[k] = wi[j].grouping[k];
+ for (w = 0; w < ics->num_windows; w++)
+ ics->group_len[w] = wi[ch].grouping[w];
- apply_window_and_mdct(avctx, s, &cpe->ch[j], samples2);
+ apply_window_and_mdct(avctx, s, &cpe->ch[ch], samples2);
}
start_ch += chans;
}
@@ -569,10 +568,10 @@ static int aac_encode_frame(AVCodecContext *avctx,
cpe = &s->cpe[i];
put_bits(&s->pb, 3, tag);
put_bits(&s->pb, 4, chan_el_counter[tag]++);
- for (j = 0; j < chans; j++) {
- s->cur_channel = start_ch + j;
- ff_psy_set_band_info(&s->psy, s->cur_channel, cpe->ch[j].coeffs, &wi[j]);
- s->coder->search_for_quantizers(avctx, s, &cpe->ch[j], s->lambda);
+ for (ch = 0; ch < chans; ch++) {
+ s->cur_channel = start_ch + ch;
+ s->psy.model->analyze(&s->psy, s->cur_channel, cpe->ch[ch].coeffs, &wi[ch]);
+ s->coder->search_for_quantizers(avctx, s, &cpe->ch[ch], s->lambda);
}
cpe->common_window = 0;
if (chans > 1
@@ -580,8 +579,8 @@ static int aac_encode_frame(AVCodecContext *avctx,
&& wi[0].window_shape == wi[1].window_shape) {
cpe->common_window = 1;
- for (j = 0; j < wi[0].num_windows; j++) {
- if (wi[0].grouping[j] != wi[1].grouping[j]) {
+ for (w = 0; w < wi[0].num_windows; w++) {
+ if (wi[0].grouping[w] != wi[1].grouping[w]) {
cpe->common_window = 0;
break;
}
@@ -598,9 +597,9 @@ static int aac_encode_frame(AVCodecContext *avctx,
encode_ms_info(&s->pb, cpe);
}
}
- for (j = 0; j < chans; j++) {
- s->cur_channel = start_ch + j;
- encode_individual_channel(avctx, s, &cpe->ch[j], cpe->common_window);
+ for (ch = 0; ch < chans; ch++) {
+ s->cur_channel = start_ch + ch;
+ encode_individual_channel(avctx, s, &cpe->ch[ch], cpe->common_window);
}
start_ch += chans;
}
diff --git a/libavcodec/aasc.c b/libavcodec/aasc.c
index 44bdcb0b63..e6f363de4c 100644
--- a/libavcodec/aasc.c
+++ b/libavcodec/aasc.c
@@ -50,8 +50,8 @@ static av_cold int aasc_decode_init(AVCodecContext *avctx)
AascContext *s = avctx->priv_data;
s->avctx = avctx;
-
avctx->pix_fmt = PIX_FMT_BGR24;
+ avcodec_get_frame_defaults(&s->frame);
return 0;
}
diff --git a/libavcodec/ac3.h b/libavcodec/ac3.h
index 6baf989394..fcb401c238 100644
--- a/libavcodec/ac3.h
+++ b/libavcodec/ac3.h
@@ -158,6 +158,7 @@ typedef struct AC3EncOptions {
/* other encoding options */
int allow_per_frame_metadata;
+ int stereo_rematrixing;
} AC3EncOptions;
diff --git a/libavcodec/ac3dec.c b/libavcodec/ac3dec.c
index 431f67dc23..b4aae2263a 100644
--- a/libavcodec/ac3dec.c
+++ b/libavcodec/ac3dec.c
@@ -1298,6 +1298,7 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data, int *data_size,
float *out_samples_flt = (float *)data;
int16_t *out_samples = (int16_t *)data;
int blk, ch, err;
+ int data_size_orig, data_size_tmp;
const uint8_t *channel_map;
const float *output[AC3_MAX_CHANNELS];
@@ -1314,6 +1315,7 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data, int *data_size,
init_get_bits(&s->gbc, buf, buf_size * 8);
/* parse the syncinfo */
+ data_size_orig = *data_size;
*data_size = 0;
err = parse_frame_header(s);
@@ -1397,6 +1399,11 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data, int *data_size,
channel_map = ff_ac3_dec_channel_map[s->output_mode & ~AC3_OUTPUT_LFEON][s->lfe_on];
for (ch = 0; ch < s->out_channels; ch++)
output[ch] = s->output[channel_map[ch]];
+ data_size_tmp = s->num_blocks * 256 * avctx->channels;
+ data_size_tmp *= avctx->sample_fmt == AV_SAMPLE_FMT_FLT ? sizeof(*out_samples_flt) : sizeof(*out_samples);
+ if (data_size_orig < data_size_tmp)
+ return -1;
+ *data_size = data_size_tmp;
for (blk = 0; blk < s->num_blocks; blk++) {
if (!err && decode_audio_block(s, blk)) {
av_log(avctx, AV_LOG_ERROR, "error decoding the audio block\n");
@@ -1410,8 +1417,6 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data, int *data_size,
out_samples += 256 * s->out_channels;
}
}
- *data_size = s->num_blocks * 256 * avctx->channels;
- *data_size *= avctx->sample_fmt == AV_SAMPLE_FMT_FLT ? sizeof(*out_samples_flt) : sizeof(*out_samples);
return FFMIN(buf_size, s->frame_size);
}
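
Side note on the ac3_decode_frame() change above: the caller's output buffer size is now checked up front and the frame is rejected if the buffer cannot hold all decoded samples, instead of the size being reported only after the samples have been written. A minimal sketch of the required-size arithmetic (illustration only; the helper name is hypothetical and not part of the patch):

    /* AC-3 decodes 256 samples per channel per audio block, so the output
     * buffer must hold num_blocks * 256 * channels samples of the negotiated
     * sample format (16-bit integer or float). */
    static int required_output_bytes(int num_blocks, int channels, int bytes_per_sample)
    {
        return num_blocks * 256 * channels * bytes_per_sample;
    }

    /* e.g. 6 blocks, 2 channels, 16-bit samples: 6 * 256 * 2 * 2 = 6144 bytes */
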
diff --git a/libavcodec/ac3dsp.c b/libavcodec/ac3dsp.c
index 0b5b501a27..4ec0f2a19c 100644
--- a/libavcodec/ac3dsp.c
+++ b/libavcodec/ac3dsp.c
@@ -164,8 +164,10 @@ static void ac3_extract_exponents_c(uint8_t *exp, int32_t *coef, int nb_coefs)
if (e >= 24) {
e = 24;
coef[i] = 0;
+ } else if (e < 0) {
+ e = 0;
+ coef[i] = av_clip(coef[i], -16777215, 16777215);
}
- av_assert2(e >= 0);
}
exp[i] = e;
}
diff --git a/libavcodec/ac3enc.c b/libavcodec/ac3enc.c
index 62a4ba161a..eb5a0f94e3 100644
--- a/libavcodec/ac3enc.c
+++ b/libavcodec/ac3enc.c
@@ -33,6 +33,7 @@
#include "libavutil/audioconvert.h"
#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
#include "libavutil/crc.h"
#include "libavutil/opt.h"
#include "avcodec.h"
@@ -52,12 +53,6 @@
/** Maximum number of exponent groups. +1 for separate DC exponent. */
#define AC3_MAX_EXP_GROUPS 85
-/* stereo rematrixing algorithms */
-#define AC3_REMATRIXING_IS_STATIC 0x1
-#define AC3_REMATRIXING_SUMS 0
-#define AC3_REMATRIXING_NONE 1
-#define AC3_REMATRIXING_ALWAYS 3
-
#if CONFIG_AC3ENC_FLOAT
#define MAC_COEF(d,a,b) ((d)+=(a)*(b))
typedef float SampleType;
@@ -137,10 +132,10 @@ typedef struct AC3EncodeContext {
int loro_surround_mix_level; ///< Lo/Ro surround mix level code
int cutoff; ///< user-specified cutoff frequency, in Hz
- int bandwidth_code[AC3_MAX_CHANNELS]; ///< bandwidth code (0 to 60) (chbwcod)
+ int bandwidth_code; ///< bandwidth code (0 to 60) (chbwcod)
int nb_coefs[AC3_MAX_CHANNELS];
- int rematrixing; ///< determines how rematrixing strategy is calculated
+ int rematrixing_enabled; ///< stereo rematrixing enabled
int num_rematrixing_bands; ///< number of rematrixing bands
/* bitrate allocation control */
@@ -202,44 +197,46 @@ static const float extmixlev_options[EXTMIXLEV_NUM_OPTIONS] = {
#if CONFIG_AC3ENC_FLOAT || !CONFIG_AC3_FLOAT_ENCODER //we need this exactly once compiled in
const AVOption ff_ac3_options[] = {
/* Metadata Options */
-{"per_frame_metadata", "Allow Changing Metadata Per-Frame", OFFSET(allow_per_frame_metadata), FF_OPT_TYPE_INT, 0, 0, 1, AC3ENC_PARAM},
+{"per_frame_metadata", "Allow Changing Metadata Per-Frame", OFFSET(allow_per_frame_metadata), FF_OPT_TYPE_INT, {.dbl = 0 }, 0, 1, AC3ENC_PARAM},
/* downmix levels */
-{"center_mixlev", "Center Mix Level", OFFSET(center_mix_level), FF_OPT_TYPE_FLOAT, LEVEL_MINUS_4POINT5DB, 0.0, 1.0, AC3ENC_PARAM},
-{"surround_mixlev", "Surround Mix Level", OFFSET(surround_mix_level), FF_OPT_TYPE_FLOAT, LEVEL_MINUS_6DB, 0.0, 1.0, AC3ENC_PARAM},
+{"center_mixlev", "Center Mix Level", OFFSET(center_mix_level), FF_OPT_TYPE_FLOAT, {.dbl = LEVEL_MINUS_4POINT5DB }, 0.0, 1.0, AC3ENC_PARAM},
+{"surround_mixlev", "Surround Mix Level", OFFSET(surround_mix_level), FF_OPT_TYPE_FLOAT, {.dbl = LEVEL_MINUS_6DB }, 0.0, 1.0, AC3ENC_PARAM},
/* audio production information */
-{"mixing_level", "Mixing Level", OFFSET(mixing_level), FF_OPT_TYPE_INT, -1, -1, 111, AC3ENC_PARAM},
-{"room_type", "Room Type", OFFSET(room_type), FF_OPT_TYPE_INT, -1, -1, 2, AC3ENC_PARAM, "room_type"},
- {"notindicated", "Not Indicated (default)", 0, FF_OPT_TYPE_CONST, 0, INT_MIN, INT_MAX, AC3ENC_PARAM, "room_type"},
- {"large", "Large Room", 0, FF_OPT_TYPE_CONST, 1, INT_MIN, INT_MAX, AC3ENC_PARAM, "room_type"},
- {"small", "Small Room", 0, FF_OPT_TYPE_CONST, 2, INT_MIN, INT_MAX, AC3ENC_PARAM, "room_type"},
+{"mixing_level", "Mixing Level", OFFSET(mixing_level), FF_OPT_TYPE_INT, {.dbl = -1 }, -1, 111, AC3ENC_PARAM},
+{"room_type", "Room Type", OFFSET(room_type), FF_OPT_TYPE_INT, {.dbl = -1 }, -1, 2, AC3ENC_PARAM, "room_type"},
+ {"notindicated", "Not Indicated (default)", 0, FF_OPT_TYPE_CONST, {.dbl = 0 }, INT_MIN, INT_MAX, AC3ENC_PARAM, "room_type"},
+ {"large", "Large Room", 0, FF_OPT_TYPE_CONST, {.dbl = 1 }, INT_MIN, INT_MAX, AC3ENC_PARAM, "room_type"},
+ {"small", "Small Room", 0, FF_OPT_TYPE_CONST, {.dbl = 2 }, INT_MIN, INT_MAX, AC3ENC_PARAM, "room_type"},
/* other metadata options */
-{"copyright", "Copyright Bit", OFFSET(copyright), FF_OPT_TYPE_INT, 0, 0, 1, AC3ENC_PARAM},
-{"dialnorm", "Dialogue Level (dB)", OFFSET(dialogue_level), FF_OPT_TYPE_INT, -31, -31, -1, AC3ENC_PARAM},
-{"dsur_mode", "Dolby Surround Mode", OFFSET(dolby_surround_mode), FF_OPT_TYPE_INT, 0, 0, 2, AC3ENC_PARAM, "dsur_mode"},
- {"notindicated", "Not Indicated (default)", 0, FF_OPT_TYPE_CONST, 0, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsur_mode"},
- {"on", "Dolby Surround Encoded", 0, FF_OPT_TYPE_CONST, 1, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsur_mode"},
- {"off", "Not Dolby Surround Encoded", 0, FF_OPT_TYPE_CONST, 2, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsur_mode"},
-{"original", "Original Bit Stream", OFFSET(original), FF_OPT_TYPE_INT, 1, 0, 1, AC3ENC_PARAM},
+{"copyright", "Copyright Bit", OFFSET(copyright), FF_OPT_TYPE_INT, {.dbl = 0 }, 0, 1, AC3ENC_PARAM},
+{"dialnorm", "Dialogue Level (dB)", OFFSET(dialogue_level), FF_OPT_TYPE_INT, {.dbl = -31 }, -31, -1, AC3ENC_PARAM},
+{"dsur_mode", "Dolby Surround Mode", OFFSET(dolby_surround_mode), FF_OPT_TYPE_INT, {.dbl = 0 }, 0, 2, AC3ENC_PARAM, "dsur_mode"},
+ {"notindicated", "Not Indicated (default)", 0, FF_OPT_TYPE_CONST, {.dbl = 0 }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsur_mode"},
+ {"on", "Dolby Surround Encoded", 0, FF_OPT_TYPE_CONST, {.dbl = 1 }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsur_mode"},
+ {"off", "Not Dolby Surround Encoded", 0, FF_OPT_TYPE_CONST, {.dbl = 2 }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsur_mode"},
+{"original", "Original Bit Stream", OFFSET(original), FF_OPT_TYPE_INT, {.dbl = 1 }, 0, 1, AC3ENC_PARAM},
/* extended bitstream information */
-{"dmix_mode", "Preferred Stereo Downmix Mode", OFFSET(preferred_stereo_downmix), FF_OPT_TYPE_INT, -1, -1, 2, AC3ENC_PARAM, "dmix_mode"},
- {"notindicated", "Not Indicated (default)", 0, FF_OPT_TYPE_CONST, 0, INT_MIN, INT_MAX, AC3ENC_PARAM, "dmix_mode"},
- {"ltrt", "Lt/Rt Downmix Preferred", 0, FF_OPT_TYPE_CONST, 1, INT_MIN, INT_MAX, AC3ENC_PARAM, "dmix_mode"},
- {"loro", "Lo/Ro Downmix Preferred", 0, FF_OPT_TYPE_CONST, 2, INT_MIN, INT_MAX, AC3ENC_PARAM, "dmix_mode"},
-{"ltrt_cmixlev", "Lt/Rt Center Mix Level", OFFSET(ltrt_center_mix_level), FF_OPT_TYPE_FLOAT, -1.0, -1.0, 2.0, AC3ENC_PARAM},
-{"ltrt_surmixlev", "Lt/Rt Surround Mix Level", OFFSET(ltrt_surround_mix_level), FF_OPT_TYPE_FLOAT, -1.0, -1.0, 2.0, AC3ENC_PARAM},
-{"loro_cmixlev", "Lo/Ro Center Mix Level", OFFSET(loro_center_mix_level), FF_OPT_TYPE_FLOAT, -1.0, -1.0, 2.0, AC3ENC_PARAM},
-{"loro_surmixlev", "Lo/Ro Surround Mix Level", OFFSET(loro_surround_mix_level), FF_OPT_TYPE_FLOAT, -1.0, -1.0, 2.0, AC3ENC_PARAM},
-{"dsurex_mode", "Dolby Surround EX Mode", OFFSET(dolby_surround_ex_mode), FF_OPT_TYPE_INT, -1, -1, 2, AC3ENC_PARAM, "dsurex_mode"},
- {"notindicated", "Not Indicated (default)", 0, FF_OPT_TYPE_CONST, 0, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsurex_mode"},
- {"on", "Dolby Surround EX Encoded", 0, FF_OPT_TYPE_CONST, 1, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsurex_mode"},
- {"off", "Not Dolby Surround EX Encoded", 0, FF_OPT_TYPE_CONST, 2, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsurex_mode"},
-{"dheadphone_mode", "Dolby Headphone Mode", OFFSET(dolby_headphone_mode), FF_OPT_TYPE_INT, -1, -1, 2, AC3ENC_PARAM, "dheadphone_mode"},
- {"notindicated", "Not Indicated (default)", 0, FF_OPT_TYPE_CONST, 0, INT_MIN, INT_MAX, AC3ENC_PARAM, "dheadphone_mode"},
- {"on", "Dolby Headphone Encoded", 0, FF_OPT_TYPE_CONST, 1, INT_MIN, INT_MAX, AC3ENC_PARAM, "dheadphone_mode"},
- {"off", "Not Dolby Headphone Encoded", 0, FF_OPT_TYPE_CONST, 2, INT_MIN, INT_MAX, AC3ENC_PARAM, "dheadphone_mode"},
-{"ad_conv_type", "A/D Converter Type", OFFSET(ad_converter_type), FF_OPT_TYPE_INT, -1, -1, 1, AC3ENC_PARAM, "ad_conv_type"},
- {"standard", "Standard (default)", 0, FF_OPT_TYPE_CONST, 0, INT_MIN, INT_MAX, AC3ENC_PARAM, "ad_conv_type"},
- {"hdcd", "HDCD", 0, FF_OPT_TYPE_CONST, 1, INT_MIN, INT_MAX, AC3ENC_PARAM, "ad_conv_type"},
+{"dmix_mode", "Preferred Stereo Downmix Mode", OFFSET(preferred_stereo_downmix), FF_OPT_TYPE_INT, {.dbl = -1 }, -1, 2, AC3ENC_PARAM, "dmix_mode"},
+ {"notindicated", "Not Indicated (default)", 0, FF_OPT_TYPE_CONST, {.dbl = 0 }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dmix_mode"},
+ {"ltrt", "Lt/Rt Downmix Preferred", 0, FF_OPT_TYPE_CONST, {.dbl = 1 }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dmix_mode"},
+ {"loro", "Lo/Ro Downmix Preferred", 0, FF_OPT_TYPE_CONST, {.dbl = 2 }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dmix_mode"},
+{"ltrt_cmixlev", "Lt/Rt Center Mix Level", OFFSET(ltrt_center_mix_level), FF_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, AC3ENC_PARAM},
+{"ltrt_surmixlev", "Lt/Rt Surround Mix Level", OFFSET(ltrt_surround_mix_level), FF_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, AC3ENC_PARAM},
+{"loro_cmixlev", "Lo/Ro Center Mix Level", OFFSET(loro_center_mix_level), FF_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, AC3ENC_PARAM},
+{"loro_surmixlev", "Lo/Ro Surround Mix Level", OFFSET(loro_surround_mix_level), FF_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, AC3ENC_PARAM},
+{"dsurex_mode", "Dolby Surround EX Mode", OFFSET(dolby_surround_ex_mode), FF_OPT_TYPE_INT, {.dbl = -1 }, -1, 2, AC3ENC_PARAM, "dsurex_mode"},
+ {"notindicated", "Not Indicated (default)", 0, FF_OPT_TYPE_CONST, {.dbl = 0 }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsurex_mode"},
+ {"on", "Dolby Surround EX Encoded", 0, FF_OPT_TYPE_CONST, {.dbl = 1 }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsurex_mode"},
+ {"off", "Not Dolby Surround EX Encoded", 0, FF_OPT_TYPE_CONST, {.dbl = 2 }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsurex_mode"},
+{"dheadphone_mode", "Dolby Headphone Mode", OFFSET(dolby_headphone_mode), FF_OPT_TYPE_INT, {.dbl = -1 }, -1, 2, AC3ENC_PARAM, "dheadphone_mode"},
+ {"notindicated", "Not Indicated (default)", 0, FF_OPT_TYPE_CONST, {.dbl = 0 }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dheadphone_mode"},
+ {"on", "Dolby Headphone Encoded", 0, FF_OPT_TYPE_CONST, {.dbl = 1 }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dheadphone_mode"},
+ {"off", "Not Dolby Headphone Encoded", 0, FF_OPT_TYPE_CONST, {.dbl = 2 }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dheadphone_mode"},
+{"ad_conv_type", "A/D Converter Type", OFFSET(ad_converter_type), FF_OPT_TYPE_INT, {.dbl = -1 }, -1, 1, AC3ENC_PARAM, "ad_conv_type"},
+ {"standard", "Standard (default)", 0, FF_OPT_TYPE_CONST, {.dbl = 0 }, INT_MIN, INT_MAX, AC3ENC_PARAM, "ad_conv_type"},
+ {"hdcd", "HDCD", 0, FF_OPT_TYPE_CONST, {.dbl = 1 }, INT_MIN, INT_MAX, AC3ENC_PARAM, "ad_conv_type"},
+/* Other Encoding Options */
+{"stereo_rematrixing", "Stereo Rematrixing", OFFSET(stereo_rematrixing), FF_OPT_TYPE_INT, {.dbl = 1 }, 0, 1, AC3ENC_PARAM},
{NULL}
};
#endif
@@ -405,28 +402,6 @@ static void apply_mdct(AC3EncodeContext *s)
/**
- * Initialize stereo rematrixing.
- * If the strategy does not change for each frame, set the rematrixing flags.
- */
-static void rematrixing_init(AC3EncodeContext *s)
-{
- if (s->channel_mode == AC3_CHMODE_STEREO)
- s->rematrixing = AC3_REMATRIXING_SUMS;
- else
- s->rematrixing = AC3_REMATRIXING_NONE;
- /* NOTE: AC3_REMATRIXING_ALWAYS might be used in
- the future in conjunction with channel coupling. */
-
- if (s->rematrixing & AC3_REMATRIXING_IS_STATIC) {
- int flag = (s->rematrixing == AC3_REMATRIXING_ALWAYS);
- s->blocks[0].new_rematrixing_strategy = 1;
- memset(s->blocks[0].rematrixing_flags, flag,
- sizeof(s->blocks[0].rematrixing_flags));
- }
-}
-
-
-/**
* Determine rematrixing flags for each block and band.
*/
static void compute_rematrixing_strategy(AC3EncodeContext *s)
@@ -435,16 +410,18 @@ static void compute_rematrixing_strategy(AC3EncodeContext *s)
int blk, bnd, i;
AC3Block *block, *block0;
- s->num_rematrixing_bands = 4;
-
- if (s->rematrixing & AC3_REMATRIXING_IS_STATIC)
+ if (s->channel_mode != AC3_CHMODE_STEREO)
return;
+ s->num_rematrixing_bands = 4;
+
nb_coefs = FFMIN(s->nb_coefs[0], s->nb_coefs[1]);
for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) {
block = &s->blocks[blk];
block->new_rematrixing_strategy = !blk;
+ if (!s->rematrixing_enabled)
+ continue;
for (bnd = 0; bnd < s->num_rematrixing_bands; bnd++) {
            /* calculate the sum of squared coeffs for one band in one block */
int start = ff_ac3_rematrix_band_tab[bnd];
@@ -488,7 +465,7 @@ static void apply_rematrixing(AC3EncodeContext *s)
int start, end;
uint8_t *flags;
- if (s->rematrixing == AC3_REMATRIXING_NONE)
+ if (!s->rematrixing_enabled)
return;
nb_coefs = FFMIN(s->nb_coefs[0], s->nb_coefs[1]);
@@ -518,11 +495,13 @@ static void apply_rematrixing(AC3EncodeContext *s)
*/
static av_cold void exponent_init(AC3EncodeContext *s)
{
- int i;
- for (i = 73; i < 256; i++) {
- exponent_group_tab[0][i] = (i - 1) / 3;
- exponent_group_tab[1][i] = (i + 2) / 6;
- exponent_group_tab[2][i] = (i + 8) / 12;
+ int expstr, i, grpsize;
+
+ for (expstr = EXP_D15-1; expstr <= EXP_D45-1; expstr++) {
+ grpsize = 3 << expstr;
+ for (i = 73; i < 256; i++) {
+ exponent_group_tab[expstr][i] = (i + grpsize - 4) / grpsize;
+ }
}
/* LFE */
exponent_group_tab[0][7] = 2;
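
Worked check of the generalized exponent_group_tab initialization above (my own arithmetic, not part of the patch): with grpsize = 3 << expstr the group count is (i + grpsize - 4) / grpsize, which for grpsize 3, 6 and 12 reduces to (i - 1) / 3, (i + 2) / 6 and (i + 8) / 12 respectively, i.e. exactly the three per-strategy formulas the removed loop spelled out.
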
@@ -556,55 +535,46 @@ static void extract_exponents(AC3EncodeContext *s)
/**
- * Calculate exponent strategies for all blocks in a single channel.
- */
-static void compute_exp_strategy_ch(AC3EncodeContext *s, uint8_t *exp_strategy,
- uint8_t *exp)
-{
- int blk, blk1;
- int exp_diff;
-
- /* estimate if the exponent variation & decide if they should be
- reused in the next frame */
- exp_strategy[0] = EXP_NEW;
- exp += AC3_MAX_COEFS;
- for (blk = 1; blk < AC3_MAX_BLOCKS; blk++) {
- exp_diff = s->dsp.sad[0](NULL, exp, exp - AC3_MAX_COEFS, 16, 16);
- if (exp_diff > EXP_DIFF_THRESHOLD)
- exp_strategy[blk] = EXP_NEW;
- else
- exp_strategy[blk] = EXP_REUSE;
- exp += AC3_MAX_COEFS;
- }
-
- /* now select the encoding strategy type : if exponents are often
- recoded, we use a coarse encoding */
- blk = 0;
- while (blk < AC3_MAX_BLOCKS) {
- blk1 = blk + 1;
- while (blk1 < AC3_MAX_BLOCKS && exp_strategy[blk1] == EXP_REUSE)
- blk1++;
- switch (blk1 - blk) {
- case 1: exp_strategy[blk] = EXP_D45; break;
- case 2:
- case 3: exp_strategy[blk] = EXP_D25; break;
- default: exp_strategy[blk] = EXP_D15; break;
- }
- blk = blk1;
- }
-}
-
-
-/**
* Calculate exponent strategies for all channels.
* Array arrangement is reversed to simplify the per-channel calculation.
*/
static void compute_exp_strategy(AC3EncodeContext *s)
{
- int ch, blk;
+ int ch, blk, blk1;
for (ch = 0; ch < s->fbw_channels; ch++) {
- compute_exp_strategy_ch(s, s->exp_strategy[ch], s->blocks[0].exp[ch]);
+ uint8_t *exp_strategy = s->exp_strategy[ch];
+ uint8_t *exp = s->blocks[0].exp[ch];
+ int exp_diff;
+
+ /* estimate the exponent variation and decide whether exponents should be
+ reused in the next frame */
+ exp_strategy[0] = EXP_NEW;
+ exp += AC3_MAX_COEFS;
+ for (blk = 1; blk < AC3_MAX_BLOCKS; blk++) {
+ exp_diff = s->dsp.sad[0](NULL, exp, exp - AC3_MAX_COEFS, 16, 16);
+ if (exp_diff > EXP_DIFF_THRESHOLD)
+ exp_strategy[blk] = EXP_NEW;
+ else
+ exp_strategy[blk] = EXP_REUSE;
+ exp += AC3_MAX_COEFS;
+ }
+
+ /* now select the encoding strategy type : if exponents are often
+ recoded, we use a coarse encoding */
+ blk = 0;
+ while (blk < AC3_MAX_BLOCKS) {
+ blk1 = blk + 1;
+ while (blk1 < AC3_MAX_BLOCKS && exp_strategy[blk1] == EXP_REUSE)
+ blk1++;
+ switch (blk1 - blk) {
+ case 1: exp_strategy[blk] = EXP_D45; break;
+ case 2:
+ case 3: exp_strategy[blk] = EXP_D25; break;
+ default: exp_strategy[blk] = EXP_D15; break;
+ }
+ blk = blk1;
+ }
}
if (s->lfe_on) {
ch = s->lfe_channel;
@@ -821,35 +791,53 @@ static void count_frame_bits_fixed(AC3EncodeContext *s)
 * no auxiliary data
*/
- /* header size */
+ /* header */
frame_bits = 65;
frame_bits += frame_bits_inc[s->channel_mode];
/* audio blocks */
for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) {
- frame_bits += s->fbw_channels * 2 + 2; /* blksw * c, dithflag * c, dynrnge, cplstre */
- if (s->channel_mode == AC3_CHMODE_STEREO) {
- frame_bits++; /* rematstr */
- }
- frame_bits += 2 * s->fbw_channels; /* chexpstr[2] * c */
+ /* block switch flags */
+ frame_bits += s->fbw_channels;
+
+ /* dither flags */
+ frame_bits += s->fbw_channels;
+
+ /* dynamic range */
+ frame_bits++;
+
+ /* coupling strategy */
+ frame_bits++;
+ if (!blk)
+ frame_bits++;
+
+ /* exponent strategy */
+ frame_bits += 2 * s->fbw_channels;
if (s->lfe_on)
- frame_bits++; /* lfeexpstr */
- frame_bits++; /* baie */
- frame_bits++; /* snr */
- frame_bits += 2; /* delta / skip */
+ frame_bits++;
+
+ /* bit allocation params */
+ frame_bits++;
+ if (!blk)
+ frame_bits += 2 + 2 + 2 + 2 + 3;
+
+ /* snr offsets and fast gain codes */
+ frame_bits++;
+ if (!blk)
+ frame_bits += 6 + s->channels * (4 + 3);
+
+ /* delta bit allocation */
+ frame_bits++;
+
+ /* skipped data */
+ frame_bits++;
}
- frame_bits++; /* cplinu for block 0 */
- /* bit alloc info */
- /* sdcycod[2], fdcycod[2], sgaincod[2], dbpbcod[2], floorcod[3] */
- /* csnroffset[6] */
- /* (fsnoffset[4] + fgaincod[4]) * c */
- frame_bits += 2*4 + 3 + 6 + s->channels * (4 + 3);
- /* auxdatae, crcrsv */
- frame_bits += 2;
+ /* auxiliary data */
+ frame_bits++;
/* CRC */
- frame_bits += 16;
+ frame_bits += 1 + 16;
s->frame_bits_fixed = frame_bits;
}
@@ -899,6 +887,7 @@ static void count_frame_bits(AC3EncodeContext *s)
int blk, ch;
int frame_bits = 0;
+ /* header */
if (opt->audio_production_info)
frame_bits += 7;
if (s->bitstream_id == 6) {
@@ -908,18 +897,22 @@ static void count_frame_bits(AC3EncodeContext *s)
frame_bits += 14;
}
+ /* audio blocks */
for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) {
/* stereo rematrixing */
- if (s->channel_mode == AC3_CHMODE_STEREO &&
- s->blocks[blk].new_rematrixing_strategy) {
- frame_bits += s->num_rematrixing_bands;
+ if (s->channel_mode == AC3_CHMODE_STEREO) {
+ frame_bits++;
+ if (s->blocks[blk].new_rematrixing_strategy)
+ frame_bits += s->num_rematrixing_bands;
}
+ /* bandwidth codes & gain range */
for (ch = 0; ch < s->fbw_channels; ch++) {
if (s->exp_strategy[ch][blk] != EXP_REUSE)
- frame_bits += 6 + 2; /* chbwcod[6], gainrng[2] */
+ frame_bits += 6 + 2;
}
}
+
s->frame_bits = s->frame_bits_fixed + frame_bits;
}
@@ -1005,7 +998,8 @@ static int bit_alloc(AC3EncodeContext *s, int snr_offset)
reset_block_bap(s);
mantissa_bits = 0;
for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) {
- AC3Block *block;
+ AC3Block *block = &s->blocks[blk];
+ AC3Block *ref_block;
// initialize grouped mantissa counts. these are set so that they are
// padded to the next whole group size when bits are counted in
// compute_mantissa_size_final
@@ -1017,14 +1011,17 @@ static int bit_alloc(AC3EncodeContext *s, int snr_offset)
blocks within a frame are the exponent values. We can take
advantage of that by reusing the bit allocation pointers
whenever we reuse exponents. */
- block = s->blocks[blk].exp_ref_block[ch];
+ ref_block = block->exp_ref_block[ch];
if (s->exp_strategy[ch][blk] != EXP_REUSE) {
- s->ac3dsp.bit_alloc_calc_bap(block->mask[ch], block->psd[ch], 0,
- s->nb_coefs[ch], snr_offset,
- s->bit_alloc.floor, ff_ac3_bap_tab,
- block->bap[ch]);
+ s->ac3dsp.bit_alloc_calc_bap(ref_block->mask[ch],
+ ref_block->psd[ch], 0,
+ s->nb_coefs[ch], snr_offset,
+ s->bit_alloc.floor, ff_ac3_bap_tab,
+ ref_block->bap[ch]);
}
- mantissa_bits += s->ac3dsp.compute_mantissa_size(mant_cnt, block->bap[ch], s->nb_coefs[ch]);
+ mantissa_bits += s->ac3dsp.compute_mantissa_size(mant_cnt,
+ ref_block->bap[ch],
+ s->nb_coefs[ch]);
}
mantissa_bits += compute_mantissa_size_final(mant_cnt);
}
@@ -1043,7 +1040,8 @@ static int cbr_bit_allocation(AC3EncodeContext *s)
int snr_offset, snr_incr;
bits_left = 8 * s->frame_size - (s->frame_bits + s->exponent_bits);
- av_assert2(bits_left >= 0);
+ if (bits_left < 0)
+ return AVERROR(EINVAL);
snr_offset = s->coarse_snr_offset << 4;
@@ -1122,27 +1120,6 @@ static int downgrade_exponents(AC3EncodeContext *s)
/**
- * Reduce the bandwidth to reduce the number of bits used for a given SNR offset.
- * This is a second fallback for when bit allocation still fails after exponents
- * have been downgraded.
- * @return non-zero if bandwidth reduction was unsuccessful
- */
-static int reduce_bandwidth(AC3EncodeContext *s, int min_bw_code)
-{
- int ch;
-
- if (s->bandwidth_code[0] > min_bw_code) {
- for (ch = 0; ch < s->fbw_channels; ch++) {
- s->bandwidth_code[ch]--;
- s->nb_coefs[ch] = s->bandwidth_code[ch] * 3 + 73;
- }
- return 0;
- }
- return -1;
-}
-
-
-/**
* Perform bit allocation search.
* Finds the SNR offset value that maximizes quality and fits in the specified
* frame size. Output is the SNR offset and a set of bit allocation pointers
@@ -1167,15 +1144,6 @@ static int compute_bit_allocation(AC3EncodeContext *s)
continue;
}
- /* fallback 2: reduce bandwidth */
- /* only do this if the user has not specified a specific cutoff
- frequency */
- if (!s->cutoff && !reduce_bandwidth(s, 0)) {
- process_exponents(s);
- ret = compute_bit_allocation(s);
- continue;
- }
-
/* fallbacks were not enough... */
break;
}
@@ -1436,7 +1404,7 @@ static void output_audio_block(AC3EncodeContext *s, int blk)
/* bandwidth */
for (ch = 0; ch < s->fbw_channels; ch++) {
if (s->exp_strategy[ch][blk] != EXP_REUSE)
- put_bits(&s->pb, 6, s->bandwidth_code[ch]);
+ put_bits(&s->pb, 6, s->bandwidth_code);
}
/* exponents */
@@ -1607,10 +1575,10 @@ static void dprint_options(AVCodecContext *avctx)
char strbuf[32];
switch (s->bitstream_id) {
- case 6: strncpy(strbuf, "AC-3 (alt syntax)", 32); break;
- case 8: strncpy(strbuf, "AC-3 (standard)", 32); break;
- case 9: strncpy(strbuf, "AC-3 (dnet half-rate)", 32); break;
- case 10: strncpy(strbuf, "AC-3 (dnet quater-rate", 32); break;
+ case 6: av_strlcpy(strbuf, "AC-3 (alt syntax)", 32); break;
+ case 8: av_strlcpy(strbuf, "AC-3 (standard)", 32); break;
+ case 9: av_strlcpy(strbuf, "AC-3 (dnet half-rate)", 32); break;
+ case 10: av_strlcpy(strbuf, "AC-3 (dnet quarter-rate)", 32); break;
default: snprintf(strbuf, 32, "ERROR");
}
av_dlog(avctx, "bitstream_id: %s (%d)\n", strbuf, s->bitstream_id);
@@ -1637,9 +1605,9 @@ static void dprint_options(AVCodecContext *avctx)
if (opt->audio_production_info) {
av_dlog(avctx, "mixing_level: %ddB\n", opt->mixing_level);
switch (opt->room_type) {
- case 0: strncpy(strbuf, "notindicated", 32); break;
- case 1: strncpy(strbuf, "large", 32); break;
- case 2: strncpy(strbuf, "small", 32); break;
+ case 0: av_strlcpy(strbuf, "notindicated", 32); break;
+ case 1: av_strlcpy(strbuf, "large", 32); break;
+ case 2: av_strlcpy(strbuf, "small", 32); break;
default: snprintf(strbuf, 32, "ERROR (%d)", opt->room_type);
}
av_dlog(avctx, "room_type: %s\n", strbuf);
@@ -1651,9 +1619,9 @@ static void dprint_options(AVCodecContext *avctx)
av_dlog(avctx, "dialnorm: %ddB\n", opt->dialogue_level);
if (s->channel_mode == AC3_CHMODE_STEREO) {
switch (opt->dolby_surround_mode) {
- case 0: strncpy(strbuf, "notindicated", 32); break;
- case 1: strncpy(strbuf, "on", 32); break;
- case 2: strncpy(strbuf, "off", 32); break;
+ case 0: av_strlcpy(strbuf, "notindicated", 32); break;
+ case 1: av_strlcpy(strbuf, "on", 32); break;
+ case 2: av_strlcpy(strbuf, "off", 32); break;
default: snprintf(strbuf, 32, "ERROR (%d)", opt->dolby_surround_mode);
}
av_dlog(avctx, "dsur_mode: %s\n", strbuf);
@@ -1665,9 +1633,9 @@ static void dprint_options(AVCodecContext *avctx)
if (s->bitstream_id == 6) {
if (opt->extended_bsi_1) {
switch (opt->preferred_stereo_downmix) {
- case 0: strncpy(strbuf, "notindicated", 32); break;
- case 1: strncpy(strbuf, "ltrt", 32); break;
- case 2: strncpy(strbuf, "loro", 32); break;
+ case 0: av_strlcpy(strbuf, "notindicated", 32); break;
+ case 1: av_strlcpy(strbuf, "ltrt", 32); break;
+ case 2: av_strlcpy(strbuf, "loro", 32); break;
default: snprintf(strbuf, 32, "ERROR (%d)", opt->preferred_stereo_downmix);
}
av_dlog(avctx, "dmix_mode: %s\n", strbuf);
@@ -1684,23 +1652,23 @@ static void dprint_options(AVCodecContext *avctx)
}
if (opt->extended_bsi_2) {
switch (opt->dolby_surround_ex_mode) {
- case 0: strncpy(strbuf, "notindicated", 32); break;
- case 1: strncpy(strbuf, "on", 32); break;
- case 2: strncpy(strbuf, "off", 32); break;
+ case 0: av_strlcpy(strbuf, "notindicated", 32); break;
+ case 1: av_strlcpy(strbuf, "on", 32); break;
+ case 2: av_strlcpy(strbuf, "off", 32); break;
default: snprintf(strbuf, 32, "ERROR (%d)", opt->dolby_surround_ex_mode);
}
av_dlog(avctx, "dsurex_mode: %s\n", strbuf);
switch (opt->dolby_headphone_mode) {
- case 0: strncpy(strbuf, "notindicated", 32); break;
- case 1: strncpy(strbuf, "on", 32); break;
- case 2: strncpy(strbuf, "off", 32); break;
+ case 0: av_strlcpy(strbuf, "notindicated", 32); break;
+ case 1: av_strlcpy(strbuf, "on", 32); break;
+ case 2: av_strlcpy(strbuf, "off", 32); break;
default: snprintf(strbuf, 32, "ERROR (%d)", opt->dolby_headphone_mode);
}
av_dlog(avctx, "dheadphone_mode: %s\n", strbuf);
switch (opt->ad_converter_type) {
- case 0: strncpy(strbuf, "standard", 32); break;
- case 1: strncpy(strbuf, "hdcd", 32); break;
+ case 0: av_strlcpy(strbuf, "standard", 32); break;
+ case 1: av_strlcpy(strbuf, "hdcd", 32); break;
default: snprintf(strbuf, 32, "ERROR (%d)", opt->ad_converter_type);
}
av_dlog(avctx, "ad_conv_type: %s\n", strbuf);
@@ -2062,6 +2030,9 @@ static av_cold int validate_options(AVCodecContext *avctx, AC3EncodeContext *s)
if (ret)
return ret;
+ s->rematrixing_enabled = s->options.stereo_rematrixing &&
+ (s->channel_mode == AC3_CHMODE_STEREO);
+
return 0;
}
@@ -2073,22 +2044,21 @@ static av_cold int validate_options(AVCodecContext *avctx, AC3EncodeContext *s)
*/
static av_cold void set_bandwidth(AC3EncodeContext *s)
{
- int ch, bw_code;
+ int ch;
if (s->cutoff) {
/* calculate bandwidth based on user-specified cutoff frequency */
int fbw_coeffs;
fbw_coeffs = s->cutoff * 2 * AC3_MAX_COEFS / s->sample_rate;
- bw_code = av_clip((fbw_coeffs - 73) / 3, 0, 60);
+ s->bandwidth_code = av_clip((fbw_coeffs - 73) / 3, 0, 60);
} else {
/* use default bandwidth setting */
- bw_code = ac3_bandwidth_tab[s->fbw_channels-1][s->bit_alloc.sr_code][s->frame_size_code/2];
+ s->bandwidth_code = ac3_bandwidth_tab[s->fbw_channels-1][s->bit_alloc.sr_code][s->frame_size_code/2];
}
/* set number of coefficients for each channel */
for (ch = 0; ch < s->fbw_channels; ch++) {
- s->bandwidth_code[ch] = bw_code;
- s->nb_coefs[ch] = bw_code * 3 + 73;
+ s->nb_coefs[ch] = s->bandwidth_code * 3 + 73;
}
if (s->lfe_on)
s->nb_coefs[s->lfe_channel] = 7; /* LFE channel always has 7 coefs */
@@ -2220,8 +2190,6 @@ static av_cold int ac3_encode_init(AVCodecContext *avctx)
set_bandwidth(s);
- rematrixing_init(s);
-
exponent_init(s);
bit_alloc_init(s);
diff --git a/libavcodec/adpcm.c b/libavcodec/adpcm.c
index c1ceca918a..ba312558b0 100644
--- a/libavcodec/adpcm.c
+++ b/libavcodec/adpcm.c
@@ -271,6 +271,42 @@ static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c, sho
return nibble;
}
+static inline unsigned char adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c, short sample)
+{
+ int delta = sample - c->prev_sample;
+ int diff, step = step_table[c->step_index];
+ int nibble = 8*(delta < 0);
+
+ delta= abs(delta);
+ diff = delta + (step >> 3);
+
+ if (delta >= step) {
+ nibble |= 4;
+ delta -= step;
+ }
+ step >>= 1;
+ if (delta >= step) {
+ nibble |= 2;
+ delta -= step;
+ }
+ step >>= 1;
+ if (delta >= step) {
+ nibble |= 1;
+ delta -= step;
+ }
+ diff -= delta;
+
+ if (nibble & 8)
+ c->prev_sample -= diff;
+ else
+ c->prev_sample += diff;
+
+ c->prev_sample = av_clip_int16(c->prev_sample);
+ c->step_index = av_clip(c->step_index + index_table[nibble], 0, 88);
+
+ return nibble;
+}
+
static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c, short sample)
{
int predictor, nibble, bias;
@@ -604,16 +640,14 @@ static int adpcm_encode_frame(AVCodecContext *avctx,
adpcm_compress_trellis(avctx, samples+ch, buf, &c->status[ch], 64);
for(i=0; i<64; i++)
put_bits(&pb, 4, buf[i^1]);
- c->status[ch].prev_sample = c->status[ch].predictor & ~0x7F;
} else {
for (i=0; i<64; i+=2){
int t1, t2;
- t1 = adpcm_ima_compress_sample(&c->status[ch], samples[avctx->channels*(i+0)+ch]);
- t2 = adpcm_ima_compress_sample(&c->status[ch], samples[avctx->channels*(i+1)+ch]);
+ t1 = adpcm_ima_qt_compress_sample(&c->status[ch], samples[avctx->channels*(i+0)+ch]);
+ t2 = adpcm_ima_qt_compress_sample(&c->status[ch], samples[avctx->channels*(i+1)+ch]);
put_bits(&pb, 4, t2);
put_bits(&pb, 4, t1);
}
- c->status[ch].prev_sample &= ~0x7F;
}
}
@@ -808,6 +842,32 @@ static inline short adpcm_ima_expand_nibble(ADPCMChannelStatus *c, char nibble,
return (short)c->predictor;
}
+static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble, int shift)
+{
+ int step_index;
+ int predictor;
+ int diff, step;
+
+ step = step_table[c->step_index];
+ step_index = c->step_index + index_table[nibble];
+ step_index = av_clip(step_index, 0, 88);
+
+ diff = step >> 3;
+ if (nibble & 4) diff += step;
+ if (nibble & 2) diff += step >> 1;
+ if (nibble & 1) diff += step >> 2;
+
+ if (nibble & 8)
+ predictor = c->predictor - diff;
+ else
+ predictor = c->predictor + diff;
+
+ c->predictor = av_clip_int16(predictor);
+ c->step_index = step_index;
+
+ return c->predictor;
+}
+
static inline short adpcm_ms_expand_nibble(ADPCMChannelStatus *c, char nibble)
{
int predictor;
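
For reference, a self-contained sketch of what the new adpcm_ima_qt_expand_nibble() above computes (illustration only; the names are hypothetical and the 89-entry IMA step table and 16-entry index table are assumed to be provided elsewhere):

    #include <stdint.h>

    extern const int16_t ima_step_table[89];   /* IMA ADPCM step sizes */
    extern const int8_t  ima_index_table[16];  /* per-nibble step-index adjustment */

    /* Expand one 4-bit nibble into a 16-bit sample, updating the channel state. */
    static int expand_nibble(int *predictor, int *step_index, int nibble)
    {
        int step = ima_step_table[*step_index];
        int diff = step >> 3;               /* base term */

        if (nibble & 4) diff += step;       /* magnitude bits add step, step/2, step/4 */
        if (nibble & 2) diff += step >> 1;
        if (nibble & 1) diff += step >> 2;

        *predictor += (nibble & 8) ? -diff : diff;   /* bit 3 is the sign */
        if (*predictor < -32768) *predictor = -32768;
        if (*predictor >  32767) *predictor =  32767;

        *step_index += ima_index_table[nibble];
        if (*step_index < 0)  *step_index = 0;
        if (*step_index > 88) *step_index = 88;

        return *predictor;
    }
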
@@ -1010,35 +1070,41 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
case CODEC_ID_ADPCM_IMA_QT:
n = buf_size - 2*avctx->channels;
for (channel = 0; channel < avctx->channels; channel++) {
+ int16_t predictor;
+ int step_index;
cs = &(c->status[channel]);
/* (pppppp) (piiiiiii) */
/* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
- cs->predictor = (*src++) << 8;
- cs->predictor |= (*src & 0x80);
- cs->predictor &= 0xFF80;
-
- /* sign extension */
- if(cs->predictor & 0x8000)
- cs->predictor -= 0x10000;
-
- cs->predictor = av_clip_int16(cs->predictor);
-
- cs->step_index = (*src++) & 0x7F;
+ predictor = AV_RB16(src);
+ step_index = predictor & 0x7F;
+ predictor &= 0xFF80;
+
+ src += 2;
+
+ if (cs->step_index == step_index) {
+ int diff = (int)predictor - cs->predictor;
+ if (diff < 0)
+ diff = - diff;
+ if (diff > 0x7f)
+ goto update;
+ } else {
+ update:
+ cs->step_index = step_index;
+ cs->predictor = predictor;
+ }
if (cs->step_index > 88){
av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
cs->step_index = 88;
}
- cs->step = step_table[cs->step_index];
-
samples = (short*)data + channel;
 for(m=32; n>0 && m>0; n--, m--) { /* in QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples) */
- *samples = adpcm_ima_expand_nibble(cs, src[0] & 0x0F, 3);
+ *samples = adpcm_ima_qt_expand_nibble(cs, src[0] & 0x0F, 3);
samples += avctx->channels;
- *samples = adpcm_ima_expand_nibble(cs, src[0] >> 4 , 3);
+ *samples = adpcm_ima_qt_expand_nibble(cs, src[0] >> 4 , 3);
samples += avctx->channels;
src ++;
}
diff --git a/libavcodec/alacenc.c b/libavcodec/alacenc.c
index 9d2865d51e..c3a1fdfa03 100644
--- a/libavcodec/alacenc.c
+++ b/libavcodec/alacenc.c
@@ -146,7 +146,7 @@ static void calc_predictor_params(AlacEncodeContext *s, int ch)
s->min_prediction_order,
s->max_prediction_order,
ALAC_MAX_LPC_PRECISION, coefs, shift,
- AV_LPC_TYPE_LEVINSON, 0,
+ FF_LPC_TYPE_LEVINSON, 0,
ORDER_METHOD_EST, ALAC_MAX_LPC_SHIFT, 1);
s->lpc[ch].lpc_order = opt_order;
@@ -457,7 +457,7 @@ static av_cold int alac_encode_init(AVCodecContext *avctx)
s->avctx = avctx;
ret = ff_lpc_init(&s->lpc_ctx, avctx->frame_size, s->max_prediction_order,
- AV_LPC_TYPE_LEVINSON);
+ FF_LPC_TYPE_LEVINSON);
return ret;
}
diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
index 1fa92215d5..fc74eeaf8c 100644
--- a/libavcodec/allcodecs.c
+++ b/libavcodec/allcodecs.c
@@ -158,7 +158,7 @@ void avcodec_register_all(void)
REGISTER_ENCDEC (MSMPEG4V2, msmpeg4v2);
REGISTER_ENCDEC (MSMPEG4V3, msmpeg4v3);
REGISTER_DECODER (MSRLE, msrle);
- REGISTER_DECODER (MSVIDEO1, msvideo1);
+ REGISTER_ENCDEC (MSVIDEO1, msvideo1);
REGISTER_DECODER (MSZH, mszh);
REGISTER_DECODER (MXPEG, mxpeg);
REGISTER_DECODER (NUV, nuv);
@@ -184,6 +184,7 @@ void avcodec_register_all(void)
REGISTER_ENCDEC (RV20, rv20);
REGISTER_DECODER (RV30, rv30);
REGISTER_DECODER (RV40, rv40);
+ REGISTER_DECODER (S302M, s302m);
REGISTER_ENCDEC (SGI, sgi);
REGISTER_DECODER (SMACKER, smacker);
REGISTER_DECODER (SMC, smc);
diff --git a/libavcodec/alpha/dsputil_alpha.c b/libavcodec/alpha/dsputil_alpha.c
index 96e7030e9d..6ce3f4bf15 100644
--- a/libavcodec/alpha/dsputil_alpha.c
+++ b/libavcodec/alpha/dsputil_alpha.c
@@ -270,9 +270,9 @@ static void put_pixels16_axp_asm(uint8_t *block, const uint8_t *pixels,
void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx)
{
- const int h264_high_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
+ const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
c->put_pixels_tab[0][0] = put_pixels16_axp_asm;
c->put_pixels_tab[0][1] = put_pixels16_x2_axp;
c->put_pixels_tab[0][2] = put_pixels16_y2_axp;
diff --git a/libavcodec/alpha/simple_idct_alpha.c b/libavcodec/alpha/simple_idct_alpha.c
index 7f396bfe5f..522efd2b4d 100644
--- a/libavcodec/alpha/simple_idct_alpha.c
+++ b/libavcodec/alpha/simple_idct_alpha.c
@@ -46,7 +46,7 @@
/* 0: all entries 0, 1: only first entry nonzero, 2: otherwise */
static inline int idct_row(DCTELEM *row)
{
- int_fast32_t a0, a1, a2, a3, b0, b1, b2, b3, t;
+ int a0, a1, a2, a3, b0, b1, b2, b3, t;
uint64_t l, r, t2;
l = ldq(row);
r = ldq(row + 4);
@@ -154,7 +154,7 @@ static inline int idct_row(DCTELEM *row)
static inline void idct_col(DCTELEM *col)
{
- int_fast32_t a0, a1, a2, a3, b0, b1, b2, b3;
+ int a0, a1, a2, a3, b0, b1, b2, b3;
col[0] += (1 << (COL_SHIFT - 1)) / W4;
@@ -235,7 +235,7 @@ static inline void idct_col2(DCTELEM *col)
uint64_t l, r;
for (i = 0; i < 8; ++i) {
- int_fast32_t a0 = col[i] + (1 << (COL_SHIFT - 1)) / W4;
+ int a0 = col[i] + (1 << (COL_SHIFT - 1)) / W4;
a0 *= W4;
col[i] = a0 >> COL_SHIFT;
diff --git a/libavcodec/anm.c b/libavcodec/anm.c
index e216c08441..02244f70e1 100644
--- a/libavcodec/anm.c
+++ b/libavcodec/anm.c
@@ -44,6 +44,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
if (avctx->extradata_size != 16*8 + 4*256)
return -1;
+ avcodec_get_frame_defaults(&s->frame);
s->frame.reference = 1;
buf = avctx->extradata + 16*8;
diff --git a/libavcodec/ansi.c b/libavcodec/ansi.c
index 9d6dc9d3d2..7043b7c9d9 100644
--- a/libavcodec/ansi.c
+++ b/libavcodec/ansi.c
@@ -81,6 +81,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
s->fg = DEFAULT_FG_COLOR;
s->bg = DEFAULT_BG_COLOR;
+ avcodec_get_frame_defaults(&s->frame);
if (!avctx->width || !avctx->height)
avcodec_set_dimensions(avctx, 80<<3, 25<<4);
@@ -226,7 +227,7 @@ static int execute_code(AVCodecContext * avctx, int c)
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
- s->frame.pict_type = FF_I_TYPE;
+ s->frame.pict_type = AV_PICTURE_TYPE_I;
s->frame.palette_has_changed = 1;
memcpy(s->frame.data[1], ff_cga_palette, 16 * 4);
erase_screen(avctx);
@@ -323,7 +324,7 @@ static int decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
- s->frame.pict_type = FF_I_TYPE;
+ s->frame.pict_type = AV_PICTURE_TYPE_I;
s->frame.palette_has_changed = 1;
memcpy(s->frame.data[1], ff_cga_palette, 16 * 4);
diff --git a/libavcodec/arm/ac3dsp_armv6.S b/libavcodec/arm/ac3dsp_armv6.S
index c6ce47a2f7..2b2f2acf22 100644
--- a/libavcodec/arm/ac3dsp_armv6.S
+++ b/libavcodec/arm/ac3dsp_armv6.S
@@ -79,5 +79,5 @@ function ff_ac3_bit_alloc_calc_bap_armv6, export=1
ldr r0, [sp, #12]
mov r1, #0
mov r2, #256
- b memset
+ b X(memset)
endfunc
diff --git a/libavcodec/arm/dsputil_init_arm.c b/libavcodec/arm/dsputil_init_arm.c
index 218d162687..0351412761 100644
--- a/libavcodec/arm/dsputil_init_arm.c
+++ b/libavcodec/arm/dsputil_init_arm.c
@@ -75,7 +75,7 @@ static void simple_idct_arm_add(uint8_t *dest, int line_size, DCTELEM *block)
void dsputil_init_arm(DSPContext* c, AVCodecContext *avctx)
{
- const int h264_high_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
+ const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
ff_put_pixels_clamped = c->put_pixels_clamped;
ff_add_pixels_clamped = c->add_pixels_clamped;
@@ -97,7 +97,7 @@ void dsputil_init_arm(DSPContext* c, AVCodecContext *avctx)
c->add_pixels_clamped = ff_add_pixels_clamped_arm;
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
c->put_pixels_tab[0][0] = ff_put_pixels16_arm;
c->put_pixels_tab[0][1] = ff_put_pixels16_x2_arm;
c->put_pixels_tab[0][2] = ff_put_pixels16_y2_arm;
diff --git a/libavcodec/arm/dsputil_init_armv6.c b/libavcodec/arm/dsputil_init_armv6.c
index fc0f7865f0..9acea4a1d6 100644
--- a/libavcodec/arm/dsputil_init_armv6.c
+++ b/libavcodec/arm/dsputil_init_armv6.c
@@ -72,7 +72,7 @@ int ff_pix_sum_armv6(uint8_t *pix, int line_size);
void av_cold ff_dsputil_init_armv6(DSPContext* c, AVCodecContext *avctx)
{
- const int h264_high_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
+ const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
if (!avctx->lowres && (avctx->idct_algo == FF_IDCT_AUTO ||
avctx->idct_algo == FF_IDCT_SIMPLEARMV6)) {
@@ -82,7 +82,7 @@ void av_cold ff_dsputil_init_armv6(DSPContext* c, AVCodecContext *avctx)
c->idct_permutation_type = FF_LIBMPEG2_IDCT_PERM;
}
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
c->put_pixels_tab[0][0] = ff_put_pixels16_armv6;
c->put_pixels_tab[0][1] = ff_put_pixels16_x2_armv6;
c->put_pixels_tab[0][2] = ff_put_pixels16_y2_armv6;
diff --git a/libavcodec/arm/dsputil_init_neon.c b/libavcodec/arm/dsputil_init_neon.c
index 9e456f32df..6faf3dc8d0 100644
--- a/libavcodec/arm/dsputil_init_neon.c
+++ b/libavcodec/arm/dsputil_init_neon.c
@@ -173,7 +173,7 @@ void ff_apply_window_int16_neon(int16_t *dst, const int16_t *src,
void ff_dsputil_init_neon(DSPContext *c, AVCodecContext *avctx)
{
- const int h264_high_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
+ const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
if (!avctx->lowres) {
if (avctx->idct_algo == FF_IDCT_AUTO ||
@@ -192,7 +192,7 @@ void ff_dsputil_init_neon(DSPContext *c, AVCodecContext *avctx)
}
}
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
c->clear_block = ff_clear_block_neon;
c->clear_blocks = ff_clear_blocks_neon;
@@ -223,7 +223,7 @@ void ff_dsputil_init_neon(DSPContext *c, AVCodecContext *avctx)
c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_neon;
if (CONFIG_H264_DECODER) {
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_neon;
c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_neon;
c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_neon;
diff --git a/libavcodec/arm/dsputil_iwmmxt.c b/libavcodec/arm/dsputil_iwmmxt.c
index 6db1837ba0..85be83148a 100644
--- a/libavcodec/arm/dsputil_iwmmxt.c
+++ b/libavcodec/arm/dsputil_iwmmxt.c
@@ -155,7 +155,7 @@ static void nop(uint8_t *block, const uint8_t *pixels, int line_size, int h)
void ff_dsputil_init_iwmmxt(DSPContext* c, AVCodecContext *avctx)
{
int mm_flags = AV_CPU_FLAG_IWMMXT; /* multimedia extension flags */
- const int h264_high_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
+ const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
if (avctx->dsp_mask) {
if (avctx->dsp_mask & AV_CPU_FLAG_FORCE)
@@ -168,7 +168,7 @@ void ff_dsputil_init_iwmmxt(DSPContext* c, AVCodecContext *avctx)
c->add_pixels_clamped = add_pixels_clamped_iwmmxt;
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
c->clear_blocks = clear_blocks_iwmmxt;
c->put_pixels_tab[0][0] = put_pixels16_iwmmxt;
diff --git a/libavcodec/arm/h264pred_init_arm.c b/libavcodec/arm/h264pred_init_arm.c
index 5b11b7da88..b1d4f005e8 100644
--- a/libavcodec/arm/h264pred_init_arm.c
+++ b/libavcodec/arm/h264pred_init_arm.c
@@ -74,7 +74,7 @@ static void ff_h264_pred_init_neon(H264PredContext *h, int codec_id, const int b
h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_plane_neon;
}
-void ff_h264_pred_init_arm(H264PredContext *h, int codec_id, const int bit_depth)
+void ff_h264_pred_init_arm(H264PredContext *h, int codec_id, int bit_depth)
{
if (HAVE_NEON) ff_h264_pred_init_neon(h, codec_id, bit_depth);
}
diff --git a/libavcodec/arm/vp8_armv6.S b/libavcodec/arm/vp8_armv6.S
index d981db814e..aee9c52608 100644
--- a/libavcodec/arm/vp8_armv6.S
+++ b/libavcodec/arm/vp8_armv6.S
@@ -180,7 +180,7 @@ function ff_decode_block_coeffs_armv6, export=1
rac_get_prob r5, r6, r7, r8, r0, r9, r10
mov r9, #8
addge r12, r12, #1
- movrel r4, ff_vp8_dct_cat_prob
+ movrel r4, X(ff_vp8_dct_cat_prob)
lsl r9, r9, r12
ldr r4, [r4, r12, lsl #2]
add r12, r9, #3
diff --git a/libavcodec/ass.c b/libavcodec/ass.c
index a23567cba4..cb0babf2c8 100644
--- a/libavcodec/ass.c
+++ b/libavcodec/ass.c
@@ -21,6 +21,7 @@
#include "avcodec.h"
#include "ass.h"
+#include "libavutil/avstring.h"
int ff_ass_subtitle_header(AVCodecContext *avctx,
const char *font, int font_size,
@@ -97,8 +98,7 @@ int ff_ass_add_rect(AVSubtitle *sub, const char *dialog,
rects[sub->num_rects]->type = SUBTITLE_ASS;
rects[sub->num_rects]->ass = av_malloc(len + dlen + 1);
strcpy (rects[sub->num_rects]->ass , header);
- strncpy(rects[sub->num_rects]->ass + len, dialog, dlen);
- rects[sub->num_rects]->ass[len+dlen] = 0;
+ av_strlcpy(rects[sub->num_rects]->ass + len, dialog, dlen + 1);
sub->num_rects++;
return dlen;
}
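The strncpy() plus manual NUL store in ff_ass_add_rect becomes a single av_strlcpy() with size dlen + 1: av_strlcpy always terminates within the given size, so the separate ass[len+dlen] = 0 write is redundant. A standalone illustration of the equivalence, using a local stand-in for av_strlcpy:

    #include <stdio.h>
    #include <string.h>

    /* approximation of av_strlcpy(): copy at most size-1 bytes, always
     * NUL-terminate (strncpy may leave the buffer unterminated) */
    static size_t my_strlcpy(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);
        if (size) {
            size_t n = len < size - 1 ? len : size - 1;
            memcpy(dst, src, n);
            dst[n] = 0;
        }
        return len;
    }

    int main(void)
    {
        char a[16], b[16];
        const char *dialog = "0,0,Default,,Hi";
        size_t dlen = 10;                        /* copy a prefix, as ass.c does */

        strncpy(a, dialog, dlen); a[dlen] = 0;   /* old: two steps               */
        my_strlcpy(b, dialog, dlen + 1);         /* new: one call, same result   */
        printf("'%s' == '%s'\n", a, b);
        return 0;
    }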
diff --git a/libavcodec/asv1.c b/libavcodec/asv1.c
index a0dc821367..ff0d9eff01 100644
--- a/libavcodec/asv1.c
+++ b/libavcodec/asv1.c
@@ -405,7 +405,7 @@ static int decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
- p->pict_type= FF_I_TYPE;
+ p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
av_fast_malloc(&a->bitstream_buffer, &a->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
@@ -470,7 +470,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
init_put_bits(&a->pb, buf, buf_size);
*p = *pict;
- p->pict_type= FF_I_TYPE;
+ p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
for(mb_y=0; mb_y<a->mb_height2; mb_y++){
diff --git a/libavcodec/aura.c b/libavcodec/aura.c
index 9041c7cb68..18024f1c08 100644
--- a/libavcodec/aura.c
+++ b/libavcodec/aura.c
@@ -39,6 +39,7 @@ static av_cold int aura_decode_init(AVCodecContext *avctx)
if (avctx->width & 0x3)
return -1;
avctx->pix_fmt = PIX_FMT_YUV422P;
+ avcodec_get_frame_defaults(&s->frame);
return 0;
}
diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
index 78715e4f0f..e4e6a9b23e 100644
--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@ -241,6 +241,7 @@ enum CodecID {
CODEC_ID_PCM_F64LE,
CODEC_ID_PCM_BLURAY,
CODEC_ID_PCM_LXF,
+ CODEC_ID_S302M,
/* various ADPCM codecs */
CODEC_ID_ADPCM_IMA_QT= 0x11000,
@@ -537,10 +538,11 @@ enum AVChromaLocation{
AVCHROMA_LOC_NB , ///< Not part of ABI
};
+#if FF_API_FLAC_GLOBAL_OPTS
/**
* LPC analysis type
*/
-enum AVLPCType {
+attribute_deprecated enum AVLPCType {
AV_LPC_TYPE_DEFAULT = -1, ///< use the codec default LPC type
AV_LPC_TYPE_NONE = 0, ///< do not use LPC prediction or use all zero coefficients
AV_LPC_TYPE_FIXED = 1, ///< fixed LPC coefficients
@@ -548,6 +550,7 @@ enum AVLPCType {
AV_LPC_TYPE_CHOLESKY = 3, ///< Cholesky factorization
AV_LPC_TYPE_NB , ///< Not part of ABI
};
+#endif
enum AVAudioServiceType {
AV_AUDIO_SERVICE_TYPE_MAIN = 0,
@@ -1025,6 +1028,36 @@ typedef struct AVPanScan{
* - decoding: set by libavcodec, read by user.\
*/\
int64_t best_effort_timestamp;\
+\
+ /**\
+ * reordered pos from the last AVPacket that has been input into the decoder\
+ * - encoding: unused\
+ * - decoding: Read by user.\
+ */\
+ int64_t pkt_pos;\
+\
+ /**\
+ * reordered sample aspect ratio for the video frame, 0/1 if unknown\unspecified\
+ * - encoding: unused\
+ * - decoding: Read by user.\
+ */\
+ AVRational sample_aspect_ratio;\
+\
+ /**\
+ * width and height of the video frame\
+ * - encoding: unused\
+ * - decoding: Read by user.\
+ */\
+ int width, height;\
+\
+ /**\
+ * format of the frame, -1 if unknown or unset\
+ * It should be cast to the corresponding enum (enum PixelFormat\
+ * for video, enum AVSampleFormat for audio)\
+ * - encoding: unused\
+ * - decoding: Read by user.\
+ */\
+ int format;\
#define FF_QSCALE_TYPE_MPEG1 0
@@ -2561,7 +2594,6 @@ typedef struct AVCodecContext {
* @deprecated Deprecated in favor of lpc_type and lpc_passes.
*/
int use_lpc;
-#endif
/**
* LPC coefficient precision - used by FLAC encoder
@@ -2569,6 +2601,7 @@ typedef struct AVCodecContext {
* - decoding: unused
*/
int lpc_coeff_precision;
+#endif
/**
* - encoding: Set by user.
@@ -2582,24 +2615,35 @@ typedef struct AVCodecContext {
*/
int max_prediction_order;
+#if FF_API_FLAC_GLOBAL_OPTS
+ /**
+ * @defgroup flac_opts FLAC options
+ * @deprecated Use FLAC encoder private options instead.
+ * @{
+ */
+
/**
* search method for selecting prediction order
* - encoding: Set by user.
* - decoding: unused
*/
- int prediction_order_method;
+ attribute_deprecated int prediction_order_method;
/**
* - encoding: Set by user.
* - decoding: unused
*/
- int min_partition_order;
+ attribute_deprecated int min_partition_order;
/**
* - encoding: Set by user.
* - decoding: unused
*/
- int max_partition_order;
+ attribute_deprecated int max_partition_order;
+ /**
+ * @}
+ */
+#endif
/**
* GOP timecode frame start number, in non drop frame format
@@ -2817,19 +2861,21 @@ typedef struct AVCodecContext {
int log_level_offset;
+#if FF_API_FLAC_GLOBAL_OPTS
/**
* Determines which LPC analysis algorithm to use.
* - encoding: Set by user
* - decoding: unused
*/
- enum AVLPCType lpc_type;
+ attribute_deprecated enum AVLPCType lpc_type;
/**
* Number of passes to use for Cholesky factorization during LPC analysis
* - encoding: Set by user
* - decoding: unused
*/
- int lpc_passes;
+ attribute_deprecated int lpc_passes;
+#endif
/**
* Number of slices.
@@ -4059,7 +4105,7 @@ typedef struct AVCodecParserContext {
/*!
* Set by parser to 1 for key frames and 0 for non-key frames.
* It is initialized to -1, so if the parser doesn't set this flag,
- * old-style fallback using FF_I_TYPE picture type as key frames
+ * old-style fallback using AV_PICTURE_TYPE_I picture type as key frames
* will be used.
*/
int key_frame;
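Besides the new CODEC_ID_S302M entry and the extra decoder-filled AVFrame fields (pkt_pos, sample_aspect_ratio, width/height, format, all documented as read by the user on decoding), the avcodec.h changes wrap the FLAC-specific AVCodecContext fields (lpc_type, lpc_passes, prediction_order_method, min/max_partition_order) and the AVLPCType enum in FF_API_FLAC_GLOBAL_OPTS with attribute_deprecated. That is the usual two-step removal: keep the fields for ABI compatibility, warn users now, drop them at the next major bump in favour of the FLAC encoder's private options. The idiom, sketched (the actual version cutoff lives in libavcodec/version.h):

    #if FF_API_FLAC_GLOBAL_OPTS
        /* superseded by FLAC encoder private options; compiles with a
         * deprecation warning until the next LIBAVCODEC_VERSION_MAJOR bump */
        attribute_deprecated int prediction_order_method;
    #endif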
diff --git a/libavcodec/avs.c b/libavcodec/avs.c
index e1a66a9930..06fcd5cd0e 100644
--- a/libavcodec/avs.c
+++ b/libavcodec/avs.c
@@ -63,7 +63,7 @@ avs_decode_frame(AVCodecContext * avctx,
return -1;
}
p->reference = 1;
- p->pict_type = FF_P_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_P;
p->key_frame = 0;
out = avs->picture.data[0];
@@ -93,7 +93,7 @@ avs_decode_frame(AVCodecContext * avctx,
switch (sub_type) {
case AVS_I_FRAME:
- p->pict_type = FF_I_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
case AVS_P_FRAME_3X3:
vect_w = 3;
@@ -146,7 +146,9 @@ avs_decode_frame(AVCodecContext * avctx,
static av_cold int avs_decode_init(AVCodecContext * avctx)
{
+ AvsContext *const avs = avctx->priv_data;
avctx->pix_fmt = PIX_FMT_PAL8;
+ avcodec_get_frame_defaults(&avs->picture);
return 0;
}
diff --git a/libavcodec/bethsoftvideo.c b/libavcodec/bethsoftvideo.c
index 5b06e5f12e..3869b87aa2 100644
--- a/libavcodec/bethsoftvideo.c
+++ b/libavcodec/bethsoftvideo.c
@@ -39,6 +39,7 @@ typedef struct BethsoftvidContext {
static av_cold int bethsoftvid_decode_init(AVCodecContext *avctx)
{
BethsoftvidContext *vid = avctx->priv_data;
+ avcodec_get_frame_defaults(&vid->frame);
vid->frame.reference = 1;
vid->frame.buffer_hints = FF_BUFFER_HINTS_VALID |
FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
diff --git a/libavcodec/bfi.c b/libavcodec/bfi.c
index 6fc1628d2b..9e40b86e50 100644
--- a/libavcodec/bfi.c
+++ b/libavcodec/bfi.c
@@ -41,6 +41,7 @@ static av_cold int bfi_decode_init(AVCodecContext * avctx)
{
BFIContext *bfi = avctx->priv_data;
avctx->pix_fmt = PIX_FMT_PAL8;
+ avcodec_get_frame_defaults(&bfi->frame);
bfi->dst = av_mallocz(avctx->width * avctx->height);
return 0;
}
@@ -69,7 +70,7 @@ static int bfi_decode_frame(AVCodecContext * avctx, void *data,
/* Set frame parameters and palette, if necessary */
if (!avctx->frame_number) {
- bfi->frame.pict_type = FF_I_TYPE;
+ bfi->frame.pict_type = AV_PICTURE_TYPE_I;
bfi->frame.key_frame = 1;
/* Setting the palette */
if(avctx->extradata_size>768) {
@@ -89,7 +90,7 @@ static int bfi_decode_frame(AVCodecContext * avctx, void *data,
memcpy(bfi->pal, bfi->frame.data[1], sizeof(bfi->pal));
bfi->frame.palette_has_changed = 1;
} else {
- bfi->frame.pict_type = FF_P_TYPE;
+ bfi->frame.pict_type = AV_PICTURE_TYPE_P;
bfi->frame.key_frame = 0;
bfi->frame.palette_has_changed = 0;
memcpy(bfi->frame.data[1], bfi->pal, sizeof(bfi->pal));
diff --git a/libavcodec/bfin/dsputil_bfin.c b/libavcodec/bfin/dsputil_bfin.c
index 01d7ec6a44..5b94472326 100644
--- a/libavcodec/bfin/dsputil_bfin.c
+++ b/libavcodec/bfin/dsputil_bfin.c
@@ -197,14 +197,14 @@ static int bfin_pix_abs8_xy2 (void *c, uint8_t *blk1, uint8_t *blk2, int line_si
void dsputil_init_bfin( DSPContext* c, AVCodecContext *avctx )
{
- const int h264_high_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
+ const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
c->get_pixels = ff_bfin_get_pixels;
c->diff_pixels = ff_bfin_diff_pixels;
c->put_pixels_clamped = ff_bfin_put_pixels_clamped;
c->add_pixels_clamped = ff_bfin_add_pixels_clamped;
- if (!h264_high_depth)
+ if (!high_bit_depth)
c->clear_blocks = bfin_clear_blocks;
c->pix_sum = ff_bfin_pix_sum;
c->pix_norm1 = ff_bfin_pix_norm1;
@@ -231,7 +231,7 @@ void dsputil_init_bfin( DSPContext* c, AVCodecContext *avctx )
c->sse[1] = ff_bfin_sse8;
c->sse[2] = ff_bfin_sse4;
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
c->put_pixels_tab[0][0] = bfin_put_pixels16;
c->put_pixels_tab[0][1] = bfin_put_pixels16_x2;
c->put_pixels_tab[0][2] = bfin_put_pixels16_y2;
diff --git a/libavcodec/bfin/mathops.h b/libavcodec/bfin/mathops.h
index a0e808c81a..50c03160ed 100644
--- a/libavcodec/bfin/mathops.h
+++ b/libavcodec/bfin/mathops.h
@@ -24,7 +24,6 @@
#include "config.h"
-#if CONFIG_MPEGAUDIO_HP
#define MULH(X,Y) ({ int xxo; \
__asm__ ( \
"a1 = %2.L * %1.L (FU);\n\t" \
@@ -34,15 +33,6 @@
"a1 = a1 >>> 16;\n\t" \
"%0 = (a0 += a1);\n\t" \
: "=d" (xxo) : "d" (X), "d" (Y) : "A0","A1"); xxo; })
-#else
-#define MULH(X,Y) ({ int xxo; \
- __asm__ ( \
- "a1 = %2.H * %1.L (IS,M);\n\t" \
- "a0 = %1.H * %2.H, a1+= %1.H * %2.L (IS,M);\n\t"\
- "a1 = a1 >>> 16;\n\t" \
- "%0 = (a0 += a1);\n\t" \
- : "=d" (xxo) : "d" (X), "d" (Y) : "A0","A1"); xxo; })
-#endif
/* signed 16x16 -> 32 multiply */
#define MUL16(a, b) ({ int xxo; \
diff --git a/libavcodec/bmp.c b/libavcodec/bmp.c
index f4a6b769c5..4c5166404b 100644
--- a/libavcodec/bmp.c
+++ b/libavcodec/bmp.c
@@ -200,7 +200,7 @@ static int bmp_decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
- p->pict_type = FF_I_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
buf = buf0 + hsize;
diff --git a/libavcodec/bmpenc.c b/libavcodec/bmpenc.c
index b8f9047c55..3719a539f5 100644
--- a/libavcodec/bmpenc.c
+++ b/libavcodec/bmpenc.c
@@ -74,7 +74,7 @@ static int bmp_encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_s
uint8_t *ptr;
unsigned char* buf0 = buf;
*p = *pict;
- p->pict_type= FF_I_TYPE;
+ p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
switch (avctx->pix_fmt) {
case PIX_FMT_RGB565:
diff --git a/libavcodec/c93.c b/libavcodec/c93.c
index ed6e91d831..31296395f7 100644
--- a/libavcodec/c93.c
+++ b/libavcodec/c93.c
@@ -47,6 +47,10 @@ typedef enum {
static av_cold int decode_init(AVCodecContext *avctx)
{
+ C93DecoderContext * const c93 = avctx->priv_data;
+
+ avcodec_get_frame_defaults(&c93->pictures[0]);
+ avcodec_get_frame_defaults(&c93->pictures[1]);
avctx->pix_fmt = PIX_FMT_PAL8;
return 0;
}
@@ -137,10 +141,10 @@ static int decode_frame(AVCodecContext *avctx, void *data,
stride = newpic->linesize[0];
if (buf[0] & C93_FIRST_FRAME) {
- newpic->pict_type = FF_I_TYPE;
+ newpic->pict_type = AV_PICTURE_TYPE_I;
newpic->key_frame = 1;
} else {
- newpic->pict_type = FF_P_TYPE;
+ newpic->pict_type = AV_PICTURE_TYPE_P;
newpic->key_frame = 0;
}
diff --git a/libavcodec/cavs.h b/libavcodec/cavs.h
index dfa320fa84..cb4ab2630b 100644
--- a/libavcodec/cavs.h
+++ b/libavcodec/cavs.h
@@ -233,15 +233,16 @@ extern const struct dec_2dvlc ff_cavs_chroma_dec[5];
extern const uint8_t ff_cavs_chroma_qp[64];
extern const uint8_t ff_cavs_scan3x3[4];
extern const uint8_t ff_cavs_partition_flags[30];
-extern const int_fast8_t ff_left_modifier_l[8];
-extern const int_fast8_t ff_top_modifier_l[8];
-extern const int_fast8_t ff_left_modifier_c[7];
-extern const int_fast8_t ff_top_modifier_c[7];
+extern const int8_t ff_left_modifier_l[8];
+extern const int8_t ff_top_modifier_l[8];
+extern const int8_t ff_left_modifier_c[7];
+extern const int8_t ff_top_modifier_c[7];
extern const cavs_vector ff_cavs_intra_mv;
extern const cavs_vector ff_cavs_un_mv;
extern const cavs_vector ff_cavs_dir_mv;
-static inline void modify_pred(const int_fast8_t *mod_table, int *mode) {
+static inline void modify_pred(const int8_t *mod_table, int *mode)
+{
*mode = mod_table[*mode];
if(*mode < 0) {
av_log(NULL, AV_LOG_ERROR, "Illegal intra prediction mode\n");
@@ -278,7 +279,7 @@ static inline void set_mv_intra(AVSContext *h) {
set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
h->mv[MV_BWD_X0] = ff_cavs_intra_mv;
set_mvs(&h->mv[MV_BWD_X0], BLK_16X16);
- if(h->pic_type != FF_B_TYPE)
+ if(h->pic_type != AV_PICTURE_TYPE_B)
h->col_type_base[h->mbidx] = I_8X8;
}
diff --git a/libavcodec/cavsdata.h b/libavcodec/cavsdata.h
index 9e52fd5cc4..a93405d380 100644
--- a/libavcodec/cavsdata.h
+++ b/libavcodec/cavsdata.h
@@ -497,9 +497,9 @@ static const uint8_t tc_tab[64] = {
5, 5, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, 9
};
-const int_fast8_t ff_left_modifier_l[8] = { 0,-1, 6,-1,-1, 7, 6, 7};
-const int_fast8_t ff_top_modifier_l[8] = {-1, 1, 5,-1,-1, 5, 7, 7};
-const int_fast8_t ff_left_modifier_c[7] = { 5,-1, 2,-1, 6, 5, 6};
-const int_fast8_t ff_top_modifier_c[7] = { 4, 1,-1,-1, 4, 6, 6};
+const int8_t ff_left_modifier_l[8] = { 0, -1, 6, -1, -1, 7, 6, 7 };
+const int8_t ff_top_modifier_l[8] = { -1, 1, 5, -1, -1, 5, 7, 7 };
+const int8_t ff_left_modifier_c[7] = { 5, -1, 2, -1, 6, 5, 6 };
+const int8_t ff_top_modifier_c[7] = { 4, 1, -1, -1, 4, 6, 6 };
#endif /* AVCODEC_CAVSDATA_H */
diff --git a/libavcodec/cavsdec.c b/libavcodec/cavsdec.c
index 7ff1a57eb5..c6ccb06524 100644
--- a/libavcodec/cavsdec.c
+++ b/libavcodec/cavsdec.c
@@ -220,7 +220,7 @@ static int decode_mb_i(AVSContext *h, int cbp_code) {
ff_cavs_modify_mb_i(h, &pred_mode_uv);
/* get coded block pattern */
- if(h->pic_type == FF_I_TYPE)
+ if(h->pic_type == AV_PICTURE_TYPE_I)
cbp_code = get_ue_golomb(gb);
if(cbp_code > 63){
av_log(h->s.avctx, AV_LOG_ERROR, "illegal intra cbp\n");
@@ -424,7 +424,7 @@ static inline int decode_slice_header(AVSContext *h, GetBitContext *gb) {
h->qp = get_bits(gb,6);
}
/* inter frame or second slice can have weighting params */
- if((h->pic_type != FF_I_TYPE) || (!h->pic_structure && h->mby >= h->mb_width/2))
+ if((h->pic_type != AV_PICTURE_TYPE_I) || (!h->pic_structure && h->mby >= h->mb_width/2))
if(get_bits1(gb)) { //slice_weighting_flag
av_log(h->s.avctx, AV_LOG_ERROR,
"weighted prediction not yet supported\n");
@@ -470,17 +470,17 @@ static int decode_pic(AVSContext *h) {
}
skip_bits(&s->gb,16);//bbv_dwlay
if(h->stc == PIC_PB_START_CODE) {
- h->pic_type = get_bits(&s->gb,2) + FF_I_TYPE;
- if(h->pic_type > FF_B_TYPE) {
+ h->pic_type = get_bits(&s->gb,2) + AV_PICTURE_TYPE_I;
+ if(h->pic_type > AV_PICTURE_TYPE_B) {
av_log(s->avctx, AV_LOG_ERROR, "illegal picture type\n");
return -1;
}
/* make sure we have the reference frames we need */
if(!h->DPB[0].data[0] ||
- (!h->DPB[1].data[0] && h->pic_type == FF_B_TYPE))
+ (!h->DPB[1].data[0] && h->pic_type == AV_PICTURE_TYPE_B))
return -1;
} else {
- h->pic_type = FF_I_TYPE;
+ h->pic_type = AV_PICTURE_TYPE_I;
if(get_bits1(&s->gb))
skip_bits(&s->gb,24);//time_code
/* old sample clips were all progressive and no low_delay,
@@ -502,7 +502,7 @@ static int decode_pic(AVSContext *h) {
h->picture.poc = get_bits(&s->gb,8)*2;
/* get temporal distances and MV scaling factors */
- if(h->pic_type != FF_B_TYPE) {
+ if(h->pic_type != AV_PICTURE_TYPE_B) {
h->dist[0] = (h->picture.poc - h->DPB[0].poc + 512) % 512;
} else {
h->dist[0] = (h->DPB[0].poc - h->picture.poc + 512) % 512;
@@ -510,7 +510,7 @@ static int decode_pic(AVSContext *h) {
h->dist[1] = (h->picture.poc - h->DPB[1].poc + 512) % 512;
h->scale_den[0] = h->dist[0] ? 512/h->dist[0] : 0;
h->scale_den[1] = h->dist[1] ? 512/h->dist[1] : 0;
- if(h->pic_type == FF_B_TYPE) {
+ if(h->pic_type == AV_PICTURE_TYPE_B) {
h->sym_factor = h->dist[0]*h->scale_den[1];
} else {
h->direct_den[0] = h->dist[0] ? 16384/h->dist[0] : 0;
@@ -529,12 +529,12 @@ static int decode_pic(AVSContext *h) {
skip_bits1(&s->gb); //repeat_first_field
h->qp_fixed = get_bits1(&s->gb);
h->qp = get_bits(&s->gb,6);
- if(h->pic_type == FF_I_TYPE) {
+ if(h->pic_type == AV_PICTURE_TYPE_I) {
if(!h->progressive && !h->pic_structure)
skip_bits1(&s->gb);//what is this?
skip_bits(&s->gb,4); //reserved bits
} else {
- if(!(h->pic_type == FF_B_TYPE && h->pic_structure == 1))
+ if(!(h->pic_type == AV_PICTURE_TYPE_B && h->pic_structure == 1))
h->ref_flag = get_bits1(&s->gb);
skip_bits(&s->gb,4); //reserved bits
h->skip_mode_flag = get_bits1(&s->gb);
@@ -546,12 +546,12 @@ static int decode_pic(AVSContext *h) {
} else {
h->alpha_offset = h->beta_offset = 0;
}
- if(h->pic_type == FF_I_TYPE) {
+ if(h->pic_type == AV_PICTURE_TYPE_I) {
do {
check_for_slice(h);
decode_mb_i(h, 0);
} while(ff_cavs_next_mb(h));
- } else if(h->pic_type == FF_P_TYPE) {
+ } else if(h->pic_type == AV_PICTURE_TYPE_P) {
do {
if(check_for_slice(h))
skip_count = -1;
@@ -567,7 +567,7 @@ static int decode_pic(AVSContext *h) {
decode_mb_p(h,mb_type);
}
} while(ff_cavs_next_mb(h));
- } else { /* FF_B_TYPE */
+ } else { /* AV_PICTURE_TYPE_B */
do {
if(check_for_slice(h))
skip_count = -1;
@@ -584,7 +584,7 @@ static int decode_pic(AVSContext *h) {
}
} while(ff_cavs_next_mb(h));
}
- if(h->pic_type != FF_B_TYPE) {
+ if(h->pic_type != AV_PICTURE_TYPE_B) {
if(h->DPB[1].data[0])
s->avctx->release_buffer(s->avctx, (AVFrame *)&h->DPB[1]);
h->DPB[1] = h->DPB[0];
@@ -684,7 +684,7 @@ static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size,
if(decode_pic(h))
break;
*data_size = sizeof(AVPicture);
- if(h->pic_type != FF_B_TYPE) {
+ if(h->pic_type != AV_PICTURE_TYPE_B) {
if(h->DPB[1].data[0]) {
*picture = *(AVFrame *) &h->DPB[1];
} else {
diff --git a/libavcodec/cinepak.c b/libavcodec/cinepak.c
index 52fde64813..f2cbdc406e 100644
--- a/libavcodec/cinepak.c
+++ b/libavcodec/cinepak.c
@@ -403,6 +403,7 @@ static av_cold int cinepak_decode_init(AVCodecContext *avctx)
avctx->pix_fmt = PIX_FMT_PAL8;
}
+ avcodec_get_frame_defaults(&s->frame);
s->frame.data[0] = NULL;
return 0;
diff --git a/libavcodec/cljr.c b/libavcodec/cljr.c
index 36b6cbbb94..c9b0911674 100644
--- a/libavcodec/cljr.c
+++ b/libavcodec/cljr.c
@@ -64,7 +64,7 @@ static int decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
- p->pict_type= FF_I_TYPE;
+ p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
init_get_bits(&a->gb, buf, buf_size);
@@ -100,7 +100,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
int size;
*p = *pict;
- p->pict_type= FF_I_TYPE;
+ p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
emms_c();
@@ -118,6 +118,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
static av_cold void common_init(AVCodecContext *avctx){
CLJRContext * const a = avctx->priv_data;
+ avcodec_get_frame_defaults(&a->picture);
avctx->coded_frame= (AVFrame*)&a->picture;
a->avctx= avctx;
}
diff --git a/libavcodec/cscd.c b/libavcodec/cscd.c
index 82a44defc7..9255503e05 100644
--- a/libavcodec/cscd.c
+++ b/libavcodec/cscd.c
@@ -183,7 +183,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
// flip upside down, add difference frame
if (buf[0] & 1) { // keyframe
- c->pic.pict_type = FF_I_TYPE;
+ c->pic.pict_type = AV_PICTURE_TYPE_I;
c->pic.key_frame = 1;
switch (c->bpp) {
case 16:
@@ -197,7 +197,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
c->linelen, c->height);
}
} else {
- c->pic.pict_type = FF_P_TYPE;
+ c->pic.pict_type = AV_PICTURE_TYPE_P;
c->pic.key_frame = 0;
switch (c->bpp) {
case 16:
@@ -231,6 +231,7 @@ static av_cold int decode_init(AVCodecContext *avctx) {
return 1;
}
c->bpp = avctx->bits_per_coded_sample;
+ avcodec_get_frame_defaults(&c->pic);
c->pic.data[0] = NULL;
c->linelen = avctx->width * avctx->bits_per_coded_sample / 8;
c->height = avctx->height;
diff --git a/libavcodec/cyuv.c b/libavcodec/cyuv.c
index 79a84412e7..1c665aefc8 100644
--- a/libavcodec/cyuv.c
+++ b/libavcodec/cyuv.c
@@ -53,6 +53,7 @@ static av_cold int cyuv_decode_init(AVCodecContext *avctx)
return -1;
s->height = avctx->height;
avctx->pix_fmt = PIX_FMT_YUV411P;
+ avcodec_get_frame_defaults(&s->frame);
return 0;
}
diff --git a/libavcodec/dca.c b/libavcodec/dca.c
index 1e26eedc3c..7a35631eea 100644
--- a/libavcodec/dca.c
+++ b/libavcodec/dca.c
@@ -1622,6 +1622,7 @@ static int dca_decode_frame(AVCodecContext * avctx,
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
+ int data_size_tmp;
int lfe_samples;
int num_core_channels = 0;
@@ -1792,6 +1793,10 @@ static int dca_decode_frame(AVCodecContext * avctx,
s->output = DCA_STEREO;
avctx->channel_layout = AV_CH_LAYOUT_STEREO;
}
+ else if (avctx->request_channel_layout & AV_CH_LAYOUT_NATIVE) {
+ static const int8_t dca_channel_order_native[9] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
+ s->channel_order_tab = dca_channel_order_native;
+ }
} else {
av_log(avctx, AV_LOG_ERROR, "Non standard configuration %d !\n",s->amode);
return -1;
@@ -1813,10 +1818,11 @@ static int dca_decode_frame(AVCodecContext * avctx,
return -1;
}
- /* ffdshow custom code */
- if (*data_size < (s->sample_blocks / 8) * 256 * sizeof(samples[0]) * channels)
+ data_size_tmp = (s->sample_blocks / 8) * 256 * channels;
+ data_size_tmp *= avctx->sample_fmt == AV_SAMPLE_FMT_FLT ? sizeof(*samples_flt) : sizeof(*samples);
+ if (*data_size < data_size_tmp)
return -1;
- *data_size = 256 / 8 * s->sample_blocks * sizeof(samples[0]) * channels;
+ *data_size = data_size_tmp;
/* filter to get final output */
for (i = 0; i < (s->sample_blocks / 8); i++) {
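The old ffdshow-derived check sized the output from samples[0] (16-bit); with a float sample format now possible, dca_decode_frame computes the required size explicitly: (sample_blocks / 8) blocks of 256 samples per channel, times the per-sample size of the negotiated format. For instance, with sample_blocks = 128 and 6 channels that is 16 * 256 * 6 = 24576 samples, i.e. 49152 bytes as s16 or 98304 bytes as float (the numbers are only illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int sample_blocks = 128, channels = 6, is_float = 1;
        int samples = (sample_blocks / 8) * 256 * channels;
        size_t bytes = (size_t)samples *
                       (is_float ? sizeof(float) : sizeof(int16_t));

        printf("%d samples -> %zu bytes\n", samples, bytes);
        return 0;
    }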
diff --git a/libavcodec/dct-test.c b/libavcodec/dct-test.c
index f8e981be9b..2abb05bd3b 100644
--- a/libavcodec/dct-test.c
+++ b/libavcodec/dct-test.c
@@ -195,7 +195,7 @@ static inline void mmx_emms(void)
static void dct_error(const char *name, int is_idct,
void (*fdct_func)(DCTELEM *block),
- void (*fdct_ref)(DCTELEM *block), int form, int test)
+ void (*fdct_ref)(DCTELEM *block), int form, int test, const int bits)
{
int it, i, scale;
int err_inf, v;
@@ -204,6 +204,7 @@ static void dct_error(const char *name, int is_idct,
int maxout=0;
int blockSumErrMax=0, blockSumErr;
AVLFG prng;
+ const int vals=1<<bits;
av_lfg_init(&prng, 1);
@@ -216,7 +217,7 @@ static void dct_error(const char *name, int is_idct,
switch(test){
case 0:
for(i=0;i<64;i++)
- block1[i] = (av_lfg_get(&prng) % 512) -256;
+ block1[i] = (av_lfg_get(&prng) % (2*vals)) -vals;
if (is_idct){
ff_ref_fdct(block1);
@@ -227,10 +228,10 @@ static void dct_error(const char *name, int is_idct,
case 1:{
int num = av_lfg_get(&prng) % 10 + 1;
for(i=0;i<num;i++)
- block1[av_lfg_get(&prng) % 64] = av_lfg_get(&prng) % 512 -256;
+ block1[av_lfg_get(&prng) % 64] = av_lfg_get(&prng) % (2*vals) -vals;
}break;
case 2:
- block1[0] = av_lfg_get(&prng) % 4096 - 2048;
+ block1[0] = av_lfg_get(&prng) % (16*vals) - (8*vals);
block1[63]= (block1[0]&1)^1;
break;
}
@@ -328,7 +329,7 @@ static void dct_error(const char *name, int is_idct,
switch(test){
case 0:
for(i=0;i<64;i++)
- block1[i] = av_lfg_get(&prng) % 512 -256;
+ block1[i] = av_lfg_get(&prng) % (2*vals) -vals;
if (is_idct){
ff_ref_fdct(block1);
@@ -338,10 +339,10 @@ static void dct_error(const char *name, int is_idct,
break;
case 1:{
case 2:
- block1[0] = av_lfg_get(&prng) % 512 -256;
- block1[1] = av_lfg_get(&prng) % 512 -256;
- block1[2] = av_lfg_get(&prng) % 512 -256;
- block1[3] = av_lfg_get(&prng) % 512 -256;
+ block1[0] = av_lfg_get(&prng) % (2*vals) -vals;
+ block1[1] = av_lfg_get(&prng) % (2*vals) -vals;
+ block1[2] = av_lfg_get(&prng) % (2*vals) -vals;
+ block1[3] = av_lfg_get(&prng) % (2*vals) -vals;
}break;
}
@@ -552,6 +553,7 @@ int main(int argc, char **argv)
int test_idct = 0, test_248_dct = 0;
int c,i;
int test=1;
+ int bits=8;
cpu_flags = av_get_cpu_flags();
ff_ref_dct_init();
@@ -582,6 +584,7 @@ int main(int argc, char **argv)
}
if(optind <argc) test= atoi(argv[optind]);
+ if(optind+1 < argc) bits= atoi(argv[optind+1]);
printf("ffmpeg DCT/IDCT test\n");
@@ -590,7 +593,7 @@ int main(int argc, char **argv)
} else {
for (i=0;algos[i].name;i++)
if (algos[i].is_idct == test_idct && !(~cpu_flags & algos[i].mm_support)) {
- dct_error (algos[i].name, algos[i].is_idct, algos[i].func, algos[i].ref, algos[i].format, test);
+ dct_error (algos[i].name, algos[i].is_idct, algos[i].func, algos[i].ref, algos[i].format, test, bits);
}
}
return 0;
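dct-test now takes an optional second positional argument selecting the input bit depth: test coefficients are drawn from a range derived from vals = 1 << bits instead of the hard-coded 512/256/4096 constants, so higher-precision input ranges can be exercised as well. The resulting ranges, spelled out (standalone):

    #include <stdio.h>

    int main(void)
    {
        for (int bits = 8; bits <= 10; bits++) {
            int vals = 1 << bits;
            /* block1[i] = av_lfg_get(&prng) % (2*vals) - vals  ->  [-vals, vals-1] */
            printf("bits=%2d: coefficients in [%6d, %6d], DC-only case in [%6d, %6d]\n",
                   bits, -vals, vals - 1, -8 * vals, 8 * vals - 1);
        }
        return 0;
    }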
diff --git a/libavcodec/dfa.c b/libavcodec/dfa.c
index 07d9d69328..598fedc980 100644
--- a/libavcodec/dfa.c
+++ b/libavcodec/dfa.c
@@ -186,7 +186,6 @@ static int decode_dds1(uint8_t *frame, int width, int height,
static int decode_bdlt(uint8_t *frame, int width, int height,
const uint8_t *src, const uint8_t *src_end)
{
- const uint8_t *frame_end = frame + width * height;
uint8_t *line_ptr;
int count, lines, segments;
diff --git a/libavcodec/dirac_parser.c b/libavcodec/dirac_parser.c
index 0a1135542f..b407168b41 100644
--- a/libavcodec/dirac_parser.c
+++ b/libavcodec/dirac_parser.c
@@ -194,7 +194,7 @@ static int dirac_combine_frame(AVCodecParserContext *s, AVCodecContext *avctx,
avctx->has_b_frames = 1;
}
if (avctx->has_b_frames && s->pts == s->dts)
- s->pict_type = FF_B_TYPE;
+ s->pict_type = AV_PICTURE_TYPE_B;
/* Finally have a complete Dirac data unit */
*buf = pc->dirac_unit;
diff --git a/libavcodec/dnxhddec.c b/libavcodec/dnxhddec.c
index 9b37d1d632..8cbe1a8111 100644
--- a/libavcodec/dnxhddec.c
+++ b/libavcodec/dnxhddec.c
@@ -55,7 +55,8 @@ static av_cold int dnxhd_decode_init(AVCodecContext *avctx)
ctx->avctx = avctx;
dsputil_init(&ctx->dsp, avctx);
avctx->coded_frame = &ctx->picture;
- ctx->picture.type = FF_I_TYPE;
+ avcodec_get_frame_defaults(&ctx->picture);
+ ctx->picture.type = AV_PICTURE_TYPE_I;
ctx->picture.key_frame = 1;
return 0;
}
diff --git a/libavcodec/dnxhdenc.c b/libavcodec/dnxhdenc.c
index 6d1a17fd0a..62bc9f0dd5 100644
--- a/libavcodec/dnxhdenc.c
+++ b/libavcodec/dnxhdenc.c
@@ -33,7 +33,7 @@
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[]={
- {"nitris_compat", "encode with Avid Nitris compatibility", offsetof(DNXHDEncContext, nitris_compat), FF_OPT_TYPE_INT, 0, 0, 1, VE},
+ {"nitris_compat", "encode with Avid Nitris compatibility", offsetof(DNXHDEncContext, nitris_compat), FF_OPT_TYPE_INT, {.dbl = 0}, 0, 1, VE},
{NULL}
};
static const AVClass class = { "dnxhd", av_default_item_name, options, LIBAVUTIL_VERSION_INT };
@@ -222,7 +222,7 @@ static int dnxhd_encode_init(AVCodecContext *avctx)
FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_qscale, ctx->m.mb_num *sizeof(uint8_t) , fail);
ctx->frame.key_frame = 1;
- ctx->frame.pict_type = FF_I_TYPE;
+ ctx->frame.pict_type = AV_PICTURE_TYPE_I;
ctx->m.avctx->coded_frame = &ctx->frame;
if (avctx->thread_count > MAX_THREADS) {
diff --git a/libavcodec/dpxenc.c b/libavcodec/dpxenc.c
index 1d637b4e81..f69cfdc94e 100644
--- a/libavcodec/dpxenc.c
+++ b/libavcodec/dpxenc.c
@@ -35,7 +35,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
DPXContext *s = avctx->priv_data;
avctx->coded_frame = &s->picture;
- avctx->coded_frame->pict_type = FF_I_TYPE;
+ avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->key_frame = 1;
s->big_endian = 1;
@@ -136,7 +136,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
switch(s->bits_per_component) {
case 8:
case 16:
- size = avpicture_layout((AVPicture*)data, avctx->pix_fmt,
+ size = avpicture_layout(data, avctx->pix_fmt,
avctx->width, avctx->height,
buf + HEADER_SIZE, buf_size - HEADER_SIZE);
if (size < 0)
@@ -146,7 +146,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
size = avctx->height * avctx->width * 4;
if (buf_size < HEADER_SIZE + size)
return -1;
- encode_rgb48_10bit(avctx, (AVPicture*)data, buf + HEADER_SIZE);
+ encode_rgb48_10bit(avctx, data, buf + HEADER_SIZE);
break;
default:
av_log(avctx, AV_LOG_ERROR, "Unsupported bit depth: %d\n", s->bits_per_component);
@@ -160,13 +160,13 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
}
AVCodec ff_dpx_encoder = {
- "dpx",
- AVMEDIA_TYPE_VIDEO,
- CODEC_ID_DPX,
- sizeof(DPXContext),
- encode_init,
- encode_frame,
- .pix_fmts= (const enum PixelFormat[]){
+ .name = "dpx",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = CODEC_ID_DPX,
+ .priv_data_size = sizeof(DPXContext),
+ .init = encode_init,
+ .encode = encode_frame,
+ .pix_fmts = (const enum PixelFormat[]){
PIX_FMT_RGB24,
PIX_FMT_RGBA,
PIX_FMT_RGB48LE,
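ff_dpx_encoder switches from positional AVCodec initialization to C99 designated initializers, matching the direction the rest of the codec table has been moving: naming the fields means adding or reordering AVCodec members cannot silently shift the values into the wrong slots. The resulting shape (abridged from the hunk above, not the full definition):

    AVCodec ff_dpx_encoder = {
        .name           = "dpx",
        .type           = AVMEDIA_TYPE_VIDEO,
        .id             = CODEC_ID_DPX,
        .priv_data_size = sizeof(DPXContext),
        .init           = encode_init,
        .encode         = encode_frame,
        /* .pix_fmts, .long_name, ... unchanged */
    };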
diff --git a/libavcodec/dsicinav.c b/libavcodec/dsicinav.c
index b4e0579a7b..f12560714a 100644
--- a/libavcodec/dsicinav.c
+++ b/libavcodec/dsicinav.c
@@ -94,6 +94,7 @@ static av_cold int cinvideo_decode_init(AVCodecContext *avctx)
cin->avctx = avctx;
avctx->pix_fmt = PIX_FMT_PAL8;
+ avcodec_get_frame_defaults(&cin->frame);
cin->frame.data[0] = NULL;
cin->bitmap_size = avctx->width * avctx->height;
diff --git a/libavcodec/dsputil.c b/libavcodec/dsputil.c
index 162458a7b5..0e596b1b01 100644
--- a/libavcodec/dsputil.c
+++ b/libavcodec/dsputil.c
@@ -43,15 +43,15 @@ uint8_t ff_cropTbl[256 + 2 * MAX_NEG_CROP] = {0, };
uint32_t ff_squareTbl[512] = {0, };
#define BIT_DEPTH 9
-#include "dsputil_internal.h"
+#include "dsputil_template.c"
#undef BIT_DEPTH
#define BIT_DEPTH 10
-#include "dsputil_internal.h"
+#include "dsputil_template.c"
#undef BIT_DEPTH
#define BIT_DEPTH 8
-#include "dsputil_internal.h"
+#include "dsputil_template.c"
// 0x7f7f7f7f or 0x7f7f7f7f7f7f7f7f or whatever, depending on the cpu's native arithmetic size
#define pb_7f (~0UL/255 * 0x7f)
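Renaming dsputil_internal.h to dsputil_template.c makes the multiple-inclusion scheme explicit: the same template body is compiled three times with BIT_DEPTH set to 9, 10 and then 8, and the FUNC()/FUNCC() macros paste the depth into every generated function name. A single-file miniature of the trick (the body is duplicated inline here only so the demo compiles on its own; the real code #includes the template instead, and these are illustrative macros, not the real ones):

    #include <stdio.h>

    #define FUNC3(a, b) a ## _ ## b
    #define FUNC2(a, b) FUNC3(a, b)
    #define FUNC(name)  FUNC2(name, BIT_DEPTH)

    #define BIT_DEPTH 8
    static void FUNC(clear_block)(void) { printf("%d-bit variant\n", BIT_DEPTH); }
    #undef BIT_DEPTH

    #define BIT_DEPTH 10
    static void FUNC(clear_block)(void) { printf("%d-bit variant\n", BIT_DEPTH); }
    #undef BIT_DEPTH

    int main(void)
    {
        clear_block_8();     /* generated names: one function per depth */
        clear_block_10();
        return 0;
    }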
diff --git a/libavcodec/dsputil_internal.h b/libavcodec/dsputil_template.c
index f3856bb294..1d73a7d858 100644
--- a/libavcodec/dsputil_internal.h
+++ b/libavcodec/dsputil_template.c
@@ -27,7 +27,7 @@
* DSP utils
*/
-#include "h264_high_depth.h"
+#include "high_bit_depth.h"
static inline void FUNC(copy_block2)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
{
@@ -886,7 +886,6 @@ H264_CHROMA_MC(avg_ , op_avg)
#undef op_avg
#undef op_put
-#if 1
#define H264_LOWPASS(OPNAME, OP, OP2) \
static av_unused void FUNC(OPNAME ## h264_qpel2_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
const int h=2;\
@@ -1347,7 +1346,6 @@ H264_MC(avg_, 16)
#undef op_put
#undef op2_avg
#undef op2_put
-#endif
#if BIT_DEPTH == 8
# define put_h264_qpel8_mc00_8_c ff_put_pixels8x8_8_c
@@ -1391,3 +1389,4 @@ static void FUNCC(clear_blocks)(DCTELEM *blocks)
{
memset(blocks, 0, sizeof(dctcoef)*6*64);
}
+
diff --git a/libavcodec/dv.c b/libavcodec/dv.c
index 5fca22f9f7..9b24d13967 100644
--- a/libavcodec/dv.c
+++ b/libavcodec/dv.c
@@ -1093,9 +1093,10 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
if (s->picture.data[0])
avctx->release_buffer(avctx, &s->picture);
+ avcodec_get_frame_defaults(&s->picture);
s->picture.reference = 0;
s->picture.key_frame = 1;
- s->picture.pict_type = FF_I_TYPE;
+ s->picture.pict_type = AV_PICTURE_TYPE_I;
avctx->pix_fmt = s->sys->pix_fmt;
avctx->time_base = s->sys->time_base;
avcodec_set_dimensions(avctx, s->sys->width, s->sys->height);
@@ -1264,7 +1265,7 @@ static int dvvideo_encode_frame(AVCodecContext *c, uint8_t *buf, int buf_size,
c->pix_fmt = s->sys->pix_fmt;
s->picture = *((AVFrame *)data);
s->picture.key_frame = 1;
- s->picture.pict_type = FF_I_TYPE;
+ s->picture.pict_type = AV_PICTURE_TYPE_I;
s->buf = buf;
c->execute(c, dv_encode_video_segment, s->sys->work_chunks, NULL,
diff --git a/libavcodec/dvbsubdec.c b/libavcodec/dvbsubdec.c
index 288e6f51fe..cd2b53decf 100644
--- a/libavcodec/dvbsubdec.c
+++ b/libavcodec/dvbsubdec.c
@@ -321,21 +321,10 @@ static void delete_region_display_list(DVBSubContext *ctx, DVBSubRegion *region)
}
-static void delete_state(DVBSubContext *ctx)
+static void delete_cluts(DVBSubContext *ctx)
{
- DVBSubRegion *region;
DVBSubCLUT *clut;
- while (ctx->region_list) {
- region = ctx->region_list;
-
- ctx->region_list = region->next;
-
- delete_region_display_list(ctx, region);
- av_free(region->pbuf);
- av_free(region);
- }
-
while (ctx->clut_list) {
clut = ctx->clut_list;
@@ -343,12 +332,35 @@ static void delete_state(DVBSubContext *ctx)
av_free(clut);
}
+}
- av_freep(&ctx->display_definition);
+static void delete_objects(DVBSubContext *ctx)
+{
+ DVBSubObject *object;
- /* Should already be null */
- if (ctx->object_list)
- av_log(0, AV_LOG_ERROR, "Memory deallocation error!\n");
+ while (ctx->object_list) {
+ object = ctx->object_list;
+
+ ctx->object_list = object->next;
+
+ av_free(object);
+ }
+}
+
+static void delete_regions(DVBSubContext *ctx)
+{
+ DVBSubRegion *region;
+
+ while (ctx->region_list) {
+ region = ctx->region_list;
+
+ ctx->region_list = region->next;
+
+ delete_region_display_list(ctx, region);
+
+ av_free(region->pbuf);
+ av_free(region);
+ }
}
static av_cold int dvbsub_init_decoder(AVCodecContext *avctx)
@@ -433,7 +445,13 @@ static av_cold int dvbsub_close_decoder(AVCodecContext *avctx)
DVBSubContext *ctx = avctx->priv_data;
DVBSubRegionDisplay *display;
- delete_state(ctx);
+ delete_regions(ctx);
+
+ delete_objects(ctx);
+
+ delete_cluts(ctx);
+
+ av_freep(&ctx->display_definition);
while (ctx->display_list) {
display = ctx->display_list;
@@ -1125,7 +1143,9 @@ static void dvbsub_parse_page_segment(AVCodecContext *avctx,
av_dlog(avctx, "Page time out %ds, state %d\n", ctx->time_out, page_state);
if (page_state == 2) {
- delete_state(ctx);
+ delete_regions(ctx);
+ delete_objects(ctx);
+ delete_cluts(ctx);
}
tmp_display_list = ctx->display_list;
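delete_state() freed regions, CLUTs and the display definition in one shot, which is the wrong granularity for its two callers: a page_state == 2 reset in dvbsub_parse_page_segment should drop regions, objects and CLUTs but keep the display definition, while decoder close must release everything (and previously only logged an error if objects were still around instead of freeing them). Splitting it into delete_regions()/delete_objects()/delete_cluts() lets each call site compose what it needs; roughly (a sketch of the two call sites, the wrapper names here are invented for the illustration):

    static void page_reset(DVBSubContext *ctx)      /* page_state == 2          */
    {
        delete_regions(ctx);
        delete_objects(ctx);
        delete_cluts(ctx);                          /* display definition kept  */
    }

    static void decoder_close(DVBSubContext *ctx)   /* dvbsub_close_decoder()   */
    {
        delete_regions(ctx);
        delete_objects(ctx);
        delete_cluts(ctx);
        av_freep(&ctx->display_definition);         /* now freed only here      */
    }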
diff --git a/libavcodec/dvdsubdec.c b/libavcodec/dvdsubdec.c
index bb3e124bcd..6d5973c59b 100644
--- a/libavcodec/dvdsubdec.c
+++ b/libavcodec/dvdsubdec.c
@@ -120,6 +120,14 @@ static void guess_palette(uint32_t *rgba_palette,
uint8_t *alpha,
uint32_t subtitle_color)
{
+ static const uint8_t level_map[4][4] = {
+ // this configuration (full range, lowest to highest) in tests
+ // seemed most common, so assume this
+ {0xff},
+ {0x00, 0xff},
+ {0x00, 0x80, 0xff},
+ {0x00, 0x55, 0xaa, 0xff},
+ };
uint8_t color_used[16];
int nb_opaque_colors, i, level, j, r, g, b;
@@ -138,18 +146,18 @@ static void guess_palette(uint32_t *rgba_palette,
if (nb_opaque_colors == 0)
return;
- j = nb_opaque_colors;
+ j = 0;
memset(color_used, 0, 16);
for(i = 0; i < 4; i++) {
if (alpha[i] != 0) {
if (!color_used[colormap[i]]) {
- level = (0xff * j) / nb_opaque_colors;
+ level = level_map[nb_opaque_colors][j];
r = (((subtitle_color >> 16) & 0xff) * level) >> 8;
g = (((subtitle_color >> 8) & 0xff) * level) >> 8;
b = (((subtitle_color >> 0) & 0xff) * level) >> 8;
rgba_palette[i] = b | (g << 8) | (r << 16) | ((alpha[i] * 17) << 24);
color_used[colormap[i]] = (i + 1);
- j--;
+ j++;
} else {
rgba_palette[i] = (rgba_palette[color_used[colormap[i]] - 1] & 0x00ffffff) |
((alpha[i] * 17) << 24);
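guess_palette() used to spread the opaque colours evenly from the top down (level = 0xff*j / nb_opaque_colors with j counting down), so two opaque colours came out as 255 and 127; the new level_map table pins each colour count to a fixed full-range, lowest-to-highest ramp, which the added comment says matched the most common discs in testing. Worked out for two opaque colours (standalone):

    #include <stdio.h>

    int main(void)
    {
        int nb = 2;                                   /* two opaque colours        */
        for (int j = nb; j > 0; j--)                  /* old scheme: j counts down */
            printf("old level: %d\n", 0xff * j / nb);        /* 255, 127           */

        int ramp2[2] = { 0x00, 0xff };                /* new ramp for two colours  */
        for (int j = 0; j < nb; j++)
            printf("new level: %d\n", ramp2[j]);             /* 0, 255             */
        return 0;
    }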
diff --git a/libavcodec/dxa.c b/libavcodec/dxa.c
index 00156292c9..807ecd85ee 100644
--- a/libavcodec/dxa.c
+++ b/libavcodec/dxa.c
@@ -240,13 +240,13 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
switch(compr){
case -1:
c->pic.key_frame = 0;
- c->pic.pict_type = FF_P_TYPE;
+ c->pic.pict_type = AV_PICTURE_TYPE_P;
if(c->prev.data[0])
memcpy(c->pic.data[0], c->prev.data[0], c->pic.linesize[0] * avctx->height);
else{ // Should happen only when first frame is 'NULL'
memset(c->pic.data[0], 0, c->pic.linesize[0] * avctx->height);
c->pic.key_frame = 1;
- c->pic.pict_type = FF_I_TYPE;
+ c->pic.pict_type = AV_PICTURE_TYPE_I;
}
break;
case 2:
@@ -254,7 +254,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
case 4:
case 5:
c->pic.key_frame = !(compr & 1);
- c->pic.pict_type = (compr & 1) ? FF_P_TYPE : FF_I_TYPE;
+ c->pic.pict_type = (compr & 1) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
for(j = 0; j < avctx->height; j++){
if(compr & 1){
for(i = 0; i < avctx->width; i++)
@@ -269,7 +269,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
case 12: // ScummVM coding
case 13:
c->pic.key_frame = 0;
- c->pic.pict_type = FF_P_TYPE;
+ c->pic.pict_type = AV_PICTURE_TYPE_P;
decode_13(avctx, c, c->pic.data[0], srcptr, c->prev.data[0]);
break;
default:
@@ -295,6 +295,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
c->avctx = avctx;
avctx->pix_fmt = PIX_FMT_PAL8;
+ avcodec_get_frame_defaults(&c->pic);
+ avcodec_get_frame_defaults(&c->prev);
+
c->dsize = avctx->width * avctx->height * 2;
if((c->decomp_buf = av_malloc(c->dsize)) == NULL) {
av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
diff --git a/libavcodec/dxva2_h264.c b/libavcodec/dxva2_h264.c
index 17fb2b55c3..bc80e982fb 100644
--- a/libavcodec/dxva2_h264.c
+++ b/libavcodec/dxva2_h264.c
@@ -246,7 +246,7 @@ static void fill_slice_long(AVCodecContext *avctx, DXVA_Slice_H264_Long *slice,
slice->slice_qs_delta = 0; /* XXX not implemented by FFmpeg */
slice->slice_qp_delta = s->qscale - h->pps.init_qp;
slice->redundant_pic_cnt = h->redundant_pic_count;
- if (h->slice_type == FF_B_TYPE)
+ if (h->slice_type == AV_PICTURE_TYPE_B)
slice->direct_spatial_mv_pred_flag = h->direct_spatial_mv_pred;
slice->cabac_init_idc = h->pps.cabac ? h->cabac_init_idc : 0;
if (h->deblocking_filter < 2)
@@ -403,7 +403,7 @@ static int decode_slice(AVCodecContext *avctx,
position, size);
ctx_pic->slice_count++;
- if (h->slice_type != FF_I_TYPE && h->slice_type != FF_SI_TYPE)
+ if (h->slice_type != AV_PICTURE_TYPE_I && h->slice_type != AV_PICTURE_TYPE_SI)
ctx_pic->pp.wBitFields &= ~(1 << 15); /* Set IntraPicFlag to 0 */
return 0;
}
diff --git a/libavcodec/dxva2_mpeg2.c b/libavcodec/dxva2_mpeg2.c
index 780542a6c1..62e6ec1cfa 100644
--- a/libavcodec/dxva2_mpeg2.c
+++ b/libavcodec/dxva2_mpeg2.c
@@ -44,11 +44,11 @@ static void fill_picture_parameters(AVCodecContext *avctx,
memset(pp, 0, sizeof(*pp));
pp->wDecodedPictureIndex = ff_dxva2_get_surface_index(ctx, current_picture);
pp->wDeblockedPictureIndex = 0;
- if (s->pict_type != FF_I_TYPE)
+ if (s->pict_type != AV_PICTURE_TYPE_I)
pp->wForwardRefPictureIndex = ff_dxva2_get_surface_index(ctx, &s->last_picture);
else
pp->wForwardRefPictureIndex = 0xffff;
- if (s->pict_type == FF_B_TYPE)
+ if (s->pict_type == AV_PICTURE_TYPE_B)
pp->wBackwardRefPictureIndex = ff_dxva2_get_surface_index(ctx, &s->next_picture);
else
pp->wBackwardRefPictureIndex = 0xffff;
@@ -61,8 +61,8 @@ static void fill_picture_parameters(AVCodecContext *avctx,
pp->bBPPminus1 = 7;
pp->bPicStructure = s->picture_structure;
pp->bSecondField = is_field && !s->first_field;
- pp->bPicIntra = s->pict_type == FF_I_TYPE;
- pp->bPicBackwardPrediction = s->pict_type == FF_B_TYPE;
+ pp->bPicIntra = s->pict_type == AV_PICTURE_TYPE_I;
+ pp->bPicBackwardPrediction = s->pict_type == AV_PICTURE_TYPE_B;
pp->bBidirectionalAveragingMode = 0;
pp->bMVprecisionAndChromaRelation= 0; /* FIXME */
pp->bChromaFormat = s->chroma_format;
diff --git a/libavcodec/dxva2_vc1.c b/libavcodec/dxva2_vc1.c
index 2b9a690ebc..5b9bb692e0 100644
--- a/libavcodec/dxva2_vc1.c
+++ b/libavcodec/dxva2_vc1.c
@@ -42,11 +42,11 @@ static void fill_picture_parameters(AVCodecContext *avctx,
memset(pp, 0, sizeof(*pp));
pp->wDecodedPictureIndex =
pp->wDeblockedPictureIndex = ff_dxva2_get_surface_index(ctx, current_picture);
- if (s->pict_type != FF_I_TYPE)
+ if (s->pict_type != AV_PICTURE_TYPE_I)
pp->wForwardRefPictureIndex = ff_dxva2_get_surface_index(ctx, &s->last_picture);
else
pp->wForwardRefPictureIndex = 0xffff;
- if (s->pict_type == FF_B_TYPE)
+ if (s->pict_type == AV_PICTURE_TYPE_B)
pp->wBackwardRefPictureIndex = ff_dxva2_get_surface_index(ctx, &s->next_picture);
else
pp->wBackwardRefPictureIndex = 0xffff;
@@ -69,8 +69,8 @@ static void fill_picture_parameters(AVCodecContext *avctx,
if (s->picture_structure & PICT_BOTTOM_FIELD)
pp->bPicStructure |= 0x02;
pp->bSecondField = v->interlace && v->fcm != 0x03 && !s->first_field;
- pp->bPicIntra = s->pict_type == FF_I_TYPE;
- pp->bPicBackwardPrediction = s->pict_type == FF_B_TYPE;
+ pp->bPicIntra = s->pict_type == AV_PICTURE_TYPE_I;
+ pp->bPicBackwardPrediction = s->pict_type == AV_PICTURE_TYPE_B;
pp->bBidirectionalAveragingMode = (1 << 7) |
((ctx->cfg->ConfigIntraResidUnsigned != 0) << 6) |
((ctx->cfg->ConfigResidDiffAccelerator != 0) << 5) |
@@ -108,10 +108,10 @@ static void fill_picture_parameters(AVCodecContext *avctx,
(v->interlace << 5) |
(v->tfcntrflag << 4) |
(v->finterpflag << 3) |
- ((s->pict_type != FF_B_TYPE) << 2) |
+ ((s->pict_type != AV_PICTURE_TYPE_B) << 2) |
(v->psf << 1) |
(v->extended_dmv );
- if (s->pict_type != FF_I_TYPE)
+ if (s->pict_type != AV_PICTURE_TYPE_I)
pp->bPic4MVallowed = v->mv_mode == MV_PMODE_MIXED_MV ||
(v->mv_mode == MV_PMODE_INTENSITY_COMP &&
v->mv_mode2 == MV_PMODE_MIXED_MV);
diff --git a/libavcodec/eacmv.c b/libavcodec/eacmv.c
index 7f6c2a4577..408d948812 100644
--- a/libavcodec/eacmv.c
+++ b/libavcodec/eacmv.c
@@ -43,6 +43,10 @@ typedef struct CmvContext {
static av_cold int cmv_decode_init(AVCodecContext *avctx){
CmvContext *s = avctx->priv_data;
+ avcodec_get_frame_defaults(&s->frame);
+ avcodec_get_frame_defaults(&s->last_frame);
+ avcodec_get_frame_defaults(&s->last2_frame);
+
s->avctx = avctx;
avctx->pix_fmt = PIX_FMT_PAL8;
return 0;
@@ -180,10 +184,10 @@ static int cmv_decode_frame(AVCodecContext *avctx,
if ((buf[0]&1)) { // subtype
cmv_decode_inter(s, buf+2, buf_end);
s->frame.key_frame = 0;
- s->frame.pict_type = FF_P_TYPE;
+ s->frame.pict_type = AV_PICTURE_TYPE_P;
}else{
s->frame.key_frame = 1;
- s->frame.pict_type = FF_I_TYPE;
+ s->frame.pict_type = AV_PICTURE_TYPE_I;
cmv_decode_intra(s, buf+2, buf_end);
}
diff --git a/libavcodec/eatgq.c b/libavcodec/eatgq.c
index 6f35a1513b..a353580a15 100644
--- a/libavcodec/eatgq.c
+++ b/libavcodec/eatgq.c
@@ -218,7 +218,7 @@ static int tgq_decode_frame(AVCodecContext *avctx,
if (!s->frame.data[0]) {
s->frame.key_frame = 1;
- s->frame.pict_type = FF_I_TYPE;
+ s->frame.pict_type = AV_PICTURE_TYPE_I;
s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
if (avctx->get_buffer(avctx, &s->frame)) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
diff --git a/libavcodec/eatgv.c b/libavcodec/eatgv.c
index 9ad083475b..0855f10417 100644
--- a/libavcodec/eatgv.c
+++ b/libavcodec/eatgv.c
@@ -55,6 +55,8 @@ static av_cold int tgv_decode_init(AVCodecContext *avctx){
s->avctx = avctx;
avctx->time_base = (AVRational){1, 15};
avctx->pix_fmt = PIX_FMT_PAL8;
+ avcodec_get_frame_defaults(&s->frame);
+ avcodec_get_frame_defaults(&s->last_frame);
return 0;
}
@@ -300,7 +302,7 @@ static int tgv_decode_frame(AVCodecContext *avctx,
if(chunk_type==kVGT_TAG) {
s->frame.key_frame = 1;
- s->frame.pict_type = FF_I_TYPE;
+ s->frame.pict_type = AV_PICTURE_TYPE_I;
if (unpack(buf, buf_end, s->frame.data[0], s->avctx->width, s->avctx->height)<0) {
av_log(avctx, AV_LOG_WARNING, "truncated intra frame\n");
return -1;
@@ -311,7 +313,7 @@ static int tgv_decode_frame(AVCodecContext *avctx,
return buf_size;
}
s->frame.key_frame = 0;
- s->frame.pict_type = FF_P_TYPE;
+ s->frame.pict_type = AV_PICTURE_TYPE_P;
if (tgv_decode_inter(s, buf, buf_end)<0) {
av_log(avctx, AV_LOG_WARNING, "truncated inter frame\n");
return -1;
diff --git a/libavcodec/error_resilience.c b/libavcodec/error_resilience.c
index dc015b9f6a..c9231e206f 100644
--- a/libavcodec/error_resilience.c
+++ b/libavcodec/error_resilience.c
@@ -381,6 +381,14 @@ static void guess_mv(MpegEncContext *s){
fixed[mb_xy]= f;
if(f==MV_FROZEN)
num_avail++;
+ else if(s->last_picture.data[0] && s->last_picture.motion_val[0]){
+ const int mb_y= mb_xy / s->mb_stride;
+ const int mb_x= mb_xy % s->mb_stride;
+ const int mot_index= (mb_x + mb_y*mot_stride) * mot_step;
+ s->current_picture.motion_val[0][mot_index][0]= s->last_picture.motion_val[0][mot_index][0];
+ s->current_picture.motion_val[0][mot_index][1]= s->last_picture.motion_val[0][mot_index][1];
+ s->current_picture.ref_index[0][4*mb_xy] = s->last_picture.ref_index[0][4*mb_xy];
+ }
}
if((!(s->avctx->error_concealment&FF_EC_GUESS_MVS)) || num_avail <= mb_width/2){
@@ -639,7 +647,7 @@ static int is_intra_more_likely(MpegEncContext *s){
if(undamaged_count < 5) return 0; //almost all MBs damaged -> use temporal prediction
//prevent dsp.sad() check, that requires access to the image
- if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration && s->pict_type == FF_I_TYPE)
+ if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration && s->pict_type == AV_PICTURE_TYPE_I)
return 1;
skip_amount= FFMAX(undamaged_count/50, 1); //check only upto 50 MBs
@@ -658,11 +666,12 @@ static int is_intra_more_likely(MpegEncContext *s){
j++;
if((j%skip_amount) != 0) continue; //skip a few to speed things up
- if(s->pict_type==FF_I_TYPE){
+ if(s->pict_type==AV_PICTURE_TYPE_I){
uint8_t *mb_ptr = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
uint8_t *last_mb_ptr= s->last_picture.data [0] + mb_x*16 + mb_y*16*s->linesize;
is_intra_likely += s->dsp.sad[0](NULL, last_mb_ptr, mb_ptr , s->linesize, 16);
+ // FIXME need await_progress() here
is_intra_likely -= s->dsp.sad[0](NULL, last_mb_ptr, last_mb_ptr+s->linesize*16, s->linesize, 16);
}else{
if(IS_INTRA(s->current_picture.mb_type[mb_xy]))
@@ -977,7 +986,7 @@ void ff_er_frame_end(MpegEncContext *s){
}
/* guess MVs */
- if(s->pict_type==FF_B_TYPE){
+ if(s->pict_type==AV_PICTURE_TYPE_B){
for(mb_y=0; mb_y<s->mb_height; mb_y++){
for(mb_x=0; mb_x<s->mb_width; mb_x++){
int xy= mb_x*2 + mb_y*2*s->b8_stride;
@@ -1000,6 +1009,7 @@ void ff_er_frame_end(MpegEncContext *s){
int time_pp= s->pp_time;
int time_pb= s->pb_time;
+ // FIXME await_progress here
s->mv[0][0][0] = s->next_picture.motion_val[0][xy][0]*time_pb/time_pp;
s->mv[0][0][1] = s->next_picture.motion_val[0][xy][1]*time_pb/time_pp;
s->mv[1][0][0] = s->next_picture.motion_val[0][xy][0]*(time_pb - time_pp)/time_pp;
@@ -1114,7 +1124,7 @@ ec_clean:
const int mb_xy= s->mb_index2xy[i];
int error= s->error_status_table[mb_xy];
- if(s->pict_type!=FF_B_TYPE && (error&(DC_ERROR|MV_ERROR|AC_ERROR))){
+ if(s->pict_type!=AV_PICTURE_TYPE_B && (error&(DC_ERROR|MV_ERROR|AC_ERROR))){
s->mbskip_table[mb_xy]=0;
}
s->mbintra_table[mb_xy]=1;
diff --git a/libavcodec/escape124.c b/libavcodec/escape124.c
index 5a6769d342..12e478fe19 100644
--- a/libavcodec/escape124.c
+++ b/libavcodec/escape124.c
@@ -61,6 +61,7 @@ static av_cold int escape124_decode_init(AVCodecContext *avctx)
{
Escape124Context *s = avctx->priv_data;
+ avcodec_get_frame_defaults(&s->frame);
avctx->pix_fmt = PIX_FMT_RGB555;
s->num_superblocks = ((unsigned)avctx->width / 8) *
@@ -214,7 +215,8 @@ static int escape124_decode_frame(AVCodecContext *avctx,
uint16_t* old_frame_data, *new_frame_data;
unsigned old_stride, new_stride;
- AVFrame new_frame = { { 0 } };
+ AVFrame new_frame;
+ avcodec_get_frame_defaults(&new_frame);
init_get_bits(&gb, buf, buf_size * 8);
diff --git a/libavcodec/ffv1.c b/libavcodec/ffv1.c
index 0a982e8754..4e0e5f7b62 100644
--- a/libavcodec/ffv1.c
+++ b/libavcodec/ffv1.c
@@ -250,8 +250,9 @@ typedef struct FFV1Context{
uint8_t (*initial_states[MAX_QUANT_TABLES])[32];
int run_index;
int colorspace;
- int_fast16_t *sample_buffer;
+ int16_t *sample_buffer;
int gob_count;
+ int packed_at_lsb;
int quant_table_count;
@@ -279,7 +280,8 @@ static av_always_inline int fold(int diff, int bits){
return diff;
}
-static inline int predict(int_fast16_t *src, int_fast16_t *last){
+static inline int predict(int16_t *src, int16_t *last)
+{
const int LT= last[-1];
const int T= last[ 0];
const int L = src[-1];
@@ -287,7 +289,9 @@ static inline int predict(int_fast16_t *src, int_fast16_t *last){
return mid_pred(L, L + T - LT, T);
}
-static inline int get_context(PlaneContext *p, int_fast16_t *src, int_fast16_t *last, int_fast16_t *last2){
+static inline int get_context(PlaneContext *p, int16_t *src,
+ int16_t *last, int16_t *last2)
+{
const int LT= last[-1];
const int T= last[ 0];
const int RT= last[ 1];
@@ -506,7 +510,10 @@ static inline int get_vlc_symbol(GetBitContext *gb, VlcState * const state, int
}
#if CONFIG_FFV1_ENCODER
-static av_always_inline int encode_line(FFV1Context *s, int w, int_fast16_t *sample[2], int plane_index, int bits){
+static av_always_inline int encode_line(FFV1Context *s, int w,
+ int16_t *sample[2],
+ int plane_index, int bits)
+{
PlaneContext * const p= &s->plane[plane_index];
RangeCoder * const c= &s->c;
int x;
@@ -591,7 +598,7 @@ static av_always_inline int encode_line(FFV1Context *s, int w, int_fast16_t *sam
static void encode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
int x,y,i;
const int ring_size= s->avctx->context_model ? 3 : 2;
- int_fast16_t *sample[3];
+ int16_t *sample[3];
s->run_index=0;
memset(s->sample_buffer, 0, ring_size*(w+6)*sizeof(*s->sample_buffer));
@@ -609,8 +616,14 @@ static void encode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride,
}
encode_line(s, w, sample, plane_index, 8);
}else{
- for(x=0; x<w; x++){
- sample[0][x]= ((uint16_t*)(src + stride*y))[x] >> (16 - s->avctx->bits_per_raw_sample);
+ if(s->packed_at_lsb){
+ for(x=0; x<w; x++){
+ sample[0][x]= ((uint16_t*)(src + stride*y))[x];
+ }
+ }else{
+ for(x=0; x<w; x++){
+ sample[0][x]= ((uint16_t*)(src + stride*y))[x] >> (16 - s->avctx->bits_per_raw_sample);
+ }
}
encode_line(s, w, sample, plane_index, s->avctx->bits_per_raw_sample);
}
@@ -621,7 +634,7 @@ static void encode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride,
static void encode_rgb_frame(FFV1Context *s, uint32_t *src, int w, int h, int stride){
int x, y, p, i;
const int ring_size= s->avctx->context_model ? 3 : 2;
- int_fast16_t *sample[3][3];
+ int16_t *sample[3][3];
s->run_index=0;
memset(s->sample_buffer, 0, ring_size*3*(w+6)*sizeof(*s->sample_buffer));
@@ -725,6 +738,8 @@ static av_cold int common_init(AVCodecContext *avctx){
s->avctx= avctx;
s->flags= avctx->flags;
+ avcodec_get_frame_defaults(&s->picture);
+
dsputil_init(&s->dsp, avctx);
s->width = avctx->width;
@@ -964,6 +979,10 @@ static av_cold int encode_init(AVCodecContext *avctx)
avctx->coded_frame= &s->picture;
switch(avctx->pix_fmt){
+ case PIX_FMT_YUV420P9:
+ case PIX_FMT_YUV420P10:
+ case PIX_FMT_YUV422P10:
+ s->packed_at_lsb = 1;
case PIX_FMT_YUV444P16:
case PIX_FMT_YUV422P16:
case PIX_FMT_YUV420P16:
@@ -1160,7 +1179,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
*p = *pict;
- p->pict_type= FF_I_TYPE;
+ p->pict_type= AV_PICTURE_TYPE_I;
if(avctx->gop_size==0 || f->picture_number % avctx->gop_size == 0){
put_rac(c, &keystate, 1);
@@ -1305,7 +1324,10 @@ static av_cold int common_end(AVCodecContext *avctx){
return 0;
}
-static av_always_inline void decode_line(FFV1Context *s, int w, int_fast16_t *sample[2], int plane_index, int bits){
+static av_always_inline void decode_line(FFV1Context *s, int w,
+ int16_t *sample[2],
+ int plane_index, int bits)
+{
PlaneContext * const p= &s->plane[plane_index];
RangeCoder * const c= &s->c;
int x;
@@ -1365,7 +1387,7 @@ static av_always_inline void decode_line(FFV1Context *s, int w, int_fast16_t *sa
static void decode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
int x, y;
- int_fast16_t *sample[2];
+ int16_t *sample[2];
sample[0]=s->sample_buffer +3;
sample[1]=s->sample_buffer+w+6+3;
@@ -1374,7 +1396,7 @@ static void decode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride,
memset(s->sample_buffer, 0, 2*(w+6)*sizeof(*s->sample_buffer));
for(y=0; y<h; y++){
- int_fast16_t *temp= sample[0]; //FIXME try a normal buffer
+ int16_t *temp = sample[0]; //FIXME try a normal buffer
sample[0]= sample[1];
sample[1]= temp;
@@ -1390,8 +1412,14 @@ static void decode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride,
}
}else{
decode_line(s, w, sample, plane_index, s->avctx->bits_per_raw_sample);
- for(x=0; x<w; x++){
- ((uint16_t*)(src + stride*y))[x]= sample[1][x] << (16 - s->avctx->bits_per_raw_sample);
+ if(s->packed_at_lsb){
+ for(x=0; x<w; x++){
+ ((uint16_t*)(src + stride*y))[x]= sample[1][x];
+ }
+ }else{
+ for(x=0; x<w; x++){
+ ((uint16_t*)(src + stride*y))[x]= sample[1][x] << (16 - s->avctx->bits_per_raw_sample);
+ }
}
}
//STOP_TIMER("decode-line")}
@@ -1400,7 +1428,7 @@ static void decode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride,
static void decode_rgb_frame(FFV1Context *s, uint32_t *src, int w, int h, int stride){
int x, y, p;
- int_fast16_t *sample[3][2];
+ int16_t *sample[3][2];
for(x=0; x<3; x++){
sample[x][0] = s->sample_buffer + x*2 *(w+6) + 3;
sample[x][1] = s->sample_buffer + (x*2+1)*(w+6) + 3;
@@ -1412,7 +1440,7 @@ static void decode_rgb_frame(FFV1Context *s, uint32_t *src, int w, int h, int st
for(y=0; y<h; y++){
for(p=0; p<3; p++){
- int_fast16_t *temp= sample[p][0]; //FIXME try a normal buffer
+ int16_t *temp = sample[p][0]; //FIXME try a normal buffer
sample[p][0]= sample[p][1];
sample[p][1]= temp;
@@ -1606,7 +1634,25 @@ static int read_header(FFV1Context *f){
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return -1;
}
- }else{
+ }else if(f->avctx->bits_per_raw_sample==9) {
+ switch(16*f->chroma_h_shift + f->chroma_v_shift){
+ case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P16; break;
+ case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P16; break;
+ case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P9 ; f->packed_at_lsb=1; break;
+ default:
+ av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
+ return -1;
+ }
+ }else if(f->avctx->bits_per_raw_sample==10) {
+ switch(16*f->chroma_h_shift + f->chroma_v_shift){
+ case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P16; break;
+ case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P10; f->packed_at_lsb=1; break;
+ case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P10; f->packed_at_lsb=1; break;
+ default:
+ av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
+ return -1;
+ }
+ }else {
switch(16*f->chroma_h_shift + f->chroma_v_shift){
case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P16; break;
case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P16; break;
@@ -1643,6 +1689,7 @@ static int read_header(FFV1Context *f){
for(j=0; j<f->slice_count; j++){
FFV1Context *fs= f->slice_context[j];
fs->ac= f->ac;
+ fs->packed_at_lsb= f->packed_at_lsb;
if(f->version >= 2){
fs->slice_x = get_symbol(c, state, 0) *f->width ;
@@ -1723,7 +1770,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
- p->pict_type= FF_I_TYPE; //FIXME I vs. P
+ p->pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
if(get_rac(c, &keystate)){
p->key_frame= 1;
if(read_header(f) < 0)
@@ -1810,7 +1857,7 @@ AVCodec ff_ffv1_encoder = {
encode_frame,
common_end,
.capabilities = CODEC_CAP_SLICE_THREADS,
- .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV444P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_RGB32, PIX_FMT_YUV420P16, PIX_FMT_YUV422P16, PIX_FMT_YUV444P16, PIX_FMT_NONE},
+ .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV444P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_RGB32, PIX_FMT_YUV420P16, PIX_FMT_YUV422P16, PIX_FMT_YUV444P16, PIX_FMT_YUV420P9, PIX_FMT_YUV420P10, PIX_FMT_YUV422P10, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
};
#endif
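Note on the FFV1 hunks above: the new packed_at_lsb flag separates two ways of filling a 16-bit sample word. The pre-existing >8-bit formats (PIX_FMT_YUV4xxP16) keep their significant bits at the top of the word and therefore need the `>>`/`<<` by (16 - bits_per_raw_sample), while the new 9/10-bit formats (YUV420P9, YUV420P10, YUV422P10) keep them at the bottom and are copied verbatim. The read_header() switch keys on 16*chroma_h_shift + chroma_v_shift, so 0x00 selects 4:4:4, 0x10 selects 4:2:2 and 0x11 selects 4:2:0. A minimal standalone sketch of the packing rule (illustrative names only, not code from the tree):

#include <stdint.h>

/* How a `bits`-bit sample value ends up in its uint16_t container. */
static inline uint16_t pack_sample(unsigned value, int bits, int packed_at_lsb)
{
    if (packed_at_lsb)
        return value;                 /* e.g. 10-bit: 0..1023 stored as-is     */
    return value << (16 - bits);      /* significant bits shifted to the MSBs  */
}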
diff --git a/libavcodec/flacenc.c b/libavcodec/flacenc.c
index af1c65b319..012adb3f83 100644
--- a/libavcodec/flacenc.c
+++ b/libavcodec/flacenc.c
@@ -21,6 +21,7 @@
#include "libavutil/crc.h"
#include "libavutil/md5.h"
+#include "libavutil/opt.h"
#include "avcodec.h"
#include "get_bits.h"
#include "golomb.h"
@@ -43,7 +44,7 @@
typedef struct CompressionOptions {
int compression_level;
int block_time_ms;
- enum AVLPCType lpc_type;
+ enum FFLPCType lpc_type;
int lpc_passes;
int lpc_coeff_precision;
int min_prediction_order;
@@ -80,6 +81,7 @@ typedef struct FlacFrame {
} FlacFrame;
typedef struct FlacEncodeContext {
+ AVClass *class;
PutBitContext pb;
int channels;
int samplerate;
@@ -156,16 +158,16 @@ static av_cold void dprint_compression_options(FlacEncodeContext *s)
av_log(avctx, AV_LOG_DEBUG, " compression: %d\n", opt->compression_level);
switch (opt->lpc_type) {
- case AV_LPC_TYPE_NONE:
+ case FF_LPC_TYPE_NONE:
av_log(avctx, AV_LOG_DEBUG, " lpc type: None\n");
break;
- case AV_LPC_TYPE_FIXED:
+ case FF_LPC_TYPE_FIXED:
av_log(avctx, AV_LOG_DEBUG, " lpc type: Fixed pre-defined coefficients\n");
break;
- case AV_LPC_TYPE_LEVINSON:
+ case FF_LPC_TYPE_LEVINSON:
av_log(avctx, AV_LOG_DEBUG, " lpc type: Levinson-Durbin recursion with Welch window\n");
break;
- case AV_LPC_TYPE_CHOLESKY:
+ case FF_LPC_TYPE_CHOLESKY:
av_log(avctx, AV_LOG_DEBUG, " lpc type: Cholesky factorization, %d pass%s\n",
opt->lpc_passes, opt->lpc_passes == 1 ? "" : "es");
break;
@@ -266,23 +268,32 @@ static av_cold int flac_encode_init(AVCodecContext *avctx)
s->options.block_time_ms = ((int[]){ 27, 27, 27,105,105,105,105,105,105,105,105,105,105})[level];
- s->options.lpc_type = ((int[]){ AV_LPC_TYPE_FIXED, AV_LPC_TYPE_FIXED, AV_LPC_TYPE_FIXED,
- AV_LPC_TYPE_LEVINSON, AV_LPC_TYPE_LEVINSON, AV_LPC_TYPE_LEVINSON,
- AV_LPC_TYPE_LEVINSON, AV_LPC_TYPE_LEVINSON, AV_LPC_TYPE_LEVINSON,
- AV_LPC_TYPE_LEVINSON, AV_LPC_TYPE_LEVINSON, AV_LPC_TYPE_LEVINSON,
- AV_LPC_TYPE_LEVINSON})[level];
+ if (s->options.lpc_type == FF_LPC_TYPE_DEFAULT)
+ s->options.lpc_type = ((int[]){ FF_LPC_TYPE_FIXED, FF_LPC_TYPE_FIXED, FF_LPC_TYPE_FIXED,
+ FF_LPC_TYPE_LEVINSON, FF_LPC_TYPE_LEVINSON, FF_LPC_TYPE_LEVINSON,
+ FF_LPC_TYPE_LEVINSON, FF_LPC_TYPE_LEVINSON, FF_LPC_TYPE_LEVINSON,
+ FF_LPC_TYPE_LEVINSON, FF_LPC_TYPE_LEVINSON, FF_LPC_TYPE_LEVINSON,
+ FF_LPC_TYPE_LEVINSON})[level];
s->options.min_prediction_order = ((int[]){ 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})[level];
s->options.max_prediction_order = ((int[]){ 3, 4, 4, 6, 8, 8, 8, 8, 12, 12, 12, 32, 32})[level];
- s->options.prediction_order_method = ((int[]){ ORDER_METHOD_EST, ORDER_METHOD_EST, ORDER_METHOD_EST,
- ORDER_METHOD_EST, ORDER_METHOD_EST, ORDER_METHOD_EST,
- ORDER_METHOD_4LEVEL, ORDER_METHOD_LOG, ORDER_METHOD_4LEVEL,
- ORDER_METHOD_LOG, ORDER_METHOD_SEARCH, ORDER_METHOD_LOG,
- ORDER_METHOD_SEARCH})[level];
+ if (s->options.prediction_order_method < 0)
+ s->options.prediction_order_method = ((int[]){ ORDER_METHOD_EST, ORDER_METHOD_EST, ORDER_METHOD_EST,
+ ORDER_METHOD_EST, ORDER_METHOD_EST, ORDER_METHOD_EST,
+ ORDER_METHOD_4LEVEL, ORDER_METHOD_LOG, ORDER_METHOD_4LEVEL,
+ ORDER_METHOD_LOG, ORDER_METHOD_SEARCH, ORDER_METHOD_LOG,
+ ORDER_METHOD_SEARCH})[level];
- s->options.min_partition_order = ((int[]){ 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})[level];
- s->options.max_partition_order = ((int[]){ 2, 2, 3, 3, 3, 8, 8, 8, 8, 8, 8, 8, 8})[level];
+ if (s->options.min_partition_order > s->options.max_partition_order) {
+ av_log(avctx, AV_LOG_ERROR, "invalid partition orders: min=%d max=%d\n",
+ s->options.min_partition_order, s->options.max_partition_order);
+ return AVERROR(EINVAL);
+ }
+ if (s->options.min_partition_order < 0)
+ s->options.min_partition_order = ((int[]){ 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})[level];
+ if (s->options.max_partition_order < 0)
+ s->options.max_partition_order = ((int[]){ 2, 2, 3, 3, 3, 8, 8, 8, 8, 8, 8, 8, 8})[level];
/* set compression option overrides from AVCodecContext */
#if FF_API_USE_LPC
@@ -296,13 +307,14 @@ static av_cold int flac_encode_init(AVCodecContext *avctx)
s->options.lpc_passes = avctx->use_lpc - 1;
}
#endif
- if (avctx->lpc_type > AV_LPC_TYPE_DEFAULT) {
- if (avctx->lpc_type > AV_LPC_TYPE_CHOLESKY) {
+#if FF_API_FLAC_GLOBAL_OPTS
+ if (avctx->lpc_type > FF_LPC_TYPE_DEFAULT) {
+ if (avctx->lpc_type > FF_LPC_TYPE_CHOLESKY) {
av_log(avctx, AV_LOG_ERROR, "unknown lpc type: %d\n", avctx->lpc_type);
return -1;
}
s->options.lpc_type = avctx->lpc_type;
- if (s->options.lpc_type == AV_LPC_TYPE_CHOLESKY) {
+ if (s->options.lpc_type == FF_LPC_TYPE_CHOLESKY) {
if (avctx->lpc_passes < 0) {
// default number of passes for Cholesky
s->options.lpc_passes = 2;
@@ -315,11 +327,12 @@ static av_cold int flac_encode_init(AVCodecContext *avctx)
}
}
}
+#endif
- if (s->options.lpc_type == AV_LPC_TYPE_NONE) {
+ if (s->options.lpc_type == FF_LPC_TYPE_NONE) {
s->options.min_prediction_order = 0;
} else if (avctx->min_prediction_order >= 0) {
- if (s->options.lpc_type == AV_LPC_TYPE_FIXED) {
+ if (s->options.lpc_type == FF_LPC_TYPE_FIXED) {
if (avctx->min_prediction_order > MAX_FIXED_ORDER) {
av_log(avctx, AV_LOG_ERROR, "invalid min prediction order: %d\n",
avctx->min_prediction_order);
@@ -333,10 +346,10 @@ static av_cold int flac_encode_init(AVCodecContext *avctx)
}
s->options.min_prediction_order = avctx->min_prediction_order;
}
- if (s->options.lpc_type == AV_LPC_TYPE_NONE) {
+ if (s->options.lpc_type == FF_LPC_TYPE_NONE) {
s->options.max_prediction_order = 0;
} else if (avctx->max_prediction_order >= 0) {
- if (s->options.lpc_type == AV_LPC_TYPE_FIXED) {
+ if (s->options.lpc_type == FF_LPC_TYPE_FIXED) {
if (avctx->max_prediction_order > MAX_FIXED_ORDER) {
av_log(avctx, AV_LOG_ERROR, "invalid max prediction order: %d\n",
avctx->max_prediction_order);
@@ -356,6 +369,7 @@ static av_cold int flac_encode_init(AVCodecContext *avctx)
return -1;
}
+#if FF_API_FLAC_GLOBAL_OPTS
if (avctx->prediction_order_method >= 0) {
if (avctx->prediction_order_method > ORDER_METHOD_LOG) {
av_log(avctx, AV_LOG_ERROR, "invalid prediction order method: %d\n",
@@ -386,6 +400,7 @@ static av_cold int flac_encode_init(AVCodecContext *avctx)
s->options.min_partition_order, s->options.max_partition_order);
return -1;
}
+#endif
if (avctx->frame_size > 0) {
if (avctx->frame_size < FLAC_MIN_BLOCKSIZE ||
@@ -399,6 +414,7 @@ static av_cold int flac_encode_init(AVCodecContext *avctx)
}
s->max_blocksize = s->avctx->frame_size;
+#if FF_API_FLAC_GLOBAL_OPTS
/* set LPC precision */
if (avctx->lpc_coeff_precision > 0) {
if (avctx->lpc_coeff_precision > MAX_LPC_PRECISION) {
@@ -407,10 +423,8 @@ static av_cold int flac_encode_init(AVCodecContext *avctx)
return -1;
}
s->options.lpc_coeff_precision = avctx->lpc_coeff_precision;
- } else {
- /* default LPC precision */
- s->options.lpc_coeff_precision = 15;
}
+#endif
/* set maximum encoded frame size in verbatim mode */
s->max_framesize = ff_flac_get_max_frame_size(s->avctx->frame_size,
@@ -459,7 +473,7 @@ static av_cold int flac_encode_init(AVCodecContext *avctx)
}
ret = ff_lpc_init(&s->lpc_ctx, avctx->frame_size,
- s->options.max_prediction_order, AV_LPC_TYPE_LEVINSON);
+ s->options.max_prediction_order, FF_LPC_TYPE_LEVINSON);
dprint_compression_options(s);
@@ -900,8 +914,8 @@ static int encode_residual_ch(FlacEncodeContext *s, int ch)
/* FIXED */
sub->type = FLAC_SUBFRAME_FIXED;
- if (s->options.lpc_type == AV_LPC_TYPE_NONE ||
- s->options.lpc_type == AV_LPC_TYPE_FIXED || n <= max_order) {
+ if (s->options.lpc_type == FF_LPC_TYPE_NONE ||
+ s->options.lpc_type == FF_LPC_TYPE_FIXED || n <= max_order) {
uint32_t bits[MAX_FIXED_ORDER+1];
if (max_order > MAX_FIXED_ORDER)
max_order = MAX_FIXED_ORDER;
@@ -1347,6 +1361,33 @@ static av_cold int flac_encode_close(AVCodecContext *avctx)
return 0;
}
+#define FLAGS AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM
+static const AVOption options[] = {
+{ "lpc_coeff_precision", "LPC coefficient precision", offsetof(FlacEncodeContext, options.lpc_coeff_precision), FF_OPT_TYPE_INT, {.dbl = 15 }, 0, MAX_LPC_PRECISION, FLAGS },
+{ "lpc_type", "LPC algorithm", offsetof(FlacEncodeContext, options.lpc_type), FF_OPT_TYPE_INT, {.dbl = FF_LPC_TYPE_DEFAULT }, FF_LPC_TYPE_DEFAULT, FF_LPC_TYPE_NB-1, FLAGS, "lpc_type" },
+{ "none", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_LPC_TYPE_NONE }, INT_MIN, INT_MAX, FLAGS, "lpc_type" },
+{ "fixed", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_LPC_TYPE_FIXED }, INT_MIN, INT_MAX, FLAGS, "lpc_type" },
+{ "levinson", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_LPC_TYPE_LEVINSON }, INT_MIN, INT_MAX, FLAGS, "lpc_type" },
+{ "cholesky", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_LPC_TYPE_CHOLESKY }, INT_MIN, INT_MAX, FLAGS, "lpc_type" },
+{ "lpc_passes", "Number of passes to use for Cholesky factorization during LPC analysis", offsetof(FlacEncodeContext, options.lpc_passes), FF_OPT_TYPE_INT, {.dbl = -1 }, INT_MIN, INT_MAX, FLAGS },
+{ "min_partition_order", NULL, offsetof(FlacEncodeContext, options.min_partition_order), FF_OPT_TYPE_INT, {.dbl = -1 }, -1, MAX_PARTITION_ORDER, FLAGS },
+{ "max_partition_order", NULL, offsetof(FlacEncodeContext, options.max_partition_order), FF_OPT_TYPE_INT, {.dbl = -1 }, -1, MAX_PARTITION_ORDER, FLAGS },
+{ "prediction_order_method", "Search method for selecting prediction order", offsetof(FlacEncodeContext, options.prediction_order_method), FF_OPT_TYPE_INT, {.dbl = -1 }, -1, ORDER_METHOD_LOG, FLAGS, "predm" },
+{ "estimation", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = ORDER_METHOD_EST }, INT_MIN, INT_MAX, FLAGS, "predm" },
+{ "2level", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = ORDER_METHOD_2LEVEL }, INT_MIN, INT_MAX, FLAGS, "predm" },
+{ "4level", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = ORDER_METHOD_4LEVEL }, INT_MIN, INT_MAX, FLAGS, "predm" },
+{ "8level", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = ORDER_METHOD_8LEVEL }, INT_MIN, INT_MAX, FLAGS, "predm" },
+{ "search", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = ORDER_METHOD_SEARCH }, INT_MIN, INT_MAX, FLAGS, "predm" },
+{ "log", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = ORDER_METHOD_LOG }, INT_MIN, INT_MAX, FLAGS, "predm" },
+{ NULL },
+};
+
+static const AVClass flac_encoder_class = {
+ "FLAC encoder",
+ av_default_item_name,
+ options,
+ LIBAVUTIL_VERSION_INT,
+};
AVCodec ff_flac_encoder = {
"flac",
@@ -1360,4 +1401,5 @@ AVCodec ff_flac_encoder = {
.capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY,
.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("FLAC (Free Lossless Audio Codec)"),
+ .priv_class = &flac_encoder_class,
};
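Note on the flacenc.c hunks above: the AVOption table and AVClass move the FLAC-specific knobs (lpc_type, lpc_passes, the partition orders, prediction_order_method, lpc_coeff_precision) onto the encoder's private context; the old AVCodecContext fields stay readable only under FF_API_FLAC_GLOBAL_OPTS, and a default of -1 (or FF_LPC_TYPE_DEFAULT) now means "derive from compression_level" in flac_encode_init(). A hedged usage sketch follows; the exact option-setting entry point available in this libavutil revision is an assumption, the point is only that the settings now live behind avctx->priv_data:

/* Assumes an allocated encoder context whose priv_data is the FlacEncodeContext. */
av_opt_set(avctx->priv_data, "lpc_type",                "cholesky", 0);
av_opt_set(avctx->priv_data, "lpc_passes",              "2",        0);
av_opt_set(avctx->priv_data, "prediction_order_method", "search",   0);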
diff --git a/libavcodec/flashsv.c b/libavcodec/flashsv.c
index 12aabe6004..a57e851078 100644
--- a/libavcodec/flashsv.c
+++ b/libavcodec/flashsv.c
@@ -92,6 +92,7 @@ static av_cold int flashsv_decode_init(AVCodecContext *avctx)
return 1;
}
avctx->pix_fmt = PIX_FMT_BGR24;
+ avcodec_get_frame_defaults(&s->frame);
s->frame.data[0] = NULL;
return 0;
@@ -148,7 +149,7 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
if ((avctx->width != s->image_width) || (avctx->height != s->image_height)) {
av_log(avctx, AV_LOG_ERROR, "Frame width or height differs from first frames!\n");
av_log(avctx, AV_LOG_ERROR, "fh = %d, fv %d vs ch = %d, cv = %d\n", avctx->height,
- avctx->width,s->image_height, s->image_width);
+ avctx->width, s->image_height, s->image_width);
return -1;
}
@@ -195,7 +196,7 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
s->zstream.next_in = buf + (get_bits_count(&gb) / 8);
s->zstream.avail_in = size;
s->zstream.next_out = s->tmpblock;
- s->zstream.avail_out = s->block_size*3;
+ s->zstream.avail_out = s->block_size * 3;
ret = inflate(&(s->zstream), Z_FINISH);
if (ret == Z_DATA_ERROR) {
av_log(avctx, AV_LOG_ERROR, "Zlib resync occurred\n");
diff --git a/libavcodec/flashsvenc.c b/libavcodec/flashsvenc.c
index 042efb5fed..c0327a911b 100644
--- a/libavcodec/flashsvenc.c
+++ b/libavcodec/flashsvenc.c
@@ -260,12 +260,12 @@ static int flashsv_encode_frame(AVCodecContext *avctx, uint8_t *buf,
//mark the frame type so the muxer can mux it correctly
if (I_frame) {
- p->pict_type = FF_I_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
s->last_key_frame = avctx->frame_number;
av_log(avctx, AV_LOG_DEBUG, "Inserting key frame at frame %d\n", avctx->frame_number);
} else {
- p->pict_type = FF_P_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_P;
p->key_frame = 0;
}
diff --git a/libavcodec/flicvideo.c b/libavcodec/flicvideo.c
index 7d2fd87647..8cc72e241e 100644
--- a/libavcodec/flicvideo.c
+++ b/libavcodec/flicvideo.c
@@ -118,6 +118,7 @@ static av_cold int flic_decode_init(AVCodecContext *avctx)
return -1;
}
+ avcodec_get_frame_defaults(&s->frame);
s->frame.data[0] = NULL;
s->new_palette = 0;
diff --git a/libavcodec/flvdec.c b/libavcodec/flvdec.c
index cf4dfe43bc..2a6694403b 100644
--- a/libavcodec/flvdec.c
+++ b/libavcodec/flvdec.c
@@ -88,10 +88,10 @@ int ff_flv_decode_picture_header(MpegEncContext *s)
s->width = width;
s->height = height;
- s->pict_type = FF_I_TYPE + get_bits(&s->gb, 2);
- s->dropable= s->pict_type > FF_P_TYPE;
+ s->pict_type = AV_PICTURE_TYPE_I + get_bits(&s->gb, 2);
+ s->dropable= s->pict_type > AV_PICTURE_TYPE_P;
if (s->dropable)
- s->pict_type = FF_P_TYPE;
+ s->pict_type = AV_PICTURE_TYPE_P;
skip_bits1(&s->gb); /* deblocking flag */
s->chroma_qscale= s->qscale = get_bits(&s->gb, 5);
@@ -109,7 +109,7 @@ int ff_flv_decode_picture_header(MpegEncContext *s)
if(s->avctx->debug & FF_DEBUG_PICT_INFO){
av_log(s->avctx, AV_LOG_DEBUG, "%c esc_type:%d, qp:%d num:%d\n",
- s->dropable ? 'D' : av_get_pict_type_char(s->pict_type), s->h263_flv-1, s->qscale, s->picture_number);
+ s->dropable ? 'D' : av_get_picture_type_char(s->pict_type), s->h263_flv-1, s->qscale, s->picture_number);
}
s->y_dc_scale_table=
diff --git a/libavcodec/flvenc.c b/libavcodec/flvenc.c
index 5e81dcb15d..fc8c2a474f 100644
--- a/libavcodec/flvenc.c
+++ b/libavcodec/flvenc.c
@@ -53,7 +53,7 @@ void ff_flv_encode_picture_header(MpegEncContext * s, int picture_number)
put_bits(&s->pb, 16, s->width);
put_bits(&s->pb, 16, s->height);
}
- put_bits(&s->pb, 2, s->pict_type == FF_P_TYPE); /* PictureType */
+ put_bits(&s->pb, 2, s->pict_type == AV_PICTURE_TYPE_P); /* PictureType */
put_bits(&s->pb, 1, 1); /* DeblockingFlag: on */
put_bits(&s->pb, 5, s->qscale); /* Quantizer */
put_bits(&s->pb, 1, 0); /* ExtraInformation */
diff --git a/libavcodec/fraps.c b/libavcodec/fraps.c
index 947c52e21d..7e96b0d312 100644
--- a/libavcodec/fraps.c
+++ b/libavcodec/fraps.c
@@ -60,6 +60,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
{
FrapsContext * const s = avctx->priv_data;
+ avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = (AVFrame*)&s->frame;
s->avctx = avctx;
@@ -180,10 +181,10 @@ static int decode_frame(AVCodecContext *avctx,
return -1;
}
/* bit 31 means same as previous pic */
- f->pict_type = (header & (1U<<31))? FF_P_TYPE : FF_I_TYPE;
- f->key_frame = f->pict_type == FF_I_TYPE;
+ f->pict_type = (header & (1U<<31))? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
+ f->key_frame = f->pict_type == AV_PICTURE_TYPE_I;
- if (f->pict_type == FF_I_TYPE) {
+ if (f->pict_type == AV_PICTURE_TYPE_I) {
buf32=(const uint32_t*)buf;
for(y=0; y<avctx->height/2; y++){
luma1=(uint32_t*)&f->data[0][ y*2*f->linesize[0] ];
@@ -223,10 +224,10 @@ static int decode_frame(AVCodecContext *avctx,
return -1;
}
/* bit 31 means same as previous pic */
- f->pict_type = (header & (1U<<31))? FF_P_TYPE : FF_I_TYPE;
- f->key_frame = f->pict_type == FF_I_TYPE;
+ f->pict_type = (header & (1U<<31))? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
+ f->key_frame = f->pict_type == AV_PICTURE_TYPE_I;
- if (f->pict_type == FF_I_TYPE) {
+ if (f->pict_type == AV_PICTURE_TYPE_I) {
for(y=0; y<avctx->height; y++)
memcpy(&f->data[0][ (avctx->height-y)*f->linesize[0] ],
&buf[y*avctx->width*3],
@@ -252,11 +253,11 @@ static int decode_frame(AVCodecContext *avctx,
}
/* skip frame */
if(buf_size == 8) {
- f->pict_type = FF_P_TYPE;
+ f->pict_type = AV_PICTURE_TYPE_P;
f->key_frame = 0;
break;
}
- f->pict_type = FF_I_TYPE;
+ f->pict_type = AV_PICTURE_TYPE_I;
f->key_frame = 1;
if ((AV_RL32(buf) != FPS_TAG)||(buf_size < (planes*1024 + 24))) {
av_log(avctx, AV_LOG_ERROR, "Fraps: error in data stream\n");
@@ -297,11 +298,11 @@ static int decode_frame(AVCodecContext *avctx,
}
/* skip frame */
if(buf_size == 8) {
- f->pict_type = FF_P_TYPE;
+ f->pict_type = AV_PICTURE_TYPE_P;
f->key_frame = 0;
break;
}
- f->pict_type = FF_I_TYPE;
+ f->pict_type = AV_PICTURE_TYPE_I;
f->key_frame = 1;
if ((AV_RL32(buf) != FPS_TAG)||(buf_size < (planes*1024 + 24))) {
av_log(avctx, AV_LOG_ERROR, "Fraps: error in data stream\n");
diff --git a/libavcodec/frwu.c b/libavcodec/frwu.c
index 306166bdc5..08dfbf0c9b 100644
--- a/libavcodec/frwu.c
+++ b/libavcodec/frwu.c
@@ -27,7 +27,7 @@
static av_cold int decode_init(AVCodecContext *avctx)
{
if (avctx->width & 1) {
- av_log(avctx, AV_LOG_ERROR, "FRWU needs even width\n");
+ av_log(avctx, AV_LOG_ERROR, "frwu needs even width\n");
return -1;
}
avctx->pix_fmt = PIX_FMT_UYVY422;
@@ -61,7 +61,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
if (avctx->get_buffer(avctx, pic) < 0)
return -1;
- pic->pict_type = FF_I_TYPE;
+ pic->pict_type = AV_PICTURE_TYPE_I;
pic->key_frame = 1;
pic->interlaced_frame = 1;
pic->top_field_first = 1;
@@ -110,7 +110,7 @@ static av_cold int decode_close(AVCodecContext *avctx)
}
AVCodec ff_frwu_decoder = {
- "FRWU",
+ "frwu",
AVMEDIA_TYPE_VIDEO,
CODEC_ID_FRWU,
0,
diff --git a/libavcodec/get_bits.h b/libavcodec/get_bits.h
index 210ccc7ca6..8579c87cd1 100644
--- a/libavcodec/get_bits.h
+++ b/libavcodec/get_bits.h
@@ -127,7 +127,7 @@ for examples see get_bits, show_bits, skip_bits, get_vlc
# define OPEN_READER(name, gb) \
unsigned int name##_index = (gb)->index; \
- int name##_cache = 0
+ av_unused unsigned int name##_cache
# define CLOSE_READER(name, gb) (gb)->index = name##_index
diff --git a/libavcodec/gif.c b/libavcodec/gif.c
index 218b80babb..121b873888 100644
--- a/libavcodec/gif.c
+++ b/libavcodec/gif.c
@@ -150,7 +150,7 @@ static int gif_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int bu
uint8_t *end = outbuf + buf_size;
*p = *pict;
- p->pict_type = FF_I_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
gif_image_write_header(avctx, &outbuf_ptr, (uint32_t *)pict->data[1]);
gif_image_write_image(avctx, &outbuf_ptr, end, pict->data[0], pict->linesize[0]);
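Most of the remaining hunks in this merge are the mechanical rename from the old FF_*_TYPE picture-type macros to the AVPictureType enum, together with av_get_pict_type_char() becoming av_get_picture_type_char(). The numeric values keep the same ordering, which is why expressions such as AV_PICTURE_TYPE_I + get_bits(&s->gb, 2) in flvdec.c still work unchanged. The correspondence, as defined in the avutil.h this tree merges in:

enum AVPictureType {
    AV_PICTURE_TYPE_I = 1,   /* was FF_I_TYPE  */
    AV_PICTURE_TYPE_P,       /* was FF_P_TYPE  */
    AV_PICTURE_TYPE_B,       /* was FF_B_TYPE  */
    AV_PICTURE_TYPE_S,       /* was FF_S_TYPE  */
    AV_PICTURE_TYPE_SI,      /* was FF_SI_TYPE */
    AV_PICTURE_TYPE_SP,      /* was FF_SP_TYPE */
    AV_PICTURE_TYPE_BI,      /* was FF_BI_TYPE */
};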
diff --git a/libavcodec/h261dec.c b/libavcodec/h261dec.c
index 060aef6173..856fae3d9c 100644
--- a/libavcodec/h261dec.c
+++ b/libavcodec/h261dec.c
@@ -497,9 +497,9 @@ static int h261_decode_picture_header(H261Context *h){
skip_bits(&s->gb, 8);
}
- // h261 has no I-FRAMES, but if we pass FF_I_TYPE for the first frame, the codec crashes if it does
+ // h261 has no I-FRAMES, but if we pass AV_PICTURE_TYPE_I for the first frame, the codec crashes if it does
// not contain all I-blocks (e.g. when a packet is lost)
- s->pict_type = FF_P_TYPE;
+ s->pict_type = AV_PICTURE_TYPE_P;
h->gob_number = 0;
return 0;
@@ -597,14 +597,14 @@ retry:
// for skipping the frame
s->current_picture.pict_type= s->pict_type;
- s->current_picture.key_frame= s->pict_type == FF_I_TYPE;
+ s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
#if FF_API_HURRY_UP
/* skip everything if we are in a hurry>=5 */
if(avctx->hurry_up>=5) return get_consumed_bytes(s, buf_size);
#endif
- if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==FF_B_TYPE)
- ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=FF_I_TYPE)
+ if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==AV_PICTURE_TYPE_B)
+ ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=AV_PICTURE_TYPE_I)
|| avctx->skip_frame >= AVDISCARD_ALL)
return get_consumed_bytes(s, buf_size);
diff --git a/libavcodec/h263.c b/libavcodec/h263.c
index 50ea6ce2b8..43d5b4b3e9 100644
--- a/libavcodec/h263.c
+++ b/libavcodec/h263.c
@@ -148,7 +148,7 @@ void ff_h263_loop_filter(MpegEncContext * s){
uint8_t *dest_cb= s->dest[1];
uint8_t *dest_cr= s->dest[2];
-// if(s->pict_type==FF_B_TYPE && !s->readable) return;
+// if(s->pict_type==AV_PICTURE_TYPE_B && !s->readable) return;
/*
Diag Top
diff --git a/libavcodec/h263dec.c b/libavcodec/h263dec.c
index 78e432aaf5..ec5cb8164c 100644
--- a/libavcodec/h263dec.c
+++ b/libavcodec/h263dec.c
@@ -218,7 +218,7 @@ static int decode_slice(MpegEncContext *s){
//printf("%d %d %06X\n", ret, get_bits_count(&s->gb), show_bits(&s->gb, 24));
ret= s->decode_mb(s, s->block);
- if (s->pict_type!=FF_B_TYPE)
+ if (s->pict_type!=AV_PICTURE_TYPE_B)
ff_h263_update_motion_val(s);
if(ret<0){
@@ -310,7 +310,7 @@ static int decode_slice(MpegEncContext *s){
int max_extra=7;
/* no markers in M$ crap */
- if(s->msmpeg4_version && s->pict_type==FF_I_TYPE)
+ if(s->msmpeg4_version && s->pict_type==AV_PICTURE_TYPE_I)
max_extra+= 17;
/* buggy padding but the frame should still end approximately at the bitstream end */
@@ -600,16 +600,16 @@ retry:
// for skipping the frame
s->current_picture.pict_type= s->pict_type;
- s->current_picture.key_frame= s->pict_type == FF_I_TYPE;
+ s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
/* skip B-frames if we don't have reference frames */
- if(s->last_picture_ptr==NULL && (s->pict_type==FF_B_TYPE || s->dropable)) return get_consumed_bytes(s, buf_size);
+ if(s->last_picture_ptr==NULL && (s->pict_type==AV_PICTURE_TYPE_B || s->dropable)) return get_consumed_bytes(s, buf_size);
#if FF_API_HURRY_UP
/* skip b frames if we are in a hurry */
if(avctx->hurry_up && s->pict_type==FF_B_TYPE) return get_consumed_bytes(s, buf_size);
#endif
- if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==FF_B_TYPE)
- || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=FF_I_TYPE)
+ if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==AV_PICTURE_TYPE_B)
+ || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=AV_PICTURE_TYPE_I)
|| avctx->skip_frame >= AVDISCARD_ALL)
return get_consumed_bytes(s, buf_size);
#if FF_API_HURRY_UP
@@ -618,16 +618,16 @@ retry:
#endif
if(s->next_p_frame_damaged){
- if(s->pict_type==FF_B_TYPE)
+ if(s->pict_type==AV_PICTURE_TYPE_B)
return get_consumed_bytes(s, buf_size);
else
s->next_p_frame_damaged=0;
}
- if((s->avctx->flags2 & CODEC_FLAG2_FAST) && s->pict_type==FF_B_TYPE){
+ if((s->avctx->flags2 & CODEC_FLAG2_FAST) && s->pict_type==AV_PICTURE_TYPE_B){
s->me.qpel_put= s->dsp.put_2tap_qpel_pixels_tab;
s->me.qpel_avg= s->dsp.avg_2tap_qpel_pixels_tab;
- }else if((!s->no_rounding) || s->pict_type==FF_B_TYPE){
+ }else if((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
s->me.qpel_put= s->dsp.put_qpel_pixels_tab;
s->me.qpel_avg= s->dsp.avg_qpel_pixels_tab;
}else{
@@ -680,7 +680,7 @@ retry:
decode_slice(s);
}
- if (s->h263_msmpeg4 && s->msmpeg4_version<4 && s->pict_type==FF_I_TYPE)
+ if (s->h263_msmpeg4 && s->msmpeg4_version<4 && s->pict_type==AV_PICTURE_TYPE_I)
if(!CONFIG_MSMPEG4_DECODER || msmpeg4_decode_ext_header(s, buf_size) < 0){
s->error_status_table[s->mb_num-1]= AC_ERROR|DC_ERROR|MV_ERROR;
}
@@ -730,7 +730,7 @@ intrax8_decoded:
assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type);
assert(s->current_picture.pict_type == s->pict_type);
- if (s->pict_type == FF_B_TYPE || s->low_delay) {
+ if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
*pict= *(AVFrame*)s->current_picture_ptr;
} else if (s->last_picture_ptr != NULL) {
*pict= *(AVFrame*)s->last_picture_ptr;
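The frame-skipping tests touched in h261dec.c and h263dec.c above (and again in the h264.c NAL loop below, which adds an AVDISCARD_BIDIR rung) all follow the same ladder on avctx->skip_frame: NONREF drops B-frames, NONKEY drops everything but I-frames, ALL drops every frame. Written out once as an illustrative helper (the function name is not part of the tree):

/* Nonzero if this picture should be skipped under the given discard level. */
static int should_skip(enum AVDiscard skip, enum AVPictureType pt)
{
    return (skip >= AVDISCARD_NONREF && pt == AV_PICTURE_TYPE_B)
        || (skip >= AVDISCARD_NONKEY && pt != AV_PICTURE_TYPE_I)
        ||  skip >= AVDISCARD_ALL;
}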
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index 8e3ec336aa..e00e5e17d4 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -45,11 +45,11 @@
//#undef NDEBUG
#include <assert.h>
-static const uint8_t rem6[QP_MAX_MAX+1]={
+static const uint8_t rem6[QP_MAX_NUM+1]={
0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
};
-static const uint8_t div6[QP_MAX_MAX+1]={
+static const uint8_t div6[QP_MAX_NUM+1]={
0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9,10,10,10,10,
};
@@ -586,6 +586,7 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx){
h->sps.bit_depth_luma = avctx->bits_per_raw_sample = 8;
h->pixel_shift = 0;
+ h->sps.bit_depth_luma = avctx->bits_per_raw_sample = 8;
h->thread_context[0] = h;
h->outputed_poc = h->next_outputed_poc = INT_MIN;
@@ -733,6 +734,7 @@ static int decode_update_thread_context(AVCodecContext *dst, const AVCodecContex
int ff_h264_frame_start(H264Context *h){
MpegEncContext * const s = &h->s;
int i;
+ const int pixel_shift = h->pixel_shift;
if(MPV_frame_start(s, s->avctx) < 0)
return -1;
@@ -749,14 +751,14 @@ int ff_h264_frame_start(H264Context *h){
assert(s->linesize && s->uvlinesize);
for(i=0; i<16; i++){
- h->block_offset[i]= (4*((scan8[i] - scan8[0])&7)<<h->pixel_shift) + 4*s->linesize*((scan8[i] - scan8[0])>>3);
- h->block_offset[24+i]= (4*((scan8[i] - scan8[0])&7)<<h->pixel_shift) + 8*s->linesize*((scan8[i] - scan8[0])>>3);
+ h->block_offset[i]= (4*((scan8[i] - scan8[0])&7) << pixel_shift) + 4*s->linesize*((scan8[i] - scan8[0])>>3);
+ h->block_offset[24+i]= (4*((scan8[i] - scan8[0])&7) << pixel_shift) + 8*s->linesize*((scan8[i] - scan8[0])>>3);
}
for(i=0; i<4; i++){
h->block_offset[16+i]=
- h->block_offset[20+i]= (4*((scan8[i] - scan8[0])&7)<<h->pixel_shift) + 4*s->uvlinesize*((scan8[i] - scan8[0])>>3);
+ h->block_offset[20+i]= (4*((scan8[i] - scan8[0])&7) << pixel_shift) + 4*s->uvlinesize*((scan8[i] - scan8[0])>>3);
h->block_offset[24+16+i]=
- h->block_offset[24+20+i]= (4*((scan8[i] - scan8[0])&7)<<h->pixel_shift) + 8*s->uvlinesize*((scan8[i] - scan8[0])>>3);
+ h->block_offset[24+20+i]= (4*((scan8[i] - scan8[0])&7) << pixel_shift) + 8*s->uvlinesize*((scan8[i] - scan8[0])>>3);
}
/* can't be in alloc_tables because linesize isn't known there.
@@ -918,7 +920,7 @@ static void decode_postinit(H264Context *h){
else if((out_of_order && pics-1 == s->avctx->has_b_frames && s->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT)
|| (s->low_delay &&
((h->next_outputed_poc != INT_MIN && out->poc > h->next_outputed_poc + 2)
- || cur->pict_type == FF_B_TYPE)))
+ || cur->pict_type == AV_PICTURE_TYPE_B)))
{
s->low_delay = 0;
s->avctx->has_b_frames++;
@@ -948,6 +950,7 @@ static inline void backup_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src
MpegEncContext * const s = &h->s;
uint8_t *top_border;
int top_idx = 1;
+ const int pixel_shift = h->pixel_shift;
src_y -= linesize;
src_cb -= uvlinesize;
@@ -958,10 +961,10 @@ static inline void backup_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src
if(!MB_MBAFF){
top_border = h->top_borders[0][s->mb_x];
AV_COPY128(top_border, src_y + 15*linesize);
- if (h->pixel_shift)
+ if (pixel_shift)
AV_COPY128(top_border+16, src_y+15*linesize+16);
if(simple || !CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
- if (h->pixel_shift) {
+ if (pixel_shift) {
AV_COPY128(top_border+32, src_cb+7*uvlinesize);
AV_COPY128(top_border+48, src_cr+7*uvlinesize);
} else {
@@ -980,11 +983,11 @@ static inline void backup_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src
// There are two lines saved, the line above the top macroblock of a pair,
// and the line above the bottom macroblock
AV_COPY128(top_border, src_y + 16*linesize);
- if (h->pixel_shift)
+ if (pixel_shift)
AV_COPY128(top_border+16, src_y+16*linesize+16);
if(simple || !CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
- if (h->pixel_shift) {
+ if (pixel_shift) {
AV_COPY128(top_border+32, src_cb+8*uvlinesize);
AV_COPY128(top_border+48, src_cr+8*uvlinesize);
} else {
@@ -994,7 +997,10 @@ static inline void backup_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src
}
}
-static inline void xchg_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int xchg, int simple, int pixel_shift){
+static inline void xchg_mb_border(H264Context *h, uint8_t *src_y,
+ uint8_t *src_cb, uint8_t *src_cr,
+ int linesize, int uvlinesize,
+ int xchg, int simple, int pixel_shift){
MpegEncContext * const s = &h->s;
int deblock_left;
int deblock_top;
@@ -1040,38 +1046,38 @@ else AV_COPY64(b,a);
if(deblock_top){
if(deblock_left){
- XCHG(top_border_m1+(8<<pixel_shift), src_y -(7<<h->pixel_shift), 1);
+ XCHG(top_border_m1 + (8 << pixel_shift), src_y - (7 << pixel_shift), 1);
}
- XCHG(top_border+(0<<pixel_shift), src_y +(1<<pixel_shift), xchg);
- XCHG(top_border+(8<<pixel_shift), src_y +(9<<pixel_shift), 1);
+ XCHG(top_border + (0 << pixel_shift), src_y + (1 << pixel_shift), xchg);
+ XCHG(top_border + (8 << pixel_shift), src_y + (9 << pixel_shift), 1);
if(s->mb_x+1 < s->mb_width){
- XCHG(h->top_borders[top_idx][s->mb_x+1], src_y +(17<<pixel_shift), 1);
+ XCHG(h->top_borders[top_idx][s->mb_x+1], src_y + (17 << pixel_shift), 1);
}
}
if(simple || !CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
if(deblock_top){
if(deblock_left){
- XCHG(top_border_m1+(16<<pixel_shift), src_cb -(7<<pixel_shift), 1);
- XCHG(top_border_m1+(24<<pixel_shift), src_cr -(7<<pixel_shift), 1);
+ XCHG(top_border_m1 + (16 << pixel_shift), src_cb - (7 << pixel_shift), 1);
+ XCHG(top_border_m1 + (24 << pixel_shift), src_cr - (7 << pixel_shift), 1);
}
- XCHG(top_border+(16<<pixel_shift), src_cb+1+pixel_shift, 1);
- XCHG(top_border+(24<<pixel_shift), src_cr+1+pixel_shift, 1);
+ XCHG(top_border + (16 << pixel_shift), src_cb+1+pixel_shift, 1);
+ XCHG(top_border + (24 << pixel_shift), src_cr+1+pixel_shift, 1);
}
}
}
-static av_always_inline int dctcoef_get(H264Context *h, DCTELEM *mb, int index, int pixel_shift) {
- if (!pixel_shift)
- return mb[index];
- else
- return ((int32_t*)mb)[index];
+static av_always_inline int dctcoef_get(DCTELEM *mb, int high_bit_depth, int index) {
+ if (high_bit_depth) {
+ return AV_RN32A(((int32_t*)mb) + index);
+ } else
+ return AV_RN16A(mb + index);
}
-static av_always_inline void dctcoef_set(H264Context *h, DCTELEM *mb, int index, int value, int pixel_shift) {
- if (!pixel_shift)
- mb[index] = value;
- else
- ((int32_t*)mb)[index] = value;
+static av_always_inline void dctcoef_set(DCTELEM *mb, int high_bit_depth, int index, int value) {
+ if (high_bit_depth) {
+ AV_WN32A(((int32_t*)mb) + index, value);
+ } else
+ AV_WN16A(mb + index, value);
}
static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, int pixel_shift){
@@ -1090,12 +1096,12 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i
void (*idct_add)(uint8_t *dst, DCTELEM *block, int stride);
void (*idct_dc_add)(uint8_t *dst, DCTELEM *block, int stride);
- dest_y = s->current_picture.data[0] + ((mb_x<<pixel_shift) + mb_y * s->linesize ) * 16;
- dest_cb = s->current_picture.data[1] + ((mb_x<<pixel_shift) + mb_y * s->uvlinesize) * 8;
- dest_cr = s->current_picture.data[2] + ((mb_x<<pixel_shift) + mb_y * s->uvlinesize) * 8;
+ dest_y = s->current_picture.data[0] + ((mb_x << pixel_shift) + mb_y * s->linesize ) * 16;
+ dest_cb = s->current_picture.data[1] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * 8;
+ dest_cr = s->current_picture.data[2] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * 8;
- s->dsp.prefetch(dest_y + (s->mb_x&3)*4*s->linesize + (64<<pixel_shift), s->linesize, 4);
- s->dsp.prefetch(dest_cb + (s->mb_x&7)*s->uvlinesize + (64<<pixel_shift), dest_cr - dest_cb, 2);
+ s->dsp.prefetch(dest_y + (s->mb_x&3)*4*s->linesize + (64 << pixel_shift), s->linesize, 4);
+ s->dsp.prefetch(dest_cb + (s->mb_x&7)*s->uvlinesize + (64 << pixel_shift), dest_cr - dest_cb, 2);
h->list_counts[mb_xy]= h->list_count;
@@ -1186,16 +1192,16 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i
uint8_t * const ptr= dest_y + block_offset[i];
const int dir= h->intra4x4_pred_mode_cache[ scan8[i] ];
if(transform_bypass && h->sps.profile_idc==244 && dir<=1){
- h->hpc.pred8x8l_add[dir](ptr, h->mb + (i*16<<pixel_shift), linesize);
+ h->hpc.pred8x8l_add[dir](ptr, h->mb + (i*16 << pixel_shift), linesize);
}else{
const int nnz = h->non_zero_count_cache[ scan8[i] ];
h->hpc.pred8x8l[ dir ](ptr, (h->topleft_samples_available<<i)&0x8000,
(h->topright_samples_available<<i)&0x4000, linesize);
if(nnz){
- if(nnz == 1 && dctcoef_get(h, h->mb, i*16, pixel_shift))
- idct_dc_add(ptr, h->mb + (i*16<<pixel_shift), linesize);
+ if(nnz == 1 && dctcoef_get(h->mb, pixel_shift, i*16))
+ idct_dc_add(ptr, h->mb + (i*16 << pixel_shift), linesize);
else
- idct_add (ptr, h->mb + (i*16<<pixel_shift), linesize);
+ idct_add (ptr, h->mb + (i*16 << pixel_shift), linesize);
}
}
}
@@ -1212,7 +1218,7 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i
const int dir= h->intra4x4_pred_mode_cache[ scan8[i] ];
if(transform_bypass && h->sps.profile_idc==244 && dir<=1){
- h->hpc.pred4x4_add[dir](ptr, h->mb + (i*16<<pixel_shift), linesize);
+ h->hpc.pred4x4_add[dir](ptr, h->mb + (i*16 << pixel_shift), linesize);
}else{
uint8_t *topright;
int nnz, tr;
@@ -1229,7 +1235,7 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i
topright= (uint8_t*) &tr;
}
}else
- topright= ptr + (4<<pixel_shift) - linesize;
+ topright= ptr + (4 << pixel_shift) - linesize;
}else
topright= NULL;
@@ -1237,8 +1243,8 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i
nnz = h->non_zero_count_cache[ scan8[i] ];
if(nnz){
if(is_h264){
- if(nnz == 1 && dctcoef_get(h, h->mb, i*16, pixel_shift))
- idct_dc_add(ptr, h->mb + (i*16<<pixel_shift), linesize);
+ if(nnz == 1 && dctcoef_get(h->mb, pixel_shift, i*16))
+ idct_dc_add(ptr, h->mb + (i*16 << pixel_shift), linesize);
else
idct_add (ptr, h->mb + (i*16<<pixel_shift), linesize);
}
@@ -1261,7 +1267,7 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i
static const uint8_t dc_mapping[16] = { 0*16, 1*16, 4*16, 5*16, 2*16, 3*16, 6*16, 7*16,
8*16, 9*16,12*16,13*16,10*16,11*16,14*16,15*16};
for(i = 0; i < 16; i++)
- dctcoef_set(h, h->mb, dc_mapping[i], dctcoef_get(h, h->mb_luma_dc, i,pixel_shift),pixel_shift);
+ dctcoef_set(h->mb, pixel_shift, dc_mapping[i], dctcoef_get(h->mb_luma_dc, pixel_shift, i));
}
}
}
@@ -1288,8 +1294,8 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i
h->hpc.pred16x16_add[h->intra16x16_pred_mode](dest_y, block_offset, h->mb, linesize);
}else{
for(i=0; i<16; i++){
- if(h->non_zero_count_cache[ scan8[i] ] || dctcoef_get(h, h->mb, i*16,pixel_shift))
- s->dsp.add_pixels4(dest_y + block_offset[i], h->mb + (i*16<<pixel_shift), linesize);
+ if(h->non_zero_count_cache[ scan8[i] ] || dctcoef_get(h->mb, pixel_shift, i*16))
+ s->dsp.add_pixels4(dest_y + block_offset[i], h->mb + (i*16 << pixel_shift), linesize);
}
}
}else{
@@ -1301,7 +1307,7 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i
idct_add= IS_8x8DCT(mb_type) ? s->dsp.add_pixels8 : s->dsp.add_pixels4;
for(i=0; i<16; i+=di){
if(h->non_zero_count_cache[ scan8[i] ]){
- idct_add(dest_y + block_offset[i], h->mb + (i*16<<pixel_shift), linesize);
+ idct_add(dest_y + block_offset[i], h->mb + (i*16 << pixel_shift), linesize);
}
}
}else{
@@ -1329,21 +1335,21 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i
uint8_t *dest[2] = {dest_cb, dest_cr};
if(transform_bypass){
if(IS_INTRA(mb_type) && h->sps.profile_idc==244 && (h->chroma_pred_mode==VERT_PRED8x8 || h->chroma_pred_mode==HOR_PRED8x8)){
- h->hpc.pred8x8_add[h->chroma_pred_mode](dest[0], block_offset + 16, h->mb + (16*16<<pixel_shift), uvlinesize);
- h->hpc.pred8x8_add[h->chroma_pred_mode](dest[1], block_offset + 20, h->mb + (20*16<<pixel_shift), uvlinesize);
+ h->hpc.pred8x8_add[h->chroma_pred_mode](dest[0], block_offset + 16, h->mb + (16*16 << pixel_shift), uvlinesize);
+ h->hpc.pred8x8_add[h->chroma_pred_mode](dest[1], block_offset + 20, h->mb + (20*16 << pixel_shift), uvlinesize);
}else{
idct_add = s->dsp.add_pixels4;
for(i=16; i<16+8; i++){
- if(h->non_zero_count_cache[ scan8[i] ] || dctcoef_get(h, h->mb, i*16,pixel_shift))
- idct_add (dest[(i&4)>>2] + block_offset[i], h->mb + (i*16<<pixel_shift), uvlinesize);
+ if(h->non_zero_count_cache[ scan8[i] ] || dctcoef_get(h->mb, pixel_shift, i*16))
+ idct_add (dest[(i&4)>>2] + block_offset[i], h->mb + (i*16 << pixel_shift), uvlinesize);
}
}
}else{
if(is_h264){
if(h->non_zero_count_cache[ scan8[CHROMA_DC_BLOCK_INDEX+0] ])
- h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + (16*16<<pixel_shift) , h->dequant4_coeff[IS_INTRA(mb_type) ? 1:4][h->chroma_qp[0]][0]);
+ h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + (16*16 << pixel_shift) , h->dequant4_coeff[IS_INTRA(mb_type) ? 1:4][h->chroma_qp[0]][0]);
if(h->non_zero_count_cache[ scan8[CHROMA_DC_BLOCK_INDEX+1] ])
- h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + ((16*16+4*16)<<pixel_shift), h->dequant4_coeff[IS_INTRA(mb_type) ? 2:5][h->chroma_qp[1]][0]);
+ h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + ((16*16+4*16) << pixel_shift), h->dequant4_coeff[IS_INTRA(mb_type) ? 2:5][h->chroma_qp[1]][0]);
h->h264dsp.h264_idct_add8(dest, block_offset,
h->mb, uvlinesize,
h->non_zero_count_cache);
@@ -1370,9 +1376,12 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i
/**
* Process a macroblock; this case avoids checks for expensive uncommon cases.
*/
-static void hl_decode_mb_simple8(H264Context *h){
- hl_decode_mb_internal(h, 1, 0);
+#define hl_decode_mb_simple(sh, bits) \
+static void hl_decode_mb_simple_ ## bits(H264Context *h){ \
+ hl_decode_mb_internal(h, 1, sh); \
}
+hl_decode_mb_simple(0, 8);
+hl_decode_mb_simple(1, 16);
/**
* Process a macroblock; this handles edge cases, such as interlacing.
@@ -1387,11 +1396,12 @@ void ff_h264_hl_decode_mb(H264Context *h){
const int mb_type= s->current_picture.mb_type[mb_xy];
int is_complex = CONFIG_SMALL || h->is_complex || IS_INTRA_PCM(mb_type) || s->qscale == 0;
- if (is_complex || h->pixel_shift)
+ if (is_complex) {
hl_decode_mb_complex(h);
- else{
- hl_decode_mb_simple8(h);
- }
+ } else if (h->pixel_shift) {
+ hl_decode_mb_simple_16(h);
+ } else
+ hl_decode_mb_simple_8(h);
}
static int pred_weight_table(H264Context *h){
@@ -1449,7 +1459,7 @@ static int pred_weight_table(H264Context *h){
}
}
}
- if(h->slice_type_nos != FF_B_TYPE) break;
+ if(h->slice_type_nos != AV_PICTURE_TYPE_B) break;
}
h->use_weight= h->use_weight || h->use_weight_chroma;
return 0;
@@ -1817,7 +1827,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
h->slice_type_fixed=0;
slice_type= golomb_to_pict_type[ slice_type ];
- if (slice_type == FF_I_TYPE
+ if (slice_type == AV_PICTURE_TYPE_I
|| (h0->current_slice != 0 && slice_type == h0->last_slice_type) ) {
default_ref_list_done = 1;
}
@@ -2110,15 +2120,15 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
h->ref_count[0]= h->pps.ref_count[0];
h->ref_count[1]= h->pps.ref_count[1];
- if(h->slice_type_nos != FF_I_TYPE){
- if(h->slice_type_nos == FF_B_TYPE){
+ if(h->slice_type_nos != AV_PICTURE_TYPE_I){
+ if(h->slice_type_nos == AV_PICTURE_TYPE_B){
h->direct_spatial_mv_pred= get_bits1(&s->gb);
}
num_ref_idx_active_override_flag= get_bits1(&s->gb);
if(num_ref_idx_active_override_flag){
h->ref_count[0]= get_ue_golomb(&s->gb) + 1;
- if(h->slice_type_nos==FF_B_TYPE)
+ if(h->slice_type_nos==AV_PICTURE_TYPE_B)
h->ref_count[1]= get_ue_golomb(&s->gb) + 1;
if(h->ref_count[0]-1 > 32-1 || h->ref_count[1]-1 > 32-1){
@@ -2127,7 +2137,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
return -1;
}
}
- if(h->slice_type_nos == FF_B_TYPE)
+ if(h->slice_type_nos == AV_PICTURE_TYPE_B)
h->list_count= 2;
else
h->list_count= 1;
@@ -2138,22 +2148,22 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
ff_h264_fill_default_ref_list(h);
}
- if(h->slice_type_nos!=FF_I_TYPE && ff_h264_decode_ref_pic_list_reordering(h) < 0)
+ if(h->slice_type_nos!=AV_PICTURE_TYPE_I && ff_h264_decode_ref_pic_list_reordering(h) < 0)
return -1;
- if(h->slice_type_nos!=FF_I_TYPE){
+ if(h->slice_type_nos!=AV_PICTURE_TYPE_I){
s->last_picture_ptr= &h->ref_list[0][0];
ff_copy_picture(&s->last_picture, s->last_picture_ptr);
}
- if(h->slice_type_nos==FF_B_TYPE){
+ if(h->slice_type_nos==AV_PICTURE_TYPE_B){
s->next_picture_ptr= &h->ref_list[1][0];
ff_copy_picture(&s->next_picture, s->next_picture_ptr);
}
- if( (h->pps.weighted_pred && h->slice_type_nos == FF_P_TYPE )
- || (h->pps.weighted_bipred_idc==1 && h->slice_type_nos== FF_B_TYPE ) )
+ if( (h->pps.weighted_pred && h->slice_type_nos == AV_PICTURE_TYPE_P )
+ || (h->pps.weighted_bipred_idc==1 && h->slice_type_nos== AV_PICTURE_TYPE_B ) )
pred_weight_table(h);
- else if(h->pps.weighted_bipred_idc==2 && h->slice_type_nos== FF_B_TYPE){
+ else if(h->pps.weighted_bipred_idc==2 && h->slice_type_nos== AV_PICTURE_TYPE_B){
implicit_weight_table(h, -1);
}else {
h->use_weight = 0;
@@ -2169,17 +2179,17 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
if(FRAME_MBAFF){
ff_h264_fill_mbaff_ref_list(h);
- if(h->pps.weighted_bipred_idc==2 && h->slice_type_nos== FF_B_TYPE){
+ if(h->pps.weighted_bipred_idc==2 && h->slice_type_nos== AV_PICTURE_TYPE_B){
implicit_weight_table(h, 0);
implicit_weight_table(h, 1);
}
}
- if(h->slice_type_nos==FF_B_TYPE && !h->direct_spatial_mv_pred)
+ if(h->slice_type_nos==AV_PICTURE_TYPE_B && !h->direct_spatial_mv_pred)
ff_h264_direct_dist_scale_factor(h);
ff_h264_direct_ref_list_init(h);
- if( h->slice_type_nos != FF_I_TYPE && h->pps.cabac ){
+ if( h->slice_type_nos != AV_PICTURE_TYPE_I && h->pps.cabac ){
tmp = get_ue_golomb_31(&s->gb);
if(tmp > 2){
av_log(s->avctx, AV_LOG_ERROR, "cabac_init_idc overflow\n");
@@ -2198,10 +2208,10 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
h->chroma_qp[0] = get_chroma_qp(h, 0, s->qscale);
h->chroma_qp[1] = get_chroma_qp(h, 1, s->qscale);
//FIXME qscale / qp ... stuff
- if(h->slice_type == FF_SP_TYPE){
+ if(h->slice_type == AV_PICTURE_TYPE_SP){
get_bits1(&s->gb); /* sp_for_switch_flag */
}
- if(h->slice_type==FF_SP_TYPE || h->slice_type == FF_SI_TYPE){
+ if(h->slice_type==AV_PICTURE_TYPE_SP || h->slice_type == AV_PICTURE_TYPE_SI){
get_se_golomb(&s->gb); /* slice_qs_delta */
}
@@ -2230,8 +2240,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
}
if( s->avctx->skip_loop_filter >= AVDISCARD_ALL
- ||(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY && h->slice_type_nos != FF_I_TYPE)
- ||(s->avctx->skip_loop_filter >= AVDISCARD_BIDIR && h->slice_type_nos == FF_B_TYPE)
+ ||(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY && h->slice_type_nos != AV_PICTURE_TYPE_I)
+ ||(s->avctx->skip_loop_filter >= AVDISCARD_BIDIR && h->slice_type_nos == AV_PICTURE_TYPE_B)
||(s->avctx->skip_loop_filter >= AVDISCARD_NONREF && h->nal_ref_idc == 0))
h->deblocking_filter= 0;
@@ -2307,7 +2317,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
h->slice_num,
(s->picture_structure==PICT_FRAME ? "F" : s->picture_structure==PICT_TOP_FIELD ? "T" : "B"),
first_mb_in_slice,
- av_get_pict_type_char(h->slice_type), h->slice_type_fixed ? " fix" : "", h->nal_unit_type == NAL_IDR_SLICE ? " IDR" : "",
+ av_get_picture_type_char(h->slice_type), h->slice_type_fixed ? " fix" : "", h->nal_unit_type == NAL_IDR_SLICE ? " IDR" : "",
pps_id, h->frame_num,
s->current_picture_ptr->field_poc[0], s->current_picture_ptr->field_poc[1],
h->ref_count[0], h->ref_count[1],
@@ -2315,7 +2325,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
h->deblocking_filter, h->slice_alpha_c0_offset/2-26, h->slice_beta_offset/2-26,
h->use_weight,
h->use_weight==1 && h->use_weight_chroma ? "c" : "",
- h->slice_type == FF_B_TYPE ? (h->direct_spatial_mv_pred ? "SPAT" : "TEMP") : ""
+ h->slice_type == AV_PICTURE_TYPE_B ? (h->direct_spatial_mv_pred ? "SPAT" : "TEMP") : ""
);
}
@@ -2325,11 +2335,11 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
int ff_h264_get_slice_type(const H264Context *h)
{
switch (h->slice_type) {
- case FF_P_TYPE: return 0;
- case FF_B_TYPE: return 1;
- case FF_I_TYPE: return 2;
- case FF_SP_TYPE: return 3;
- case FF_SI_TYPE: return 4;
+ case AV_PICTURE_TYPE_P: return 0;
+ case AV_PICTURE_TYPE_B: return 1;
+ case AV_PICTURE_TYPE_I: return 2;
+ case AV_PICTURE_TYPE_SP: return 3;
+ case AV_PICTURE_TYPE_SI: return 4;
default: return -1;
}
}
@@ -2557,6 +2567,7 @@ static void loop_filter(H264Context *h){
const int end_mb_y= s->mb_y + FRAME_MBAFF;
const int old_slice_type= h->slice_type;
const int end_mb_x = s->mb_x;
+ const int pixel_shift = h->pixel_shift;
if(h->deblocking_filter) {
int start_x= s->resync_mb_y == s->mb_y ? s->resync_mb_x : 0;
@@ -2573,9 +2584,9 @@ static void loop_filter(H264Context *h){
s->mb_x= mb_x;
s->mb_y= mb_y;
- dest_y = s->current_picture.data[0] + ((mb_x<<h->pixel_shift) + mb_y * s->linesize ) * 16;
- dest_cb = s->current_picture.data[1] + ((mb_x<<h->pixel_shift) + mb_y * s->uvlinesize) * 8;
- dest_cr = s->current_picture.data[2] + ((mb_x<<h->pixel_shift) + mb_y * s->uvlinesize) * 8;
+ dest_y = s->current_picture.data[0] + ((mb_x << pixel_shift) + mb_y * s->linesize ) * 16;
+ dest_cb = s->current_picture.data[1] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * 8;
+ dest_cr = s->current_picture.data[2] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * 8;
//FIXME simplify above
if (MB_FIELD) {
@@ -2952,7 +2963,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
bit_length= !dst_length ? 0 : (8*dst_length - ff_h264_decode_rbsp_trailing(h, ptr + dst_length - 1));
if(s->avctx->debug&FF_DEBUG_STARTCODE){
- av_log(h->s.avctx, AV_LOG_DEBUG, "NAL %d at %d/%d length %d\n", hx->nal_unit_type, buf_index, buf_size, dst_length);
+ av_log(h->s.avctx, AV_LOG_DEBUG, "NAL %d/%d at %d/%d length %d\n", hx->nal_unit_type, hx->nal_ref_idc, buf_index, buf_size, dst_length);
}
if (h->is_avc && (nalsize != consumed) && nalsize){
@@ -3007,8 +3018,8 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
&& hx->s.hurry_up < 5
#endif
&& (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc)
- && (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type_nos!=FF_B_TYPE)
- && (avctx->skip_frame < AVDISCARD_NONKEY || hx->slice_type_nos==FF_I_TYPE)
+ && (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type_nos!=AV_PICTURE_TYPE_B)
+ && (avctx->skip_frame < AVDISCARD_NONKEY || hx->slice_type_nos==AV_PICTURE_TYPE_I)
&& avctx->skip_frame < AVDISCARD_ALL){
if(avctx->hwaccel) {
if (avctx->hwaccel->decode_slice(avctx, &buf[buf_index - consumed], consumed) < 0)
@@ -3047,8 +3058,8 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
&& s->hurry_up < 5
#endif
&& (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc)
- && (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type_nos!=FF_B_TYPE)
- && (avctx->skip_frame < AVDISCARD_NONKEY || hx->slice_type_nos==FF_I_TYPE)
+ && (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type_nos!=AV_PICTURE_TYPE_B)
+ && (avctx->skip_frame < AVDISCARD_NONKEY || hx->slice_type_nos==AV_PICTURE_TYPE_I)
&& avctx->skip_frame < AVDISCARD_ALL)
context_count++;
break;
@@ -3060,7 +3071,8 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
init_get_bits(&s->gb, ptr, bit_length);
ff_h264_decode_seq_parameter_set(h);
- if(s->flags& CODEC_FLAG_LOW_DELAY)
+ if(s->flags& CODEC_FLAG_LOW_DELAY ||
+ (h->sps.bitstream_restriction_flag && !h->sps.num_reorder_frames))
s->low_delay=1;
if(avctx->has_b_frames < 2)
@@ -3069,7 +3081,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
if (avctx->bits_per_raw_sample != h->sps.bit_depth_luma) {
if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 10) {
avctx->bits_per_raw_sample = h->sps.bit_depth_luma;
- h->pixel_shift = h->sps.bit_depth_luma/9;
+ h->pixel_shift = h->sps.bit_depth_luma > 8;
ff_h264dsp_init(&h->h264dsp, h->sps.bit_depth_luma);
ff_h264_pred_init(&h->hpc, s->codec_id, h->sps.bit_depth_luma);
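In the h264.c hunks above, pixel_shift (now loaded into a local in the hot functions) is 0 for 8-bit streams and 1 when bit_depth_luma > 8, so it doubles as a byte-addressing shift: sample and coefficient offsets are scaled with << pixel_shift instead of multiplying by the element size, and the reworked dctcoef_get()/dctcoef_set() go through the AV_RN16A/AV_WN16A and AV_RN32A/AV_WN32A access macros. A compact standalone sketch of the addressing idea (not the tree's code):

#include <stdint.h>

/* Fetch coefficient `index` from a block that holds either int16_t
 * coefficients (8-bit decoding) or int32_t ones (9/10-bit decoding). */
static inline int get_coeff(const int16_t *mb, int index, int pixel_shift)
{
    if (pixel_shift)                       /* high bit depth: 32-bit coeffs */
        return ((const int32_t *)mb)[index];
    return mb[index];                      /* 8-bit: 16-bit coeffs          */
}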
diff --git a/libavcodec/h264.h b/libavcodec/h264.h
index a0f688d180..04da701750 100644
--- a/libavcodec/h264.h
+++ b/libavcodec/h264.h
@@ -108,7 +108,7 @@
*/
#define DELAYED_PIC_REF 4
-#define QP_MAX_MAX (51 + 2*6) // The maximum supported qp
+#define QP_MAX_NUM (51 + 2*6) // The maximum supported qp
/* NAL unit types */
enum {
@@ -266,7 +266,7 @@ typedef struct MMCO{
typedef struct H264Context{
MpegEncContext s;
H264DSPContext h264dsp;
- int pixel_shift;
+ int pixel_shift; ///< 0 for 8-bit H264, 1 for high-bit-depth H264
int chroma_qp[2]; //QPc
int qp_thresh; ///< QP threshold to skip loopfilter
@@ -355,8 +355,8 @@ typedef struct H264Context{
*/
PPS pps; //FIXME move to Picture perhaps? (->no) do we need that?
- uint32_t dequant4_buffer[6][QP_MAX_MAX+1][16]; //FIXME should these be moved down?
- uint32_t dequant8_buffer[2][QP_MAX_MAX+1][64];
+ uint32_t dequant4_buffer[6][QP_MAX_NUM+1][16]; //FIXME should these be moved down?
+ uint32_t dequant8_buffer[2][QP_MAX_NUM+1][64];
uint32_t (*dequant4_coeff[6])[16];
uint32_t (*dequant8_coeff[2])[64];
@@ -594,17 +594,10 @@ typedef struct H264Context{
// Timestamp stuff
int sei_buffering_period_present; ///< Buffering period SEI flag
int initial_cpb_removal_delay[32]; ///< Initial timestamps for CPBs
-
- //SVQ3 specific fields
- int halfpel_flag;
- int thirdpel_flag;
- int unknown_svq3_flag;
- int next_slice_index;
- uint32_t svq3_watermark_key;
}H264Context;
-extern const uint8_t ff_h264_chroma_qp[3][QP_MAX_MAX+1]; ///< One chroma qp table for each supported bit depth (8, 9, 10).
+extern const uint8_t ff_h264_chroma_qp[3][QP_MAX_NUM+1]; ///< One chroma qp table for each supported bit depth (8, 9, 10).
/**
* Decode SEI
@@ -1118,7 +1111,7 @@ static void fill_decode_caches(H264Context *h, int mb_type){
}
AV_ZERO16(h->mvd_cache [list][scan8[4 ]]);
AV_ZERO16(h->mvd_cache [list][scan8[12]]);
- if(h->slice_type_nos == FF_B_TYPE){
+ if(h->slice_type_nos == AV_PICTURE_TYPE_B){
fill_rectangle(&h->direct_cache[scan8[0]], 4, 4, 8, MB_TYPE_16x16>>1, 1);
if(IS_DIRECT(top_type)){
@@ -1255,7 +1248,7 @@ static inline void write_back_motion(H264Context *h, int mb_type){
}
}
- if(h->slice_type_nos == FF_B_TYPE && CABAC){
+ if(h->slice_type_nos == AV_PICTURE_TYPE_B && CABAC){
if(IS_8X8(mb_type)){
uint8_t *direct_table = &h->direct_table[4*h->mb_xy];
direct_table[1] = h->sub_mb_type[1]>>1;
@@ -1286,7 +1279,7 @@ static void av_unused decode_mb_skip(H264Context *h){
if(MB_FIELD)
mb_type|= MB_TYPE_INTERLACED;
- if( h->slice_type_nos == FF_B_TYPE )
+ if( h->slice_type_nos == AV_PICTURE_TYPE_B )
{
// just for fill_caches. pred_direct_motion will set the real mb_type
mb_type|= MB_TYPE_L0L1|MB_TYPE_DIRECT2|MB_TYPE_SKIP;
diff --git a/libavcodec/h264_cabac.c b/libavcodec/h264_cabac.c
index 7873c28a26..925ac44498 100644
--- a/libavcodec/h264_cabac.c
+++ b/libavcodec/h264_cabac.c
@@ -691,7 +691,7 @@ void ff_h264_init_cabac_states(H264Context *h) {
const int8_t (*tab)[2];
const int slice_qp = av_clip(s->qscale - 6*(h->sps.bit_depth_luma-8), 0, 51);
- if( h->slice_type_nos == FF_I_TYPE ) tab = cabac_context_init_I;
+ if( h->slice_type_nos == AV_PICTURE_TYPE_I ) tab = cabac_context_init_I;
else tab = cabac_context_init_PB[h->cabac_init_idc];
/* calculate pre-state */
@@ -779,7 +779,7 @@ static int decode_cabac_mb_skip( H264Context *h, int mb_x, int mb_y ) {
if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mbb_xy] ))
ctx++;
- if( h->slice_type_nos == FF_B_TYPE )
+ if( h->slice_type_nos == AV_PICTURE_TYPE_B )
ctx += 13;
return get_cabac_noinline( &h->cabac, &h->cabac_state[11+ctx] );
}
@@ -888,7 +888,7 @@ static int decode_cabac_mb_ref( H264Context *h, int list, int n ) {
int ref = 0;
int ctx = 0;
- if( h->slice_type_nos == FF_B_TYPE) {
+ if( h->slice_type_nos == AV_PICTURE_TYPE_B) {
if( refa > 0 && !(h->direct_cache[scan8[n] - 1]&(MB_TYPE_DIRECT2>>1)) )
ctx++;
if( refb > 0 && !(h->direct_cache[scan8[n] - 8]&(MB_TYPE_DIRECT2>>1)) )
@@ -1103,10 +1103,11 @@ static av_always_inline void decode_cabac_residual_internal( H264Context *h, DCT
#define STORE_BLOCK(type) \
- do {\
- uint8_t *ctx = coeff_abs_level1_ctx[node_ctx] + abs_level_m1_ctx_base;\
-\
- int j= scantable[index[--coeff_count]];\
+ do { \
+ uint8_t *ctx = coeff_abs_level1_ctx[node_ctx] + abs_level_m1_ctx_base; \
+ \
+ int j= scantable[index[--coeff_count]]; \
+ \
if( get_cabac( CC, ctx ) == 0 ) { \
node_ctx = coeff_abs_level_transition[0][node_ctx]; \
if( is_dc ) { \
@@ -1141,8 +1142,8 @@ static av_always_inline void decode_cabac_residual_internal( H264Context *h, DCT
}else{ \
((type*)block)[j] = ((int)(get_cabac_bypass_sign( CC, -coeff_abs ) * qmul[j] + 32)) >> 6; \
} \
- }\
- } while( coeff_count );
+ } \
+ } while ( coeff_count );
if (h->pixel_shift) {
STORE_BLOCK(int32_t)
@@ -1204,11 +1205,12 @@ int ff_h264_decode_mb_cabac(H264Context *h) {
int mb_xy;
int mb_type, partition_count, cbp = 0;
int dct8x8_allowed= h->pps.transform_8x8_mode;
+ const int pixel_shift = h->pixel_shift;
mb_xy = h->mb_xy = s->mb_x + s->mb_y*s->mb_stride;
tprintf(s->avctx, "pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y);
- if( h->slice_type_nos != FF_I_TYPE ) {
+ if( h->slice_type_nos != AV_PICTURE_TYPE_I ) {
int skip;
/* a skipped mb needs the aff flag from the following mb */
if( FRAME_MBAFF && (s->mb_y&1)==1 && h->prev_mb_skipped )
@@ -1244,9 +1246,9 @@ int ff_h264_decode_mb_cabac(H264Context *h) {
fill_decode_neighbors(h, -(MB_FIELD));
- if( h->slice_type_nos == FF_B_TYPE ) {
+ if( h->slice_type_nos == AV_PICTURE_TYPE_B ) {
int ctx = 0;
- assert(h->slice_type_nos == FF_B_TYPE);
+ assert(h->slice_type_nos == AV_PICTURE_TYPE_B);
if( !IS_DIRECT( h->left_type[0]-1 ) )
ctx++;
@@ -1279,7 +1281,7 @@ int ff_h264_decode_mb_cabac(H264Context *h) {
}
partition_count= b_mb_type_info[mb_type].partition_count;
mb_type= b_mb_type_info[mb_type].type;
- } else if( h->slice_type_nos == FF_P_TYPE ) {
+ } else if( h->slice_type_nos == AV_PICTURE_TYPE_P ) {
if( get_cabac_noinline( &h->cabac, &h->cabac_state[14] ) == 0 ) {
/* P-type */
if( get_cabac_noinline( &h->cabac, &h->cabac_state[15] ) == 0 ) {
@@ -1297,9 +1299,9 @@ int ff_h264_decode_mb_cabac(H264Context *h) {
}
} else {
mb_type= decode_cabac_intra_mb_type(h, 3, 1);
- if(h->slice_type == FF_SI_TYPE && mb_type)
+ if(h->slice_type == AV_PICTURE_TYPE_SI && mb_type)
mb_type--;
- assert(h->slice_type_nos == FF_I_TYPE);
+ assert(h->slice_type_nos == AV_PICTURE_TYPE_I);
decode_intra_mb:
partition_count = 0;
cbp= i_mb_type_info[mb_type].cbp;
@@ -1312,7 +1314,7 @@ decode_intra_mb:
h->slice_table[ mb_xy ]= h->slice_num;
if(IS_INTRA_PCM(mb_type)) {
- const int mb_size = 384*h->sps.bit_depth_luma/8;
+ const int mb_size = (384*h->sps.bit_depth_luma) >> 3;
const uint8_t *ptr;
// We assume these blocks are very rare so we do not optimize it.
@@ -1388,7 +1390,7 @@ decode_intra_mb:
} else if( partition_count == 4 ) {
int i, j, sub_partition_count[4], list, ref[2][4];
- if( h->slice_type_nos == FF_B_TYPE ) {
+ if( h->slice_type_nos == AV_PICTURE_TYPE_B ) {
for( i = 0; i < 4; i++ ) {
h->sub_mb_type[i] = decode_cabac_b_mb_sub_type( h );
sub_partition_count[i]= b_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count;
@@ -1670,7 +1672,7 @@ decode_intra_mb:
qmul = h->dequant4_coeff[0][s->qscale];
for( i = 0; i < 16; i++ ) {
//av_log( s->avctx, AV_LOG_ERROR, "INTRA16x16 AC:%d\n", i );
- decode_cabac_residual_nondc(h, h->mb + (16*i<<h->pixel_shift), 1, i, scan + 1, qmul, 15);
+ decode_cabac_residual_nondc(h, h->mb + (16*i << pixel_shift), 1, i, scan + 1, qmul, 15);
}
} else {
fill_rectangle(&h->non_zero_count_cache[scan8[0]], 4, 4, 8, 0, 1);
@@ -1680,7 +1682,7 @@ decode_intra_mb:
for( i8x8 = 0; i8x8 < 4; i8x8++ ) {
if( cbp & (1<<i8x8) ) {
if( IS_8x8DCT(mb_type) ) {
- decode_cabac_residual_nondc(h, h->mb + (64*i8x8<<h->pixel_shift), 5, 4*i8x8,
+ decode_cabac_residual_nondc(h, h->mb + (64*i8x8 << pixel_shift), 5, 4*i8x8,
scan8x8, h->dequant8_coeff[IS_INTRA( mb_type ) ? 0:1][s->qscale], 64);
} else {
qmul = h->dequant4_coeff[IS_INTRA( mb_type ) ? 0:3][s->qscale];
@@ -1688,7 +1690,7 @@ decode_intra_mb:
const int index = 4*i8x8 + i4x4;
//av_log( s->avctx, AV_LOG_ERROR, "Luma4x4: %d\n", index );
//START_TIMER
- decode_cabac_residual_nondc(h, h->mb + (16*index<<h->pixel_shift), 2, index, scan, qmul, 16);
+ decode_cabac_residual_nondc(h, h->mb + (16*index << pixel_shift), 2, index, scan, qmul, 16);
//STOP_TIMER("decode_residual")
}
}
@@ -1703,7 +1705,7 @@ decode_intra_mb:
int c;
for( c = 0; c < 2; c++ ) {
//av_log( s->avctx, AV_LOG_ERROR, "INTRA C%d-DC\n",c );
- decode_cabac_residual_dc(h, h->mb + ((256 + 16*4*c)<<h->pixel_shift), 3, CHROMA_DC_BLOCK_INDEX+c, chroma_dc_scan, 4);
+ decode_cabac_residual_dc(h, h->mb + ((256 + 16*4*c) << pixel_shift), 3, CHROMA_DC_BLOCK_INDEX+c, chroma_dc_scan, 4);
}
}
@@ -1714,7 +1716,7 @@ decode_intra_mb:
for( i = 0; i < 4; i++ ) {
const int index = 16 + 4 * c + i;
//av_log( s->avctx, AV_LOG_ERROR, "INTRA C%d-AC %d\n",c, index - 16 );
- decode_cabac_residual_nondc(h, h->mb + (16*index<<h->pixel_shift), 4, index, scan + 1, qmul, 15);
+ decode_cabac_residual_nondc(h, h->mb + (16*index << pixel_shift), 4, index, scan + 1, qmul, 15);
}
}
} else {
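Besides the constant rename, the CABAC (and, below, the CAVLC) macroblock decoder now caches h->pixel_shift in a local const int pixel_shift and uses it for every coefficient-buffer offset (16*index << pixel_shift and friends). As I read it, pixel_shift is 0 for 8-bit content and 1 for the new 9/10-bit paths, where each coefficient needs twice the storage; hoisting it into a local lets the compiler keep the value in a register across these long decode functions. A rough sketch of the addressing idea, with hypothetical names:

/* Hypothetical sketch of the offset scheme; struct and function names are
 * illustrative only, not the library's. */
#include <stdint.h>

struct SketchDecoder {
    int16_t *mb;          /* coefficient buffer; wide samples use two slots */
    int      pixel_shift; /* 0 for 8-bit, 1 for 9/10-bit (assumed meaning)  */
};

static int16_t *block_ptr(struct SketchDecoder *d, int index)
{
    const int pixel_shift = d->pixel_shift;   /* hoisted once, as in the patch */
    /* 16 coefficients per 4x4 block; <<1 doubles the stride for wide samples */
    return d->mb + (16 * index << pixel_shift);
}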
diff --git a/libavcodec/h264_cavlc.c b/libavcodec/h264_cavlc.c
index 92911c13e7..62e30f1311 100644
--- a/libavcodec/h264_cavlc.c
+++ b/libavcodec/h264_cavlc.c
@@ -542,13 +542,14 @@ int ff_h264_decode_mb_cavlc(H264Context *h){
int partition_count;
unsigned int mb_type, cbp;
int dct8x8_allowed= h->pps.transform_8x8_mode;
+ const int pixel_shift = h->pixel_shift;
mb_xy = h->mb_xy = s->mb_x + s->mb_y*s->mb_stride;
tprintf(s->avctx, "pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y);
cbp = 0; /* avoid warning. FIXME: find a solution without slowing
down the code */
- if(h->slice_type_nos != FF_I_TYPE){
+ if(h->slice_type_nos != AV_PICTURE_TYPE_I){
if(s->mb_skip_run==-1)
s->mb_skip_run= get_ue_golomb(&s->gb);
@@ -569,7 +570,7 @@ int ff_h264_decode_mb_cavlc(H264Context *h){
h->prev_mb_skipped= 0;
mb_type= get_ue_golomb(&s->gb);
- if(h->slice_type_nos == FF_B_TYPE){
+ if(h->slice_type_nos == AV_PICTURE_TYPE_B){
if(mb_type < 23){
partition_count= b_mb_type_info[mb_type].partition_count;
mb_type= b_mb_type_info[mb_type].type;
@@ -577,7 +578,7 @@ int ff_h264_decode_mb_cavlc(H264Context *h){
mb_type -= 23;
goto decode_intra_mb;
}
- }else if(h->slice_type_nos == FF_P_TYPE){
+ }else if(h->slice_type_nos == AV_PICTURE_TYPE_P){
if(mb_type < 5){
partition_count= p_mb_type_info[mb_type].partition_count;
mb_type= p_mb_type_info[mb_type].type;
@@ -586,12 +587,12 @@ int ff_h264_decode_mb_cavlc(H264Context *h){
goto decode_intra_mb;
}
}else{
- assert(h->slice_type_nos == FF_I_TYPE);
- if(h->slice_type == FF_SI_TYPE && mb_type)
+ assert(h->slice_type_nos == AV_PICTURE_TYPE_I);
+ if(h->slice_type == AV_PICTURE_TYPE_SI && mb_type)
mb_type--;
decode_intra_mb:
if(mb_type > 25){
- av_log(h->s.avctx, AV_LOG_ERROR, "mb_type %d in %c slice too large at %d %d\n", mb_type, av_get_pict_type_char(h->slice_type), s->mb_x, s->mb_y);
+ av_log(h->s.avctx, AV_LOG_ERROR, "mb_type %d in %c slice too large at %d %d\n", mb_type, av_get_picture_type_char(h->slice_type), s->mb_x, s->mb_y);
return -1;
}
partition_count=0;
@@ -678,7 +679,7 @@ decode_intra_mb:
}else if(partition_count==4){
int i, j, sub_partition_count[4], list, ref[2][4];
- if(h->slice_type_nos == FF_B_TYPE){
+ if(h->slice_type_nos == AV_PICTURE_TYPE_B){
for(i=0; i<4; i++){
h->sub_mb_type[i]= get_ue_golomb_31(&s->gb);
if(h->sub_mb_type[i] >=13){
@@ -696,7 +697,7 @@ decode_intra_mb:
h->ref_cache[1][scan8[12]] = PART_NOT_AVAILABLE;
}
}else{
- assert(h->slice_type_nos == FF_P_TYPE); //FIXME SP correct ?
+ assert(h->slice_type_nos == AV_PICTURE_TYPE_P); //FIXME SP correct ?
for(i=0; i<4; i++){
h->sub_mb_type[i]= get_ue_golomb_31(&s->gb);
if(h->sub_mb_type[i] >=4){
@@ -961,7 +962,7 @@ decode_intra_mb:
for(i8x8=0; i8x8<4; i8x8++){
for(i4x4=0; i4x4<4; i4x4++){
const int index= i4x4 + 4*i8x8;
- if( decode_residual(h, h->intra_gb_ptr, h->mb + (16*index<<h->pixel_shift), index, scan + 1, h->dequant4_coeff[0][s->qscale], 15) < 0 ){
+ if( decode_residual(h, h->intra_gb_ptr, h->mb + (16*index << pixel_shift), index, scan + 1, h->dequant4_coeff[0][s->qscale], 15) < 0 ){
return -1;
}
}
@@ -973,7 +974,7 @@ decode_intra_mb:
for(i8x8=0; i8x8<4; i8x8++){
if(cbp & (1<<i8x8)){
if(IS_8x8DCT(mb_type)){
- DCTELEM *buf = &h->mb[64*i8x8<<h->pixel_shift];
+ DCTELEM *buf = &h->mb[64*i8x8 << pixel_shift];
uint8_t *nnz;
for(i4x4=0; i4x4<4; i4x4++){
if( decode_residual(h, gb, buf, i4x4+4*i8x8, scan8x8+16*i4x4,
@@ -986,7 +987,7 @@ decode_intra_mb:
for(i4x4=0; i4x4<4; i4x4++){
const int index= i4x4 + 4*i8x8;
- if( decode_residual(h, gb, h->mb + (16*index<<h->pixel_shift), index, scan, h->dequant4_coeff[IS_INTRA( mb_type ) ? 0:3][s->qscale], 16) <0 ){
+ if( decode_residual(h, gb, h->mb + (16*index << pixel_shift), index, scan, h->dequant4_coeff[IS_INTRA( mb_type ) ? 0:3][s->qscale], 16) <0 ){
return -1;
}
}
@@ -1000,7 +1001,7 @@ decode_intra_mb:
if(cbp&0x30){
for(chroma_idx=0; chroma_idx<2; chroma_idx++)
- if( decode_residual(h, gb, h->mb + ((256 + 16*4*chroma_idx)<<h->pixel_shift), CHROMA_DC_BLOCK_INDEX+chroma_idx, chroma_dc_scan, NULL, 4) < 0){
+ if( decode_residual(h, gb, h->mb + ((256 + 16*4*chroma_idx) << pixel_shift), CHROMA_DC_BLOCK_INDEX+chroma_idx, chroma_dc_scan, NULL, 4) < 0){
return -1;
}
}
@@ -1010,7 +1011,7 @@ decode_intra_mb:
const uint32_t *qmul = h->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 0:3)][h->chroma_qp[chroma_idx]];
for(i4x4=0; i4x4<4; i4x4++){
const int index= 16 + 4*chroma_idx + i4x4;
- if( decode_residual(h, gb, h->mb + (16*index<<h->pixel_shift), index, scan + 1, qmul, 15) < 0){
+ if( decode_residual(h, gb, h->mb + (16*index << pixel_shift), index, scan + 1, qmul, 15) < 0){
return -1;
}
}
diff --git a/libavcodec/h264_direct.c b/libavcodec/h264_direct.c
index a26a51a330..34a4ff727f 100644
--- a/libavcodec/h264_direct.c
+++ b/libavcodec/h264_direct.c
@@ -130,7 +130,7 @@ void ff_h264_direct_ref_list_init(H264Context * const h){
h->col_fieldoff= 2*(h->ref_list[1][0].reference) - 3;
}
- if(cur->pict_type != FF_B_TYPE || h->direct_spatial_mv_pred)
+ if(cur->pict_type != AV_PICTURE_TYPE_B || h->direct_spatial_mv_pred)
return;
for(list=0; list<2; list++){
diff --git a/libavcodec/h264_loopfilter.c b/libavcodec/h264_loopfilter.c
index 325fd3cc61..2e61a3110a 100644
--- a/libavcodec/h264_loopfilter.c
+++ b/libavcodec/h264_loopfilter.c
@@ -101,197 +101,92 @@ static const uint8_t tc0_table[52*3][4] = {
};
static void av_always_inline filter_mb_edgev( uint8_t *pix, int stride, int16_t bS[4], unsigned int qp, H264Context *h) {
- const int bit_depth = h->sps.bit_depth_luma;
- const int qp_bd_offset = 6*(bit_depth-8);
+ const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
const unsigned int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset;
- const int alpha = alpha_table[index_a] << (bit_depth-8);
- const int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset] << (bit_depth-8);
+ const int alpha = alpha_table[index_a];
+ const int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset];
if (alpha ==0 || beta == 0) return;
if( bS[0] < 4 ) {
int8_t tc[4];
- tc[0] = tc0_table[index_a][bS[0]] << (bit_depth-8);
- tc[1] = tc0_table[index_a][bS[1]] << (bit_depth-8);
- tc[2] = tc0_table[index_a][bS[2]] << (bit_depth-8);
- tc[3] = tc0_table[index_a][bS[3]] << (bit_depth-8);
+ tc[0] = tc0_table[index_a][bS[0]];
+ tc[1] = tc0_table[index_a][bS[1]];
+ tc[2] = tc0_table[index_a][bS[2]];
+ tc[3] = tc0_table[index_a][bS[3]];
h->h264dsp.h264_h_loop_filter_luma(pix, stride, alpha, beta, tc);
} else {
h->h264dsp.h264_h_loop_filter_luma_intra(pix, stride, alpha, beta);
}
}
static void av_always_inline filter_mb_edgecv( uint8_t *pix, int stride, int16_t bS[4], unsigned int qp, H264Context *h ) {
- const int bit_depth = h->sps.bit_depth_luma;
- const int qp_bd_offset = 6*(bit_depth-8);
+ const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
const unsigned int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset;
- const int alpha = alpha_table[index_a] << (bit_depth-8);
- const int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset] << (bit_depth-8);
+ const int alpha = alpha_table[index_a];
+ const int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset];
if (alpha ==0 || beta == 0) return;
if( bS[0] < 4 ) {
int8_t tc[4];
- tc[0] = (tc0_table[index_a][bS[0]] << (bit_depth-8))+1;
- tc[1] = (tc0_table[index_a][bS[1]] << (bit_depth-8))+1;
- tc[2] = (tc0_table[index_a][bS[2]] << (bit_depth-8))+1;
- tc[3] = (tc0_table[index_a][bS[3]] << (bit_depth-8))+1;
+ tc[0] = tc0_table[index_a][bS[0]]+1;
+ tc[1] = tc0_table[index_a][bS[1]]+1;
+ tc[2] = tc0_table[index_a][bS[2]]+1;
+ tc[3] = tc0_table[index_a][bS[3]]+1;
h->h264dsp.h264_h_loop_filter_chroma(pix, stride, alpha, beta, tc);
} else {
h->h264dsp.h264_h_loop_filter_chroma_intra(pix, stride, alpha, beta);
}
}
-static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int16_t bS[4], int bsi, int qp ) {
- int i;
- const int bit_depth = h->sps.bit_depth_luma;
- const int qp_bd_offset = 6*(bit_depth-8);
+static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int16_t bS[7], int bsi, int qp ) {
+ const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset;
- int alpha = alpha_table[index_a] << (bit_depth-8);
- int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset] << (bit_depth-8);
- for( i = 0; i < 8; i++, pix += stride) {
- const int bS_index = (i >> 1) * bsi;
-
- if( bS[bS_index] == 0 ) {
- continue;
- }
-
- if( bS[bS_index] < 4 ) {
- const int tc0 = tc0_table[index_a][bS[bS_index]] << (bit_depth-8);
- const int p0 = pix[-1];
- const int p1 = pix[-2];
- const int p2 = pix[-3];
- const int q0 = pix[0];
- const int q1 = pix[1];
- const int q2 = pix[2];
-
- if( FFABS( p0 - q0 ) < alpha &&
- FFABS( p1 - p0 ) < beta &&
- FFABS( q1 - q0 ) < beta ) {
- int tc = tc0;
- int i_delta;
-
- if( FFABS( p2 - p0 ) < beta ) {
- if(tc0)
- pix[-2] = p1 + av_clip( ( p2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( p1 << 1 ) ) >> 1, -tc0, tc0 );
- tc++;
- }
- if( FFABS( q2 - q0 ) < beta ) {
- if(tc0)
- pix[1] = q1 + av_clip( ( q2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( q1 << 1 ) ) >> 1, -tc0, tc0 );
- tc++;
- }
-
- i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
- pix[-1] = av_clip_uint8( p0 + i_delta ); /* p0' */
- pix[0] = av_clip_uint8( q0 - i_delta ); /* q0' */
- tprintf(h->s.avctx, "filter_mb_mbaff_edgev i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d, tc:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, qp[qp_index], index_a, alpha, beta, tc, bS[bS_index], pix[-3], p1, p0, q0, q1, pix[2], p1, pix[-1], pix[0], q1);
- }
- }else{
- const int p0 = pix[-1];
- const int p1 = pix[-2];
- const int p2 = pix[-3];
-
- const int q0 = pix[0];
- const int q1 = pix[1];
- const int q2 = pix[2];
-
- if( FFABS( p0 - q0 ) < alpha &&
- FFABS( p1 - p0 ) < beta &&
- FFABS( q1 - q0 ) < beta ) {
+ int alpha = alpha_table[index_a];
+ int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset];
+ if (alpha ==0 || beta == 0) return;
- if(FFABS( p0 - q0 ) < (( alpha >> 2 ) + 2 )){
- if( FFABS( p2 - p0 ) < beta)
- {
- const int p3 = pix[-4];
- /* p0', p1', p2' */
- pix[-1] = ( p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4 ) >> 3;
- pix[-2] = ( p2 + p1 + p0 + q0 + 2 ) >> 2;
- pix[-3] = ( 2*p3 + 3*p2 + p1 + p0 + q0 + 4 ) >> 3;
- } else {
- /* p0' */
- pix[-1] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
- }
- if( FFABS( q2 - q0 ) < beta)
- {
- const int q3 = pix[3];
- /* q0', q1', q2' */
- pix[0] = ( p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4 ) >> 3;
- pix[1] = ( p0 + q0 + q1 + q2 + 2 ) >> 2;
- pix[2] = ( 2*q3 + 3*q2 + q1 + q0 + p0 + 4 ) >> 3;
- } else {
- /* q0' */
- pix[0] = ( 2*q1 + q0 + p1 + 2 ) >> 2;
- }
- }else{
- /* p0', q0' */
- pix[-1] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
- pix[ 0] = ( 2*q1 + q0 + p1 + 2 ) >> 2;
- }
- tprintf(h->s.avctx, "filter_mb_mbaff_edgev i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d\n# bS:4 -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x, %02x, %02x]\n", i, qp[qp_index], index_a, alpha, beta, p2, p1, p0, q0, q1, q2, pix[-3], pix[-2], pix[-1], pix[0], pix[1], pix[2]);
- }
- }
+ if( bS[0] < 4 ) {
+ int8_t tc[4];
+ tc[0] = tc0_table[index_a][bS[0*bsi]];
+ tc[1] = tc0_table[index_a][bS[1*bsi]];
+ tc[2] = tc0_table[index_a][bS[2*bsi]];
+ tc[3] = tc0_table[index_a][bS[3*bsi]];
+ h->h264dsp.h264_h_loop_filter_luma_mbaff(pix, stride, alpha, beta, tc);
+ } else {
+ h->h264dsp.h264_h_loop_filter_luma_mbaff_intra(pix, stride, alpha, beta);
}
}
-static void filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, int16_t bS[4], int bsi, int qp ) {
- int i;
- const int bit_depth = h->sps.bit_depth_luma;
- const int qp_bd_offset = 6*(bit_depth-8);
+static void filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, int16_t bS[7], int bsi, int qp ) {
+ const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset;
- int alpha = alpha_table[index_a] << (bit_depth-8);
- int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset] << (bit_depth-8);
- for( i = 0; i < 4; i++, pix += stride) {
- const int bS_index = i*bsi;
-
- if( bS[bS_index] == 0 ) {
- continue;
- }
+ int alpha = alpha_table[index_a];
+ int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset];
+ if (alpha ==0 || beta == 0) return;
- if( bS[bS_index] < 4 ) {
- const int tc = (tc0_table[index_a][bS[bS_index]] << (bit_depth-8)) + 1;
- const int p0 = pix[-1];
- const int p1 = pix[-2];
- const int q0 = pix[0];
- const int q1 = pix[1];
-
- if( FFABS( p0 - q0 ) < alpha &&
- FFABS( p1 - p0 ) < beta &&
- FFABS( q1 - q0 ) < beta ) {
- const int i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
-
- pix[-1] = av_clip_uint8( p0 + i_delta ); /* p0' */
- pix[0] = av_clip_uint8( q0 - i_delta ); /* q0' */
- tprintf(h->s.avctx, "filter_mb_mbaff_edgecv i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d, tc:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, qp[qp_index], index_a, alpha, beta, tc, bS[bS_index], pix[-3], p1, p0, q0, q1, pix[2], p1, pix[-1], pix[0], q1);
- }
- }else{
- const int p0 = pix[-1];
- const int p1 = pix[-2];
- const int q0 = pix[0];
- const int q1 = pix[1];
-
- if( FFABS( p0 - q0 ) < alpha &&
- FFABS( p1 - p0 ) < beta &&
- FFABS( q1 - q0 ) < beta ) {
-
- pix[-1] = ( 2*p1 + p0 + q1 + 2 ) >> 2; /* p0' */
- pix[0] = ( 2*q1 + q0 + p1 + 2 ) >> 2; /* q0' */
- tprintf(h->s.avctx, "filter_mb_mbaff_edgecv i:%d\n# bS:4 -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x, %02x, %02x]\n", i, pix[-3], p1, p0, q0, q1, pix[2], pix[-3], pix[-2], pix[-1], pix[0], pix[1], pix[2]);
- }
- }
+ if( bS[0] < 4 ) {
+ int8_t tc[4];
+ tc[0] = tc0_table[index_a][bS[0*bsi]] + 1;
+ tc[1] = tc0_table[index_a][bS[1*bsi]] + 1;
+ tc[2] = tc0_table[index_a][bS[2*bsi]] + 1;
+ tc[3] = tc0_table[index_a][bS[3*bsi]] + 1;
+ h->h264dsp.h264_h_loop_filter_chroma_mbaff(pix, stride, alpha, beta, tc);
+ } else {
+ h->h264dsp.h264_h_loop_filter_chroma_mbaff_intra(pix, stride, alpha, beta);
}
}
static void av_always_inline filter_mb_edgeh( uint8_t *pix, int stride, int16_t bS[4], unsigned int qp, H264Context *h ) {
- const int bit_depth = h->sps.bit_depth_luma;
- const int qp_bd_offset = 6*(bit_depth-8);
+ const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
const unsigned int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset;
- const int alpha = alpha_table[index_a] << (bit_depth-8);
- const int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset] << (bit_depth-8);
+ const int alpha = alpha_table[index_a];
+ const int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset];
if (alpha ==0 || beta == 0) return;
if( bS[0] < 4 ) {
int8_t tc[4];
- tc[0] = tc0_table[index_a][bS[0]] << (bit_depth-8);
- tc[1] = tc0_table[index_a][bS[1]] << (bit_depth-8);
- tc[2] = tc0_table[index_a][bS[2]] << (bit_depth-8);
- tc[3] = tc0_table[index_a][bS[3]] << (bit_depth-8);
+ tc[0] = tc0_table[index_a][bS[0]];
+ tc[1] = tc0_table[index_a][bS[1]];
+ tc[2] = tc0_table[index_a][bS[2]];
+ tc[3] = tc0_table[index_a][bS[3]];
h->h264dsp.h264_v_loop_filter_luma(pix, stride, alpha, beta, tc);
} else {
h->h264dsp.h264_v_loop_filter_luma_intra(pix, stride, alpha, beta);
@@ -299,19 +194,18 @@ static void av_always_inline filter_mb_edgeh( uint8_t *pix, int stride, int16_t
}
static void av_always_inline filter_mb_edgech( uint8_t *pix, int stride, int16_t bS[4], unsigned int qp, H264Context *h ) {
- const int bit_depth = h->sps.bit_depth_luma;
- const int qp_bd_offset = 6*(bit_depth-8);
+ const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
const unsigned int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset;
- const int alpha = alpha_table[index_a] << (bit_depth-8);
- const int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset] << (bit_depth-8);
+ const int alpha = alpha_table[index_a];
+ const int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset];
if (alpha ==0 || beta == 0) return;
if( bS[0] < 4 ) {
int8_t tc[4];
- tc[0] = (tc0_table[index_a][bS[0]] << (bit_depth-8))+1;
- tc[1] = (tc0_table[index_a][bS[1]] << (bit_depth-8))+1;
- tc[2] = (tc0_table[index_a][bS[2]] << (bit_depth-8))+1;
- tc[3] = (tc0_table[index_a][bS[3]] << (bit_depth-8))+1;
+ tc[0] = tc0_table[index_a][bS[0]]+1;
+ tc[1] = tc0_table[index_a][bS[1]]+1;
+ tc[2] = tc0_table[index_a][bS[2]]+1;
+ tc[3] = tc0_table[index_a][bS[3]]+1;
h->h264dsp.h264_v_loop_filter_chroma(pix, stride, alpha, beta, tc);
} else {
h->h264dsp.h264_v_loop_filter_chroma_intra(pix, stride, alpha, beta);
@@ -650,10 +544,10 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
//{ int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
if( dir == 0 ) {
- filter_mb_edgev( &img_y[4*edge<<h->pixel_shift], linesize, bS, qp, h );
+ filter_mb_edgev( &img_y[4*edge << h->pixel_shift], linesize, bS, qp, h );
if( (edge&1) == 0 ) {
- filter_mb_edgecv( &img_cb[2*edge<<h->pixel_shift], uvlinesize, bS, h->chroma_qp[0], h);
- filter_mb_edgecv( &img_cr[2*edge<<h->pixel_shift], uvlinesize, bS, h->chroma_qp[1], h);
+ filter_mb_edgecv( &img_cb[2*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[0], h);
+ filter_mb_edgecv( &img_cr[2*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[1], h);
}
} else {
filter_mb_edgeh( &img_y[4*edge*linesize], linesize, bS, qp, h );
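In h264_loopfilter.c the per-call bit-depth scaling of alpha, beta and tc0 is dropped from the filter_mb_edge* wrappers, and the open-coded MBAFF vertical-edge loops are replaced by calls through the new h264_h_loop_filter_*_mbaff[_intra] DSP pointers; the scaling now happens once inside the bit-depth-templated DSP code (see h264dsp_template.c further down). A minimal sketch of the threshold widening that moved, assuming the usual convention that the alpha/beta tables are defined for 8-bit samples:

/* Sketch only: thresholds derived from 8-bit tables are widened to the
 * coding bit depth inside the DSP function instead of by every caller. */
static inline int scale_threshold(int value_8bit, int bit_depth)
{
    return value_8bit << (bit_depth - 8);   /* 8-bit: unchanged, 10-bit: x4 */
}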
diff --git a/libavcodec/h264_parser.c b/libavcodec/h264_parser.c
index 7ae5d5ab7d..080b6a93b5 100644
--- a/libavcodec/h264_parser.c
+++ b/libavcodec/h264_parser.c
@@ -117,7 +117,7 @@ static inline int parse_nal_units(AVCodecParserContext *s,
const uint8_t *ptr;
/* set some sane default values */
- s->pict_type = FF_I_TYPE;
+ s->pict_type = AV_PICTURE_TYPE_I;
s->key_frame = 0;
h->s.avctx= avctx;
diff --git a/libavcodec/h264_ps.c b/libavcodec/h264_ps.c
index ab20ecfeb7..f77a013112 100644
--- a/libavcodec/h264_ps.c
+++ b/libavcodec/h264_ps.c
@@ -70,7 +70,7 @@ static const AVRational pixel_aspect[17]={
QP(37,d), QP(37,d), QP(37,d), QP(38,d), QP(38,d), QP(38,d),\
QP(39,d), QP(39,d), QP(39,d), QP(39,d)
-const uint8_t ff_h264_chroma_qp[3][QP_MAX_MAX+1] = {
+const uint8_t ff_h264_chroma_qp[3][QP_MAX_NUM+1] = {
{
CHROMA_QP_TABLE_END(8)
},
diff --git a/libavcodec/h264_refs.c b/libavcodec/h264_refs.c
index 74eaaa8ada..9554201522 100644
--- a/libavcodec/h264_refs.c
+++ b/libavcodec/h264_refs.c
@@ -25,6 +25,7 @@
* @author Michael Niedermayer <michaelni@gmx.at>
*/
+#include "libavutil/avassert.h"
#include "internal.h"
#include "dsputil.h"
#include "avcodec.h"
@@ -109,7 +110,7 @@ int ff_h264_fill_default_ref_list(H264Context *h){
MpegEncContext * const s = &h->s;
int i, len;
- if(h->slice_type_nos==FF_B_TYPE){
+ if(h->slice_type_nos==AV_PICTURE_TYPE_B){
Picture *sorted[32];
int cur_poc, list;
int lens[2];
@@ -148,7 +149,7 @@ int ff_h264_fill_default_ref_list(H264Context *h){
for (i=0; i<h->ref_count[0]; i++) {
tprintf(h->s.avctx, "List0: %s fn:%d 0x%p\n", (h->default_ref_list[0][i].long_ref ? "LT" : "ST"), h->default_ref_list[0][i].pic_id, h->default_ref_list[0][i].data[0]);
}
- if(h->slice_type_nos==FF_B_TYPE){
+ if(h->slice_type_nos==AV_PICTURE_TYPE_B){
for (i=0; i<h->ref_count[1]; i++) {
tprintf(h->s.avctx, "List1: %s fn:%d 0x%p\n", (h->default_ref_list[1][i].long_ref ? "LT" : "ST"), h->default_ref_list[1][i].pic_id, h->default_ref_list[1][i].data[0]);
}
@@ -476,7 +477,7 @@ static void print_long_term(H264Context *h) {
void ff_generate_sliding_window_mmcos(H264Context *h) {
MpegEncContext * const s = &h->s;
- assert(h->long_ref_count + h->short_ref_count <= h->sps.ref_frame_count);
+ av_assert0(h->long_ref_count + h->short_ref_count <= h->sps.ref_frame_count);
h->mmco_index= 0;
if(h->short_ref_count && h->long_ref_count + h->short_ref_count == h->sps.ref_frame_count &&
@@ -621,15 +622,16 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
}
}
- if (h->long_ref_count + h->short_ref_count > h->sps.ref_frame_count){
+ if (h->long_ref_count + h->short_ref_count > FFMAX(h->sps.ref_frame_count, 1)){
/* We have too many reference frames, probably due to corrupted
* stream. Need to discard one frame. Prevents overrun of the
* short_ref and long_ref buffers.
*/
av_log(h->s.avctx, AV_LOG_ERROR,
- "number of reference frames exceeds max (probably "
- "corrupt input), discarding one\n");
+ "number of reference frames (%d+%d) exceeds max (%d; probably "
+ "corrupt input), discarding one\n",
+ h->long_ref_count, h->short_ref_count, h->sps.ref_frame_count);
if (h->long_ref_count && !h->short_ref_count) {
for (i = 0; i < 16; ++i)
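The reference-marking change above tightens the overflow check to FFMAX(h->sps.ref_frame_count, 1) and logs the actual counts. As I read it, the FFMAX guard keeps streams that (possibly through corruption) signal ref_frame_count == 0 from discarding their single legitimate short-term reference on every frame. A toy illustration of the clamp, with made-up names and numbers:

/* Toy illustration, not library code: with the clamp, one reference is
 * always tolerated even if the SPS claims zero. */
#define SKETCH_MAX(a, b) ((a) > (b) ? (a) : (b))

static int too_many_refs(int long_refs, int short_refs, int sps_ref_frame_count)
{
    return long_refs + short_refs > SKETCH_MAX(sps_ref_frame_count, 1);
}
/* too_many_refs(0, 1, 0) -> 0 with the clamp, 1 without it */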
diff --git a/libavcodec/h264data.h b/libavcodec/h264data.h
index b3631da93b..c459523f71 100644
--- a/libavcodec/h264data.h
+++ b/libavcodec/h264data.h
@@ -36,7 +36,7 @@
static const uint8_t golomb_to_pict_type[5]=
-{FF_P_TYPE, FF_B_TYPE, FF_I_TYPE, FF_SP_TYPE, FF_SI_TYPE};
+{AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_SP, AV_PICTURE_TYPE_SI};
static const uint8_t golomb_to_intra4x4_cbp[48]={
47, 31, 15, 0, 23, 27, 29, 30, 7, 11, 13, 14, 39, 43, 45, 46,
diff --git a/libavcodec/h264dsp.c b/libavcodec/h264dsp.c
index 04c6ea6df4..96a38ff77d 100644
--- a/libavcodec/h264dsp.c
+++ b/libavcodec/h264dsp.c
@@ -30,15 +30,15 @@
#include "h264dsp.h"
#define BIT_DEPTH 8
-#include "h264dsp_internal.h"
+#include "h264dsp_template.c"
#undef BIT_DEPTH
#define BIT_DEPTH 9
-#include "h264dsp_internal.h"
+#include "h264dsp_template.c"
#undef BIT_DEPTH
#define BIT_DEPTH 10
-#include "h264dsp_internal.h"
+#include "h264dsp_template.c"
#undef BIT_DEPTH
void ff_h264dsp_init(H264DSPContext *c, const int bit_depth)
@@ -47,58 +47,62 @@ void ff_h264dsp_init(H264DSPContext *c, const int bit_depth)
#define FUNC(a, depth) a ## _ ## depth ## _c
#define H264_DSP(depth) \
- c->h264_idct_add = FUNC(ff_h264_idct_add , depth);\
- c->h264_idct8_add = FUNC(ff_h264_idct8_add , depth);\
- c->h264_idct_dc_add = FUNC(ff_h264_idct_dc_add , depth);\
- c->h264_idct8_dc_add = FUNC(ff_h264_idct8_dc_add , depth);\
- c->h264_idct_add16 = FUNC(ff_h264_idct_add16 , depth);\
- c->h264_idct8_add4 = FUNC(ff_h264_idct8_add4 , depth);\
- c->h264_idct_add8 = FUNC(ff_h264_idct_add8 , depth);\
- c->h264_idct_add16intra = FUNC(ff_h264_idct_add16intra , depth);\
- c->h264_luma_dc_dequant_idct = FUNC(ff_h264_luma_dc_dequant_idct , depth);\
- c->h264_chroma_dc_dequant_idct = FUNC(ff_h264_chroma_dc_dequant_idct , depth);\
+ c->h264_idct_add= FUNC(ff_h264_idct_add, depth);\
+ c->h264_idct8_add= FUNC(ff_h264_idct8_add, depth);\
+ c->h264_idct_dc_add= FUNC(ff_h264_idct_dc_add, depth);\
+ c->h264_idct8_dc_add= FUNC(ff_h264_idct8_dc_add, depth);\
+ c->h264_idct_add16 = FUNC(ff_h264_idct_add16, depth);\
+ c->h264_idct8_add4 = FUNC(ff_h264_idct8_add4, depth);\
+ c->h264_idct_add8 = FUNC(ff_h264_idct_add8, depth);\
+ c->h264_idct_add16intra= FUNC(ff_h264_idct_add16intra, depth);\
+ c->h264_luma_dc_dequant_idct= FUNC(ff_h264_luma_dc_dequant_idct, depth);\
+ c->h264_chroma_dc_dequant_idct= FUNC(ff_h264_chroma_dc_dequant_idct, depth);\
\
- c->weight_h264_pixels_tab[0] = FUNC( weight_h264_pixels16x16 , depth);\
- c->weight_h264_pixels_tab[1] = FUNC( weight_h264_pixels16x8 , depth);\
- c->weight_h264_pixels_tab[2] = FUNC( weight_h264_pixels8x16 , depth);\
- c->weight_h264_pixels_tab[3] = FUNC( weight_h264_pixels8x8 , depth);\
- c->weight_h264_pixels_tab[4] = FUNC( weight_h264_pixels8x4 , depth);\
- c->weight_h264_pixels_tab[5] = FUNC( weight_h264_pixels4x8 , depth);\
- c->weight_h264_pixels_tab[6] = FUNC( weight_h264_pixels4x4 , depth);\
- c->weight_h264_pixels_tab[7] = FUNC( weight_h264_pixels4x2 , depth);\
- c->weight_h264_pixels_tab[8] = FUNC( weight_h264_pixels2x4 , depth);\
- c->weight_h264_pixels_tab[9] = FUNC( weight_h264_pixels2x2 , depth);\
- c->biweight_h264_pixels_tab[0] = FUNC(biweight_h264_pixels16x16 , depth);\
- c->biweight_h264_pixels_tab[1] = FUNC(biweight_h264_pixels16x8 , depth);\
- c->biweight_h264_pixels_tab[2] = FUNC(biweight_h264_pixels8x16 , depth);\
- c->biweight_h264_pixels_tab[3] = FUNC(biweight_h264_pixels8x8 , depth);\
- c->biweight_h264_pixels_tab[4] = FUNC(biweight_h264_pixels8x4 , depth);\
- c->biweight_h264_pixels_tab[5] = FUNC(biweight_h264_pixels4x8 , depth);\
- c->biweight_h264_pixels_tab[6] = FUNC(biweight_h264_pixels4x4 , depth);\
- c->biweight_h264_pixels_tab[7] = FUNC(biweight_h264_pixels4x2 , depth);\
- c->biweight_h264_pixels_tab[8] = FUNC(biweight_h264_pixels2x4 , depth);\
- c->biweight_h264_pixels_tab[9] = FUNC(biweight_h264_pixels2x2 , depth);\
+ c->weight_h264_pixels_tab[0]= FUNC(weight_h264_pixels16x16, depth);\
+ c->weight_h264_pixels_tab[1]= FUNC(weight_h264_pixels16x8, depth);\
+ c->weight_h264_pixels_tab[2]= FUNC(weight_h264_pixels8x16, depth);\
+ c->weight_h264_pixels_tab[3]= FUNC(weight_h264_pixels8x8, depth);\
+ c->weight_h264_pixels_tab[4]= FUNC(weight_h264_pixels8x4, depth);\
+ c->weight_h264_pixels_tab[5]= FUNC(weight_h264_pixels4x8, depth);\
+ c->weight_h264_pixels_tab[6]= FUNC(weight_h264_pixels4x4, depth);\
+ c->weight_h264_pixels_tab[7]= FUNC(weight_h264_pixels4x2, depth);\
+ c->weight_h264_pixels_tab[8]= FUNC(weight_h264_pixels2x4, depth);\
+ c->weight_h264_pixels_tab[9]= FUNC(weight_h264_pixels2x2, depth);\
+ c->biweight_h264_pixels_tab[0]= FUNC(biweight_h264_pixels16x16, depth);\
+ c->biweight_h264_pixels_tab[1]= FUNC(biweight_h264_pixels16x8, depth);\
+ c->biweight_h264_pixels_tab[2]= FUNC(biweight_h264_pixels8x16, depth);\
+ c->biweight_h264_pixels_tab[3]= FUNC(biweight_h264_pixels8x8, depth);\
+ c->biweight_h264_pixels_tab[4]= FUNC(biweight_h264_pixels8x4, depth);\
+ c->biweight_h264_pixels_tab[5]= FUNC(biweight_h264_pixels4x8, depth);\
+ c->biweight_h264_pixels_tab[6]= FUNC(biweight_h264_pixels4x4, depth);\
+ c->biweight_h264_pixels_tab[7]= FUNC(biweight_h264_pixels4x2, depth);\
+ c->biweight_h264_pixels_tab[8]= FUNC(biweight_h264_pixels2x4, depth);\
+ c->biweight_h264_pixels_tab[9]= FUNC(biweight_h264_pixels2x2, depth);\
\
- c->h264_v_loop_filter_luma = FUNC(h264_v_loop_filter_luma , depth);\
- c->h264_h_loop_filter_luma = FUNC(h264_h_loop_filter_luma , depth);\
- c->h264_v_loop_filter_luma_intra = FUNC(h264_v_loop_filter_luma_intra , depth);\
- c->h264_h_loop_filter_luma_intra = FUNC(h264_h_loop_filter_luma_intra , depth);\
- c->h264_v_loop_filter_chroma = FUNC(h264_v_loop_filter_chroma , depth);\
- c->h264_h_loop_filter_chroma = FUNC(h264_h_loop_filter_chroma , depth);\
- c->h264_v_loop_filter_chroma_intra = FUNC(h264_v_loop_filter_chroma_intra, depth);\
- c->h264_h_loop_filter_chroma_intra = FUNC(h264_h_loop_filter_chroma_intra, depth);\
+ c->h264_v_loop_filter_luma= FUNC(h264_v_loop_filter_luma, depth);\
+ c->h264_h_loop_filter_luma= FUNC(h264_h_loop_filter_luma, depth);\
+ c->h264_h_loop_filter_luma_mbaff= FUNC(h264_h_loop_filter_luma_mbaff, depth);\
+ c->h264_v_loop_filter_luma_intra= FUNC(h264_v_loop_filter_luma_intra, depth);\
+ c->h264_h_loop_filter_luma_intra= FUNC(h264_h_loop_filter_luma_intra, depth);\
+ c->h264_h_loop_filter_luma_mbaff_intra= FUNC(h264_h_loop_filter_luma_mbaff_intra, depth);\
+ c->h264_v_loop_filter_chroma= FUNC(h264_v_loop_filter_chroma, depth);\
+ c->h264_h_loop_filter_chroma= FUNC(h264_h_loop_filter_chroma, depth);\
+ c->h264_h_loop_filter_chroma_mbaff= FUNC(h264_h_loop_filter_chroma_mbaff, depth);\
+ c->h264_v_loop_filter_chroma_intra= FUNC(h264_v_loop_filter_chroma_intra, depth);\
+ c->h264_h_loop_filter_chroma_intra= FUNC(h264_h_loop_filter_chroma_intra, depth);\
+ c->h264_h_loop_filter_chroma_mbaff_intra= FUNC(h264_h_loop_filter_chroma_mbaff_intra, depth);\
c->h264_loop_filter_strength= NULL;
switch (bit_depth) {
- case 9:
- H264_DSP(9);
- break;
- case 10:
- H264_DSP(10);
- break;
- default:
- H264_DSP(8);
- break;
+ case 9:
+ H264_DSP(9);
+ break;
+ case 10:
+ H264_DSP(10);
+ break;
+ default:
+ H264_DSP(8);
+ break;
}
if (ARCH_ARM) ff_h264dsp_init_arm(c, bit_depth);
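h264dsp_internal.h becomes h264dsp_template.c and is included three times with BIT_DEPTH set to 8, 9 and 10; the FUNC()/H264_DSP() macros then paste the depth into every function name, so one template source yields three complete sets of C routines selected at init time. A generic sketch of that pattern follows; the file and function names here are hypothetical, only the mechanism mirrors the patch.

/* --- sketch_template.c (hypothetical) ------------------------------------ */
/* Included repeatedly with different BIT_DEPTH values.                      */
#define SK_PASTE3(name, depth) name ## _ ## depth ## _c
#define SK_PASTE2(name, depth) SK_PASTE3(name, depth)   /* force expansion   */
#define SK_FUNC(name)          SK_PASTE2(name, BIT_DEPTH)

static int SK_FUNC(max_pixel_value)(void)
{
    return (1 << BIT_DEPTH) - 1;
}

#undef SK_FUNC
#undef SK_PASTE2
#undef SK_PASTE3
/* --- end of sketch_template.c --------------------------------------------- */

/* --- including file (hypothetical) ---------------------------------------- */
#define BIT_DEPTH 8
#include "sketch_template.c"      /* defines max_pixel_value_8_c()  */
#undef BIT_DEPTH

#define BIT_DEPTH 10
#include "sketch_template.c"      /* defines max_pixel_value_10_c() */
#undef BIT_DEPTH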
diff --git a/libavcodec/h264dsp.h b/libavcodec/h264dsp.h
index 8a0b9ae72b..4b606efa17 100644
--- a/libavcodec/h264dsp.h
+++ b/libavcodec/h264dsp.h
@@ -45,13 +45,17 @@ typedef struct H264DSPContext{
/* loop filter */
void (*h264_v_loop_filter_luma)(uint8_t *pix/*align 16*/, int stride, int alpha, int beta, int8_t *tc0);
void (*h264_h_loop_filter_luma)(uint8_t *pix/*align 4 */, int stride, int alpha, int beta, int8_t *tc0);
+ void (*h264_h_loop_filter_luma_mbaff)(uint8_t *pix/*align 16*/, int stride, int alpha, int beta, int8_t *tc0);
/* v/h_loop_filter_luma_intra: align 16 */
void (*h264_v_loop_filter_luma_intra)(uint8_t *pix, int stride, int alpha, int beta);
void (*h264_h_loop_filter_luma_intra)(uint8_t *pix, int stride, int alpha, int beta);
+ void (*h264_h_loop_filter_luma_mbaff_intra)(uint8_t *pix/*align 16*/, int stride, int alpha, int beta);
void (*h264_v_loop_filter_chroma)(uint8_t *pix/*align 8*/, int stride, int alpha, int beta, int8_t *tc0);
void (*h264_h_loop_filter_chroma)(uint8_t *pix/*align 4*/, int stride, int alpha, int beta, int8_t *tc0);
+ void (*h264_h_loop_filter_chroma_mbaff)(uint8_t *pix/*align 8*/, int stride, int alpha, int beta, int8_t *tc0);
void (*h264_v_loop_filter_chroma_intra)(uint8_t *pix/*align 8*/, int stride, int alpha, int beta);
void (*h264_h_loop_filter_chroma_intra)(uint8_t *pix/*align 8*/, int stride, int alpha, int beta);
+ void (*h264_h_loop_filter_chroma_mbaff_intra)(uint8_t *pix/*align 8*/, int stride, int alpha, int beta);
// h264_loop_filter_strength: simd only. the C version is inlined in h264.c
void (*h264_loop_filter_strength)(int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field);
diff --git a/libavcodec/h264dsp_internal.h b/libavcodec/h264dsp_template.c
index f9a717b8c4..6816e81ff2 100644
--- a/libavcodec/h264dsp_internal.h
+++ b/libavcodec/h264dsp_template.c
@@ -25,7 +25,7 @@
* @author Michael Niedermayer <michaelni@gmx.at>
*/
-#include "h264_high_depth.h"
+#include "high_bit_depth.h"
#define op_scale1(x) block[x] = av_clip_pixel( (block[x]*weight + offset) >> log2_denom )
#define op_scale2(x) dst[x] = av_clip_pixel( (src[x]*weights + dst[x]*weightd + offset) >> (log2_denom+1))
@@ -102,18 +102,21 @@ H264_WEIGHT(2,2)
#undef op_scale2
#undef H264_WEIGHT
-static av_always_inline av_flatten void FUNCC(h264_loop_filter_luma)(uint8_t *p_pix, int xstride, int ystride, int alpha, int beta, int8_t *tc0)
+static av_always_inline av_flatten void FUNCC(h264_loop_filter_luma)(uint8_t *p_pix, int xstride, int ystride, int inner_iters, int alpha, int beta, int8_t *tc0)
{
pixel *pix = (pixel*)p_pix;
int i, d;
xstride >>= sizeof(pixel)-1;
ystride >>= sizeof(pixel)-1;
+ alpha <<= BIT_DEPTH - 8;
+ beta <<= BIT_DEPTH - 8;
for( i = 0; i < 4; i++ ) {
- if( tc0[i] < 0 ) {
- pix += 4*ystride;
+ const int tc_orig = tc0[i] << (BIT_DEPTH - 8);
+ if( tc_orig < 0 ) {
+ pix += inner_iters*ystride;
continue;
}
- for( d = 0; d < 4; d++ ) {
+ for( d = 0; d < inner_iters; d++ ) {
const int p0 = pix[-1*xstride];
const int p1 = pix[-2*xstride];
const int p2 = pix[-3*xstride];
@@ -125,17 +128,17 @@ static av_always_inline av_flatten void FUNCC(h264_loop_filter_luma)(uint8_t *p_
FFABS( p1 - p0 ) < beta &&
FFABS( q1 - q0 ) < beta ) {
- int tc = tc0[i];
+ int tc = tc_orig;
int i_delta;
if( FFABS( p2 - p0 ) < beta ) {
- if(tc0[i])
- pix[-2*xstride] = p1 + av_clip( (( p2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - p1, -tc0[i], tc0[i] );
+ if(tc_orig)
+ pix[-2*xstride] = p1 + av_clip( (( p2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - p1, -tc_orig, tc_orig );
tc++;
}
if( FFABS( q2 - q0 ) < beta ) {
- if(tc0[i])
- pix[ xstride] = q1 + av_clip( (( q2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - q1, -tc0[i], tc0[i] );
+ if(tc_orig)
+ pix[ xstride] = q1 + av_clip( (( q2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - q1, -tc_orig, tc_orig );
tc++;
}
@@ -149,20 +152,26 @@ static av_always_inline av_flatten void FUNCC(h264_loop_filter_luma)(uint8_t *p_
}
static void FUNCC(h264_v_loop_filter_luma)(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
- FUNCC(h264_loop_filter_luma)(pix, stride, sizeof(pixel), alpha, beta, tc0);
+ FUNCC(h264_loop_filter_luma)(pix, stride, sizeof(pixel), 4, alpha, beta, tc0);
}
static void FUNCC(h264_h_loop_filter_luma)(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
- FUNCC(h264_loop_filter_luma)(pix, sizeof(pixel), stride, alpha, beta, tc0);
+ FUNCC(h264_loop_filter_luma)(pix, sizeof(pixel), stride, 4, alpha, beta, tc0);
+}
+static void FUNCC(h264_h_loop_filter_luma_mbaff)(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
+{
+ FUNCC(h264_loop_filter_luma)(pix, sizeof(pixel), stride, 2, alpha, beta, tc0);
}
-static av_always_inline av_flatten void FUNCC(h264_loop_filter_luma_intra)(uint8_t *p_pix, int xstride, int ystride, int alpha, int beta)
+static av_always_inline av_flatten void FUNCC(h264_loop_filter_luma_intra)(uint8_t *p_pix, int xstride, int ystride, int inner_iters, int alpha, int beta)
{
pixel *pix = (pixel*)p_pix;
int d;
xstride >>= sizeof(pixel)-1;
ystride >>= sizeof(pixel)-1;
- for( d = 0; d < 16; d++ ) {
+ alpha <<= BIT_DEPTH - 8;
+ beta <<= BIT_DEPTH - 8;
+ for( d = 0; d < 4 * inner_iters; d++ ) {
const int p2 = pix[-3*xstride];
const int p1 = pix[-2*xstride];
const int p0 = pix[-1*xstride];
@@ -209,26 +218,32 @@ static av_always_inline av_flatten void FUNCC(h264_loop_filter_luma_intra)(uint8
}
static void FUNCC(h264_v_loop_filter_luma_intra)(uint8_t *pix, int stride, int alpha, int beta)
{
- FUNCC(h264_loop_filter_luma_intra)(pix, stride, sizeof(pixel), alpha, beta);
+ FUNCC(h264_loop_filter_luma_intra)(pix, stride, sizeof(pixel), 4, alpha, beta);
}
static void FUNCC(h264_h_loop_filter_luma_intra)(uint8_t *pix, int stride, int alpha, int beta)
{
- FUNCC(h264_loop_filter_luma_intra)(pix, sizeof(pixel), stride, alpha, beta);
+ FUNCC(h264_loop_filter_luma_intra)(pix, sizeof(pixel), stride, 4, alpha, beta);
+}
+static void FUNCC(h264_h_loop_filter_luma_mbaff_intra)(uint8_t *pix, int stride, int alpha, int beta)
+{
+ FUNCC(h264_loop_filter_luma_intra)(pix, sizeof(pixel), stride, 2, alpha, beta);
}
-static av_always_inline av_flatten void FUNCC(h264_loop_filter_chroma)(uint8_t *p_pix, int xstride, int ystride, int alpha, int beta, int8_t *tc0)
+static av_always_inline av_flatten void FUNCC(h264_loop_filter_chroma)(uint8_t *p_pix, int xstride, int ystride, int inner_iters, int alpha, int beta, int8_t *tc0)
{
pixel *pix = (pixel*)p_pix;
int i, d;
+ alpha <<= BIT_DEPTH - 8;
+ beta <<= BIT_DEPTH - 8;
xstride >>= sizeof(pixel)-1;
ystride >>= sizeof(pixel)-1;
for( i = 0; i < 4; i++ ) {
- const int tc = tc0[i];
+ const int tc = ((tc0[i] - 1) << (BIT_DEPTH - 8)) + 1;
if( tc <= 0 ) {
- pix += 2*ystride;
+ pix += inner_iters*ystride;
continue;
}
- for( d = 0; d < 2; d++ ) {
+ for( d = 0; d < inner_iters; d++ ) {
const int p0 = pix[-1*xstride];
const int p1 = pix[-2*xstride];
const int q0 = pix[0];
@@ -249,20 +264,26 @@ static av_always_inline av_flatten void FUNCC(h264_loop_filter_chroma)(uint8_t *
}
static void FUNCC(h264_v_loop_filter_chroma)(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
- FUNCC(h264_loop_filter_chroma)(pix, stride, sizeof(pixel), alpha, beta, tc0);
+ FUNCC(h264_loop_filter_chroma)(pix, stride, sizeof(pixel), 2, alpha, beta, tc0);
}
static void FUNCC(h264_h_loop_filter_chroma)(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
- FUNCC(h264_loop_filter_chroma)(pix, sizeof(pixel), stride, alpha, beta, tc0);
+ FUNCC(h264_loop_filter_chroma)(pix, sizeof(pixel), stride, 2, alpha, beta, tc0);
+}
+static void FUNCC(h264_h_loop_filter_chroma_mbaff)(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
+{
+ FUNCC(h264_loop_filter_chroma)(pix, sizeof(pixel), stride, 1, alpha, beta, tc0);
}
-static av_always_inline av_flatten void FUNCC(h264_loop_filter_chroma_intra)(uint8_t *p_pix, int xstride, int ystride, int alpha, int beta)
+static av_always_inline av_flatten void FUNCC(h264_loop_filter_chroma_intra)(uint8_t *p_pix, int xstride, int ystride, int inner_iters, int alpha, int beta)
{
pixel *pix = (pixel*)p_pix;
int d;
xstride >>= sizeof(pixel)-1;
ystride >>= sizeof(pixel)-1;
- for( d = 0; d < 8; d++ ) {
+ alpha <<= BIT_DEPTH - 8;
+ beta <<= BIT_DEPTH - 8;
+ for( d = 0; d < 4 * inner_iters; d++ ) {
const int p0 = pix[-1*xstride];
const int p1 = pix[-2*xstride];
const int q0 = pix[0];
@@ -280,9 +301,13 @@ static av_always_inline av_flatten void FUNCC(h264_loop_filter_chroma_intra)(uin
}
static void FUNCC(h264_v_loop_filter_chroma_intra)(uint8_t *pix, int stride, int alpha, int beta)
{
- FUNCC(h264_loop_filter_chroma_intra)(pix, stride, sizeof(pixel), alpha, beta);
+ FUNCC(h264_loop_filter_chroma_intra)(pix, stride, sizeof(pixel), 2, alpha, beta);
}
static void FUNCC(h264_h_loop_filter_chroma_intra)(uint8_t *pix, int stride, int alpha, int beta)
{
- FUNCC(h264_loop_filter_chroma_intra)(pix, sizeof(pixel), stride, alpha, beta);
+ FUNCC(h264_loop_filter_chroma_intra)(pix, sizeof(pixel), stride, 2, alpha, beta);
+}
+static void FUNCC(h264_h_loop_filter_chroma_mbaff_intra)(uint8_t *pix, int stride, int alpha, int beta)
+{
+ FUNCC(h264_loop_filter_chroma_intra)(pix, sizeof(pixel), stride, 1, alpha, beta);
}
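Inside h264dsp_template.c the luma and chroma loop filters gain an inner_iters parameter: the normal edge filters still process 4 (luma) or 2 (chroma) lines per boundary-strength value, while the new *_mbaff wrappers pass half of that, since an MBAFF call covers 8 luma / 4 chroma lines instead of 16 / 8. The alpha/beta/tc0 values are also rescaled for BIT_DEPTH > 8 inside the template rather than by the caller. A stripped-down sketch of how inner_iters shapes the loop (illustrative only, the filtering arithmetic is elided):

/* Stripped-down sketch: one strength value covers 'inner_iters' lines. */
static void sketch_filter_edge(unsigned char *pix, int ystride,
                               const signed char bS[4], int inner_iters)
{
    int i, d;
    for (i = 0; i < 4; i++) {
        if (bS[i] == 0) {                  /* nothing to do for this segment */
            pix += inner_iters * ystride;
            continue;
        }
        for (d = 0; d < inner_iters; d++) {
            /* the real code clips p0/q0 against alpha/beta/tc0 here */
            pix += ystride;
        }
    }
}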
diff --git a/libavcodec/h264idct.c b/libavcodec/h264idct.c
index 920356d01f..7d1ee007bc 100644
--- a/libavcodec/h264idct.c
+++ b/libavcodec/h264idct.c
@@ -26,13 +26,13 @@
*/
#define BIT_DEPTH 8
-#include "h264idct_internal.h"
+#include "h264idct_template.c"
#undef BIT_DEPTH
#define BIT_DEPTH 9
-#include "h264idct_internal.h"
+#include "h264idct_template.c"
#undef BIT_DEPTH
#define BIT_DEPTH 10
-#include "h264idct_internal.h"
+#include "h264idct_template.c"
#undef BIT_DEPTH
diff --git a/libavcodec/h264idct_internal.h b/libavcodec/h264idct_template.c
index d19051df90..1b3c635319 100644
--- a/libavcodec/h264idct_internal.h
+++ b/libavcodec/h264idct_template.c
@@ -25,7 +25,7 @@
* @author Michael Niedermayer <michaelni@gmx.at>
*/
-#include "h264_high_depth.h"
+#include "high_bit_depth.h"
#ifndef AVCODEC_H264IDCT_INTERNAL_H
#define AVCODEC_H264IDCT_INTERNAL_H
diff --git a/libavcodec/h264pred.c b/libavcodec/h264pred.c
index 8d2c6f0355..f6533cf9ba 100644
--- a/libavcodec/h264pred.c
+++ b/libavcodec/h264pred.c
@@ -28,17 +28,338 @@
#include "h264pred.h"
#define BIT_DEPTH 8
-#include "h264pred_internal.h"
+#include "h264pred_template.c"
#undef BIT_DEPTH
#define BIT_DEPTH 9
-#include "h264pred_internal.h"
+#include "h264pred_template.c"
#undef BIT_DEPTH
#define BIT_DEPTH 10
-#include "h264pred_internal.h"
+#include "h264pred_template.c"
#undef BIT_DEPTH
+static void pred4x4_vertical_vp8_c(uint8_t *src, const uint8_t *topright, int stride){
+ const int lt= src[-1-1*stride];
+ LOAD_TOP_EDGE
+ LOAD_TOP_RIGHT_EDGE
+ uint32_t v = PACK_4U8((lt + 2*t0 + t1 + 2) >> 2,
+ (t0 + 2*t1 + t2 + 2) >> 2,
+ (t1 + 2*t2 + t3 + 2) >> 2,
+ (t2 + 2*t3 + t4 + 2) >> 2);
+
+ AV_WN32A(src+0*stride, v);
+ AV_WN32A(src+1*stride, v);
+ AV_WN32A(src+2*stride, v);
+ AV_WN32A(src+3*stride, v);
+}
+
+static void pred4x4_horizontal_vp8_c(uint8_t *src, const uint8_t *topright, int stride){
+ const int lt= src[-1-1*stride];
+ LOAD_LEFT_EDGE
+
+ AV_WN32A(src+0*stride, ((lt + 2*l0 + l1 + 2) >> 2)*0x01010101);
+ AV_WN32A(src+1*stride, ((l0 + 2*l1 + l2 + 2) >> 2)*0x01010101);
+ AV_WN32A(src+2*stride, ((l1 + 2*l2 + l3 + 2) >> 2)*0x01010101);
+ AV_WN32A(src+3*stride, ((l2 + 2*l3 + l3 + 2) >> 2)*0x01010101);
+}
+
+static void pred4x4_down_left_svq3_c(uint8_t *src, const uint8_t *topright, int stride){
+ LOAD_TOP_EDGE
+ LOAD_LEFT_EDGE
+ const av_unused int unu0= t0;
+ const av_unused int unu1= l0;
+
+ src[0+0*stride]=(l1 + t1)>>1;
+ src[1+0*stride]=
+ src[0+1*stride]=(l2 + t2)>>1;
+ src[2+0*stride]=
+ src[1+1*stride]=
+ src[0+2*stride]=
+ src[3+0*stride]=
+ src[2+1*stride]=
+ src[1+2*stride]=
+ src[0+3*stride]=
+ src[3+1*stride]=
+ src[2+2*stride]=
+ src[1+3*stride]=
+ src[3+2*stride]=
+ src[2+3*stride]=
+ src[3+3*stride]=(l3 + t3)>>1;
+}
+
+static void pred4x4_down_left_rv40_c(uint8_t *src, const uint8_t *topright, int stride){
+ LOAD_TOP_EDGE
+ LOAD_TOP_RIGHT_EDGE
+ LOAD_LEFT_EDGE
+ LOAD_DOWN_LEFT_EDGE
+
+ src[0+0*stride]=(t0 + t2 + 2*t1 + 2 + l0 + l2 + 2*l1 + 2)>>3;
+ src[1+0*stride]=
+ src[0+1*stride]=(t1 + t3 + 2*t2 + 2 + l1 + l3 + 2*l2 + 2)>>3;
+ src[2+0*stride]=
+ src[1+1*stride]=
+ src[0+2*stride]=(t2 + t4 + 2*t3 + 2 + l2 + l4 + 2*l3 + 2)>>3;
+ src[3+0*stride]=
+ src[2+1*stride]=
+ src[1+2*stride]=
+ src[0+3*stride]=(t3 + t5 + 2*t4 + 2 + l3 + l5 + 2*l4 + 2)>>3;
+ src[3+1*stride]=
+ src[2+2*stride]=
+ src[1+3*stride]=(t4 + t6 + 2*t5 + 2 + l4 + l6 + 2*l5 + 2)>>3;
+ src[3+2*stride]=
+ src[2+3*stride]=(t5 + t7 + 2*t6 + 2 + l5 + l7 + 2*l6 + 2)>>3;
+ src[3+3*stride]=(t6 + t7 + 1 + l6 + l7 + 1)>>2;
+}
+
+static void pred4x4_down_left_rv40_nodown_c(uint8_t *src, const uint8_t *topright, int stride){
+ LOAD_TOP_EDGE
+ LOAD_TOP_RIGHT_EDGE
+ LOAD_LEFT_EDGE
+
+ src[0+0*stride]=(t0 + t2 + 2*t1 + 2 + l0 + l2 + 2*l1 + 2)>>3;
+ src[1+0*stride]=
+ src[0+1*stride]=(t1 + t3 + 2*t2 + 2 + l1 + l3 + 2*l2 + 2)>>3;
+ src[2+0*stride]=
+ src[1+1*stride]=
+ src[0+2*stride]=(t2 + t4 + 2*t3 + 2 + l2 + 3*l3 + 2)>>3;
+ src[3+0*stride]=
+ src[2+1*stride]=
+ src[1+2*stride]=
+ src[0+3*stride]=(t3 + t5 + 2*t4 + 2 + l3*4 + 2)>>3;
+ src[3+1*stride]=
+ src[2+2*stride]=
+ src[1+3*stride]=(t4 + t6 + 2*t5 + 2 + l3*4 + 2)>>3;
+ src[3+2*stride]=
+ src[2+3*stride]=(t5 + t7 + 2*t6 + 2 + l3*4 + 2)>>3;
+ src[3+3*stride]=(t6 + t7 + 1 + 2*l3 + 1)>>2;
+}
+
+static void pred4x4_vertical_left_rv40(uint8_t *src, const uint8_t *topright, int stride,
+ const int l0, const int l1, const int l2, const int l3, const int l4){
+ LOAD_TOP_EDGE
+ LOAD_TOP_RIGHT_EDGE
+
+ src[0+0*stride]=(2*t0 + 2*t1 + l1 + 2*l2 + l3 + 4)>>3;
+ src[1+0*stride]=
+ src[0+2*stride]=(t1 + t2 + 1)>>1;
+ src[2+0*stride]=
+ src[1+2*stride]=(t2 + t3 + 1)>>1;
+ src[3+0*stride]=
+ src[2+2*stride]=(t3 + t4+ 1)>>1;
+ src[3+2*stride]=(t4 + t5+ 1)>>1;
+ src[0+1*stride]=(t0 + 2*t1 + t2 + l2 + 2*l3 + l4 + 4)>>3;
+ src[1+1*stride]=
+ src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
+ src[2+1*stride]=
+ src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
+ src[3+1*stride]=
+ src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
+ src[3+3*stride]=(t4 + 2*t5 + t6 + 2)>>2;
+}
+
+static void pred4x4_vertical_left_rv40_c(uint8_t *src, const uint8_t *topright, int stride){
+ LOAD_LEFT_EDGE
+ LOAD_DOWN_LEFT_EDGE
+
+ pred4x4_vertical_left_rv40(src, topright, stride, l0, l1, l2, l3, l4);
+}
+
+static void pred4x4_vertical_left_rv40_nodown_c(uint8_t *src, const uint8_t *topright, int stride){
+ LOAD_LEFT_EDGE
+
+ pred4x4_vertical_left_rv40(src, topright, stride, l0, l1, l2, l3, l3);
+}
+
+static void pred4x4_vertical_left_vp8_c(uint8_t *src, const uint8_t *topright, int stride){
+ LOAD_TOP_EDGE
+ LOAD_TOP_RIGHT_EDGE
+
+ src[0+0*stride]=(t0 + t1 + 1)>>1;
+ src[1+0*stride]=
+ src[0+2*stride]=(t1 + t2 + 1)>>1;
+ src[2+0*stride]=
+ src[1+2*stride]=(t2 + t3 + 1)>>1;
+ src[3+0*stride]=
+ src[2+2*stride]=(t3 + t4 + 1)>>1;
+ src[0+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
+ src[1+1*stride]=
+ src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
+ src[2+1*stride]=
+ src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
+ src[3+1*stride]=
+ src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
+ src[3+2*stride]=(t4 + 2*t5 + t6 + 2)>>2;
+ src[3+3*stride]=(t5 + 2*t6 + t7 + 2)>>2;
+}
+
+static void pred4x4_horizontal_up_rv40_c(uint8_t *src, const uint8_t *topright, int stride){
+ LOAD_LEFT_EDGE
+ LOAD_DOWN_LEFT_EDGE
+ LOAD_TOP_EDGE
+ LOAD_TOP_RIGHT_EDGE
+
+ src[0+0*stride]=(t1 + 2*t2 + t3 + 2*l0 + 2*l1 + 4)>>3;
+ src[1+0*stride]=(t2 + 2*t3 + t4 + l0 + 2*l1 + l2 + 4)>>3;
+ src[2+0*stride]=
+ src[0+1*stride]=(t3 + 2*t4 + t5 + 2*l1 + 2*l2 + 4)>>3;
+ src[3+0*stride]=
+ src[1+1*stride]=(t4 + 2*t5 + t6 + l1 + 2*l2 + l3 + 4)>>3;
+ src[2+1*stride]=
+ src[0+2*stride]=(t5 + 2*t6 + t7 + 2*l2 + 2*l3 + 4)>>3;
+ src[3+1*stride]=
+ src[1+2*stride]=(t6 + 3*t7 + l2 + 3*l3 + 4)>>3;
+ src[3+2*stride]=
+ src[1+3*stride]=(l3 + 2*l4 + l5 + 2)>>2;
+ src[0+3*stride]=
+ src[2+2*stride]=(t6 + t7 + l3 + l4 + 2)>>2;
+ src[2+3*stride]=(l4 + l5 + 1)>>1;
+ src[3+3*stride]=(l4 + 2*l5 + l6 + 2)>>2;
+}
+
+static void pred4x4_horizontal_up_rv40_nodown_c(uint8_t *src, const uint8_t *topright, int stride){
+ LOAD_LEFT_EDGE
+ LOAD_TOP_EDGE
+ LOAD_TOP_RIGHT_EDGE
+
+ src[0+0*stride]=(t1 + 2*t2 + t3 + 2*l0 + 2*l1 + 4)>>3;
+ src[1+0*stride]=(t2 + 2*t3 + t4 + l0 + 2*l1 + l2 + 4)>>3;
+ src[2+0*stride]=
+ src[0+1*stride]=(t3 + 2*t4 + t5 + 2*l1 + 2*l2 + 4)>>3;
+ src[3+0*stride]=
+ src[1+1*stride]=(t4 + 2*t5 + t6 + l1 + 2*l2 + l3 + 4)>>3;
+ src[2+1*stride]=
+ src[0+2*stride]=(t5 + 2*t6 + t7 + 2*l2 + 2*l3 + 4)>>3;
+ src[3+1*stride]=
+ src[1+2*stride]=(t6 + 3*t7 + l2 + 3*l3 + 4)>>3;
+ src[3+2*stride]=
+ src[1+3*stride]=l3;
+ src[0+3*stride]=
+ src[2+2*stride]=(t6 + t7 + 2*l3 + 2)>>2;
+ src[2+3*stride]=
+ src[3+3*stride]=l3;
+}
+
+static void pred4x4_tm_vp8_c(uint8_t *src, const uint8_t *topright, int stride){
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP - src[-1-stride];
+ uint8_t *top = src-stride;
+ int y;
+
+ for (y = 0; y < 4; y++) {
+ uint8_t *cm_in = cm + src[-1];
+ src[0] = cm_in[top[0]];
+ src[1] = cm_in[top[1]];
+ src[2] = cm_in[top[2]];
+ src[3] = cm_in[top[3]];
+ src += stride;
+ }
+}
+
+static void pred16x16_plane_svq3_c(uint8_t *src, int stride){
+ pred16x16_plane_compat_8_c(src, stride, 1, 0);
+}
+
+static void pred16x16_plane_rv40_c(uint8_t *src, int stride){
+ pred16x16_plane_compat_8_c(src, stride, 0, 1);
+}
+
+static void pred16x16_tm_vp8_c(uint8_t *src, int stride){
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP - src[-1-stride];
+ uint8_t *top = src-stride;
+ int y;
+
+ for (y = 0; y < 16; y++) {
+ uint8_t *cm_in = cm + src[-1];
+ src[0] = cm_in[top[0]];
+ src[1] = cm_in[top[1]];
+ src[2] = cm_in[top[2]];
+ src[3] = cm_in[top[3]];
+ src[4] = cm_in[top[4]];
+ src[5] = cm_in[top[5]];
+ src[6] = cm_in[top[6]];
+ src[7] = cm_in[top[7]];
+ src[8] = cm_in[top[8]];
+ src[9] = cm_in[top[9]];
+ src[10] = cm_in[top[10]];
+ src[11] = cm_in[top[11]];
+ src[12] = cm_in[top[12]];
+ src[13] = cm_in[top[13]];
+ src[14] = cm_in[top[14]];
+ src[15] = cm_in[top[15]];
+ src += stride;
+ }
+}
+
+static void pred8x8_left_dc_rv40_c(uint8_t *src, int stride){
+ int i;
+ int dc0;
+
+ dc0=0;
+ for(i=0;i<8; i++)
+ dc0+= src[-1+i*stride];
+ dc0= 0x01010101*((dc0 + 4)>>3);
+
+ for(i=0; i<8; i++){
+ ((uint32_t*)(src+i*stride))[0]=
+ ((uint32_t*)(src+i*stride))[1]= dc0;
+ }
+}
+
+static void pred8x8_top_dc_rv40_c(uint8_t *src, int stride){
+ int i;
+ int dc0;
+
+ dc0=0;
+ for(i=0;i<8; i++)
+ dc0+= src[i-stride];
+ dc0= 0x01010101*((dc0 + 4)>>3);
+
+ for(i=0; i<8; i++){
+ ((uint32_t*)(src+i*stride))[0]=
+ ((uint32_t*)(src+i*stride))[1]= dc0;
+ }
+}
+
+static void pred8x8_dc_rv40_c(uint8_t *src, int stride){
+ int i;
+ int dc0=0;
+
+ for(i=0;i<4; i++){
+ dc0+= src[-1+i*stride] + src[i-stride];
+ dc0+= src[4+i-stride];
+ dc0+= src[-1+(i+4)*stride];
+ }
+ dc0= 0x01010101*((dc0 + 8)>>4);
+
+ for(i=0; i<4; i++){
+ ((uint32_t*)(src+i*stride))[0]= dc0;
+ ((uint32_t*)(src+i*stride))[1]= dc0;
+ }
+ for(i=4; i<8; i++){
+ ((uint32_t*)(src+i*stride))[0]= dc0;
+ ((uint32_t*)(src+i*stride))[1]= dc0;
+ }
+}
+
+static void pred8x8_tm_vp8_c(uint8_t *src, int stride){
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP - src[-1-stride];
+ uint8_t *top = src-stride;
+ int y;
+
+ for (y = 0; y < 8; y++) {
+ uint8_t *cm_in = cm + src[-1];
+ src[0] = cm_in[top[0]];
+ src[1] = cm_in[top[1]];
+ src[2] = cm_in[top[2]];
+ src[3] = cm_in[top[3]];
+ src[4] = cm_in[top[4]];
+ src[5] = cm_in[top[5]];
+ src[6] = cm_in[top[6]];
+ src[7] = cm_in[top[7]];
+ src += stride;
+ }
+}
+
/**
* Set the intra prediction function pointers.
*/
@@ -49,26 +370,27 @@ void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth){
#undef FUNCC
#define FUNC(a, depth) a ## _ ## depth
#define FUNCC(a, depth) a ## _ ## depth ## _c
+#define FUNCD(a) a ## _c
#define H264_PRED(depth) \
if(codec_id != CODEC_ID_RV40){\
if(codec_id == CODEC_ID_VP8) {\
- h->pred4x4[VERT_PRED ]= FUNCC(pred4x4_vertical_vp8 , depth);\
- h->pred4x4[HOR_PRED ]= FUNCC(pred4x4_horizontal_vp8 , depth);\
+ h->pred4x4[VERT_PRED ]= FUNCD(pred4x4_vertical_vp8);\
+ h->pred4x4[HOR_PRED ]= FUNCD(pred4x4_horizontal_vp8);\
} else {\
h->pred4x4[VERT_PRED ]= FUNCC(pred4x4_vertical , depth);\
h->pred4x4[HOR_PRED ]= FUNCC(pred4x4_horizontal , depth);\
}\
h->pred4x4[DC_PRED ]= FUNCC(pred4x4_dc , depth);\
if(codec_id == CODEC_ID_SVQ3)\
- h->pred4x4[DIAG_DOWN_LEFT_PRED ]= FUNCC(pred4x4_down_left_svq3, depth);\
+ h->pred4x4[DIAG_DOWN_LEFT_PRED ]= FUNCD(pred4x4_down_left_svq3);\
else\
h->pred4x4[DIAG_DOWN_LEFT_PRED ]= FUNCC(pred4x4_down_left , depth);\
h->pred4x4[DIAG_DOWN_RIGHT_PRED]= FUNCC(pred4x4_down_right , depth);\
h->pred4x4[VERT_RIGHT_PRED ]= FUNCC(pred4x4_vertical_right , depth);\
h->pred4x4[HOR_DOWN_PRED ]= FUNCC(pred4x4_horizontal_down , depth);\
if (codec_id == CODEC_ID_VP8) {\
- h->pred4x4[VERT_LEFT_PRED ]= FUNCC(pred4x4_vertical_left_vp8 , depth);\
+ h->pred4x4[VERT_LEFT_PRED ]= FUNCD(pred4x4_vertical_left_vp8);\
} else\
h->pred4x4[VERT_LEFT_PRED ]= FUNCC(pred4x4_vertical_left , depth);\
h->pred4x4[HOR_UP_PRED ]= FUNCC(pred4x4_horizontal_up , depth);\
@@ -77,7 +399,7 @@ void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth){
h->pred4x4[TOP_DC_PRED ]= FUNCC(pred4x4_top_dc , depth);\
h->pred4x4[DC_128_PRED ]= FUNCC(pred4x4_128_dc , depth);\
} else {\
- h->pred4x4[TM_VP8_PRED ]= FUNCC(pred4x4_tm_vp8 , depth);\
+ h->pred4x4[TM_VP8_PRED ]= FUNCD(pred4x4_tm_vp8);\
h->pred4x4[DC_127_PRED ]= FUNCC(pred4x4_127_dc , depth);\
h->pred4x4[DC_129_PRED ]= FUNCC(pred4x4_129_dc , depth);\
h->pred4x4[VERT_VP8_PRED ]= FUNCC(pred4x4_vertical , depth);\
@@ -87,18 +409,18 @@ void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth){
h->pred4x4[VERT_PRED ]= FUNCC(pred4x4_vertical , depth);\
h->pred4x4[HOR_PRED ]= FUNCC(pred4x4_horizontal , depth);\
h->pred4x4[DC_PRED ]= FUNCC(pred4x4_dc , depth);\
- h->pred4x4[DIAG_DOWN_LEFT_PRED ]= FUNCC(pred4x4_down_left_rv40 , depth);\
+ h->pred4x4[DIAG_DOWN_LEFT_PRED ]= FUNCD(pred4x4_down_left_rv40);\
h->pred4x4[DIAG_DOWN_RIGHT_PRED]= FUNCC(pred4x4_down_right , depth);\
h->pred4x4[VERT_RIGHT_PRED ]= FUNCC(pred4x4_vertical_right , depth);\
h->pred4x4[HOR_DOWN_PRED ]= FUNCC(pred4x4_horizontal_down , depth);\
- h->pred4x4[VERT_LEFT_PRED ]= FUNCC(pred4x4_vertical_left_rv40, depth);\
- h->pred4x4[HOR_UP_PRED ]= FUNCC(pred4x4_horizontal_up_rv40, depth);\
+ h->pred4x4[VERT_LEFT_PRED ]= FUNCD(pred4x4_vertical_left_rv40);\
+ h->pred4x4[HOR_UP_PRED ]= FUNCD(pred4x4_horizontal_up_rv40);\
h->pred4x4[LEFT_DC_PRED ]= FUNCC(pred4x4_left_dc , depth);\
h->pred4x4[TOP_DC_PRED ]= FUNCC(pred4x4_top_dc , depth);\
h->pred4x4[DC_128_PRED ]= FUNCC(pred4x4_128_dc , depth);\
- h->pred4x4[DIAG_DOWN_LEFT_PRED_RV40_NODOWN]= FUNCC(pred4x4_down_left_rv40_nodown, depth);\
- h->pred4x4[HOR_UP_PRED_RV40_NODOWN]= FUNCC(pred4x4_horizontal_up_rv40_nodown , depth);\
- h->pred4x4[VERT_LEFT_PRED_RV40_NODOWN]= FUNCC(pred4x4_vertical_left_rv40_nodown , depth);\
+ h->pred4x4[DIAG_DOWN_LEFT_PRED_RV40_NODOWN]= FUNCD(pred4x4_down_left_rv40_nodown);\
+ h->pred4x4[HOR_UP_PRED_RV40_NODOWN]= FUNCD(pred4x4_horizontal_up_rv40_nodown);\
+ h->pred4x4[VERT_LEFT_PRED_RV40_NODOWN]= FUNCD(pred4x4_vertical_left_rv40_nodown);\
}\
\
h->pred8x8l[VERT_PRED ]= FUNCC(pred8x8l_vertical , depth);\
@@ -119,7 +441,7 @@ void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth){
if (codec_id != CODEC_ID_VP8) {\
h->pred8x8[PLANE_PRED8x8]= FUNCC(pred8x8_plane , depth);\
} else\
- h->pred8x8[PLANE_PRED8x8]= FUNCC(pred8x8_tm_vp8 , depth);\
+ h->pred8x8[PLANE_PRED8x8]= FUNCD(pred8x8_tm_vp8);\
if(codec_id != CODEC_ID_RV40 && codec_id != CODEC_ID_VP8){\
h->pred8x8[DC_PRED8x8 ]= FUNCC(pred8x8_dc , depth);\
h->pred8x8[LEFT_DC_PRED8x8]= FUNCC(pred8x8_left_dc , depth);\
@@ -129,9 +451,9 @@ void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth){
h->pred8x8[ALZHEIMER_DC_L00_PRED8x8 ]= FUNC(pred8x8_mad_cow_dc_l00, depth);\
h->pred8x8[ALZHEIMER_DC_0L0_PRED8x8 ]= FUNC(pred8x8_mad_cow_dc_0l0, depth);\
}else{\
- h->pred8x8[DC_PRED8x8 ]= FUNCC(pred8x8_dc_rv40 , depth);\
- h->pred8x8[LEFT_DC_PRED8x8]= FUNCC(pred8x8_left_dc_rv40 , depth);\
- h->pred8x8[TOP_DC_PRED8x8 ]= FUNCC(pred8x8_top_dc_rv40 , depth);\
+ h->pred8x8[DC_PRED8x8 ]= FUNCD(pred8x8_dc_rv40);\
+ h->pred8x8[LEFT_DC_PRED8x8]= FUNCD(pred8x8_left_dc_rv40);\
+ h->pred8x8[TOP_DC_PRED8x8 ]= FUNCD(pred8x8_top_dc_rv40);\
if (codec_id == CODEC_ID_VP8) {\
h->pred8x8[DC_127_PRED8x8]= FUNCC(pred8x8_127_dc , depth);\
h->pred8x8[DC_129_PRED8x8]= FUNCC(pred8x8_129_dc , depth);\
@@ -144,13 +466,13 @@ void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth){
h->pred16x16[HOR_PRED8x8 ]= FUNCC(pred16x16_horizontal , depth);\
switch(codec_id){\
case CODEC_ID_SVQ3:\
- h->pred16x16[PLANE_PRED8x8 ]= FUNCC(pred16x16_plane_svq3 , depth);\
+ h->pred16x16[PLANE_PRED8x8 ]= FUNCD(pred16x16_plane_svq3);\
break;\
case CODEC_ID_RV40:\
- h->pred16x16[PLANE_PRED8x8 ]= FUNCC(pred16x16_plane_rv40 , depth);\
+ h->pred16x16[PLANE_PRED8x8 ]= FUNCD(pred16x16_plane_rv40);\
break;\
case CODEC_ID_VP8:\
- h->pred16x16[PLANE_PRED8x8 ]= FUNCC(pred16x16_tm_vp8 , depth);\
+ h->pred16x16[PLANE_PRED8x8 ]= FUNCD(pred16x16_tm_vp8);\
h->pred16x16[DC_127_PRED8x8]= FUNCC(pred16x16_127_dc , depth);\
h->pred16x16[DC_129_PRED8x8]= FUNCC(pred16x16_129_dc , depth);\
break;\
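In h264pred.c the codec-specific predictors (the VP8, RV40 and SVQ3 variants added above) are moved out of the bit-depth template and registered through the new FUNCD() macro, which appends only a _c suffix, while the generic predictors keep using FUNCC(), which also pastes the bit depth. Those codecs are 8-bit only, so instantiating their predictors three times bought nothing. Expansion sketch using the macros as they appear in the patch:

#define FUNCC(a, depth) a ## _ ## depth ## _c
#define FUNCD(a)        a ## _c

/* FUNCC(pred4x4_dc, 10) -> pred4x4_dc_10_c   (depth-templated predictor)   */
/* FUNCD(pred4x4_tm_vp8) -> pred4x4_tm_vp8_c  (8-bit-only, VP8-specific)    */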
diff --git a/libavcodec/h264pred_internal.h b/libavcodec/h264pred_template.c
index a51be6c382..3cd4463d76 100644
--- a/libavcodec/h264pred_internal.h
+++ b/libavcodec/h264pred_template.c
@@ -26,86 +26,96 @@
*/
#include "mathops.h"
-#include "h264_high_depth.h"
+#include "high_bit_depth.h"
-static void FUNCC(pred4x4_vertical)(uint8_t *p_src, const uint8_t *topright, int p_stride){
- pixel *src = (pixel*)p_src;
- int stride = p_stride>>(sizeof(pixel)-1);
- const pixel4 a= ((pixel4*)(src-stride))[0];
- ((pixel4*)(src+0*stride))[0]= a;
- ((pixel4*)(src+1*stride))[0]= a;
- ((pixel4*)(src+2*stride))[0]= a;
- ((pixel4*)(src+3*stride))[0]= a;
+static void FUNCC(pred4x4_vertical)(uint8_t *_src, const uint8_t *topright, int _stride){
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ const pixel4 a= AV_RN4PA(src-stride);
+
+ AV_WN4PA(src+0*stride, a);
+ AV_WN4PA(src+1*stride, a);
+ AV_WN4PA(src+2*stride, a);
+ AV_WN4PA(src+3*stride, a);
}
-static void FUNCC(pred4x4_horizontal)(uint8_t *p_src, const uint8_t *topright, int p_stride){
- pixel *src = (pixel*)p_src;
- int stride = p_stride>>(sizeof(pixel)-1);
- ((pixel4*)(src+0*stride))[0]= PIXEL_SPLAT_X4(src[-1+0*stride]);
- ((pixel4*)(src+1*stride))[0]= PIXEL_SPLAT_X4(src[-1+1*stride]);
- ((pixel4*)(src+2*stride))[0]= PIXEL_SPLAT_X4(src[-1+2*stride]);
- ((pixel4*)(src+3*stride))[0]= PIXEL_SPLAT_X4(src[-1+3*stride]);
+static void FUNCC(pred4x4_horizontal)(uint8_t *_src, const uint8_t *topright, int _stride){
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ AV_WN4PA(src+0*stride, PIXEL_SPLAT_X4(src[-1+0*stride]));
+ AV_WN4PA(src+1*stride, PIXEL_SPLAT_X4(src[-1+1*stride]));
+ AV_WN4PA(src+2*stride, PIXEL_SPLAT_X4(src[-1+2*stride]));
+ AV_WN4PA(src+3*stride, PIXEL_SPLAT_X4(src[-1+3*stride]));
}
-static void FUNCC(pred4x4_dc)(uint8_t *p_src, const uint8_t *topright, int p_stride){
- pixel *src = (pixel*)p_src;
- int stride = p_stride>>(sizeof(pixel)-1);
+static void FUNCC(pred4x4_dc)(uint8_t *_src, const uint8_t *topright, int _stride){
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
const int dc= ( src[-stride] + src[1-stride] + src[2-stride] + src[3-stride]
+ src[-1+0*stride] + src[-1+1*stride] + src[-1+2*stride] + src[-1+3*stride] + 4) >>3;
+ const pixel4 a = PIXEL_SPLAT_X4(dc);
- ((pixel4*)(src+0*stride))[0]=
- ((pixel4*)(src+1*stride))[0]=
- ((pixel4*)(src+2*stride))[0]=
- ((pixel4*)(src+3*stride))[0]= PIXEL_SPLAT_X4(dc);
+ AV_WN4PA(src+0*stride, a);
+ AV_WN4PA(src+1*stride, a);
+ AV_WN4PA(src+2*stride, a);
+ AV_WN4PA(src+3*stride, a);
}
-static void FUNCC(pred4x4_left_dc)(uint8_t *p_src, const uint8_t *topright, int p_stride){
- pixel *src = (pixel*)p_src;
- int stride = p_stride>>(sizeof(pixel)-1);
+static void FUNCC(pred4x4_left_dc)(uint8_t *_src, const uint8_t *topright, int _stride){
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
const int dc= ( src[-1+0*stride] + src[-1+1*stride] + src[-1+2*stride] + src[-1+3*stride] + 2) >>2;
+ const pixel4 a = PIXEL_SPLAT_X4(dc);
- ((pixel4*)(src+0*stride))[0]=
- ((pixel4*)(src+1*stride))[0]=
- ((pixel4*)(src+2*stride))[0]=
- ((pixel4*)(src+3*stride))[0]= PIXEL_SPLAT_X4(dc);
+ AV_WN4PA(src+0*stride, a);
+ AV_WN4PA(src+1*stride, a);
+ AV_WN4PA(src+2*stride, a);
+ AV_WN4PA(src+3*stride, a);
}
-static void FUNCC(pred4x4_top_dc)(uint8_t *p_src, const uint8_t *topright, int p_stride){
- pixel *src = (pixel*)p_src;
- int stride = p_stride>>(sizeof(pixel)-1);
+static void FUNCC(pred4x4_top_dc)(uint8_t *_src, const uint8_t *topright, int _stride){
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
const int dc= ( src[-stride] + src[1-stride] + src[2-stride] + src[3-stride] + 2) >>2;
+ const pixel4 a = PIXEL_SPLAT_X4(dc);
- ((pixel4*)(src+0*stride))[0]=
- ((pixel4*)(src+1*stride))[0]=
- ((pixel4*)(src+2*stride))[0]=
- ((pixel4*)(src+3*stride))[0]= PIXEL_SPLAT_X4(dc);
+ AV_WN4PA(src+0*stride, a);
+ AV_WN4PA(src+1*stride, a);
+ AV_WN4PA(src+2*stride, a);
+ AV_WN4PA(src+3*stride, a);
}
-static void FUNCC(pred4x4_128_dc)(uint8_t *p_src, const uint8_t *topright, int p_stride){
- pixel *src = (pixel*)p_src;
- int stride = p_stride>>(sizeof(pixel)-1);
- ((pixel4*)(src+0*stride))[0]=
- ((pixel4*)(src+1*stride))[0]=
- ((pixel4*)(src+2*stride))[0]=
- ((pixel4*)(src+3*stride))[0]= PIXEL_SPLAT_X4(1<<(BIT_DEPTH-1));
+static void FUNCC(pred4x4_128_dc)(uint8_t *_src, const uint8_t *topright, int _stride){
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ const pixel4 a = PIXEL_SPLAT_X4(1<<(BIT_DEPTH-1));
+
+ AV_WN4PA(src+0*stride, a);
+ AV_WN4PA(src+1*stride, a);
+ AV_WN4PA(src+2*stride, a);
+ AV_WN4PA(src+3*stride, a);
}
-static void FUNCC(pred4x4_127_dc)(uint8_t *p_src, const uint8_t *topright, int p_stride){
- pixel *src = (pixel*)p_src;
- int stride = p_stride>>(sizeof(pixel)-1);
- ((pixel4*)(src+0*stride))[0]=
- ((pixel4*)(src+1*stride))[0]=
- ((pixel4*)(src+2*stride))[0]=
- ((pixel4*)(src+3*stride))[0]= PIXEL_SPLAT_X4((1<<(BIT_DEPTH-1))-1);
+static void FUNCC(pred4x4_127_dc)(uint8_t *_src, const uint8_t *topright, int _stride){
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ const pixel4 a = PIXEL_SPLAT_X4((1<<(BIT_DEPTH-1))-1);
+
+ AV_WN4PA(src+0*stride, a);
+ AV_WN4PA(src+1*stride, a);
+ AV_WN4PA(src+2*stride, a);
+ AV_WN4PA(src+3*stride, a);
}
-static void FUNCC(pred4x4_129_dc)(uint8_t *p_src, const uint8_t *topright, int p_stride){
- pixel *src = (pixel*)p_src;
- int stride = p_stride>>(sizeof(pixel)-1);
- ((pixel4*)(src+0*stride))[0]=
- ((pixel4*)(src+1*stride))[0]=
- ((pixel4*)(src+2*stride))[0]=
- ((pixel4*)(src+3*stride))[0]= PIXEL_SPLAT_X4((1<<(BIT_DEPTH-1))+1);
+static void FUNCC(pred4x4_129_dc)(uint8_t *_src, const uint8_t *topright, int _stride){
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ const pixel4 a = PIXEL_SPLAT_X4((1<<(BIT_DEPTH-1))+1);
+
+ AV_WN4PA(src+0*stride, a);
+ AV_WN4PA(src+1*stride, a);
+ AV_WN4PA(src+2*stride, a);
+ AV_WN4PA(src+3*stride, a);
}
@@ -133,39 +143,9 @@ static void FUNCC(pred4x4_129_dc)(uint8_t *p_src, const uint8_t *topright, int p
const int av_unused t2= src[ 2-1*stride];\
const int av_unused t3= src[ 3-1*stride];\
-static void FUNCC(pred4x4_vertical_vp8)(uint8_t *p_src, const uint8_t *p_topright, int p_stride){
- pixel *src = (pixel*)p_src;
- const pixel *topright = (const pixel*)p_topright;
- int stride = p_stride>>(sizeof(pixel)-1);
- const int lt= src[-1-1*stride];
- LOAD_TOP_EDGE
- LOAD_TOP_RIGHT_EDGE
- pixel4 v = PACK_4U8((lt + 2*t0 + t1 + 2) >> 2,
- (t0 + 2*t1 + t2 + 2) >> 2,
- (t1 + 2*t2 + t3 + 2) >> 2,
- (t2 + 2*t3 + t4 + 2) >> 2);
-
- AV_WN4PA(src+0*stride, v);
- AV_WN4PA(src+1*stride, v);
- AV_WN4PA(src+2*stride, v);
- AV_WN4PA(src+3*stride, v);
-}
-
-static void FUNCC(pred4x4_horizontal_vp8)(uint8_t *p_src, const uint8_t *topright, int p_stride){
- pixel *src = (pixel*)p_src;
- int stride = p_stride>>(sizeof(pixel)-1);
- const int lt= src[-1-1*stride];
- LOAD_LEFT_EDGE
-
- AV_WN4PA(src+0*stride, PIXEL_SPLAT_X4((lt + 2*l0 + l1 + 2) >> 2));
- AV_WN4PA(src+1*stride, PIXEL_SPLAT_X4((l0 + 2*l1 + l2 + 2) >> 2));
- AV_WN4PA(src+2*stride, PIXEL_SPLAT_X4((l1 + 2*l2 + l3 + 2) >> 2));
- AV_WN4PA(src+3*stride, PIXEL_SPLAT_X4((l2 + 2*l3 + l3 + 2) >> 2));
-}
-
-static void FUNCC(pred4x4_down_right)(uint8_t *p_src, const uint8_t *topright, int p_stride){
- pixel *src = (pixel*)p_src;
- int stride = p_stride>>(sizeof(pixel)-1);
+static void FUNCC(pred4x4_down_right)(uint8_t *_src, const uint8_t *topright, int _stride){
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
const int lt= src[-1-1*stride];
LOAD_TOP_EDGE
LOAD_LEFT_EDGE
@@ -188,10 +168,10 @@ static void FUNCC(pred4x4_down_right)(uint8_t *p_src, const uint8_t *topright, i
src[3+0*stride]=(t1 + 2*t2 + t3 + 2)>>2;
}
-static void FUNCC(pred4x4_down_left)(uint8_t *p_src, const uint8_t *p_topright, int p_stride){
- pixel *src = (pixel*)p_src;
- const pixel *topright = (const pixel*)p_topright;
- int stride = p_stride>>(sizeof(pixel)-1);
+static void FUNCC(pred4x4_down_left)(uint8_t *_src, const uint8_t *_topright, int _stride){
+ pixel *src = (pixel*)_src;
+ const pixel *topright = (const pixel*)_topright;
+ int stride = _stride>>(sizeof(pixel)-1);
LOAD_TOP_EDGE
LOAD_TOP_RIGHT_EDGE
// LOAD_LEFT_EDGE
@@ -214,88 +194,9 @@ static void FUNCC(pred4x4_down_left)(uint8_t *p_src, const uint8_t *p_topright,
src[3+3*stride]=(t6 + 3*t7 + 2)>>2;
}
-static void FUNCC(pred4x4_down_left_svq3)(uint8_t *p_src, const uint8_t *topright, int p_stride){
- pixel *src = (pixel*)p_src;
- int stride = p_stride>>(sizeof(pixel)-1);
- LOAD_TOP_EDGE
- LOAD_LEFT_EDGE
- const av_unused int unu0= t0;
- const av_unused int unu1= l0;
-
- src[0+0*stride]=(l1 + t1)>>1;
- src[1+0*stride]=
- src[0+1*stride]=(l2 + t2)>>1;
- src[2+0*stride]=
- src[1+1*stride]=
- src[0+2*stride]=
- src[3+0*stride]=
- src[2+1*stride]=
- src[1+2*stride]=
- src[0+3*stride]=
- src[3+1*stride]=
- src[2+2*stride]=
- src[1+3*stride]=
- src[3+2*stride]=
- src[2+3*stride]=
- src[3+3*stride]=(l3 + t3)>>1;
-}
-
-static void FUNCC(pred4x4_down_left_rv40)(uint8_t *p_src, const uint8_t *p_topright, int p_stride){
- pixel *src = (pixel*)p_src;
- const pixel *topright = (const pixel*)p_topright;
- int stride = p_stride>>(sizeof(pixel)-1);
- LOAD_TOP_EDGE
- LOAD_TOP_RIGHT_EDGE
- LOAD_LEFT_EDGE
- LOAD_DOWN_LEFT_EDGE
-
- src[0+0*stride]=(t0 + t2 + 2*t1 + 2 + l0 + l2 + 2*l1 + 2)>>3;
- src[1+0*stride]=
- src[0+1*stride]=(t1 + t3 + 2*t2 + 2 + l1 + l3 + 2*l2 + 2)>>3;
- src[2+0*stride]=
- src[1+1*stride]=
- src[0+2*stride]=(t2 + t4 + 2*t3 + 2 + l2 + l4 + 2*l3 + 2)>>3;
- src[3+0*stride]=
- src[2+1*stride]=
- src[1+2*stride]=
- src[0+3*stride]=(t3 + t5 + 2*t4 + 2 + l3 + l5 + 2*l4 + 2)>>3;
- src[3+1*stride]=
- src[2+2*stride]=
- src[1+3*stride]=(t4 + t6 + 2*t5 + 2 + l4 + l6 + 2*l5 + 2)>>3;
- src[3+2*stride]=
- src[2+3*stride]=(t5 + t7 + 2*t6 + 2 + l5 + l7 + 2*l6 + 2)>>3;
- src[3+3*stride]=(t6 + t7 + 1 + l6 + l7 + 1)>>2;
-}
-
-static void FUNCC(pred4x4_down_left_rv40_nodown)(uint8_t *p_src, const uint8_t *p_topright, int p_stride){
- pixel *src = (pixel*)p_src;
- const pixel *topright = (const pixel*)p_topright;
- int stride = p_stride>>(sizeof(pixel)-1);
- LOAD_TOP_EDGE
- LOAD_TOP_RIGHT_EDGE
- LOAD_LEFT_EDGE
-
- src[0+0*stride]=(t0 + t2 + 2*t1 + 2 + l0 + l2 + 2*l1 + 2)>>3;
- src[1+0*stride]=
- src[0+1*stride]=(t1 + t3 + 2*t2 + 2 + l1 + l3 + 2*l2 + 2)>>3;
- src[2+0*stride]=
- src[1+1*stride]=
- src[0+2*stride]=(t2 + t4 + 2*t3 + 2 + l2 + 3*l3 + 2)>>3;
- src[3+0*stride]=
- src[2+1*stride]=
- src[1+2*stride]=
- src[0+3*stride]=(t3 + t5 + 2*t4 + 2 + l3*4 + 2)>>3;
- src[3+1*stride]=
- src[2+2*stride]=
- src[1+3*stride]=(t4 + t6 + 2*t5 + 2 + l3*4 + 2)>>3;
- src[3+2*stride]=
- src[2+3*stride]=(t5 + t7 + 2*t6 + 2 + l3*4 + 2)>>3;
- src[3+3*stride]=(t6 + t7 + 1 + 2*l3 + 1)>>2;
-}
-
-static void FUNCC(pred4x4_vertical_right)(uint8_t *p_src, const uint8_t *topright, int p_stride){
- pixel *src = (pixel*)p_src;
- int stride = p_stride>>(sizeof(pixel)-1);
+static void FUNCC(pred4x4_vertical_right)(uint8_t *_src, const uint8_t *topright, int _stride){
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
const int lt= src[-1-1*stride];
LOAD_TOP_EDGE
LOAD_LEFT_EDGE
@@ -318,10 +219,10 @@ static void FUNCC(pred4x4_vertical_right)(uint8_t *p_src, const uint8_t *toprigh
src[0+3*stride]=(l0 + 2*l1 + l2 + 2)>>2;
}
-static void FUNCC(pred4x4_vertical_left)(uint8_t *p_src, const uint8_t *p_topright, int p_stride){
- pixel *src = (pixel*)p_src;
- const pixel *topright = (const pixel*)p_topright;
- int stride = p_stride>>(sizeof(pixel)-1);
+static void FUNCC(pred4x4_vertical_left)(uint8_t *_src, const uint8_t *_topright, int _stride){
+ pixel *src = (pixel*)_src;
+ const pixel *topright = (const pixel*)_topright;
+ int stride = _stride>>(sizeof(pixel)-1);
LOAD_TOP_EDGE
LOAD_TOP_RIGHT_EDGE
@@ -343,77 +244,9 @@ static void FUNCC(pred4x4_vertical_left)(uint8_t *p_src, const uint8_t *p_toprig
src[3+3*stride]=(t4 + 2*t5 + t6 + 2)>>2;
}
-static void FUNCC(pred4x4_vertical_left_rv40_internal)(uint8_t *p_src, const uint8_t *p_topright, int p_stride,
- const int l0, const int l1, const int l2, const int l3, const int l4){
- pixel *src = (pixel*)p_src;
- const pixel *topright = (const pixel*)p_topright;
- int stride = p_stride>>(sizeof(pixel)-1);
- LOAD_TOP_EDGE
- LOAD_TOP_RIGHT_EDGE
-
- src[0+0*stride]=(2*t0 + 2*t1 + l1 + 2*l2 + l3 + 4)>>3;
- src[1+0*stride]=
- src[0+2*stride]=(t1 + t2 + 1)>>1;
- src[2+0*stride]=
- src[1+2*stride]=(t2 + t3 + 1)>>1;
- src[3+0*stride]=
- src[2+2*stride]=(t3 + t4+ 1)>>1;
- src[3+2*stride]=(t4 + t5+ 1)>>1;
- src[0+1*stride]=(t0 + 2*t1 + t2 + l2 + 2*l3 + l4 + 4)>>3;
- src[1+1*stride]=
- src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
- src[2+1*stride]=
- src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
- src[3+1*stride]=
- src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
- src[3+3*stride]=(t4 + 2*t5 + t6 + 2)>>2;
-}
-
-static void FUNCC(pred4x4_vertical_left_rv40)(uint8_t *p_src, const uint8_t *topright, int p_stride){
- pixel *src = (pixel*)p_src;
- int stride = p_stride>>(sizeof(pixel)-1);
- LOAD_LEFT_EDGE
- LOAD_DOWN_LEFT_EDGE
-
- FUNCC(pred4x4_vertical_left_rv40_internal)(p_src, topright, p_stride, l0, l1, l2, l3, l4);
-}
-
-static void FUNCC(pred4x4_vertical_left_rv40_nodown)(uint8_t *p_src, const uint8_t *topright, int p_stride){
- pixel *src = (pixel*)p_src;
- int stride = p_stride>>(sizeof(pixel)-1);
- LOAD_LEFT_EDGE
-
- FUNCC(pred4x4_vertical_left_rv40_internal)(p_src, topright, p_stride, l0, l1, l2, l3, l3);
-}
-
-static void FUNCC(pred4x4_vertical_left_vp8)(uint8_t *p_src, const uint8_t *p_topright, int p_stride){
- pixel *src = (pixel*)p_src;
- const pixel *topright = (const pixel*)p_topright;
- int stride = p_stride>>(sizeof(pixel)-1);
- LOAD_TOP_EDGE
- LOAD_TOP_RIGHT_EDGE
-
- src[0+0*stride]=(t0 + t1 + 1)>>1;
- src[1+0*stride]=
- src[0+2*stride]=(t1 + t2 + 1)>>1;
- src[2+0*stride]=
- src[1+2*stride]=(t2 + t3 + 1)>>1;
- src[3+0*stride]=
- src[2+2*stride]=(t3 + t4 + 1)>>1;
- src[0+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
- src[1+1*stride]=
- src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
- src[2+1*stride]=
- src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
- src[3+1*stride]=
- src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
- src[3+2*stride]=(t4 + 2*t5 + t6 + 2)>>2;
- src[3+3*stride]=(t5 + 2*t6 + t7 + 2)>>2;
-}
-
-static void FUNCC(pred4x4_horizontal_up)(uint8_t *p_src, const uint8_t *topright, int p_stride){
- pixel *src = (pixel*)p_src;
- int stride = p_stride>>(sizeof(pixel)-1);
+static void FUNCC(pred4x4_horizontal_up)(uint8_t *_src, const uint8_t *topright, int _stride){
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
LOAD_LEFT_EDGE
src[0+0*stride]=(l0 + l1 + 1)>>1;
@@ -434,62 +267,9 @@ static void FUNCC(pred4x4_horizontal_up)(uint8_t *p_src, const uint8_t *topright
src[3+3*stride]=l3;
}
-static void FUNCC(pred4x4_horizontal_up_rv40)(uint8_t *p_src, const uint8_t *p_topright, int p_stride){
- pixel *src = (pixel*)p_src;
- const pixel *topright = (const pixel*)p_topright;
- int stride = p_stride>>(sizeof(pixel)-1);
- LOAD_LEFT_EDGE
- LOAD_DOWN_LEFT_EDGE
- LOAD_TOP_EDGE
- LOAD_TOP_RIGHT_EDGE
-
- src[0+0*stride]=(t1 + 2*t2 + t3 + 2*l0 + 2*l1 + 4)>>3;
- src[1+0*stride]=(t2 + 2*t3 + t4 + l0 + 2*l1 + l2 + 4)>>3;
- src[2+0*stride]=
- src[0+1*stride]=(t3 + 2*t4 + t5 + 2*l1 + 2*l2 + 4)>>3;
- src[3+0*stride]=
- src[1+1*stride]=(t4 + 2*t5 + t6 + l1 + 2*l2 + l3 + 4)>>3;
- src[2+1*stride]=
- src[0+2*stride]=(t5 + 2*t6 + t7 + 2*l2 + 2*l3 + 4)>>3;
- src[3+1*stride]=
- src[1+2*stride]=(t6 + 3*t7 + l2 + 3*l3 + 4)>>3;
- src[3+2*stride]=
- src[1+3*stride]=(l3 + 2*l4 + l5 + 2)>>2;
- src[0+3*stride]=
- src[2+2*stride]=(t6 + t7 + l3 + l4 + 2)>>2;
- src[2+3*stride]=(l4 + l5 + 1)>>1;
- src[3+3*stride]=(l4 + 2*l5 + l6 + 2)>>2;
-}
-
-static void FUNCC(pred4x4_horizontal_up_rv40_nodown)(uint8_t *p_src, const uint8_t *p_topright, int p_stride){
- pixel *src = (pixel*)p_src;
- const pixel *topright = (const pixel*)p_topright;
- int stride = p_stride>>(sizeof(pixel)-1);
- LOAD_LEFT_EDGE
- LOAD_TOP_EDGE
- LOAD_TOP_RIGHT_EDGE
-
- src[0+0*stride]=(t1 + 2*t2 + t3 + 2*l0 + 2*l1 + 4)>>3;
- src[1+0*stride]=(t2 + 2*t3 + t4 + l0 + 2*l1 + l2 + 4)>>3;
- src[2+0*stride]=
- src[0+1*stride]=(t3 + 2*t4 + t5 + 2*l1 + 2*l2 + 4)>>3;
- src[3+0*stride]=
- src[1+1*stride]=(t4 + 2*t5 + t6 + l1 + 2*l2 + l3 + 4)>>3;
- src[2+1*stride]=
- src[0+2*stride]=(t5 + 2*t6 + t7 + 2*l2 + 2*l3 + 4)>>3;
- src[3+1*stride]=
- src[1+2*stride]=(t6 + 3*t7 + l2 + 3*l3 + 4)>>3;
- src[3+2*stride]=
- src[1+3*stride]=l3;
- src[0+3*stride]=
- src[2+2*stride]=(t6 + t7 + 2*l3 + 2)>>2;
- src[2+3*stride]=
- src[3+3*stride]=l3;
-}
-
-static void FUNCC(pred4x4_horizontal_down)(uint8_t *p_src, const uint8_t *topright, int p_stride){
- pixel *src = (pixel*)p_src;
- int stride = p_stride>>(sizeof(pixel)-1);
+static void FUNCC(pred4x4_horizontal_down)(uint8_t *_src, const uint8_t *topright, int _stride){
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
const int lt= src[-1-1*stride];
LOAD_TOP_EDGE
LOAD_LEFT_EDGE
@@ -512,65 +292,50 @@ static void FUNCC(pred4x4_horizontal_down)(uint8_t *p_src, const uint8_t *toprig
src[1+3*stride]=(l1 + 2*l2 + l3 + 2)>>2;
}
-static void FUNCC(pred4x4_tm_vp8)(uint8_t *p_src, const uint8_t *topright, int p_stride){
- pixel *src = (pixel*)p_src;
- int stride = p_stride>>(sizeof(pixel)-1);
- uint8_t *cm = ff_cropTbl + MAX_NEG_CROP - src[-1-stride];
- pixel *top = src-stride;
- int y;
-
- for (y = 0; y < 4; y++) {
- uint8_t *cm_in = cm + src[-1];
- src[0] = cm_in[top[0]];
- src[1] = cm_in[top[1]];
- src[2] = cm_in[top[2]];
- src[3] = cm_in[top[3]];
- src += stride;
- }
-}
-
-static void FUNCC(pred16x16_vertical)(uint8_t *p_src, int p_stride){
+static void FUNCC(pred16x16_vertical)(uint8_t *_src, int _stride){
int i;
- pixel *src = (pixel*)p_src;
- int stride = p_stride>>(sizeof(pixel)-1);
- const pixel4 a = ((pixel4*)(src-stride))[0];
- const pixel4 b = ((pixel4*)(src-stride))[1];
- const pixel4 c = ((pixel4*)(src-stride))[2];
- const pixel4 d = ((pixel4*)(src-stride))[3];
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ const pixel4 a = AV_RN4PA(((pixel4*)(src-stride))+0);
+ const pixel4 b = AV_RN4PA(((pixel4*)(src-stride))+1);
+ const pixel4 c = AV_RN4PA(((pixel4*)(src-stride))+2);
+ const pixel4 d = AV_RN4PA(((pixel4*)(src-stride))+3);
for(i=0; i<16; i++){
- ((pixel4*)(src+i*stride))[0] = a;
- ((pixel4*)(src+i*stride))[1] = b;
- ((pixel4*)(src+i*stride))[2] = c;
- ((pixel4*)(src+i*stride))[3] = d;
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, a);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, b);
+ AV_WN4PA(((pixel4*)(src+i*stride))+2, c);
+ AV_WN4PA(((pixel4*)(src+i*stride))+3, d);
}
}
-static void FUNCC(pred16x16_horizontal)(uint8_t *p_src, int stride){
+static void FUNCC(pred16x16_horizontal)(uint8_t *_src, int stride){
int i;
- pixel *src = (pixel*)p_src;
+ pixel *src = (pixel*)_src;
stride >>= sizeof(pixel)-1;
for(i=0; i<16; i++){
- ((pixel4*)(src+i*stride))[0] =
- ((pixel4*)(src+i*stride))[1] =
- ((pixel4*)(src+i*stride))[2] =
- ((pixel4*)(src+i*stride))[3] = PIXEL_SPLAT_X4(src[-1+i*stride]);
+ const pixel4 a = PIXEL_SPLAT_X4(src[-1+i*stride]);
+
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, a);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, a);
+ AV_WN4PA(((pixel4*)(src+i*stride))+2, a);
+ AV_WN4PA(((pixel4*)(src+i*stride))+3, a);
}
}
#define PREDICT_16x16_DC(v)\
for(i=0; i<16; i++){\
- AV_WN4P(src+ 0, v);\
- AV_WN4P(src+ 4, v);\
- AV_WN4P(src+ 8, v);\
- AV_WN4P(src+12, v);\
+ AV_WN4PA(src+ 0, v);\
+ AV_WN4PA(src+ 4, v);\
+ AV_WN4PA(src+ 8, v);\
+ AV_WN4PA(src+12, v);\
src += stride;\
}
-static void FUNCC(pred16x16_dc)(uint8_t *p_src, int stride){
+static void FUNCC(pred16x16_dc)(uint8_t *_src, int stride){
int i, dc=0;
- pixel *src = (pixel*)p_src;
+ pixel *src = (pixel*)_src;
pixel4 dcsplat;
stride >>= sizeof(pixel)-1;
@@ -586,9 +351,9 @@ static void FUNCC(pred16x16_dc)(uint8_t *p_src, int stride){
PREDICT_16x16_DC(dcsplat);
}
-static void FUNCC(pred16x16_left_dc)(uint8_t *p_src, int stride){
+static void FUNCC(pred16x16_left_dc)(uint8_t *_src, int stride){
int i, dc=0;
- pixel *src = (pixel*)p_src;
+ pixel *src = (pixel*)_src;
pixel4 dcsplat;
stride >>= sizeof(pixel)-1;
@@ -600,9 +365,9 @@ static void FUNCC(pred16x16_left_dc)(uint8_t *p_src, int stride){
PREDICT_16x16_DC(dcsplat);
}
-static void FUNCC(pred16x16_top_dc)(uint8_t *p_src, int stride){
+static void FUNCC(pred16x16_top_dc)(uint8_t *_src, int stride){
int i, dc=0;
- pixel *src = (pixel*)p_src;
+ pixel *src = (pixel*)_src;
pixel4 dcsplat;
stride >>= sizeof(pixel)-1;
@@ -615,9 +380,9 @@ static void FUNCC(pred16x16_top_dc)(uint8_t *p_src, int stride){
}
#define PRED16x16_X(n, v) \
-static void FUNCC(pred16x16_##n##_dc)(uint8_t *p_src, int stride){\
+static void FUNCC(pred16x16_##n##_dc)(uint8_t *_src, int stride){\
int i;\
- pixel *src = (pixel*)p_src;\
+ pixel *src = (pixel*)_src;\
stride >>= sizeof(pixel)-1;\
PREDICT_16x16_DC(PIXEL_SPLAT_X4(v));\
}
@@ -675,73 +440,40 @@ static void FUNCC(pred16x16_plane)(uint8_t *src, int stride){
FUNCC(pred16x16_plane_compat)(src, stride, 0, 0);
}
-static void FUNCC(pred16x16_plane_svq3)(uint8_t *src, int stride){
- FUNCC(pred16x16_plane_compat)(src, stride, 1, 0);
-}
-
-static void FUNCC(pred16x16_plane_rv40)(uint8_t *src, int stride){
- FUNCC(pred16x16_plane_compat)(src, stride, 0, 1);
-}
-
-static void FUNCC(pred16x16_tm_vp8)(uint8_t *src, int stride){
- uint8_t *cm = ff_cropTbl + MAX_NEG_CROP - src[-1-stride];
- uint8_t *top = src-stride;
- int y;
-
- for (y = 0; y < 16; y++) {
- uint8_t *cm_in = cm + src[-1];
- src[0] = cm_in[top[0]];
- src[1] = cm_in[top[1]];
- src[2] = cm_in[top[2]];
- src[3] = cm_in[top[3]];
- src[4] = cm_in[top[4]];
- src[5] = cm_in[top[5]];
- src[6] = cm_in[top[6]];
- src[7] = cm_in[top[7]];
- src[8] = cm_in[top[8]];
- src[9] = cm_in[top[9]];
- src[10] = cm_in[top[10]];
- src[11] = cm_in[top[11]];
- src[12] = cm_in[top[12]];
- src[13] = cm_in[top[13]];
- src[14] = cm_in[top[14]];
- src[15] = cm_in[top[15]];
- src += stride;
- }
-}
-
-static void FUNCC(pred8x8_vertical)(uint8_t *p_src, int p_stride){
+static void FUNCC(pred8x8_vertical)(uint8_t *_src, int _stride){
int i;
- pixel *src = (pixel*)p_src;
- int stride = p_stride>>(sizeof(pixel)-1);
- const pixel4 a= ((pixel4*)(src-stride))[0];
- const pixel4 b= ((pixel4*)(src-stride))[1];
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ const pixel4 a= AV_RN4PA(((pixel4*)(src-stride))+0);
+ const pixel4 b= AV_RN4PA(((pixel4*)(src-stride))+1);
for(i=0; i<8; i++){
- ((pixel4*)(src+i*stride))[0]= a;
- ((pixel4*)(src+i*stride))[1]= b;
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, a);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, b);
}
}
-static void FUNCC(pred8x8_horizontal)(uint8_t *p_src, int stride){
+static void FUNCC(pred8x8_horizontal)(uint8_t *_src, int stride){
int i;
- pixel *src = (pixel*)p_src;
+ pixel *src = (pixel*)_src;
stride >>= sizeof(pixel)-1;
for(i=0; i<8; i++){
- ((pixel4*)(src+i*stride))[0]=
- ((pixel4*)(src+i*stride))[1]= PIXEL_SPLAT_X4(src[-1+i*stride]);
+ const pixel4 a = PIXEL_SPLAT_X4(src[-1+i*stride]);
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, a);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, a);
}
}
#define PRED8x8_X(n, v)\
-static void FUNCC(pred8x8_##n##_dc)(uint8_t *p_src, int stride){\
+static void FUNCC(pred8x8_##n##_dc)(uint8_t *_src, int stride){\
int i;\
- pixel *src = (pixel*)p_src;\
+ const pixel4 a = PIXEL_SPLAT_X4(v);\
+ pixel *src = (pixel*)_src;\
stride >>= sizeof(pixel)-1;\
for(i=0; i<8; i++){\
- ((pixel4*)(src+i*stride))[0]=\
- ((pixel4*)(src+i*stride))[1]= PIXEL_SPLAT_X4(v);\
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, a);\
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, a);\
}\
}
@@ -749,11 +481,11 @@ PRED8x8_X(127, (1<<(BIT_DEPTH-1))-1);
PRED8x8_X(128, (1<<(BIT_DEPTH-1))+0);
PRED8x8_X(129, (1<<(BIT_DEPTH-1))+1);
-static void FUNCC(pred8x8_left_dc)(uint8_t *p_src, int stride){
+static void FUNCC(pred8x8_left_dc)(uint8_t *_src, int stride){
int i;
int dc0, dc2;
pixel4 dc0splat, dc2splat;
- pixel *src = (pixel*)p_src;
+ pixel *src = (pixel*)_src;
stride >>= sizeof(pixel)-1;
dc0=dc2=0;
@@ -765,38 +497,20 @@ static void FUNCC(pred8x8_left_dc)(uint8_t *p_src, int stride){
dc2splat = PIXEL_SPLAT_X4((dc2 + 2)>>2);
for(i=0; i<4; i++){
- ((pixel4*)(src+i*stride))[0]=
- ((pixel4*)(src+i*stride))[1]= dc0splat;
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, dc0splat);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, dc0splat);
}
for(i=4; i<8; i++){
- ((pixel4*)(src+i*stride))[0]=
- ((pixel4*)(src+i*stride))[1]= dc2splat;
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, dc2splat);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, dc2splat);
}
}
-static void FUNCC(pred8x8_left_dc_rv40)(uint8_t *p_src, int stride){
- int i;
- int dc0;
- pixel4 dc0splat;
- pixel *src = (pixel*)p_src;
- stride >>= sizeof(pixel)-1;
-
- dc0=0;
- for(i=0;i<8; i++)
- dc0+= src[-1+i*stride];
- dc0splat = PIXEL_SPLAT_X4((dc0 + 4)>>3);
-
- for(i=0; i<8; i++){
- ((pixel4*)(src+i*stride))[0]=
- ((pixel4*)(src+i*stride))[1]= dc0splat;
- }
-}
-
-static void FUNCC(pred8x8_top_dc)(uint8_t *p_src, int stride){
+static void FUNCC(pred8x8_top_dc)(uint8_t *_src, int stride){
int i;
int dc0, dc1;
pixel4 dc0splat, dc1splat;
- pixel *src = (pixel*)p_src;
+ pixel *src = (pixel*)_src;
stride >>= sizeof(pixel)-1;
dc0=dc1=0;
@@ -808,39 +522,20 @@ static void FUNCC(pred8x8_top_dc)(uint8_t *p_src, int stride){
dc1splat = PIXEL_SPLAT_X4((dc1 + 2)>>2);
for(i=0; i<4; i++){
- ((pixel4*)(src+i*stride))[0]= dc0splat;
- ((pixel4*)(src+i*stride))[1]= dc1splat;
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, dc0splat);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, dc1splat);
}
for(i=4; i<8; i++){
- ((pixel4*)(src+i*stride))[0]= dc0splat;
- ((pixel4*)(src+i*stride))[1]= dc1splat;
- }
-}
-
-static void FUNCC(pred8x8_top_dc_rv40)(uint8_t *p_src, int stride){
- int i;
- int dc0;
- pixel4 dc0splat;
- pixel *src = (pixel*)p_src;
- stride >>= sizeof(pixel)-1;
-
- dc0=0;
- for(i=0;i<8; i++)
- dc0+= src[i-stride];
- dc0splat = PIXEL_SPLAT_X4((dc0 + 4)>>3);
-
- for(i=0; i<8; i++){
- ((pixel4*)(src+i*stride))[0]=
- ((pixel4*)(src+i*stride))[1]= dc0splat;
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, dc0splat);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, dc1splat);
}
}
-
-static void FUNCC(pred8x8_dc)(uint8_t *p_src, int stride){
+static void FUNCC(pred8x8_dc)(uint8_t *_src, int stride){
int i;
int dc0, dc1, dc2;
pixel4 dc0splat, dc1splat, dc2splat, dc3splat;
- pixel *src = (pixel*)p_src;
+ pixel *src = (pixel*)_src;
stride >>= sizeof(pixel)-1;
dc0=dc1=dc2=0;
@@ -855,12 +550,12 @@ static void FUNCC(pred8x8_dc)(uint8_t *p_src, int stride){
dc3splat = PIXEL_SPLAT_X4((dc1 + dc2 + 4)>>3);
for(i=0; i<4; i++){
- ((pixel4*)(src+i*stride))[0]= dc0splat;
- ((pixel4*)(src+i*stride))[1]= dc1splat;
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, dc0splat);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, dc1splat);
}
for(i=4; i<8; i++){
- ((pixel4*)(src+i*stride))[0]= dc2splat;
- ((pixel4*)(src+i*stride))[1]= dc3splat;
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, dc2splat);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, dc3splat);
}
}
@@ -887,36 +582,12 @@ static void FUNC(pred8x8_mad_cow_dc_0l0)(uint8_t *src, int stride){
FUNCC(pred4x4_128_dc)(src + 4*sizeof(pixel), NULL, stride);
}
-static void FUNCC(pred8x8_dc_rv40)(uint8_t *p_src, int stride){
- int i;
- int dc0=0;
- pixel4 dc0splat;
- pixel *src = (pixel*)p_src;
- stride >>= sizeof(pixel)-1;
-
- for(i=0;i<4; i++){
- dc0+= src[-1+i*stride] + src[i-stride];
- dc0+= src[4+i-stride];
- dc0+= src[-1+(i+4)*stride];
- }
- dc0splat = PIXEL_SPLAT_X4((dc0 + 8)>>4);
-
- for(i=0; i<4; i++){
- ((pixel4*)(src+i*stride))[0]= dc0splat;
- ((pixel4*)(src+i*stride))[1]= dc0splat;
- }
- for(i=4; i<8; i++){
- ((pixel4*)(src+i*stride))[0]= dc0splat;
- ((pixel4*)(src+i*stride))[1]= dc0splat;
- }
-}
-
-static void FUNCC(pred8x8_plane)(uint8_t *p_src, int p_stride){
+static void FUNCC(pred8x8_plane)(uint8_t *_src, int _stride){
int j, k;
int a;
INIT_CLIP
- pixel *src = (pixel*)p_src;
- int stride = p_stride>>(sizeof(pixel)-1);
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
const pixel * const src0 = src +3-stride;
const pixel * src1 = src +4*stride-1;
const pixel * src2 = src1-2*stride; // == src+2*stride-1;
@@ -946,27 +617,6 @@ static void FUNCC(pred8x8_plane)(uint8_t *p_src, int p_stride){
}
}
-static void FUNCC(pred8x8_tm_vp8)(uint8_t *p_src, int p_stride){
- pixel *src = (pixel*)p_src;
- int stride = p_stride>>(sizeof(pixel)-1);
- uint8_t *cm = ff_cropTbl + MAX_NEG_CROP - src[-1-stride];
- pixel *top = src-stride;
- int y;
-
- for (y = 0; y < 8; y++) {
- uint8_t *cm_in = cm + src[-1];
- src[0] = cm_in[top[0]];
- src[1] = cm_in[top[1]];
- src[2] = cm_in[top[2]];
- src[3] = cm_in[top[3]];
- src[4] = cm_in[top[4]];
- src[5] = cm_in[top[5]];
- src[6] = cm_in[top[6]];
- src[7] = cm_in[top[7]];
- src += stride;
- }
-}
-
#define SRC(x,y) src[(x)+(y)*stride]
#define PL(y) \
const int l##y = (SRC(-1,y-1) + 2*SRC(-1,y) + SRC(-1,y+1) + 2) >> 2;
@@ -1000,22 +650,22 @@ static void FUNCC(pred8x8_tm_vp8)(uint8_t *p_src, int p_stride){
#define PREDICT_8x8_DC(v) \
int y; \
for( y = 0; y < 8; y++ ) { \
- ((pixel4*)src)[0] = \
- ((pixel4*)src)[1] = v; \
+ AV_WN4PA(((pixel4*)src)+0, v); \
+ AV_WN4PA(((pixel4*)src)+1, v); \
src += stride; \
}
-static void FUNCC(pred8x8l_128_dc)(uint8_t *p_src, int has_topleft, int has_topright, int p_stride)
+static void FUNCC(pred8x8l_128_dc)(uint8_t *_src, int has_topleft, int has_topright, int _stride)
{
- pixel *src = (pixel*)p_src;
- int stride = p_stride>>(sizeof(pixel)-1);
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
PREDICT_8x8_DC(PIXEL_SPLAT_X4(1<<(BIT_DEPTH-1)));
}
-static void FUNCC(pred8x8l_left_dc)(uint8_t *p_src, int has_topleft, int has_topright, int p_stride)
+static void FUNCC(pred8x8l_left_dc)(uint8_t *_src, int has_topleft, int has_topright, int _stride)
{
- pixel *src = (pixel*)p_src;
- int stride = p_stride>>(sizeof(pixel)-1);
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
PREDICT_8x8_LOAD_LEFT;
const pixel4 dc = PIXEL_SPLAT_X4((l0+l1+l2+l3+l4+l5+l6+l7+4) >> 3);
@@ -1045,18 +695,21 @@ static void FUNCC(pred8x8l_horizontal)(uint8_t *p_src, int has_topleft, int has_
{
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
+ pixel4 a;
PREDICT_8x8_LOAD_LEFT;
-#define ROW(y) ((pixel4*)(src+y*stride))[0] =\
- ((pixel4*)(src+y*stride))[1] = PIXEL_SPLAT_X4(l##y)
+#define ROW(y) a = PIXEL_SPLAT_X4(l##y); \
+ AV_WN4PA(src+y*stride, a); \
+ AV_WN4PA(src+y*stride+4, a);
ROW(0); ROW(1); ROW(2); ROW(3); ROW(4); ROW(5); ROW(6); ROW(7);
#undef ROW
}
-static void FUNCC(pred8x8l_vertical)(uint8_t *p_src, int has_topleft, int has_topright, int p_stride)
+static void FUNCC(pred8x8l_vertical)(uint8_t *_src, int has_topleft, int has_topright, int _stride)
{
int y;
- pixel *src = (pixel*)p_src;
- int stride = p_stride>>(sizeof(pixel)-1);
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ pixel4 a, b;
PREDICT_8x8_LOAD_TOP;
src[0] = t0;
@@ -1067,9 +720,11 @@ static void FUNCC(pred8x8l_vertical)(uint8_t *p_src, int has_topleft, int has_to
src[5] = t5;
src[6] = t6;
src[7] = t7;
+ a = AV_RN4PA(((pixel4*)src)+0);
+ b = AV_RN4PA(((pixel4*)src)+1);
for( y = 1; y < 8; y++ ) {
- ((pixel4*)(src+y*stride))[0] = ((pixel4*)src)[0];
- ((pixel4*)(src+y*stride))[1] = ((pixel4*)src)[1];
+ AV_WN4PA(((pixel4*)(src+y*stride))+0, a);
+ AV_WN4PA(((pixel4*)(src+y*stride))+1, b);
}
}
static void FUNCC(pred8x8l_down_left)(uint8_t *p_src, int has_topleft, int has_topright, int p_stride)
diff --git a/libavcodec/h264_high_depth.h b/libavcodec/high_bit_depth.h
index da12d94b7c..511cd00f3a 100644
--- a/libavcodec/h264_high_depth.h
+++ b/libavcodec/high_bit_depth.h
@@ -14,6 +14,7 @@
# undef rnd_avg_pixel4
# undef AV_RN2P
# undef AV_RN4P
+# undef AV_RN4PA
# undef AV_WN2P
# undef AV_WN4P
# undef AV_WN4PA
@@ -46,6 +47,7 @@ CLIP_PIXEL(10)
# define rnd_avg_pixel4 rnd_avg64
# define AV_RN2P AV_RN32
# define AV_RN4P AV_RN64
+# define AV_RN4PA AV_RN64A
# define AV_WN2P AV_WN32
# define AV_WN4P AV_WN64
# define AV_WN4PA AV_WN64A
@@ -61,6 +63,7 @@ CLIP_PIXEL(10)
# define rnd_avg_pixel4 rnd_avg32
# define AV_RN2P AV_RN16
# define AV_RN4P AV_RN32
+# define AV_RN4PA AV_RN32A
# define AV_WN2P AV_WN16
# define AV_WN4P AV_WN32
# define AV_WN4PA AV_WN32A
@@ -83,4 +86,3 @@ CLIP_PIXEL(10)
# define FUNC(a) a ## _10
# define FUNCC(a) a ## _10_c
#endif
-
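The renamed header is what makes the template above depth-generic: it maps the pixel-sized helpers onto concrete libavutil macros, and this patch adds the aligned read AV_RN4PA next to the existing AV_WN4PA. A condensed sketch of the resulting mapping follows; only the AV_RN4PA/AV_WN4PA lines and the _10_c suffix appear in the hunks, the pixel typedefs and the _8_c counterpart are assumptions, and the real header keeps the FUNCC definitions in a separate conditional:

#if BIT_DEPTH > 8                      /* pixel assumed uint16_t, pixel4 a 64-bit word */
#   define AV_RN4PA AV_RN64A           /* added here: aligned load of 4 pixels  */
#   define AV_WN4PA AV_WN64A           /* aligned store of 4 pixels             */
#   define FUNCC(a) a ## _10_c
#else                                  /* pixel assumed uint8_t, pixel4 a 32-bit word */
#   define AV_RN4PA AV_RN32A           /* added here */
#   define AV_WN4PA AV_WN32A
#   define FUNCC(a) a ## _8_c          /* assumed counterpart of the _10_c branch */
#endif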
diff --git a/libavcodec/huffyuv.c b/libavcodec/huffyuv.c
index 6895968426..0f59421bb7 100644
--- a/libavcodec/huffyuv.c
+++ b/libavcodec/huffyuv.c
@@ -433,6 +433,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
memset(s->vlc, 0, 3*sizeof(VLC));
avctx->coded_frame= &s->picture;
+ avcodec_get_frame_defaults(&s->picture);
s->interlaced= s->height > 288;
s->bgr32=1;
@@ -1238,7 +1239,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
int i, j, size=0;
*p = *pict;
- p->pict_type= FF_I_TYPE;
+ p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
if(s->context){
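The huffyuv change above is one instance of a rename applied mechanically throughout this merge: the old FF_*_TYPE picture-type constants give way to the AVPictureType names (and av_get_pict_type_char() to av_get_picture_type_char()). The correspondence, as far as it can be read off this diff, is the following; a compatibility shim of this shape is assumed, it is not part of these hunks:

/* assumed mapping -- only the pairs visible in this diff are listed */
#define FF_I_TYPE AV_PICTURE_TYPE_I
#define FF_P_TYPE AV_PICTURE_TYPE_P
#define FF_B_TYPE AV_PICTURE_TYPE_B
#define FF_S_TYPE AV_PICTURE_TYPE_S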
diff --git a/libavcodec/idcinvideo.c b/libavcodec/idcinvideo.c
index 64421db19a..78ac77bf25 100644
--- a/libavcodec/idcinvideo.c
+++ b/libavcodec/idcinvideo.c
@@ -165,6 +165,7 @@ static av_cold int idcin_decode_init(AVCodecContext *avctx)
huff_build_tree(s, i);
}
+ avcodec_get_frame_defaults(&s->frame);
s->frame.data[0] = NULL;
return 0;
diff --git a/libavcodec/iff.c b/libavcodec/iff.c
index 2467781537..195ef10ac7 100644
--- a/libavcodec/iff.c
+++ b/libavcodec/iff.c
@@ -38,52 +38,6 @@ typedef enum {
MASK_LASSO
} mask_type;
-/**
- * Gets the actual extra data after video preperties which contains
- * the raw CMAP palette data beyond the IFF extra context.
- *
- * @param avctx the AVCodecContext where to extract raw palette data from
- * @return pointer to raw CMAP palette data
- */
-static av_always_inline uint8_t *get_palette_data(const AVCodecContext *const avctx) {
- return avctx->extradata + AV_RB16(avctx->extradata);
-}
-
-/**
- * Gets the size of CMAP palette data beyond the IFF extra context.
- * Please note that any value < 2 of IFF extra context or
- * raw extradata < 0 is considered as illegal extradata.
- *
- * @param avctx the AVCodecContext where to extract palette data size from
- * @return size of raw palette data in bytes
- */
-static av_always_inline int get_palette_size(const AVCodecContext *const avctx) {
- return avctx->extradata_size - AV_RB16(avctx->extradata);
-}
-
-/**
- * Gets the actual raw image data after video properties which
- * contains the raw image data beyond the IFF extra context.
- *
- * @param avpkt the AVPacket where to extract raw image data from
- * @return pointer to raw image data
- */
-static av_always_inline uint8_t *get_image_data(const AVPacket *const avpkt) {
- return avpkt->data + AV_RB16(avpkt->data);
-}
-
-/**
- * Gets the size of raw image data beyond the IFF extra context.
- * Please note that any value < 2 of either IFF extra context
- * or raw image data is considered as an illegal packet.
- *
- * @param avpkt the AVPacket where to extract image data size from
- * @return size of raw image data in bytes
- */
-static av_always_inline int get_image_size(const AVPacket *const avpkt) {
- return avpkt->size - AV_RB16(avpkt->data);
-}
-
typedef struct {
AVFrame frame;
int planesize;
@@ -184,7 +138,8 @@ static av_always_inline uint32_t gray2rgb(const uint32_t x) {
static int ff_cmap_read_palette(AVCodecContext *avctx, uint32_t *pal)
{
int count, i;
- const uint8_t *const extradata = get_palette_data(avctx);
+ const uint8_t *const palette = avctx->extradata + AV_RB16(avctx->extradata);
+ int palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
if (avctx->bits_per_coded_sample > 8) {
av_log(avctx, AV_LOG_ERROR, "bit_per_coded_sample > 8 not supported\n");
@@ -193,10 +148,10 @@ static int ff_cmap_read_palette(AVCodecContext *avctx, uint32_t *pal)
count = 1 << avctx->bits_per_coded_sample;
// If extradata is smaller than actually needed, fill the remaining with black.
- count = FFMIN(get_palette_size(avctx) / 3, count);
+ count = FFMIN(palette_size / 3, count);
if (count) {
for (i=0; i < count; i++) {
- pal[i] = 0xFF000000 | AV_RB24(extradata + i*3);
+ pal[i] = 0xFF000000 | AV_RB24(palette + i*3);
}
} else { // Create gray-scale color palette for bps < 8
count = 1 << avctx->bits_per_coded_sample;
@@ -221,15 +176,19 @@ static int extract_header(AVCodecContext *const avctx,
const uint8_t *buf;
unsigned buf_size;
IffContext *s = avctx->priv_data;
+ int palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
+
if (avpkt) {
+ int image_size;
if (avpkt->size < 2)
return AVERROR_INVALIDDATA;
+ image_size = avpkt->size - AV_RB16(avpkt->data);
buf = avpkt->data;
buf_size = bytestream_get_be16(&buf);
- if (buf_size <= 1 || get_image_size(avpkt) <= 1) {
+ if (buf_size <= 1 || image_size <= 1) {
av_log(avctx, AV_LOG_ERROR,
"Invalid image size received: %u -> image data offset: %d\n",
- buf_size, get_image_size(avpkt));
+ buf_size, image_size);
return AVERROR_INVALIDDATA;
}
} else {
@@ -237,10 +196,10 @@ static int extract_header(AVCodecContext *const avctx,
return AVERROR_INVALIDDATA;
buf = avctx->extradata;
buf_size = bytestream_get_be16(&buf);
- if (buf_size <= 1 || get_palette_size(avctx) < 0) {
+ if (buf_size <= 1 || palette_size < 0) {
av_log(avctx, AV_LOG_ERROR,
"Invalid palette size received: %u -> palette data offset: %d\n",
- buf_size, get_palette_size(avctx));
+ buf_size, palette_size);
return AVERROR_INVALIDDATA;
}
}
@@ -271,8 +230,8 @@ static int extract_header(AVCodecContext *const avctx,
av_freep(&s->ham_palbuf);
if (s->ham) {
- int i, count = FFMIN(get_palette_size(avctx) / 3, 1 << s->ham);
- const uint8_t *const extradata = get_palette_data(avctx);
+ int i, count = FFMIN(palette_size / 3, 1 << s->ham);
+ const uint8_t *const palette = avctx->extradata + AV_RB16(avctx->extradata);
s->ham_buf = av_malloc((s->planesize * 8) + FF_INPUT_BUFFER_PADDING_SIZE);
if (!s->ham_buf)
return AVERROR(ENOMEM);
@@ -287,7 +246,7 @@ static int extract_header(AVCodecContext *const avctx,
// prefill with black and palette and set HAM take direct value mask to zero
memset(s->ham_palbuf, 0, (1 << s->ham) * 2 * sizeof (uint32_t));
for (i=0; i < count; i++) {
- s->ham_palbuf[i*2+1] = AV_RL24(extradata + i*3);
+ s->ham_palbuf[i*2+1] = AV_RL24(palette + i*3);
}
count = 1 << s->ham;
} else { // HAM with grayscale color palette
@@ -322,9 +281,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
int err;
if (avctx->bits_per_coded_sample <= 8) {
+ int palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
avctx->pix_fmt = (avctx->bits_per_coded_sample < 8) ||
- (avctx->extradata_size >= 2 && get_palette_size(avctx)) ? PIX_FMT_PAL8
- : PIX_FMT_GRAY8;
+ (avctx->extradata_size >= 2 && palette_size) ? PIX_FMT_PAL8 : PIX_FMT_GRAY8;
} else if (avctx->bits_per_coded_sample <= 32) {
avctx->pix_fmt = PIX_FMT_BGR32;
} else {
@@ -339,6 +298,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
return AVERROR(ENOMEM);
s->bpp = avctx->bits_per_coded_sample;
+ avcodec_get_frame_defaults(&s->frame);
if ((err = extract_header(avctx, NULL)) < 0)
return err;
@@ -458,8 +418,8 @@ static int decode_frame_ilbm(AVCodecContext *avctx,
AVPacket *avpkt)
{
IffContext *s = avctx->priv_data;
- const uint8_t *buf = avpkt->size >= 2 ? get_image_data(avpkt) : NULL;
- const int buf_size = avpkt->size >= 2 ? get_image_size(avpkt) : 0;
+ const uint8_t *buf = avpkt->size >= 2 ? avpkt->data + AV_RB16(avpkt->data) : NULL;
+ const int buf_size = avpkt->size >= 2 ? avpkt->size - AV_RB16(avpkt->data) : 0;
const uint8_t *buf_end = buf+buf_size;
int y, plane, res;
@@ -535,8 +495,8 @@ static int decode_frame_byterun1(AVCodecContext *avctx,
AVPacket *avpkt)
{
IffContext *s = avctx->priv_data;
- const uint8_t *buf = avpkt->size >= 2 ? get_image_data(avpkt) : NULL;
- const int buf_size = avpkt->size >= 2 ? get_image_size(avpkt) : 0;
+ const uint8_t *buf = avpkt->size >= 2 ? avpkt->data + AV_RB16(avpkt->data) : NULL;
+ const int buf_size = avpkt->size >= 2 ? avpkt->size - AV_RB16(avpkt->data) : 0;
const uint8_t *buf_end = buf+buf_size;
int y, plane, res;
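The iff.c change replaces the small get_palette_*/get_image_* helpers with their bodies at each call site. The pointer arithmetic only makes sense given the layout it assumes: a 2-byte big-endian offset, followed by the IFF header portion, followed by the raw CMAP palette (in extradata) or the raw image data (in the packet). A sketch of that reading, using only calls visible in the diff:

/* layout assumed by the AV_RB16() offsets above (editorial reconstruction):
 *   extradata = [ be16 header_size | IFF header ... | raw CMAP palette ]
 *   avpkt     = [ be16 data_offset | header ...     | raw image data   ] */
const uint8_t *palette      = avctx->extradata      + AV_RB16(avctx->extradata);
int            palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
const uint8_t *image        = avpkt->data           + AV_RB16(avpkt->data);
int            image_size   = avpkt->size           - AV_RB16(avpkt->data);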
diff --git a/libavcodec/indeo2.c b/libavcodec/indeo2.c
index c4d410faf5..f58804bab3 100644
--- a/libavcodec/indeo2.c
+++ b/libavcodec/indeo2.c
@@ -192,6 +192,7 @@ static av_cold int ir2_decode_init(AVCodecContext *avctx){
Ir2Context * const ic = avctx->priv_data;
static VLC_TYPE vlc_tables[1 << CODE_VLC_BITS][2];
+ avcodec_get_frame_defaults(&ic->picture);
ic->avctx = avctx;
avctx->pix_fmt= PIX_FMT_YUV410P;
diff --git a/libavcodec/indeo3.c b/libavcodec/indeo3.c
index c9d8573692..588a5b4cac 100644
--- a/libavcodec/indeo3.c
+++ b/libavcodec/indeo3.c
@@ -149,13 +149,13 @@ static av_cold void iv_free_func(Indeo3DecodeContext *s)
}
struct ustr {
- long xpos;
- long ypos;
- long width;
- long height;
- long split_flag;
- long split_direction;
- long usl7;
+ int xpos;
+ int ypos;
+ int width;
+ int height;
+ int split_flag;
+ int split_direction;
+ int usl7;
};
@@ -203,12 +203,12 @@ struct ustr {
static void iv_Decode_Chunk(Indeo3DecodeContext *s,
uint8_t *cur, uint8_t *ref, int width, int height,
- const uint8_t *buf1, long cb_offset, const uint8_t *hdr,
+ const uint8_t *buf1, int cb_offset, const uint8_t *hdr,
const uint8_t *buf2, int min_width_160)
{
uint8_t bit_buf;
- unsigned long bit_pos, lv, lv1, lv2;
- long *width_tbl, width_tbl_arr[10];
+ unsigned int bit_pos, lv, lv1, lv2;
+ int *width_tbl, width_tbl_arr[10];
const signed char *ref_vectors;
uint8_t *cur_frm_pos, *ref_frm_pos, *cp, *cp2;
uint32_t *cur_lp, *ref_lp;
@@ -967,6 +967,7 @@ static av_cold int indeo3_decode_init(AVCodecContext *avctx)
s->width = avctx->width;
s->height = avctx->height;
avctx->pix_fmt = PIX_FMT_YUV410P;
+ avcodec_get_frame_defaults(&s->frame);
if (!(ret = build_modpred(s)))
ret = iv_alloc_frames(s);
@@ -982,7 +983,7 @@ static int iv_decode_frame(AVCodecContext *avctx,
Indeo3DecodeContext *s = avctx->priv_data;
unsigned int image_width, image_height,
chroma_width, chroma_height;
- unsigned long flags, cb_offset, data_size,
+ unsigned int flags, cb_offset, data_size,
y_offset, v_offset, u_offset, mc_vector_count;
const uint8_t *hdr_pos, *buf_pos;
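The indeo3.c hunks swap long for int in the bitstream bookkeeping. The practical effect shows up on LP64 targets, where long is 64 bits wide and these fields and tables silently doubled in size; plain int keeps them at the 32 bits the decoder actually needs. A trivial check of the underlying assumption (the printed sizes are the usual LP64/ILP32 conventions, not something the C standard guarantees):

#include <stdio.h>

int main(void)
{
    /* typically prints 8 and 4 on 64-bit Linux, 4 and 4 on 32-bit targets */
    printf("sizeof(long)=%zu sizeof(int)=%zu\n", sizeof(long), sizeof(int));
    return 0;
}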
diff --git a/libavcodec/indeo5.c b/libavcodec/indeo5.c
index d96b31032f..5135c46bc7 100644
--- a/libavcodec/indeo5.c
+++ b/libavcodec/indeo5.c
@@ -713,6 +713,8 @@ static av_cold int decode_init(AVCodecContext *avctx)
ctx->pic_conf.tile_height = avctx->height;
ctx->pic_conf.luma_bands = ctx->pic_conf.chroma_bands = 1;
+ avcodec_get_frame_defaults(&ctx->frame);
+
result = ff_ivi_init_planes(ctx->planes, &ctx->pic_conf);
if (result) {
av_log(avctx, AV_LOG_ERROR, "Couldn't allocate color planes!\n");
diff --git a/libavcodec/intelh263dec.c b/libavcodec/intelh263dec.c
index 73946ce132..d5a644ee7c 100644
--- a/libavcodec/intelh263dec.c
+++ b/libavcodec/intelh263dec.c
@@ -52,7 +52,7 @@ int ff_intel_h263_decode_picture_header(MpegEncContext *s)
}
s->h263_plus = 0;
- s->pict_type = FF_I_TYPE + get_bits1(&s->gb);
+ s->pict_type = AV_PICTURE_TYPE_I + get_bits1(&s->gb);
s->unrestricted_mv = get_bits1(&s->gb);
s->h263_long_vectors = s->unrestricted_mv;
diff --git a/libavcodec/interplayvideo.c b/libavcodec/interplayvideo.c
index 18702b21d0..246408df80 100644
--- a/libavcodec/interplayvideo.c
+++ b/libavcodec/interplayvideo.c
@@ -1033,6 +1033,9 @@ static av_cold int ipvideo_decode_init(AVCodecContext *avctx)
/* decoding map contains 4 bits of information per 8x8 block */
s->decoding_map_size = avctx->width * avctx->height / (8 * 8 * 2);
+ avcodec_get_frame_defaults(&s->second_last_frame);
+ avcodec_get_frame_defaults(&s->last_frame);
+ avcodec_get_frame_defaults(&s->current_frame);
s->current_frame.data[0] = s->last_frame.data[0] =
s->second_last_frame.data[0] = NULL;
diff --git a/libavcodec/ituh263dec.c b/libavcodec/ituh263dec.c
index 728019accc..e0d57a763d 100644
--- a/libavcodec/ituh263dec.c
+++ b/libavcodec/ituh263dec.c
@@ -71,7 +71,7 @@ static const int h263_mb_type_b_map[15]= {
void ff_h263_show_pict_info(MpegEncContext *s){
if(s->avctx->debug&FF_DEBUG_PICT_INFO){
av_log(s->avctx, AV_LOG_DEBUG, "qp:%d %c size:%d rnd:%d%s%s%s%s%s%s%s%s%s %d/%d\n",
- s->qscale, av_get_pict_type_char(s->pict_type),
+ s->qscale, av_get_picture_type_char(s->pict_type),
s->gb.size_in_bits, 1-s->no_rounding,
s->obmc ? " AP" : "",
s->umvplus ? " UMV" : "",
@@ -347,7 +347,7 @@ static void preview_obmc(MpegEncContext *s){
s->block_index[i]+= 1;
s->mb_x++;
- assert(s->pict_type == FF_P_TYPE);
+ assert(s->pict_type == AV_PICTURE_TYPE_P);
do{
if (get_bits1(&s->gb)) {
@@ -460,7 +460,7 @@ static int h263_decode_block(MpegEncContext * s, DCTELEM * block,
/* DC coef */
if(s->codec_id == CODEC_ID_RV10){
#if CONFIG_RV10_DECODER
- if (s->rv10_version == 3 && s->pict_type == FF_I_TYPE) {
+ if (s->rv10_version == 3 && s->pict_type == AV_PICTURE_TYPE_I) {
int component, diff;
component = (n <= 3 ? 0 : n - 4 + 1);
level = s->last_dc[component];
@@ -608,7 +608,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
assert(!s->h263_pred);
- if (s->pict_type == FF_P_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_P) {
do{
if (get_bits1(&s->gb)) {
/* skip mb */
@@ -700,7 +700,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
mot_val[1] = my;
}
}
- } else if(s->pict_type==FF_B_TYPE) {
+ } else if(s->pict_type==AV_PICTURE_TYPE_B) {
int mb_type;
const int stride= s->b8_stride;
int16_t *mot_val0 = s->current_picture.motion_val[0][ 2*(s->mb_x + s->mb_y*stride) ];
@@ -843,7 +843,7 @@ intra:
if(s->pb_frame && h263_skip_b_part(s, cbpb) < 0)
return -1;
if(s->obmc && !s->mb_intra){
- if(s->pict_type == FF_P_TYPE && s->mb_x+1<s->mb_width && s->mb_num_left != 1)
+ if(s->pict_type == AV_PICTURE_TYPE_P && s->mb_x+1<s->mb_width && s->mb_num_left != 1)
preview_obmc(s);
}
end:
@@ -921,7 +921,7 @@ int h263_decode_picture_header(MpegEncContext *s)
if (!width)
return -1;
- s->pict_type = FF_I_TYPE + get_bits1(&s->gb);
+ s->pict_type = AV_PICTURE_TYPE_I + get_bits1(&s->gb);
s->h263_long_vectors = get_bits1(&s->gb);
@@ -985,11 +985,11 @@ int h263_decode_picture_header(MpegEncContext *s)
/* MPPTYPE */
s->pict_type = get_bits(&s->gb, 3);
switch(s->pict_type){
- case 0: s->pict_type= FF_I_TYPE;break;
- case 1: s->pict_type= FF_P_TYPE;break;
- case 2: s->pict_type= FF_P_TYPE;s->pb_frame = 3;break;
- case 3: s->pict_type= FF_B_TYPE;break;
- case 7: s->pict_type= FF_I_TYPE;break; //ZYGO
+ case 0: s->pict_type= AV_PICTURE_TYPE_I;break;
+ case 1: s->pict_type= AV_PICTURE_TYPE_P;break;
+ case 2: s->pict_type= AV_PICTURE_TYPE_P;s->pb_frame = 3;break;
+ case 3: s->pict_type= AV_PICTURE_TYPE_B;break;
+ case 7: s->pict_type= AV_PICTURE_TYPE_I;break; //ZYGO
default:
return -1;
}
@@ -1112,7 +1112,7 @@ int h263_decode_picture_header(MpegEncContext *s)
}
ff_h263_show_pict_info(s);
- if (s->pict_type == FF_I_TYPE && s->codec_tag == AV_RL32("ZYGO")){
+ if (s->pict_type == AV_PICTURE_TYPE_I && s->codec_tag == AV_RL32("ZYGO")){
int i,j;
for(i=0; i<85; i++) av_log(s->avctx, AV_LOG_DEBUG, "%d", get_bits1(&s->gb));
av_log(s->avctx, AV_LOG_DEBUG, "\n");
diff --git a/libavcodec/ituh263enc.c b/libavcodec/ituh263enc.c
index 6a84636ec3..320f82a83f 100644
--- a/libavcodec/ituh263enc.c
+++ b/libavcodec/ituh263enc.c
@@ -145,7 +145,7 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number)
if (!s->h263_plus) {
/* H.263v1 */
put_bits(&s->pb, 3, format);
- put_bits(&s->pb, 1, (s->pict_type == FF_P_TYPE));
+ put_bits(&s->pb, 1, (s->pict_type == AV_PICTURE_TYPE_P));
/* By now UMV IS DISABLED ON H.263v1, since the restrictions
of H.263v1 UMV implies to check the predicted MV after
calculation of the current MB to see if we're on the limits */
@@ -181,7 +181,7 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number)
put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */
put_bits(&s->pb,3,0); /* Reserved */
- put_bits(&s->pb, 3, s->pict_type == FF_P_TYPE);
+ put_bits(&s->pb, 3, s->pict_type == AV_PICTURE_TYPE_P);
put_bits(&s->pb,1,0); /* Reference Picture Resampling: off */
put_bits(&s->pb,1,0); /* Reduced-Resolution Update: off */
@@ -260,12 +260,12 @@ void h263_encode_gob_header(MpegEncContext * s, int mb_line)
put_bits(&s->pb, 1, 1);
put_bits(&s->pb, 5, s->qscale); /* GQUANT */
put_bits(&s->pb, 1, 1);
- put_bits(&s->pb, 2, s->pict_type == FF_I_TYPE); /* GFID */
+ put_bits(&s->pb, 2, s->pict_type == AV_PICTURE_TYPE_I); /* GFID */
}else{
int gob_number= mb_line / s->gob_index;
put_bits(&s->pb, 5, gob_number); /* GN */
- put_bits(&s->pb, 2, s->pict_type == FF_I_TYPE); /* GFID */
+ put_bits(&s->pb, 2, s->pict_type == AV_PICTURE_TYPE_I); /* GFID */
put_bits(&s->pb, 5, s->qscale); /* GQUANT */
}
}
@@ -607,7 +607,7 @@ void h263_encode_mb(MpegEncContext * s,
}
cbpc = cbp & 3;
- if (s->pict_type == FF_I_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_I) {
if(s->dquant) cbpc+=4;
put_bits(&s->pb,
ff_h263_intra_MCBPC_bits[cbpc],
diff --git a/libavcodec/jfdctfst.c b/libavcodec/jfdctfst.c
index b911909ec3..8d3448a676 100644
--- a/libavcodec/jfdctfst.c
+++ b/libavcodec/jfdctfst.c
@@ -145,9 +145,9 @@
#define MULTIPLY(var,const) ((DCTELEM) DESCALE((var) * (const), CONST_BITS))
static av_always_inline void row_fdct(DCTELEM * data){
- int_fast16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
- int_fast16_t tmp10, tmp11, tmp12, tmp13;
- int_fast16_t z1, z2, z3, z4, z5, z11, z13;
+ int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int tmp10, tmp11, tmp12, tmp13;
+ int z1, z2, z3, z4, z5, z11, z13;
DCTELEM *dataptr;
int ctr;
@@ -209,9 +209,9 @@ static av_always_inline void row_fdct(DCTELEM * data){
GLOBAL(void)
fdct_ifast (DCTELEM * data)
{
- int_fast16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
- int_fast16_t tmp10, tmp11, tmp12, tmp13;
- int_fast16_t z1, z2, z3, z4, z5, z11, z13;
+ int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int tmp10, tmp11, tmp12, tmp13;
+ int z1, z2, z3, z4, z5, z11, z13;
DCTELEM *dataptr;
int ctr;
@@ -275,9 +275,9 @@ fdct_ifast (DCTELEM * data)
GLOBAL(void)
fdct_ifast248 (DCTELEM * data)
{
- int_fast16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
- int_fast16_t tmp10, tmp11, tmp12, tmp13;
- int_fast16_t z1;
+ int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int tmp10, tmp11, tmp12, tmp13;
+ int z1;
DCTELEM *dataptr;
int ctr;
diff --git a/libavcodec/jfdctint.c b/libavcodec/jfdctint.c
index f6e8c4e939..072c7440b5 100644
--- a/libavcodec/jfdctint.c
+++ b/libavcodec/jfdctint.c
@@ -181,9 +181,9 @@
static av_always_inline void row_fdct(DCTELEM * data){
- int_fast32_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
- int_fast32_t tmp10, tmp11, tmp12, tmp13;
- int_fast32_t z1, z2, z3, z4, z5;
+ int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int tmp10, tmp11, tmp12, tmp13;
+ int z1, z2, z3, z4, z5;
DCTELEM *dataptr;
int ctr;
@@ -259,9 +259,9 @@ static av_always_inline void row_fdct(DCTELEM * data){
GLOBAL(void)
ff_jpeg_fdct_islow (DCTELEM * data)
{
- int_fast32_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
- int_fast32_t tmp10, tmp11, tmp12, tmp13;
- int_fast32_t z1, z2, z3, z4, z5;
+ int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int tmp10, tmp11, tmp12, tmp13;
+ int z1, z2, z3, z4, z5;
DCTELEM *dataptr;
int ctr;
@@ -345,9 +345,9 @@ ff_jpeg_fdct_islow (DCTELEM * data)
GLOBAL(void)
ff_fdct248_islow (DCTELEM * data)
{
- int_fast32_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
- int_fast32_t tmp10, tmp11, tmp12, tmp13;
- int_fast32_t z1;
+ int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int tmp10, tmp11, tmp12, tmp13;
+ int z1;
DCTELEM *dataptr;
int ctr;
diff --git a/libavcodec/jpeglsenc.c b/libavcodec/jpeglsenc.c
index ce6dc10f39..fef06815ac 100644
--- a/libavcodec/jpeglsenc.c
+++ b/libavcodec/jpeglsenc.c
@@ -245,7 +245,7 @@ static int encode_picture_ls(AVCodecContext *avctx, unsigned char *buf, int buf_
init_put_bits(&pb2, buf2, buf_size);
*p = *pict;
- p->pict_type= FF_I_TYPE;
+ p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
if(avctx->pix_fmt == PIX_FMT_GRAY8 || avctx->pix_fmt == PIX_FMT_GRAY16)
diff --git a/libavcodec/jvdec.c b/libavcodec/jvdec.c
index 9e978521ba..f4941992f6 100644
--- a/libavcodec/jvdec.c
+++ b/libavcodec/jvdec.c
@@ -180,7 +180,7 @@ static int decode_frame(AVCodecContext *avctx,
if (video_size) {
s->frame.key_frame = 1;
- s->frame.pict_type = FF_I_TYPE;
+ s->frame.pict_type = AV_PICTURE_TYPE_I;
s->frame.palette_has_changed = s->palette_has_changed;
s->palette_has_changed = 0;
memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
diff --git a/libavcodec/kgv1dec.c b/libavcodec/kgv1dec.c
index 2f6a80c2c6..c364cfc593 100644
--- a/libavcodec/kgv1dec.c
+++ b/libavcodec/kgv1dec.c
@@ -150,6 +150,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
c->avctx = avctx;
avctx->pix_fmt = PIX_FMT_RGB555;
+ avcodec_get_frame_defaults(&c->pic);
return 0;
}
diff --git a/libavcodec/kmvc.c b/libavcodec/kmvc.c
index bd628d85c0..9ea18e87d6 100644
--- a/libavcodec/kmvc.c
+++ b/libavcodec/kmvc.c
@@ -258,10 +258,10 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPa
if (header & KMVC_KEYFRAME) {
ctx->pic.key_frame = 1;
- ctx->pic.pict_type = FF_I_TYPE;
+ ctx->pic.pict_type = AV_PICTURE_TYPE_I;
} else {
ctx->pic.key_frame = 0;
- ctx->pic.pict_type = FF_P_TYPE;
+ ctx->pic.pict_type = AV_PICTURE_TYPE_P;
}
/* if palette has been changed, copy it from palctrl */
@@ -379,6 +379,7 @@ static av_cold int decode_init(AVCodecContext * avctx)
}
}
+ avcodec_get_frame_defaults(&c->pic);
avctx->pix_fmt = PIX_FMT_PAL8;
return 0;
diff --git a/libavcodec/lcldec.c b/libavcodec/lcldec.c
index f2e5bb0c80..57735ac6ff 100644
--- a/libavcodec/lcldec.c
+++ b/libavcodec/lcldec.c
@@ -453,6 +453,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
unsigned int max_basesize = FFALIGN(avctx->width, 4) * FFALIGN(avctx->height, 4) + AV_LZO_OUTPUT_PADDING;
unsigned int max_decomp_size;
+ avcodec_get_frame_defaults(&c->pic);
if (avctx->extradata_size < 8) {
av_log(avctx, AV_LOG_ERROR, "Extradata size too small.\n");
return 1;
diff --git a/libavcodec/lclenc.c b/libavcodec/lclenc.c
index a90c1cf588..178fe0ae26 100644
--- a/libavcodec/lclenc.c
+++ b/libavcodec/lclenc.c
@@ -76,7 +76,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
int zret; // Zlib return code
*p = *pict;
- p->pict_type= FF_I_TYPE;
+ p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
if(avctx->pix_fmt != PIX_FMT_BGR24){
diff --git a/libavcodec/libopenjpeg.c b/libavcodec/libopenjpeg.c
index 1f8530c7c6..39747e78ea 100644
--- a/libavcodec/libopenjpeg.c
+++ b/libavcodec/libopenjpeg.c
@@ -53,6 +53,7 @@ static av_cold int libopenjpeg_decode_init(AVCodecContext *avctx)
LibOpenJPEGContext *ctx = avctx->priv_data;
opj_set_default_decoder_parameters(&ctx->dec_params);
+ avcodec_get_frame_defaults(&ctx->image);
avctx->coded_frame = &ctx->image;
return 0;
}
diff --git a/libavcodec/libvorbis.c b/libavcodec/libvorbis.c
index 1af0f8a32f..bc219ded9b 100644
--- a/libavcodec/libvorbis.c
+++ b/libavcodec/libvorbis.c
@@ -55,7 +55,7 @@ typedef struct OggVorbisContext {
} OggVorbisContext ;
static const AVOption options[]={
-{"iblock", "Sets the impulse block bias", offsetof(OggVorbisContext, iblock), FF_OPT_TYPE_DOUBLE, 0, -15, 0, AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_ENCODING_PARAM},
+{"iblock", "Sets the impulse block bias", offsetof(OggVorbisContext, iblock), FF_OPT_TYPE_DOUBLE, {.dbl = 0}, -15, 0, AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_ENCODING_PARAM},
{NULL}
};
static const AVClass class = { "libvorbis", av_default_item_name, options, LIBAVUTIL_VERSION_INT };
diff --git a/libavcodec/libvpxenc.c b/libavcodec/libvpxenc.c
index 1cdac740b4..ef85b3074a 100644
--- a/libavcodec/libvpxenc.c
+++ b/libavcodec/libvpxenc.c
@@ -358,9 +358,9 @@ static int storeframe(AVCodecContext *avctx, struct FrameListData *cx_frame,
coded_frame->key_frame = !!(cx_frame->flags & VPX_FRAME_IS_KEY);
if (coded_frame->key_frame)
- coded_frame->pict_type = FF_I_TYPE;
+ coded_frame->pict_type = AV_PICTURE_TYPE_I;
else
- coded_frame->pict_type = FF_P_TYPE;
+ coded_frame->pict_type = AV_PICTURE_TYPE_P;
} else {
av_log(avctx, AV_LOG_ERROR,
"Compressed frame larger than storage provided! (%zu/%d)\n",
diff --git a/libavcodec/libx264.c b/libavcodec/libx264.c
index 0dd954d710..838cb703e8 100644
--- a/libavcodec/libx264.c
+++ b/libavcodec/libx264.c
@@ -35,13 +35,13 @@ typedef struct X264Context {
uint8_t *sei;
int sei_size;
AVFrame out_pic;
- const char *preset;
- const char *tune;
- const char *profile;
- const char *level;
+ char *preset;
+ char *tune;
+ char *profile;
+ char *level;
int fastfirstpass;
- const char *stats;
- const char *weightp;
+ char *stats;
+ char *weightp;
} X264Context;
static void X264_log(void *p, int level, const char *fmt, va_list args)
@@ -110,9 +110,9 @@ static int X264_frame(AVCodecContext *ctx, uint8_t *buf,
x4->pic.i_pts = frame->pts;
x4->pic.i_type =
- frame->pict_type == FF_I_TYPE ? X264_TYPE_KEYFRAME :
- frame->pict_type == FF_P_TYPE ? X264_TYPE_P :
- frame->pict_type == FF_B_TYPE ? X264_TYPE_B :
+ frame->pict_type == AV_PICTURE_TYPE_I ? X264_TYPE_KEYFRAME :
+ frame->pict_type == AV_PICTURE_TYPE_P ? X264_TYPE_P :
+ frame->pict_type == AV_PICTURE_TYPE_B ? X264_TYPE_B :
X264_TYPE_AUTO;
if (x4->params.b_tff != frame->top_field_first) {
x4->params.b_tff = frame->top_field_first;
@@ -135,14 +135,14 @@ static int X264_frame(AVCodecContext *ctx, uint8_t *buf,
switch (pic_out.i_type) {
case X264_TYPE_IDR:
case X264_TYPE_I:
- x4->out_pic.pict_type = FF_I_TYPE;
+ x4->out_pic.pict_type = AV_PICTURE_TYPE_I;
break;
case X264_TYPE_P:
- x4->out_pic.pict_type = FF_P_TYPE;
+ x4->out_pic.pict_type = AV_PICTURE_TYPE_P;
break;
case X264_TYPE_B:
case X264_TYPE_BREF:
- x4->out_pic.pict_type = FF_B_TYPE;
+ x4->out_pic.pict_type = AV_PICTURE_TYPE_B;
break;
}
@@ -163,6 +163,13 @@ static av_cold int X264_close(AVCodecContext *avctx)
if (x4->enc)
x264_encoder_close(x4->enc);
+ av_free(x4->preset);
+ av_free(x4->tune);
+ av_free(x4->profile);
+ av_free(x4->level);
+ av_free(x4->stats);
+ av_free(x4->weightp);
+
return 0;
}
@@ -185,7 +192,7 @@ static void check_default_settings(AVCodecContext *avctx)
score += x4->params.analyse.inter == 0 && x4->params.analyse.i_subpel_refine == 8;
if (score >= 5) {
av_log(avctx, AV_LOG_ERROR, "Default settings detected, using medium profile\n");
- x4->preset = "medium";
+ x4->preset = av_strdup("medium");
if (avctx->bit_rate == 200*1000)
avctx->crf = 23;
}
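
The switch from string literals to av_strdup() pairs with the new av_free() calls in X264_close() above: once the close path frees these fields unconditionally, every value stored in them has to be an owned heap copy, since freeing a string literal is undefined behaviour. A two-line sketch of the rule:

    x4->preset = av_strdup("medium");   /* store an owned copy ...          */
    av_free(x4->preset);                /* ... so X264_close() may free it  */
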
diff --git a/libavcodec/libxavs.c b/libavcodec/libxavs.c
index 08b93f5895..4b604bdf99 100644
--- a/libavcodec/libxavs.c
+++ b/libavcodec/libxavs.c
@@ -138,14 +138,14 @@ static int XAVS_frame(AVCodecContext *ctx, uint8_t *buf,
switch (pic_out.i_type) {
case XAVS_TYPE_IDR:
case XAVS_TYPE_I:
- x4->out_pic.pict_type = FF_I_TYPE;
+ x4->out_pic.pict_type = AV_PICTURE_TYPE_I;
break;
case XAVS_TYPE_P:
- x4->out_pic.pict_type = FF_P_TYPE;
+ x4->out_pic.pict_type = AV_PICTURE_TYPE_P;
break;
case XAVS_TYPE_B:
case XAVS_TYPE_BREF:
- x4->out_pic.pict_type = FF_B_TYPE;
+ x4->out_pic.pict_type = AV_PICTURE_TYPE_B;
break;
}
diff --git a/libavcodec/libxvid_rc.c b/libavcodec/libxvid_rc.c
index 08e89c8df1..dbf7b0b6c1 100644
--- a/libavcodec/libxvid_rc.c
+++ b/libavcodec/libxvid_rc.c
@@ -134,7 +134,7 @@ float ff_xvid_rate_estimate_qscale(MpegEncContext *s, int dry_run){
if(!dry_run)
s->rc_context.dry_run_qscale= 0;
- if(s->pict_type == FF_B_TYPE) //FIXME this is not exactly identical to xvid
+ if(s->pict_type == AV_PICTURE_TYPE_B) //FIXME this is not exactly identical to xvid
return xvid_plg_data.quant * FF_QP2LAMBDA * s->avctx->b_quant_factor + s->avctx->b_quant_offset;
else
return xvid_plg_data.quant * FF_QP2LAMBDA;
diff --git a/libavcodec/libxvidff.c b/libavcodec/libxvidff.c
index 65069d0d87..9b5c17c59d 100644
--- a/libavcodec/libxvidff.c
+++ b/libavcodec/libxvidff.c
@@ -25,9 +25,6 @@
* @author Adam Thayer (krevnik@comcast.net)
*/
-/* needed for mkstemp() */
-#define _XOPEN_SOURCE 600
-
#include <xvid.h>
#include <unistd.h>
#include "avcodec.h"
@@ -450,9 +447,9 @@ static int xvid_encode_frame(AVCodecContext *avctx,
xvid_enc_frame.vol_flags = x->vol_flags;
xvid_enc_frame.motion = x->me_flags;
xvid_enc_frame.type =
- picture->pict_type == FF_I_TYPE ? XVID_TYPE_IVOP :
- picture->pict_type == FF_P_TYPE ? XVID_TYPE_PVOP :
- picture->pict_type == FF_B_TYPE ? XVID_TYPE_BVOP :
+ picture->pict_type == AV_PICTURE_TYPE_I ? XVID_TYPE_IVOP :
+ picture->pict_type == AV_PICTURE_TYPE_P ? XVID_TYPE_PVOP :
+ picture->pict_type == AV_PICTURE_TYPE_B ? XVID_TYPE_BVOP :
XVID_TYPE_AUTO;
/* Pixel aspect ratio setting */
@@ -493,13 +490,13 @@ static int xvid_encode_frame(AVCodecContext *avctx,
if( 0 <= xerr ) {
p->quality = xvid_enc_stats.quant * FF_QP2LAMBDA;
if( xvid_enc_stats.type == XVID_TYPE_PVOP )
- p->pict_type = FF_P_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_P;
else if( xvid_enc_stats.type == XVID_TYPE_BVOP )
- p->pict_type = FF_B_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_B;
else if( xvid_enc_stats.type == XVID_TYPE_SVOP )
- p->pict_type = FF_S_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_S;
else
- p->pict_type = FF_I_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_I;
if( xvid_enc_frame.out_flags & XVID_KEYFRAME ) {
p->key_frame = 1;
if( x->quicktime_format )
diff --git a/libavcodec/ljpegenc.c b/libavcodec/ljpegenc.c
index 56336cb3bf..e5d19fcaaa 100644
--- a/libavcodec/ljpegenc.c
+++ b/libavcodec/ljpegenc.c
@@ -49,7 +49,7 @@ static int encode_picture_lossless(AVCodecContext *avctx, unsigned char *buf, in
init_put_bits(&s->pb, buf, buf_size);
*p = *pict;
- p->pict_type= FF_I_TYPE;
+ p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
ff_mjpeg_encode_picture_header(s);
diff --git a/libavcodec/loco.c b/libavcodec/loco.c
index 8f2b8ff325..f5807b8f0a 100644
--- a/libavcodec/loco.c
+++ b/libavcodec/loco.c
@@ -272,6 +272,8 @@ static av_cold int decode_init(AVCodecContext *avctx){
if(avctx->debug & FF_DEBUG_PICT_INFO)
av_log(avctx, AV_LOG_INFO, "lossy:%i, version:%i, mode: %i\n", l->lossy, version, l->mode);
+ avcodec_get_frame_defaults(&l->pic);
+
return 0;
}
diff --git a/libavcodec/lpc.c b/libavcodec/lpc.c
index 6d7671c81f..d041cafe85 100644
--- a/libavcodec/lpc.c
+++ b/libavcodec/lpc.c
@@ -158,7 +158,7 @@ int ff_lpc_calc_coefs(LPCContext *s,
const int32_t *samples, int blocksize, int min_order,
int max_order, int precision,
int32_t coefs[][MAX_LPC_ORDER], int *shift,
- enum AVLPCType lpc_type, int lpc_passes,
+ enum FFLPCType lpc_type, int lpc_passes,
int omethod, int max_shift, int zero_shift)
{
double autoc[MAX_LPC_ORDER+1];
@@ -168,7 +168,7 @@ int ff_lpc_calc_coefs(LPCContext *s,
int opt_order;
assert(max_order >= MIN_LPC_ORDER && max_order <= MAX_LPC_ORDER &&
- lpc_type > AV_LPC_TYPE_FIXED);
+ lpc_type > FF_LPC_TYPE_FIXED);
/* reinit LPC context if parameters have changed */
if (blocksize != s->blocksize || max_order != s->max_order ||
@@ -177,7 +177,7 @@ int ff_lpc_calc_coefs(LPCContext *s,
ff_lpc_init(s, blocksize, max_order, lpc_type);
}
- if (lpc_type == AV_LPC_TYPE_LEVINSON) {
+ if (lpc_type == FF_LPC_TYPE_LEVINSON) {
double *windowed_samples = s->windowed_samples + max_order;
s->lpc_apply_welch_window(samples, blocksize, windowed_samples);
@@ -188,7 +188,7 @@ int ff_lpc_calc_coefs(LPCContext *s,
for(i=0; i<max_order; i++)
ref[i] = fabs(lpc[i][i]);
- } else if (lpc_type == AV_LPC_TYPE_CHOLESKY) {
+ } else if (lpc_type == FF_LPC_TYPE_CHOLESKY) {
LLSModel m[2];
double var[MAX_LPC_ORDER+1], av_uninit(weight);
@@ -241,13 +241,13 @@ int ff_lpc_calc_coefs(LPCContext *s,
}
av_cold int ff_lpc_init(LPCContext *s, int blocksize, int max_order,
- enum AVLPCType lpc_type)
+ enum FFLPCType lpc_type)
{
s->blocksize = blocksize;
s->max_order = max_order;
s->lpc_type = lpc_type;
- if (lpc_type == AV_LPC_TYPE_LEVINSON) {
+ if (lpc_type == FF_LPC_TYPE_LEVINSON) {
s->windowed_samples = av_mallocz((blocksize + max_order + 2) *
sizeof(*s->windowed_samples));
if (!s->windowed_samples)
diff --git a/libavcodec/lpc.h b/libavcodec/lpc.h
index 96b66df909..9db5dbac30 100644
--- a/libavcodec/lpc.h
+++ b/libavcodec/lpc.h
@@ -35,11 +35,22 @@
#define MIN_LPC_ORDER 1
#define MAX_LPC_ORDER 32
+/**
+ * LPC analysis type
+ */
+enum FFLPCType {
+ FF_LPC_TYPE_DEFAULT = -1, ///< use the codec default LPC type
+ FF_LPC_TYPE_NONE = 0, ///< do not use LPC prediction or use all zero coefficients
+ FF_LPC_TYPE_FIXED = 1, ///< fixed LPC coefficients
+ FF_LPC_TYPE_LEVINSON = 2, ///< Levinson-Durbin recursion
+ FF_LPC_TYPE_CHOLESKY = 3, ///< Cholesky factorization
+ FF_LPC_TYPE_NB , ///< Not part of ABI
+};
typedef struct LPCContext {
int blocksize;
int max_order;
- enum AVLPCType lpc_type;
+ enum FFLPCType lpc_type;
double *windowed_samples;
/**
@@ -77,14 +88,14 @@ int ff_lpc_calc_coefs(LPCContext *s,
const int32_t *samples, int blocksize, int min_order,
int max_order, int precision,
int32_t coefs[][MAX_LPC_ORDER], int *shift,
- enum AVLPCType lpc_type, int lpc_passes,
+ enum FFLPCType lpc_type, int lpc_passes,
int omethod, int max_shift, int zero_shift);
/**
* Initialize LPCContext.
*/
int ff_lpc_init(LPCContext *s, int blocksize, int max_order,
- enum AVLPCType lpc_type);
+ enum FFLPCType lpc_type);
void ff_lpc_init_x86(LPCContext *s);
/**
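
With the enum moved from the public AVLPCType to the internal FFLPCType, in-tree callers pass the FF_-prefixed constants to the functions declared above. A minimal sketch under those signatures ('samples' and 'blocksize' are assumed to exist, the precision, shift and order-method values are arbitrary illustrative choices, and error handling is omitted):

    LPCContext lpc;
    int32_t    coefs[MAX_LPC_ORDER][MAX_LPC_ORDER];
    int        shift[MAX_LPC_ORDER];
    int        order;

    ff_lpc_init(&lpc, blocksize, MAX_LPC_ORDER, FF_LPC_TYPE_LEVINSON);
    order = ff_lpc_calc_coefs(&lpc, samples, blocksize,
                              MIN_LPC_ORDER, MAX_LPC_ORDER, 15,
                              coefs, shift, FF_LPC_TYPE_LEVINSON, 1,
                              0 /* omethod */, 15, 0);
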
diff --git a/libavcodec/mdec.c b/libavcodec/mdec.c
index ba06641ab0..30cd3ab176 100644
--- a/libavcodec/mdec.c
+++ b/libavcodec/mdec.c
@@ -170,7 +170,7 @@ static int decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
- p->pict_type= FF_I_TYPE;
+ p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
av_fast_malloc(&a->bitstream_buffer, &a->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
@@ -218,6 +218,7 @@ static av_cold void mdec_common_init(AVCodecContext *avctx){
a->mb_width = (avctx->coded_width + 15) / 16;
a->mb_height = (avctx->coded_height + 15) / 16;
+ avcodec_get_frame_defaults(&a->picture);
avctx->coded_frame= &a->picture;
a->avctx= avctx;
}
diff --git a/libavcodec/mimic.c b/libavcodec/mimic.c
index 2f3f9d9930..ee625d0dbf 100644
--- a/libavcodec/mimic.c
+++ b/libavcodec/mimic.c
@@ -352,7 +352,7 @@ static int mimic_decode_frame(AVCodecContext *avctx, void *data,
}
ctx->buf_ptrs[ctx->cur_index].reference = 1;
- ctx->buf_ptrs[ctx->cur_index].pict_type = is_pframe ? FF_P_TYPE:FF_I_TYPE;
+ ctx->buf_ptrs[ctx->cur_index].pict_type = is_pframe ? AV_PICTURE_TYPE_P:AV_PICTURE_TYPE_I;
if(ff_thread_get_buffer(avctx, &ctx->buf_ptrs[ctx->cur_index])) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
diff --git a/libavcodec/mjpegdec.c b/libavcodec/mjpegdec.c
index 2fe2070cc0..afcc1b74a7 100644
--- a/libavcodec/mjpegdec.c
+++ b/libavcodec/mjpegdec.c
@@ -84,6 +84,7 @@ av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
if (!s->picture_ptr)
s->picture_ptr = &s->picture;
+ avcodec_get_frame_defaults(&s->picture);
s->avctx = avctx;
dsputil_init(&s->dsp, avctx);
@@ -353,7 +354,7 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
- s->picture_ptr->pict_type= FF_I_TYPE;
+ s->picture_ptr->pict_type= AV_PICTURE_TYPE_I;
s->picture_ptr->key_frame= 1;
s->got_picture = 1;
@@ -1281,9 +1282,7 @@ static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
const uint8_t *buf_ptr;
unsigned int v, v2;
int val;
-#ifdef DEBUG
int skipped=0;
-#endif
buf_ptr = *pbuf_ptr;
while (buf_ptr < buf_end) {
@@ -1293,9 +1292,7 @@ static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
val = *buf_ptr++;
goto found;
}
-#ifdef DEBUG
skipped++;
-#endif
}
val = -1;
found:
diff --git a/libavcodec/mlib/dsputil_mlib.c b/libavcodec/mlib/dsputil_mlib.c
index 3b2d693d88..1a18a8a223 100644
--- a/libavcodec/mlib/dsputil_mlib.c
+++ b/libavcodec/mlib/dsputil_mlib.c
@@ -421,13 +421,13 @@ static void ff_fdct_mlib(DCTELEM *data)
void dsputil_init_mlib(DSPContext* c, AVCodecContext *avctx)
{
- const int h264_high_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
+ const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
c->get_pixels = get_pixels_mlib;
c->diff_pixels = diff_pixels_mlib;
c->add_pixels_clamped = add_pixels_clamped_mlib;
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
c->put_pixels_tab[0][0] = put_pixels16_mlib;
c->put_pixels_tab[0][1] = put_pixels16_x2_mlib;
c->put_pixels_tab[0][2] = put_pixels16_y2_mlib;
diff --git a/libavcodec/mlp_parser.c b/libavcodec/mlp_parser.c
index 3b87f432ad..a6bdc0c0a5 100644
--- a/libavcodec/mlp_parser.c
+++ b/libavcodec/mlp_parser.c
@@ -47,24 +47,24 @@ const uint64_t ff_mlp_layout[32] = {
AV_CH_LAYOUT_MONO,
AV_CH_LAYOUT_STEREO,
AV_CH_LAYOUT_2_1,
- AV_CH_LAYOUT_2_2,
+ AV_CH_LAYOUT_QUAD,
AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY,
AV_CH_LAYOUT_2_1|AV_CH_LOW_FREQUENCY,
- AV_CH_LAYOUT_2_2|AV_CH_LOW_FREQUENCY,
+ AV_CH_LAYOUT_QUAD|AV_CH_LOW_FREQUENCY,
AV_CH_LAYOUT_SURROUND,
AV_CH_LAYOUT_4POINT0,
- AV_CH_LAYOUT_5POINT0,
+ AV_CH_LAYOUT_5POINT0_BACK,
AV_CH_LAYOUT_SURROUND|AV_CH_LOW_FREQUENCY,
AV_CH_LAYOUT_4POINT0|AV_CH_LOW_FREQUENCY,
- AV_CH_LAYOUT_5POINT1,
+ AV_CH_LAYOUT_5POINT1_BACK,
AV_CH_LAYOUT_4POINT0,
- AV_CH_LAYOUT_5POINT0,
+ AV_CH_LAYOUT_5POINT0_BACK,
AV_CH_LAYOUT_SURROUND|AV_CH_LOW_FREQUENCY,
AV_CH_LAYOUT_4POINT0|AV_CH_LOW_FREQUENCY,
- AV_CH_LAYOUT_5POINT1,
- AV_CH_LAYOUT_2_2|AV_CH_LOW_FREQUENCY,
- AV_CH_LAYOUT_5POINT0,
- AV_CH_LAYOUT_5POINT1,
+ AV_CH_LAYOUT_5POINT1_BACK,
+ AV_CH_LAYOUT_QUAD|AV_CH_LOW_FREQUENCY,
+ AV_CH_LAYOUT_5POINT0_BACK,
+ AV_CH_LAYOUT_5POINT1_BACK,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
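
The substitutions in this table change only which rear speaker pair the layout names; for orientation, the assumed libavutil definitions (not part of this diff) relate as follows:

    /* AV_CH_LAYOUT_2_2            = STEREO   + side pair  */
    /* AV_CH_LAYOUT_QUAD           = STEREO   + back pair  */
    /* AV_CH_LAYOUT_5POINT0        = SURROUND + side pair  */
    /* AV_CH_LAYOUT_5POINT0_BACK   = SURROUND + back pair  */
    /* AV_CH_LAYOUT_5POINT1(_BACK) = the above + LFE       */
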
diff --git a/libavcodec/mlpdec.c b/libavcodec/mlpdec.c
index 7b3bd710bb..50826d0ff5 100644
--- a/libavcodec/mlpdec.c
+++ b/libavcodec/mlpdec.c
@@ -459,13 +459,13 @@ static int read_restart_header(MLPDecodeContext *m, GetBitContext *gbp,
}
if (m->avctx->codec_id == CODEC_ID_MLP && m->needs_reordering) {
- if (m->avctx->channel_layout == (AV_CH_LAYOUT_2_2|AV_CH_LOW_FREQUENCY) ||
- m->avctx->channel_layout == AV_CH_LAYOUT_5POINT0) {
+ if (m->avctx->channel_layout == (AV_CH_LAYOUT_QUAD|AV_CH_LOW_FREQUENCY) ||
+ m->avctx->channel_layout == AV_CH_LAYOUT_5POINT0_BACK) {
int i = s->ch_assign[4];
s->ch_assign[4] = s->ch_assign[3];
s->ch_assign[3] = s->ch_assign[2];
s->ch_assign[2] = i;
- } else if (m->avctx->channel_layout == AV_CH_LAYOUT_5POINT1) {
+ } else if (m->avctx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) {
FFSWAP(int, s->ch_assign[2], s->ch_assign[4]);
FFSWAP(int, s->ch_assign[3], s->ch_assign[5]);
}
diff --git a/libavcodec/mmvideo.c b/libavcodec/mmvideo.c
index 92420b1d25..707ddc5f7e 100644
--- a/libavcodec/mmvideo.c
+++ b/libavcodec/mmvideo.c
@@ -58,6 +58,7 @@ static av_cold int mm_decode_init(AVCodecContext *avctx)
avctx->pix_fmt = PIX_FMT_PAL8;
+ avcodec_get_frame_defaults(&s->frame);
s->frame.reference = 1;
return 0;
diff --git a/libavcodec/motion_est.c b/libavcodec/motion_est.c
index 62fe2d47aa..c12ebf4c7c 100644
--- a/libavcodec/motion_est.c
+++ b/libavcodec/motion_est.c
@@ -1119,8 +1119,6 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
// pic->mb_cmp_score[s->mb_stride * mb_y + mb_x] = dmin;
c->mc_mb_var_sum_temp += (vard+128)>>8;
- av_dlog(s, "varc=%4d avg_var=%4d (sum=%4d) vard=%4d mx=%2d my=%2d\n",
- varc, s->avg_mb_var, sum, vard, mx - xx, my - yy);
if(mb_type){
int p_score= FFMIN(vard, varc-500+(s->lambda2>>FF_LAMBDA_SHIFT)*100);
int i_score= varc-500+(s->lambda2>>FF_LAMBDA_SHIFT)*20;
@@ -1893,7 +1891,7 @@ int ff_get_best_fcode(MpegEncContext * s, int16_t (*mv_table)[2], int type)
continue;
for(j=0; j<fcode && j<8; j++){
- if(s->pict_type==FF_B_TYPE || s->current_picture.mc_mb_var[xy] < s->current_picture.mb_var[xy])
+ if(s->pict_type==AV_PICTURE_TYPE_B || s->current_picture.mc_mb_var[xy] < s->current_picture.mb_var[xy])
score[j]-= 170;
}
}
@@ -1925,7 +1923,7 @@ void ff_fix_long_p_mvs(MpegEncContext * s)
MotionEstContext * const c= &s->me;
const int f_code= s->f_code;
int y, range;
- assert(s->pict_type==FF_P_TYPE);
+ assert(s->pict_type==AV_PICTURE_TYPE_P);
range = (((s->out_format == FMT_MPEG1 || s->msmpeg4_version) ? 8 : 16) << f_code);
diff --git a/libavcodec/motion_est_template.c b/libavcodec/motion_est_template.c
index 87cd5be404..461e85932b 100644
--- a/libavcodec/motion_est_template.c
+++ b/libavcodec/motion_est_template.c
@@ -1037,7 +1037,7 @@ static av_always_inline int epzs_motion_search_internal(MpegEncContext * s, int
score_map[0]= dmin;
//FIXME precalc first term below?
- if((s->pict_type == FF_B_TYPE && !(c->flags & FLAG_DIRECT)) || s->flags&CODEC_FLAG_MV0)
+ if((s->pict_type == AV_PICTURE_TYPE_B && !(c->flags & FLAG_DIRECT)) || s->flags&CODEC_FLAG_MV0)
dmin += (mv_penalty[pred_x] + mv_penalty[pred_y])*penalty_factor;
/* first line */
diff --git a/libavcodec/motionpixels.c b/libavcodec/motionpixels.c
index ed0af4d90e..01558ab95b 100644
--- a/libavcodec/motionpixels.c
+++ b/libavcodec/motionpixels.c
@@ -61,6 +61,7 @@ static av_cold int mp_decode_init(AVCodecContext *avctx)
mp->vpt = av_mallocz(avctx->height * sizeof(YuvPixel));
mp->hpt = av_mallocz(avctx->height * avctx->width / 16 * sizeof(YuvPixel));
avctx->pix_fmt = PIX_FMT_RGB555;
+ avcodec_get_frame_defaults(&mp->frame);
return 0;
}
diff --git a/libavcodec/mpeg12.c b/libavcodec/mpeg12.c
index 319a513860..c3394b9c81 100644
--- a/libavcodec/mpeg12.c
+++ b/libavcodec/mpeg12.c
@@ -214,7 +214,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
assert(s->mb_skipped==0);
if (s->mb_skip_run-- != 0) {
- if (s->pict_type == FF_P_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_P) {
s->mb_skipped = 1;
s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
} else {
@@ -240,7 +240,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
switch(s->pict_type) {
default:
- case FF_I_TYPE:
+ case AV_PICTURE_TYPE_I:
if (get_bits1(&s->gb) == 0) {
if (get_bits1(&s->gb) == 0){
av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in I Frame at %d %d\n", s->mb_x, s->mb_y);
@@ -251,7 +251,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
mb_type = MB_TYPE_INTRA;
}
break;
- case FF_P_TYPE:
+ case AV_PICTURE_TYPE_P:
mb_type = get_vlc2(&s->gb, mb_ptype_vlc.table, MB_PTYPE_VLC_BITS, 1);
if (mb_type < 0){
av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in P Frame at %d %d\n", s->mb_x, s->mb_y);
@@ -259,7 +259,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
}
mb_type = ptype2mb_type[ mb_type ];
break;
- case FF_B_TYPE:
+ case AV_PICTURE_TYPE_B:
mb_type = get_vlc2(&s->gb, mb_btype_vlc.table, MB_BTYPE_VLC_BITS, 1);
if (mb_type < 0){
av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in B Frame at %d %d\n", s->mb_x, s->mb_y);
@@ -1198,7 +1198,7 @@ static int mpeg_decode_update_thread_context(AVCodecContext *avctx, const AVCode
if(!ctx->mpeg_enc_ctx_allocated)
memcpy(s + 1, s1 + 1, sizeof(Mpeg1Context) - sizeof(MpegEncContext));
- if(!(s->pict_type == FF_B_TYPE || s->low_delay))
+ if(!(s->pict_type == AV_PICTURE_TYPE_B || s->low_delay))
s->picture_number++;
return 0;
@@ -1408,7 +1408,7 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
return -1;
vbv_delay= get_bits(&s->gb, 16);
- if (s->pict_type == FF_P_TYPE || s->pict_type == FF_B_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type == AV_PICTURE_TYPE_B) {
s->full_pel[0] = get_bits1(&s->gb);
f_code = get_bits(&s->gb, 3);
if (f_code == 0 && avctx->error_recognition >= FF_ER_COMPLIANT)
@@ -1416,7 +1416,7 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
s->mpeg_f_code[0][0] = f_code;
s->mpeg_f_code[0][1] = f_code;
}
- if (s->pict_type == FF_B_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_B) {
s->full_pel[1] = get_bits1(&s->gb);
f_code = get_bits(&s->gb, 3);
if (f_code == 0 && avctx->error_recognition >= FF_ER_COMPLIANT)
@@ -1425,7 +1425,7 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
s->mpeg_f_code[1][1] = f_code;
}
s->current_picture.pict_type= s->pict_type;
- s->current_picture.key_frame= s->pict_type == FF_I_TYPE;
+ s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
if(avctx->debug & FF_DEBUG_PICT_INFO)
av_log(avctx, AV_LOG_DEBUG, "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
@@ -1573,13 +1573,13 @@ static void mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
av_log(s->avctx, AV_LOG_ERROR, "Missing picture start code, guessing missing values\n");
if(s->mpeg_f_code[1][0] == 15 && s->mpeg_f_code[1][1]==15){
if(s->mpeg_f_code[0][0] == 15 && s->mpeg_f_code[0][1] == 15)
- s->pict_type= FF_I_TYPE;
+ s->pict_type= AV_PICTURE_TYPE_I;
else
- s->pict_type= FF_P_TYPE;
+ s->pict_type= AV_PICTURE_TYPE_P;
}else
- s->pict_type= FF_B_TYPE;
+ s->pict_type= AV_PICTURE_TYPE_B;
s->current_picture.pict_type= s->pict_type;
- s->current_picture.key_frame= s->pict_type == FF_I_TYPE;
+ s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
}
s->intra_dc_precision = get_bits(&s->gb, 2);
s->picture_structure = get_bits(&s->gb, 2);
@@ -1790,7 +1790,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
if(s->avctx->debug&FF_DEBUG_PICT_INFO){
av_log(s->avctx, AV_LOG_DEBUG, "qp:%d fc:%2d%2d%2d%2d %s %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n",
s->qscale, s->mpeg_f_code[0][0],s->mpeg_f_code[0][1],s->mpeg_f_code[1][0],s->mpeg_f_code[1][1],
- s->pict_type == FF_I_TYPE ? "I" : (s->pict_type == FF_P_TYPE ? "P" : (s->pict_type == FF_B_TYPE ? "B" : "S")),
+ s->pict_type == AV_PICTURE_TYPE_I ? "I" : (s->pict_type == AV_PICTURE_TYPE_P ? "P" : (s->pict_type == AV_PICTURE_TYPE_B ? "B" : "S")),
s->progressive_sequence ? "ps" :"", s->progressive_frame ? "pf" : "", s->alternate_scan ? "alt" :"", s->top_field_first ? "top" :"",
s->intra_dc_precision, s->picture_structure, s->frame_pred_frame_dct, s->concealment_motion_vectors,
s->q_scale_type, s->intra_vlc_format, s->repeat_first_field, s->chroma_420_type ? "420" :"");
@@ -1813,7 +1813,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
for(i=0; i<2; i++){
for(dir=0; dir<2; dir++){
- if (s->mb_intra || (dir==1 && s->pict_type != FF_B_TYPE)) {
+ if (s->mb_intra || (dir==1 && s->pict_type != AV_PICTURE_TYPE_B)) {
motion_x = motion_y = 0;
}else if (s->mv_type == MV_TYPE_16X16 || (s->mv_type == MV_TYPE_FIELD && field_pic)){
motion_x = s->mv[dir][0][0];
@@ -1853,7 +1853,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
if(s->mb_y >= s->mb_height){
int left= get_bits_left(&s->gb);
- int is_d10= s->chroma_format==2 && s->pict_type==FF_I_TYPE && avctx->profile==0 && avctx->level==5
+ int is_d10= s->chroma_format==2 && s->pict_type==AV_PICTURE_TYPE_I && avctx->profile==0 && avctx->level==5
&& s->intra_dc_precision == 2 && s->q_scale_type == 1 && s->alternate_scan == 0
&& s->progressive_frame == 0 /* vbv_delay == 0xBBB || 0xE10*/;
@@ -1896,7 +1896,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
}
if(s->mb_skip_run){
int i;
- if(s->pict_type == FF_I_TYPE){
+ if(s->pict_type == AV_PICTURE_TYPE_I){
av_log(s->avctx, AV_LOG_ERROR, "skipped MB in I frame at %d %d\n", s->mb_x, s->mb_y);
return -1;
}
@@ -1909,7 +1909,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
s->mv_type = MV_TYPE_16X16;
else
s->mv_type = MV_TYPE_FIELD;
- if (s->pict_type == FF_P_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_P) {
/* if P type, zero motion vector is implied */
s->mv_dir = MV_DIR_FORWARD;
s->mv[0][0][0] = s->mv[0][0][1] = 0;
@@ -2000,7 +2000,7 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict)
MPV_frame_end(s);
- if (s->pict_type == FF_B_TYPE || s->low_delay) {
+ if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
*pict= *(AVFrame*)s->current_picture_ptr;
ff_print_debug_info(s, pict);
} else {
@@ -2339,7 +2339,7 @@ static int decode_chunks(AVCodecContext *avctx,
uint32_t start_code = -1;
buf_ptr = ff_find_start_code(buf_ptr,buf_end, &start_code);
if (start_code > 0x1ff){
- if(s2->pict_type != FF_B_TYPE || avctx->skip_frame <= AVDISCARD_DEFAULT){
+ if(s2->pict_type != AV_PICTURE_TYPE_B || avctx->skip_frame <= AVDISCARD_DEFAULT){
if(HAVE_THREADS && avctx->active_thread_type&FF_THREAD_SLICE){
int i;
@@ -2465,23 +2465,23 @@ static int decode_chunks(AVCodecContext *avctx,
if(s2->last_picture_ptr==NULL){
/* Skip B-frames if we do not have reference frames and gop is not closed */
- if(s2->pict_type==FF_B_TYPE){
+ if(s2->pict_type==AV_PICTURE_TYPE_B){
if(!s2->closed_gop)
break;
}
}
- if(s2->pict_type==FF_I_TYPE)
+ if(s2->pict_type==AV_PICTURE_TYPE_I)
s->sync=1;
if(s2->next_picture_ptr==NULL){
/* Skip P-frames if we do not have a reference frame or we have an invalid header. */
- if(s2->pict_type==FF_P_TYPE && !s->sync) break;
+ if(s2->pict_type==AV_PICTURE_TYPE_P && !s->sync) break;
}
#if FF_API_HURRY_UP
/* Skip B-frames if we are in a hurry. */
if(avctx->hurry_up && s2->pict_type==FF_B_TYPE) break;
#endif
- if( (avctx->skip_frame >= AVDISCARD_NONREF && s2->pict_type==FF_B_TYPE)
- ||(avctx->skip_frame >= AVDISCARD_NONKEY && s2->pict_type!=FF_I_TYPE)
+ if( (avctx->skip_frame >= AVDISCARD_NONREF && s2->pict_type==AV_PICTURE_TYPE_B)
+ ||(avctx->skip_frame >= AVDISCARD_NONKEY && s2->pict_type!=AV_PICTURE_TYPE_I)
|| avctx->skip_frame >= AVDISCARD_ALL)
break;
#if FF_API_HURRY_UP
diff --git a/libavcodec/mpeg12enc.c b/libavcodec/mpeg12enc.c
index ce5f29df59..487551171c 100644
--- a/libavcodec/mpeg12enc.c
+++ b/libavcodec/mpeg12enc.c
@@ -354,7 +354,7 @@ void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
put_bits(&s->pb, 16, 0xFFFF); /* vbv_delay */
// RAL: Forward f_code also needed for B frames
- if (s->pict_type == FF_P_TYPE || s->pict_type == FF_B_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type == AV_PICTURE_TYPE_B) {
put_bits(&s->pb, 1, 0); /* half pel coordinates */
if(s->codec_id == CODEC_ID_MPEG1VIDEO)
put_bits(&s->pb, 3, s->f_code); /* forward_f_code */
@@ -363,7 +363,7 @@ void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
}
// RAL: Backward f_code necessary for B frames
- if (s->pict_type == FF_B_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_B) {
put_bits(&s->pb, 1, 0); /* half pel coordinates */
if(s->codec_id == CODEC_ID_MPEG1VIDEO)
put_bits(&s->pb, 3, s->b_code); /* backward_f_code */
@@ -377,13 +377,13 @@ void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
if(s->codec_id == CODEC_ID_MPEG2VIDEO){
put_header(s, EXT_START_CODE);
put_bits(&s->pb, 4, 8); //pic ext
- if (s->pict_type == FF_P_TYPE || s->pict_type == FF_B_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type == AV_PICTURE_TYPE_B) {
put_bits(&s->pb, 4, s->f_code);
put_bits(&s->pb, 4, s->f_code);
}else{
put_bits(&s->pb, 8, 255);
}
- if (s->pict_type == FF_B_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_B) {
put_bits(&s->pb, 4, s->b_code);
put_bits(&s->pb, 4, s->b_code);
}else{
@@ -456,15 +456,15 @@ static av_always_inline void mpeg1_encode_mb_internal(MpegEncContext *s,
if (cbp == 0 && !first_mb && s->mv_type == MV_TYPE_16X16 &&
(mb_x != s->mb_width - 1 || (mb_y != s->mb_height - 1 && s->codec_id == CODEC_ID_MPEG1VIDEO)) &&
- ((s->pict_type == FF_P_TYPE && (motion_x | motion_y) == 0) ||
- (s->pict_type == FF_B_TYPE && s->mv_dir == s->last_mv_dir && (((s->mv_dir & MV_DIR_FORWARD) ? ((s->mv[0][0][0] - s->last_mv[0][0][0])|(s->mv[0][0][1] - s->last_mv[0][0][1])) : 0) |
+ ((s->pict_type == AV_PICTURE_TYPE_P && (motion_x | motion_y) == 0) ||
+ (s->pict_type == AV_PICTURE_TYPE_B && s->mv_dir == s->last_mv_dir && (((s->mv_dir & MV_DIR_FORWARD) ? ((s->mv[0][0][0] - s->last_mv[0][0][0])|(s->mv[0][0][1] - s->last_mv[0][0][1])) : 0) |
((s->mv_dir & MV_DIR_BACKWARD) ? ((s->mv[1][0][0] - s->last_mv[1][0][0])|(s->mv[1][0][1] - s->last_mv[1][0][1])) : 0)) == 0))) {
s->mb_skip_run++;
s->qscale -= s->dquant;
s->skip_count++;
s->misc_bits++;
s->last_bits++;
- if(s->pict_type == FF_P_TYPE){
+ if(s->pict_type == AV_PICTURE_TYPE_P){
s->last_mv[0][1][0]= s->last_mv[0][0][0]=
s->last_mv[0][1][1]= s->last_mv[0][0][1]= 0;
}
@@ -476,7 +476,7 @@ static av_always_inline void mpeg1_encode_mb_internal(MpegEncContext *s,
encode_mb_skip_run(s, s->mb_skip_run);
}
- if (s->pict_type == FF_I_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_I) {
if(s->dquant && cbp){
put_mb_modes(s, 2, 1, 0, 0); /* macroblock_type : macroblock_quant = 1 */
put_qscale(s);
@@ -497,7 +497,7 @@ static av_always_inline void mpeg1_encode_mb_internal(MpegEncContext *s,
s->misc_bits+= get_bits_diff(s);
s->i_count++;
memset(s->last_mv, 0, sizeof(s->last_mv));
- } else if (s->pict_type == FF_P_TYPE) {
+ } else if (s->pict_type == AV_PICTURE_TYPE_P) {
if(s->mv_type == MV_TYPE_16X16){
if (cbp != 0) {
if ((motion_x|motion_y) == 0) {
diff --git a/libavcodec/mpeg4video.c b/libavcodec/mpeg4video.c
index dd4dd8ad95..f4e9a8a1f8 100644
--- a/libavcodec/mpeg4video.c
+++ b/libavcodec/mpeg4video.c
@@ -28,12 +28,12 @@ uint8_t ff_mpeg4_static_rl_table_store[3][2][2*MAX_RUN + MAX_LEVEL + 3];
int ff_mpeg4_get_video_packet_prefix_length(MpegEncContext *s){
switch(s->pict_type){
- case FF_I_TYPE:
+ case AV_PICTURE_TYPE_I:
return 16;
- case FF_P_TYPE:
- case FF_S_TYPE:
+ case AV_PICTURE_TYPE_P:
+ case AV_PICTURE_TYPE_S:
return s->f_code+15;
- case FF_B_TYPE:
+ case AV_PICTURE_TYPE_B:
return FFMAX3(s->f_code, s->b_code, 2) + 15;
default:
return -1;
diff --git a/libavcodec/mpeg4videodec.c b/libavcodec/mpeg4videodec.c
index b293bada32..a9d67f220c 100644
--- a/libavcodec/mpeg4videodec.c
+++ b/libavcodec/mpeg4videodec.c
@@ -118,7 +118,7 @@ static inline int mpeg4_is_resync(MpegEncContext *s){
}
while(v<=0xFF){
- if(s->pict_type==FF_B_TYPE || (v>>(8-s->pict_type)!=1) || s->partitioned_frame)
+ if(s->pict_type==AV_PICTURE_TYPE_B || (v>>(8-s->pict_type)!=1) || s->partitioned_frame)
break;
skip_bits(&s->gb, 8+s->pict_type);
bits_count+= 8+s->pict_type;
@@ -373,7 +373,7 @@ int mpeg4_decode_video_packet_header(MpegEncContext *s)
av_log(s->avctx, AV_LOG_ERROR, "illegal mb_num in video packet (%d %d) \n", mb_num, s->mb_num);
return -1;
}
- if(s->pict_type == FF_B_TYPE){
+ if(s->pict_type == AV_PICTURE_TYPE_B){
int mb_x = 0, mb_y = 0;
while(s->next_picture.mbskip_table[ s->mb_index2xy[ mb_num ] ]) {
@@ -413,20 +413,20 @@ int mpeg4_decode_video_packet_header(MpegEncContext *s)
if(s->shape != BIN_ONLY_SHAPE){
skip_bits(&s->gb, 3); /* intra dc vlc threshold */
//FIXME don't just ignore everything
- if(s->pict_type == FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE){
+ if(s->pict_type == AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE){
mpeg4_decode_sprite_trajectory(s, &s->gb);
av_log(s->avctx, AV_LOG_ERROR, "untested\n");
}
//FIXME reduced res stuff here
- if (s->pict_type != FF_I_TYPE) {
+ if (s->pict_type != AV_PICTURE_TYPE_I) {
int f_code = get_bits(&s->gb, 3); /* fcode_for */
if(f_code==0){
av_log(s->avctx, AV_LOG_ERROR, "Error, video packet header damaged (f_code=0)\n");
}
}
- if (s->pict_type == FF_B_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_B) {
int b_code = get_bits(&s->gb, 3);
if(b_code==0){
av_log(s->avctx, AV_LOG_ERROR, "Error, video packet header damaged (b_code=0)\n");
@@ -555,7 +555,7 @@ static int mpeg4_decode_partition_a(MpegEncContext *s){
if(s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y+1)
s->first_slice_line=0;
- if(s->pict_type==FF_I_TYPE){
+ if(s->pict_type==AV_PICTURE_TYPE_I){
int i;
do{
@@ -604,7 +604,7 @@ try_again:
skip_bits1(&s->gb);
if(bits&0x10000){
/* skip mb */
- if(s->pict_type==FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE){
+ if(s->pict_type==AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE){
s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0;
mx= get_amv(s, 0);
my= get_amv(s, 1);
@@ -645,7 +645,7 @@ try_again:
if(s->mbintra_table[xy])
ff_clean_intra_table_entries(s);
- if(s->pict_type==FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE && (cbpc & 16) == 0)
+ if(s->pict_type==AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE && (cbpc & 16) == 0)
s->mcsel= get_bits1(&s->gb);
else s->mcsel= 0;
@@ -717,7 +717,7 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){
if(s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y+1)
s->first_slice_line=0;
- if(s->pict_type==FF_I_TYPE){
+ if(s->pict_type==AV_PICTURE_TYPE_I){
int ac_pred= get_bits1(&s->gb);
int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
if(cbpy<0){
@@ -791,8 +791,8 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){
int ff_mpeg4_decode_partitions(MpegEncContext *s)
{
int mb_num;
- const int part_a_error= s->pict_type==FF_I_TYPE ? (DC_ERROR|MV_ERROR) : MV_ERROR;
- const int part_a_end = s->pict_type==FF_I_TYPE ? (DC_END |MV_END) : MV_END;
+ const int part_a_error= s->pict_type==AV_PICTURE_TYPE_I ? (DC_ERROR|MV_ERROR) : MV_ERROR;
+ const int part_a_end = s->pict_type==AV_PICTURE_TYPE_I ? (DC_END |MV_END) : MV_END;
mb_num= mpeg4_decode_partition_a(s);
if(mb_num<0){
@@ -808,7 +808,7 @@ int ff_mpeg4_decode_partitions(MpegEncContext *s)
s->mb_num_left= mb_num;
- if(s->pict_type==FF_I_TYPE){
+ if(s->pict_type==AV_PICTURE_TYPE_I){
while(show_bits(&s->gb, 9) == 1)
skip_bits(&s->gb, 9);
if(get_bits_long(&s->gb, 19)!=DC_MARKER){
@@ -826,11 +826,11 @@ int ff_mpeg4_decode_partitions(MpegEncContext *s)
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, part_a_end);
if( mpeg4_decode_partition_b(s, mb_num) < 0){
- if(s->pict_type==FF_P_TYPE)
+ if(s->pict_type==AV_PICTURE_TYPE_P)
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, DC_ERROR);
return -1;
}else{
- if(s->pict_type==FF_P_TYPE)
+ if(s->pict_type==AV_PICTURE_TYPE_P)
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, DC_END);
}
@@ -1101,7 +1101,7 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64])
ff_set_qscale(s, s->current_picture.qscale_table[xy] );
}
- if (s->pict_type == FF_P_TYPE || s->pict_type==FF_S_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
int i;
for(i=0; i<4; i++){
s->mv[0][i][0] = s->current_picture.motion_val[0][ s->block_index[i] ][0];
@@ -1115,7 +1115,7 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64])
s->block_last_index[i] = -1;
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
- if(s->pict_type==FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE){
+ if(s->pict_type==AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE){
s->mcsel=1;
s->mb_skipped = 0;
}else{
@@ -1179,7 +1179,7 @@ static int mpeg4_decode_mb(MpegEncContext *s,
assert(s->h263_pred);
- if (s->pict_type == FF_P_TYPE || s->pict_type==FF_S_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
do{
if (get_bits1(&s->gb)) {
/* skip mb */
@@ -1188,7 +1188,7 @@ static int mpeg4_decode_mb(MpegEncContext *s,
s->block_last_index[i] = -1;
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
- if(s->pict_type==FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE){
+ if(s->pict_type==AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE){
s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0;
s->mcsel=1;
s->mv[0][0][0]= get_amv(s, 0);
@@ -1216,7 +1216,7 @@ static int mpeg4_decode_mb(MpegEncContext *s,
s->mb_intra = ((cbpc & 4) != 0);
if (s->mb_intra) goto intra;
- if(s->pict_type==FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE && (cbpc & 16) == 0)
+ if(s->pict_type==AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE && (cbpc & 16) == 0)
s->mcsel= get_bits1(&s->gb);
else s->mcsel= 0;
cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1) ^ 0x0F;
@@ -1295,7 +1295,7 @@ static int mpeg4_decode_mb(MpegEncContext *s,
mot_val[1] = my;
}
}
- } else if(s->pict_type==FF_B_TYPE) {
+ } else if(s->pict_type==AV_PICTURE_TYPE_B) {
int modb1; // first bit of modb
int modb2; // second bit of modb
int mb_type;
@@ -1492,12 +1492,12 @@ end:
if(mpeg4_is_resync(s)){
const int delta= s->mb_x + 1 == s->mb_width ? 2 : 1;
- if(s->pict_type==FF_B_TYPE){
+ if(s->pict_type==AV_PICTURE_TYPE_B){
ff_thread_await_progress((AVFrame*)s->next_picture_ptr,
(s->mb_x + delta >= s->mb_width) ? FFMIN(s->mb_y+1, s->mb_height-1) : s->mb_y, 0);
}
- if(s->pict_type==FF_B_TYPE && s->next_picture.mbskip_table[xy + delta])
+ if(s->pict_type==AV_PICTURE_TYPE_B && s->next_picture.mbskip_table[xy + delta])
return SLICE_OK;
return SLICE_END;
}
@@ -1897,13 +1897,13 @@ static int decode_user_data(MpegEncContext *s, GetBitContext *gb){
static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
int time_incr, time_increment;
- s->pict_type = get_bits(gb, 2) + FF_I_TYPE; /* pict type: I = 0 , P = 1 */
- if(s->pict_type==FF_B_TYPE && s->low_delay && s->vol_control_parameters==0 && !(s->flags & CODEC_FLAG_LOW_DELAY)){
+ s->pict_type = get_bits(gb, 2) + AV_PICTURE_TYPE_I; /* pict type: I = 0 , P = 1 */
+ if(s->pict_type==AV_PICTURE_TYPE_B && s->low_delay && s->vol_control_parameters==0 && !(s->flags & CODEC_FLAG_LOW_DELAY)){
av_log(s->avctx, AV_LOG_ERROR, "low_delay flag incorrectly, clearing it\n");
s->low_delay=0;
}
- s->partitioned_frame= s->data_partitioning && s->pict_type!=FF_B_TYPE;
+ s->partitioned_frame= s->data_partitioning && s->pict_type!=AV_PICTURE_TYPE_B;
if(s->partitioned_frame)
s->decode_mb= mpeg4_decode_partitioned_mb;
else
@@ -1919,8 +1919,8 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
av_log(s->avctx, AV_LOG_ERROR, "hmm, seems the headers are not complete, trying to guess time_increment_bits\n");
for(s->time_increment_bits=1 ;s->time_increment_bits<16; s->time_increment_bits++){
- if ( s->pict_type == FF_P_TYPE
- || (s->pict_type == FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE)) {
+ if ( s->pict_type == AV_PICTURE_TYPE_P
+ || (s->pict_type == AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE)) {
if((show_bits(gb, s->time_increment_bits+6)&0x37) == 0x30) break;
}else
if((show_bits(gb, s->time_increment_bits+5)&0x1F) == 0x18) break;
@@ -1932,7 +1932,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
if(IS_3IV1) time_increment= get_bits1(gb); //FIXME investigate further
else time_increment= get_bits(gb, s->time_increment_bits);
- if(s->pict_type!=FF_B_TYPE){
+ if(s->pict_type!=AV_PICTURE_TYPE_B){
s->last_time_base= s->time_base;
s->time_base+= time_incr;
s->time= s->time_base*s->avctx->time_base.den + time_increment;
@@ -1982,8 +1982,8 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
av_log(s->avctx, AV_LOG_ERROR, "vop not coded\n");
return FRAME_SKIPPED;
}
- if (s->shape != BIN_ONLY_SHAPE && ( s->pict_type == FF_P_TYPE
- || (s->pict_type == FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE))) {
+ if (s->shape != BIN_ONLY_SHAPE && ( s->pict_type == AV_PICTURE_TYPE_P
+ || (s->pict_type == AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE))) {
/* rounding type for motion estimation */
s->no_rounding = get_bits1(gb);
} else {
@@ -1992,7 +1992,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
//FIXME reduced res stuff
if (s->shape != RECT_SHAPE) {
- if (s->vol_sprite_usage != 1 || s->pict_type != FF_I_TYPE) {
+ if (s->vol_sprite_usage != 1 || s->pict_type != AV_PICTURE_TYPE_I) {
int width, height, hor_spat_ref, ver_spat_ref;
width = get_bits(gb, 13);
@@ -2013,9 +2013,9 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
if (s->shape != BIN_ONLY_SHAPE) {
skip_bits_long(gb, s->cplx_estimation_trash_i);
- if(s->pict_type != FF_I_TYPE)
+ if(s->pict_type != AV_PICTURE_TYPE_I)
skip_bits_long(gb, s->cplx_estimation_trash_p);
- if(s->pict_type == FF_B_TYPE)
+ if(s->pict_type == AV_PICTURE_TYPE_B)
skip_bits_long(gb, s->cplx_estimation_trash_b);
s->intra_dc_threshold= mpeg4_dc_threshold[ get_bits(gb, 3) ];
@@ -2038,7 +2038,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
}
- if(s->pict_type == FF_S_TYPE && (s->vol_sprite_usage==STATIC_SPRITE || s->vol_sprite_usage==GMC_SPRITE)){
+ if(s->pict_type == AV_PICTURE_TYPE_S && (s->vol_sprite_usage==STATIC_SPRITE || s->vol_sprite_usage==GMC_SPRITE)){
mpeg4_decode_sprite_trajectory(s, gb);
if(s->sprite_brightness_change) av_log(s->avctx, AV_LOG_ERROR, "sprite_brightness_change not supported\n");
if(s->vol_sprite_usage==STATIC_SPRITE) av_log(s->avctx, AV_LOG_ERROR, "static sprite not supported\n");
@@ -2051,7 +2051,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
return -1; // makes no sense to continue, as there is nothing left from the image then
}
- if (s->pict_type != FF_I_TYPE) {
+ if (s->pict_type != AV_PICTURE_TYPE_I) {
s->f_code = get_bits(gb, 3); /* fcode_for */
if(s->f_code==0){
av_log(s->avctx, AV_LOG_ERROR, "Error, header damaged or not MPEG4 header (f_code=0)\n");
@@ -2060,7 +2060,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
}else
s->f_code=1;
- if (s->pict_type == FF_B_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_B) {
s->b_code = get_bits(gb, 3);
}else
s->b_code=1;
@@ -2068,14 +2068,14 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
if(s->avctx->debug&FF_DEBUG_PICT_INFO){
av_log(s->avctx, AV_LOG_DEBUG, "qp:%d fc:%d,%d %s size:%d pro:%d alt:%d top:%d %spel part:%d resync:%d w:%d a:%d rnd:%d vot:%d%s dc:%d ce:%d/%d/%d\n",
s->qscale, s->f_code, s->b_code,
- s->pict_type == FF_I_TYPE ? "I" : (s->pict_type == FF_P_TYPE ? "P" : (s->pict_type == FF_B_TYPE ? "B" : "S")),
+ s->pict_type == AV_PICTURE_TYPE_I ? "I" : (s->pict_type == AV_PICTURE_TYPE_P ? "P" : (s->pict_type == AV_PICTURE_TYPE_B ? "B" : "S")),
gb->size_in_bits,s->progressive_sequence, s->alternate_scan, s->top_field_first,
s->quarter_sample ? "q" : "h", s->data_partitioning, s->resync_marker, s->num_sprite_warping_points,
s->sprite_warping_accuracy, 1-s->no_rounding, s->vo_type, s->vol_control_parameters ? " VOLC" : " ", s->intra_dc_threshold, s->cplx_estimation_trash_i, s->cplx_estimation_trash_p, s->cplx_estimation_trash_b);
}
if(!s->scalability){
- if (s->shape!=RECT_SHAPE && s->pict_type!=FF_I_TYPE) {
+ if (s->shape!=RECT_SHAPE && s->pict_type!=AV_PICTURE_TYPE_I) {
skip_bits1(gb); // vop shape coding type
}
}else{
diff --git a/libavcodec/mpeg4videoenc.c b/libavcodec/mpeg4videoenc.c
index f6a18b77cc..bdff535a99 100644
--- a/libavcodec/mpeg4videoenc.c
+++ b/libavcodec/mpeg4videoenc.c
@@ -205,7 +205,7 @@ void ff_clean_mpeg4_qscales(MpegEncContext *s){
ff_clean_h263_qscales(s);
- if(s->pict_type== FF_B_TYPE){
+ if(s->pict_type== AV_PICTURE_TYPE_B){
int odd=0;
/* ok, come on, this isn't funny anymore, there's more code for handling this mpeg4 mess than for the actual adaptive quantization */
@@ -497,14 +497,14 @@ void mpeg4_encode_mb(MpegEncContext * s,
{
int cbpc, cbpy, pred_x, pred_y;
PutBitContext * const pb2 = s->data_partitioning ? &s->pb2 : &s->pb;
- PutBitContext * const tex_pb = s->data_partitioning && s->pict_type!=FF_B_TYPE ? &s->tex_pb : &s->pb;
- PutBitContext * const dc_pb = s->data_partitioning && s->pict_type!=FF_I_TYPE ? &s->pb2 : &s->pb;
+ PutBitContext * const tex_pb = s->data_partitioning && s->pict_type!=AV_PICTURE_TYPE_B ? &s->tex_pb : &s->pb;
+ PutBitContext * const dc_pb = s->data_partitioning && s->pict_type!=AV_PICTURE_TYPE_I ? &s->pb2 : &s->pb;
const int interleaved_stats= (s->flags&CODEC_FLAG_PASS1) && !s->data_partitioning ? 1 : 0;
if (!s->mb_intra) {
int i, cbp;
- if(s->pict_type==FF_B_TYPE){
+ if(s->pict_type==AV_PICTURE_TYPE_B){
static const int mb_type_table[8]= {-1, 3, 2, 1,-1,-1,-1, 0}; /* convert from mv_dir to type */
int mb_type= mb_type_table[s->mv_dir];
@@ -637,7 +637,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
s->p_tex_bits+= get_bits_diff(s);
}
- }else{ /* s->pict_type==FF_B_TYPE */
+ }else{ /* s->pict_type==AV_PICTURE_TYPE_B */
cbp= get_p_cbp(s, block, motion_x, motion_y);
if ((cbp | motion_x | motion_y | s->dquant) == 0 && s->mv_type==MV_TYPE_16X16) {
@@ -660,7 +660,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
int diff;
Picture *pic= s->reordered_input_picture[i+1];
- if(pic==NULL || pic->pict_type!=FF_B_TYPE) break;
+ if(pic==NULL || pic->pict_type!=AV_PICTURE_TYPE_B) break;
b_pic= pic->data[0] + offset;
if(pic->type != FF_BUFFER_TYPE_SHARED)
@@ -824,7 +824,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
}
cbpc = cbp & 3;
- if (s->pict_type == FF_I_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_I) {
if(s->dquant) cbpc+=4;
put_bits(&s->pb,
ff_h263_intra_MCBPC_bits[cbpc],
@@ -876,11 +876,11 @@ void ff_mpeg4_stuffing(PutBitContext * pbc)
/* must be called before writing the header */
void ff_set_mpeg4_time(MpegEncContext * s){
- if(s->pict_type==FF_B_TYPE){
+ if(s->pict_type==AV_PICTURE_TYPE_B){
ff_mpeg4_init_direct_mv(s);
}else{
s->last_time_base= s->time_base;
- s->time_base= s->time/s->avctx->time_base.den;
+ s->time_base= FFUDIV(s->time, s->avctx->time_base.den);
}
}
@@ -895,11 +895,12 @@ static void mpeg4_encode_gop_header(MpegEncContext * s){
if(s->reordered_input_picture[1])
time= FFMIN(time, s->reordered_input_picture[1]->pts);
time= time*s->avctx->time_base.num;
+ s->last_time_base= FFUDIV(time, s->avctx->time_base.den);
- seconds= time/s->avctx->time_base.den;
- minutes= seconds/60; seconds %= 60;
- hours= minutes/60; minutes %= 60;
- hours%=24;
+ seconds= FFUDIV(time, s->avctx->time_base.den);
+ minutes= FFUDIV(seconds, 60); seconds= FFUMOD(seconds, 60);
+ hours = FFUDIV(minutes, 60); minutes= FFUMOD(minutes, 60);
+ hours = FFUMOD(hours , 24);
put_bits(&s->pb, 5, hours);
put_bits(&s->pb, 6, minutes);
@@ -909,8 +910,6 @@ static void mpeg4_encode_gop_header(MpegEncContext * s){
put_bits(&s->pb, 1, !!(s->flags&CODEC_FLAG_CLOSED_GOP));
put_bits(&s->pb, 1, 0); //broken link == NO
- s->last_time_base= time / s->avctx->time_base.den;
-
ff_mpeg4_stuffing(&s->pb);
}
@@ -1066,7 +1065,7 @@ void mpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
int time_incr;
int time_div, time_mod;
- if(s->pict_type==FF_I_TYPE){
+ if(s->pict_type==AV_PICTURE_TYPE_I){
if(!(s->flags&CODEC_FLAG_GLOBAL_HEADER)){
if(s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT) //HACK, the reference sw is buggy
mpeg4_encode_visual_object_header(s);
@@ -1077,15 +1076,14 @@ void mpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
mpeg4_encode_gop_header(s);
}
- s->partitioned_frame= s->data_partitioning && s->pict_type!=FF_B_TYPE;
+ s->partitioned_frame= s->data_partitioning && s->pict_type!=AV_PICTURE_TYPE_B;
put_bits(&s->pb, 16, 0); /* vop header */
put_bits(&s->pb, 16, VOP_STARTCODE); /* vop header */
put_bits(&s->pb, 2, s->pict_type - 1); /* pict type: I = 0 , P = 1 */
- assert(s->time>=0);
- time_div= s->time/s->avctx->time_base.den;
- time_mod= s->time%s->avctx->time_base.den;
+ time_div= FFUDIV(s->time, s->avctx->time_base.den);
+ time_mod= FFUMOD(s->time, s->avctx->time_base.den);
time_incr= time_div - s->last_time_base;
assert(time_incr >= 0);
while(time_incr--)
@@ -1097,8 +1095,8 @@ void mpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
put_bits(&s->pb, s->time_increment_bits, time_mod); /* time increment */
put_bits(&s->pb, 1, 1); /* marker */
put_bits(&s->pb, 1, 1); /* vop coded */
- if ( s->pict_type == FF_P_TYPE
- || (s->pict_type == FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE)) {
+ if ( s->pict_type == AV_PICTURE_TYPE_P
+ || (s->pict_type == AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE)) {
put_bits(&s->pb, 1, s->no_rounding); /* rounding type */
}
put_bits(&s->pb, 3, 0); /* intra dc VLC threshold */
@@ -1110,9 +1108,9 @@ void mpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
put_bits(&s->pb, 5, s->qscale);
- if (s->pict_type != FF_I_TYPE)
+ if (s->pict_type != AV_PICTURE_TYPE_I)
put_bits(&s->pb, 3, s->f_code); /* fcode_for */
- if (s->pict_type == FF_B_TYPE)
+ if (s->pict_type == AV_PICTURE_TYPE_B)
put_bits(&s->pb, 3, s->b_code); /* fcode_back */
}
@@ -1317,7 +1315,7 @@ void ff_mpeg4_merge_partitions(MpegEncContext *s)
const int tex_pb_len= put_bits_count(&s->tex_pb);
const int bits= put_bits_count(&s->pb);
- if(s->pict_type==FF_I_TYPE){
+ if(s->pict_type==AV_PICTURE_TYPE_I){
put_bits(&s->pb, 19, DC_MARKER);
s->misc_bits+=19 + pb2_len + bits - s->last_bits;
s->i_tex_bits+= tex_pb_len;
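
The FFUDIV/FFUMOD conversions in the timing code above replace C's '/' and '%', which truncate toward zero, with what is assumed here to be floored division and its matching non-negative remainder, so the GOP and VOP clocks stay consistent when s->time goes negative. A worked example under that assumption, for time = -1 and time_base.den = 25:

    /*   -1 / 25        ==  0    (C truncation toward zero)    */
    /*   -1 % 25        == -1                                   */
    /*   FFUDIV(-1, 25) == -1    (floor)                        */
    /*   FFUMOD(-1, 25) == 24    (non-negative remainder)       */
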
diff --git a/libavcodec/mpegaudio.h b/libavcodec/mpegaudio.h
index 4d78566463..30ef349399 100644
--- a/libavcodec/mpegaudio.h
+++ b/libavcodec/mpegaudio.h
@@ -58,12 +58,9 @@
#define MP3_MASK 0xFFFE0CCF
-#if CONFIG_MPEGAUDIO_HP
+#ifndef FRAC_BITS
#define FRAC_BITS 23 /* fractional bits for sb_samples and dct */
#define WFRAC_BITS 16 /* fractional bits for window */
-#else
-#define FRAC_BITS 15 /* fractional bits for sb_samples and dct */
-#define WFRAC_BITS 14 /* fractional bits for window */
#endif
#define FRAC_ONE (1 << FRAC_BITS)
@@ -150,9 +147,6 @@ typedef struct MPADecodeContext {
DECLARE_ALIGNED(16, INTFLOAT, sb_samples)[MPA_MAX_CHANNELS][36][SBLIMIT];
INTFLOAT mdct_buf[MPA_MAX_CHANNELS][SBLIMIT * 18]; /* previous samples, for layer 3 MDCT */
GranuleDef granules[2][2]; /* Used in Layer 3 */
-#ifdef DEBUG
- int frame_count;
-#endif
int adu_mode; ///< 0 for standard mp3, 1 for adu formatted mp3
int dither_state;
int error_recognition;
diff --git a/libavcodec/mpegaudio_tablegen.h b/libavcodec/mpegaudio_tablegen.h
index 6b5ff2280e..01c4174a60 100644
--- a/libavcodec/mpegaudio_tablegen.h
+++ b/libavcodec/mpegaudio_tablegen.h
@@ -38,6 +38,8 @@ static uint32_t expval_table[512][16];
static float exp_table_float[512];
static float expval_table_float[512][16];
+#define FRAC_BITS 23
+
static void mpegaudio_tableinit(void)
{
int i, value, exponent;
diff --git a/libavcodec/mpegaudiodec.c b/libavcodec/mpegaudiodec.c
index 5d420dca3e..b1c9ef93a0 100644
--- a/libavcodec/mpegaudiodec.c
+++ b/libavcodec/mpegaudiodec.c
@@ -28,18 +28,16 @@
#include "avcodec.h"
#include "get_bits.h"
#include "dsputil.h"
+#include "mathops.h"
/*
* TODO:
- * - in low precision mode, use more 16 bit multiplies in synth filter
* - test lsf / mpeg25 extensively.
*/
#include "mpegaudio.h"
#include "mpegaudiodecheader.h"
-#include "mathops.h"
-
#if CONFIG_FLOAT
# define SHR(a,b) ((a)*(1.0f/(1<<(b))))
# define compute_antialias compute_antialias_float
@@ -248,14 +246,6 @@ static inline int l3_unscale(int value, int exponent)
static int dev_4_3_coefs[DEV_ORDER];
-#if 0 /* unused */
-static int pow_mult3[3] = {
- POW_FIX(1.0),
- POW_FIX(1.25992104989487316476),
- POW_FIX(1.58740105196819947474),
-};
-#endif
-
static av_cold void int_pow_init(void)
{
int i, a;
@@ -267,53 +257,6 @@ static av_cold void int_pow_init(void)
}
}
-#if 0 /* unused, remove? */
-/* return the mantissa and the binary exponent */
-static int int_pow(int i, int *exp_ptr)
-{
- int e, er, eq, j;
- int a, a1;
-
- /* renormalize */
- a = i;
- e = POW_FRAC_BITS;
- while (a < (1 << (POW_FRAC_BITS - 1))) {
- a = a << 1;
- e--;
- }
- a -= (1 << POW_FRAC_BITS);
- a1 = 0;
- for(j = DEV_ORDER - 1; j >= 0; j--)
- a1 = POW_MULL(a, dev_4_3_coefs[j] + a1);
- a = (1 << POW_FRAC_BITS) + a1;
- /* exponent compute (exact) */
- e = e * 4;
- er = e % 3;
- eq = e / 3;
- a = POW_MULL(a, pow_mult3[er]);
- while (a >= 2 * POW_FRAC_ONE) {
- a = a >> 1;
- eq++;
- }
- /* convert to float */
- while (a < POW_FRAC_ONE) {
- a = a << 1;
- eq--;
- }
- /* now POW_FRAC_ONE <= a < 2 * POW_FRAC_ONE */
-#if POW_FRAC_BITS > FRAC_BITS
- a = (a + (1 << (POW_FRAC_BITS - FRAC_BITS - 1))) >> (POW_FRAC_BITS - FRAC_BITS);
- /* correct overflow */
- if (a >= 2 * (1 << FRAC_BITS)) {
- a = a >> 1;
- eq++;
- }
-#endif
- *exp_ptr = eq;
- return a;
-}
-#endif
-
static av_cold int decode_init(AVCodecContext * avctx)
{
MPADecodeContext *s = avctx->priv_data;
@@ -540,24 +483,6 @@ static inline float round_sample(float *sum)
#define MLSS(rt, ra, rb) rt-=(ra)*(rb)
-#elif FRAC_BITS <= 15
-
-static inline int round_sample(int *sum)
-{
- int sum1;
- sum1 = (*sum) >> OUT_SHIFT;
- *sum &= (1<<OUT_SHIFT)-1;
- return av_clip(sum1, OUT_MIN, OUT_MAX);
-}
-
-/* signed 16x16 -> 32 multiply add accumulate */
-#define MACS(rt, ra, rb) MAC16(rt, ra, rb)
-
-/* signed 16x16 -> 32 multiply */
-#define MULS(ra, rb) MUL16(ra, rb)
-
-#define MLSS(rt, ra, rb) MLS16(rt, ra, rb)
-
#else
static inline int round_sample(int64_t *sum)
@@ -624,8 +549,6 @@ void av_cold RENAME(ff_mpa_synth_init)(MPA_INT *window)
v = ff_mpa_enwindow[i];
#if CONFIG_FLOAT
v *= 1.0 / (1LL<<(16 + FRAC_BITS));
-#elif WFRAC_BITS < 16
- v = (v + (1 << (16 - WFRAC_BITS - 1))) >> (16 - WFRAC_BITS);
#endif
window[i] = v;
if ((i & 63) != 0)
@@ -652,8 +575,6 @@ static void apply_window_mp3_c(MPA_INT *synth_buf, MPA_INT *window,
OUT_INT *samples2;
#if CONFIG_FLOAT
float sum, sum2;
-#elif FRAC_BITS <= 15
- int sum, sum2;
#else
int64_t sum, sum2;
#endif
@@ -710,25 +631,11 @@ void ff_mpa_synth_filter(MPA_INT *synth_buf_ptr, int *synth_buf_offset,
{
register MPA_INT *synth_buf;
int offset;
-#if FRAC_BITS <= 15
- int32_t tmp[32];
- int j;
-#endif
offset = *synth_buf_offset;
synth_buf = synth_buf_ptr + offset;
-#if FRAC_BITS <= 15
- dct32(tmp, sb_samples);
- for(j=0;j<32;j++) {
- /* NOTE: can cause a loss in precision if very high amplitude
- sound */
- synth_buf[j] = av_clip_int16(tmp[j]);
- }
-#else
dct32(synth_buf, sb_samples);
-#endif
-
apply_window_mp3_c(synth_buf, window, dither_state, samples, incr);
offset = (offset - 32) & 511;
@@ -1965,7 +1872,6 @@ static int mp_decode_frame(MPADecodeContext *s,
if (s->error_protection)
skip_bits(&s->gb, 16);
- av_dlog(s->avctx, "frame %d:\n", s->frame_count);
switch(s->layer) {
case 1:
s->avctx->frame_size = 384;
diff --git a/libavcodec/mpegaudioenc.c b/libavcodec/mpegaudioenc.c
index de2a336e34..515da6f670 100644
--- a/libavcodec/mpegaudioenc.c
+++ b/libavcodec/mpegaudioenc.c
@@ -27,8 +27,9 @@
#include "avcodec.h"
#include "put_bits.h"
-#undef CONFIG_MPEGAUDIO_HP
-#define CONFIG_MPEGAUDIO_HP 0
+#define FRAC_BITS 15 /* fractional bits for sb_samples and dct */
+#define WFRAC_BITS 14 /* fractional bits for window */
+
#include "mpegaudio.h"
/* currently, cannot change these constants (need to modify
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index 819f58dc95..53230cf6d8 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -313,8 +313,8 @@ int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
/* It might be nicer if the application would keep track of these
* but it would require an API change. */
memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
- s->prev_pict_types[0]= s->dropable ? FF_B_TYPE : s->pict_type;
- if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == FF_B_TYPE)
+ s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
+ if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == AV_PICTURE_TYPE_B)
pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
pic->owner2 = s;
@@ -527,7 +527,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src
s->last_pict_type= s1->pict_type;
if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->quality;
- if(s1->pict_type!=FF_B_TYPE){
+ if(s1->pict_type!=AV_PICTURE_TYPE_B){
s->last_non_b_pict_type= s1->pict_type;
}
}
@@ -680,6 +680,7 @@ av_cold int MPV_common_init(MpegEncContext *s)
FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
}
}
+
s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, s->picture_count * sizeof(Picture), fail)
for(i = 0; i < s->picture_count; i++) {
@@ -754,7 +755,7 @@ av_cold int MPV_common_init(MpegEncContext *s)
for(i=0; i<threads; i++){
if(init_duplicate_context(s->thread_context[i], s) < 0)
- goto fail;
+ goto fail;
s->thread_context[i]->start_mb_y= (s->mb_height*(i ) + s->avctx->thread_count/2) / s->avctx->thread_count;
s->thread_context[i]->end_mb_y = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
}
@@ -1023,7 +1024,7 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
/* mark&release old frames */
- if (s->pict_type != FF_B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
+ if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
free_frame_buffer(s, s->last_picture_ptr);
@@ -1054,7 +1055,7 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
if (!s->dropable){
if (s->codec_id == CODEC_ID_H264)
pic->reference = s->picture_structure;
- else if (s->pict_type != FF_B_TYPE)
+ else if (s->pict_type != AV_PICTURE_TYPE_B)
pic->reference = 3;
}
@@ -1077,11 +1078,11 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
s->current_picture_ptr->pict_type= s->pict_type;
// if(s->flags && CODEC_FLAG_QSCALE)
// s->current_picture_ptr->quality= s->new_picture_ptr->quality;
- s->current_picture_ptr->key_frame= s->pict_type == FF_I_TYPE;
+ s->current_picture_ptr->key_frame= s->pict_type == AV_PICTURE_TYPE_I;
ff_copy_picture(&s->current_picture, s->current_picture_ptr);
- if (s->pict_type != FF_B_TYPE) {
+ if (s->pict_type != AV_PICTURE_TYPE_B) {
s->last_picture_ptr= s->next_picture_ptr;
if(!s->dropable)
s->next_picture_ptr= s->current_picture_ptr;
@@ -1093,8 +1094,13 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
s->pict_type, s->dropable);*/
if(s->codec_id != CODEC_ID_H264){
- if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && s->pict_type!=FF_I_TYPE){
- av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
+ if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) &&
+ (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
+ if (s->pict_type != AV_PICTURE_TYPE_I)
+ av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
+ else if (s->picture_structure != PICT_FRAME)
+ av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");
+
/* Allocate a dummy frame */
i= ff_find_unused_picture(s, 0);
s->last_picture_ptr= &s->picture[i];
@@ -1103,7 +1109,7 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
}
- if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==FF_B_TYPE){
+ if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==AV_PICTURE_TYPE_B){
/* Allocate a dummy frame */
i= ff_find_unused_picture(s, 0);
s->next_picture_ptr= &s->picture[i];
@@ -1117,7 +1123,7 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
- assert(s->pict_type == FF_I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
+ assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
int i;
@@ -1188,7 +1194,7 @@ void MPV_frame_end(MpegEncContext *s)
s->last_pict_type = s->pict_type;
s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
- if(s->pict_type!=FF_B_TYPE){
+ if(s->pict_type!=AV_PICTURE_TYPE_B){
s->last_non_b_pict_type= s->pict_type;
}
#if 0
@@ -1317,12 +1323,12 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
switch (pict->pict_type) {
- case FF_I_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
- case FF_P_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
- case FF_B_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
- case FF_S_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
- case FF_SI_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
- case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
+ case AV_PICTURE_TYPE_I: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
+ case AV_PICTURE_TYPE_P: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
+ case AV_PICTURE_TYPE_B: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
+ case AV_PICTURE_TYPE_S: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
+ case AV_PICTURE_TYPE_SI: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
+ case AV_PICTURE_TYPE_SP: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
}
for(y=0; y<s->mb_height; y++){
for(x=0; x<s->mb_width; x++){
@@ -1418,15 +1424,15 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
for(type=0; type<3; type++){
int direction = 0;
switch (type) {
- case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=FF_P_TYPE))
+ case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
continue;
direction = 0;
break;
- case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=FF_B_TYPE))
+ case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
continue;
direction = 0;
break;
- case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=FF_B_TYPE))
+ case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
continue;
direction = 1;
break;
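For reference, the three cases above map one visualisation flag each to a picture type and prediction direction: forward vectors of P frames, forward vectors of B frames, and backward vectors of B frames (direction 1 being the backward set). An application requests the overlays through AVCodecContext.debug_mv; a minimal sketch, using only the flag names tested in this hunk:

    #include <libavcodec/avcodec.h>

    /* Ask the decoder to draw all three kinds of motion vectors. */
    static void enable_mv_overlay(AVCodecContext *avctx)
    {
        avctx->debug_mv = FF_DEBUG_VIS_MV_P_FOR   /* forward MVs, P frames  */
                        | FF_DEBUG_VIS_MV_B_FOR   /* forward MVs, B frames  */
                        | FF_DEBUG_VIS_MV_B_BACK; /* backward MVs, B frames */
    }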
@@ -1620,7 +1626,7 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
uint8_t *ptr_y, *ptr_cb, *ptr_cr;
int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
const int lowres= s->avctx->lowres;
- const int op_index= FFMIN(lowres, 2);
+ const int op_index= FFMIN(lowres-1+s->chroma_x_shift, 2);
const int block_s= 8>>lowres;
const int s_mask= (2<<lowres)-1;
const int h_edge_pos = s->h_edge_pos >> lowres;
@@ -1655,12 +1661,29 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
uvsrc_x = s->mb_x*block_s + (mx >> lowres);
uvsrc_y = mb_y*block_s + (my >> lowres);
} else {
- mx = motion_x / 2;
- my = motion_y / 2;
- uvsx = mx & s_mask;
- uvsy = my & s_mask;
- uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
- uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
+ if(s->chroma_y_shift){
+ mx = motion_x / 2;
+ my = motion_y / 2;
+ uvsx = mx & s_mask;
+ uvsy = my & s_mask;
+ uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
+ uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
+ } else {
+ if(s->chroma_x_shift){
+ //Chroma422
+ mx = motion_x / 2;
+ uvsx = mx & s_mask;
+ uvsy = motion_y & s_mask;
+ uvsrc_y = src_y;
+ uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
+ } else {
+ //Chroma444
+ uvsx = motion_x & s_mask;
+ uvsy = motion_y & s_mask;
+ uvsrc_x = src_x;
+ uvsrc_y = src_y;
+ }
+ }
}
ptr_y = ref_picture[0] + src_y * linesize + src_x;
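The else branch used to assume 4:2:0 and halved both components of the chroma vector; it now branches on the chroma shifts, so 4:2:2 halves only the horizontal component and keeps the luma row, while 4:4:4 reuses the luma coordinates unchanged. The op_index change at the top of this function serves the same purpose: it selects the put/avg routine whose width matches the actual chroma block. A minimal full-resolution model of the addressing (lowres scaling, interlacing and edge clamping omitted; names are illustrative, not from the patch):

    /* Derive the chroma source position and half-pel phase from a luma
     * motion vector, given the chroma subsampling shifts.  motion_x/y
     * are in half-pel units, as in the code above. */
    typedef struct { int src_x, src_y, phase_x, phase_y; } ChromaPos;

    static ChromaPos chroma_pos(int mb_x, int mb_y, int motion_x, int motion_y,
                                int chroma_x_shift, int chroma_y_shift)
    {
        int cmx = chroma_x_shift ? motion_x / 2 : motion_x; /* 4:2:0 and 4:2:2 halve x */
        int cmy = chroma_y_shift ? motion_y / 2 : motion_y; /* only 4:2:0 halves y     */
        ChromaPos c;
        c.phase_x = cmx & 1;
        c.phase_y = cmy & 1;
        c.src_x   = mb_x * (16 >> chroma_x_shift) + (cmx >> 1);
        c.src_y   = mb_y * (16 >> chroma_y_shift) + (cmy >> 1);
        return c;
    }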
@@ -1822,7 +1845,7 @@ static inline void MPV_motion_lowres(MpegEncContext *s,
ref_picture, pix_op,
s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
} else {
- if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){
+ if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
ref_picture= s->current_picture_ptr->data;
}
@@ -1836,7 +1859,7 @@ static inline void MPV_motion_lowres(MpegEncContext *s,
for(i=0; i<2; i++){
uint8_t ** ref2picture;
- if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == FF_B_TYPE || s->first_field){
+ if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
ref2picture= ref_picture;
}else{
ref2picture= s->current_picture_ptr->data;
@@ -2032,14 +2055,14 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
s->mbintra_table[mb_xy]=1;
- if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==FF_B_TYPE) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
+ if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
uint8_t *dest_y, *dest_cb, *dest_cr;
int dct_linesize, dct_offset;
op_pixels_func (*op_pix)[4];
qpel_mc_func (*op_qpix)[16];
const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
const int uvlinesize= s->current_picture.linesize[1];
- const int readable= s->pict_type != FF_B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
+ const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
/* avoid copy if macroblock skipped in last frame too */
@@ -2052,7 +2075,7 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
if (s->mb_skipped) {
s->mb_skipped= 0;
- assert(s->pict_type!=FF_I_TYPE);
+ assert(s->pict_type!=AV_PICTURE_TYPE_I);
(*mbskip_ptr) ++; /* indicate that this time we skipped it */
if(*mbskip_ptr >99) *mbskip_ptr= 99;
@@ -2108,7 +2131,7 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
}
}else{
op_qpix= s->me.qpel_put;
- if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){
+ if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
op_pix = s->dsp.put_pixels_tab;
}else{
op_pix = s->dsp.put_no_rnd_pixels_tab;
@@ -2129,8 +2152,8 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
if(s->hurry_up>1) goto skip_idct;
#endif
if(s->avctx->skip_idct){
- if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
- ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
+ if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
+ ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
|| s->avctx->skip_idct >= AVDISCARD_ALL)
goto skip_idct;
}
@@ -2169,17 +2192,17 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
}else{
//chroma422
dct_linesize = uvlinesize << s->interlaced_dct;
- dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
+ dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;
add_dct(s, block[4], 4, dest_cb, dct_linesize);
add_dct(s, block[5], 5, dest_cr, dct_linesize);
add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
if(!s->chroma_x_shift){//Chroma444
- add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
- add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
- add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
- add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
+ add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
+ add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
+ add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
+ add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
}
}
}//fi gray
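The literal 8 in these offsets assumed full-resolution 8x8 blocks; under lowres decoding a coded block only covers 8>>lowres output pixels, so both the step down to the second row of chroma blocks and the step right to the extra 4:4:4 chroma blocks have to use block_size. A sketch of the arithmetic for the progressive case (at lowres 1, for instance, the row offset becomes uvlinesize*4 and the 4:4:4 neighbour starts 4 pixels to the right):

    /* Offsets between the chroma blocks of one macroblock at a given
     * lowres level (progressive case; interlaced DCT switches to a
     * doubled stride instead, as in the code above). */
    static void chroma_block_offsets(int lowres, int uvlinesize,
                                     int *row_offset, int *col_offset)
    {
        int block_size = 8 >> lowres;           /* 8, 4, 2, 1                     */
        *row_offset = uvlinesize * block_size;  /* down to the lower chroma blocks */
        *col_offset = block_size;               /* right to the 4:4:4 chroma blocks */
    }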
@@ -2221,17 +2244,17 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
}else{
dct_linesize = uvlinesize << s->interlaced_dct;
- dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
+ dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;
s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
if(!s->chroma_x_shift){//Chroma444
- s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
- s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
- s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
- s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
+ s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
+ s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
+ s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
+ s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
}
}
}//gray
@@ -2293,14 +2316,14 @@ void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
AVFrame *src;
int offset[4];
- if(s->pict_type==FF_B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
+ if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
src= (AVFrame*)s->current_picture_ptr;
else if(s->last_picture_ptr)
src= (AVFrame*)s->last_picture_ptr;
else
return;
- if(s->pict_type==FF_B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
+ if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
offset[0]=
offset[1]=
offset[2]=
@@ -2336,7 +2359,7 @@ void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
- if(!(s->pict_type==FF_B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
+ if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
{
if(s->picture_structure==PICT_FRAME){
s->dest[0] += s->mb_y * linesize << mb_size;
@@ -2615,6 +2638,6 @@ void ff_set_qscale(MpegEncContext * s, int qscale)
void MPV_report_decode_progress(MpegEncContext *s)
{
- if (s->pict_type != FF_B_TYPE && !s->partitioned_frame)
+ if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame)
ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);
}
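Most of the churn in this file is the mechanical rename from the deprecated FF_*_TYPE macros in avcodec.h to the AVPictureType enum in libavutil. The numeric values are identical, which is why code that indexes arrays by picture type (last_lambda_for[], for example, further down in this merge) keeps working unchanged. Correspondence for the types that appear in this diff (PT_* are illustrative names, not part of either API):

    enum {
        PT_I  = 1, /* FF_I_TYPE  -> AV_PICTURE_TYPE_I                    */
        PT_P  = 2, /* FF_P_TYPE  -> AV_PICTURE_TYPE_P                    */
        PT_B  = 3, /* FF_B_TYPE  -> AV_PICTURE_TYPE_B                    */
        PT_S  = 4, /* FF_S_TYPE  -> AV_PICTURE_TYPE_S (MPEG-4 S(GMC)-VOP) */
        PT_SI = 5, /* FF_SI_TYPE -> AV_PICTURE_TYPE_SI                   */
        PT_SP = 6, /* FF_SP_TYPE -> AV_PICTURE_TYPE_SP                   */
    };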
diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h
index 3836a6ef38..c8a38f5b6a 100644
--- a/libavcodec/mpegvideo.h
+++ b/libavcodec/mpegvideo.h
@@ -330,7 +330,7 @@ typedef struct MpegEncContext {
int adaptive_quant; ///< use adaptive quantization
int dquant; ///< qscale difference to prev qscale
int closed_gop; ///< MPEG1/2 GOP is closed
- int pict_type; ///< FF_I_TYPE, FF_P_TYPE, FF_B_TYPE, ...
+ int pict_type; ///< AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
int last_pict_type; //FIXME removes
int last_non_b_pict_type; ///< used for mpeg4 gmc b-frames & ratecontrol
int dropable;
diff --git a/libavcodec/mpegvideo_common.h b/libavcodec/mpegvideo_common.h
index d607e45ae1..18e49a63c3 100644
--- a/libavcodec/mpegvideo_common.h
+++ b/libavcodec/mpegvideo_common.h
@@ -649,7 +649,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
prefetch_motion(s, ref_picture, dir);
- if(!is_mpeg12 && s->obmc && s->pict_type != FF_B_TYPE){
+ if(!is_mpeg12 && s->obmc && s->pict_type != AV_PICTURE_TYPE_B){
int16_t mv_cache[4][4][2];
const int xy= s->mb_x + s->mb_y*s->mb_stride;
const int mot_stride= s->b8_stride;
@@ -816,7 +816,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
s->mv[dir][1][0], s->mv[dir][1][1], 8, mb_y);
}
} else {
- if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){
+ if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
ref_picture= s->current_picture_ptr->data;
}
@@ -831,7 +831,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
uint8_t ** ref2picture;
if(s->picture_structure == s->field_select[dir][i] + 1
- || s->pict_type == FF_B_TYPE || s->first_field){
+ || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
ref2picture= ref_picture;
}else{
ref2picture= s->current_picture_ptr->data;
diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c
index 7760ee58b6..cf5faac2b9 100644
--- a/libavcodec/mpegvideo_enc.c
+++ b/libavcodec/mpegvideo_enc.c
@@ -968,8 +968,8 @@ static int estimate_best_b_count(MpegEncContext *s){
assert(scale>=0 && scale <=3);
// emms_c();
- p_lambda= s->last_lambda_for[FF_P_TYPE]; //s->next_picture_ptr->quality;
- b_lambda= s->last_lambda_for[FF_B_TYPE]; //p_lambda *FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
+ p_lambda= s->last_lambda_for[AV_PICTURE_TYPE_P]; //s->next_picture_ptr->quality;
+ b_lambda= s->last_lambda_for[AV_PICTURE_TYPE_B]; //p_lambda *FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
if(!b_lambda) b_lambda= p_lambda; //FIXME we should do this somewhere else
lambda2= (b_lambda*b_lambda + (1<<FF_LAMBDA_SHIFT)/2 ) >> FF_LAMBDA_SHIFT;
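Lambda values are stored in a fixed-point scale of FF_LAMBDA_SHIFT (7) bits, so the product of two lambdas has to be shifted back down; adding half of the divisor first rounds to nearest instead of truncating. A small worked sketch (LAMBDA_SHIFT mirrors FF_LAMBDA_SHIFT; 236 is the lambda for QP 2 via FF_QP2LAMBDA = 118):

    #define LAMBDA_SHIFT 7                /* same scale as FF_LAMBDA_SHIFT */

    static int lambda_squared(int b_lambda)
    {
        return (b_lambda * b_lambda + (1 << LAMBDA_SHIFT) / 2) >> LAMBDA_SHIFT;
    }
    /* lambda_squared(236): 236*236 = 55696, +64 = 55760, >>7 = 435 */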
@@ -1024,7 +1024,7 @@ static int estimate_best_b_count(MpegEncContext *s){
c->error[0]= c->error[1]= c->error[2]= 0;
- input[0].pict_type= FF_I_TYPE;
+ input[0].pict_type= AV_PICTURE_TYPE_I;
input[0].quality= 1 * FF_QP2LAMBDA;
out_size = avcodec_encode_video(c, outbuf, outbuf_size, &input[0]);
// rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
@@ -1032,7 +1032,7 @@ static int estimate_best_b_count(MpegEncContext *s){
for(i=0; i<s->max_b_frames+1; i++){
int is_p= i % (j+1) == j || i==s->max_b_frames;
- input[i+1].pict_type= is_p ? FF_P_TYPE : FF_B_TYPE;
+ input[i+1].pict_type= is_p ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
input[i+1].quality= is_p ? p_lambda : b_lambda;
out_size = avcodec_encode_video(c, outbuf, outbuf_size, &input[i+1]);
rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
@@ -1074,7 +1074,7 @@ static int select_input_picture(MpegEncContext *s){
if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
s->reordered_input_picture[0]= s->input_picture[0];
- s->reordered_input_picture[0]->pict_type= FF_I_TYPE;
+ s->reordered_input_picture[0]->pict_type= AV_PICTURE_TYPE_I;
s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
}else{
int b_frames;
@@ -1109,7 +1109,7 @@ static int select_input_picture(MpegEncContext *s){
if(pict_num >= s->rc_context.num_entries)
break;
if(!s->input_picture[i]){
- s->rc_context.entry[pict_num-1].new_pict_type = FF_P_TYPE;
+ s->rc_context.entry[pict_num-1].new_pict_type = AV_PICTURE_TYPE_P;
break;
}
@@ -1153,10 +1153,10 @@ static int select_input_picture(MpegEncContext *s){
for(i= b_frames - 1; i>=0; i--){
int type= s->input_picture[i]->pict_type;
- if(type && type != FF_B_TYPE)
+ if(type && type != AV_PICTURE_TYPE_B)
b_frames= i;
}
- if(s->input_picture[b_frames]->pict_type == FF_B_TYPE && b_frames == s->max_b_frames){
+ if(s->input_picture[b_frames]->pict_type == AV_PICTURE_TYPE_B && b_frames == s->max_b_frames){
av_log(s->avctx, AV_LOG_ERROR, "warning, too many b frames in a row\n");
}
@@ -1166,29 +1166,29 @@ static int select_input_picture(MpegEncContext *s){
}else{
if(s->flags & CODEC_FLAG_CLOSED_GOP)
b_frames=0;
- s->input_picture[b_frames]->pict_type= FF_I_TYPE;
+ s->input_picture[b_frames]->pict_type= AV_PICTURE_TYPE_I;
}
}
if( (s->flags & CODEC_FLAG_CLOSED_GOP)
&& b_frames
- && s->input_picture[b_frames]->pict_type== FF_I_TYPE)
+ && s->input_picture[b_frames]->pict_type== AV_PICTURE_TYPE_I)
b_frames--;
s->reordered_input_picture[0]= s->input_picture[b_frames];
- if(s->reordered_input_picture[0]->pict_type != FF_I_TYPE)
- s->reordered_input_picture[0]->pict_type= FF_P_TYPE;
+ if(s->reordered_input_picture[0]->pict_type != AV_PICTURE_TYPE_I)
+ s->reordered_input_picture[0]->pict_type= AV_PICTURE_TYPE_P;
s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
for(i=0; i<b_frames; i++){
s->reordered_input_picture[i+1]= s->input_picture[i];
- s->reordered_input_picture[i+1]->pict_type= FF_B_TYPE;
+ s->reordered_input_picture[i+1]->pict_type= AV_PICTURE_TYPE_B;
s->reordered_input_picture[i+1]->coded_picture_number= s->coded_picture_number++;
}
}
}
no_output_pic:
if(s->reordered_input_picture[0]){
- s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=FF_B_TYPE ? 3 : 0;
+ s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=AV_PICTURE_TYPE_B ? 3 : 0;
ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]);
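This block is where display order turns into coded order: the chosen anchor frame is forced to I or P, the B frames that precede it in display order are queued after it, and coded_picture_number is assigned in that new order; the anchor gets reference = 3 (both fields), B frames get 0. A toy illustration of the reordering for a fixed two-B-frame pattern (self-contained example, not derived from the encoder itself):

    #include <stdio.h>

    int main(void)
    {
        const char display[] = { 'I', 'B', 'B', 'P', 'B', 'B', 'P' };
        const int n = sizeof(display) / sizeof(display[0]);
        int i;

        printf("display:");
        for (i = 0; i < n; i++)
            printf(" %c", display[i]);

        /* each anchor is emitted before the two B frames it follows */
        printf("\ncoded:   I");
        for (i = 1; i + 2 < n; i += 3)
            printf(" %c %c %c", display[i + 2], display[i], display[i + 1]);
        printf("\n");
        return 0;
    }
    /* prints:  display: I B B P B B P
                coded:   I P B B P B B  */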
@@ -1296,11 +1296,11 @@ vbv_retry:
s->lambda_table[i]= FFMAX(s->lambda_table[i]+1, s->lambda_table[i]*(s->qscale+1) / s->qscale);
}
s->mb_skipped = 0; //done in MPV_frame_start()
- if(s->pict_type==FF_P_TYPE){ //done in encode_picture() so we must undo it
+ if(s->pict_type==AV_PICTURE_TYPE_P){ //done in encode_picture() so we must undo it
if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
s->no_rounding ^= 1;
}
- if(s->pict_type!=FF_B_TYPE){
+ if(s->pict_type!=AV_PICTURE_TYPE_B){
s->time_base= s->last_time_base;
s->last_non_b_time= s->time - s->pp_time;
}
@@ -1528,7 +1528,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x,
if(s->codec_id==CODEC_ID_MPEG4){
if(!s->mb_intra){
- if(s->pict_type == FF_B_TYPE){
+ if(s->pict_type == AV_PICTURE_TYPE_B){
if(s->dquant&1 || s->mv_dir&MV_DIRECT)
s->dquant= 0;
}
@@ -1605,7 +1605,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x,
dest_cb = s->dest[1];
dest_cr = s->dest[2];
- if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){
+ if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
op_pix = s->dsp.put_pixels_tab;
op_qpix= s->dsp.put_qpel_pixels_tab;
}else{
@@ -1995,7 +1995,7 @@ static int estimate_motion_thread(AVCodecContext *c, void *arg){
s->block_index[3]+=2;
/* compute motion vector & mb_type and store in context */
- if(s->pict_type==FF_B_TYPE)
+ if(s->pict_type==AV_PICTURE_TYPE_B)
ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
else
ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
@@ -2393,7 +2393,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
s->mv[1][0][0] = best_s.mv[1][0][0];
s->mv[1][0][1] = best_s.mv[1][0][1];
- qpi = s->pict_type == FF_B_TYPE ? 2 : 0;
+ qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
for(; qpi<4; qpi++){
int dquant= dquant_tab[qpi];
qp= last_qp + dquant;
@@ -2495,7 +2495,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
s->last_bits= put_bits_count(&s->pb);
if (CONFIG_H263_ENCODER &&
- s->out_format == FMT_H263 && s->pict_type!=FF_B_TYPE)
+ s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
ff_h263_update_motion_val(s);
if(next_block==0){ //FIXME 16 vs linesize16
@@ -2622,7 +2622,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
s->last_mv_dir = s->mv_dir;
if (CONFIG_H263_ENCODER &&
- s->out_format == FMT_H263 && s->pict_type!=FF_B_TYPE)
+ s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
ff_h263_update_motion_val(s);
MPV_decode_mb(s, s->block);
@@ -2660,7 +2660,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
}
//not beautiful here but we must write it before flushing so it has to be here
- if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == FF_I_TYPE)
+ if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
msmpeg4_encode_ext_header(s);
write_slice_end(s);
@@ -2758,7 +2758,7 @@ static void set_frame_distances(MpegEncContext * s){
assert(s->current_picture_ptr->pts != AV_NOPTS_VALUE);
s->time= s->current_picture_ptr->pts*s->avctx->time_base.num;
- if(s->pict_type==FF_B_TYPE){
+ if(s->pict_type==AV_PICTURE_TYPE_B){
s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
assert(s->pb_time > 0 && s->pb_time < s->pp_time);
}else{
@@ -2791,10 +2791,10 @@ static int encode_picture(MpegEncContext *s, int picture_number)
// s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
- if(s->pict_type==FF_I_TYPE){
+ if(s->pict_type==AV_PICTURE_TYPE_I){
if(s->msmpeg4_version >= 3) s->no_rounding=1;
else s->no_rounding=0;
- }else if(s->pict_type!=FF_B_TYPE){
+ }else if(s->pict_type!=AV_PICTURE_TYPE_B){
if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
s->no_rounding ^= 1;
}
@@ -2804,7 +2804,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
return -1;
ff_get_2pass_fcode(s);
}else if(!(s->flags & CODEC_FLAG_QSCALE)){
- if(s->pict_type==FF_B_TYPE)
+ if(s->pict_type==AV_PICTURE_TYPE_B)
s->lambda= s->last_lambda_for[s->pict_type];
else
s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
@@ -2820,17 +2820,17 @@ static int encode_picture(MpegEncContext *s, int picture_number)
return -1;
/* Estimate motion for every MB */
- if(s->pict_type != FF_I_TYPE){
+ if(s->pict_type != AV_PICTURE_TYPE_I){
s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
- if(s->pict_type != FF_B_TYPE && s->avctx->me_threshold==0){
- if((s->avctx->pre_me && s->last_non_b_pict_type==FF_I_TYPE) || s->avctx->pre_me==2){
+ if(s->pict_type != AV_PICTURE_TYPE_B && s->avctx->me_threshold==0){
+ if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
}
}
s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
- }else /* if(s->pict_type == FF_I_TYPE) */{
+ }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
/* I-Frame */
for(i=0; i<s->mb_stride*s->mb_height; i++)
s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
@@ -2847,15 +2847,15 @@ static int encode_picture(MpegEncContext *s, int picture_number)
s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
emms_c();
- if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == FF_P_TYPE){
- s->pict_type= FF_I_TYPE;
+ if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
+ s->pict_type= AV_PICTURE_TYPE_I;
for(i=0; i<s->mb_stride*s->mb_height; i++)
s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
//printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
}
if(!s->umvplus){
- if(s->pict_type==FF_P_TYPE || s->pict_type==FF_S_TYPE) {
+ if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
if(s->flags & CODEC_FLAG_INTERLACED_ME){
@@ -2877,7 +2877,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
}
}
- if(s->pict_type==FF_B_TYPE){
+ if(s->pict_type==AV_PICTURE_TYPE_B){
int a, b;
a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
@@ -2911,7 +2911,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
if (estimate_qp(s, 0) < 0)
return -1;
- if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==FF_I_TYPE && !(s->flags & CODEC_FLAG_QSCALE))
+ if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
s->qscale= 3; //reduce clipping problems
if (s->out_format == FMT_MJPEG) {
@@ -2931,7 +2931,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
//FIXME var duplication
s->current_picture_ptr->key_frame=
- s->current_picture.key_frame= s->pict_type == FF_I_TYPE; //FIXME pic_ptr
+ s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
s->current_picture_ptr->pict_type=
s->current_picture.pict_type= s->pict_type;
diff --git a/libavcodec/mpegvideo_xvmc.c b/libavcodec/mpegvideo_xvmc.c
index 0a5a750fd7..76794076f9 100644
--- a/libavcodec/mpegvideo_xvmc.c
+++ b/libavcodec/mpegvideo_xvmc.c
@@ -110,9 +110,9 @@ int ff_xvmc_field_start(MpegEncContext *s, AVCodecContext *avctx)
render->p_past_surface = NULL;
switch(s->pict_type) {
- case FF_I_TYPE:
+ case AV_PICTURE_TYPE_I:
return 0; // no prediction from other frames
- case FF_B_TYPE:
+ case AV_PICTURE_TYPE_B:
next = (struct xvmc_pix_fmt*)s->next_picture.data[2];
if (!next)
return -1;
@@ -120,7 +120,7 @@ int ff_xvmc_field_start(MpegEncContext *s, AVCodecContext *avctx)
return -1;
render->p_future_surface = next->p_surface;
// no return here, going to set forward prediction
- case FF_P_TYPE:
+ case AV_PICTURE_TYPE_P:
last = (struct xvmc_pix_fmt*)s->last_picture.data[2];
if (!last)
last = render; // predict second field from the first
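The B case above intentionally falls through into the P case: a B picture needs both a future (backward) and a past (forward) surface, a P picture only the latter, and an I picture neither; when no past surface exists, the current render structure stands in so the second field can be predicted from the first. The same logic written without the fall-through, as a readability sketch (types and field names simplified; not a proposed change to the file):

    static int setup_prediction_surfaces(int pict_type, void *own,
                                         void *last, void *next,
                                         void **p_past, void **p_future)
    {
        *p_past = *p_future = NULL;
        if (pict_type == 1)            /* AV_PICTURE_TYPE_I: no prediction    */
            return 0;
        if (pict_type == 3) {          /* AV_PICTURE_TYPE_B: backward surface */
            if (!next)
                return -1;
            *p_future = next;
        }
        if (!last)
            last = own;                /* second field predicts from the first */
        *p_past = last;                /* B and P both need a forward surface  */
        return 0;
    }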
diff --git a/libavcodec/msmpeg4.c b/libavcodec/msmpeg4.c
index 1ba3cdb2fa..ac0ad9bc9c 100644
--- a/libavcodec/msmpeg4.c
+++ b/libavcodec/msmpeg4.c
@@ -308,7 +308,7 @@ static void find_best_tables(MpegEncContext * s)
int intra_luma_count = s->ac_stats[1][0][level][run][last];
int intra_chroma_count= s->ac_stats[1][1][level][run][last];
- if(s->pict_type==FF_I_TYPE){
+ if(s->pict_type==AV_PICTURE_TYPE_I){
size += intra_luma_count *rl_length[i ][level][run][last];
chroma_size+= intra_chroma_count*rl_length[i+3][level][run][last];
}else{
@@ -333,7 +333,7 @@ static void find_best_tables(MpegEncContext * s)
// printf("type:%d, best:%d, qp:%d, var:%d, mcvar:%d, size:%d //\n",
// s->pict_type, best, s->qscale, s->mb_var_sum, s->mc_mb_var_sum, best_size);
- if(s->pict_type==FF_P_TYPE) chroma_best= best;
+ if(s->pict_type==AV_PICTURE_TYPE_P) chroma_best= best;
memset(s->ac_stats, 0, sizeof(int)*(MAX_LEVEL+1)*(MAX_RUN+1)*2*2*2);
@@ -342,7 +342,7 @@ static void find_best_tables(MpegEncContext * s)
if(s->pict_type != s->last_non_b_pict_type){
s->rl_table_index= 2;
- if(s->pict_type==FF_I_TYPE)
+ if(s->pict_type==AV_PICTURE_TYPE_I)
s->rl_chroma_table_index= 1;
else
s->rl_chroma_table_index= 2;
@@ -369,10 +369,10 @@ void msmpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
s->use_skip_mb_code = 1; /* only if P frame */
s->per_mb_rl_table = 0;
if(s->msmpeg4_version==4)
- s->inter_intra_pred= (s->width*s->height < 320*240 && s->bit_rate<=II_BITRATE && s->pict_type==FF_P_TYPE);
+ s->inter_intra_pred= (s->width*s->height < 320*240 && s->bit_rate<=II_BITRATE && s->pict_type==AV_PICTURE_TYPE_P);
//printf("%d %d %d %d %d\n", s->pict_type, s->bit_rate, s->inter_intra_pred, s->width, s->height);
- if (s->pict_type == FF_I_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_I) {
s->slice_height= s->mb_height/1;
put_bits(&s->pb, 5, 0x16 + s->mb_height/s->slice_height);
@@ -619,7 +619,7 @@ void msmpeg4_encode_mb(MpegEncContext * s,
}
if(s->msmpeg4_version<=2){
- if (s->pict_type == FF_I_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_I) {
put_bits(&s->pb,
v2_intra_cbpc[cbp&3][1], v2_intra_cbpc[cbp&3][0]);
} else {
@@ -634,7 +634,7 @@ void msmpeg4_encode_mb(MpegEncContext * s,
ff_h263_cbpy_tab[cbp>>2][1],
ff_h263_cbpy_tab[cbp>>2][0]);
}else{
- if (s->pict_type == FF_I_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_I) {
put_bits(&s->pb,
ff_msmp4_mb_i_table[coded_cbp][1], ff_msmp4_mb_i_table[coded_cbp][0]);
} else {
@@ -1094,7 +1094,7 @@ static int msmpeg4v12_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
{
int cbp, code, i;
- if (s->pict_type == FF_P_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_P) {
if (s->use_skip_mb_code) {
if (get_bits1(&s->gb)) {
/* skip mb */
@@ -1161,7 +1161,7 @@ static int msmpeg4v12_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
} else{
s->ac_pred = 0;
cbp|= get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1)<<2; //FIXME check errors
- if(s->pict_type==FF_P_TYPE) cbp^=0x3C;
+ if(s->pict_type==AV_PICTURE_TYPE_P) cbp^=0x3C;
}
}
@@ -1182,7 +1182,7 @@ static int msmpeg4v34_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
uint8_t *coded_val;
uint32_t * const mb_type_ptr= &s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ];
- if (s->pict_type == FF_P_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_P) {
if (s->use_skip_mb_code) {
if (get_bits1(&s->gb)) {
/* skip mb */
@@ -1390,15 +1390,15 @@ int msmpeg4_decode_picture_header(MpegEncContext * s)
}
s->pict_type = get_bits(&s->gb, 2) + 1;
- if (s->pict_type != FF_I_TYPE &&
- s->pict_type != FF_P_TYPE){
+ if (s->pict_type != AV_PICTURE_TYPE_I &&
+ s->pict_type != AV_PICTURE_TYPE_P){
av_log(s->avctx, AV_LOG_ERROR, "invalid picture type\n");
return -1;
}
#if 0
{
static int had_i=0;
- if(s->pict_type == FF_I_TYPE) had_i=1;
+ if(s->pict_type == AV_PICTURE_TYPE_I) had_i=1;
if(!had_i) return -1;
}
#endif
@@ -1408,7 +1408,7 @@ int msmpeg4_decode_picture_header(MpegEncContext * s)
return -1;
}
- if (s->pict_type == FF_I_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_I) {
code = get_bits(&s->gb, 5);
if(s->msmpeg4_version==1){
if(code==0 || code>s->mb_height){
diff --git a/libavcodec/msrle.c b/libavcodec/msrle.c
index f1fa8f54ff..cd81200c37 100644
--- a/libavcodec/msrle.c
+++ b/libavcodec/msrle.c
@@ -55,6 +55,9 @@ static av_cold int msrle_decode_init(AVCodecContext *avctx)
s->avctx = avctx;
switch (avctx->bits_per_coded_sample) {
+ case 1:
+ avctx->pix_fmt = PIX_FMT_MONOWHITE;
+ break;
case 4:
case 8:
avctx->pix_fmt = PIX_FMT_PAL8;
@@ -67,6 +70,7 @@ static av_cold int msrle_decode_init(AVCodecContext *avctx)
return -1;
}
+ avcodec_get_frame_defaults(&s->frame);
s->frame.data[0] = NULL;
return 0;
diff --git a/libavcodec/msvideo1.c b/libavcodec/msvideo1.c
index d40304df3a..bd55cad3b9 100644
--- a/libavcodec/msvideo1.c
+++ b/libavcodec/msvideo1.c
@@ -72,6 +72,7 @@ static av_cold int msvideo1_decode_init(AVCodecContext *avctx)
avctx->pix_fmt = PIX_FMT_RGB555;
}
+ avcodec_get_frame_defaults(&s->frame);
s->frame.data[0] = NULL;
return 0;
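Both msrle.c and msvideo1.c now call avcodec_get_frame_defaults() on the AVFrame embedded in their private context, so the frame starts from well-defined defaults rather than whatever the allocator left there before its first use; msrle additionally maps 1 bit-per-pixel input to PIX_FMT_MONOWHITE. The resulting init pattern, as a sketch (MyContext and my_decode_init are illustrative names, not from the diff):

    #include <libavcodec/avcodec.h>

    typedef struct MyContext {
        AVCodecContext *avctx;
        AVFrame frame;                     /* embedded, never allocated separately */
    } MyContext;

    static int my_decode_init(AVCodecContext *avctx)
    {
        MyContext *s = avctx->priv_data;

        s->avctx = avctx;
        avcodec_get_frame_defaults(&s->frame); /* sane defaults before first use */
        s->frame.data[0] = NULL;               /* no buffer allocated yet        */
        return 0;
    }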
diff --git a/libavcodec/msvideo1enc.c b/libavcodec/msvideo1enc.c
new file mode 100644
index 0000000000..0b6d474604
--- /dev/null
+++ b/libavcodec/msvideo1enc.c
@@ -0,0 +1,298 @@
+/*
+ * Microsoft Video-1 Encoder
+ * Copyright (c) 2009 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file msvideo1enc.c
+ * Microsoft Video-1 encoder
+ */
+
+#include "avcodec.h"
+#include "bytestream.h"
+#include "libavutil/lfg.h"
+#include "elbg.h"
+#include "libavutil/imgutils.h"
+/**
+ * Encoder context
+ */
+typedef struct Msvideo1EncContext {
+ AVCodecContext *avctx;
+ AVFrame pic;
+ AVLFG rnd;
+ uint8_t *prev;
+
+ int block[16*3];
+ int block2[16*3];
+ int codebook[8*3];
+ int codebook2[8*3];
+ int output[16*3];
+ int output2[16*3];
+ int avg[3];
+ int bestpos;
+ int keyint;
+} Msvideo1EncContext;
+
+enum MSV1Mode{
+ MODE_SKIP = 0,
+ MODE_FILL,
+ MODE_2COL,
+ MODE_8COL,
+};
+
+#define SKIP_PREFIX 0x8400
+#define SKIPS_MAX 0x0FFF
+#define MKRGB555(in, off) ((in[off] << 10) | (in[off + 1] << 5) | (in[off + 2]))
+
+static const int remap[16] = { 0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15 };
+
+static int encode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size, void *data)
+{
+ Msvideo1EncContext * const c = avctx->priv_data;
+ AVFrame *pict = data;
+ AVFrame * const p = &c->pic;
+ uint16_t *src;
+ uint8_t *prevptr;
+ uint8_t *dst = buf;
+ int keyframe = 1;
+ int no_skips = 1;
+ int i, j, k, x, y;
+ int skips = 0;
+
+ *p = *pict;
+ if(!c->prev)
+ c->prev = av_malloc(avctx->width * 3 * (avctx->height + 3));
+ prevptr = c->prev + avctx->width * 3 * (FFALIGN(avctx->height, 4) - 1);
+ src = (uint16_t*)(p->data[0] + p->linesize[0]*(FFALIGN(avctx->height, 4) - 1));
+ if(c->keyint >= avctx->keyint_min)
+ keyframe = 1;
+
+ p->quality = 24;
+
+ for(y = 0; y < avctx->height; y += 4){
+ for(x = 0; x < avctx->width; x += 4){
+ int bestmode = MODE_SKIP;
+ int bestscore = INT_MAX;
+ int flags = 0;
+ int score;
+
+ for(j = 0; j < 4; j++){
+ for(i = 0; i < 4; i++){
+ uint16_t val = src[x + i - j*p->linesize[0]/2];
+ for(k = 0; k < 3; k++){
+ c->block[(i + j*4)*3 + k] =
+ c->block2[remap[i + j*4]*3 + k] = (val >> (10-k*5)) & 0x1F;
+ }
+ }
+ }
+ if(!keyframe){
+ bestscore = 0;
+ for(j = 0; j < 4; j++){
+ for(i = 0; i < 4*3; i++){
+ int t = prevptr[x*3 + i + j*p->linesize[0]] - c->block[i + j*4*3];
+ bestscore += t*t;
+ }
+ }
+ bestscore /= p->quality;
+ }
+ // try to find optimal value to fill whole 4x4 block
+ score = 0;
+ ff_init_elbg(c->block, 3, 16, c->avg, 1, 1, c->output, &c->rnd);
+ ff_do_elbg (c->block, 3, 16, c->avg, 1, 1, c->output, &c->rnd);
+ if(c->avg[0] == 1) // red component = 1 will be written as skip code
+ c->avg[0] = 0;
+ for(j = 0; j < 4; j++){
+ for(i = 0; i < 4; i++){
+ for(k = 0; k < 3; k++){
+ int t = c->avg[k] - c->block[(i+j*4)*3+k];
+ score += t*t;
+ }
+ }
+ }
+ score /= p->quality;
+ score += 2;
+ if(score < bestscore){
+ bestscore = score;
+ bestmode = MODE_FILL;
+ }
+ // search for optimal filling of 2-color block
+ score = 0;
+ ff_init_elbg(c->block, 3, 16, c->codebook, 2, 1, c->output, &c->rnd);
+ ff_do_elbg (c->block, 3, 16, c->codebook, 2, 1, c->output, &c->rnd);
+ // last output value should be always 1, swap codebooks if needed
+ if(!c->output[15]){
+ for(i = 0; i < 3; i++)
+ FFSWAP(uint8_t, c->codebook[i], c->codebook[i+3]);
+ for(i = 0; i < 16; i++)
+ c->output[i] ^= 1;
+ }
+ for(j = 0; j < 4; j++){
+ for(i = 0; i < 4; i++){
+ for(k = 0; k < 3; k++){
+ int t = c->codebook[c->output[i+j*4]*3 + k] - c->block[i*3+k+j*4*3];
+ score += t*t;
+ }
+ }
+ }
+ score /= p->quality;
+ score += 6;
+ if(score < bestscore){
+ bestscore = score;
+ bestmode = MODE_2COL;
+ }
+ // search for optimal filling of 2-color 2x2 subblocks
+ score = 0;
+ for(i = 0; i < 4; i++){
+ ff_init_elbg(c->block2 + i*4*3, 3, 4, c->codebook2 + i*2*3, 2, 1, c->output2 + i*4, &c->rnd);
+ ff_do_elbg (c->block2 + i*4*3, 3, 4, c->codebook2 + i*2*3, 2, 1, c->output2 + i*4, &c->rnd);
+ }
+ // last value should be always 1, swap codebooks if needed
+ if(!c->output2[15]){
+ for(i = 0; i < 3; i++)
+ FFSWAP(uint8_t, c->codebook2[i+18], c->codebook2[i+21]);
+ for(i = 12; i < 16; i++)
+ c->output2[i] ^= 1;
+ }
+ for(j = 0; j < 4; j++){
+ for(i = 0; i < 4; i++){
+ for(k = 0; k < 3; k++){
+ int t = c->codebook2[(c->output2[remap[i+j*4]] + (i&2) + (j&2)*2)*3+k] - c->block[i*3+k + j*4*3];
+ score += t*t;
+ }
+ }
+ }
+ score /= p->quality;
+ score += 18;
+ if(score < bestscore){
+ bestscore = score;
+ bestmode = MODE_8COL;
+ }
+
+ if(bestmode == MODE_SKIP){
+ skips++;
+ no_skips = 0;
+ }
+ if((bestmode != MODE_SKIP && skips) || skips == SKIPS_MAX){
+ bytestream_put_le16(&dst, skips | SKIP_PREFIX);
+ skips = 0;
+ }
+
+ switch(bestmode){
+ case MODE_FILL:
+ bytestream_put_le16(&dst, MKRGB555(c->avg,0) | 0x8000);
+ for(j = 0; j < 4; j++)
+ for(i = 0; i < 4; i++)
+ for(k = 0; k < 3; k++)
+ prevptr[i*3 + k - j*3*avctx->width] = c->avg[k];
+ break;
+ case MODE_2COL:
+ for(j = 0; j < 4; j++){
+ for(i = 0; i < 4; i++){
+ flags |= (c->output[i + j*4]^1) << (i + j*4);
+ for(k = 0; k < 3; k++)
+ prevptr[i*3 + k - j*3*avctx->width] = c->codebook[c->output[i + j*4]*3 + k];
+ }
+ }
+ bytestream_put_le16(&dst, flags);
+ bytestream_put_le16(&dst, MKRGB555(c->codebook, 0));
+ bytestream_put_le16(&dst, MKRGB555(c->codebook, 3));
+ break;
+ case MODE_8COL:
+ for(j = 0; j < 4; j++){
+ for(i = 0; i < 4; i++){
+ flags |= (c->output2[remap[i + j*4]]^1) << (i + j*4);
+ for(k = 0; k < 3; k++)
+ prevptr[i*3 + k - j*3*avctx->width] = c->codebook2[(c->output2[remap[i+j*4]] + (i&2) + (j&2)*2)*3 + k];
+ }
+ }
+ bytestream_put_le16(&dst, flags);
+ bytestream_put_le16(&dst, MKRGB555(c->codebook2, 0) | 0x8000);
+ for(i = 3; i < 24; i += 3)
+ bytestream_put_le16(&dst, MKRGB555(c->codebook2, i));
+ break;
+ }
+ }
+ src -= p->linesize[0] << 1;
+ prevptr -= avctx->width * 3 * 4;
+ }
+ if(skips)
+ bytestream_put_le16(&dst, skips | SKIP_PREFIX);
+ //EOF
+ bytestream_put_byte(&dst, 0);
+ bytestream_put_byte(&dst, 0);
+
+ if(no_skips)
+ keyframe = 1;
+ if(keyframe)
+ c->keyint = 0;
+ else
+ c->keyint++;
+ p->pict_type= keyframe ? FF_I_TYPE : FF_P_TYPE;
+ p->key_frame= keyframe;
+
+ return dst - buf;
+}
+
+
+/**
+ * init encoder
+ */
+static av_cold int encode_init(AVCodecContext *avctx)
+{
+ Msvideo1EncContext * const c = avctx->priv_data;
+
+ c->avctx = avctx;
+ if (av_image_check_size(avctx->width, avctx->height, 0, avctx) < 0) {
+ return -1;
+ }
+
+ avcodec_get_frame_defaults(&c->pic);
+ avctx->coded_frame = (AVFrame*)&c->pic;
+
+ c->keyint = avctx->keyint_min;
+ av_lfg_init(&c->rnd, 1);
+
+ return 0;
+}
+
+
+
+/**
+ * Uninit encoder
+ */
+static av_cold int encode_end(AVCodecContext *avctx)
+{
+ Msvideo1EncContext * const c = avctx->priv_data;
+
+ av_freep(&c->prev);
+
+ return 0;
+}
+
+AVCodec ff_msvideo1_encoder = {
+ "msvideo1",
+ AVMEDIA_TYPE_VIDEO,
+ CODEC_ID_MSVIDEO1,
+ sizeof(Msvideo1EncContext),
+ encode_init,
+ encode_frame,
+ encode_end,
+ .pix_fmts = (const enum PixelFormat[]){PIX_FMT_RGB555, PIX_FMT_NONE},
+ .long_name = NULL_IF_CONFIG_SMALL("Microsoft Video-1"),
+};
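The new encoder walks the RGB555 frame bottom-up in 4x4 blocks and, for each block, scores four modes — skip, single fill colour, one 2-colour block, and 2-colour 2x2 sub-blocks (8 colours) — using ELBG-quantised squared error divided by the quality setting, plus a bias that corresponds to each mode's output size in bytes (2, 6 and 18 respectively). Runs of skipped blocks are accumulated and flushed as a single opcode. Note that this file still sets p->pict_type from the old FF_I_TYPE/FF_P_TYPE macros, unlike the rest of the merge. A sketch of the two bitstream primitives involved (names are illustrative; the encoder itself uses the MKRGB555 macro and bytestream_put_le16()):

    #include <stdint.h>

    /* Pack 5-bit R, G, B components into one RGB555 word; the top bit is
     * left clear so the caller can use it as a mode flag (| 0x8000). */
    static uint16_t pack_rgb555(const int c[3])
    {
        return (uint16_t)((c[0] << 10) | (c[1] << 5) | c[2]);
    }

    /* A run of skipped 4x4 blocks becomes one little-endian opcode
     * 0x8400 | run, with the run capped at 0x0FFF blocks. */
    static uint8_t *flush_skip_run(uint8_t *dst, int skips)
    {
        uint16_t op = 0x8400 | (skips & 0x0FFF);
        *dst++ = (uint8_t)(op & 0xFF);
        *dst++ = (uint8_t)(op >> 8);
        return dst;
    }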
diff --git a/libavcodec/mxpegdec.c b/libavcodec/mxpegdec.c
index 7885e0d451..78067c995f 100644
--- a/libavcodec/mxpegdec.c
+++ b/libavcodec/mxpegdec.c
@@ -256,11 +256,11 @@ static int mxpeg_decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return AVERROR(ENOMEM);
}
- jpg->picture_ptr->pict_type = FF_P_TYPE;
+ jpg->picture_ptr->pict_type = AV_PICTURE_TYPE_P;
jpg->picture_ptr->key_frame = 0;
jpg->got_picture = 1;
} else {
- jpg->picture_ptr->pict_type = FF_I_TYPE;
+ jpg->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
jpg->picture_ptr->key_frame = 1;
}
diff --git a/libavcodec/nuv.c b/libavcodec/nuv.c
index 84ee6af9b3..6eb6de3101 100644
--- a/libavcodec/nuv.c
+++ b/libavcodec/nuv.c
@@ -208,7 +208,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
return -1;
}
- c->pic.pict_type = keyframe ? FF_I_TYPE : FF_P_TYPE;
+ c->pic.pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
c->pic.key_frame = keyframe;
// decompress/copy/whatever data
switch (comptype) {
diff --git a/libavcodec/options.c b/libavcodec/options.c
index 04c58a64df..8aff6379ac 100644
--- a/libavcodec/options.c
+++ b/libavcodec/options.c
@@ -49,413 +49,400 @@ static const char* context_to_name(void* ptr) {
#define AV_CODEC_DEFAULT_BITRATE 200*1000
static const AVOption options[]={
-{"b", "set bitrate (in bits/s)", OFFSET(bit_rate), FF_OPT_TYPE_INT, AV_CODEC_DEFAULT_BITRATE, INT_MIN, INT_MAX, V|E},
-{"ab", "set bitrate (in bits/s)", OFFSET(bit_rate), FF_OPT_TYPE_INT, 64*1000, INT_MIN, INT_MAX, A|E},
-{"bt", "set video bitrate tolerance (in bits/s)", OFFSET(bit_rate_tolerance), FF_OPT_TYPE_INT, AV_CODEC_DEFAULT_BITRATE*20, 1, INT_MAX, V|E},
-{"flags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, 0, UINT_MAX, V|A|E|D, "flags"},
-{"mv4", "use four motion vector by macroblock (mpeg4)", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_4MV, INT_MIN, INT_MAX, V|E, "flags"},
-{"obmc", "use overlapped block motion compensation (h263+)", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_OBMC, INT_MIN, INT_MAX, V|E, "flags"},
-{"qpel", "use 1/4 pel motion compensation", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_QPEL, INT_MIN, INT_MAX, V|E, "flags"},
-{"loop", "use loop filter", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_LOOP_FILTER, INT_MIN, INT_MAX, V|E, "flags"},
-{"qscale", "use fixed qscale", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_QSCALE, INT_MIN, INT_MAX, 0, "flags"},
-{"gmc", "use gmc", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_GMC, INT_MIN, INT_MAX, V|E, "flags"},
-{"mv0", "always try a mb with mv=<0,0>", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_MV0, INT_MIN, INT_MAX, V|E, "flags"},
-{"part", "use data partitioning", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_PART, INT_MIN, INT_MAX, V|E, "flags"},
-{"input_preserved", NULL, 0, FF_OPT_TYPE_CONST, CODEC_FLAG_INPUT_PRESERVED, INT_MIN, INT_MAX, 0, "flags"},
-{"pass1", "use internal 2pass ratecontrol in first pass mode", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_PASS1, INT_MIN, INT_MAX, 0, "flags"},
-{"pass2", "use internal 2pass ratecontrol in second pass mode", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_PASS2, INT_MIN, INT_MAX, 0, "flags"},
-{"extern_huff", "use external huffman table (for mjpeg)", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_EXTERN_HUFF, INT_MIN, INT_MAX, 0, "flags"},
-{"gray", "only decode/encode grayscale", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_GRAY, INT_MIN, INT_MAX, V|E|D, "flags"},
-{"emu_edge", "don't draw edges", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_EMU_EDGE, INT_MIN, INT_MAX, 0, "flags"},
-{"psnr", "error[?] variables will be set during encoding", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_PSNR, INT_MIN, INT_MAX, V|E, "flags"},
-{"truncated", NULL, 0, FF_OPT_TYPE_CONST, CODEC_FLAG_TRUNCATED, INT_MIN, INT_MAX, 0, "flags"},
-{"naq", "normalize adaptive quantization", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_NORMALIZE_AQP, INT_MIN, INT_MAX, V|E, "flags"},
-{"ildct", "use interlaced dct", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_INTERLACED_DCT, INT_MIN, INT_MAX, V|E, "flags"},
-{"low_delay", "force low delay", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_LOW_DELAY, INT_MIN, INT_MAX, V|D|E, "flags"},
-{"alt", "enable alternate scantable (mpeg2/mpeg4)", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_ALT_SCAN, INT_MIN, INT_MAX, V|E, "flags"},
-{"global_header", "place global headers in extradata instead of every keyframe", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_GLOBAL_HEADER, INT_MIN, INT_MAX, V|A|E, "flags"},
-{"bitexact", "use only bitexact stuff (except (i)dct)", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_BITEXACT, INT_MIN, INT_MAX, A|V|S|D|E, "flags"},
-{"aic", "h263 advanced intra coding / mpeg4 ac prediction", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_AC_PRED, INT_MIN, INT_MAX, V|E, "flags"},
-{"umv", "use unlimited motion vectors", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_H263P_UMV, INT_MIN, INT_MAX, V|E, "flags"},
-{"cbp", "use rate distortion optimization for cbp", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_CBP_RD, INT_MIN, INT_MAX, V|E, "flags"},
-{"qprd", "use rate distortion optimization for qp selection", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_QP_RD, INT_MIN, INT_MAX, V|E, "flags"},
-{"aiv", "h263 alternative inter vlc", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_H263P_AIV, INT_MIN, INT_MAX, V|E, "flags"},
-{"slice", NULL, 0, FF_OPT_TYPE_CONST, CODEC_FLAG_H263P_SLICE_STRUCT, INT_MIN, INT_MAX, V|E, "flags"},
-{"ilme", "interlaced motion estimation", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_INTERLACED_ME, INT_MIN, INT_MAX, V|E, "flags"},
-{"scan_offset", "will reserve space for svcd scan offset user data", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_SVCD_SCAN_OFFSET, INT_MIN, INT_MAX, V|E, "flags"},
-{"cgop", "closed gop", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_CLOSED_GOP, INT_MIN, INT_MAX, V|E, "flags"},
-{"fast", "allow non spec compliant speedup tricks", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_FAST, INT_MIN, INT_MAX, V|E, "flags2"},
-{"sgop", "strictly enforce gop size", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_STRICT_GOP, INT_MIN, INT_MAX, V|E, "flags2"},
-{"noout", "skip bitstream encoding", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_NO_OUTPUT, INT_MIN, INT_MAX, V|E, "flags2"},
-{"local_header", "place global headers at every keyframe instead of in extradata", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_LOCAL_HEADER, INT_MIN, INT_MAX, V|E, "flags2"},
-{"sub_id", NULL, OFFSET(sub_id), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"me_method", "set motion estimation method", OFFSET(me_method), FF_OPT_TYPE_INT, ME_EPZS, INT_MIN, INT_MAX, V|E, "me_method"},
-{"zero", "zero motion estimation (fastest)", 0, FF_OPT_TYPE_CONST, ME_ZERO, INT_MIN, INT_MAX, V|E, "me_method" },
-{"full", "full motion estimation (slowest)", 0, FF_OPT_TYPE_CONST, ME_FULL, INT_MIN, INT_MAX, V|E, "me_method" },
-{"epzs", "EPZS motion estimation (default)", 0, FF_OPT_TYPE_CONST, ME_EPZS, INT_MIN, INT_MAX, V|E, "me_method" },
-{"esa", "esa motion estimation (alias for full)", 0, FF_OPT_TYPE_CONST, ME_FULL, INT_MIN, INT_MAX, V|E, "me_method" },
-{"tesa", "tesa motion estimation", 0, FF_OPT_TYPE_CONST, ME_TESA, INT_MIN, INT_MAX, V|E, "me_method" },
-{"dia", "dia motion estimation (alias for epzs)", 0, FF_OPT_TYPE_CONST, ME_EPZS, INT_MIN, INT_MAX, V|E, "me_method" },
-{"log", "log motion estimation", 0, FF_OPT_TYPE_CONST, ME_LOG, INT_MIN, INT_MAX, V|E, "me_method" },
-{"phods", "phods motion estimation", 0, FF_OPT_TYPE_CONST, ME_PHODS, INT_MIN, INT_MAX, V|E, "me_method" },
-{"x1", "X1 motion estimation", 0, FF_OPT_TYPE_CONST, ME_X1, INT_MIN, INT_MAX, V|E, "me_method" },
-{"hex", "hex motion estimation", 0, FF_OPT_TYPE_CONST, ME_HEX, INT_MIN, INT_MAX, V|E, "me_method" },
-{"umh", "umh motion estimation", 0, FF_OPT_TYPE_CONST, ME_UMH, INT_MIN, INT_MAX, V|E, "me_method" },
-{"iter", "iter motion estimation", 0, FF_OPT_TYPE_CONST, ME_ITER, INT_MIN, INT_MAX, V|E, "me_method" },
-{"extradata_size", NULL, OFFSET(extradata_size), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"time_base", NULL, OFFSET(time_base), FF_OPT_TYPE_RATIONAL, DEFAULT, INT_MIN, INT_MAX},
-{"g", "set the group of picture size", OFFSET(gop_size), FF_OPT_TYPE_INT, 12, INT_MIN, INT_MAX, V|E},
-#if FF_API_RATE_EMU
-{"rate_emu", "frame rate emulation", OFFSET(rate_emu), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-#endif
-{"ar", "set audio sampling rate (in Hz)", OFFSET(sample_rate), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"ac", "set number of audio channels", OFFSET(channels), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"cutoff", "set cutoff bandwidth", OFFSET(cutoff), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, A|E},
-{"frame_size", NULL, OFFSET(frame_size), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, A|E},
-{"frame_number", NULL, OFFSET(frame_number), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-#if LIBAVCODEC_VERSION_MAJOR < 53
-{"real_pict_num", NULL, OFFSET(real_pict_num), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-#endif
-{"delay", NULL, OFFSET(delay), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"qcomp", "video quantizer scale compression (VBR)", OFFSET(qcompress), FF_OPT_TYPE_FLOAT, 0.5, -FLT_MAX, FLT_MAX, V|E},
-{"qblur", "video quantizer scale blur (VBR)", OFFSET(qblur), FF_OPT_TYPE_FLOAT, 0.5, 0, FLT_MAX, V|E},
-{"qmin", "min video quantizer scale (VBR)", OFFSET(qmin), FF_OPT_TYPE_INT, 2, 0, 69, V|E},
-{"qmax", "max video quantizer scale (VBR)", OFFSET(qmax), FF_OPT_TYPE_INT, 31, 0, 69, V|E},
-{"qdiff", "max difference between the quantizer scale (VBR)", OFFSET(max_qdiff), FF_OPT_TYPE_INT, 3, INT_MIN, INT_MAX, V|E},
-{"bf", "use 'frames' B frames", OFFSET(max_b_frames), FF_OPT_TYPE_INT, DEFAULT, 0, FF_MAX_B_FRAMES, V|E},
-{"b_qfactor", "qp factor between p and b frames", OFFSET(b_quant_factor), FF_OPT_TYPE_FLOAT, 1.25, -FLT_MAX, FLT_MAX, V|E},
-{"rc_strategy", "ratecontrol method", OFFSET(rc_strategy), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"b_strategy", "strategy to choose between I/P/B-frames", OFFSET(b_frame_strategy), FF_OPT_TYPE_INT, 0, INT_MIN, INT_MAX, V|E},
-#if FF_API_HURRY_UP
-{"hurry_up", "deprecated, use skip_idct/skip_frame instead", OFFSET(hurry_up), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|D},
-#endif
-{"ps", "rtp payload size in bytes", OFFSET(rtp_payload_size), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"mv_bits", NULL, OFFSET(mv_bits), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"header_bits", NULL, OFFSET(header_bits), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"i_tex_bits", NULL, OFFSET(i_tex_bits), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"p_tex_bits", NULL, OFFSET(p_tex_bits), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"i_count", NULL, OFFSET(i_count), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"p_count", NULL, OFFSET(p_count), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"skip_count", NULL, OFFSET(skip_count), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"misc_bits", NULL, OFFSET(misc_bits), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"frame_bits", NULL, OFFSET(frame_bits), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"codec_tag", NULL, OFFSET(codec_tag), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"bug", "workaround not auto detected encoder bugs", OFFSET(workaround_bugs), FF_OPT_TYPE_FLAGS, FF_BUG_AUTODETECT, INT_MIN, INT_MAX, V|D, "bug"},
-{"autodetect", NULL, 0, FF_OPT_TYPE_CONST, FF_BUG_AUTODETECT, INT_MIN, INT_MAX, V|D, "bug"},
-{"old_msmpeg4", "some old lavc generated msmpeg4v3 files (no autodetection)", 0, FF_OPT_TYPE_CONST, FF_BUG_OLD_MSMPEG4, INT_MIN, INT_MAX, V|D, "bug"},
-{"xvid_ilace", "Xvid interlacing bug (autodetected if fourcc==XVIX)", 0, FF_OPT_TYPE_CONST, FF_BUG_XVID_ILACE, INT_MIN, INT_MAX, V|D, "bug"},
-{"ump4", "(autodetected if fourcc==UMP4)", 0, FF_OPT_TYPE_CONST, FF_BUG_UMP4, INT_MIN, INT_MAX, V|D, "bug"},
-{"no_padding", "padding bug (autodetected)", 0, FF_OPT_TYPE_CONST, FF_BUG_NO_PADDING, INT_MIN, INT_MAX, V|D, "bug"},
-{"amv", NULL, 0, FF_OPT_TYPE_CONST, FF_BUG_AMV, INT_MIN, INT_MAX, V|D, "bug"},
-{"ac_vlc", "illegal vlc bug (autodetected per fourcc)", 0, FF_OPT_TYPE_CONST, FF_BUG_AC_VLC, INT_MIN, INT_MAX, V|D, "bug"},
-{"qpel_chroma", NULL, 0, FF_OPT_TYPE_CONST, FF_BUG_QPEL_CHROMA, INT_MIN, INT_MAX, V|D, "bug"},
-{"std_qpel", "old standard qpel (autodetected per fourcc/version)", 0, FF_OPT_TYPE_CONST, FF_BUG_STD_QPEL, INT_MIN, INT_MAX, V|D, "bug"},
-{"qpel_chroma2", NULL, 0, FF_OPT_TYPE_CONST, FF_BUG_QPEL_CHROMA2, INT_MIN, INT_MAX, V|D, "bug"},
-{"direct_blocksize", "direct-qpel-blocksize bug (autodetected per fourcc/version)", 0, FF_OPT_TYPE_CONST, FF_BUG_DIRECT_BLOCKSIZE, INT_MIN, INT_MAX, V|D, "bug"},
-{"edge", "edge padding bug (autodetected per fourcc/version)", 0, FF_OPT_TYPE_CONST, FF_BUG_EDGE, INT_MIN, INT_MAX, V|D, "bug"},
-{"hpel_chroma", NULL, 0, FF_OPT_TYPE_CONST, FF_BUG_HPEL_CHROMA, INT_MIN, INT_MAX, V|D, "bug"},
-{"dc_clip", NULL, 0, FF_OPT_TYPE_CONST, FF_BUG_DC_CLIP, INT_MIN, INT_MAX, V|D, "bug"},
-{"ms", "workaround various bugs in microsofts broken decoders", 0, FF_OPT_TYPE_CONST, FF_BUG_MS, INT_MIN, INT_MAX, V|D, "bug"},
-{"trunc", "trancated frames", 0, FF_OPT_TYPE_CONST,FF_BUG_TRUNCATED, INT_MIN, INT_MAX, V|D, "bug"},
-{"lelim", "single coefficient elimination threshold for luminance (negative values also consider dc coefficient)", OFFSET(luma_elim_threshold), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"celim", "single coefficient elimination threshold for chrominance (negative values also consider dc coefficient)", OFFSET(chroma_elim_threshold), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"strict", "how strictly to follow the standards", OFFSET(strict_std_compliance), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, A|V|D|E, "strict"},
-{"very", "strictly conform to a older more strict version of the spec or reference software", 0, FF_OPT_TYPE_CONST, FF_COMPLIANCE_VERY_STRICT, INT_MIN, INT_MAX, V|D|E, "strict"},
-{"strict", "strictly conform to all the things in the spec no matter what consequences", 0, FF_OPT_TYPE_CONST, FF_COMPLIANCE_STRICT, INT_MIN, INT_MAX, V|D|E, "strict"},
-{"normal", NULL, 0, FF_OPT_TYPE_CONST, FF_COMPLIANCE_NORMAL, INT_MIN, INT_MAX, V|D|E, "strict"},
-#if FF_API_INOFFICIAL
-{"inofficial", "allow unofficial extensions (deprecated - use unofficial)", 0, FF_OPT_TYPE_CONST, FF_COMPLIANCE_UNOFFICIAL, INT_MIN, INT_MAX, V|D|E, "strict"},
-#endif
-{"unofficial", "allow unofficial extensions", 0, FF_OPT_TYPE_CONST, FF_COMPLIANCE_UNOFFICIAL, INT_MIN, INT_MAX, V|D|E, "strict"},
-{"experimental", "allow non standardized experimental things", 0, FF_OPT_TYPE_CONST, FF_COMPLIANCE_EXPERIMENTAL, INT_MIN, INT_MAX, V|D|E, "strict"},
-{"b_qoffset", "qp offset between P and B frames", OFFSET(b_quant_offset), FF_OPT_TYPE_FLOAT, 1.25, -FLT_MAX, FLT_MAX, V|E},
-{"er", "set error detection aggressivity", OFFSET(error_recognition), FF_OPT_TYPE_INT, FF_ER_CAREFUL, INT_MIN, INT_MAX, A|V|D, "er"},
-{"careful", NULL, 0, FF_OPT_TYPE_CONST, FF_ER_CAREFUL, INT_MIN, INT_MAX, V|D, "er"},
-{"compliant", NULL, 0, FF_OPT_TYPE_CONST, FF_ER_COMPLIANT, INT_MIN, INT_MAX, V|D, "er"},
-{"aggressive", NULL, 0, FF_OPT_TYPE_CONST, FF_ER_AGGRESSIVE, INT_MIN, INT_MAX, V|D, "er"},
-{"very_aggressive", NULL, 0, FF_OPT_TYPE_CONST, FF_ER_VERY_AGGRESSIVE, INT_MIN, INT_MAX, V|D, "er"},
-{"has_b_frames", NULL, OFFSET(has_b_frames), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"block_align", NULL, OFFSET(block_align), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"parse_only", NULL, OFFSET(parse_only), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"mpeg_quant", "use MPEG quantizers instead of H.263", OFFSET(mpeg_quant), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"stats_out", NULL, OFFSET(stats_out), FF_OPT_TYPE_STRING, DEFAULT, CHAR_MIN, CHAR_MAX},
-{"stats_in", NULL, OFFSET(stats_in), FF_OPT_TYPE_STRING, DEFAULT, CHAR_MIN, CHAR_MAX},
-{"qsquish", "how to keep quantizer between qmin and qmax (0 = clip, 1 = use differentiable function)", OFFSET(rc_qsquish), FF_OPT_TYPE_FLOAT, DEFAULT, 0, 99, V|E},
-{"rc_qmod_amp", "experimental quantizer modulation", OFFSET(rc_qmod_amp), FF_OPT_TYPE_FLOAT, DEFAULT, -FLT_MAX, FLT_MAX, V|E},
-{"rc_qmod_freq", "experimental quantizer modulation", OFFSET(rc_qmod_freq), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"rc_override_count", NULL, OFFSET(rc_override_count), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"rc_eq", "set rate control equation", OFFSET(rc_eq), FF_OPT_TYPE_STRING, DEFAULT, CHAR_MIN, CHAR_MAX, V|E},
-{"maxrate", "set max video bitrate tolerance (in bits/s)", OFFSET(rc_max_rate), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"minrate", "set min video bitrate tolerance (in bits/s)", OFFSET(rc_min_rate), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"bufsize", "set ratecontrol buffer size (in bits)", OFFSET(rc_buffer_size), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, A|V|E},
-{"rc_buf_aggressivity", "currently useless", OFFSET(rc_buffer_aggressivity), FF_OPT_TYPE_FLOAT, 1.0, -FLT_MAX, FLT_MAX, V|E},
-{"i_qfactor", "qp factor between P and I frames", OFFSET(i_quant_factor), FF_OPT_TYPE_FLOAT, -0.8, -FLT_MAX, FLT_MAX, V|E},
-{"i_qoffset", "qp offset between P and I frames", OFFSET(i_quant_offset), FF_OPT_TYPE_FLOAT, 0.0, -FLT_MAX, FLT_MAX, V|E},
-{"rc_init_cplx", "initial complexity for 1-pass encoding", OFFSET(rc_initial_cplx), FF_OPT_TYPE_FLOAT, DEFAULT, -FLT_MAX, FLT_MAX, V|E},
-{"dct", "DCT algorithm", OFFSET(dct_algo), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, V|E, "dct"},
-{"auto", "autoselect a good one (default)", 0, FF_OPT_TYPE_CONST, FF_DCT_AUTO, INT_MIN, INT_MAX, V|E, "dct"},
-{"fastint", "fast integer", 0, FF_OPT_TYPE_CONST, FF_DCT_FASTINT, INT_MIN, INT_MAX, V|E, "dct"},
-{"int", "accurate integer", 0, FF_OPT_TYPE_CONST, FF_DCT_INT, INT_MIN, INT_MAX, V|E, "dct"},
-{"mmx", NULL, 0, FF_OPT_TYPE_CONST, FF_DCT_MMX, INT_MIN, INT_MAX, V|E, "dct"},
-{"mlib", NULL, 0, FF_OPT_TYPE_CONST, FF_DCT_MLIB, INT_MIN, INT_MAX, V|E, "dct"},
-{"altivec", NULL, 0, FF_OPT_TYPE_CONST, FF_DCT_ALTIVEC, INT_MIN, INT_MAX, V|E, "dct"},
-{"faan", "floating point AAN DCT", 0, FF_OPT_TYPE_CONST, FF_DCT_FAAN, INT_MIN, INT_MAX, V|E, "dct"},
-{"lumi_mask", "compresses bright areas stronger than medium ones", OFFSET(lumi_masking), FF_OPT_TYPE_FLOAT, 0, -FLT_MAX, FLT_MAX, V|E},
-{"tcplx_mask", "temporal complexity masking", OFFSET(temporal_cplx_masking), FF_OPT_TYPE_FLOAT, 0, -FLT_MAX, FLT_MAX, V|E},
-{"scplx_mask", "spatial complexity masking", OFFSET(spatial_cplx_masking), FF_OPT_TYPE_FLOAT, 0, -FLT_MAX, FLT_MAX, V|E},
-{"p_mask", "inter masking", OFFSET(p_masking), FF_OPT_TYPE_FLOAT, 0, -FLT_MAX, FLT_MAX, V|E},
-{"dark_mask", "compresses dark areas stronger than medium ones", OFFSET(dark_masking), FF_OPT_TYPE_FLOAT, 0, -FLT_MAX, FLT_MAX, V|E},
-{"idct", "select IDCT implementation", OFFSET(idct_algo), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, V|E|D, "idct"},
-{"auto", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_AUTO, INT_MIN, INT_MAX, V|E|D, "idct"},
-{"int", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_INT, INT_MIN, INT_MAX, V|E|D, "idct"},
-{"simple", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_SIMPLE, INT_MIN, INT_MAX, V|E|D, "idct"},
-{"simplemmx", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_SIMPLEMMX, INT_MIN, INT_MAX, V|E|D, "idct"},
-{"libmpeg2mmx", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_LIBMPEG2MMX, INT_MIN, INT_MAX, V|E|D, "idct"},
-{"ps2", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_PS2, INT_MIN, INT_MAX, V|E|D, "idct"},
-{"mlib", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_MLIB, INT_MIN, INT_MAX, V|E|D, "idct"},
-{"arm", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_ARM, INT_MIN, INT_MAX, V|E|D, "idct"},
-{"altivec", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_ALTIVEC, INT_MIN, INT_MAX, V|E|D, "idct"},
-{"sh4", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_SH4, INT_MIN, INT_MAX, V|E|D, "idct"},
-{"simplearm", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_SIMPLEARM, INT_MIN, INT_MAX, V|E|D, "idct"},
-{"simplearmv5te", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_SIMPLEARMV5TE, INT_MIN, INT_MAX, V|E|D, "idct"},
-{"simplearmv6", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_SIMPLEARMV6, INT_MIN, INT_MAX, V|E|D, "idct"},
-{"simpleneon", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_SIMPLENEON, INT_MIN, INT_MAX, V|E|D, "idct"},
-{"simplealpha", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_SIMPLEALPHA, INT_MIN, INT_MAX, V|E|D, "idct"},
-{"h264", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_H264, INT_MIN, INT_MAX, V|E|D, "idct"},
-{"vp3", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_VP3, INT_MIN, INT_MAX, V|E|D, "idct"},
-{"ipp", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_IPP, INT_MIN, INT_MAX, V|E|D, "idct"},
-{"xvidmmx", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_XVIDMMX, INT_MIN, INT_MAX, V|E|D, "idct"},
-{"faani", "floating point AAN IDCT", 0, FF_OPT_TYPE_CONST, FF_IDCT_FAAN, INT_MIN, INT_MAX, V|D|E, "idct"},
-{"slice_count", NULL, OFFSET(slice_count), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"ec", "set error concealment strategy", OFFSET(error_concealment), FF_OPT_TYPE_FLAGS, 3, INT_MIN, INT_MAX, V|D, "ec"},
-{"guess_mvs", "iterative motion vector (MV) search (slow)", 0, FF_OPT_TYPE_CONST, FF_EC_GUESS_MVS, INT_MIN, INT_MAX, V|D, "ec"},
-{"deblock", "use strong deblock filter for damaged MBs", 0, FF_OPT_TYPE_CONST, FF_EC_DEBLOCK, INT_MIN, INT_MAX, V|D, "ec"},
-{"bits_per_coded_sample", NULL, OFFSET(bits_per_coded_sample), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"pred", "prediction method", OFFSET(prediction_method), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E, "pred"},
-{"left", NULL, 0, FF_OPT_TYPE_CONST, FF_PRED_LEFT, INT_MIN, INT_MAX, V|E, "pred"},
-{"plane", NULL, 0, FF_OPT_TYPE_CONST, FF_PRED_PLANE, INT_MIN, INT_MAX, V|E, "pred"},
-{"median", NULL, 0, FF_OPT_TYPE_CONST, FF_PRED_MEDIAN, INT_MIN, INT_MAX, V|E, "pred"},
-{"aspect", "sample aspect ratio", OFFSET(sample_aspect_ratio), FF_OPT_TYPE_RATIONAL, DEFAULT, 0, 10, V|E},
-{"debug", "print specific debug info", OFFSET(debug), FF_OPT_TYPE_FLAGS, DEFAULT, 0, INT_MAX, V|A|S|E|D, "debug"},
-{"pict", "picture info", 0, FF_OPT_TYPE_CONST, FF_DEBUG_PICT_INFO, INT_MIN, INT_MAX, V|D, "debug"},
-{"rc", "rate control", 0, FF_OPT_TYPE_CONST, FF_DEBUG_RC, INT_MIN, INT_MAX, V|E, "debug"},
-{"bitstream", NULL, 0, FF_OPT_TYPE_CONST, FF_DEBUG_BITSTREAM, INT_MIN, INT_MAX, V|D, "debug"},
-{"mb_type", "macroblock (MB) type", 0, FF_OPT_TYPE_CONST, FF_DEBUG_MB_TYPE, INT_MIN, INT_MAX, V|D, "debug"},
-{"qp", "per-block quantization parameter (QP)", 0, FF_OPT_TYPE_CONST, FF_DEBUG_QP, INT_MIN, INT_MAX, V|D, "debug"},
-{"mv", "motion vector", 0, FF_OPT_TYPE_CONST, FF_DEBUG_MV, INT_MIN, INT_MAX, V|D, "debug"},
-{"dct_coeff", NULL, 0, FF_OPT_TYPE_CONST, FF_DEBUG_DCT_COEFF, INT_MIN, INT_MAX, V|D, "debug"},
-{"skip", NULL, 0, FF_OPT_TYPE_CONST, FF_DEBUG_SKIP, INT_MIN, INT_MAX, V|D, "debug"},
-{"startcode", NULL, 0, FF_OPT_TYPE_CONST, FF_DEBUG_STARTCODE, INT_MIN, INT_MAX, V|D, "debug"},
-{"pts", NULL, 0, FF_OPT_TYPE_CONST, FF_DEBUG_PTS, INT_MIN, INT_MAX, V|D, "debug"},
-{"er", "error recognition", 0, FF_OPT_TYPE_CONST, FF_DEBUG_ER, INT_MIN, INT_MAX, V|D, "debug"},
-{"mmco", "memory management control operations (H.264)", 0, FF_OPT_TYPE_CONST, FF_DEBUG_MMCO, INT_MIN, INT_MAX, V|D, "debug"},
-{"bugs", NULL, 0, FF_OPT_TYPE_CONST, FF_DEBUG_BUGS, INT_MIN, INT_MAX, V|D, "debug"},
-{"vis_qp", "visualize quantization parameter (QP), lower QP are tinted greener", 0, FF_OPT_TYPE_CONST, FF_DEBUG_VIS_QP, INT_MIN, INT_MAX, V|D, "debug"},
-{"vis_mb_type", "visualize block types", 0, FF_OPT_TYPE_CONST, FF_DEBUG_VIS_MB_TYPE, INT_MIN, INT_MAX, V|D, "debug"},
-{"buffers", "picture buffer allocations", 0, FF_OPT_TYPE_CONST, FF_DEBUG_BUFFERS, INT_MIN, INT_MAX, V|D, "debug"},
-{"thread_ops", "threading operations", 0, FF_OPT_TYPE_CONST, FF_DEBUG_THREADS, INT_MIN, INT_MAX, V|D, "debug"},
-{"vismv", "visualize motion vectors (MVs)", OFFSET(debug_mv), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, V|D, "debug_mv"},
-{"pf", "forward predicted MVs of P-frames", 0, FF_OPT_TYPE_CONST, FF_DEBUG_VIS_MV_P_FOR, INT_MIN, INT_MAX, V|D, "debug_mv"},
-{"bf", "forward predicted MVs of B-frames", 0, FF_OPT_TYPE_CONST, FF_DEBUG_VIS_MV_B_FOR, INT_MIN, INT_MAX, V|D, "debug_mv"},
-{"bb", "backward predicted MVs of B-frames", 0, FF_OPT_TYPE_CONST, FF_DEBUG_VIS_MV_B_BACK, INT_MIN, INT_MAX, V|D, "debug_mv"},
-#if FF_API_MB_Q
-{"mb_qmin", "obsolete, use qmin", OFFSET(mb_qmin), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"mb_qmax", "obsolete, use qmax", OFFSET(mb_qmax), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-#endif
-{"cmp", "full pel me compare function", OFFSET(me_cmp), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E, "cmp_func"},
-{"subcmp", "sub pel me compare function", OFFSET(me_sub_cmp), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E, "cmp_func"},
-{"mbcmp", "macroblock compare function", OFFSET(mb_cmp), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E, "cmp_func"},
-{"ildctcmp", "interlaced dct compare function", OFFSET(ildct_cmp), FF_OPT_TYPE_INT, FF_CMP_VSAD, INT_MIN, INT_MAX, V|E, "cmp_func"},
-{"dia_size", "diamond type & size for motion estimation", OFFSET(dia_size), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"last_pred", "amount of motion predictors from the previous frame", OFFSET(last_predictor_count), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"preme", "pre motion estimation", OFFSET(pre_me), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"precmp", "pre motion estimation compare function", OFFSET(me_pre_cmp), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E, "cmp_func"},
-{"sad", "sum of absolute differences, fast (default)", 0, FF_OPT_TYPE_CONST, FF_CMP_SAD, INT_MIN, INT_MAX, V|E, "cmp_func"},
-{"sse", "sum of squared errors", 0, FF_OPT_TYPE_CONST, FF_CMP_SSE, INT_MIN, INT_MAX, V|E, "cmp_func"},
-{"satd", "sum of absolute Hadamard transformed differences", 0, FF_OPT_TYPE_CONST, FF_CMP_SATD, INT_MIN, INT_MAX, V|E, "cmp_func"},
-{"dct", "sum of absolute DCT transformed differences", 0, FF_OPT_TYPE_CONST, FF_CMP_DCT, INT_MIN, INT_MAX, V|E, "cmp_func"},
-{"psnr", "sum of squared quantization errors (avoid, low quality)", 0, FF_OPT_TYPE_CONST, FF_CMP_PSNR, INT_MIN, INT_MAX, V|E, "cmp_func"},
-{"bit", "number of bits needed for the block", 0, FF_OPT_TYPE_CONST, FF_CMP_BIT, INT_MIN, INT_MAX, V|E, "cmp_func"},
-{"rd", "rate distortion optimal, slow", 0, FF_OPT_TYPE_CONST, FF_CMP_RD, INT_MIN, INT_MAX, V|E, "cmp_func"},
-{"zero", "0", 0, FF_OPT_TYPE_CONST, FF_CMP_ZERO, INT_MIN, INT_MAX, V|E, "cmp_func"},
-{"vsad", "sum of absolute vertical differences", 0, FF_OPT_TYPE_CONST, FF_CMP_VSAD, INT_MIN, INT_MAX, V|E, "cmp_func"},
-{"vsse", "sum of squared vertical differences", 0, FF_OPT_TYPE_CONST, FF_CMP_VSSE, INT_MIN, INT_MAX, V|E, "cmp_func"},
-{"nsse", "noise preserving sum of squared differences", 0, FF_OPT_TYPE_CONST, FF_CMP_NSSE, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"b", "set bitrate (in bits/s)", OFFSET(bit_rate), FF_OPT_TYPE_INT, {.dbl = AV_CODEC_DEFAULT_BITRATE }, INT_MIN, INT_MAX, V|E},
+{"ab", "set bitrate (in bits/s)", OFFSET(bit_rate), FF_OPT_TYPE_INT, {.dbl = 64*1000 }, INT_MIN, INT_MAX, A|E},
+{"bt", "set video bitrate tolerance (in bits/s)", OFFSET(bit_rate_tolerance), FF_OPT_TYPE_INT, {.dbl = AV_CODEC_DEFAULT_BITRATE*20 }, 1, INT_MAX, V|E},
+{"flags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, {.dbl = DEFAULT }, 0, UINT_MAX, V|A|E|D, "flags"},
+{"mv4", "use four motion vector by macroblock (mpeg4)", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_4MV }, INT_MIN, INT_MAX, V|E, "flags"},
+{"obmc", "use overlapped block motion compensation (h263+)", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_OBMC }, INT_MIN, INT_MAX, V|E, "flags"},
+{"qpel", "use 1/4 pel motion compensation", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_QPEL }, INT_MIN, INT_MAX, V|E, "flags"},
+{"loop", "use loop filter", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_LOOP_FILTER }, INT_MIN, INT_MAX, V|E, "flags"},
+{"qscale", "use fixed qscale", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_QSCALE }, INT_MIN, INT_MAX, 0, "flags"},
+{"gmc", "use gmc", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_GMC }, INT_MIN, INT_MAX, V|E, "flags"},
+{"mv0", "always try a mb with mv=<0,0>", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_MV0 }, INT_MIN, INT_MAX, V|E, "flags"},
+{"part", "use data partitioning", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_PART }, INT_MIN, INT_MAX, V|E, "flags"},
+{"input_preserved", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_INPUT_PRESERVED }, INT_MIN, INT_MAX, 0, "flags"},
+{"pass1", "use internal 2pass ratecontrol in first pass mode", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_PASS1 }, INT_MIN, INT_MAX, 0, "flags"},
+{"pass2", "use internal 2pass ratecontrol in second pass mode", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_PASS2 }, INT_MIN, INT_MAX, 0, "flags"},
+{"extern_huff", "use external huffman table (for mjpeg)", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_EXTERN_HUFF }, INT_MIN, INT_MAX, 0, "flags"},
+{"gray", "only decode/encode grayscale", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_GRAY }, INT_MIN, INT_MAX, V|E|D, "flags"},
+{"emu_edge", "don't draw edges", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_EMU_EDGE }, INT_MIN, INT_MAX, 0, "flags"},
+{"psnr", "error[?] variables will be set during encoding", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_PSNR }, INT_MIN, INT_MAX, V|E, "flags"},
+{"truncated", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_TRUNCATED }, INT_MIN, INT_MAX, 0, "flags"},
+{"naq", "normalize adaptive quantization", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_NORMALIZE_AQP }, INT_MIN, INT_MAX, V|E, "flags"},
+{"ildct", "use interlaced dct", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_INTERLACED_DCT }, INT_MIN, INT_MAX, V|E, "flags"},
+{"low_delay", "force low delay", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_LOW_DELAY }, INT_MIN, INT_MAX, V|D|E, "flags"},
+{"alt", "enable alternate scantable (mpeg2/mpeg4)", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_ALT_SCAN }, INT_MIN, INT_MAX, V|E, "flags"},
+{"global_header", "place global headers in extradata instead of every keyframe", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_GLOBAL_HEADER }, INT_MIN, INT_MAX, V|A|E, "flags"},
+{"bitexact", "use only bitexact stuff (except (i)dct)", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_BITEXACT }, INT_MIN, INT_MAX, A|V|S|D|E, "flags"},
+{"aic", "h263 advanced intra coding / mpeg4 ac prediction", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_AC_PRED }, INT_MIN, INT_MAX, V|E, "flags"},
+{"umv", "use unlimited motion vectors", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_H263P_UMV }, INT_MIN, INT_MAX, V|E, "flags"},
+{"cbp", "use rate distortion optimization for cbp", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_CBP_RD }, INT_MIN, INT_MAX, V|E, "flags"},
+{"qprd", "use rate distortion optimization for qp selection", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_QP_RD }, INT_MIN, INT_MAX, V|E, "flags"},
+{"aiv", "h263 alternative inter vlc", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_H263P_AIV }, INT_MIN, INT_MAX, V|E, "flags"},
+{"slice", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_H263P_SLICE_STRUCT }, INT_MIN, INT_MAX, V|E, "flags"},
+{"ilme", "interlaced motion estimation", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_INTERLACED_ME }, INT_MIN, INT_MAX, V|E, "flags"},
+{"scan_offset", "will reserve space for svcd scan offset user data", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_SVCD_SCAN_OFFSET }, INT_MIN, INT_MAX, V|E, "flags"},
+{"cgop", "closed gop", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_CLOSED_GOP }, INT_MIN, INT_MAX, V|E, "flags"},
+{"fast", "allow non spec compliant speedup tricks", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_FAST }, INT_MIN, INT_MAX, V|E, "flags2"},
+{"sgop", "strictly enforce gop size", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_STRICT_GOP }, INT_MIN, INT_MAX, V|E, "flags2"},
+{"noout", "skip bitstream encoding", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_NO_OUTPUT }, INT_MIN, INT_MAX, V|E, "flags2"},
+{"local_header", "place global headers at every keyframe instead of in extradata", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_LOCAL_HEADER }, INT_MIN, INT_MAX, V|E, "flags2"},
+{"sub_id", NULL, OFFSET(sub_id), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"me_method", "set motion estimation method", OFFSET(me_method), FF_OPT_TYPE_INT, {.dbl = ME_EPZS }, INT_MIN, INT_MAX, V|E, "me_method"},
+{"zero", "zero motion estimation (fastest)", 0, FF_OPT_TYPE_CONST, {.dbl = ME_ZERO }, INT_MIN, INT_MAX, V|E, "me_method" },
+{"full", "full motion estimation (slowest)", 0, FF_OPT_TYPE_CONST, {.dbl = ME_FULL }, INT_MIN, INT_MAX, V|E, "me_method" },
+{"epzs", "EPZS motion estimation (default)", 0, FF_OPT_TYPE_CONST, {.dbl = ME_EPZS }, INT_MIN, INT_MAX, V|E, "me_method" },
+{"esa", "esa motion estimation (alias for full)", 0, FF_OPT_TYPE_CONST, {.dbl = ME_FULL }, INT_MIN, INT_MAX, V|E, "me_method" },
+{"tesa", "tesa motion estimation", 0, FF_OPT_TYPE_CONST, {.dbl = ME_TESA }, INT_MIN, INT_MAX, V|E, "me_method" },
+{"dia", "dia motion estimation (alias for epzs)", 0, FF_OPT_TYPE_CONST, {.dbl = ME_EPZS }, INT_MIN, INT_MAX, V|E, "me_method" },
+{"log", "log motion estimation", 0, FF_OPT_TYPE_CONST, {.dbl = ME_LOG }, INT_MIN, INT_MAX, V|E, "me_method" },
+{"phods", "phods motion estimation", 0, FF_OPT_TYPE_CONST, {.dbl = ME_PHODS }, INT_MIN, INT_MAX, V|E, "me_method" },
+{"x1", "X1 motion estimation", 0, FF_OPT_TYPE_CONST, {.dbl = ME_X1 }, INT_MIN, INT_MAX, V|E, "me_method" },
+{"hex", "hex motion estimation", 0, FF_OPT_TYPE_CONST, {.dbl = ME_HEX }, INT_MIN, INT_MAX, V|E, "me_method" },
+{"umh", "umh motion estimation", 0, FF_OPT_TYPE_CONST, {.dbl = ME_UMH }, INT_MIN, INT_MAX, V|E, "me_method" },
+{"iter", "iter motion estimation", 0, FF_OPT_TYPE_CONST, {.dbl = ME_ITER }, INT_MIN, INT_MAX, V|E, "me_method" },
+{"extradata_size", NULL, OFFSET(extradata_size), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"time_base", NULL, OFFSET(time_base), FF_OPT_TYPE_RATIONAL, {.dbl = 0}, INT_MIN, INT_MAX},
+{"g", "set the group of picture size", OFFSET(gop_size), FF_OPT_TYPE_INT, {.dbl = 12 }, INT_MIN, INT_MAX, V|E},
+{"ar", "set audio sampling rate (in Hz)", OFFSET(sample_rate), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"ac", "set number of audio channels", OFFSET(channels), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"cutoff", "set cutoff bandwidth", OFFSET(cutoff), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, A|E},
+{"frame_size", NULL, OFFSET(frame_size), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, A|E},
+{"frame_number", NULL, OFFSET(frame_number), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"delay", NULL, OFFSET(delay), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"qcomp", "video quantizer scale compression (VBR)", OFFSET(qcompress), FF_OPT_TYPE_FLOAT, {.dbl = 0.5 }, -FLT_MAX, FLT_MAX, V|E},
+{"qblur", "video quantizer scale blur (VBR)", OFFSET(qblur), FF_OPT_TYPE_FLOAT, {.dbl = 0.5 }, 0, FLT_MAX, V|E},
+{"qmin", "min video quantizer scale (VBR)", OFFSET(qmin), FF_OPT_TYPE_INT, {.dbl = 2 }, 0, 69, V|E},
+{"qmax", "max video quantizer scale (VBR)", OFFSET(qmax), FF_OPT_TYPE_INT, {.dbl = 31 }, 0, 69, V|E},
+{"qdiff", "max difference between the quantizer scale (VBR)", OFFSET(max_qdiff), FF_OPT_TYPE_INT, {.dbl = 3 }, INT_MIN, INT_MAX, V|E},
+{"bf", "use 'frames' B frames", OFFSET(max_b_frames), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, 0, FF_MAX_B_FRAMES, V|E},
+{"b_qfactor", "qp factor between p and b frames", OFFSET(b_quant_factor), FF_OPT_TYPE_FLOAT, {.dbl = 1.25 }, -FLT_MAX, FLT_MAX, V|E},
+{"rc_strategy", "ratecontrol method", OFFSET(rc_strategy), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"b_strategy", "strategy to choose between I/P/B-frames", OFFSET(b_frame_strategy), FF_OPT_TYPE_INT, {.dbl = 0 }, INT_MIN, INT_MAX, V|E},
+{"wpredp", "weighted prediction analysis method", OFFSET(weighted_p_pred), FF_OPT_TYPE_INT, {.dbl = 0 }, INT_MIN, INT_MAX, V|E},
+{"ps", "rtp payload size in bytes", OFFSET(rtp_payload_size), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"mv_bits", NULL, OFFSET(mv_bits), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"header_bits", NULL, OFFSET(header_bits), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"i_tex_bits", NULL, OFFSET(i_tex_bits), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"p_tex_bits", NULL, OFFSET(p_tex_bits), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"i_count", NULL, OFFSET(i_count), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"p_count", NULL, OFFSET(p_count), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"skip_count", NULL, OFFSET(skip_count), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"misc_bits", NULL, OFFSET(misc_bits), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"frame_bits", NULL, OFFSET(frame_bits), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"codec_tag", NULL, OFFSET(codec_tag), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"bug", "workaround not auto detected encoder bugs", OFFSET(workaround_bugs), FF_OPT_TYPE_FLAGS, {.dbl = FF_BUG_AUTODETECT }, INT_MIN, INT_MAX, V|D, "bug"},
+{"autodetect", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_BUG_AUTODETECT }, INT_MIN, INT_MAX, V|D, "bug"},
+{"old_msmpeg4", "some old lavc generated msmpeg4v3 files (no autodetection)", 0, FF_OPT_TYPE_CONST, {.dbl = FF_BUG_OLD_MSMPEG4 }, INT_MIN, INT_MAX, V|D, "bug"},
+{"xvid_ilace", "Xvid interlacing bug (autodetected if fourcc==XVIX)", 0, FF_OPT_TYPE_CONST, {.dbl = FF_BUG_XVID_ILACE }, INT_MIN, INT_MAX, V|D, "bug"},
+{"ump4", "(autodetected if fourcc==UMP4)", 0, FF_OPT_TYPE_CONST, {.dbl = FF_BUG_UMP4 }, INT_MIN, INT_MAX, V|D, "bug"},
+{"no_padding", "padding bug (autodetected)", 0, FF_OPT_TYPE_CONST, {.dbl = FF_BUG_NO_PADDING }, INT_MIN, INT_MAX, V|D, "bug"},
+{"amv", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_BUG_AMV }, INT_MIN, INT_MAX, V|D, "bug"},
+{"ac_vlc", "illegal vlc bug (autodetected per fourcc)", 0, FF_OPT_TYPE_CONST, {.dbl = FF_BUG_AC_VLC }, INT_MIN, INT_MAX, V|D, "bug"},
+{"qpel_chroma", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_BUG_QPEL_CHROMA }, INT_MIN, INT_MAX, V|D, "bug"},
+{"std_qpel", "old standard qpel (autodetected per fourcc/version)", 0, FF_OPT_TYPE_CONST, {.dbl = FF_BUG_STD_QPEL }, INT_MIN, INT_MAX, V|D, "bug"},
+{"qpel_chroma2", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_BUG_QPEL_CHROMA2 }, INT_MIN, INT_MAX, V|D, "bug"},
+{"direct_blocksize", "direct-qpel-blocksize bug (autodetected per fourcc/version)", 0, FF_OPT_TYPE_CONST, {.dbl = FF_BUG_DIRECT_BLOCKSIZE }, INT_MIN, INT_MAX, V|D, "bug"},
+{"edge", "edge padding bug (autodetected per fourcc/version)", 0, FF_OPT_TYPE_CONST, {.dbl = FF_BUG_EDGE }, INT_MIN, INT_MAX, V|D, "bug"},
+{"hpel_chroma", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_BUG_HPEL_CHROMA }, INT_MIN, INT_MAX, V|D, "bug"},
+{"dc_clip", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_BUG_DC_CLIP }, INT_MIN, INT_MAX, V|D, "bug"},
+{"ms", "workaround various bugs in microsofts broken decoders", 0, FF_OPT_TYPE_CONST, {.dbl = FF_BUG_MS }, INT_MIN, INT_MAX, V|D, "bug"},
+{"trunc", "trancated frames", 0, FF_OPT_TYPE_CONST, {.dbl = FF_BUG_TRUNCATED}, INT_MIN, INT_MAX, V|D, "bug"},
+{"lelim", "single coefficient elimination threshold for luminance (negative values also consider dc coefficient)", OFFSET(luma_elim_threshold), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"celim", "single coefficient elimination threshold for chrominance (negative values also consider dc coefficient)", OFFSET(chroma_elim_threshold), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"strict", "how strictly to follow the standards", OFFSET(strict_std_compliance), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, A|V|D|E, "strict"},
+{"very", "strictly conform to a older more strict version of the spec or reference software", 0, FF_OPT_TYPE_CONST, {.dbl = FF_COMPLIANCE_VERY_STRICT }, INT_MIN, INT_MAX, V|D|E, "strict"},
+{"strict", "strictly conform to all the things in the spec no matter what consequences", 0, FF_OPT_TYPE_CONST, {.dbl = FF_COMPLIANCE_STRICT }, INT_MIN, INT_MAX, V|D|E, "strict"},
+{"normal", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_COMPLIANCE_NORMAL }, INT_MIN, INT_MAX, V|D|E, "strict"},
+{"unofficial", "allow unofficial extensions", 0, FF_OPT_TYPE_CONST, {.dbl = FF_COMPLIANCE_UNOFFICIAL }, INT_MIN, INT_MAX, V|D|E, "strict"},
+{"experimental", "allow non standardized experimental things", 0, FF_OPT_TYPE_CONST, {.dbl = FF_COMPLIANCE_EXPERIMENTAL }, INT_MIN, INT_MAX, V|D|E, "strict"},
+{"b_qoffset", "qp offset between P and B frames", OFFSET(b_quant_offset), FF_OPT_TYPE_FLOAT, {.dbl = 1.25 }, -FLT_MAX, FLT_MAX, V|E},
+{"er", "set error detection aggressivity", OFFSET(error_recognition), FF_OPT_TYPE_INT, {.dbl = FF_ER_CAREFUL }, INT_MIN, INT_MAX, A|V|D, "er"},
+{"careful", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_ER_CAREFUL }, INT_MIN, INT_MAX, V|D, "er"},
+{"compliant", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_ER_COMPLIANT }, INT_MIN, INT_MAX, V|D, "er"},
+{"aggressive", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_ER_AGGRESSIVE }, INT_MIN, INT_MAX, V|D, "er"},
+{"very_aggressive", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_ER_VERY_AGGRESSIVE }, INT_MIN, INT_MAX, V|D, "er"},
+{"has_b_frames", NULL, OFFSET(has_b_frames), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"block_align", NULL, OFFSET(block_align), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"parse_only", NULL, OFFSET(parse_only), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"mpeg_quant", "use MPEG quantizers instead of H.263", OFFSET(mpeg_quant), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"stats_out", NULL, OFFSET(stats_out), FF_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX},
+{"stats_in", NULL, OFFSET(stats_in), FF_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX},
+{"qsquish", "how to keep quantizer between qmin and qmax (0 = clip, 1 = use differentiable function)", OFFSET(rc_qsquish), FF_OPT_TYPE_FLOAT, {.dbl = DEFAULT }, 0, 99, V|E},
+{"rc_qmod_amp", "experimental quantizer modulation", OFFSET(rc_qmod_amp), FF_OPT_TYPE_FLOAT, {.dbl = DEFAULT }, -FLT_MAX, FLT_MAX, V|E},
+{"rc_qmod_freq", "experimental quantizer modulation", OFFSET(rc_qmod_freq), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"rc_override_count", NULL, OFFSET(rc_override_count), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"rc_eq", "set rate control equation", OFFSET(rc_eq), FF_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, V|E},
+{"maxrate", "set max video bitrate tolerance (in bits/s)", OFFSET(rc_max_rate), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"minrate", "set min video bitrate tolerance (in bits/s)", OFFSET(rc_min_rate), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"bufsize", "set ratecontrol buffer size (in bits)", OFFSET(rc_buffer_size), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, A|V|E},
+{"rc_buf_aggressivity", "currently useless", OFFSET(rc_buffer_aggressivity), FF_OPT_TYPE_FLOAT, {.dbl = 1.0 }, -FLT_MAX, FLT_MAX, V|E},
+{"i_qfactor", "qp factor between P and I frames", OFFSET(i_quant_factor), FF_OPT_TYPE_FLOAT, {.dbl = -0.8 }, -FLT_MAX, FLT_MAX, V|E},
+{"i_qoffset", "qp offset between P and I frames", OFFSET(i_quant_offset), FF_OPT_TYPE_FLOAT, {.dbl = 0.0 }, -FLT_MAX, FLT_MAX, V|E},
+{"rc_init_cplx", "initial complexity for 1-pass encoding", OFFSET(rc_initial_cplx), FF_OPT_TYPE_FLOAT, {.dbl = DEFAULT }, -FLT_MAX, FLT_MAX, V|E},
+{"dct", "DCT algorithm", OFFSET(dct_algo), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, 0, INT_MAX, V|E, "dct"},
+{"auto", "autoselect a good one (default)", 0, FF_OPT_TYPE_CONST, {.dbl = FF_DCT_AUTO }, INT_MIN, INT_MAX, V|E, "dct"},
+{"fastint", "fast integer", 0, FF_OPT_TYPE_CONST, {.dbl = FF_DCT_FASTINT }, INT_MIN, INT_MAX, V|E, "dct"},
+{"int", "accurate integer", 0, FF_OPT_TYPE_CONST, {.dbl = FF_DCT_INT }, INT_MIN, INT_MAX, V|E, "dct"},
+{"mmx", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_DCT_MMX }, INT_MIN, INT_MAX, V|E, "dct"},
+{"mlib", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_DCT_MLIB }, INT_MIN, INT_MAX, V|E, "dct"},
+{"altivec", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_DCT_ALTIVEC }, INT_MIN, INT_MAX, V|E, "dct"},
+{"faan", "floating point AAN DCT", 0, FF_OPT_TYPE_CONST, {.dbl = FF_DCT_FAAN }, INT_MIN, INT_MAX, V|E, "dct"},
+{"lumi_mask", "compresses bright areas stronger than medium ones", OFFSET(lumi_masking), FF_OPT_TYPE_FLOAT, {.dbl = 0 }, -FLT_MAX, FLT_MAX, V|E},
+{"tcplx_mask", "temporal complexity masking", OFFSET(temporal_cplx_masking), FF_OPT_TYPE_FLOAT, {.dbl = 0 }, -FLT_MAX, FLT_MAX, V|E},
+{"scplx_mask", "spatial complexity masking", OFFSET(spatial_cplx_masking), FF_OPT_TYPE_FLOAT, {.dbl = 0 }, -FLT_MAX, FLT_MAX, V|E},
+{"p_mask", "inter masking", OFFSET(p_masking), FF_OPT_TYPE_FLOAT, {.dbl = 0 }, -FLT_MAX, FLT_MAX, V|E},
+{"dark_mask", "compresses dark areas stronger than medium ones", OFFSET(dark_masking), FF_OPT_TYPE_FLOAT, {.dbl = 0 }, -FLT_MAX, FLT_MAX, V|E},
+{"idct", "select IDCT implementation", OFFSET(idct_algo), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, 0, INT_MAX, V|E|D, "idct"},
+{"auto", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_IDCT_AUTO }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"int", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_IDCT_INT }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"simple", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_IDCT_SIMPLE }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"simplemmx", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_IDCT_SIMPLEMMX }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"libmpeg2mmx", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_IDCT_LIBMPEG2MMX }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"ps2", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_IDCT_PS2 }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"mlib", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_IDCT_MLIB }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"arm", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_IDCT_ARM }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"altivec", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_IDCT_ALTIVEC }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"sh4", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_IDCT_SH4 }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"simplearm", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_IDCT_SIMPLEARM }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"simplearmv5te", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_IDCT_SIMPLEARMV5TE }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"simplearmv6", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_IDCT_SIMPLEARMV6 }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"simpleneon", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_IDCT_SIMPLENEON }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"simplealpha", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_IDCT_SIMPLEALPHA }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"h264", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_IDCT_H264 }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"vp3", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_IDCT_VP3 }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"ipp", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_IDCT_IPP }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"xvidmmx", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_IDCT_XVIDMMX }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"faani", "floating point AAN IDCT", 0, FF_OPT_TYPE_CONST, {.dbl = FF_IDCT_FAAN }, INT_MIN, INT_MAX, V|D|E, "idct"},
+{"slice_count", NULL, OFFSET(slice_count), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"ec", "set error concealment strategy", OFFSET(error_concealment), FF_OPT_TYPE_FLAGS, {.dbl = 3 }, INT_MIN, INT_MAX, V|D, "ec"},
+{"guess_mvs", "iterative motion vector (MV) search (slow)", 0, FF_OPT_TYPE_CONST, {.dbl = FF_EC_GUESS_MVS }, INT_MIN, INT_MAX, V|D, "ec"},
+{"deblock", "use strong deblock filter for damaged MBs", 0, FF_OPT_TYPE_CONST, {.dbl = FF_EC_DEBLOCK }, INT_MIN, INT_MAX, V|D, "ec"},
+{"bits_per_coded_sample", NULL, OFFSET(bits_per_coded_sample), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"pred", "prediction method", OFFSET(prediction_method), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E, "pred"},
+{"left", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_PRED_LEFT }, INT_MIN, INT_MAX, V|E, "pred"},
+{"plane", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_PRED_PLANE }, INT_MIN, INT_MAX, V|E, "pred"},
+{"median", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_PRED_MEDIAN }, INT_MIN, INT_MAX, V|E, "pred"},
+{"aspect", "sample aspect ratio", OFFSET(sample_aspect_ratio), FF_OPT_TYPE_RATIONAL, {.dbl = 0}, 0, 10, V|E},
+{"debug", "print specific debug info", OFFSET(debug), FF_OPT_TYPE_FLAGS, {.dbl = DEFAULT }, 0, INT_MAX, V|A|S|E|D, "debug"},
+{"pict", "picture info", 0, FF_OPT_TYPE_CONST, {.dbl = FF_DEBUG_PICT_INFO }, INT_MIN, INT_MAX, V|D, "debug"},
+{"rc", "rate control", 0, FF_OPT_TYPE_CONST, {.dbl = FF_DEBUG_RC }, INT_MIN, INT_MAX, V|E, "debug"},
+{"bitstream", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_DEBUG_BITSTREAM }, INT_MIN, INT_MAX, V|D, "debug"},
+{"mb_type", "macroblock (MB) type", 0, FF_OPT_TYPE_CONST, {.dbl = FF_DEBUG_MB_TYPE }, INT_MIN, INT_MAX, V|D, "debug"},
+{"qp", "per-block quantization parameter (QP)", 0, FF_OPT_TYPE_CONST, {.dbl = FF_DEBUG_QP }, INT_MIN, INT_MAX, V|D, "debug"},
+{"mv", "motion vector", 0, FF_OPT_TYPE_CONST, {.dbl = FF_DEBUG_MV }, INT_MIN, INT_MAX, V|D, "debug"},
+{"dct_coeff", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_DEBUG_DCT_COEFF }, INT_MIN, INT_MAX, V|D, "debug"},
+{"skip", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_DEBUG_SKIP }, INT_MIN, INT_MAX, V|D, "debug"},
+{"startcode", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_DEBUG_STARTCODE }, INT_MIN, INT_MAX, V|D, "debug"},
+{"pts", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_DEBUG_PTS }, INT_MIN, INT_MAX, V|D, "debug"},
+{"er", "error recognition", 0, FF_OPT_TYPE_CONST, {.dbl = FF_DEBUG_ER }, INT_MIN, INT_MAX, V|D, "debug"},
+{"mmco", "memory management control operations (H.264)", 0, FF_OPT_TYPE_CONST, {.dbl = FF_DEBUG_MMCO }, INT_MIN, INT_MAX, V|D, "debug"},
+{"bugs", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_DEBUG_BUGS }, INT_MIN, INT_MAX, V|D, "debug"},
+{"vis_qp", "visualize quantization parameter (QP), lower QP are tinted greener", 0, FF_OPT_TYPE_CONST, {.dbl = FF_DEBUG_VIS_QP }, INT_MIN, INT_MAX, V|D, "debug"},
+{"vis_mb_type", "visualize block types", 0, FF_OPT_TYPE_CONST, {.dbl = FF_DEBUG_VIS_MB_TYPE }, INT_MIN, INT_MAX, V|D, "debug"},
+{"buffers", "picture buffer allocations", 0, FF_OPT_TYPE_CONST, {.dbl = FF_DEBUG_BUFFERS }, INT_MIN, INT_MAX, V|D, "debug"},
+{"thread_ops", "threading operations", 0, FF_OPT_TYPE_CONST, {.dbl = FF_DEBUG_THREADS }, INT_MIN, INT_MAX, V|D, "debug"},
+{"vismv", "visualize motion vectors (MVs)", OFFSET(debug_mv), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, 0, INT_MAX, V|D, "debug_mv"},
+{"pf", "forward predicted MVs of P-frames", 0, FF_OPT_TYPE_CONST, {.dbl = FF_DEBUG_VIS_MV_P_FOR }, INT_MIN, INT_MAX, V|D, "debug_mv"},
+{"bf", "forward predicted MVs of B-frames", 0, FF_OPT_TYPE_CONST, {.dbl = FF_DEBUG_VIS_MV_B_FOR }, INT_MIN, INT_MAX, V|D, "debug_mv"},
+{"bb", "backward predicted MVs of B-frames", 0, FF_OPT_TYPE_CONST, {.dbl = FF_DEBUG_VIS_MV_B_BACK }, INT_MIN, INT_MAX, V|D, "debug_mv"},
+{"cmp", "full pel me compare function", OFFSET(me_cmp), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"subcmp", "sub pel me compare function", OFFSET(me_sub_cmp), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"mbcmp", "macroblock compare function", OFFSET(mb_cmp), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"ildctcmp", "interlaced dct compare function", OFFSET(ildct_cmp), FF_OPT_TYPE_INT, {.dbl = FF_CMP_VSAD }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"dia_size", "diamond type & size for motion estimation", OFFSET(dia_size), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"last_pred", "amount of motion predictors from the previous frame", OFFSET(last_predictor_count), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"preme", "pre motion estimation", OFFSET(pre_me), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"precmp", "pre motion estimation compare function", OFFSET(me_pre_cmp), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"sad", "sum of absolute differences, fast (default)", 0, FF_OPT_TYPE_CONST, {.dbl = FF_CMP_SAD }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"sse", "sum of squared errors", 0, FF_OPT_TYPE_CONST, {.dbl = FF_CMP_SSE }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"satd", "sum of absolute Hadamard transformed differences", 0, FF_OPT_TYPE_CONST, {.dbl = FF_CMP_SATD }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"dct", "sum of absolute DCT transformed differences", 0, FF_OPT_TYPE_CONST, {.dbl = FF_CMP_DCT }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"psnr", "sum of squared quantization errors (avoid, low quality)", 0, FF_OPT_TYPE_CONST, {.dbl = FF_CMP_PSNR }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"bit", "number of bits needed for the block", 0, FF_OPT_TYPE_CONST, {.dbl = FF_CMP_BIT }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"rd", "rate distortion optimal, slow", 0, FF_OPT_TYPE_CONST, {.dbl = FF_CMP_RD }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"zero", "0", 0, FF_OPT_TYPE_CONST, {.dbl = FF_CMP_ZERO }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"vsad", "sum of absolute vertical differences", 0, FF_OPT_TYPE_CONST, {.dbl = FF_CMP_VSAD }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"vsse", "sum of squared vertical differences", 0, FF_OPT_TYPE_CONST, {.dbl = FF_CMP_VSSE }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"nsse", "noise preserving sum of squared differences", 0, FF_OPT_TYPE_CONST, {.dbl = FF_CMP_NSSE }, INT_MIN, INT_MAX, V|E, "cmp_func"},
#if CONFIG_SNOW_ENCODER
-{"w53", "5/3 wavelet, only used in snow", 0, FF_OPT_TYPE_CONST, FF_CMP_W53, INT_MIN, INT_MAX, V|E, "cmp_func"},
-{"w97", "9/7 wavelet, only used in snow", 0, FF_OPT_TYPE_CONST, FF_CMP_W97, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"w53", "5/3 wavelet, only used in snow", 0, FF_OPT_TYPE_CONST, {.dbl = FF_CMP_W53 }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"w97", "9/7 wavelet, only used in snow", 0, FF_OPT_TYPE_CONST, {.dbl = FF_CMP_W97 }, INT_MIN, INT_MAX, V|E, "cmp_func"},
#endif
-{"dctmax", NULL, 0, FF_OPT_TYPE_CONST, FF_CMP_DCTMAX, INT_MIN, INT_MAX, V|E, "cmp_func"},
-{"chroma", NULL, 0, FF_OPT_TYPE_CONST, FF_CMP_CHROMA, INT_MIN, INT_MAX, V|E, "cmp_func"},
-{"pre_dia_size", "diamond type & size for motion estimation pre-pass", OFFSET(pre_dia_size), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"subq", "sub pel motion estimation quality", OFFSET(me_subpel_quality), FF_OPT_TYPE_INT, 8, INT_MIN, INT_MAX, V|E},
-{"dtg_active_format", NULL, OFFSET(dtg_active_format), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"me_range", "limit motion vectors range (1023 for DivX player)", OFFSET(me_range), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"ibias", "intra quant bias", OFFSET(intra_quant_bias), FF_OPT_TYPE_INT, FF_DEFAULT_QUANT_BIAS, INT_MIN, INT_MAX, V|E},
-{"pbias", "inter quant bias", OFFSET(inter_quant_bias), FF_OPT_TYPE_INT, FF_DEFAULT_QUANT_BIAS, INT_MIN, INT_MAX, V|E},
-{"color_table_id", NULL, OFFSET(color_table_id), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"internal_buffer_count", NULL, OFFSET(internal_buffer_count), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"global_quality", NULL, OFFSET(global_quality), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"coder", NULL, OFFSET(coder_type), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E, "coder"},
-{"vlc", "variable length coder / huffman coder", 0, FF_OPT_TYPE_CONST, FF_CODER_TYPE_VLC, INT_MIN, INT_MAX, V|E, "coder"},
-{"ac", "arithmetic coder", 0, FF_OPT_TYPE_CONST, FF_CODER_TYPE_AC, INT_MIN, INT_MAX, V|E, "coder"},
-{"raw", "raw (no encoding)", 0, FF_OPT_TYPE_CONST, FF_CODER_TYPE_RAW, INT_MIN, INT_MAX, V|E, "coder"},
-{"rle", "run-length coder", 0, FF_OPT_TYPE_CONST, FF_CODER_TYPE_RLE, INT_MIN, INT_MAX, V|E, "coder"},
-{"deflate", "deflate-based coder", 0, FF_OPT_TYPE_CONST, FF_CODER_TYPE_DEFLATE, INT_MIN, INT_MAX, V|E, "coder"},
-{"context", "context model", OFFSET(context_model), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"slice_flags", NULL, OFFSET(slice_flags), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"xvmc_acceleration", NULL, OFFSET(xvmc_acceleration), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"mbd", "macroblock decision algorithm (high quality mode)", OFFSET(mb_decision), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E, "mbd"},
-{"simple", "use mbcmp (default)", 0, FF_OPT_TYPE_CONST, FF_MB_DECISION_SIMPLE, INT_MIN, INT_MAX, V|E, "mbd"},
-{"bits", "use fewest bits", 0, FF_OPT_TYPE_CONST, FF_MB_DECISION_BITS, INT_MIN, INT_MAX, V|E, "mbd"},
-{"rd", "use best rate distortion", 0, FF_OPT_TYPE_CONST, FF_MB_DECISION_RD, INT_MIN, INT_MAX, V|E, "mbd"},
-{"stream_codec_tag", NULL, OFFSET(stream_codec_tag), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"sc_threshold", "scene change threshold", OFFSET(scenechange_threshold), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"lmin", "min lagrange factor (VBR)", OFFSET(lmin), FF_OPT_TYPE_INT, 2*FF_QP2LAMBDA, 0, INT_MAX, V|E},
-{"lmax", "max lagrange factor (VBR)", OFFSET(lmax), FF_OPT_TYPE_INT, 31*FF_QP2LAMBDA, 0, INT_MAX, V|E},
-{"nr", "noise reduction", OFFSET(noise_reduction), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"rc_init_occupancy", "number of bits which should be loaded into the rc buffer before decoding starts", OFFSET(rc_initial_buffer_occupancy), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"inter_threshold", NULL, OFFSET(inter_threshold), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"flags2", NULL, OFFSET(flags2), FF_OPT_TYPE_FLAGS, CODEC_FLAG2_FASTPSKIP|CODEC_FLAG2_BIT_RESERVOIR|CODEC_FLAG2_PSY|CODEC_FLAG2_MBTREE, 0, UINT_MAX, V|A|E|D, "flags2"},
-{"error", NULL, OFFSET(error_rate), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
+{"dctmax", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_CMP_DCTMAX }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"chroma", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_CMP_CHROMA }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"pre_dia_size", "diamond type & size for motion estimation pre-pass", OFFSET(pre_dia_size), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"subq", "sub pel motion estimation quality", OFFSET(me_subpel_quality), FF_OPT_TYPE_INT, {.dbl = 8 }, INT_MIN, INT_MAX, V|E},
+{"dtg_active_format", NULL, OFFSET(dtg_active_format), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"me_range", "limit motion vectors range (1023 for DivX player)", OFFSET(me_range), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"ibias", "intra quant bias", OFFSET(intra_quant_bias), FF_OPT_TYPE_INT, {.dbl = FF_DEFAULT_QUANT_BIAS }, INT_MIN, INT_MAX, V|E},
+{"pbias", "inter quant bias", OFFSET(inter_quant_bias), FF_OPT_TYPE_INT, {.dbl = FF_DEFAULT_QUANT_BIAS }, INT_MIN, INT_MAX, V|E},
+{"color_table_id", NULL, OFFSET(color_table_id), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"internal_buffer_count", NULL, OFFSET(internal_buffer_count), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"global_quality", NULL, OFFSET(global_quality), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"coder", NULL, OFFSET(coder_type), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E, "coder"},
+{"vlc", "variable length coder / huffman coder", 0, FF_OPT_TYPE_CONST, {.dbl = FF_CODER_TYPE_VLC }, INT_MIN, INT_MAX, V|E, "coder"},
+{"ac", "arithmetic coder", 0, FF_OPT_TYPE_CONST, {.dbl = FF_CODER_TYPE_AC }, INT_MIN, INT_MAX, V|E, "coder"},
+{"raw", "raw (no encoding)", 0, FF_OPT_TYPE_CONST, {.dbl = FF_CODER_TYPE_RAW }, INT_MIN, INT_MAX, V|E, "coder"},
+{"rle", "run-length coder", 0, FF_OPT_TYPE_CONST, {.dbl = FF_CODER_TYPE_RLE }, INT_MIN, INT_MAX, V|E, "coder"},
+{"deflate", "deflate-based coder", 0, FF_OPT_TYPE_CONST, {.dbl = FF_CODER_TYPE_DEFLATE }, INT_MIN, INT_MAX, V|E, "coder"},
+{"context", "context model", OFFSET(context_model), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"slice_flags", NULL, OFFSET(slice_flags), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"xvmc_acceleration", NULL, OFFSET(xvmc_acceleration), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"mbd", "macroblock decision algorithm (high quality mode)", OFFSET(mb_decision), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E, "mbd"},
+{"simple", "use mbcmp (default)", 0, FF_OPT_TYPE_CONST, {.dbl = FF_MB_DECISION_SIMPLE }, INT_MIN, INT_MAX, V|E, "mbd"},
+{"bits", "use fewest bits", 0, FF_OPT_TYPE_CONST, {.dbl = FF_MB_DECISION_BITS }, INT_MIN, INT_MAX, V|E, "mbd"},
+{"rd", "use best rate distortion", 0, FF_OPT_TYPE_CONST, {.dbl = FF_MB_DECISION_RD }, INT_MIN, INT_MAX, V|E, "mbd"},
+{"stream_codec_tag", NULL, OFFSET(stream_codec_tag), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"sc_threshold", "scene change threshold", OFFSET(scenechange_threshold), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"lmin", "min lagrange factor (VBR)", OFFSET(lmin), FF_OPT_TYPE_INT, {.dbl = 2*FF_QP2LAMBDA }, 0, INT_MAX, V|E},
+{"lmax", "max lagrange factor (VBR)", OFFSET(lmax), FF_OPT_TYPE_INT, {.dbl = 31*FF_QP2LAMBDA }, 0, INT_MAX, V|E},
+{"nr", "noise reduction", OFFSET(noise_reduction), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"rc_init_occupancy", "number of bits which should be loaded into the rc buffer before decoding starts", OFFSET(rc_initial_buffer_occupancy), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"inter_threshold", NULL, OFFSET(inter_threshold), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"flags2", NULL, OFFSET(flags2), FF_OPT_TYPE_FLAGS, {.dbl = CODEC_FLAG2_FASTPSKIP|CODEC_FLAG2_BIT_RESERVOIR|CODEC_FLAG2_PSY|CODEC_FLAG2_MBTREE }, 0, UINT_MAX, V|A|E|D, "flags2"},
+{"error", NULL, OFFSET(error_rate), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
#if FF_API_ANTIALIAS_ALGO
-{"antialias", "MP3 antialias algorithm", OFFSET(antialias_algo), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|D, "aa"},
+{"antialias", "MP3 antialias algorithm", OFFSET(antialias_algo), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|D, "aa"},
+{"auto", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_AA_AUTO }, INT_MIN, INT_MAX, V|D, "aa"},
+{"fastint", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_AA_FASTINT }, INT_MIN, INT_MAX, V|D, "aa"},
+{"int", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_AA_INT }, INT_MIN, INT_MAX, V|D, "aa"},
+{"float", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_AA_FLOAT }, INT_MIN, INT_MAX, V|D, "aa"},
#endif
-{"auto", NULL, 0, FF_OPT_TYPE_CONST, FF_AA_AUTO, INT_MIN, INT_MAX, V|D, "aa"},
-{"fastint", NULL, 0, FF_OPT_TYPE_CONST, FF_AA_FASTINT, INT_MIN, INT_MAX, V|D, "aa"},
-{"int", NULL, 0, FF_OPT_TYPE_CONST, FF_AA_INT, INT_MIN, INT_MAX, V|D, "aa"},
-{"float", NULL, 0, FF_OPT_TYPE_CONST, FF_AA_FLOAT, INT_MIN, INT_MAX, V|D, "aa"},
-{"qns", "quantizer noise shaping", OFFSET(quantizer_noise_shaping), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"threads", NULL, OFFSET(thread_count), FF_OPT_TYPE_INT, 1, INT_MIN, INT_MAX, V|E|D},
-{"me_threshold", "motion estimaton threshold", OFFSET(me_threshold), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"mb_threshold", "macroblock threshold", OFFSET(mb_threshold), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"dc", "intra_dc_precision", OFFSET(intra_dc_precision), FF_OPT_TYPE_INT, 0, INT_MIN, INT_MAX, V|E},
-{"nssew", "nsse weight", OFFSET(nsse_weight), FF_OPT_TYPE_INT, 8, INT_MIN, INT_MAX, V|E},
-{"skip_top", "number of macroblock rows at the top which are skipped", OFFSET(skip_top), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|D},
-{"skip_bottom", "number of macroblock rows at the bottom which are skipped", OFFSET(skip_bottom), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|D},
-{"profile", NULL, OFFSET(profile), FF_OPT_TYPE_INT, FF_PROFILE_UNKNOWN, INT_MIN, INT_MAX, V|A|E, "profile"},
-{"unknown", NULL, 0, FF_OPT_TYPE_CONST, FF_PROFILE_UNKNOWN, INT_MIN, INT_MAX, V|A|E, "profile"},
-{"aac_main", NULL, 0, FF_OPT_TYPE_CONST, FF_PROFILE_AAC_MAIN, INT_MIN, INT_MAX, A|E, "profile"},
-{"aac_low", NULL, 0, FF_OPT_TYPE_CONST, FF_PROFILE_AAC_LOW, INT_MIN, INT_MAX, A|E, "profile"},
-{"aac_ssr", NULL, 0, FF_OPT_TYPE_CONST, FF_PROFILE_AAC_SSR, INT_MIN, INT_MAX, A|E, "profile"},
-{"aac_ltp", NULL, 0, FF_OPT_TYPE_CONST, FF_PROFILE_AAC_LTP, INT_MIN, INT_MAX, A|E, "profile"},
-{"dts", NULL, 0, FF_OPT_TYPE_CONST, FF_PROFILE_DTS, INT_MIN, INT_MAX, A|E, "profile"},
-{"dts_es", NULL, 0, FF_OPT_TYPE_CONST, FF_PROFILE_DTS_ES, INT_MIN, INT_MAX, A|E, "profile"},
-{"dts_96_24", NULL, 0, FF_OPT_TYPE_CONST, FF_PROFILE_DTS_96_24, INT_MIN, INT_MAX, A|E, "profile"},
-{"dts_hd_hra", NULL, 0, FF_OPT_TYPE_CONST, FF_PROFILE_DTS_HD_HRA, INT_MIN, INT_MAX, A|E, "profile"},
-{"dts_hd_ma", NULL, 0, FF_OPT_TYPE_CONST, FF_PROFILE_DTS_HD_MA, INT_MIN, INT_MAX, A|E, "profile"},
-{"level", NULL, OFFSET(level), FF_OPT_TYPE_INT, FF_LEVEL_UNKNOWN, INT_MIN, INT_MAX, V|A|E, "level"},
-{"unknown", NULL, 0, FF_OPT_TYPE_CONST, FF_LEVEL_UNKNOWN, INT_MIN, INT_MAX, V|A|E, "level"},
-{"lowres", "decode at 1= 1/2, 2=1/4, 3=1/8 resolutions", OFFSET(lowres), FF_OPT_TYPE_INT, 0, 0, INT_MAX, V|A|D},
-{"skip_threshold", "frame skip threshold", OFFSET(frame_skip_threshold), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"skip_factor", "frame skip factor", OFFSET(frame_skip_factor), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"skip_exp", "frame skip exponent", OFFSET(frame_skip_exp), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"skipcmp", "frame skip compare function", OFFSET(frame_skip_cmp), FF_OPT_TYPE_INT, FF_CMP_DCTMAX, INT_MIN, INT_MAX, V|E, "cmp_func"},
-{"border_mask", "increases the quantizer for macroblocks close to borders", OFFSET(border_masking), FF_OPT_TYPE_FLOAT, DEFAULT, -FLT_MAX, FLT_MAX, V|E},
-{"mblmin", "min macroblock lagrange factor (VBR)", OFFSET(mb_lmin), FF_OPT_TYPE_INT, FF_QP2LAMBDA * 2, 1, FF_LAMBDA_MAX, V|E},
-{"mblmax", "max macroblock lagrange factor (VBR)", OFFSET(mb_lmax), FF_OPT_TYPE_INT, FF_QP2LAMBDA * 31, 1, FF_LAMBDA_MAX, V|E},
-{"mepc", "motion estimation bitrate penalty compensation (1.0 = 256)", OFFSET(me_penalty_compensation), FF_OPT_TYPE_INT, 256, INT_MIN, INT_MAX, V|E},
-{"skip_loop_filter", NULL, OFFSET(skip_loop_filter), FF_OPT_TYPE_INT, AVDISCARD_DEFAULT, INT_MIN, INT_MAX, V|D, "avdiscard"},
-{"skip_idct" , NULL, OFFSET(skip_idct) , FF_OPT_TYPE_INT, AVDISCARD_DEFAULT, INT_MIN, INT_MAX, V|D, "avdiscard"},
-{"skip_frame" , NULL, OFFSET(skip_frame) , FF_OPT_TYPE_INT, AVDISCARD_DEFAULT, INT_MIN, INT_MAX, V|D, "avdiscard"},
-{"none" , NULL, 0, FF_OPT_TYPE_CONST, AVDISCARD_NONE , INT_MIN, INT_MAX, V|D, "avdiscard"},
-{"default" , NULL, 0, FF_OPT_TYPE_CONST, AVDISCARD_DEFAULT, INT_MIN, INT_MAX, V|D, "avdiscard"},
-{"noref" , NULL, 0, FF_OPT_TYPE_CONST, AVDISCARD_NONREF , INT_MIN, INT_MAX, V|D, "avdiscard"},
-{"bidir" , NULL, 0, FF_OPT_TYPE_CONST, AVDISCARD_BIDIR , INT_MIN, INT_MAX, V|D, "avdiscard"},
-{"nokey" , NULL, 0, FF_OPT_TYPE_CONST, AVDISCARD_NONKEY , INT_MIN, INT_MAX, V|D, "avdiscard"},
-{"all" , NULL, 0, FF_OPT_TYPE_CONST, AVDISCARD_ALL , INT_MIN, INT_MAX, V|D, "avdiscard"},
-{"bidir_refine", "refine the two motion vectors used in bidirectional macroblocks", OFFSET(bidir_refine), FF_OPT_TYPE_INT, 1, 0, 4, V|E},
-{"brd_scale", "downscales frames for dynamic B-frame decision", OFFSET(brd_scale), FF_OPT_TYPE_INT, DEFAULT, 0, 10, V|E},
-{"crf", "enables constant quality mode, and selects the quality (x264/VP8)", OFFSET(crf), FF_OPT_TYPE_FLOAT, DEFAULT, 0, 63, V|E},
-{"cqp", "constant quantization parameter rate control method", OFFSET(cqp), FF_OPT_TYPE_INT, -1, INT_MIN, INT_MAX, V|E},
-{"keyint_min", "minimum interval between IDR-frames (x264)", OFFSET(keyint_min), FF_OPT_TYPE_INT, 25, INT_MIN, INT_MAX, V|E},
-{"refs", "reference frames to consider for motion compensation (Snow)", OFFSET(refs), FF_OPT_TYPE_INT, 1, INT_MIN, INT_MAX, V|E},
-{"chromaoffset", "chroma qp offset from luma", OFFSET(chromaoffset), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"bframebias", "influences how often B-frames are used", OFFSET(bframebias), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"trellis", "rate-distortion optimal quantization", OFFSET(trellis), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|A|E},
-{"directpred", "direct mv prediction mode - 0 (none), 1 (spatial), 2 (temporal), 3 (auto)", OFFSET(directpred), FF_OPT_TYPE_INT, 2, INT_MIN, INT_MAX, V|E},
-{"bpyramid", "allows B-frames to be used as references for predicting", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_BPYRAMID, INT_MIN, INT_MAX, V|E, "flags2"},
-{"wpred", "weighted biprediction for b-frames (H.264)", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_WPRED, INT_MIN, INT_MAX, V|E, "flags2"},
-{"mixed_refs", "one reference per partition, as opposed to one reference per macroblock", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_MIXED_REFS, INT_MIN, INT_MAX, V|E, "flags2"},
-{"dct8x8", "high profile 8x8 transform (H.264)", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_8X8DCT, INT_MIN, INT_MAX, V|E, "flags2"},
-{"fastpskip", "fast pskip (H.264)", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_FASTPSKIP, INT_MIN, INT_MAX, V|E, "flags2"},
-{"aud", "access unit delimiters (H.264)", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_AUD, INT_MIN, INT_MAX, V|E, "flags2"},
-{"skiprd", "RD optimal MB level residual skipping", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_SKIP_RD, INT_MIN, INT_MAX, V|E, "flags2"},
-{"complexityblur", "reduce fluctuations in qp (before curve compression)", OFFSET(complexityblur), FF_OPT_TYPE_FLOAT, 20.0, FLT_MIN, FLT_MAX, V|E},
-{"deblockalpha", "in-loop deblocking filter alphac0 parameter", OFFSET(deblockalpha), FF_OPT_TYPE_INT, DEFAULT, -6, 6, V|E},
-{"deblockbeta", "in-loop deblocking filter beta parameter", OFFSET(deblockbeta), FF_OPT_TYPE_INT, DEFAULT, -6, 6, V|E},
-{"partitions", "macroblock subpartition sizes to consider", OFFSET(partitions), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, V|E, "partitions"},
-{"parti4x4", NULL, 0, FF_OPT_TYPE_CONST, X264_PART_I4X4, INT_MIN, INT_MAX, V|E, "partitions"},
-{"parti8x8", NULL, 0, FF_OPT_TYPE_CONST, X264_PART_I8X8, INT_MIN, INT_MAX, V|E, "partitions"},
-{"partp4x4", NULL, 0, FF_OPT_TYPE_CONST, X264_PART_P4X4, INT_MIN, INT_MAX, V|E, "partitions"},
-{"partp8x8", NULL, 0, FF_OPT_TYPE_CONST, X264_PART_P8X8, INT_MIN, INT_MAX, V|E, "partitions"},
-{"partb8x8", NULL, 0, FF_OPT_TYPE_CONST, X264_PART_B8X8, INT_MIN, INT_MAX, V|E, "partitions"},
-{"sc_factor", "multiplied by qscale for each frame and added to scene_change_score", OFFSET(scenechange_factor), FF_OPT_TYPE_INT, 6, 0, INT_MAX, V|E},
-{"mv0_threshold", NULL, OFFSET(mv0_threshold), FF_OPT_TYPE_INT, 256, 0, INT_MAX, V|E},
-{"ivlc", "intra vlc table", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_INTRA_VLC, INT_MIN, INT_MAX, V|E, "flags2"},
-{"b_sensitivity", "adjusts sensitivity of b_frame_strategy 1", OFFSET(b_sensitivity), FF_OPT_TYPE_INT, 40, 1, INT_MAX, V|E},
-{"compression_level", NULL, OFFSET(compression_level), FF_OPT_TYPE_INT, FF_COMPRESSION_DEFAULT, INT_MIN, INT_MAX, V|A|E},
-#if FF_API_USE_LPC
-{"use_lpc", "sets whether to use LPC mode (FLAC)", OFFSET(use_lpc), FF_OPT_TYPE_INT, -1, INT_MIN, INT_MAX, A|E},
+{"qns", "quantizer noise shaping", OFFSET(quantizer_noise_shaping), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"threads", NULL, OFFSET(thread_count), FF_OPT_TYPE_INT, {.dbl = 1 }, INT_MIN, INT_MAX, V|E|D},
+{"me_threshold", "motion estimaton threshold", OFFSET(me_threshold), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"mb_threshold", "macroblock threshold", OFFSET(mb_threshold), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"dc", "intra_dc_precision", OFFSET(intra_dc_precision), FF_OPT_TYPE_INT, {.dbl = 0 }, INT_MIN, INT_MAX, V|E},
+{"nssew", "nsse weight", OFFSET(nsse_weight), FF_OPT_TYPE_INT, {.dbl = 8 }, INT_MIN, INT_MAX, V|E},
+{"skip_top", "number of macroblock rows at the top which are skipped", OFFSET(skip_top), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|D},
+{"skip_bottom", "number of macroblock rows at the bottom which are skipped", OFFSET(skip_bottom), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|D},
+{"profile", NULL, OFFSET(profile), FF_OPT_TYPE_INT, {.dbl = FF_PROFILE_UNKNOWN }, INT_MIN, INT_MAX, V|A|E, "profile"},
+{"unknown", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_PROFILE_UNKNOWN }, INT_MIN, INT_MAX, V|A|E, "profile"},
+{"aac_main", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_PROFILE_AAC_MAIN }, INT_MIN, INT_MAX, A|E, "profile"},
+{"aac_low", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_PROFILE_AAC_LOW }, INT_MIN, INT_MAX, A|E, "profile"},
+{"aac_ssr", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_PROFILE_AAC_SSR }, INT_MIN, INT_MAX, A|E, "profile"},
+{"aac_ltp", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_PROFILE_AAC_LTP }, INT_MIN, INT_MAX, A|E, "profile"},
+{"dts", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_PROFILE_DTS }, INT_MIN, INT_MAX, A|E, "profile"},
+{"dts_es", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_PROFILE_DTS_ES }, INT_MIN, INT_MAX, A|E, "profile"},
+{"dts_96_24", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_PROFILE_DTS_96_24 }, INT_MIN, INT_MAX, A|E, "profile"},
+{"dts_hd_hra", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_PROFILE_DTS_HD_HRA }, INT_MIN, INT_MAX, A|E, "profile"},
+{"dts_hd_ma", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_PROFILE_DTS_HD_MA }, INT_MIN, INT_MAX, A|E, "profile"},
+{"level", NULL, OFFSET(level), FF_OPT_TYPE_INT, {.dbl = FF_LEVEL_UNKNOWN }, INT_MIN, INT_MAX, V|A|E, "level"},
+{"unknown", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_LEVEL_UNKNOWN }, INT_MIN, INT_MAX, V|A|E, "level"},
+{"lowres", "decode at 1= 1/2, 2=1/4, 3=1/8 resolutions", OFFSET(lowres), FF_OPT_TYPE_INT, {.dbl = 0 }, 0, INT_MAX, V|A|D},
+{"skip_threshold", "frame skip threshold", OFFSET(frame_skip_threshold), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"skip_factor", "frame skip factor", OFFSET(frame_skip_factor), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"skip_exp", "frame skip exponent", OFFSET(frame_skip_exp), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"skipcmp", "frame skip compare function", OFFSET(frame_skip_cmp), FF_OPT_TYPE_INT, {.dbl = FF_CMP_DCTMAX }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"border_mask", "increases the quantizer for macroblocks close to borders", OFFSET(border_masking), FF_OPT_TYPE_FLOAT, {.dbl = DEFAULT }, -FLT_MAX, FLT_MAX, V|E},
+{"mblmin", "min macroblock lagrange factor (VBR)", OFFSET(mb_lmin), FF_OPT_TYPE_INT, {.dbl = FF_QP2LAMBDA * 2 }, 1, FF_LAMBDA_MAX, V|E},
+{"mblmax", "max macroblock lagrange factor (VBR)", OFFSET(mb_lmax), FF_OPT_TYPE_INT, {.dbl = FF_QP2LAMBDA * 31 }, 1, FF_LAMBDA_MAX, V|E},
+{"mepc", "motion estimation bitrate penalty compensation (1.0 = 256)", OFFSET(me_penalty_compensation), FF_OPT_TYPE_INT, {.dbl = 256 }, INT_MIN, INT_MAX, V|E},
+{"skip_loop_filter", NULL, OFFSET(skip_loop_filter), FF_OPT_TYPE_INT, {.dbl = AVDISCARD_DEFAULT }, INT_MIN, INT_MAX, V|D, "avdiscard"},
+{"skip_idct" , NULL, OFFSET(skip_idct) , FF_OPT_TYPE_INT, {.dbl = AVDISCARD_DEFAULT }, INT_MIN, INT_MAX, V|D, "avdiscard"},
+{"skip_frame" , NULL, OFFSET(skip_frame) , FF_OPT_TYPE_INT, {.dbl = AVDISCARD_DEFAULT }, INT_MIN, INT_MAX, V|D, "avdiscard"},
+{"none" , NULL, 0, FF_OPT_TYPE_CONST, {.dbl = AVDISCARD_NONE }, INT_MIN, INT_MAX, V|D, "avdiscard"},
+{"default" , NULL, 0, FF_OPT_TYPE_CONST, {.dbl = AVDISCARD_DEFAULT }, INT_MIN, INT_MAX, V|D, "avdiscard"},
+{"noref" , NULL, 0, FF_OPT_TYPE_CONST, {.dbl = AVDISCARD_NONREF }, INT_MIN, INT_MAX, V|D, "avdiscard"},
+{"bidir" , NULL, 0, FF_OPT_TYPE_CONST, {.dbl = AVDISCARD_BIDIR }, INT_MIN, INT_MAX, V|D, "avdiscard"},
+{"nokey" , NULL, 0, FF_OPT_TYPE_CONST, {.dbl = AVDISCARD_NONKEY }, INT_MIN, INT_MAX, V|D, "avdiscard"},
+{"all" , NULL, 0, FF_OPT_TYPE_CONST, {.dbl = AVDISCARD_ALL }, INT_MIN, INT_MAX, V|D, "avdiscard"},
+{"bidir_refine", "refine the two motion vectors used in bidirectional macroblocks", OFFSET(bidir_refine), FF_OPT_TYPE_INT, {.dbl = 1 }, 0, 4, V|E},
+{"brd_scale", "downscales frames for dynamic B-frame decision", OFFSET(brd_scale), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, 0, 10, V|E},
+{"crf", "enables constant quality mode, and selects the quality (x264/VP8)", OFFSET(crf), FF_OPT_TYPE_FLOAT, {.dbl = DEFAULT }, 0, 63, V|E},
+{"cqp", "constant quantization parameter rate control method", OFFSET(cqp), FF_OPT_TYPE_INT, {.dbl = -1 }, INT_MIN, INT_MAX, V|E},
+{"keyint_min", "minimum interval between IDR-frames (x264)", OFFSET(keyint_min), FF_OPT_TYPE_INT, {.dbl = 25 }, INT_MIN, INT_MAX, V|E},
+{"refs", "reference frames to consider for motion compensation (Snow)", OFFSET(refs), FF_OPT_TYPE_INT, {.dbl = 1 }, INT_MIN, INT_MAX, V|E},
+{"chromaoffset", "chroma qp offset from luma", OFFSET(chromaoffset), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"bframebias", "influences how often B-frames are used", OFFSET(bframebias), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"trellis", "rate-distortion optimal quantization", OFFSET(trellis), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|A|E},
+{"directpred", "direct mv prediction mode - 0 (none), 1 (spatial), 2 (temporal), 3 (auto)", OFFSET(directpred), FF_OPT_TYPE_INT, {.dbl = 2 }, INT_MIN, INT_MAX, V|E},
+{"bpyramid", "allows B-frames to be used as references for predicting", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_BPYRAMID }, INT_MIN, INT_MAX, V|E, "flags2"},
+{"wpred", "weighted biprediction for b-frames (H.264)", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_WPRED }, INT_MIN, INT_MAX, V|E, "flags2"},
+{"mixed_refs", "one reference per partition, as opposed to one reference per macroblock", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_MIXED_REFS }, INT_MIN, INT_MAX, V|E, "flags2"},
+{"dct8x8", "high profile 8x8 transform (H.264)", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_8X8DCT }, INT_MIN, INT_MAX, V|E, "flags2"},
+{"fastpskip", "fast pskip (H.264)", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_FASTPSKIP }, INT_MIN, INT_MAX, V|E, "flags2"},
+{"aud", "access unit delimiters (H.264)", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_AUD }, INT_MIN, INT_MAX, V|E, "flags2"},
+{"skiprd", "RD optimal MB level residual skipping", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_SKIP_RD }, INT_MIN, INT_MAX, V|E, "flags2"},
+{"complexityblur", "reduce fluctuations in qp (before curve compression)", OFFSET(complexityblur), FF_OPT_TYPE_FLOAT, {.dbl = 20.0 }, FLT_MIN, FLT_MAX, V|E},
+{"deblockalpha", "in-loop deblocking filter alphac0 parameter", OFFSET(deblockalpha), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, -6, 6, V|E},
+{"deblockbeta", "in-loop deblocking filter beta parameter", OFFSET(deblockbeta), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, -6, 6, V|E},
+{"partitions", "macroblock subpartition sizes to consider", OFFSET(partitions), FF_OPT_TYPE_FLAGS, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E, "partitions"},
+{"parti4x4", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = X264_PART_I4X4 }, INT_MIN, INT_MAX, V|E, "partitions"},
+{"parti8x8", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = X264_PART_I8X8 }, INT_MIN, INT_MAX, V|E, "partitions"},
+{"partp4x4", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = X264_PART_P4X4 }, INT_MIN, INT_MAX, V|E, "partitions"},
+{"partp8x8", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = X264_PART_P8X8 }, INT_MIN, INT_MAX, V|E, "partitions"},
+{"partb8x8", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = X264_PART_B8X8 }, INT_MIN, INT_MAX, V|E, "partitions"},
+{"sc_factor", "multiplied by qscale for each frame and added to scene_change_score", OFFSET(scenechange_factor), FF_OPT_TYPE_INT, {.dbl = 6 }, 0, INT_MAX, V|E},
+{"mv0_threshold", NULL, OFFSET(mv0_threshold), FF_OPT_TYPE_INT, {.dbl = 256 }, 0, INT_MAX, V|E},
+{"ivlc", "intra vlc table", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_INTRA_VLC }, INT_MIN, INT_MAX, V|E, "flags2"},
+{"b_sensitivity", "adjusts sensitivity of b_frame_strategy 1", OFFSET(b_sensitivity), FF_OPT_TYPE_INT, {.dbl = 40 }, 1, INT_MAX, V|E},
+{"compression_level", NULL, OFFSET(compression_level), FF_OPT_TYPE_INT, {.dbl = FF_COMPRESSION_DEFAULT }, INT_MIN, INT_MAX, V|A|E},
+{"min_prediction_order", NULL, OFFSET(min_prediction_order), FF_OPT_TYPE_INT, {.dbl = -1 }, INT_MIN, INT_MAX, A|E},
+{"max_prediction_order", NULL, OFFSET(max_prediction_order), FF_OPT_TYPE_INT, {.dbl = -1 }, INT_MIN, INT_MAX, A|E},
+#if FF_API_FLAC_GLOBAL_OPTS
+{"lpc_coeff_precision", "deprecated, use flac-specific options", OFFSET(lpc_coeff_precision), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, 0, INT_MAX, A|E},
+{"prediction_order_method", "deprecated, use flac-specific options", OFFSET(prediction_order_method), FF_OPT_TYPE_INT, {.dbl = -1 }, INT_MIN, INT_MAX, A|E},
+{"min_partition_order", "deprecated, use flac-specific options", OFFSET(min_partition_order), FF_OPT_TYPE_INT, {.dbl = -1 }, INT_MIN, INT_MAX, A|E},
+{"max_partition_order", "deprecated, use flac-specific options", OFFSET(max_partition_order), FF_OPT_TYPE_INT, {.dbl = -1 }, INT_MIN, INT_MAX, A|E},
#endif
-{"lpc_coeff_precision", "LPC coefficient precision (FLAC)", OFFSET(lpc_coeff_precision), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, A|E},
-{"min_prediction_order", NULL, OFFSET(min_prediction_order), FF_OPT_TYPE_INT, -1, INT_MIN, INT_MAX, A|E},
-{"max_prediction_order", NULL, OFFSET(max_prediction_order), FF_OPT_TYPE_INT, -1, INT_MIN, INT_MAX, A|E},
-{"prediction_order_method", "search method for selecting prediction order", OFFSET(prediction_order_method), FF_OPT_TYPE_INT, -1, INT_MIN, INT_MAX, A|E},
-{"min_partition_order", NULL, OFFSET(min_partition_order), FF_OPT_TYPE_INT, -1, INT_MIN, INT_MAX, A|E},
-{"max_partition_order", NULL, OFFSET(max_partition_order), FF_OPT_TYPE_INT, -1, INT_MIN, INT_MAX, A|E},
-{"timecode_frame_start", "GOP timecode frame start number, in non drop frame format", OFFSET(timecode_frame_start), FF_OPT_TYPE_INT64, 0, 0, INT64_MAX, V|E},
-{"drop_frame_timecode", NULL, 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_DROP_FRAME_TIMECODE, INT_MIN, INT_MAX, V|E, "flags2"},
-{"non_linear_q", "use non linear quantizer", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_NON_LINEAR_QUANT, INT_MIN, INT_MAX, V|E, "flags2"},
+{"timecode_frame_start", "GOP timecode frame start number, in non drop frame format", OFFSET(timecode_frame_start), FF_OPT_TYPE_INT64, {.dbl = 0 }, 0, INT64_MAX, V|E},
+{"drop_frame_timecode", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_DROP_FRAME_TIMECODE }, INT_MIN, INT_MAX, V|E, "flags2"},
+{"non_linear_q", "use non linear quantizer", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_NON_LINEAR_QUANT }, INT_MIN, INT_MAX, V|E, "flags2"},
#if FF_API_REQUEST_CHANNELS
-{"request_channels", "set desired number of audio channels", OFFSET(request_channels), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, A|D},
+{"request_channels", "set desired number of audio channels", OFFSET(request_channels), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, 0, INT_MAX, A|D},
+#endif
+{"drc_scale", "percentage of dynamic range compression to apply", OFFSET(drc_scale), FF_OPT_TYPE_FLOAT, {.dbl = 1.0 }, 0.0, 1.0, A|D},
+{"reservoir", "use bit reservoir", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_BIT_RESERVOIR }, INT_MIN, INT_MAX, A|E, "flags2"},
+{"mbtree", "use macroblock tree ratecontrol (x264 only)", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_MBTREE }, INT_MIN, INT_MAX, V|E, "flags2"},
+{"bits_per_raw_sample", NULL, OFFSET(bits_per_raw_sample), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
+{"channel_layout", NULL, OFFSET(channel_layout), FF_OPT_TYPE_INT64, {.dbl = DEFAULT }, 0, INT64_MAX, A|E|D, "channel_layout"},
+{"request_channel_layout", NULL, OFFSET(request_channel_layout), FF_OPT_TYPE_INT64, {.dbl = DEFAULT }, 0, INT64_MAX, A|D, "request_channel_layout"},
+{"rc_max_vbv_use", NULL, OFFSET(rc_max_available_vbv_use), FF_OPT_TYPE_FLOAT, {.dbl = 1.0/3 }, 0.0, FLT_MAX, V|E},
+{"rc_min_vbv_use", NULL, OFFSET(rc_min_vbv_overflow_use), FF_OPT_TYPE_FLOAT, {.dbl = 3 }, 0.0, FLT_MAX, V|E},
+{"ticks_per_frame", NULL, OFFSET(ticks_per_frame), FF_OPT_TYPE_INT, {.dbl = 1 }, 1, INT_MAX, A|V|E|D},
+{"color_primaries", NULL, OFFSET(color_primaries), FF_OPT_TYPE_INT, {.dbl = AVCOL_PRI_UNSPECIFIED }, 1, AVCOL_PRI_NB-1, V|E|D},
+{"color_trc", NULL, OFFSET(color_trc), FF_OPT_TYPE_INT, {.dbl = AVCOL_TRC_UNSPECIFIED }, 1, AVCOL_TRC_NB-1, V|E|D},
+{"colorspace", NULL, OFFSET(colorspace), FF_OPT_TYPE_INT, {.dbl = AVCOL_SPC_UNSPECIFIED }, 1, AVCOL_SPC_NB-1, V|E|D},
+{"color_range", NULL, OFFSET(color_range), FF_OPT_TYPE_INT, {.dbl = AVCOL_RANGE_UNSPECIFIED }, 0, AVCOL_RANGE_NB-1, V|E|D},
+{"chroma_sample_location", NULL, OFFSET(chroma_sample_location), FF_OPT_TYPE_INT, {.dbl = AVCHROMA_LOC_UNSPECIFIED }, 0, AVCHROMA_LOC_NB-1, V|E|D},
+{"psy", "use psycho visual optimization", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_PSY }, INT_MIN, INT_MAX, V|E, "flags2"},
+{"psy_rd", "specify psycho visual strength", OFFSET(psy_rd), FF_OPT_TYPE_FLOAT, {.dbl = 1.0 }, 0, FLT_MAX, V|E},
+{"psy_trellis", "specify psycho visual trellis", OFFSET(psy_trellis), FF_OPT_TYPE_FLOAT, {.dbl = 0 }, 0, FLT_MAX, V|E},
+{"aq_mode", "specify aq method", OFFSET(aq_mode), FF_OPT_TYPE_INT, {.dbl = 1 }, 0, INT_MAX, V|E},
+{"aq_strength", "specify aq strength", OFFSET(aq_strength), FF_OPT_TYPE_FLOAT, {.dbl = 1.0 }, 0, FLT_MAX, V|E},
+{"rc_lookahead", "specify number of frames to look ahead for frametype", OFFSET(rc_lookahead), FF_OPT_TYPE_INT, {.dbl = 40 }, 0, INT_MAX, V|E},
+{"ssim", "ssim will be calculated during encoding", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_SSIM }, INT_MIN, INT_MAX, V|E, "flags2"},
+{"intra_refresh", "use periodic insertion of intra blocks instead of keyframes", 0, FF_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_INTRA_REFRESH }, INT_MIN, INT_MAX, V|E, "flags2"},
+{"crf_max", "in crf mode, prevents vbv from lowering quality beyond this point", OFFSET(crf_max), FF_OPT_TYPE_FLOAT, {.dbl = DEFAULT }, 0, 51, V|E},
+{"log_level_offset", "set the log level offset", OFFSET(log_level_offset), FF_OPT_TYPE_INT, {.dbl = 0 }, INT_MIN, INT_MAX },
+#if FF_API_FLAC_GLOBAL_OPTS
+{"use_lpc", "sets whether to use LPC mode (FLAC)", OFFSET(use_lpc), FF_OPT_TYPE_INT, {.dbl = -1 }, INT_MIN, INT_MAX, A|E},
+{"lpc_type", "deprecated, use flac-specific options", OFFSET(lpc_type), FF_OPT_TYPE_INT, {.dbl = AV_LPC_TYPE_DEFAULT }, AV_LPC_TYPE_DEFAULT, AV_LPC_TYPE_NB-1, A|E},
+{"none", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = AV_LPC_TYPE_NONE }, INT_MIN, INT_MAX, A|E, "lpc_type"},
+{"fixed", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = AV_LPC_TYPE_FIXED }, INT_MIN, INT_MAX, A|E, "lpc_type"},
+{"levinson", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = AV_LPC_TYPE_LEVINSON }, INT_MIN, INT_MAX, A|E, "lpc_type"},
+{"cholesky", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = AV_LPC_TYPE_CHOLESKY }, INT_MIN, INT_MAX, A|E, "lpc_type"},
+{"lpc_passes", "deprecated, use flac-specific options", OFFSET(lpc_passes), FF_OPT_TYPE_INT, {.dbl = -1 }, INT_MIN, INT_MAX, A|E},
#endif
-{"drc_scale", "percentage of dynamic range compression to apply", OFFSET(drc_scale), FF_OPT_TYPE_FLOAT, 1.0, 0.0, 1.0, A|D},
-{"reservoir", "use bit reservoir", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_BIT_RESERVOIR, INT_MIN, INT_MAX, A|E, "flags2"},
-{"mbtree", "use macroblock tree ratecontrol (x264 only)", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_MBTREE, INT_MIN, INT_MAX, V|E, "flags2"},
-{"bits_per_raw_sample", NULL, OFFSET(bits_per_raw_sample), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"channel_layout", NULL, OFFSET(channel_layout), FF_OPT_TYPE_INT64, DEFAULT, 0, INT64_MAX, A|E|D, "channel_layout"},
-{"request_channel_layout", NULL, OFFSET(request_channel_layout), FF_OPT_TYPE_INT64, DEFAULT, 0, INT64_MAX, A|D, "request_channel_layout"},
-{"rc_max_vbv_use", NULL, OFFSET(rc_max_available_vbv_use), FF_OPT_TYPE_FLOAT, 1.0/3, 0.0, FLT_MAX, V|E},
-{"rc_min_vbv_use", NULL, OFFSET(rc_min_vbv_overflow_use), FF_OPT_TYPE_FLOAT, 3, 0.0, FLT_MAX, V|E},
-{"ticks_per_frame", NULL, OFFSET(ticks_per_frame), FF_OPT_TYPE_INT, 1, 1, INT_MAX, A|V|E|D},
-{"color_primaries", NULL, OFFSET(color_primaries), FF_OPT_TYPE_INT, AVCOL_PRI_UNSPECIFIED, 1, AVCOL_PRI_NB-1, V|E|D},
-{"color_trc", NULL, OFFSET(color_trc), FF_OPT_TYPE_INT, AVCOL_TRC_UNSPECIFIED, 1, AVCOL_TRC_NB-1, V|E|D},
-{"colorspace", NULL, OFFSET(colorspace), FF_OPT_TYPE_INT, AVCOL_SPC_UNSPECIFIED, 1, AVCOL_SPC_NB-1, V|E|D},
-{"color_range", NULL, OFFSET(color_range), FF_OPT_TYPE_INT, AVCOL_RANGE_UNSPECIFIED, 0, AVCOL_RANGE_NB-1, V|E|D},
-{"chroma_sample_location", NULL, OFFSET(chroma_sample_location), FF_OPT_TYPE_INT, AVCHROMA_LOC_UNSPECIFIED, 0, AVCHROMA_LOC_NB-1, V|E|D},
-{"psy", "use psycho visual optimization", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_PSY, INT_MIN, INT_MAX, V|E, "flags2"},
-{"psy_rd", "specify psycho visual strength", OFFSET(psy_rd), FF_OPT_TYPE_FLOAT, 1.0, 0, FLT_MAX, V|E},
-{"psy_trellis", "specify psycho visual trellis", OFFSET(psy_trellis), FF_OPT_TYPE_FLOAT, 0, 0, FLT_MAX, V|E},
-{"aq_mode", "specify aq method", OFFSET(aq_mode), FF_OPT_TYPE_INT, 1, 0, INT_MAX, V|E},
-{"aq_strength", "specify aq strength", OFFSET(aq_strength), FF_OPT_TYPE_FLOAT, 1.0, 0, FLT_MAX, V|E},
-{"rc_lookahead", "specify number of frames to look ahead for frametype", OFFSET(rc_lookahead), FF_OPT_TYPE_INT, 40, 0, INT_MAX, V|E},
-{"ssim", "ssim will be calculated during encoding", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_SSIM, INT_MIN, INT_MAX, V|E, "flags2"},
-{"intra_refresh", "use periodic insertion of intra blocks instead of keyframes", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_INTRA_REFRESH, INT_MIN, INT_MAX, V|E, "flags2"},
-{"crf_max", "in crf mode, prevents vbv from lowering quality beyond this point", OFFSET(crf_max), FF_OPT_TYPE_FLOAT, DEFAULT, 0, 51, V|E},
-{"log_level_offset", "set the log level offset", OFFSET(log_level_offset), FF_OPT_TYPE_INT, 0, INT_MIN, INT_MAX },
-{"lpc_type", "specify LPC algorithm", OFFSET(lpc_type), FF_OPT_TYPE_INT, AV_LPC_TYPE_DEFAULT, AV_LPC_TYPE_DEFAULT, AV_LPC_TYPE_NB-1, A|E},
-{"none", NULL, 0, FF_OPT_TYPE_CONST, AV_LPC_TYPE_NONE, INT_MIN, INT_MAX, A|E, "lpc_type"},
-{"fixed", NULL, 0, FF_OPT_TYPE_CONST, AV_LPC_TYPE_FIXED, INT_MIN, INT_MAX, A|E, "lpc_type"},
-{"levinson", NULL, 0, FF_OPT_TYPE_CONST, AV_LPC_TYPE_LEVINSON, INT_MIN, INT_MAX, A|E, "lpc_type"},
-{"cholesky", NULL, 0, FF_OPT_TYPE_CONST, AV_LPC_TYPE_CHOLESKY, INT_MIN, INT_MAX, A|E, "lpc_type"},
-{"lpc_passes", "number of passes to use for Cholesky factorization during LPC analysis", OFFSET(lpc_passes), FF_OPT_TYPE_INT, -1, INT_MIN, INT_MAX, A|E},
-{"slices", "number of slices, used in parallelized decoding", OFFSET(slices), FF_OPT_TYPE_INT, 0, 0, INT_MAX, V|E},
-{"thread_type", "select multithreading type", OFFSET(thread_type), FF_OPT_TYPE_INT, FF_THREAD_SLICE|FF_THREAD_FRAME, 0, INT_MAX, V|E|D, "thread_type"},
-{"slice", NULL, 0, FF_OPT_TYPE_CONST, FF_THREAD_SLICE, INT_MIN, INT_MAX, V|E|D, "thread_type"},
-{"frame", NULL, 0, FF_OPT_TYPE_CONST, FF_THREAD_FRAME, INT_MIN, INT_MAX, V|E|D, "thread_type"},
-{"vbv_delay", "initial buffer fill time in periods of 27Mhz clock", 0, FF_OPT_TYPE_INT64, 0, 0, INT64_MAX},
-{"audio_service_type", "audio service type", OFFSET(audio_service_type), FF_OPT_TYPE_INT, AV_AUDIO_SERVICE_TYPE_MAIN, 0, AV_AUDIO_SERVICE_TYPE_NB-1, A|E, "audio_service_type"},
-{"ma", "Main Audio Service", 0, FF_OPT_TYPE_CONST, AV_AUDIO_SERVICE_TYPE_MAIN, INT_MIN, INT_MAX, A|E, "audio_service_type"},
-{"ef", "Effects", 0, FF_OPT_TYPE_CONST, AV_AUDIO_SERVICE_TYPE_EFFECTS, INT_MIN, INT_MAX, A|E, "audio_service_type"},
-{"vi", "Visually Impaired", 0, FF_OPT_TYPE_CONST, AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED, INT_MIN, INT_MAX, A|E, "audio_service_type"},
-{"hi", "Hearing Impaired", 0, FF_OPT_TYPE_CONST, AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED, INT_MIN, INT_MAX, A|E, "audio_service_type"},
-{"di", "Dialogue", 0, FF_OPT_TYPE_CONST, AV_AUDIO_SERVICE_TYPE_DIALOGUE, INT_MIN, INT_MAX, A|E, "audio_service_type"},
-{"co", "Commentary", 0, FF_OPT_TYPE_CONST, AV_AUDIO_SERVICE_TYPE_COMMENTARY, INT_MIN, INT_MAX, A|E, "audio_service_type"},
-{"em", "Emergency", 0, FF_OPT_TYPE_CONST, AV_AUDIO_SERVICE_TYPE_EMERGENCY, INT_MIN, INT_MAX, A|E, "audio_service_type"},
-{"vo", "Voice Over", 0, FF_OPT_TYPE_CONST, AV_AUDIO_SERVICE_TYPE_VOICE_OVER, INT_MIN, INT_MAX, A|E, "audio_service_type"},
-{"ka", "Karaoke", 0, FF_OPT_TYPE_CONST, AV_AUDIO_SERVICE_TYPE_KARAOKE, INT_MIN, INT_MAX, A|E, "audio_service_type"},
-{"request_sample_fmt", "sample format audio decoders should prefer", OFFSET(request_sample_fmt), FF_OPT_TYPE_INT, AV_SAMPLE_FMT_NONE, AV_SAMPLE_FMT_NONE, AV_SAMPLE_FMT_NB-1, A|D},
+{"slices", "number of slices, used in parallelized decoding", OFFSET(slices), FF_OPT_TYPE_INT, {.dbl = 0 }, 0, INT_MAX, V|E},
+{"thread_type", "select multithreading type", OFFSET(thread_type), FF_OPT_TYPE_INT, {.dbl = FF_THREAD_SLICE|FF_THREAD_FRAME }, 0, INT_MAX, V|E|D, "thread_type"},
+{"slice", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_THREAD_SLICE }, INT_MIN, INT_MAX, V|E|D, "thread_type"},
+{"frame", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_THREAD_FRAME }, INT_MIN, INT_MAX, V|E|D, "thread_type"},
+{"vbv_delay", "initial buffer fill time in periods of 27Mhz clock", 0, FF_OPT_TYPE_INT64, {.dbl = 0 }, 0, INT64_MAX},
+{"audio_service_type", "audio service type", OFFSET(audio_service_type), FF_OPT_TYPE_INT, {.dbl = AV_AUDIO_SERVICE_TYPE_MAIN }, 0, AV_AUDIO_SERVICE_TYPE_NB-1, A|E, "audio_service_type"},
+{"ma", "Main Audio Service", 0, FF_OPT_TYPE_CONST, {.dbl = AV_AUDIO_SERVICE_TYPE_MAIN }, INT_MIN, INT_MAX, A|E, "audio_service_type"},
+{"ef", "Effects", 0, FF_OPT_TYPE_CONST, {.dbl = AV_AUDIO_SERVICE_TYPE_EFFECTS }, INT_MIN, INT_MAX, A|E, "audio_service_type"},
+{"vi", "Visually Impaired", 0, FF_OPT_TYPE_CONST, {.dbl = AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED }, INT_MIN, INT_MAX, A|E, "audio_service_type"},
+{"hi", "Hearing Impaired", 0, FF_OPT_TYPE_CONST, {.dbl = AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED }, INT_MIN, INT_MAX, A|E, "audio_service_type"},
+{"di", "Dialogue", 0, FF_OPT_TYPE_CONST, {.dbl = AV_AUDIO_SERVICE_TYPE_DIALOGUE }, INT_MIN, INT_MAX, A|E, "audio_service_type"},
+{"co", "Commentary", 0, FF_OPT_TYPE_CONST, {.dbl = AV_AUDIO_SERVICE_TYPE_COMMENTARY }, INT_MIN, INT_MAX, A|E, "audio_service_type"},
+{"em", "Emergency", 0, FF_OPT_TYPE_CONST, {.dbl = AV_AUDIO_SERVICE_TYPE_EMERGENCY }, INT_MIN, INT_MAX, A|E, "audio_service_type"},
+{"vo", "Voice Over", 0, FF_OPT_TYPE_CONST, {.dbl = AV_AUDIO_SERVICE_TYPE_VOICE_OVER }, INT_MIN, INT_MAX, A|E, "audio_service_type"},
+{"ka", "Karaoke", 0, FF_OPT_TYPE_CONST, {.dbl = AV_AUDIO_SERVICE_TYPE_KARAOKE }, INT_MIN, INT_MAX, A|E, "audio_service_type"},
+{"request_sample_fmt", "sample format audio decoders should prefer", OFFSET(request_sample_fmt), FF_OPT_TYPE_INT, {.dbl = AV_SAMPLE_FMT_NONE }, AV_SAMPLE_FMT_NONE, AV_SAMPLE_FMT_NB-1, A|D},
{NULL},
};
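
The option entries above wrap every default value in a designated initializer such as {.dbl = ...}: the default now lives in a union, and the double member is wide enough to carry integer, flag and float defaults alike. A minimal standalone sketch of that pattern, using made-up DemoOption/DefaultVal types rather than the real AVOption layout:

#include <stdio.h>

/* hypothetical stand-ins for the option table types */
typedef union {
    double      dbl;
    const char *str;
} DefaultVal;

typedef struct {
    const char *name;
    DefaultVal  default_val;
} DemoOption;

static const DemoOption demo_options[] = {
    { "aq_mode",      { .dbl = 1   } },  /* integer default stored in the double member */
    { "psy_rd",       { .dbl = 1.0 } },  /* float default */
    { "rc_lookahead", { .dbl = 40  } },
    { NULL,           { .dbl = 0   } },
};

int main(void)
{
    for (const DemoOption *o = demo_options; o->name; o++)
        printf("%s defaults to %g\n", o->name, o->default_val.dbl);
    return 0;
}
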
diff --git a/libavcodec/pamenc.c b/libavcodec/pamenc.c
index 9f50d7fbfa..b6d58dec49 100644
--- a/libavcodec/pamenc.c
+++ b/libavcodec/pamenc.c
@@ -40,7 +40,7 @@ static int pam_encode_frame(AVCodecContext *avctx, unsigned char *outbuf,
}
*p = *pict;
- p->pict_type = FF_I_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
s->bytestream_start =
diff --git a/libavcodec/parser.c b/libavcodec/parser.c
index 1000a0bd34..ff0842fb5d 100644
--- a/libavcodec/parser.c
+++ b/libavcodec/parser.c
@@ -72,7 +72,7 @@ AVCodecParserContext *av_parser_init(int codec_id)
}
}
s->fetch_timestamp=1;
- s->pict_type = FF_I_TYPE;
+ s->pict_type = AV_PICTURE_TYPE_I;
s->key_frame = -1;
s->convergence_duration = 0;
s->dts_sync_point = INT_MIN;
@@ -223,7 +223,7 @@ int av_parser_change(AVCodecParserContext *s,
*poutbuf_size= buf_size;
if(avctx->extradata){
if( (keyframe && (avctx->flags2 & CODEC_FLAG2_LOCAL_HEADER))
- /*||(s->pict_type != FF_I_TYPE && (s->flags & PARSER_FLAG_DUMP_EXTRADATA_AT_NOKEY))*/
+ /*||(s->pict_type != AV_PICTURE_TYPE_I && (s->flags & PARSER_FLAG_DUMP_EXTRADATA_AT_NOKEY))*/
/*||(? && (s->flags & PARSER_FLAG_DUMP_EXTRADATA_AT_BEGIN)*/){
int size= buf_size + avctx->extradata_size;
*poutbuf_size= size;
diff --git a/libavcodec/pcx.c b/libavcodec/pcx.c
index d47417e3f7..2c9f8c07d5 100644
--- a/libavcodec/pcx.c
+++ b/libavcodec/pcx.c
@@ -152,7 +152,7 @@ static int pcx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
return -1;
}
- p->pict_type = FF_I_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_I;
ptr = p->data[0];
stride = p->linesize[0];
diff --git a/libavcodec/pcxenc.c b/libavcodec/pcxenc.c
index 124516b957..bf7cebbbe2 100644
--- a/libavcodec/pcxenc.c
+++ b/libavcodec/pcxenc.c
@@ -108,7 +108,7 @@ static int pcx_encode_frame(AVCodecContext *avctx,
const uint8_t *src;
*pict = *(AVFrame *)data;
- pict->pict_type = FF_I_TYPE;
+ pict->pict_type = AV_PICTURE_TYPE_I;
pict->key_frame = 1;
if (avctx->width > 65535 || avctx->height > 65535) {
diff --git a/libavcodec/pictordec.c b/libavcodec/pictordec.c
index f1b3607082..b87c8643d0 100644
--- a/libavcodec/pictordec.c
+++ b/libavcodec/pictordec.c
@@ -94,6 +94,14 @@ static const uint8_t cga_mode45_index[6][4] = {
[5] = { 0, 11, 12, 15 }, // mode5, high intensity
};
+static av_cold int decode_init(AVCodecContext *avctx)
+{
+ PicContext *s = avctx->priv_data;
+
+ avcodec_get_frame_defaults(&s->frame);
+ return 0;
+}
+
static int decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
AVPacket *avpkt)
@@ -148,7 +156,7 @@ static int decode_frame(AVCodecContext *avctx,
return -1;
}
memset(s->frame.data[0], 0, s->height * s->frame.linesize[0]);
- s->frame.pict_type = FF_I_TYPE;
+ s->frame.pict_type = AV_PICTURE_TYPE_I;
s->frame.palette_has_changed = 1;
palette = (uint32_t*)s->frame.data[1];
@@ -242,7 +250,7 @@ AVCodec ff_pictor_decoder = {
AVMEDIA_TYPE_VIDEO,
CODEC_ID_PICTOR,
sizeof(PicContext),
- NULL,
+ decode_init,
NULL,
decode_end,
decode_frame,
diff --git a/libavcodec/pngdec.c b/libavcodec/pngdec.c
index 1199bd5412..100b60cd1e 100644
--- a/libavcodec/pngdec.c
+++ b/libavcodec/pngdec.c
@@ -485,7 +485,7 @@ static int decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
goto fail;
}
- p->pict_type= FF_I_TYPE;
+ p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
p->interlaced_frame = !!s->interlace_type;
diff --git a/libavcodec/pngenc.c b/libavcodec/pngenc.c
index 4cb56337e7..c4ef2fd945 100644
--- a/libavcodec/pngenc.c
+++ b/libavcodec/pngenc.c
@@ -243,7 +243,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
uint8_t *top_buf = NULL;
*p = *pict;
- p->pict_type= FF_I_TYPE;
+ p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
s->bytestream_start=
diff --git a/libavcodec/pnm.c b/libavcodec/pnm.c
index 2cbbdf60ea..dfc18d6013 100644
--- a/libavcodec/pnm.c
+++ b/libavcodec/pnm.c
@@ -135,7 +135,7 @@ int ff_pnm_decode_header(AVCodecContext *avctx, PNMContext * const s)
return -1;
pnm_get(s, buf1, sizeof(buf1));
avctx->height = atoi(buf1);
- if(av_image_check_size(avctx->width, avctx->height, 0, avctx))
+ if(avctx->height <= 0 || av_image_check_size(avctx->width, avctx->height, 0, avctx))
return -1;
if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
pnm_get(s, buf1, sizeof(buf1));
diff --git a/libavcodec/pnmdec.c b/libavcodec/pnmdec.c
index c746db6cc2..ebecad4006 100644
--- a/libavcodec/pnmdec.c
+++ b/libavcodec/pnmdec.c
@@ -33,7 +33,7 @@ static int pnm_decode_frame(AVCodecContext *avctx, void *data,
PNMContext * const s = avctx->priv_data;
AVFrame *picture = data;
AVFrame * const p = (AVFrame*)&s->picture;
- int i, j, n, linesize, h, upgrade = 0;
+ int i, j, n, linesize, h, upgrade = 0, is_mono = 0;
unsigned char *ptr;
int components, sample_len;
@@ -52,7 +52,7 @@ static int pnm_decode_frame(AVCodecContext *avctx, void *data,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
- p->pict_type = FF_I_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
switch (avctx->pix_fmt) {
@@ -88,6 +88,7 @@ static int pnm_decode_frame(AVCodecContext *avctx, void *data,
n = (avctx->width + 7) >> 3;
components=1;
sample_len=1;
+ is_mono = 1;
do_read:
ptr = p->data[0];
linesize = p->linesize[0];
@@ -104,10 +105,16 @@ static int pnm_decode_frame(AVCodecContext *avctx, void *data,
s->bytestream++;
if(s->bytestream >= s->bytestream_end)
return -1;
- do{
- v= 10*v + c;
- c= (*s->bytestream++) - '0';
- }while(c <= 9);
+ if (is_mono) {
+ /* read a single digit */
+ v = (*s->bytestream++) - '0';
+ } else {
+ /* read a sequence of digits */
+ do {
+ v = 10*v + c;
+ c = (*s->bytestream++) - '0';
+ } while (c <= 9);
+ }
put_bits(&pb, sample_len, (((1<<sample_len)-1)*v + (s->maxval>>1))/s->maxval);
}
flush_put_bits(&pb);
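
The pnmdec.c hunk above splits ASCII sample parsing: 1-bit (mono) data carries exactly one '0'/'1' digit per sample, while deeper formats keep accumulating digits into a decimal value. A rough standalone sketch of that control flow, with a hypothetical read_ascii_sample() helper in place of the decoder's bytestream handling:

#include <stdio.h>

static int read_ascii_sample(const unsigned char **p, int is_mono)
{
    int v;

    if (is_mono) {
        v = *(*p)++ - '0';                   /* exactly one digit per sample */
    } else {
        v = 0;
        while (**p >= '0' && **p <= '9')     /* accumulate a multi-digit value */
            v = 10 * v + (*(*p)++ - '0');
    }
    return v;
}

int main(void)
{
    const unsigned char mono[] = "1010";
    const unsigned char gray[] = "255 ";
    const unsigned char *p = mono;
    int a = read_ascii_sample(&p, 1);
    int b = read_ascii_sample(&p, 1);

    printf("mono samples: %d %d\n", a, b);
    p = gray;
    printf("gray sample:  %d\n", read_ascii_sample(&p, 0));
    return 0;
}
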
diff --git a/libavcodec/pnmenc.c b/libavcodec/pnmenc.c
index 4dbd587e1a..42c32dc94a 100644
--- a/libavcodec/pnmenc.c
+++ b/libavcodec/pnmenc.c
@@ -39,7 +39,7 @@ static int pnm_encode_frame(AVCodecContext *avctx, unsigned char *outbuf,
}
*p = *pict;
- p->pict_type = FF_I_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
s->bytestream_start =
diff --git a/libavcodec/ppc/dsputil_altivec.c b/libavcodec/ppc/dsputil_altivec.c
index a8d0a61a85..bd432beb87 100644
--- a/libavcodec/ppc/dsputil_altivec.c
+++ b/libavcodec/ppc/dsputil_altivec.c
@@ -1384,7 +1384,7 @@ static void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int l
void dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx)
{
- const int h264_high_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
+ const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
c->pix_abs[0][1] = sad16_x2_altivec;
c->pix_abs[0][2] = sad16_y2_altivec;
@@ -1399,10 +1399,10 @@ void dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx)
c->pix_sum = pix_sum_altivec;
c->diff_pixels = diff_pixels_altivec;
c->get_pixels = get_pixels_altivec;
- if (!h264_high_depth)
+ if (!high_bit_depth)
c->clear_block = clear_block_altivec;
c->add_bytes= add_bytes_altivec;
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
c->put_pixels_tab[0][0] = put_pixels16_altivec;
/* the two functions do the same thing, so use the same code */
c->put_no_rnd_pixels_tab[0][0] = put_pixels16_altivec;
diff --git a/libavcodec/ppc/dsputil_ppc.c b/libavcodec/ppc/dsputil_ppc.c
index 57f30ef703..327fe2c72f 100644
--- a/libavcodec/ppc/dsputil_ppc.c
+++ b/libavcodec/ppc/dsputil_ppc.c
@@ -153,11 +153,11 @@ static void prefetch_ppc(void *mem, int stride, int h)
void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx)
{
- const int h264_high_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
+ const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
// Common optimizations whether AltiVec is available or not
c->prefetch = prefetch_ppc;
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
switch (check_dcbzl_effect()) {
case 32:
c->clear_blocks = clear_blocks_dcbz32_ppc;
diff --git a/libavcodec/ppc/h264_altivec.c b/libavcodec/ppc/h264_altivec.c
index 0e58846f51..9df18888ad 100644
--- a/libavcodec/ppc/h264_altivec.c
+++ b/libavcodec/ppc/h264_altivec.c
@@ -965,10 +965,10 @@ H264_WEIGHT( 8, 8)
H264_WEIGHT( 8, 4)
void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) {
- const int h264_high_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
+ const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
diff --git a/libavcodec/ppc/vc1dsp_altivec.c b/libavcodec/ppc/vc1dsp_altivec.c
index 05edb53b7c..69670619da 100644
--- a/libavcodec/ppc/vc1dsp_altivec.c
+++ b/libavcodec/ppc/vc1dsp_altivec.c
@@ -130,8 +130,7 @@ do { \
/** Do inverse transform on 8x8 block
*/
-static void vc1_inv_trans_8x8_altivec(DCTELEM block[64],
- int sign, int rangered)
+static void vc1_inv_trans_8x8_altivec(DCTELEM block[64])
{
vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
@@ -145,9 +144,6 @@ static void vc1_inv_trans_8x8_altivec(DCTELEM block[64],
const vector unsigned int vec_2 = vec_splat_u32(2);
const vector signed int vec_1s = vec_splat_s32(1);
const vector unsigned int vec_1 = vec_splat_u32(1);
- const vector unsigned short rangered_shift = vec_splat_u16(1);
- const vector signed short signed_bias = vec_sl(vec_splat_s16(4),
- vec_splat_u16(4));
src0 = vec_ld( 0, block);
src1 = vec_ld( 16, block);
@@ -217,27 +213,6 @@ static void vc1_inv_trans_8x8_altivec(DCTELEM block[64],
src6 = vec_pack(sE, s6);
src7 = vec_pack(sF, s7);
- if (rangered) {
- if (!sign) {
- src0 = vec_sub(src0, signed_bias);
- src1 = vec_sub(src1, signed_bias);
- src2 = vec_sub(src2, signed_bias);
- src3 = vec_sub(src3, signed_bias);
- src4 = vec_sub(src4, signed_bias);
- src5 = vec_sub(src5, signed_bias);
- src6 = vec_sub(src6, signed_bias);
- src7 = vec_sub(src7, signed_bias);
- }
- src0 = vec_sl(src0, rangered_shift);
- src1 = vec_sl(src1, rangered_shift);
- src2 = vec_sl(src2, rangered_shift);
- src3 = vec_sl(src3, rangered_shift);
- src4 = vec_sl(src4, rangered_shift);
- src5 = vec_sl(src5, rangered_shift);
- src6 = vec_sl(src6, rangered_shift);
- src7 = vec_sl(src7, rangered_shift);
- }
-
vec_st(src0, 0, block);
vec_st(src1, 16, block);
vec_st(src2, 32, block);
@@ -248,36 +223,6 @@ static void vc1_inv_trans_8x8_altivec(DCTELEM block[64],
vec_st(src7,112, block);
}
-static void vc1_inv_trans_8x8_add_altivec(uint8_t *dest, int stride, DCTELEM *b)
-{
- vc1_inv_trans_8x8_altivec(b, 0, 0);
- ff_add_pixels_clamped_c(b, dest, stride);
-}
-
-static void vc1_inv_trans_8x8_put_signed_altivec(uint8_t *dest, int stride, DCTELEM *b)
-{
- vc1_inv_trans_8x8_altivec(b, 1, 0);
- ff_put_signed_pixels_clamped_c(b, dest, stride);
-}
-
-static void vc1_inv_trans_8x8_put_signed_rangered_altivec(uint8_t *dest, int stride, DCTELEM *b)
-{
- vc1_inv_trans_8x8_altivec(b, 1, 1);
- ff_put_signed_pixels_clamped_c(b, dest, stride);
-}
-
-static void vc1_inv_trans_8x8_put_altivec(uint8_t *dest, int stride, DCTELEM *b)
-{
- vc1_inv_trans_8x8_altivec(b, 0, 0);
- ff_put_pixels_clamped_c(b, dest, stride);
-}
-
-static void vc1_inv_trans_8x8_put_rangered_altivec(uint8_t *dest, int stride, DCTELEM *b)
-{
- vc1_inv_trans_8x8_altivec(b, 0, 1);
- ff_put_pixels_clamped_c(b, dest, stride);
-}
-
/** Do inverse transform on 8x4 part of block
*/
static void vc1_inv_trans_8x4_altivec(uint8_t *dest, int stride, DCTELEM *block)
@@ -396,11 +341,7 @@ void ff_vc1dsp_init_altivec(VC1DSPContext* dsp)
if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
return;
- dsp->vc1_inv_trans_8x8_add = vc1_inv_trans_8x8_add_altivec;
- dsp->vc1_inv_trans_8x8_put_signed[0] = vc1_inv_trans_8x8_put_signed_altivec;
- dsp->vc1_inv_trans_8x8_put_signed[1] = vc1_inv_trans_8x8_put_signed_rangered_altivec;
- dsp->vc1_inv_trans_8x8_put[0] = vc1_inv_trans_8x8_put_altivec;
- dsp->vc1_inv_trans_8x8_put[1] = vc1_inv_trans_8x8_put_rangered_altivec;
+ dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_altivec;
dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_altivec;
dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = put_no_rnd_vc1_chroma_mc8_altivec;
dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = avg_no_rnd_vc1_chroma_mc8_altivec;
diff --git a/libavcodec/ps2/dsputil_mmi.c b/libavcodec/ps2/dsputil_mmi.c
index 4190f9da10..349583f1ba 100644
--- a/libavcodec/ps2/dsputil_mmi.c
+++ b/libavcodec/ps2/dsputil_mmi.c
@@ -142,9 +142,9 @@ static void put_pixels16_mmi(uint8_t *block, const uint8_t *pixels, int line_siz
void dsputil_init_mmi(DSPContext* c, AVCodecContext *avctx)
{
const int idct_algo= avctx->idct_algo;
- const int h264_high_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
+ const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
c->clear_blocks = clear_blocks_mmi;
c->put_pixels_tab[1][0] = put_pixels8_mmi;
diff --git a/libavcodec/psymodel.c b/libavcodec/psymodel.c
index 8bd5b8bdd8..133a85f5c1 100644
--- a/libavcodec/psymodel.c
+++ b/libavcodec/psymodel.c
@@ -45,19 +45,6 @@ av_cold int ff_psy_init(FFPsyContext *ctx, AVCodecContext *avctx,
return 0;
}
-FFPsyWindowInfo ff_psy_suggest_window(FFPsyContext *ctx,
- const int16_t *audio, const int16_t *la,
- int channel, int prev_type)
-{
- return ctx->model->window(ctx, audio, la, channel, prev_type);
-}
-
-void ff_psy_set_band_info(FFPsyContext *ctx, int channel,
- const float *coeffs, const FFPsyWindowInfo *wi)
-{
- ctx->model->analyze(ctx, channel, coeffs, wi);
-}
-
av_cold void ff_psy_end(FFPsyContext *ctx)
{
if (ctx->model->end)
diff --git a/libavcodec/psymodel.h b/libavcodec/psymodel.h
index a89b64c308..c65614a151 100644
--- a/libavcodec/psymodel.h
+++ b/libavcodec/psymodel.h
@@ -80,8 +80,30 @@ typedef struct FFPsyContext {
typedef struct FFPsyModel {
const char *name;
int (*init) (FFPsyContext *apc);
+
+ /**
+ * Suggest window sequence for channel.
+ *
+ * @param ctx model context
+ * @param audio samples for the current frame
+ * @param la lookahead samples (NULL when unavailable)
+ * @param channel number of channel element to analyze
+ * @param prev_type previous window type
+ *
+ * @return suggested window information in a structure
+ */
FFPsyWindowInfo (*window)(FFPsyContext *ctx, const int16_t *audio, const int16_t *la, int channel, int prev_type);
+
+ /**
+ * Perform psychoacoustic analysis and set band info (threshold, energy).
+ *
+ * @param ctx model context
+ * @param channel audio channel number
+ * @param coeffs pointer to the transformed coefficients
+ * @param wi window information
+ */
void (*analyze)(FFPsyContext *ctx, int channel, const float *coeffs, const FFPsyWindowInfo *wi);
+
void (*end) (FFPsyContext *apc);
} FFPsyModel;
@@ -101,33 +123,6 @@ av_cold int ff_psy_init(FFPsyContext *ctx, AVCodecContext *avctx,
const uint8_t **bands, const int* num_bands);
/**
- * Suggest window sequence for channel.
- *
- * @param ctx model context
- * @param audio samples for the current frame
- * @param la lookahead samples (NULL when unavailable)
- * @param channel number of channel element to analyze
- * @param prev_type previous window type
- *
- * @return suggested window information in a structure
- */
-FFPsyWindowInfo ff_psy_suggest_window(FFPsyContext *ctx,
- const int16_t *audio, const int16_t *la,
- int channel, int prev_type);
-
-
-/**
- * Perform psychoacoustic analysis and set band info (threshold, energy).
- *
- * @param ctx model context
- * @param channel audio channel number
- * @param coeffs pointer to the transformed coefficients
- * @param wi window information
- */
-void ff_psy_set_band_info(FFPsyContext *ctx, int channel, const float *coeffs,
- const FFPsyWindowInfo *wi);
-
-/**
* Cleanup model context at the end.
*
* @param ctx model context
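
The psymodel changes above fold the ff_psy_suggest_window() and ff_psy_set_band_info() wrappers into the FFPsyModel callbacks themselves, so encoders now invoke the function pointers directly. A simplified sketch of that calling convention with stand-in DemoModel/DemoPsyContext types, not the real FFPsy structures:

#include <stdio.h>

typedef struct DemoModel {
    int  (*window) (int channel, int prev_type);
    void (*analyze)(int channel, const float *coeffs);
} DemoModel;

typedef struct DemoPsyContext {
    const DemoModel *model;
} DemoPsyContext;

static int demo_window(int channel, int prev_type)
{
    (void)channel;
    return prev_type;                       /* trivially keep the previous window type */
}

static void demo_analyze(int channel, const float *coeffs)
{
    printf("analyzing channel %d, first coeff %g\n", channel, coeffs[0]);
}

static const DemoModel demo_model = { demo_window, demo_analyze };

int main(void)
{
    DemoPsyContext ctx = { &demo_model };
    float coeffs[4] = { 0.5f, 0, 0, 0 };

    int wtype = ctx.model->window(0, 1);    /* formerly via ff_psy_suggest_window() */
    ctx.model->analyze(0, coeffs);          /* formerly via ff_psy_set_band_info()  */
    printf("suggested window type: %d\n", wtype);
    return 0;
}
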
diff --git a/libavcodec/ptx.c b/libavcodec/ptx.c
index 426f46ce6c..3273fd2f8e 100644
--- a/libavcodec/ptx.c
+++ b/libavcodec/ptx.c
@@ -74,7 +74,7 @@ static int ptx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
return -1;
}
- p->pict_type = FF_I_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_I;
ptr = p->data[0];
stride = p->linesize[0];
diff --git a/libavcodec/qdrw.c b/libavcodec/qdrw.c
index be47b4228b..cd3146388e 100644
--- a/libavcodec/qdrw.c
+++ b/libavcodec/qdrw.c
@@ -54,7 +54,7 @@ static int decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
- p->pict_type= FF_I_TYPE;
+ p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
outdata = a->pic.data[0];
@@ -133,8 +133,9 @@ static int decode_frame(AVCodecContext *avctx,
}
static av_cold int decode_init(AVCodecContext *avctx){
-// QdrawContext * const a = avctx->priv_data;
+ QdrawContext * const a = avctx->priv_data;
+ avcodec_get_frame_defaults(&a->pic);
avctx->pix_fmt= PIX_FMT_PAL8;
return 0;
diff --git a/libavcodec/qpeg.c b/libavcodec/qpeg.c
index 497d113683..39d8171951 100644
--- a/libavcodec/qpeg.c
+++ b/libavcodec/qpeg.c
@@ -298,6 +298,8 @@ static av_cold int decode_init(AVCodecContext *avctx){
av_log(avctx, AV_LOG_FATAL, "Missing required palette via palctrl\n");
return -1;
}
+ avcodec_get_frame_defaults(&a->pic);
+ avcodec_get_frame_defaults(&a->ref);
a->avctx = avctx;
avctx->pix_fmt= PIX_FMT_PAL8;
diff --git a/libavcodec/qtrle.c b/libavcodec/qtrle.c
index 8ad6778299..a2b6c7f991 100644
--- a/libavcodec/qtrle.c
+++ b/libavcodec/qtrle.c
@@ -416,6 +416,7 @@ static av_cold int qtrle_decode_init(AVCodecContext *avctx)
break;
}
+ avcodec_get_frame_defaults(&s->frame);
s->frame.data[0] = NULL;
return 0;
diff --git a/libavcodec/qtrleenc.c b/libavcodec/qtrleenc.c
index 8815736e87..d43ff7b06b 100644
--- a/libavcodec/qtrleenc.c
+++ b/libavcodec/qtrleenc.c
@@ -39,6 +39,7 @@ typedef struct QtrleEncContext {
int pixel_size;
AVPicture previous_frame;
unsigned int max_buf_size;
+ int logical_width;
/**
* This array will contain at ith position the value of the best RLE code
* if the line started at pixel i
@@ -67,8 +68,13 @@ static av_cold int qtrle_encode_init(AVCodecContext *avctx)
return -1;
}
s->avctx=avctx;
+ s->logical_width=avctx->width;
switch (avctx->pix_fmt) {
+ case PIX_FMT_GRAY8:
+ s->logical_width = avctx->width / 4;
+ s->pixel_size = 4;
+ break;
case PIX_FMT_RGB555BE:
s->pixel_size = 2;
break;
@@ -82,11 +88,11 @@ static av_cold int qtrle_encode_init(AVCodecContext *avctx)
av_log(avctx, AV_LOG_ERROR, "Unsupported colorspace.\n");
break;
}
- avctx->bits_per_coded_sample = s->pixel_size*8;
+ avctx->bits_per_coded_sample = avctx->pix_fmt == PIX_FMT_GRAY8 ? 40 : s->pixel_size*8;
- s->rlecode_table = av_mallocz(s->avctx->width);
- s->skip_table = av_mallocz(s->avctx->width);
- s->length_table = av_mallocz((s->avctx->width + 1)*sizeof(int));
+ s->rlecode_table = av_mallocz(s->logical_width);
+ s->skip_table = av_mallocz(s->logical_width);
+ s->length_table = av_mallocz((s->logical_width + 1)*sizeof(int));
if (!s->skip_table || !s->length_table || !s->rlecode_table) {
av_log(avctx, AV_LOG_ERROR, "Error allocating memory.\n");
return -1;
@@ -96,10 +102,10 @@ static av_cold int qtrle_encode_init(AVCodecContext *avctx)
return -1;
}
- s->max_buf_size = s->avctx->width*s->avctx->height*s->pixel_size /* image base material */
- + 15 /* header + footer */
- + s->avctx->height*2 /* skip code+rle end */
- + s->avctx->width/MAX_RLE_BULK + 1 /* rle codes */;
+ s->max_buf_size = s->logical_width*s->avctx->height*s->pixel_size /* image base material */
+ + 15 /* header + footer */
+ + s->avctx->height*2 /* skip code+rle end */
+ + s->logical_width/MAX_RLE_BULK + 1 /* rle codes */;
avctx->coded_frame = &s->frame;
return 0;
}
@@ -109,7 +115,7 @@ static av_cold int qtrle_encode_init(AVCodecContext *avctx)
*/
static void qtrle_encode_line(QtrleEncContext *s, AVFrame *p, int line, uint8_t **buf)
{
- int width=s->avctx->width;
+ int width=s->logical_width;
int i;
signed char rlecode;
@@ -224,12 +230,26 @@ static void qtrle_encode_line(QtrleEncContext *s, AVFrame *p, int line, uint8_t
}
else if (rlecode > 0) {
/* bulk copy */
- bytestream_put_buffer(buf, this_line + i*s->pixel_size, rlecode*s->pixel_size);
+ if (s->avctx->pix_fmt == PIX_FMT_GRAY8) {
+ // QT grayscale colorspace has 0=white and 255=black, we will
+ // ignore the palette that is included in the AVFrame because
+ // PIX_FMT_GRAY8 has defined color mapping
+ for (int j = 0; j < rlecode*s->pixel_size; ++j)
+ bytestream_put_byte(buf, *(this_line + i*s->pixel_size + j) ^ 0xff);
+ } else {
+ bytestream_put_buffer(buf, this_line + i*s->pixel_size, rlecode*s->pixel_size);
+ }
i += rlecode;
}
else {
/* repeat the bits */
- bytestream_put_buffer(buf, this_line + i*s->pixel_size, s->pixel_size);
+ if (s->avctx->pix_fmt == PIX_FMT_GRAY8) {
+ // QT grayscale colorspace has 0=white and 255=black, ...
+ for (int j = 0; j < s->pixel_size; ++j)
+ bytestream_put_byte(buf, *(this_line + i*s->pixel_size + j) ^ 0xff);
+ } else {
+ bytestream_put_buffer(buf, this_line + i*s->pixel_size, s->pixel_size);
+ }
i -= rlecode;
}
}
@@ -245,7 +265,7 @@ static int encode_frame(QtrleEncContext *s, AVFrame *p, uint8_t *buf)
uint8_t *orig_buf = buf;
if (!s->frame.key_frame) {
- unsigned line_size = s->avctx->width * s->pixel_size;
+ unsigned line_size = s->logical_width * s->pixel_size;
for (start_line = 0; start_line < s->avctx->height; start_line++)
if (memcmp(p->data[0] + start_line*p->linesize[0],
s->previous_frame.data[0] + start_line*s->previous_frame.linesize[0],
@@ -295,11 +315,11 @@ static int qtrle_encode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size,
if (avctx->gop_size == 0 || (s->avctx->frame_number % avctx->gop_size) == 0) {
/* I-Frame */
- p->pict_type = FF_I_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
} else {
/* P-Frame */
- p->pict_type = FF_P_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_P;
p->key_frame = 0;
}
@@ -329,6 +349,6 @@ AVCodec ff_qtrle_encoder = {
qtrle_encode_init,
qtrle_encode_frame,
qtrle_encode_end,
- .pix_fmts = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB555BE, PIX_FMT_ARGB, PIX_FMT_NONE},
+ .pix_fmts = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB555BE, PIX_FMT_ARGB, PIX_FMT_GRAY8, PIX_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("QuickTime Animation (RLE) video"),
};
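
The qtrleenc.c changes above add PIX_FMT_GRAY8 support; because QuickTime's grayscale palette runs from 0 = white to 255 = black, the opposite of GRAY8, each byte is inverted while being written. A small standalone sketch of just that inversion (the copy_inverted() helper is hypothetical):

#include <stdint.h>
#include <stdio.h>

static void copy_inverted(uint8_t *dst, const uint8_t *src, int n)
{
    for (int i = 0; i < n; i++)
        dst[i] = src[i] ^ 0xff;   /* GRAY8 black (0) becomes QT black (255) */
}

int main(void)
{
    const uint8_t gray8[4] = { 0, 64, 192, 255 };
    uint8_t       qt[4];

    copy_inverted(qt, gray8, 4);
    for (int i = 0; i < 4; i++)
        printf("%3u -> %3u\n", gray8[i], qt[i]);
    return 0;
}
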
diff --git a/libavcodec/r210dec.c b/libavcodec/r210dec.c
index a5222b17fc..293fe654ad 100644
--- a/libavcodec/r210dec.c
+++ b/libavcodec/r210dec.c
@@ -54,7 +54,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
if (avctx->get_buffer(avctx, pic) < 0)
return -1;
- pic->pict_type = FF_I_TYPE;
+ pic->pict_type = AV_PICTURE_TYPE_I;
pic->key_frame = 1;
dst_line = pic->data[0];
diff --git a/libavcodec/ra144enc.c b/libavcodec/ra144enc.c
index a0912056d7..351ba9e871 100644
--- a/libavcodec/ra144enc.c
+++ b/libavcodec/ra144enc.c
@@ -54,7 +54,7 @@ static av_cold int ra144_encode_init(AVCodecContext * avctx)
ractx->lpc_coef[1] = ractx->lpc_tables[1];
ractx->avctx = avctx;
ret = ff_lpc_init(&ractx->lpc_ctx, avctx->frame_size, LPC_ORDER,
- AV_LPC_TYPE_LEVINSON);
+ FF_LPC_TYPE_LEVINSON);
return ret;
}
@@ -461,7 +461,7 @@ static int ra144_encode_frame(AVCodecContext *avctx, uint8_t *frame,
32)];
ff_lpc_calc_coefs(&ractx->lpc_ctx, lpc_data, NBLOCKS * BLOCKSIZE, LPC_ORDER,
- LPC_ORDER, 16, lpc_coefs, shift, AV_LPC_TYPE_LEVINSON,
+ LPC_ORDER, 16, lpc_coefs, shift, FF_LPC_TYPE_LEVINSON,
0, ORDER_METHOD_EST, 12, 0);
for (i = 0; i < LPC_ORDER; i++)
block_coefs[NBLOCKS - 1][i] = -(lpc_coefs[LPC_ORDER - 1][i] <<
diff --git a/libavcodec/ratecontrol.c b/libavcodec/ratecontrol.c
index bb17490a12..6874fc7034 100644
--- a/libavcodec/ratecontrol.c
+++ b/libavcodec/ratecontrol.c
@@ -144,7 +144,7 @@ int ff_rate_control_init(MpegEncContext *s)
/* init all to skipped p frames (with b frames we might have a not encoded frame at the end FIXME) */
for(i=0; i<rcc->num_entries; i++){
RateControlEntry *rce= &rcc->entry[i];
- rce->pict_type= rce->new_pict_type=FF_P_TYPE;
+ rce->pict_type= rce->new_pict_type=AV_PICTURE_TYPE_P;
rce->qscale= rce->new_qscale=FF_QP2LAMBDA * 2;
rce->misc_bits= s->mb_num + 10;
rce->mb_var_sum= s->mb_num*100;
@@ -211,9 +211,9 @@ int ff_rate_control_init(MpegEncContext *s)
double bits= s->avctx->rc_initial_cplx * (i/10000.0 + 1.0)*s->mb_num;
RateControlEntry rce;
- if (i%((s->gop_size+3)/4)==0) rce.pict_type= FF_I_TYPE;
- else if(i%(s->max_b_frames+1)) rce.pict_type= FF_B_TYPE;
- else rce.pict_type= FF_P_TYPE;
+ if (i%((s->gop_size+3)/4)==0) rce.pict_type= AV_PICTURE_TYPE_I;
+ else if(i%(s->max_b_frames+1)) rce.pict_type= AV_PICTURE_TYPE_B;
+ else rce.pict_type= AV_PICTURE_TYPE_P;
rce.new_pict_type= rce.pict_type;
rce.mc_mb_var_sum= bits*s->mb_num/100000;
@@ -223,7 +223,7 @@ int ff_rate_control_init(MpegEncContext *s)
rce.b_code = 1;
rce.misc_bits= 1;
- if(s->pict_type== FF_I_TYPE){
+ if(s->pict_type== AV_PICTURE_TYPE_I){
rce.i_count = s->mb_num;
rce.i_tex_bits= bits;
rce.p_tex_bits= 0;
@@ -317,23 +317,23 @@ static double get_qscale(MpegEncContext *s, RateControlEntry *rce, double rate_f
rce->p_tex_bits*rce->qscale,
(rce->i_tex_bits + rce->p_tex_bits)*(double)rce->qscale,
rce->mv_bits/mb_num,
- rce->pict_type == FF_B_TYPE ? (rce->f_code + rce->b_code)*0.5 : rce->f_code,
+ rce->pict_type == AV_PICTURE_TYPE_B ? (rce->f_code + rce->b_code)*0.5 : rce->f_code,
rce->i_count/mb_num,
rce->mc_mb_var_sum/mb_num,
rce->mb_var_sum/mb_num,
- rce->pict_type == FF_I_TYPE,
- rce->pict_type == FF_P_TYPE,
- rce->pict_type == FF_B_TYPE,
+ rce->pict_type == AV_PICTURE_TYPE_I,
+ rce->pict_type == AV_PICTURE_TYPE_P,
+ rce->pict_type == AV_PICTURE_TYPE_B,
rcc->qscale_sum[pict_type] / (double)rcc->frame_count[pict_type],
a->qcompress,
-/* rcc->last_qscale_for[FF_I_TYPE],
- rcc->last_qscale_for[FF_P_TYPE],
- rcc->last_qscale_for[FF_B_TYPE],
+/* rcc->last_qscale_for[AV_PICTURE_TYPE_I],
+ rcc->last_qscale_for[AV_PICTURE_TYPE_P],
+ rcc->last_qscale_for[AV_PICTURE_TYPE_B],
rcc->next_non_b_qscale,*/
- rcc->i_cplx_sum[FF_I_TYPE] / (double)rcc->frame_count[FF_I_TYPE],
- rcc->i_cplx_sum[FF_P_TYPE] / (double)rcc->frame_count[FF_P_TYPE],
- rcc->p_cplx_sum[FF_P_TYPE] / (double)rcc->frame_count[FF_P_TYPE],
- rcc->p_cplx_sum[FF_B_TYPE] / (double)rcc->frame_count[FF_B_TYPE],
+ rcc->i_cplx_sum[AV_PICTURE_TYPE_I] / (double)rcc->frame_count[AV_PICTURE_TYPE_I],
+ rcc->i_cplx_sum[AV_PICTURE_TYPE_P] / (double)rcc->frame_count[AV_PICTURE_TYPE_P],
+ rcc->p_cplx_sum[AV_PICTURE_TYPE_P] / (double)rcc->frame_count[AV_PICTURE_TYPE_P],
+ rcc->p_cplx_sum[AV_PICTURE_TYPE_B] / (double)rcc->frame_count[AV_PICTURE_TYPE_B],
(rcc->i_cplx_sum[pict_type] + rcc->p_cplx_sum[pict_type]) / (double)rcc->frame_count[pict_type],
0
};
@@ -364,9 +364,9 @@ static double get_qscale(MpegEncContext *s, RateControlEntry *rce, double rate_f
q= bits2qp(rce, bits);
/* I/B difference */
- if (pict_type==FF_I_TYPE && s->avctx->i_quant_factor<0.0)
+ if (pict_type==AV_PICTURE_TYPE_I && s->avctx->i_quant_factor<0.0)
q= -q*s->avctx->i_quant_factor + s->avctx->i_quant_offset;
- else if(pict_type==FF_B_TYPE && s->avctx->b_quant_factor<0.0)
+ else if(pict_type==AV_PICTURE_TYPE_B && s->avctx->b_quant_factor<0.0)
q= -q*s->avctx->b_quant_factor + s->avctx->b_quant_offset;
if(q<1) q=1;
@@ -377,17 +377,17 @@ static double get_diff_limited_q(MpegEncContext *s, RateControlEntry *rce, doubl
RateControlContext *rcc= &s->rc_context;
AVCodecContext *a= s->avctx;
const int pict_type= rce->new_pict_type;
- const double last_p_q = rcc->last_qscale_for[FF_P_TYPE];
+ const double last_p_q = rcc->last_qscale_for[AV_PICTURE_TYPE_P];
const double last_non_b_q= rcc->last_qscale_for[rcc->last_non_b_pict_type];
- if (pict_type==FF_I_TYPE && (a->i_quant_factor>0.0 || rcc->last_non_b_pict_type==FF_P_TYPE))
+ if (pict_type==AV_PICTURE_TYPE_I && (a->i_quant_factor>0.0 || rcc->last_non_b_pict_type==AV_PICTURE_TYPE_P))
q= last_p_q *FFABS(a->i_quant_factor) + a->i_quant_offset;
- else if(pict_type==FF_B_TYPE && a->b_quant_factor>0.0)
+ else if(pict_type==AV_PICTURE_TYPE_B && a->b_quant_factor>0.0)
q= last_non_b_q* a->b_quant_factor + a->b_quant_offset;
if(q<1) q=1;
/* last qscale / qdiff stuff */
- if(rcc->last_non_b_pict_type==pict_type || pict_type!=FF_I_TYPE){
+ if(rcc->last_non_b_pict_type==pict_type || pict_type!=AV_PICTURE_TYPE_I){
double last_q= rcc->last_qscale_for[pict_type];
const int maxdiff= FF_QP2LAMBDA * a->max_qdiff;
@@ -397,7 +397,7 @@ static double get_diff_limited_q(MpegEncContext *s, RateControlEntry *rce, doubl
rcc->last_qscale_for[pict_type]= q; //Note we cannot do that after blurring
- if(pict_type!=FF_B_TYPE)
+ if(pict_type!=AV_PICTURE_TYPE_B)
rcc->last_non_b_pict_type= pict_type;
return q;
@@ -412,10 +412,10 @@ static void get_qminmax(int *qmin_ret, int *qmax_ret, MpegEncContext *s, int pic
assert(qmin <= qmax);
- if(pict_type==FF_B_TYPE){
+ if(pict_type==AV_PICTURE_TYPE_B){
qmin= (int)(qmin*FFABS(s->avctx->b_quant_factor)+s->avctx->b_quant_offset + 0.5);
qmax= (int)(qmax*FFABS(s->avctx->b_quant_factor)+s->avctx->b_quant_offset + 0.5);
- }else if(pict_type==FF_I_TYPE){
+ }else if(pict_type==AV_PICTURE_TYPE_I){
qmin= (int)(qmin*FFABS(s->avctx->i_quant_factor)+s->avctx->i_quant_offset + 0.5);
qmax= (int)(qmax*FFABS(s->avctx->i_quant_factor)+s->avctx->i_quant_offset + 0.5);
}
@@ -441,7 +441,7 @@ static double modify_qscale(MpegEncContext *s, RateControlEntry *rce, double q,
get_qminmax(&qmin, &qmax, s, pict_type);
/* modulation */
- if(s->avctx->rc_qmod_freq && frame_num%s->avctx->rc_qmod_freq==0 && pict_type==FF_P_TYPE)
+ if(s->avctx->rc_qmod_freq && frame_num%s->avctx->rc_qmod_freq==0 && pict_type==AV_PICTURE_TYPE_P)
q*= s->avctx->rc_qmod_amp;
//printf("q:%f\n", q);
@@ -684,7 +684,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
//printf("input_pic_num:%d pic_num:%d frame_rate:%d\n", s->input_picture_number, s->picture_number, s->frame_rate);
/* update predictors */
if(picture_number>2 && !dry_run){
- const int last_var= s->last_pict_type == FF_I_TYPE ? rcc->last_mb_var_sum : rcc->last_mc_mb_var_sum;
+ const int last_var= s->last_pict_type == AV_PICTURE_TYPE_I ? rcc->last_mb_var_sum : rcc->last_mc_mb_var_sum;
update_predictor(&rcc->pred[s->last_pict_type], rcc->last_qscale, sqrt(last_var), s->frame_bits);
}
@@ -699,7 +699,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
//FIXME add a dts field to AVFrame and ensure its set and use it here instead of reordering
//but the reordering is simpler for now until h.264 b pyramid must be handeld
- if(s->pict_type == FF_B_TYPE || s->low_delay)
+ if(s->pict_type == AV_PICTURE_TYPE_B || s->low_delay)
dts_pic= s->current_picture_ptr;
else
dts_pic= s->last_picture_ptr;
@@ -717,11 +717,11 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
br_compensation= (a->bit_rate_tolerance - diff)/a->bit_rate_tolerance;
if(br_compensation<=0.0) br_compensation=0.001;
- var= pict_type == FF_I_TYPE ? pic->mb_var_sum : pic->mc_mb_var_sum;
+ var= pict_type == AV_PICTURE_TYPE_I ? pic->mb_var_sum : pic->mc_mb_var_sum;
short_term_q = 0; /* avoid warning */
if(s->flags&CODEC_FLAG_PASS2){
- if(pict_type!=FF_I_TYPE)
+ if(pict_type!=AV_PICTURE_TYPE_I)
assert(pict_type == rce->new_pict_type);
q= rce->new_qscale / br_compensation;
@@ -737,7 +737,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
rce->misc_bits= 1;
bits= predict_size(&rcc->pred[pict_type], rce->qscale, sqrt(var));
- if(pict_type== FF_I_TYPE){
+ if(pict_type== AV_PICTURE_TYPE_I){
rce->i_count = s->mb_num;
rce->i_tex_bits= bits;
rce->p_tex_bits= 0;
@@ -767,7 +767,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
//printf("%f ", q);
assert(q>0.0);
- if(pict_type==FF_P_TYPE || s->intra_only){ //FIXME type dependent blur like in 2-pass
+ if(pict_type==AV_PICTURE_TYPE_P || s->intra_only){ //FIXME type dependent blur like in 2-pass
rcc->short_term_qsum*=a->qblur;
rcc->short_term_qcount*=a->qblur;
@@ -788,7 +788,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
if(s->avctx->debug&FF_DEBUG_RC){
av_log(s->avctx, AV_LOG_DEBUG, "%c qp:%d<%2.1f<%d %d want:%d total:%d comp:%f st_q:%2.2f size:%d var:%d/%d br:%d fps:%d\n",
- av_get_pict_type_char(pict_type), qmin, q, qmax, picture_number, (int)wanted_bits/1000, (int)s->total_bits/1000,
+ av_get_picture_type_char(pict_type), qmin, q, qmax, picture_number, (int)wanted_bits/1000, (int)s->total_bits/1000,
br_compensation, short_term_q, s->frame_bits, pic->mb_var_sum, pic->mc_mb_var_sum, s->bit_rate/1000, (int)fps
);
}
@@ -842,7 +842,7 @@ static int init_pass2(MpegEncContext *s)
complexity[rce->new_pict_type]+= (rce->i_tex_bits+ rce->p_tex_bits)*(double)rce->qscale;
const_bits[rce->new_pict_type]+= rce->mv_bits + rce->misc_bits;
}
- all_const_bits= const_bits[FF_I_TYPE] + const_bits[FF_P_TYPE] + const_bits[FF_B_TYPE];
+ all_const_bits= const_bits[AV_PICTURE_TYPE_I] + const_bits[AV_PICTURE_TYPE_P] + const_bits[AV_PICTURE_TYPE_B];
if(all_available_bits < all_const_bits){
av_log(s->avctx, AV_LOG_ERROR, "requested bitrate is too low\n");
diff --git a/libavcodec/raw.c b/libavcodec/raw.c
index 0a865f470a..7481b2fba9 100644
--- a/libavcodec/raw.c
+++ b/libavcodec/raw.c
@@ -122,6 +122,7 @@ const PixelFormatTag ff_raw_pix_fmt_tags[] = {
{ PIX_FMT_UYVY422, MKTAG('A', 'V', 'U', 'I') }, /* FIXME merge both fields */
{ PIX_FMT_YUYV422, MKTAG('y', 'u', 'v', '2') },
{ PIX_FMT_YUYV422, MKTAG('y', 'u', 'v', 's') },
+ { PIX_FMT_YUYV422, MKTAG('D', 'V', 'O', 'O') }, /* Digital Voodoo SD 8 Bit */
{ PIX_FMT_PAL8, MKTAG('W', 'R', 'A', 'W') },
{ PIX_FMT_RGB555LE,MKTAG('L', '5', '5', '5') },
{ PIX_FMT_RGB565LE,MKTAG('L', '5', '6', '5') },
diff --git a/libavcodec/rawdec.c b/libavcodec/rawdec.c
index e142369d60..e6d3c21519 100644
--- a/libavcodec/rawdec.c
+++ b/libavcodec/rawdec.c
@@ -59,6 +59,7 @@ static const PixelFormatTag pix_fmt_bps_mov[] = {
{ PIX_FMT_RGB555BE, 16 },
{ PIX_FMT_RGB24, 24 },
{ PIX_FMT_ARGB, 32 },
+ { PIX_FMT_MONOWHITE,33 },
{ PIX_FMT_NONE, 0 },
};
@@ -92,7 +93,7 @@ static av_cold int raw_init_decoder(AVCodecContext *avctx)
if (!context->buffer)
return -1;
}
- context->pic.pict_type = FF_I_TYPE;
+ context->pic.pict_type = AV_PICTURE_TYPE_I;
context->pic.key_frame = 1;
avctx->coded_frame= &context->pic;
@@ -124,6 +125,7 @@ static int raw_decode(AVCodecContext *avctx,
frame->top_field_first = avctx->coded_frame->top_field_first;
frame->reordered_opaque = avctx->reordered_opaque;
frame->pkt_pts = avctx->pkt->pts;
+ frame->pkt_pos = avctx->pkt->pos;
//2bpp and 4bpp raw in avi and mov (yes this is ugly ...)
if (context->buffer) {
diff --git a/libavcodec/rawenc.c b/libavcodec/rawenc.c
index 630df0f723..772ce94067 100644
--- a/libavcodec/rawenc.c
+++ b/libavcodec/rawenc.c
@@ -32,7 +32,7 @@
static av_cold int raw_init_encoder(AVCodecContext *avctx)
{
avctx->coded_frame = (AVFrame *)avctx->priv_data;
- avctx->coded_frame->pict_type = FF_I_TYPE;
+ avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->key_frame = 1;
avctx->bits_per_coded_sample = av_get_bits_per_pixel(&av_pix_fmt_descriptors[avctx->pix_fmt]);
if(!avctx->codec_tag)
diff --git a/libavcodec/resample.c b/libavcodec/resample.c
index ed2b31bd31..1fb19b0232 100644
--- a/libavcodec/resample.c
+++ b/libavcodec/resample.c
@@ -29,6 +29,8 @@
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
+#define MAX_CHANNELS 8
+
struct AVResampleContext;
static const char *context_to_name(void *ptr)
@@ -37,20 +39,22 @@ static const char *context_to_name(void *ptr)
}
static const AVOption options[] = {{NULL}};
-static const AVClass audioresample_context_class = { "ReSampleContext", context_to_name, options, LIBAVUTIL_VERSION_INT };
+static const AVClass audioresample_context_class = {
+ "ReSampleContext", context_to_name, options, LIBAVUTIL_VERSION_INT
+};
struct ReSampleContext {
struct AVResampleContext *resample_context;
- short *temp[2];
+ short *temp[MAX_CHANNELS];
int temp_len;
float ratio;
/* channel convert */
int input_channels, output_channels, filter_channels;
AVAudioConvert *convert_ctx[2];
enum AVSampleFormat sample_fmt[2]; ///< input and output sample format
- unsigned sample_size[2]; ///< size of one sample in sample_fmt
- short *buffer[2]; ///< buffers used for conversion to S16
- unsigned buffer_size[2]; ///< sizes of allocated buffers
+ unsigned sample_size[2]; ///< size of one sample in sample_fmt
+ short *buffer[2]; ///< buffers used for conversion to S16
+ unsigned buffer_size[2]; ///< sizes of allocated buffers
};
/* n1: number of samples */
@@ -104,41 +108,42 @@ static void mono_to_stereo(short *output, short *input, int n1)
}
}
-/* XXX: should use more abstract 'N' channels system */
-static void stereo_split(short *output1, short *output2, short *input, int n)
+static void deinterleave(short **output, short *input, int channels, int samples)
{
- int i;
+ int i, j;
- for(i=0;i<n;i++) {
- *output1++ = *input++;
- *output2++ = *input++;
+ for (i = 0; i < samples; i++) {
+ for (j = 0; j < channels; j++) {
+ *output[j]++ = *input++;
+ }
}
}
-static void stereo_mux(short *output, short *input1, short *input2, int n)
+static void interleave(short *output, short **input, int channels, int samples)
{
- int i;
+ int i, j;
- for(i=0;i<n;i++) {
- *output++ = *input1++;
- *output++ = *input2++;
+ for (i = 0; i < samples; i++) {
+ for (j = 0; j < channels; j++) {
+ *output++ = *input[j]++;
+ }
}
}
static void ac3_5p1_mux(short *output, short *input1, short *input2, int n)
{
int i;
- short l,r;
-
- for(i=0;i<n;i++) {
- l=*input1++;
- r=*input2++;
- *output++ = l; /* left */
- *output++ = (l/2)+(r/2); /* center */
- *output++ = r; /* right */
- *output++ = 0; /* left surround */
- *output++ = 0; /* right surroud */
- *output++ = 0; /* low freq */
+ short l, r;
+
+ for (i = 0; i < n; i++) {
+ l = *input1++;
+ r = *input2++;
+ *output++ = l; /* left */
+ *output++ = (l / 2) + (r / 2); /* center */
+ *output++ = r; /* right */
+ *output++ = 0; /* left surround */
+ *output++ = 0; /* right surroud */
+ *output++ = 0; /* low freq */
}
}
@@ -151,18 +156,25 @@ ReSampleContext *av_audio_resample_init(int output_channels, int input_channels,
{
ReSampleContext *s;
- if ( input_channels > 2)
- {
- av_log(NULL, AV_LOG_ERROR, "Resampling with input channels greater than 2 unsupported.\n");
+ if (input_channels > MAX_CHANNELS) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Resampling with input channels greater than %d is unsupported.\n",
+ MAX_CHANNELS);
+ return NULL;
+ }
+ if (output_channels > 2 &&
+ !(output_channels == 6 && input_channels == 2) &&
+ output_channels != input_channels) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Resampling output channel count must be 1 or 2 for mono input; 1, 2 or 6 for stereo input; or N for N channel input.\n");
return NULL;
- }
+ }
s = av_mallocz(sizeof(ReSampleContext));
- if (!s)
- {
+ if (!s) {
av_log(NULL, AV_LOG_ERROR, "Can't allocate memory for resample context.\n");
return NULL;
- }
+ }
s->ratio = (float)output_rate / (float)input_rate;
@@ -173,10 +185,10 @@ ReSampleContext *av_audio_resample_init(int output_channels, int input_channels,
if (s->output_channels < s->filter_channels)
s->filter_channels = s->output_channels;
- s->sample_fmt [0] = sample_fmt_in;
- s->sample_fmt [1] = sample_fmt_out;
- s->sample_size[0] = av_get_bits_per_sample_fmt(s->sample_fmt[0])>>3;
- s->sample_size[1] = av_get_bits_per_sample_fmt(s->sample_fmt[1])>>3;
+ s->sample_fmt[0] = sample_fmt_in;
+ s->sample_fmt[1] = sample_fmt_out;
+ s->sample_size[0] = av_get_bits_per_sample_fmt(s->sample_fmt[0]) >> 3;
+ s->sample_size[1] = av_get_bits_per_sample_fmt(s->sample_fmt[1]) >> 3;
if (s->sample_fmt[0] != AV_SAMPLE_FMT_S16) {
if (!(s->convert_ctx[0] = av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
@@ -201,17 +213,10 @@ ReSampleContext *av_audio_resample_init(int output_channels, int input_channels,
}
}
-/*
- * AC-3 output is the only case where filter_channels could be greater than 2.
- * input channels can't be greater than 2, so resample the 2 channels and then
- * expand to 6 channels after the resampling.
- */
- if(s->filter_channels>2)
- s->filter_channels = 2;
-
#define TAPS 16
- s->resample_context= av_resample_init(output_rate, input_rate,
- filter_length, log2_phase_count, linear, cutoff);
+ s->resample_context = av_resample_init(output_rate, input_rate,
+ filter_length, log2_phase_count,
+ linear, cutoff);
*(const AVClass**)s->resample_context = &audioresample_context_class;
@@ -234,9 +239,9 @@ ReSampleContext *audio_resample_init(int output_channels, int input_channels,
int audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples)
{
int i, nb_samples1;
- short *bufin[2];
- short *bufout[2];
- short *buftmp2[2], *buftmp3[2];
+ short *bufin[MAX_CHANNELS];
+ short *bufout[MAX_CHANNELS];
+ short *buftmp2[MAX_CHANNELS], *buftmp3[MAX_CHANNELS];
short *output_bak = NULL;
int lenout;
@@ -251,7 +256,7 @@ int audio_resample(ReSampleContext *s, short *output, short *input, int nb_sampl
int ostride[1] = { 2 };
const void *ibuf[1] = { input };
void *obuf[1];
- unsigned input_size = nb_samples*s->input_channels*2;
+ unsigned input_size = nb_samples * s->input_channels * 2;
if (!s->buffer_size[0] || s->buffer_size[0] < input_size) {
av_free(s->buffer[0]);
@@ -266,12 +271,13 @@ int audio_resample(ReSampleContext *s, short *output, short *input, int nb_sampl
obuf[0] = s->buffer[0];
if (av_audio_convert(s->convert_ctx[0], obuf, ostride,
- ibuf, istride, nb_samples*s->input_channels) < 0) {
- av_log(s->resample_context, AV_LOG_ERROR, "Audio sample format conversion failed\n");
+ ibuf, istride, nb_samples * s->input_channels) < 0) {
+ av_log(s->resample_context, AV_LOG_ERROR,
+ "Audio sample format conversion failed\n");
return 0;
}
- input = s->buffer[0];
+ input = s->buffer[0];
}
lenout= 2*s->output_channels*nb_samples * s->ratio + 16;
@@ -293,52 +299,50 @@ int audio_resample(ReSampleContext *s, short *output, short *input, int nb_sampl
}
/* XXX: move those malloc to resample init code */
- for(i=0; i<s->filter_channels; i++){
- bufin[i]= av_malloc( (nb_samples + s->temp_len) * sizeof(short) );
+ for (i = 0; i < s->filter_channels; i++) {
+ bufin[i] = av_malloc((nb_samples + s->temp_len) * sizeof(short));
memcpy(bufin[i], s->temp[i], s->temp_len * sizeof(short));
buftmp2[i] = bufin[i] + s->temp_len;
+ bufout[i] = av_malloc(lenout * sizeof(short));
}
- /* make some zoom to avoid round pb */
- bufout[0]= av_malloc( lenout * sizeof(short) );
- bufout[1]= av_malloc( lenout * sizeof(short) );
-
- if (s->input_channels == 2 &&
- s->output_channels == 1) {
+ if (s->input_channels == 2 && s->output_channels == 1) {
buftmp3[0] = output;
stereo_to_mono(buftmp2[0], input, nb_samples);
} else if (s->output_channels >= 2 && s->input_channels == 1) {
buftmp3[0] = bufout[0];
- memcpy(buftmp2[0], input, nb_samples*sizeof(short));
- } else if (s->output_channels >= 2) {
- buftmp3[0] = bufout[0];
- buftmp3[1] = bufout[1];
- stereo_split(buftmp2[0], buftmp2[1], input, nb_samples);
+ memcpy(buftmp2[0], input, nb_samples * sizeof(short));
+ } else if (s->output_channels >= s->input_channels && s->input_channels >= 2) {
+ for (i = 0; i < s->input_channels; i++) {
+ buftmp3[i] = bufout[i];
+ }
+ deinterleave(buftmp2, input, s->input_channels, nb_samples);
} else {
buftmp3[0] = output;
- memcpy(buftmp2[0], input, nb_samples*sizeof(short));
+ memcpy(buftmp2[0], input, nb_samples * sizeof(short));
}
nb_samples += s->temp_len;
/* resample each channel */
nb_samples1 = 0; /* avoid warning */
- for(i=0;i<s->filter_channels;i++) {
+ for (i = 0; i < s->filter_channels; i++) {
int consumed;
- int is_last= i+1 == s->filter_channels;
+ int is_last = i + 1 == s->filter_channels;
- nb_samples1 = av_resample(s->resample_context, buftmp3[i], bufin[i], &consumed, nb_samples, lenout, is_last);
- s->temp_len= nb_samples - consumed;
- s->temp[i]= av_realloc(s->temp[i], s->temp_len*sizeof(short));
- memcpy(s->temp[i], bufin[i] + consumed, s->temp_len*sizeof(short));
+ nb_samples1 = av_resample(s->resample_context, buftmp3[i], bufin[i],
+ &consumed, nb_samples, lenout, is_last);
+ s->temp_len = nb_samples - consumed;
+ s->temp[i] = av_realloc(s->temp[i], s->temp_len * sizeof(short));
+ memcpy(s->temp[i], bufin[i] + consumed, s->temp_len * sizeof(short));
}
if (s->output_channels == 2 && s->input_channels == 1) {
mono_to_stereo(output, buftmp3[0], nb_samples1);
- } else if (s->output_channels == 2) {
- stereo_mux(output, buftmp3[0], buftmp3[1], nb_samples1);
- } else if (s->output_channels == 6) {
+ } else if (s->output_channels == 6 && s->input_channels == 2) {
ac3_5p1_mux(output, buftmp3[0], buftmp3[1], nb_samples1);
+ } else if (s->output_channels == s->input_channels && s->input_channels >= 2) {
+ interleave(output, buftmp3, s->output_channels, nb_samples1);
}
if (s->sample_fmt[1] != AV_SAMPLE_FMT_S16) {
@@ -348,25 +352,27 @@ int audio_resample(ReSampleContext *s, short *output, short *input, int nb_sampl
void *obuf[1] = { output_bak };
if (av_audio_convert(s->convert_ctx[1], obuf, ostride,
- ibuf, istride, nb_samples1*s->output_channels) < 0) {
- av_log(s->resample_context, AV_LOG_ERROR, "Audio sample format convertion failed\n");
+ ibuf, istride, nb_samples1 * s->output_channels) < 0) {
+ av_log(s->resample_context, AV_LOG_ERROR,
+ "Audio sample format convertion failed\n");
return 0;
}
}
- for(i=0; i<s->filter_channels; i++)
+ for (i = 0; i < s->filter_channels; i++) {
av_free(bufin[i]);
+ av_free(bufout[i]);
+ }
- av_free(bufout[0]);
- av_free(bufout[1]);
return nb_samples1;
}
void audio_resample_close(ReSampleContext *s)
{
+ int i;
av_resample_close(s->resample_context);
- av_freep(&s->temp[0]);
- av_freep(&s->temp[1]);
+ for (i = 0; i < s->filter_channels; i++)
+ av_freep(&s->temp[i]);
av_freep(&s->buffer[0]);
av_freep(&s->buffer[1]);
av_audio_convert_free(s->convert_ctx[0]);
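
The deinterleave()/interleave() helpers introduced above generalize the old stereo_split()/stereo_mux() pair to an arbitrary channel count (bounded by MAX_CHANNELS). Below is a minimal standalone sketch of the same packed-S16 layout, not taken from the patch: the buffer sizes are illustrative, and where the patch advances per-channel pointers in place, this sketch indexes instead, which is equivalent for freshly allocated planes.

    #include <stdio.h>

    #define CHANNELS 3
    #define SAMPLES  4

    /* Split packed (interleaved) S16 samples into one buffer per channel. */
    static void deinterleave(short **out, const short *in, int channels, int samples)
    {
        int i, j;
        for (i = 0; i < samples; i++)
            for (j = 0; j < channels; j++)
                out[j][i] = *in++;
    }

    /* Re-pack per-channel buffers into a single interleaved stream. */
    static void interleave(short *out, short **in, int channels, int samples)
    {
        int i, j;
        for (i = 0; i < samples; i++)
            for (j = 0; j < channels; j++)
                *out++ = in[j][i];
    }

    int main(void)
    {
        /* 4 frames of 3 channels: frame f carries f, 100+f, 200+f */
        short packed[CHANNELS * SAMPLES] = { 0, 100, 200, 1, 101, 201,
                                             2, 102, 202, 3, 103, 203 };
        short c0[SAMPLES], c1[SAMPLES], c2[SAMPLES];
        short *planes[CHANNELS] = { c0, c1, c2 };
        short repacked[CHANNELS * SAMPLES];
        int i;

        deinterleave(planes, packed, CHANNELS, SAMPLES);
        interleave(repacked, planes, CHANNELS, SAMPLES);

        for (i = 0; i < CHANNELS * SAMPLES; i++)
            printf("%d ", repacked[i]);   /* prints the original packed order */
        printf("\n");
        return 0;
    }
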
diff --git a/libavcodec/rl2.c b/libavcodec/rl2.c
index 19104144a2..8a553539ab 100644
--- a/libavcodec/rl2.c
+++ b/libavcodec/rl2.c
@@ -134,6 +134,7 @@ static av_cold int rl2_decode_init(AVCodecContext *avctx)
int i;
s->avctx = avctx;
avctx->pix_fmt = PIX_FMT_PAL8;
+ avcodec_get_frame_defaults(&s->frame);
/** parse extra data */
if(!avctx->extradata || avctx->extradata_size < EXTRADATA1_SIZE){
diff --git a/libavcodec/roqvideodec.c b/libavcodec/roqvideodec.c
index 4959239ba7..f0977f6491 100644
--- a/libavcodec/roqvideodec.c
+++ b/libavcodec/roqvideodec.c
@@ -159,6 +159,8 @@ static av_cold int roq_decode_init(AVCodecContext *avctx)
s->avctx = avctx;
s->width = avctx->width;
s->height = avctx->height;
+ avcodec_get_frame_defaults(&s->frames[0]);
+ avcodec_get_frame_defaults(&s->frames[1]);
s->last_frame = &s->frames[0];
s->current_frame = &s->frames[1];
avctx->pix_fmt = PIX_FMT_YUV444P;
diff --git a/libavcodec/rpza.c b/libavcodec/rpza.c
index 6a79d97e86..12558563c6 100644
--- a/libavcodec/rpza.c
+++ b/libavcodec/rpza.c
@@ -233,6 +233,7 @@ static av_cold int rpza_decode_init(AVCodecContext *avctx)
s->avctx = avctx;
avctx->pix_fmt = PIX_FMT_RGB555;
+ avcodec_get_frame_defaults(&s->frame);
s->frame.data[0] = NULL;
return 0;
diff --git a/libavcodec/rv10.c b/libavcodec/rv10.c
index 8185b75546..cbecfa8a87 100644
--- a/libavcodec/rv10.c
+++ b/libavcodec/rv10.c
@@ -240,9 +240,9 @@ static int rv10_decode_picture_header(MpegEncContext *s)
marker = get_bits1(&s->gb);
if (get_bits1(&s->gb))
- s->pict_type = FF_P_TYPE;
+ s->pict_type = AV_PICTURE_TYPE_P;
else
- s->pict_type = FF_I_TYPE;
+ s->pict_type = AV_PICTURE_TYPE_I;
if(!marker) av_log(s->avctx, AV_LOG_ERROR, "marker missing\n");
pb_frame = get_bits1(&s->gb);
@@ -259,7 +259,7 @@ static int rv10_decode_picture_header(MpegEncContext *s)
return -1;
}
- if (s->pict_type == FF_I_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_I) {
if (s->rv10_version == 3) {
/* specific MPEG like DC coding not used */
s->last_dc[0] = get_bits(&s->gb, 8);
@@ -319,16 +319,16 @@ static int rv20_decode_picture_header(MpegEncContext *s)
i= get_bits(&s->gb, 2);
switch(i){
- case 0: s->pict_type= FF_I_TYPE; break;
- case 1: s->pict_type= FF_I_TYPE; break; //hmm ...
- case 2: s->pict_type= FF_P_TYPE; break;
- case 3: s->pict_type= FF_B_TYPE; break;
+ case 0: s->pict_type= AV_PICTURE_TYPE_I; break;
+ case 1: s->pict_type= AV_PICTURE_TYPE_I; break; //hmm ...
+ case 2: s->pict_type= AV_PICTURE_TYPE_P; break;
+ case 3: s->pict_type= AV_PICTURE_TYPE_B; break;
default:
av_log(s->avctx, AV_LOG_ERROR, "unknown frame type\n");
return -1;
}
- if(s->last_picture_ptr==NULL && s->pict_type==FF_B_TYPE){
+ if(s->last_picture_ptr==NULL && s->pict_type==AV_PICTURE_TYPE_B){
av_log(s->avctx, AV_LOG_ERROR, "early B pix\n");
return -1;
}
@@ -399,7 +399,7 @@ static int rv20_decode_picture_header(MpegEncContext *s)
if(seq - s->time > 0x4000) seq -= 0x8000;
if(seq - s->time < -0x4000) seq += 0x8000;
if(seq != s->time){
- if(s->pict_type!=FF_B_TYPE){
+ if(s->pict_type!=AV_PICTURE_TYPE_B){
s->time= seq;
s->pp_time= s->time - s->last_non_b_time;
s->last_non_b_time= s->time;
@@ -422,7 +422,7 @@ av_log(s->avctx, AV_LOG_DEBUG, "\n");*/
s->f_code = 1;
s->unrestricted_mv = 1;
- s->h263_aic= s->pict_type == FF_I_TYPE;
+ s->h263_aic= s->pict_type == AV_PICTURE_TYPE_I;
// s->alt_inter_vlc=1;
// s->obmc=1;
// s->umvplus=1;
@@ -435,7 +435,7 @@ av_log(s->avctx, AV_LOG_DEBUG, "\n");*/
seq, s->mb_x, s->mb_y, s->pict_type, s->qscale, s->no_rounding);
}
- assert(s->pict_type != FF_B_TYPE || !s->low_delay);
+ assert(s->pict_type != AV_PICTURE_TYPE_B || !s->low_delay);
return s->mb_width*s->mb_height - mb_pos;
}
@@ -616,7 +616,7 @@ static int rv10_decode_packet(AVCodecContext *avctx,
av_log(s->avctx, AV_LOG_ERROR, "ERROR at MB %d %d\n", s->mb_x, s->mb_y);
return -1;
}
- if(s->pict_type != FF_B_TYPE)
+ if(s->pict_type != AV_PICTURE_TYPE_B)
ff_h263_update_motion_val(s);
MPV_decode_mb(s, s->block);
if(s->loop_filter)
@@ -693,7 +693,7 @@ static int rv10_decode_frame(AVCodecContext *avctx,
ff_er_frame_end(s);
MPV_frame_end(s);
- if (s->pict_type == FF_B_TYPE || s->low_delay) {
+ if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
*pict= *(AVFrame*)s->current_picture_ptr;
} else if (s->last_picture_ptr != NULL) {
*pict= *(AVFrame*)s->last_picture_ptr;
diff --git a/libavcodec/rv10enc.c b/libavcodec/rv10enc.c
index 206fe13225..82b1258799 100644
--- a/libavcodec/rv10enc.c
+++ b/libavcodec/rv10enc.c
@@ -36,13 +36,13 @@ void rv10_encode_picture_header(MpegEncContext *s, int picture_number)
put_bits(&s->pb, 1, 1); /* marker */
- put_bits(&s->pb, 1, (s->pict_type == FF_P_TYPE));
+ put_bits(&s->pb, 1, (s->pict_type == AV_PICTURE_TYPE_P));
put_bits(&s->pb, 1, 0); /* not PB frame */
put_bits(&s->pb, 5, s->qscale);
- if (s->pict_type == FF_I_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_I) {
/* specific MPEG like DC coding not used */
}
/* if multiple packets per frame are sent, the position at which
diff --git a/libavcodec/rv20enc.c b/libavcodec/rv20enc.c
index fe26dd486d..a10998450c 100644
--- a/libavcodec/rv20enc.c
+++ b/libavcodec/rv20enc.c
@@ -47,7 +47,7 @@ void rv20_encode_picture_header(MpegEncContext *s, int picture_number){
assert(s->modified_quant==1);
assert(s->loop_filter==1);
- s->h263_aic= s->pict_type == FF_I_TYPE;
+ s->h263_aic= s->pict_type == AV_PICTURE_TYPE_I;
if(s->h263_aic){
s->y_dc_scale_table=
s->c_dc_scale_table= ff_aic_dc_scale_table;
diff --git a/libavcodec/rv30.c b/libavcodec/rv30.c
index 38b600af80..b43859b9cb 100644
--- a/libavcodec/rv30.c
+++ b/libavcodec/rv30.c
@@ -111,7 +111,7 @@ static int rv30_decode_mb_info(RV34DecContext *r)
av_log(s->avctx, AV_LOG_ERROR, "dquant needed\n");
code -= 6;
}
- if(s->pict_type != FF_B_TYPE)
+ if(s->pict_type != AV_PICTURE_TYPE_B)
return rv30_p_types[code];
else
return rv30_b_types[code];
diff --git a/libavcodec/rv34.c b/libavcodec/rv34.c
index 32d8198eeb..a5db0b0255 100644
--- a/libavcodec/rv34.c
+++ b/libavcodec/rv34.c
@@ -816,7 +816,7 @@ static int rv34_decode_mv(RV34DecContext *r, int block_type)
ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
return 0;
case RV34_MB_SKIP:
- if(s->pict_type == FF_P_TYPE){
+ if(s->pict_type == AV_PICTURE_TYPE_P){
ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
break;
@@ -1055,9 +1055,9 @@ static int rv34_decode_mb_header(RV34DecContext *r, int8_t *intra_types)
s->current_picture_ptr->mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
r->mb_type[mb_pos] = r->block_type;
if(r->block_type == RV34_MB_SKIP){
- if(s->pict_type == FF_P_TYPE)
+ if(s->pict_type == AV_PICTURE_TYPE_P)
r->mb_type[mb_pos] = RV34_MB_P_16x16;
- if(s->pict_type == FF_B_TYPE)
+ if(s->pict_type == AV_PICTURE_TYPE_B)
r->mb_type[mb_pos] = RV34_MB_B_DIRECT;
}
r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->mb_type[mb_pos]);
@@ -1197,7 +1197,7 @@ static int rv34_decode_macroblock(RV34DecContext *r, int8_t *intra_types)
cbp = cbp2 = rv34_decode_mb_header(r, intra_types);
r->cbp_luma [mb_pos] = cbp;
r->cbp_chroma[mb_pos] = cbp >> 16;
- if(s->pict_type == FF_I_TYPE)
+ if(s->pict_type == AV_PICTURE_TYPE_I)
r->deblock_coefs[mb_pos] = 0xFFFF;
else
r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r) | r->cbp_luma[mb_pos];
@@ -1298,12 +1298,12 @@ static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int
r->cbp_chroma = av_realloc(r->cbp_chroma, r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_chroma));
r->deblock_coefs = av_realloc(r->deblock_coefs, r->s.mb_stride * r->s.mb_height * sizeof(*r->deblock_coefs));
}
- s->pict_type = r->si.type ? r->si.type : FF_I_TYPE;
+ s->pict_type = r->si.type ? r->si.type : AV_PICTURE_TYPE_I;
if(MPV_frame_start(s, s->avctx) < 0)
return -1;
ff_er_frame_start(s);
r->cur_pts = r->si.pts;
- if(s->pict_type != FF_B_TYPE){
+ if(s->pict_type != AV_PICTURE_TYPE_B){
r->last_pts = r->next_pts;
r->next_pts = r->cur_pts;
}
@@ -1452,14 +1452,14 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n");
return -1;
}
- if((!s->last_picture_ptr || !s->last_picture_ptr->data[0]) && si.type == FF_B_TYPE)
+ if((!s->last_picture_ptr || !s->last_picture_ptr->data[0]) && si.type == AV_PICTURE_TYPE_B)
return -1;
#if FF_API_HURRY_UP
/* skip b frames if we are in a hurry */
if(avctx->hurry_up && si.type==FF_B_TYPE) return buf_size;
#endif
- if( (avctx->skip_frame >= AVDISCARD_NONREF && si.type==FF_B_TYPE)
- || (avctx->skip_frame >= AVDISCARD_NONKEY && si.type!=FF_I_TYPE)
+ if( (avctx->skip_frame >= AVDISCARD_NONREF && si.type==AV_PICTURE_TYPE_B)
+ || (avctx->skip_frame >= AVDISCARD_NONKEY && si.type!=AV_PICTURE_TYPE_I)
|| avctx->skip_frame >= AVDISCARD_ALL)
return buf_size;
#if FF_API_HURRY_UP
@@ -1503,7 +1503,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
r->loop_filter(r, s->mb_height - 1);
ff_er_frame_end(s);
MPV_frame_end(s);
- if (s->pict_type == FF_B_TYPE || s->low_delay) {
+ if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
*pict= *(AVFrame*)s->current_picture_ptr;
} else if (s->last_picture_ptr != NULL) {
*pict= *(AVFrame*)s->last_picture_ptr;
diff --git a/libavcodec/rv40.c b/libavcodec/rv40.c
index 5d4aafc004..54d786a3a6 100644
--- a/libavcodec/rv40.c
+++ b/libavcodec/rv40.c
@@ -253,7 +253,7 @@ static int rv40_decode_mb_info(RV34DecContext *r)
prev_type = i;
}
}
- if(s->pict_type == FF_P_TYPE){
+ if(s->pict_type == AV_PICTURE_TYPE_P){
prev_type = block_num_to_ptype_vlc_num[prev_type];
q = get_vlc2(gb, ptype_vlc[prev_type].table, PTYPE_VLC_BITS, 1);
if(q < PBTYPE_ESCAPE)
diff --git a/libavcodec/s302m.c b/libavcodec/s302m.c
new file mode 100644
index 0000000000..d8b2b38a02
--- /dev/null
+++ b/libavcodec/s302m.c
@@ -0,0 +1,151 @@
+/*
+ * SMPTE 302M decoder
+ * Copyright (c) 2008 Laurent Aimar <fenrir@videolan.org>
+ * Copyright (c) 2009 Baptiste Coudurier <baptiste.coudurier@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/intreadwrite.h"
+#include "avcodec.h"
+
+#define AES3_HEADER_LEN 4
+
+static int s302m_parse_frame_header(AVCodecContext *avctx, const uint8_t *buf,
+ int buf_size)
+{
+ uint32_t h;
+ int frame_size, channels, id, bits;
+
+ if (buf_size <= AES3_HEADER_LEN) {
+ av_log(avctx, AV_LOG_ERROR, "frame is too short\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ /*
+ * AES3 header (field : bit width):
+ * frame size : 16
+ * number of channels : 2
+ * channel_id : 8
+ * bits per sample : 2
+ * alignment : 4
+ */
+
+ h = AV_RB32(buf);
+ frame_size = (h >> 16) & 0xffff;
+ channels = ((h >> 14) & 0x0003) * 2 + 2;
+ id = (h >> 6) & 0x00ff;
+ bits = ((h >> 4) & 0x0003) * 4 + 16;
+
+ if (AES3_HEADER_LEN + frame_size != buf_size || bits > 24) {
+ av_log(avctx, AV_LOG_ERROR, "frame has invalid header\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* Set output properties */
+ avctx->bits_per_coded_sample = bits;
+ if (bits > 16)
+ avctx->sample_fmt = SAMPLE_FMT_S32;
+ else
+ avctx->sample_fmt = SAMPLE_FMT_S16;
+
+ avctx->channels = channels;
+ switch(channels) {
+ case 2:
+ avctx->channel_layout = AV_CH_LAYOUT_STEREO;
+ break;
+ case 4:
+ avctx->channel_layout = AV_CH_LAYOUT_QUAD;
+ break;
+ case 8:
+ avctx->channel_layout = AV_CH_LAYOUT_5POINT1_BACK | AV_CH_LAYOUT_STEREO_DOWNMIX;
+ }
+ avctx->sample_rate = 48000;
+ avctx->bit_rate = 48000 * avctx->channels * (avctx->bits_per_coded_sample + 4) +
+ 32 * (48000 / (buf_size * 8 /
+ (avctx->channels *
+ (avctx->bits_per_coded_sample + 4))));
+
+ return frame_size;
+}
+
+static int s302m_decode_frame(AVCodecContext *avctx, void *data,
+ int *data_size, AVPacket *avpkt)
+{
+ const uint8_t *buf = avpkt->data;
+ int buf_size = avpkt->size;
+
+ int frame_size = s302m_parse_frame_header(avctx, buf, buf_size);
+ if (frame_size < 0)
+ return frame_size;
+
+ buf_size -= AES3_HEADER_LEN;
+ buf += AES3_HEADER_LEN;
+
+ if (*data_size < 4 * buf_size * 8 / (avctx->bits_per_coded_sample + 4))
+ return -1;
+
+ if (avctx->bits_per_coded_sample == 24) {
+ uint32_t *o = data;
+ for (; buf_size > 6; buf_size -= 7) {
+ *o++ = (av_reverse[buf[2]] << 24) |
+ (av_reverse[buf[1]] << 16) |
+ (av_reverse[buf[0]] << 8);
+ *o++ = (av_reverse[buf[6] & 0xf0] << 28) |
+ (av_reverse[buf[5]] << 20) |
+ (av_reverse[buf[4]] << 12) |
+ (av_reverse[buf[3] & 0x0f] << 8);
+ buf += 7;
+ }
+ *data_size = (uint8_t*) o - (uint8_t*) data;
+ } else if (avctx->bits_per_coded_sample == 20) {
+ uint32_t *o = data;
+ for (; buf_size > 5; buf_size -= 6) {
+ *o++ = (av_reverse[buf[2] & 0xf0] << 28) |
+ (av_reverse[buf[1]] << 20) |
+ (av_reverse[buf[0]] << 12);
+ *o++ = (av_reverse[buf[5] & 0xf0] << 28) |
+ (av_reverse[buf[4]] << 20) |
+ (av_reverse[buf[3]] << 12);
+ buf += 6;
+ }
+ *data_size = (uint8_t*) o - (uint8_t*) data;
+ } else {
+ uint16_t *o = data;
+ for (; buf_size > 4; buf_size -= 5) {
+ *o++ = (av_reverse[buf[1]] << 8) |
+ av_reverse[buf[0]];
+ *o++ = (av_reverse[buf[4] & 0xf0] << 12) |
+ (av_reverse[buf[3]] << 4) |
+ av_reverse[buf[2] & 0x0f];
+ buf += 5;
+ }
+ *data_size = (uint8_t*) o - (uint8_t*) data;
+ }
+
+ return buf - avpkt->data;
+}
+
+
+AVCodec ff_s302m_decoder = {
+ .name = "s302m",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = CODEC_ID_S302M,
+ .priv_data_size = 0,
+ .decode = s302m_decode_frame,
+ .long_name = NULL_IF_CONFIG_SMALL("SMPTE 302M"),
+};
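
The SMPTE 302M decoder added above derives all stream parameters from a single big-endian 32-bit AES3 header word. A small self-contained sketch of that bit unpacking follows; the header value 0x02008010 is purely hypothetical, chosen only to make the arithmetic concrete.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical AES3 header word, already read big-endian as AV_RB32(buf) does. */
        uint32_t h = 0x02008010u;

        int frame_size = (h >> 16) & 0xffff;            /* payload bytes after the 4-byte header */
        int channels   = ((h >> 14) & 0x0003) * 2 + 2;  /* 0..3 -> 2, 4, 6, 8 channels           */
        int id         = (h >>  6) & 0x00ff;
        int bits       = ((h >>  4) & 0x0003) * 4 + 16; /* 0..2 -> 16, 20, 24 bits per sample    */

        /* For 0x02008010 this prints: frame_size=512 channels=6 id=0 bits=20 */
        printf("frame_size=%d channels=%d id=%d bits=%d\n", frame_size, channels, id, bits);
        return 0;
    }
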
diff --git a/libavcodec/sgidec.c b/libavcodec/sgidec.c
index 96af5c6fc1..360a25ced2 100644
--- a/libavcodec/sgidec.c
+++ b/libavcodec/sgidec.c
@@ -215,7 +215,7 @@ static int decode_frame(AVCodecContext *avctx,
return -1;
}
- p->pict_type = FF_I_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
out_buf = p->data[0];
diff --git a/libavcodec/sgienc.c b/libavcodec/sgienc.c
index eafb655b46..1e2af8e7ae 100644
--- a/libavcodec/sgienc.c
+++ b/libavcodec/sgienc.c
@@ -52,7 +52,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf,
unsigned char *orig_buf = buf, *end_buf = buf + buf_size;
*p = *(AVFrame*)data;
- p->pict_type = FF_I_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
width = avctx->width;
diff --git a/libavcodec/sh4/dsputil_align.c b/libavcodec/sh4/dsputil_align.c
index 93b663894a..8be9318cdb 100644
--- a/libavcodec/sh4/dsputil_align.c
+++ b/libavcodec/sh4/dsputil_align.c
@@ -333,9 +333,9 @@ DEFFUNC(avg,no_rnd,xy,16,OP_XY,PACK)
void dsputil_init_align(DSPContext* c, AVCodecContext *avctx)
{
- const int h264_high_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
+ const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
c->put_pixels_tab[0][0] = put_rnd_pixels16_o;
c->put_pixels_tab[0][1] = put_rnd_pixels16_x;
c->put_pixels_tab[0][2] = put_rnd_pixels16_y;
@@ -405,7 +405,7 @@ void dsputil_init_align(DSPContext* c, AVCodecContext *avctx)
dspfunc(avg_qpel, 1, 8);
/* dspfunc(avg_no_rnd_qpel, 1, 8); */
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
dspfunc(put_h264_qpel, 0, 16);
dspfunc(put_h264_qpel, 1, 8);
dspfunc(put_h264_qpel, 2, 4);
@@ -415,7 +415,7 @@ void dsputil_init_align(DSPContext* c, AVCodecContext *avctx)
}
#undef dspfunc
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_sh4;
c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_sh4;
c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_sh4;
diff --git a/libavcodec/sh4/dsputil_sh4.c b/libavcodec/sh4/dsputil_sh4.c
index 219bb4c353..d254e1db6b 100644
--- a/libavcodec/sh4/dsputil_sh4.c
+++ b/libavcodec/sh4/dsputil_sh4.c
@@ -92,10 +92,10 @@ static void idct_add(uint8_t *dest, int line_size, DCTELEM *block)
void dsputil_init_sh4(DSPContext* c, AVCodecContext *avctx)
{
const int idct_algo= avctx->idct_algo;
- const int h264_high_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
+ const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
dsputil_init_align(c,avctx);
- if (!h264_high_depth)
+ if (!high_bit_depth)
c->clear_blocks = clear_blocks_sh4;
if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SH4){
c->idct_put = idct_put;
diff --git a/libavcodec/smacker.c b/libavcodec/smacker.c
index e6c3460d73..b8eab837ff 100644
--- a/libavcodec/smacker.c
+++ b/libavcodec/smacker.c
@@ -373,9 +373,9 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
smk->pic.palette_has_changed = buf[0] & 1;
smk->pic.key_frame = !!(buf[0] & 2);
if(smk->pic.key_frame)
- smk->pic.pict_type = FF_I_TYPE;
+ smk->pic.pict_type = AV_PICTURE_TYPE_I;
else
- smk->pic.pict_type = FF_P_TYPE;
+ smk->pic.pict_type = AV_PICTURE_TYPE_P;
buf++;
for(i = 0; i < 256; i++)
@@ -515,6 +515,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
avctx->pix_fmt = PIX_FMT_PAL8;
+ avcodec_get_frame_defaults(&c->pic);
/* decode huffman trees from extradata */
if(avctx->extradata_size < 16){
diff --git a/libavcodec/smc.c b/libavcodec/smc.c
index f8b994c1d1..fddd5ab32a 100644
--- a/libavcodec/smc.c
+++ b/libavcodec/smc.c
@@ -428,6 +428,7 @@ static av_cold int smc_decode_init(AVCodecContext *avctx)
s->avctx = avctx;
avctx->pix_fmt = PIX_FMT_PAL8;
+ avcodec_get_frame_defaults(&s->frame);
s->frame.data[0] = NULL;
return 0;
diff --git a/libavcodec/snow.c b/libavcodec/snow.c
index 0668f67b57..4ee1722e4c 100644
--- a/libavcodec/snow.c
+++ b/libavcodec/snow.c
@@ -2074,7 +2074,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
ff_init_range_decoder(c, buf, buf_size);
ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
- s->current_picture.pict_type= FF_I_TYPE; //FIXME I vs. P
+ s->current_picture.pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
if(decode_header(s)<0)
return -1;
common_init_after_header(avctx);
@@ -3651,7 +3651,7 @@ static int ratecontrol_1pass(SnowContext *s, AVFrame *pict)
coef_sum = (uint64_t)coef_sum * coef_sum >> 16;
assert(coef_sum < INT_MAX);
- if(pict->pict_type == FF_I_TYPE){
+ if(pict->pict_type == AV_PICTURE_TYPE_I){
s->m.current_picture.mb_var_sum= coef_sum;
s->m.current_picture.mc_mb_var_sum= 0;
}else{
@@ -3720,7 +3720,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
if(avctx->flags&CODEC_FLAG_PASS2){
s->m.pict_type =
pict->pict_type= s->m.rc_context.entry[avctx->frame_number].new_pict_type;
- s->keyframe= pict->pict_type==FF_I_TYPE;
+ s->keyframe= pict->pict_type==AV_PICTURE_TYPE_I;
if(!(avctx->flags&CODEC_FLAG_QSCALE)) {
pict->quality= ff_rate_estimate_qscale(&s->m, 0);
if (pict->quality < 0)
@@ -3729,7 +3729,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
}else{
s->keyframe= avctx->gop_size==0 || avctx->frame_number % avctx->gop_size == 0;
s->m.pict_type=
- pict->pict_type= s->keyframe ? FF_I_TYPE : FF_P_TYPE;
+ pict->pict_type= s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
}
if(s->pass1_rc && avctx->frame_number == 0)
@@ -3748,7 +3748,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
s->m.current_picture_ptr= &s->m.current_picture;
s->m.last_picture.pts= s->m.current_picture.pts;
s->m.current_picture.pts= pict->pts;
- if(pict->pict_type == FF_P_TYPE){
+ if(pict->pict_type == AV_PICTURE_TYPE_P){
int block_width = (width +15)>>4;
int block_height= (height+15)>>4;
int stride= s->current_picture.linesize[0];
@@ -3797,13 +3797,13 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
redo_frame:
- if(pict->pict_type == FF_I_TYPE)
+ if(pict->pict_type == AV_PICTURE_TYPE_I)
s->spatial_decomposition_count= 5;
else
s->spatial_decomposition_count= 5;
s->m.pict_type = pict->pict_type;
- s->qbias= pict->pict_type == FF_P_TYPE ? 2 : 0;
+ s->qbias= pict->pict_type == AV_PICTURE_TYPE_P ? 2 : 0;
common_init_after_header(avctx);
@@ -3836,12 +3836,12 @@ redo_frame:
predict_plane(s, s->spatial_idwt_buffer, plane_index, 0);
if( plane_index==0
- && pict->pict_type == FF_P_TYPE
+ && pict->pict_type == AV_PICTURE_TYPE_P
&& !(avctx->flags&CODEC_FLAG_PASS2)
&& s->m.me.scene_change_score > s->avctx->scenechange_threshold){
ff_init_range_encoder(c, buf, buf_size);
ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
- pict->pict_type= FF_I_TYPE;
+ pict->pict_type= AV_PICTURE_TYPE_I;
s->keyframe=1;
s->current_picture.key_frame=1;
goto redo_frame;
@@ -3887,7 +3887,7 @@ redo_frame:
if(!QUANTIZE2)
quantize(s, b, b->ibuf, b->buf, b->stride, s->qbias);
if(orientation==0)
- decorrelate(s, b, b->ibuf, b->stride, pict->pict_type == FF_P_TYPE, 0);
+ decorrelate(s, b, b->ibuf, b->stride, pict->pict_type == AV_PICTURE_TYPE_P, 0);
encode_subband(s, b, b->ibuf, b->parent ? b->parent->ibuf : NULL, b->stride, orientation);
assert(b->parent==NULL || b->parent->stride == b->stride*2);
if(orientation==0)
@@ -3914,7 +3914,7 @@ redo_frame:
predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);
}else{
//ME/MC only
- if(pict->pict_type == FF_I_TYPE){
+ if(pict->pict_type == AV_PICTURE_TYPE_I){
for(y=0; y<h; y++){
for(x=0; x<w; x++){
s->current_picture.data[plane_index][y*s->current_picture.linesize[plane_index] + x]=
diff --git a/libavcodec/sonic.c b/libavcodec/sonic.c
index 8dfac1a6fa..e7cdb3ba81 100644
--- a/libavcodec/sonic.c
+++ b/libavcodec/sonic.c
@@ -796,7 +796,7 @@ static av_cold int sonic_decode_init(AVCodecContext *avctx)
if (get_bits1(&gb)) // XXX FIXME
av_log(avctx, AV_LOG_INFO, "Custom quant table\n");
- s->block_align = (int)(2048.0*(s->samplerate/44100))/s->downsampling;
+ s->block_align = (int)(2048.0*s->samplerate/44100)/s->downsampling;
s->frame_size = s->channels*s->block_align*s->downsampling;
// avctx->frame_size = s->block_align;
diff --git a/libavcodec/sparc/dsputil_vis.c b/libavcodec/sparc/dsputil_vis.c
index ba921ad772..e4236602f6 100644
--- a/libavcodec/sparc/dsputil_vis.c
+++ b/libavcodec/sparc/dsputil_vis.c
@@ -3953,7 +3953,7 @@ void dsputil_init_vis(DSPContext* c, AVCodecContext *avctx)
{
/* VIS-specific optimizations */
int accel = vis_level ();
- const int h264_high_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
+ const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
if (accel & ACCEL_SPARC_VIS) {
if(avctx->idct_algo==FF_IDCT_SIMPLEVIS){
@@ -3963,7 +3963,7 @@ void dsputil_init_vis(DSPContext* c, AVCodecContext *avctx)
c->idct_permutation_type = FF_TRANSPOSE_IDCT_PERM;
}
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
c->put_pixels_tab[0][0] = MC_put_o_16_vis;
c->put_pixels_tab[0][1] = MC_put_x_16_vis;
c->put_pixels_tab[0][2] = MC_put_y_16_vis;
diff --git a/libavcodec/sunrast.c b/libavcodec/sunrast.c
index 73e4b5b91c..558b0edd8f 100644
--- a/libavcodec/sunrast.c
+++ b/libavcodec/sunrast.c
@@ -107,7 +107,7 @@ static int sunrast_decode_frame(AVCodecContext *avctx, void *data,
return -1;
}
- p->pict_type = FF_I_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_I;
if (depth != 8 && maplength) {
av_log(avctx, AV_LOG_WARNING, "useless colormap found or file is corrupted, trying to recover\n");
diff --git a/libavcodec/svq1dec.c b/libavcodec/svq1dec.c
index 04b12b756c..66536145b2 100644
--- a/libavcodec/svq1dec.c
+++ b/libavcodec/svq1dec.c
@@ -142,7 +142,7 @@ static const uint8_t string_table[256] = {
break;\
/* add child nodes */\
list[n++] = list[i];\
- list[n++] = list[i] + (((level & 1) ? pitch : 1) << ((level / 2) + 1));\
+ list[n++] = list[i] + (((level & 1) ? pitch : 1) << ((level >> 1) + 1));\
}
#define SVQ1_ADD_CODEBOOK()\
@@ -202,7 +202,7 @@ static const uint8_t string_table[256] = {
entries[j] = (((bit_cache >> (4*(stages - j - 1))) & 0xF) + 16*j) << (level + 1);\
}\
mean -= (stages * 128);\
- n4 = ((mean + (mean >> 31)) << 16) | (mean & 0xFFFF);
+ n4 = (mean << 16) + mean;
static int svq1_decode_block_intra (GetBitContext *bitbuf, uint8_t *pixels, int pitch ) {
uint32_t bit_cache;
@@ -563,7 +563,7 @@ static int svq1_decode_frame_header (GetBitContext *bitbuf,MpegEncContext *s) {
if(s->pict_type==4)
return -1;
- if (s->pict_type == FF_I_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_I) {
/* unknown fields */
if (s->f_code == 0x50 || s->f_code == 0x60) {
@@ -669,13 +669,13 @@ static int svq1_decode_frame(AVCodecContext *avctx,
//FIXME this avoids some confusion for "B frames" without 2 references
//this should be removed after libavcodec can handle more flexible picture types & ordering
- if(s->pict_type==FF_B_TYPE && s->last_picture_ptr==NULL) return buf_size;
+ if(s->pict_type==AV_PICTURE_TYPE_B && s->last_picture_ptr==NULL) return buf_size;
#if FF_API_HURRY_UP
if(avctx->hurry_up && s->pict_type==FF_B_TYPE) return buf_size;
#endif
- if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==FF_B_TYPE)
- ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=FF_I_TYPE)
+ if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==AV_PICTURE_TYPE_B)
+ ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=AV_PICTURE_TYPE_I)
|| avctx->skip_frame >= AVDISCARD_ALL)
return buf_size;
@@ -702,13 +702,13 @@ static int svq1_decode_frame(AVCodecContext *avctx,
current = s->current_picture.data[i];
- if(s->pict_type==FF_B_TYPE){
+ if(s->pict_type==AV_PICTURE_TYPE_B){
previous = s->next_picture.data[i];
}else{
previous = s->last_picture.data[i];
}
- if (s->pict_type == FF_I_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_I) {
/* keyframe */
for (y=0; y < height; y+=16) {
for (x=0; x < width; x+=16) {
diff --git a/libavcodec/svq1enc.c b/libavcodec/svq1enc.c
index a415d843d2..edd6029209 100644
--- a/libavcodec/svq1enc.c
+++ b/libavcodec/svq1enc.c
@@ -86,7 +86,7 @@ static void svq1_write_header(SVQ1Context *s, int frame_type)
/* frame type */
put_bits(&s->pb, 2, frame_type - 1);
- if (frame_type == FF_I_TYPE) {
+ if (frame_type == AV_PICTURE_TYPE_I) {
/* no checksum since frame code is 0x20 */
@@ -280,7 +280,7 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane
block_width = (width + 15) / 16;
block_height = (height + 15) / 16;
- if(s->picture.pict_type == FF_P_TYPE){
+ if(s->picture.pict_type == AV_PICTURE_TYPE_P){
s->m.avctx= s->avctx;
s->m.current_picture_ptr= &s->m.current_picture;
s->m.last_picture_ptr = &s->m.last_picture;
@@ -382,11 +382,11 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane
ff_init_block_index(&s->m);
ff_update_block_index(&s->m);
- if(s->picture.pict_type == FF_I_TYPE || (s->m.mb_type[x + y*s->m.mb_stride]&CANDIDATE_MB_TYPE_INTRA)){
+ if(s->picture.pict_type == AV_PICTURE_TYPE_I || (s->m.mb_type[x + y*s->m.mb_stride]&CANDIDATE_MB_TYPE_INTRA)){
for(i=0; i<6; i++){
init_put_bits(&s->reorder_pb[i], reorder_buffer[0][i], 7*32);
}
- if(s->picture.pict_type == FF_P_TYPE){
+ if(s->picture.pict_type == AV_PICTURE_TYPE_P){
const uint8_t *vlc= ff_svq1_block_type_vlc[SVQ1_BLOCK_INTRA];
put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);
score[0]= vlc[1]*lambda;
@@ -401,7 +401,7 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane
best=0;
- if(s->picture.pict_type == FF_P_TYPE){
+ if(s->picture.pict_type == AV_PICTURE_TYPE_P){
const uint8_t *vlc= ff_svq1_block_type_vlc[SVQ1_BLOCK_INTER];
int mx, my, pred_x, pred_y, dxy;
int16_t *motion_ptr;
@@ -528,8 +528,8 @@ static int svq1_encode_frame(AVCodecContext *avctx, unsigned char *buf,
init_put_bits(&s->pb, buf, buf_size);
*p = *pict;
- p->pict_type = avctx->gop_size && avctx->frame_number % avctx->gop_size ? FF_P_TYPE : FF_I_TYPE;
- p->key_frame = p->pict_type == FF_I_TYPE;
+ p->pict_type = avctx->gop_size && avctx->frame_number % avctx->gop_size ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
+ p->key_frame = p->pict_type == AV_PICTURE_TYPE_I;
svq1_write_header(s, p->pict_type);
for(i=0; i<3; i++){
diff --git a/libavcodec/svq3.c b/libavcodec/svq3.c
index c7119b975b..ab9eab2309 100644
--- a/libavcodec/svq3.c
+++ b/libavcodec/svq3.c
@@ -63,6 +63,17 @@
* svq3 decoder.
*/
+typedef struct {
+ H264Context h;
+ int halfpel_flag;
+ int thirdpel_flag;
+ int unknown_flag;
+ int next_slice_index;
+ uint32_t watermark_key;
+ uint8_t *buf;
+ int buf_size;
+} SVQ3Context;
+
#define FULLPEL_MODE 1
#define HALFPEL_MODE 2
#define THIRDPEL_MODE 3
@@ -423,8 +434,9 @@ static inline int svq3_mc_dir(H264Context *h, int size, int mode, int dir,
return 0;
}
-static int svq3_decode_mb(H264Context *h, unsigned int mb_type)
+static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type)
{
+ H264Context *h = &svq3->h;
int i, j, k, m, dir, mode;
int cbp = 0;
uint32_t vlc;
@@ -438,10 +450,10 @@ static int svq3_decode_mb(H264Context *h, unsigned int mb_type)
h->topright_samples_available = 0xFFFF;
if (mb_type == 0) { /* SKIP */
- if (s->pict_type == FF_P_TYPE || s->next_picture.mb_type[mb_xy] == -1) {
+ if (s->pict_type == AV_PICTURE_TYPE_P || s->next_picture.mb_type[mb_xy] == -1) {
svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 0, 0);
- if (s->pict_type == FF_B_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_B) {
svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 1, 1);
}
@@ -456,9 +468,9 @@ static int svq3_decode_mb(H264Context *h, unsigned int mb_type)
mb_type = MB_TYPE_16x16;
}
} else if (mb_type < 8) { /* INTER */
- if (h->thirdpel_flag && h->halfpel_flag == !get_bits1 (&s->gb)) {
+ if (svq3->thirdpel_flag && svq3->halfpel_flag == !get_bits1 (&s->gb)) {
mode = THIRDPEL_MODE;
- } else if (h->halfpel_flag && h->thirdpel_flag == !get_bits1 (&s->gb)) {
+ } else if (svq3->halfpel_flag && svq3->thirdpel_flag == !get_bits1 (&s->gb)) {
mode = HALFPEL_MODE;
} else {
mode = FULLPEL_MODE;
@@ -502,15 +514,15 @@ static int svq3_decode_mb(H264Context *h, unsigned int mb_type)
}else
memset(&h->ref_cache[m][scan8[0] - 1*8 - 1], PART_NOT_AVAILABLE, 8);
- if (s->pict_type != FF_B_TYPE)
+ if (s->pict_type != AV_PICTURE_TYPE_B)
break;
}
/* decode motion vector(s) and form prediction(s) */
- if (s->pict_type == FF_P_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_P) {
if (svq3_mc_dir(h, (mb_type - 1), mode, 0, 0) < 0)
return -1;
- } else { /* FF_B_TYPE */
+ } else { /* AV_PICTURE_TYPE_B */
if (mb_type != 2) {
if (svq3_mc_dir(h, 0, mode, 0, 0) < 0)
return -1;
@@ -609,11 +621,11 @@ static int svq3_decode_mb(H264Context *h, unsigned int mb_type)
mb_type = MB_TYPE_INTRA16x16;
}
- if (!IS_INTER(mb_type) && s->pict_type != FF_I_TYPE) {
+ if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) {
for (i = 0; i < 4; i++) {
memset(s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
}
- if (s->pict_type == FF_B_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_B) {
for (i = 0; i < 4; i++) {
memset(s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
}
@@ -622,12 +634,12 @@ static int svq3_decode_mb(H264Context *h, unsigned int mb_type)
if (!IS_INTRA4x4(mb_type)) {
memset(h->intra4x4_pred_mode+h->mb2br_xy[mb_xy], DC_PRED, 8);
}
- if (!IS_SKIP(mb_type) || s->pict_type == FF_B_TYPE) {
+ if (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B) {
memset(h->non_zero_count_cache + 8, 0, 4*9*sizeof(uint8_t));
s->dsp.clear_blocks(h->mb);
}
- if (!IS_INTRA16x16(mb_type) && (!IS_SKIP(mb_type) || s->pict_type == FF_B_TYPE)) {
+ if (!IS_INTRA16x16(mb_type) && (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B)) {
if ((vlc = svq3_get_ue_golomb(&s->gb)) >= 48){
av_log(h->s.avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
return -1;
@@ -635,7 +647,7 @@ static int svq3_decode_mb(H264Context *h, unsigned int mb_type)
cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc] : golomb_to_inter_cbp[vlc];
}
- if (IS_INTRA16x16(mb_type) || (s->pict_type != FF_I_TYPE && s->adaptive_quant && cbp)) {
+ if (IS_INTRA16x16(mb_type) || (s->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
s->qscale += svq3_get_se_golomb(&s->gb);
if (s->qscale > 31){
@@ -701,9 +713,11 @@ static int svq3_decode_mb(H264Context *h, unsigned int mb_type)
return 0;
}
-static int svq3_decode_slice_header(H264Context *h)
+static int svq3_decode_slice_header(AVCodecContext *avctx)
{
- MpegEncContext *const s = (MpegEncContext *) h;
+ SVQ3Context *svq3 = avctx->priv_data;
+ H264Context *h = &svq3->h;
+ MpegEncContext *s = &h->s;
const int mb_xy = h->mb_xy;
int i, header;
@@ -711,24 +725,24 @@ static int svq3_decode_slice_header(H264Context *h)
if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
/* TODO: what? */
- av_log(h->s.avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
+ av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
return -1;
} else {
int length = (header >> 5) & 3;
- h->next_slice_index = get_bits_count(&s->gb) + 8*show_bits(&s->gb, 8*length) + 8*length;
+ svq3->next_slice_index = get_bits_count(&s->gb) + 8*show_bits(&s->gb, 8*length) + 8*length;
- if (h->next_slice_index > s->gb.size_in_bits) {
- av_log(h->s.avctx, AV_LOG_ERROR, "slice after bitstream end\n");
+ if (svq3->next_slice_index > s->gb.size_in_bits) {
+ av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
return -1;
}
- s->gb.size_in_bits = h->next_slice_index - 8*(length - 1);
+ s->gb.size_in_bits = svq3->next_slice_index - 8*(length - 1);
skip_bits(&s->gb, 8);
- if (h->svq3_watermark_key) {
+ if (svq3->watermark_key) {
uint32_t header = AV_RL32(&s->gb.buffer[(get_bits_count(&s->gb)>>3)+1]);
- AV_WL32(&s->gb.buffer[(get_bits_count(&s->gb)>>3)+1], header ^ h->svq3_watermark_key);
+ AV_WL32(&s->gb.buffer[(get_bits_count(&s->gb)>>3)+1], header ^ svq3->watermark_key);
}
if (length > 0) {
memcpy((uint8_t *) &s->gb.buffer[get_bits_count(&s->gb) >> 3],
@@ -759,7 +773,7 @@ static int svq3_decode_slice_header(H264Context *h)
/* unknown fields */
skip_bits1(&s->gb);
- if (h->unknown_svq3_flag) {
+ if (svq3->unknown_flag) {
skip_bits1(&s->gb);
}
@@ -788,8 +802,9 @@ static int svq3_decode_slice_header(H264Context *h)
static av_cold int svq3_decode_init(AVCodecContext *avctx)
{
- MpegEncContext *const s = avctx->priv_data;
- H264Context *const h = avctx->priv_data;
+ SVQ3Context *svq3 = avctx->priv_data;
+ H264Context *h = &svq3->h;
+ MpegEncContext *s = &h->s;
int m;
unsigned char *extradata;
unsigned int size;
@@ -806,10 +821,11 @@ static av_cold int svq3_decode_init(AVCodecContext *avctx)
if (!s->context_initialized) {
s->width = avctx->width;
s->height = avctx->height;
- h->halfpel_flag = 1;
- h->thirdpel_flag = 1;
- h->unknown_svq3_flag = 0;
- h->chroma_qp[0] = h->chroma_qp[1] = 4;
+ h->chroma_qp[0] = h->chroma_qp[1] = 4;
+
+ svq3->halfpel_flag = 1;
+ svq3->thirdpel_flag = 1;
+ svq3->unknown_flag = 0;
if (MPV_common_init(s) < 0)
return -1;
@@ -851,8 +867,8 @@ static av_cold int svq3_decode_init(AVCodecContext *avctx)
break;
}
- h->halfpel_flag = get_bits1(&gb);
- h->thirdpel_flag = get_bits1(&gb);
+ svq3->halfpel_flag = get_bits1(&gb);
+ svq3->thirdpel_flag = get_bits1(&gb);
/* unknown fields */
skip_bits1(&gb);
@@ -869,9 +885,9 @@ static av_cold int svq3_decode_init(AVCodecContext *avctx)
skip_bits(&gb, 8);
}
- h->unknown_svq3_flag = get_bits1(&gb);
+ svq3->unknown_flag = get_bits1(&gb);
avctx->has_b_frames = !s->low_delay;
- if (h->unknown_svq3_flag) {
+ if (svq3->unknown_flag) {
#if CONFIG_ZLIB
unsigned watermark_width = svq3_get_ue_golomb(&gb);
unsigned watermark_height = svq3_get_ue_golomb(&gb);
@@ -894,9 +910,9 @@ static av_cold int svq3_decode_init(AVCodecContext *avctx)
av_free(buf);
return -1;
}
- h->svq3_watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
- h->svq3_watermark_key = h->svq3_watermark_key << 16 | h->svq3_watermark_key;
- av_log(avctx, AV_LOG_DEBUG, "watermark key %#x\n", h->svq3_watermark_key);
+ svq3->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
+ svq3->watermark_key = svq3->watermark_key << 16 | svq3->watermark_key;
+ av_log(avctx, AV_LOG_DEBUG, "watermark key %#x\n", svq3->watermark_key);
av_free(buf);
#else
av_log(avctx, AV_LOG_ERROR, "this svq3 file contains watermark which need zlib support compiled in\n");
@@ -913,11 +929,12 @@ static int svq3_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
AVPacket *avpkt)
{
- const uint8_t *buf = avpkt->data;
+ SVQ3Context *svq3 = avctx->priv_data;
+ H264Context *h = &svq3->h;
+ MpegEncContext *s = &h->s;
int buf_size = avpkt->size;
- MpegEncContext *const s = avctx->priv_data;
- H264Context *const h = avctx->priv_data;
- int m, mb_type;
+ int m, mb_type, left;
+ uint8_t *buf;
/* special case for last picture */
if (buf_size == 0) {
@@ -929,11 +946,22 @@ static int svq3_decode_frame(AVCodecContext *avctx,
return 0;
}
- init_get_bits (&s->gb, buf, 8*buf_size);
-
s->mb_x = s->mb_y = h->mb_xy = 0;
- if (svq3_decode_slice_header(h))
+ if (svq3->watermark_key) {
+ av_fast_malloc(&svq3->buf, &svq3->buf_size,
+ buf_size+FF_INPUT_BUFFER_PADDING_SIZE);
+ if (!svq3->buf)
+ return AVERROR(ENOMEM);
+ memcpy(svq3->buf, avpkt->data, buf_size);
+ buf = svq3->buf;
+ } else {
+ buf = avpkt->data;
+ }
+
+ init_get_bits(&s->gb, buf, 8*buf_size);
+
+ if (svq3_decode_slice_header(avctx))
return -1;
s->pict_type = h->slice_type;
@@ -941,16 +969,16 @@ static int svq3_decode_frame(AVCodecContext *avctx,
if (avctx->debug&FF_DEBUG_PICT_INFO){
av_log(h->s.avctx, AV_LOG_DEBUG, "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
- av_get_pict_type_char(s->pict_type), h->halfpel_flag, h->thirdpel_flag,
+ av_get_picture_type_char(s->pict_type), svq3->halfpel_flag, svq3->thirdpel_flag,
s->adaptive_quant, s->qscale, h->slice_num);
}
/* for skipping the frame */
s->current_picture.pict_type = s->pict_type;
- s->current_picture.key_frame = (s->pict_type == FF_I_TYPE);
+ s->current_picture.key_frame = (s->pict_type == AV_PICTURE_TYPE_I);
/* Skip B-frames if we do not have reference frames. */
- if (s->last_picture_ptr == NULL && s->pict_type == FF_B_TYPE)
+ if (s->last_picture_ptr == NULL && s->pict_type == AV_PICTURE_TYPE_B)
return 0;
#if FF_API_HURRY_UP
/* Skip B-frames if we are in a hurry. */
@@ -960,13 +988,13 @@ static int svq3_decode_frame(AVCodecContext *avctx,
if (avctx->hurry_up >= 5)
return 0;
#endif
- if ( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
- ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
+ if ( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
+ ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
|| avctx->skip_frame >= AVDISCARD_ALL)
return 0;
if (s->next_p_frame_damaged) {
- if (s->pict_type == FF_B_TYPE)
+ if (s->pict_type == AV_PICTURE_TYPE_B)
return 0;
else
s->next_p_frame_damaged = 0;
@@ -975,7 +1003,7 @@ static int svq3_decode_frame(AVCodecContext *avctx,
if (ff_h264_frame_start(h) < 0)
return -1;
- if (s->pict_type == FF_B_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_B) {
h->frame_num_offset = (h->slice_num - h->prev_frame_num);
if (h->frame_num_offset < 0) {
@@ -1013,10 +1041,10 @@ static int svq3_decode_frame(AVCodecContext *avctx,
if ( (get_bits_count(&s->gb) + 7) >= s->gb.size_in_bits &&
((get_bits_count(&s->gb) & 7) == 0 || show_bits(&s->gb, (-get_bits_count(&s->gb) & 7)) == 0)) {
- skip_bits(&s->gb, h->next_slice_index - get_bits_count(&s->gb));
+ skip_bits(&s->gb, svq3->next_slice_index - get_bits_count(&s->gb));
s->gb.size_in_bits = 8*buf_size;
- if (svq3_decode_slice_header(h))
+ if (svq3_decode_slice_header(avctx))
return -1;
/* TODO: support s->mb_skip_run */
@@ -1024,12 +1052,12 @@ static int svq3_decode_frame(AVCodecContext *avctx,
mb_type = svq3_get_ue_golomb(&s->gb);
- if (s->pict_type == FF_I_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_I) {
mb_type += 8;
- } else if (s->pict_type == FF_B_TYPE && mb_type >= 4) {
+ } else if (s->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4) {
mb_type += 4;
}
- if (mb_type > 33 || svq3_decode_mb(h, mb_type)) {
+ if ((unsigned)mb_type > 33 || svq3_decode_mb(svq3, mb_type)) {
av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
return -1;
}
@@ -1038,18 +1066,30 @@ static int svq3_decode_frame(AVCodecContext *avctx,
ff_h264_hl_decode_mb (h);
}
- if (s->pict_type != FF_B_TYPE && !s->low_delay) {
+ if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay) {
s->current_picture.mb_type[s->mb_x + s->mb_y*s->mb_stride] =
- (s->pict_type == FF_P_TYPE && mb_type < 8) ? (mb_type - 1) : -1;
+ (s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
}
}
ff_draw_horiz_band(s, 16*s->mb_y, 16);
}
+ left = buf_size*8 - get_bits_count(&s->gb);
+
+ if (s->mb_y != s->mb_height || s->mb_x != s->mb_width) {
+ av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, s->mb_y, s->mb_x, left);
+ //av_hex_dump(stderr, buf+buf_size-8, 8);
+ }
+
+ if (left < 0) {
+ av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
+ return -1;
+ }
+
MPV_frame_end(s);
- if (s->pict_type == FF_B_TYPE || s->low_delay) {
+ if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
*(AVFrame *) data = *(AVFrame *) &s->current_picture;
} else {
*(AVFrame *) data = *(AVFrame *) &s->last_picture;
@@ -1063,15 +1103,30 @@ static int svq3_decode_frame(AVCodecContext *avctx,
return buf_size;
}
+static int svq3_decode_end(AVCodecContext *avctx)
+{
+ SVQ3Context *svq3 = avctx->priv_data;
+ H264Context *h = &svq3->h;
+ MpegEncContext *s = &h->s;
+
+ ff_h264_free_context(h);
+
+ MPV_common_end(s);
+
+ av_freep(&svq3->buf);
+ svq3->buf_size = 0;
+
+ return 0;
+}
AVCodec ff_svq3_decoder = {
"svq3",
AVMEDIA_TYPE_VIDEO,
CODEC_ID_SVQ3,
- sizeof(H264Context),
+ sizeof(SVQ3Context),
svq3_decode_init,
NULL,
- ff_h264_decode_end,
+ svq3_decode_end,
svq3_decode_frame,
CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_DELAY,
.long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
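
The SVQ3 change above moves the SVQ3-only state (pel flags, next_slice_index, watermark key, scratch buffer) out of H264Context into a new SVQ3Context that embeds the H264Context, so the decoder recovers the H264Context and MpegEncContext from avctx->priv_data via &svq3->h and &h->s. A minimal sketch of that embedding pattern, with stand-in struct and field names that are illustrative only:

    #include <stdio.h>

    /* Stand-ins for the real contexts; names and fields are hypothetical. */
    typedef struct { int mb_x, mb_y; }       FakeMpegEncContext;
    typedef struct { FakeMpegEncContext s; } FakeH264Context;
    typedef struct {
        FakeH264Context h;   /* embedded first, as in the patch */
        int halfpel_flag;
        int thirdpel_flag;
    } FakeSVQ3Context;

    static void use_priv_data(void *priv_data)
    {
        /* Mirrors svq3_decode_frame(): priv_data -> SVQ3 -> H264 -> MpegEnc. */
        FakeSVQ3Context    *svq3 = priv_data;
        FakeH264Context    *h    = &svq3->h;
        FakeMpegEncContext *s    = &h->s;

        printf("mb %d,%d halfpel=%d\n", s->mb_x, s->mb_y, svq3->halfpel_flag);
    }

    int main(void)
    {
        FakeSVQ3Context ctx = { .h = { .s = { 3, 5 } }, .halfpel_flag = 1 };
        use_priv_data(&ctx);
        return 0;
    }
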
diff --git a/libavcodec/targaenc.c b/libavcodec/targaenc.c
index 5c5fd008c1..7bdaca3bec 100644
--- a/libavcodec/targaenc.c
+++ b/libavcodec/targaenc.c
@@ -90,7 +90,7 @@ static int targa_encode_frame(AVCodecContext *avctx,
return AVERROR(EINVAL);
}
- p->pict_type= FF_I_TYPE;
+ p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
/* zero out the header and only set applicable fields */
diff --git a/libavcodec/tiertexseqv.c b/libavcodec/tiertexseqv.c
index c8aa38cca9..f3a044882e 100644
--- a/libavcodec/tiertexseqv.c
+++ b/libavcodec/tiertexseqv.c
@@ -180,6 +180,7 @@ static av_cold int seqvideo_decode_init(AVCodecContext *avctx)
seq->avctx = avctx;
avctx->pix_fmt = PIX_FMT_PAL8;
+ avcodec_get_frame_defaults(&seq->frame);
seq->frame.data[0] = NULL;
return 0;
diff --git a/libavcodec/tiff.c b/libavcodec/tiff.c
index d43999b2f0..683527bedf 100644
--- a/libavcodec/tiff.c
+++ b/libavcodec/tiff.c
@@ -1,5 +1,4 @@
/*
- * TIFF image decoder
* Copyright (c) 2006 Konstantin Shishkov
*
* This file is part of FFmpeg.
@@ -20,9 +19,8 @@
*/
/**
- * TIFF image decoder
* @file
- * @author Konstantin Shishkov
+ * TIFF image decoder
*/
#include "avcodec.h"
#if CONFIG_ZLIB
@@ -40,9 +38,9 @@ typedef struct TiffContext {
AVFrame picture;
int width, height;
- unsigned int bpp;
+ unsigned int bpp, bppcount;
int le;
- int compr;
+ enum TiffCompr compr;
int invert;
int fax_opts;
int predictor;
@@ -103,7 +101,7 @@ static int tiff_uncompress(uint8_t *dst, unsigned long *len, const uint8_t *src,
static int tiff_unpack_strip(TiffContext *s, uint8_t* dst, int stride, const uint8_t *src, int size, int lines){
int c, line, pixels, code;
const uint8_t *ssrc = src;
- int width = s->width * s->bpp >> 3;
+ int width = ((s->width * s->bpp) + 7) >> 3;
#if CONFIG_ZLIB
uint8_t *zbuf; unsigned long outlen;
@@ -170,7 +168,13 @@ static int tiff_unpack_strip(TiffContext *s, uint8_t* dst, int stride, const uin
}
switch(s->compr){
case TIFF_RAW:
- memcpy(dst, src, width);
+ if (!s->fill_order) {
+ memcpy(dst, src, width);
+ } else {
+ int i;
+ for (i = 0; i < width; i++)
+ dst[i] = av_reverse[src[i]];
+ }
src += width;
break;
case TIFF_PACKBITS:
@@ -210,6 +214,55 @@ static int tiff_unpack_strip(TiffContext *s, uint8_t* dst, int stride, const uin
return 0;
}
+static int init_image(TiffContext *s)
+{
+ int i, ret;
+ uint32_t *pal;
+
+ switch (s->bpp * 10 + s->bppcount) {
+ case 11:
+ s->avctx->pix_fmt = PIX_FMT_MONOBLACK;
+ break;
+ case 81:
+ s->avctx->pix_fmt = PIX_FMT_PAL8;
+ break;
+ case 243:
+ s->avctx->pix_fmt = PIX_FMT_RGB24;
+ break;
+ case 161:
+ s->avctx->pix_fmt = PIX_FMT_GRAY16BE;
+ break;
+ case 324:
+ s->avctx->pix_fmt = PIX_FMT_RGBA;
+ break;
+ case 483:
+ s->avctx->pix_fmt = s->le ? PIX_FMT_RGB48LE : PIX_FMT_RGB48BE;
+ break;
+ default:
+ av_log(s->avctx, AV_LOG_ERROR,
+ "This format is not supported (bpp=%d, bppcount=%d)\n",
+ s->bpp, s->bppcount);
+ return AVERROR_INVALIDDATA;
+ }
+ if (s->width != s->avctx->width || s->height != s->avctx->height) {
+ if ((ret = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
+ return ret;
+ avcodec_set_dimensions(s->avctx, s->width, s->height);
+ }
+ if (s->picture.data[0])
+ s->avctx->release_buffer(s->avctx, &s->picture);
+ if ((ret = s->avctx->get_buffer(s->avctx, &s->picture)) < 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
+ }
+ if (s->bpp == 8 && s->picture.data[1]){
+ /* make default grayscale pal */
+ pal = (uint32_t *) s->picture.data[1];
+ for (i = 0; i < 256; i++)
+ pal[i] = i * 0x010101;
+ }
+ return 0;
+}
static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *buf, const uint8_t *end_buf)
{
@@ -263,6 +316,7 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
s->height = value;
break;
case TIFF_BPP:
+ s->bppcount = count;
if(count > 4){
av_log(s->avctx, AV_LOG_ERROR, "This format is not supported (bpp=%d, %d components)\n", s->bpp, count);
return -1;
@@ -282,46 +336,16 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
s->bpp = -1;
}
}
- switch(s->bpp*10 + count){
- case 11:
- s->avctx->pix_fmt = PIX_FMT_MONOBLACK;
- break;
- case 81:
- s->avctx->pix_fmt = PIX_FMT_PAL8;
- break;
- case 243:
- s->avctx->pix_fmt = PIX_FMT_RGB24;
- break;
- case 161:
- s->avctx->pix_fmt = PIX_FMT_GRAY16BE;
- break;
- case 324:
- s->avctx->pix_fmt = PIX_FMT_RGBA;
- break;
- case 483:
- s->avctx->pix_fmt = s->le ? PIX_FMT_RGB48LE : PIX_FMT_RGB48BE;
- break;
- default:
- av_log(s->avctx, AV_LOG_ERROR, "This format is not supported (bpp=%d, %d components)\n", s->bpp, count);
- return -1;
- }
- if(s->width != s->avctx->width || s->height != s->avctx->height){
- if(av_image_check_size(s->width, s->height, 0, s->avctx))
- return -1;
- avcodec_set_dimensions(s->avctx, s->width, s->height);
- }
- if(s->picture.data[0])
- s->avctx->release_buffer(s->avctx, &s->picture);
- if(s->avctx->get_buffer(s->avctx, &s->picture) < 0){
- av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
- return -1;
- }
- if(s->bpp == 8){
- /* make default grayscale pal */
- pal = (uint32_t *) s->picture.data[1];
- for(i = 0; i < 256; i++)
- pal[i] = i * 0x010101;
- }
+ break;
+ case TIFF_SAMPLES_PER_PIXEL:
+ if (count != 1) {
+ av_log(s->avctx, AV_LOG_ERROR,
+ "Samples per pixel requires a single value, many provided\n");
+ return AVERROR_INVALIDDATA;
+ }
+ if (s->bppcount == 1)
+ s->bpp *= value;
+ s->bppcount = value;
break;
case TIFF_COMPR:
s->compr = value;
@@ -450,6 +474,9 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
if(s->compr == TIFF_G4)
s->fax_opts = value;
break;
+
+ default:
+ av_log(s->avctx, AV_LOG_DEBUG, "Unknown or unsupported tag %d/0X%0X\n", tag, tag);
}
return 0;
}
@@ -464,7 +491,7 @@ static int decode_frame(AVCodecContext *avctx,
AVFrame *picture = data;
AVFrame * const p= (AVFrame*)&s->picture;
const uint8_t *orig_buf = buf, *end_buf = buf + buf_size;
- int id, le, off;
+ int id, le, off, ret;
int i, j, entries;
int stride, soff, ssize;
uint8_t *dst;
@@ -505,21 +532,9 @@ static int decode_frame(AVCodecContext *avctx,
return -1;
}
/* now we have the data and may start decoding */
- if(!p->data[0]){
- s->bpp = 1;
- avctx->pix_fmt = PIX_FMT_MONOBLACK;
- if(s->width != s->avctx->width || s->height != s->avctx->height){
- if(av_image_check_size(s->width, s->height, 0, s->avctx))
- return -1;
- avcodec_set_dimensions(s->avctx, s->width, s->height);
- }
- if(s->picture.data[0])
- s->avctx->release_buffer(s->avctx, &s->picture);
- if(s->avctx->get_buffer(s->avctx, &s->picture) < 0){
- av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
- return -1;
- }
- }
+ if ((ret = init_image(s)) < 0)
+ return ret;
+
if(s->strips == 1 && !s->stripsize){
av_log(avctx, AV_LOG_WARNING, "Image data size missing\n");
s->stripsize = buf_size - s->stripoff;
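
In the new init_image() above, the pixel format is chosen from the total bits per pixel and the component count packed together as s->bpp * 10 + s->bppcount, with the TIFF_SAMPLES_PER_PIXEL tag scaling bpp when only a single BitsPerSample value was seen. A short sketch of that arithmetic for a plain 8-bit RGB file; the tag order and values are illustrative assumptions, not from any particular sample.

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical tag sequence for an 8-bit RGB TIFF. */
        int bpp = 8, bppcount = 1;   /* TIFF_BPP: a single BitsPerSample value of 8 */
        int samples_per_pixel = 3;   /* TIFF_SAMPLES_PER_PIXEL: three components    */

        if (bppcount == 1)           /* same scaling as the tag handler above       */
            bpp *= samples_per_pixel;
        bppcount = samples_per_pixel;

        /* 24 * 10 + 3 == 243, which init_image() maps to PIX_FMT_RGB24. */
        printf("switch key: %d\n", bpp * 10 + bppcount);
        return 0;
    }
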
diff --git a/libavcodec/tiff.h b/libavcodec/tiff.h
index 235a998fcd..e1aca68e35 100644
--- a/libavcodec/tiff.h
+++ b/libavcodec/tiff.h
@@ -1,5 +1,4 @@
/*
- * TIFF tables
* Copyright (c) 2006 Konstantin Shishkov
*
* This file is part of FFmpeg.
@@ -19,13 +18,16 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#ifndef AVCODEC_TIFF_H
+#define AVCODEC_TIFF_H
+
/**
- * TIFF tables
* @file
- * @author Konstantin Shishkov
+ * TIFF tables
+ *
+ * For more information about the TIFF format, check the official docs at:
+ * http://partners.adobe.com/public/developer/tiff/index.html
*/
-#ifndef AVCODEC_TIFF_H
-#define AVCODEC_TIFF_H
#include <stdint.h>
diff --git a/libavcodec/tiffenc.c b/libavcodec/tiffenc.c
index 293ec0ac65..f7228f128f 100644
--- a/libavcodec/tiffenc.c
+++ b/libavcodec/tiffenc.c
@@ -210,7 +210,7 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf,
uint32_t *strip_offsets = NULL;
int bytes_per_row;
uint32_t res[2] = { 72, 1 }; // image resolution (72/1)
- static const uint16_t bpp_tab[] = { 8, 8, 8, 8 };
+ uint16_t bpp_tab[] = { 8, 8, 8, 8 };
int ret = -1;
int is_yuv = 0;
uint8_t *yuv_line = NULL;
@@ -221,7 +221,7 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf,
s->buf_size = buf_size;
*p = *pict;
- p->pict_type = FF_I_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
avctx->coded_frame= &s->picture;
@@ -255,12 +255,10 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf,
s->photometric_interpretation = 3;
break;
case PIX_FMT_MONOBLACK:
- s->bpp = 1;
- s->photometric_interpretation = 1;
- break;
case PIX_FMT_MONOWHITE:
s->bpp = 1;
- s->photometric_interpretation = 0;
+ s->photometric_interpretation = avctx->pix_fmt == PIX_FMT_MONOBLACK;
+ bpp_tab[0] = 1;
break;
case PIX_FMT_YUV420P:
case PIX_FMT_YUV422P:
@@ -282,7 +280,7 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf,
return -1;
}
if (!is_yuv)
- s->bpp_tab_size = (s->bpp >> 3);
+ s->bpp_tab_size = ((s->bpp + 7) >> 3);
if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE || s->compr == TIFF_LZW)
//best choice for DEFLATE
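
On the encoder side, replacing s->bpp >> 3 with (s->bpp + 7) >> 3 is the usual round-up-to-whole-units idiom, so 1 bpp monochrome now produces a one-entry table instead of a zero-sized one. A quick illustrative check of the idiom, independent of the encoder's structs:

    #include <assert.h>

    /* (bits + 7) >> 3 == ceil(bits / 8) for non-negative bit depths. */
    static int units_for_bits(int bits) { return (bits + 7) >> 3; }

    int main(void)
    {
        assert(units_for_bits(1)  == 1);   /* monochrome: plain `bits >> 3` gave 0 */
        assert(units_for_bits(8)  == 1);
        assert(units_for_bits(24) == 3);
        return 0;
    }
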
diff --git a/libavcodec/tmv.c b/libavcodec/tmv.c
index 62b6e19de1..b1083fe682 100644
--- a/libavcodec/tmv.c
+++ b/libavcodec/tmv.c
@@ -34,6 +34,14 @@ typedef struct TMVContext {
AVFrame pic;
} TMVContext;
+static av_cold int tmv_decode_init(AVCodecContext *avctx)
+{
+ TMVContext *tmv = avctx->priv_data;
+
+ avcodec_get_frame_defaults(&tmv->pic);
+ return 0;
+}
+
static int tmv_decode_frame(AVCodecContext *avctx, void *data,
int *data_size, AVPacket *avpkt)
{
@@ -59,7 +67,7 @@ static int tmv_decode_frame(AVCodecContext *avctx, void *data,
return -1;
}
- tmv->pic.pict_type = FF_I_TYPE;
+ tmv->pic.pict_type = AV_PICTURE_TYPE_I;
tmv->pic.key_frame = 1;
dst = tmv->pic.data[0];
@@ -97,6 +105,7 @@ AVCodec ff_tmv_decoder = {
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_TMV,
.priv_data_size = sizeof(TMVContext),
+ .init = tmv_decode_init,
.close = tmv_decode_close,
.decode = tmv_decode_frame,
.capabilities = CODEC_CAP_DR1,
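
The new tmv_decode_init (and the matching avcodec_get_frame_defaults calls added to several other decoders below) is presumably needed because these decoders embed their AVFrame in zero-filled private data, while the frame defaults introduced in this merge (key_frame = 1, format = -1, pkt_pos = -1) are non-zero. A toy sketch of why a zeroed allocation alone is no longer enough, using a stand-in struct rather than the real AVFrame:

    #include <string.h>
    #include <assert.h>

    struct frame { int key_frame; int format; long long pkt_pos; };

    static void frame_defaults(struct frame *f)
    {
        memset(f, 0, sizeof(*f));
        f->key_frame = 1;
        f->format  = -1;    /* "unknown", mirroring avcodec_get_frame_defaults */
        f->pkt_pos = -1;
    }

    int main(void)
    {
        struct frame in_ctx;
        memset(&in_ctx, 0, sizeof(in_ctx));  /* what av_mallocz alone would give */
        assert(in_ctx.format == 0);          /* wrong: 0 is a valid pixel format */
        frame_defaults(&in_ctx);
        assert(in_ctx.format == -1);
        return 0;
    }
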
diff --git a/libavcodec/truemotion1.c b/libavcodec/truemotion1.c
index b1b14319c5..284dbd8e12 100644
--- a/libavcodec/truemotion1.c
+++ b/libavcodec/truemotion1.c
@@ -474,6 +474,7 @@ static av_cold int truemotion1_decode_init(AVCodecContext *avctx)
// else
// avctx->pix_fmt = PIX_FMT_RGB555;
+ avcodec_get_frame_defaults(&s->frame);
s->frame.data[0] = NULL;
/* there is a vertical predictor for each pixel in a line; each vertical
diff --git a/libavcodec/truemotion2.c b/libavcodec/truemotion2.c
index 86454ec291..122049c957 100644
--- a/libavcodec/truemotion2.c
+++ b/libavcodec/truemotion2.c
@@ -807,9 +807,9 @@ static int decode_frame(AVCodecContext *avctx,
}
p->key_frame = tm2_decode_blocks(l, p);
if(p->key_frame)
- p->pict_type = FF_I_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_I;
else
- p->pict_type = FF_P_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_P;
l->cur = !l->cur;
*data_size = sizeof(AVFrame);
@@ -831,6 +831,7 @@ static av_cold int decode_init(AVCodecContext *avctx){
l->avctx = avctx;
l->pic.data[0]=NULL;
avctx->pix_fmt = PIX_FMT_BGR24;
+ avcodec_get_frame_defaults(&l->pic);
dsputil_init(&l->dsp, avctx);
diff --git a/libavcodec/tscc.c b/libavcodec/tscc.c
index 9de53a7267..3a69a6aaf8 100644
--- a/libavcodec/tscc.c
+++ b/libavcodec/tscc.c
@@ -141,6 +141,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
c->height = avctx->height;
+ avcodec_get_frame_defaults(&c->pic);
// Needed if zlib unused or init aborted before inflateInit
memset(&(c->zstream), 0, sizeof(z_stream));
switch(avctx->bits_per_coded_sample){
diff --git a/libavcodec/tta.c b/libavcodec/tta.c
index 3367788633..dccca46132 100644
--- a/libavcodec/tta.c
+++ b/libavcodec/tta.c
@@ -263,9 +263,9 @@ static av_cold int tta_decode_init(AVCodecContext * avctx)
return -1;
}
else switch(s->bps) {
-// case 1: avctx->sample_fmt = AV_SAMPLE_FMT_U8; break;
+ case 1: avctx->sample_fmt = AV_SAMPLE_FMT_U8; break;
case 2: avctx->sample_fmt = AV_SAMPLE_FMT_S16; break;
-// case 3: avctx->sample_fmt = AV_SAMPLE_FMT_S24; break;
+ case 3: avctx->bits_per_coded_sample = 24;
case 4: avctx->sample_fmt = AV_SAMPLE_FMT_S32; break;
default:
av_log_ask_for_sample(s->avctx,
@@ -325,7 +325,7 @@ static int tta_decode_frame(AVCodecContext *avctx,
int cur_chan = 0, framelen = s->frame_length;
int32_t *p;
- if (*data_size < (framelen * s->channels * 2)) {
+ if (*data_size < (framelen * s->channels * av_get_bits_per_sample_fmt(avctx->sample_fmt) / 8)) {
av_log(avctx, AV_LOG_ERROR, "Output buffer size is too small.\n");
return -1;
}
@@ -442,6 +442,13 @@ static int tta_decode_frame(AVCodecContext *avctx,
// convert to output buffer
switch(s->bps) {
+ case 1: {
+ uint8_t *samples = data;
+ for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++)
+ *samples++ = *p + 0x80;
+ *data_size = samples - (uint8_t *)data;
+ break;
+ }
case 2: {
uint16_t *samples = data;
for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++) {
@@ -452,6 +459,13 @@ static int tta_decode_frame(AVCodecContext *avctx,
*data_size = (uint8_t *)samples - (uint8_t *)data;
break;
}
+ case 3: {
+ int32_t *samples = data;
+ for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++)
+ *samples++ = AV_RN32(p) << 8;
+ *data_size = (uint8_t *)samples - (uint8_t *)data;
+ break;
+ }
default:
av_log(s->avctx, AV_LOG_ERROR, "Error, only 16bit samples supported!\n");
}
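
The new 8- and 24-bit TTA output paths convert the decoder's native int32 samples: unsigned 8-bit output gets a +0x80 bias, and 24-bit samples are widened into S32 by an 8-bit left shift so the significant bits sit at the top of the word. A small sketch of the two conversions on made-up sample values (the shift is written as a multiply to stay portable):

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        int32_t d8[3] = { -128, 0, 127 };        /* assumed 8-bit-range output   */
        uint8_t u8[3];
        for (int i = 0; i < 3; i++)
            u8[i] = d8[i] + 0x80;                /* signed -> unsigned 8-bit     */

        int32_t d24[2] = { -0x400000, 0x7fffff };/* assumed 24-bit-range output  */
        int32_t s32[2];
        for (int i = 0; i < 2; i++)
            s32[i] = d24[i] * 256;               /* effect of the << 8 widening  */

        printf("%d %d %d  %" PRId32 " %" PRId32 "\n",
               u8[0], u8[1], u8[2], s32[0], s32[1]);
        return 0;
    }
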
diff --git a/libavcodec/txd.c b/libavcodec/txd.c
index da1f5c1e20..0e25458c86 100644
--- a/libavcodec/txd.c
+++ b/libavcodec/txd.c
@@ -89,7 +89,7 @@ static int txd_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
return -1;
}
- p->pict_type = FF_I_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_I;
ptr = p->data[0];
stride = p->linesize[0];
diff --git a/libavcodec/ulti.c b/libavcodec/ulti.c
index 83a66ab85e..9033cee98f 100644
--- a/libavcodec/ulti.c
+++ b/libavcodec/ulti.c
@@ -49,6 +49,7 @@ static av_cold int ulti_decode_init(AVCodecContext *avctx)
s->height = avctx->height;
s->blocks = (s->width / 8) * (s->height / 8);
avctx->pix_fmt = PIX_FMT_YUV410P;
+ avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = (AVFrame*) &s->frame;
s->ulti_codebook = ulti_codebook;
diff --git a/libavcodec/utils.c b/libavcodec/utils.c
index a350605f97..0c03c5224d 100644
--- a/libavcodec/utils.c
+++ b/libavcodec/utils.c
@@ -147,6 +147,8 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int l
case PIX_FMT_YUV420P9BE:
case PIX_FMT_YUV420P10LE:
case PIX_FMT_YUV420P10BE:
+ case PIX_FMT_YUV422P10LE:
+ case PIX_FMT_YUV422P10BE:
w_align= 16; //FIXME check for non mpeg style codecs and use less alignment
h_align= 16;
if(s->codec_id == CODEC_ID_MPEG2VIDEO || s->codec_id == CODEC_ID_MJPEG || s->codec_id == CODEC_ID_AMV || s->codec_id == CODEC_ID_THP || s->codec_id == CODEC_ID_H264)
@@ -354,9 +356,18 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){
}
s->internal_buffer_count++;
- if(s->pkt) pic->pkt_pts= s->pkt->pts;
- else pic->pkt_pts= AV_NOPTS_VALUE;
+ if (s->pkt) {
+ pic->pkt_pts = s->pkt->pts;
+ pic->pkt_pos = s->pkt->pos;
+ } else {
+ pic->pkt_pts = AV_NOPTS_VALUE;
+ pic->pkt_pos = -1;
+ }
pic->reordered_opaque= s->reordered_opaque;
+ pic->sample_aspect_ratio = s->sample_aspect_ratio;
+ pic->width = s->width;
+ pic->height = s->height;
+ pic->format = s->pix_fmt;
if(s->debug&FF_DEBUG_BUFFERS)
av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p, %d buffers used\n", pic, s->internal_buffer_count);
@@ -461,7 +472,10 @@ void avcodec_get_frame_defaults(AVFrame *pic){
memset(pic, 0, sizeof(AVFrame));
pic->pts = pic->best_effort_timestamp = AV_NOPTS_VALUE;
+ pic->pkt_pos = -1;
pic->key_frame= 1;
+ pic->sample_aspect_ratio = (AVRational){0, 1};
+ pic->format = -1; /* unknown */
}
AVFrame *avcodec_alloc_frame(void){
@@ -684,8 +698,7 @@ int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
av_log(avctx, AV_LOG_ERROR, "start_display_time must be 0.\n");
return -1;
}
- if(sub->num_rects == 0 || !sub->rects)
- return -1;
+
ret = avctx->codec->encode(avctx, buf, buf_size, sub);
avctx->frame_number++;
return ret;
@@ -743,6 +756,18 @@ int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *pi
ret = avctx->codec->decode(avctx, picture, got_picture_ptr,
avpkt);
picture->pkt_dts= avpkt->dts;
+
+ if(!avctx->has_b_frames){
+ picture->pkt_pos= avpkt->pos;
+ if (!picture->sample_aspect_ratio.num)
+ picture->sample_aspect_ratio = avctx->sample_aspect_ratio;
+ if (!picture->width)
+ picture->width = avctx->width;
+ if (!picture->height)
+ picture->height = avctx->height;
+ if (picture->format == PIX_FMT_NONE)
+ picture->format = avctx->pix_fmt;
+ }
}
emms_c(); //needed to avoid an emms_c() call before every return;
@@ -1334,9 +1359,9 @@ int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op))
unsigned int ff_toupper4(unsigned int x)
{
return toupper( x &0xFF)
- + (toupper((x>>8 )&0xFF)<<8 )
- + (toupper((x>>16)&0xFF)<<16)
- + (toupper((x>>24)&0xFF)<<24);
+ + (toupper((x>>8 )&0xFF)<<8 )
+ + (toupper((x>>16)&0xFF)<<16)
+ + (toupper((x>>24)&0xFF)<<24);
}
#if !HAVE_PTHREADS
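
The avcodec_decode_video2 change only propagates stream-level parameters (packet position, sample aspect ratio, dimensions, pixel format) into the frame when there is no B-frame reordering and the decoder left the corresponding field unset. A rough sketch of that fill-if-unset pattern with stand-in structs, not the real AVFrame/AVCodecContext:

    #include <stdio.h>

    struct frame { int width, height, format; };
    struct ctx   { int width, height, pix_fmt; };

    static void fill_missing(struct frame *f, const struct ctx *c)
    {
        if (!f->width)       f->width  = c->width;
        if (!f->height)      f->height = c->height;
        if (f->format == -1) f->format = c->pix_fmt;   /* -1 marks "unknown" */
    }

    int main(void)
    {
        struct ctx   avctx = { 1920, 1080, 0 };
        struct frame pic   = { 0, 0, -1 };             /* as left by frame defaults */
        fill_missing(&pic, &avctx);
        printf("%dx%d fmt=%d\n", pic.width, pic.height, pic.format);
        return 0;
    }
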
diff --git a/libavcodec/v210dec.c b/libavcodec/v210dec.c
index 97ef594666..94c5b5bb26 100644
--- a/libavcodec/v210dec.c
+++ b/libavcodec/v210dec.c
@@ -30,7 +30,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
av_log(avctx, AV_LOG_ERROR, "v210 needs even width\n");
return -1;
}
- avctx->pix_fmt = PIX_FMT_YUV422P16;
+ avctx->pix_fmt = PIX_FMT_YUV422P10;
avctx->bits_per_raw_sample = 10;
avctx->coded_frame = avcodec_alloc_frame();
@@ -63,15 +63,15 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
y = (uint16_t*)pic->data[0];
u = (uint16_t*)pic->data[1];
v = (uint16_t*)pic->data[2];
- pic->pict_type = FF_I_TYPE;
+ pic->pict_type = AV_PICTURE_TYPE_I;
pic->key_frame = 1;
#define READ_PIXELS(a, b, c) \
do { \
- val = av_le2ne32(*src++); \
- *a++ = val << 6; \
- *b++ = (val >> 4) & 0xFFC0; \
- *c++ = (val >> 14) & 0xFFC0; \
+ val = av_le2ne32(*src++); \
+ *a++ = val & 0x3FF; \
+ *b++ = (val >> 10) & 0x3FF; \
+ *c++ = (val >> 20) & 0x3FF; \
} while (0)
for (h = 0; h < avctx->height; h++) {
@@ -87,15 +87,15 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
READ_PIXELS(u, y, v);
val = av_le2ne32(*src++);
- *y++ = val << 6;
+ *y++ = val & 0x3FF;
}
if (w < avctx->width - 3) {
- *u++ = (val >> 4) & 0xFFC0;
- *y++ = (val >> 14) & 0xFFC0;
+ *u++ = (val >> 10) & 0x3FF;
+ *y++ = (val >> 20) & 0x3FF;
val = av_le2ne32(*src++);
- *v++ = val << 6;
- *y++ = (val >> 4) & 0xFFC0;
+ *v++ = val & 0x3FF;
+ *y++ = (val >> 10) & 0x3FF;
}
psrc += stride;
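
With the output format switched to PIX_FMT_YUV422P10, READ_PIXELS now pulls each 10-bit component straight out of the low/middle/high fields of the little-endian 32-bit word instead of scaling it into a 16-bit sample. A standalone sketch unpacking one such word, with made-up component values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* One v210 word packs three 10-bit components: bits 0-9, 10-19, 20-29. */
        uint32_t word = 64u | (512u << 10) | (960u << 20);  /* assumed Cb, Y, Cr */

        int c0 =  word        & 0x3FF;
        int c1 = (word >> 10) & 0x3FF;
        int c2 = (word >> 20) & 0x3FF;

        printf("%d %d %d\n", c0, c1, c2);   /* 64 512 960 */
        return 0;
    }
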
diff --git a/libavcodec/v210enc.c b/libavcodec/v210enc.c
index 39f672d22b..d1b3d9f858 100644
--- a/libavcodec/v210enc.c
+++ b/libavcodec/v210enc.c
@@ -31,8 +31,8 @@ static av_cold int encode_init(AVCodecContext *avctx)
return -1;
}
- if (avctx->pix_fmt != PIX_FMT_YUV422P16) {
- av_log(avctx, AV_LOG_ERROR, "v210 needs YUV422P16\n");
+ if (avctx->pix_fmt != PIX_FMT_YUV422P10) {
+ av_log(avctx, AV_LOG_ERROR, "v210 needs YUV422P10\n");
return -1;
}
@@ -43,7 +43,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
avctx->coded_frame = avcodec_alloc_frame();
avctx->coded_frame->key_frame = 1;
- avctx->coded_frame->pict_type = FF_I_TYPE;
+ avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
return 0;
}
@@ -66,11 +66,13 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf,
return -1;
}
+#define CLIP(v) av_clip(v, 4, 1019)
+
#define WRITE_PIXELS(a, b, c) \
do { \
- val = (*a++ >> 6) | \
- ((*b++ & 0xFFC0) << 4); \
- val|= (*c++ & 0xFFC0) << 14; \
+ val = CLIP(*a++); \
+ val |= (CLIP(*b++) << 10) | \
+ (CLIP(*c++) << 20); \
bytestream_put_le32(&p, val); \
} while (0)
@@ -85,17 +87,15 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf,
if (w < avctx->width - 1) {
WRITE_PIXELS(u, y, v);
- val = *y++ >> 6;
+ val = CLIP(*y++);
if (w == avctx->width - 2)
bytestream_put_le32(&p, val);
}
if (w < avctx->width - 3) {
- val |=((*u++ & 0xFFC0) << 4) |
- ((*y++ & 0xFFC0) << 14);
+ val |= (CLIP(*u++) << 10) | (CLIP(*y++) << 20);
bytestream_put_le32(&p, val);
- val = (*v++ >> 6) |
- (*y++ & 0xFFC0) << 4;
+ val = CLIP(*v++) | (CLIP(*y++) << 10);
bytestream_put_le32(&p, val);
}
@@ -125,6 +125,6 @@ AVCodec ff_v210_encoder = {
encode_init,
encode_frame,
encode_close,
- .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV422P16, PIX_FMT_NONE},
+ .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV422P10, PIX_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("Uncompressed 4:2:2 10-bit"),
};
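
The encoder mirrors the decoder: each 10-bit component is clipped to the legal v210 range 4..1019 and packed back into one 32-bit word. A short illustrative sketch of a single WRITE_PIXELS step on assumed inputs:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t clip10(int v) { return v < 4 ? 4 : v > 1019 ? 1019 : (uint32_t)v; }

    int main(void)
    {
        int u = 2, y = 512, v = 1023;     /* assumed samples, two out of range */
        uint32_t word = clip10(u) | (clip10(y) << 10) | (clip10(v) << 20);
        printf("0x%08X\n", (unsigned)word);  /* components clamped to 4..1019 */
        return 0;
    }
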
diff --git a/libavcodec/v210x.c b/libavcodec/v210x.c
index 962678dde6..64954cb6bb 100644
--- a/libavcodec/v210x.c
+++ b/libavcodec/v210x.c
@@ -63,7 +63,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
udst= (uint16_t *)pic->data[1];
vdst= (uint16_t *)pic->data[2];
yend= ydst + width;
- pic->pict_type= FF_I_TYPE;
+ pic->pict_type= AV_PICTURE_TYPE_I;
pic->key_frame= 1;
for(;;){
diff --git a/libavcodec/vaapi_h264.c b/libavcodec/vaapi_h264.c
index 34e4976796..922dc86963 100644
--- a/libavcodec/vaapi_h264.c
+++ b/libavcodec/vaapi_h264.c
@@ -312,7 +312,7 @@ static int decode_slice(AVCodecContext *avctx,
slice_param->slice_data_bit_offset = get_bits_count(&h->s.gb) + 8; /* bit buffer started beyond nal_unit_type */
slice_param->first_mb_in_slice = (s->mb_y >> FIELD_OR_MBAFF_PICTURE) * s->mb_width + s->mb_x;
slice_param->slice_type = ff_h264_get_slice_type(h);
- slice_param->direct_spatial_mv_pred_flag = h->slice_type == FF_B_TYPE ? h->direct_spatial_mv_pred : 0;
+ slice_param->direct_spatial_mv_pred_flag = h->slice_type == AV_PICTURE_TYPE_B ? h->direct_spatial_mv_pred : 0;
slice_param->num_ref_idx_l0_active_minus1 = h->list_count > 0 ? h->ref_count[0] - 1 : 0;
slice_param->num_ref_idx_l1_active_minus1 = h->list_count > 1 ? h->ref_count[1] - 1 : 0;
slice_param->cabac_init_idc = h->cabac_init_idc;
diff --git a/libavcodec/vaapi_mpeg2.c b/libavcodec/vaapi_mpeg2.c
index 17d82b723f..3b3f6e0444 100644
--- a/libavcodec/vaapi_mpeg2.c
+++ b/libavcodec/vaapi_mpeg2.c
@@ -72,10 +72,10 @@ static int vaapi_mpeg2_start_frame(AVCodecContext *avctx, av_unused const uint8_
pic_param->picture_coding_extension.bits.is_first_field = mpeg2_get_is_frame_start(s);
switch (s->pict_type) {
- case FF_B_TYPE:
+ case AV_PICTURE_TYPE_B:
pic_param->backward_reference_picture = ff_vaapi_get_surface_id(&s->next_picture);
// fall-through
- case FF_P_TYPE:
+ case AV_PICTURE_TYPE_P:
pic_param->forward_reference_picture = ff_vaapi_get_surface_id(&s->last_picture);
break;
}
diff --git a/libavcodec/vaapi_mpeg4.c b/libavcodec/vaapi_mpeg4.c
index 78e0d646cb..cff77740c2 100644
--- a/libavcodec/vaapi_mpeg4.c
+++ b/libavcodec/vaapi_mpeg4.c
@@ -78,8 +78,8 @@ static int vaapi_mpeg4_start_frame(AVCodecContext *avctx, av_unused const uint8_
}
pic_param->quant_precision = s->quant_precision;
pic_param->vop_fields.value = 0; /* reset all bits */
- pic_param->vop_fields.bits.vop_coding_type = s->pict_type - FF_I_TYPE;
- pic_param->vop_fields.bits.backward_reference_vop_coding_type = s->pict_type == FF_B_TYPE ? s->next_picture.pict_type - FF_I_TYPE : 0;
+ pic_param->vop_fields.bits.vop_coding_type = s->pict_type - AV_PICTURE_TYPE_I;
+ pic_param->vop_fields.bits.backward_reference_vop_coding_type = s->pict_type == AV_PICTURE_TYPE_B ? s->next_picture.pict_type - AV_PICTURE_TYPE_I : 0;
pic_param->vop_fields.bits.vop_rounding_type = s->no_rounding;
pic_param->vop_fields.bits.intra_dc_vlc_thr = mpeg4_get_intra_dc_vlc_thr(s);
pic_param->vop_fields.bits.top_field_first = s->top_field_first;
@@ -92,9 +92,9 @@ static int vaapi_mpeg4_start_frame(AVCodecContext *avctx, av_unused const uint8_
pic_param->TRB = s->pb_time;
pic_param->TRD = s->pp_time;
- if (s->pict_type == FF_B_TYPE)
+ if (s->pict_type == AV_PICTURE_TYPE_B)
pic_param->backward_reference_picture = ff_vaapi_get_surface_id(&s->next_picture);
- if (s->pict_type != FF_I_TYPE)
+ if (s->pict_type != AV_PICTURE_TYPE_I)
pic_param->forward_reference_picture = ff_vaapi_get_surface_id(&s->last_picture);
/* Fill in VAIQMatrixBufferMPEG4 */
diff --git a/libavcodec/vaapi_vc1.c b/libavcodec/vaapi_vc1.c
index b1dfde86dd..d4d76c815f 100644
--- a/libavcodec/vaapi_vc1.c
+++ b/libavcodec/vaapi_vc1.c
@@ -42,7 +42,7 @@ static inline int vc1_has_MVTYPEMB_bitplane(VC1Context *v)
{
if (v->mv_type_is_raw)
return 0;
- return (v->s.pict_type == FF_P_TYPE &&
+ return (v->s.pict_type == AV_PICTURE_TYPE_P &&
(v->mv_mode == MV_PMODE_MIXED_MV ||
(v->mv_mode == MV_PMODE_INTENSITY_COMP &&
v->mv_mode2 == MV_PMODE_MIXED_MV)));
@@ -53,8 +53,8 @@ static inline int vc1_has_SKIPMB_bitplane(VC1Context *v)
{
if (v->skip_is_raw)
return 0;
- return (v->s.pict_type == FF_P_TYPE ||
- (v->s.pict_type == FF_B_TYPE && !v->bi_type));
+ return (v->s.pict_type == AV_PICTURE_TYPE_P ||
+ (v->s.pict_type == AV_PICTURE_TYPE_B && !v->bi_type));
}
/** Check whether the DIRECTMB bitplane is present */
@@ -62,7 +62,7 @@ static inline int vc1_has_DIRECTMB_bitplane(VC1Context *v)
{
if (v->dmb_is_raw)
return 0;
- return v->s.pict_type == FF_B_TYPE && !v->bi_type;
+ return v->s.pict_type == AV_PICTURE_TYPE_B && !v->bi_type;
}
/** Check whether the ACPRED bitplane is present */
@@ -71,8 +71,8 @@ static inline int vc1_has_ACPRED_bitplane(VC1Context *v)
if (v->acpred_is_raw)
return 0;
return (v->profile == PROFILE_ADVANCED &&
- (v->s.pict_type == FF_I_TYPE ||
- (v->s.pict_type == FF_B_TYPE && v->bi_type)));
+ (v->s.pict_type == AV_PICTURE_TYPE_I ||
+ (v->s.pict_type == AV_PICTURE_TYPE_B && v->bi_type)));
}
/** Check whether the OVERFLAGS bitplane is present */
@@ -81,8 +81,8 @@ static inline int vc1_has_OVERFLAGS_bitplane(VC1Context *v)
if (v->overflg_is_raw)
return 0;
return (v->profile == PROFILE_ADVANCED &&
- (v->s.pict_type == FF_I_TYPE ||
- (v->s.pict_type == FF_B_TYPE && v->bi_type)) &&
+ (v->s.pict_type == AV_PICTURE_TYPE_I ||
+ (v->s.pict_type == AV_PICTURE_TYPE_B && v->bi_type)) &&
(v->overlap && v->pq <= 8) &&
v->condover == CONDOVER_SELECT);
}
@@ -92,9 +92,9 @@ static int vc1_get_PTYPE(VC1Context *v)
{
MpegEncContext * const s = &v->s;
switch (s->pict_type) {
- case FF_I_TYPE: return 0;
- case FF_P_TYPE: return v->p_frame_skipped ? 4 : 1;
- case FF_B_TYPE: return v->bi_type ? 3 : 2;
+ case AV_PICTURE_TYPE_I: return 0;
+ case AV_PICTURE_TYPE_P: return v->p_frame_skipped ? 4 : 1;
+ case AV_PICTURE_TYPE_B: return v->bi_type ? 3 : 2;
}
return 0;
}
@@ -102,8 +102,8 @@ static int vc1_get_PTYPE(VC1Context *v)
/** Reconstruct bitstream MVMODE (7.1.1.32) */
static inline VAMvModeVC1 vc1_get_MVMODE(VC1Context *v)
{
- if (v->s.pict_type == FF_P_TYPE ||
- (v->s.pict_type == FF_B_TYPE && !v->bi_type))
+ if (v->s.pict_type == AV_PICTURE_TYPE_P ||
+ (v->s.pict_type == AV_PICTURE_TYPE_B && !v->bi_type))
return get_VAMvModeVC1(v->mv_mode);
return 0;
}
@@ -111,7 +111,7 @@ static inline VAMvModeVC1 vc1_get_MVMODE(VC1Context *v)
/** Reconstruct bitstream MVMODE2 (7.1.1.33) */
static inline VAMvModeVC1 vc1_get_MVMODE2(VC1Context *v)
{
- if (v->s.pict_type == FF_P_TYPE && v->mv_mode == MV_PMODE_INTENSITY_COMP)
+ if (v->s.pict_type == AV_PICTURE_TYPE_P && v->mv_mode == MV_PMODE_INTENSITY_COMP)
return get_VAMvModeVC1(v->mv_mode2);
return 0;
}
@@ -245,10 +245,10 @@ static int vaapi_vc1_start_frame(AVCodecContext *avctx, av_unused const uint8_t
pic_param->transform_fields.bits.intra_transform_dc_table = v->s.dc_table_index;
switch (s->pict_type) {
- case FF_B_TYPE:
+ case AV_PICTURE_TYPE_B:
pic_param->backward_reference_picture = ff_vaapi_get_surface_id(&s->next_picture);
// fall-through
- case FF_P_TYPE:
+ case AV_PICTURE_TYPE_P:
pic_param->forward_reference_picture = ff_vaapi_get_surface_id(&s->last_picture);
break;
}
@@ -259,12 +259,12 @@ static int vaapi_vc1_start_frame(AVCodecContext *avctx, av_unused const uint8_t
int x, y, n;
switch (s->pict_type) {
- case FF_P_TYPE:
+ case AV_PICTURE_TYPE_P:
ff_bp[0] = pic_param->bitplane_present.flags.bp_direct_mb ? v->direct_mb_plane : NULL;
ff_bp[1] = pic_param->bitplane_present.flags.bp_skip_mb ? s->mbskip_table : NULL;
ff_bp[2] = pic_param->bitplane_present.flags.bp_mv_type_mb ? v->mv_type_mb_plane : NULL;
break;
- case FF_B_TYPE:
+ case AV_PICTURE_TYPE_B:
if (!v->bi_type) {
ff_bp[0] = pic_param->bitplane_present.flags.bp_direct_mb ? v->direct_mb_plane : NULL;
ff_bp[1] = pic_param->bitplane_present.flags.bp_skip_mb ? s->mbskip_table : NULL;
@@ -272,7 +272,7 @@ static int vaapi_vc1_start_frame(AVCodecContext *avctx, av_unused const uint8_t
break;
}
/* fall-through (BI-type) */
- case FF_I_TYPE:
+ case AV_PICTURE_TYPE_I:
ff_bp[0] = NULL; /* XXX: interlaced frame (FIELDTX plane) */
ff_bp[1] = pic_param->bitplane_present.flags.bp_ac_pred ? v->acpred_plane : NULL;
ff_bp[2] = pic_param->bitplane_present.flags.bp_overflags ? v->over_flags_plane : NULL;
diff --git a/libavcodec/vb.c b/libavcodec/vb.c
index fc79f5d586..3fb59cf377 100644
--- a/libavcodec/vb.c
+++ b/libavcodec/vb.c
@@ -268,6 +268,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
c->avctx = avctx;
avctx->pix_fmt = PIX_FMT_PAL8;
+ avcodec_get_frame_defaults(&c->pic);
c->frame = av_mallocz(avctx->width * avctx->height);
c->prev_frame = av_mallocz(avctx->width * avctx->height);
diff --git a/libavcodec/vc1.c b/libavcodec/vc1.c
index 1b1809588e..e062a35cc1 100644
--- a/libavcodec/vc1.c
+++ b/libavcodec/vc1.c
@@ -280,28 +280,6 @@ static int vop_dquant_decoding(VC1Context *v)
static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb);
-static void simple_idct_put_rangered(uint8_t *dest, int line_size, DCTELEM *block)
-{
- int i;
- ff_simple_idct(block);
- for (i = 0; i < 64; i++) block[i] = (block[i] - 64) << 1;
- ff_put_pixels_clamped_c(block, dest, line_size);
-}
-
-static void simple_idct_put_signed(uint8_t *dest, int line_size, DCTELEM *block)
-{
- ff_simple_idct(block);
- ff_put_signed_pixels_clamped_c(block, dest, line_size);
-}
-
-static void simple_idct_put_signed_rangered(uint8_t *dest, int line_size, DCTELEM *block)
-{
- int i;
- ff_simple_idct(block);
- for (i = 0; i < 64; i++) block[i] <<= 1;
- ff_put_signed_pixels_clamped_c(block, dest, line_size);
-}
-
/**
* Decode Simple/Main Profiles sequence header
* @see Figure 7-8, p16-17
@@ -359,11 +337,7 @@ int vc1_decode_sequence_header(AVCodecContext *avctx, VC1Context *v, GetBitConte
v->res_fasttx = get_bits1(gb);
if (!v->res_fasttx)
{
- v->vc1dsp.vc1_inv_trans_8x8_add = ff_simple_idct_add;
- v->vc1dsp.vc1_inv_trans_8x8_put[0] = ff_simple_idct_put;
- v->vc1dsp.vc1_inv_trans_8x8_put[1] = simple_idct_put_rangered;
- v->vc1dsp.vc1_inv_trans_8x8_put_signed[0] = simple_idct_put_signed;
- v->vc1dsp.vc1_inv_trans_8x8_put_signed[1] = simple_idct_put_signed_rangered;
+ v->vc1dsp.vc1_inv_trans_8x8 = ff_simple_idct;
v->vc1dsp.vc1_inv_trans_8x4 = ff_simple_idct84_add;
v->vc1dsp.vc1_inv_trans_4x8 = ff_simple_idct48_add;
v->vc1dsp.vc1_inv_trans_4x4 = ff_simple_idct44_add;
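
The range-reduced IDCT variants removed above and the inline doublings added in vc1dec.c further down amount to the same pixel mapping: a range-reduced frame is stored at half amplitude, and upscaling a final 8-bit pixel y is clip(2*y - 128). Doubling the signed residual before put_signed_pixels_clamped, or computing (b - 64) << 1 before put_pixels_clamped, both reduce to that formula, as this small sketch with assumed IDCT outputs illustrates:

    #include <stdio.h>

    static int clip8(int v) { return v < 0 ? 0 : v > 255 ? 255 : v; }

    int main(void)
    {
        int b_signed   = 20;    /* assumed IDCT output on the signed path        */
        int b_unsigned = 148;   /* same sample on the unsigned path (y = b + 128) */

        /* signed path:   (b << 1) then put_signed (+128, clamp) */
        int p_signed   = clip8((b_signed << 1) + 128);
        /* unsigned path: ((b - 64) << 1) then put (clamp)       */
        int p_unsigned = clip8((b_unsigned - 64) << 1);

        /* Both equal 2*y - 128 for the full-range pixel y = 148. */
        printf("%d %d\n", p_signed, p_unsigned);   /* 168 168 */
        return 0;
    }
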
@@ -612,29 +586,29 @@ int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
v->s.pict_type = get_bits1(gb);
if (v->s.avctx->max_b_frames) {
if (!v->s.pict_type) {
- if (get_bits1(gb)) v->s.pict_type = FF_I_TYPE;
- else v->s.pict_type = FF_B_TYPE;
- } else v->s.pict_type = FF_P_TYPE;
- } else v->s.pict_type = v->s.pict_type ? FF_P_TYPE : FF_I_TYPE;
+ if (get_bits1(gb)) v->s.pict_type = AV_PICTURE_TYPE_I;
+ else v->s.pict_type = AV_PICTURE_TYPE_B;
+ } else v->s.pict_type = AV_PICTURE_TYPE_P;
+ } else v->s.pict_type = v->s.pict_type ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
v->bi_type = 0;
- if(v->s.pict_type == FF_B_TYPE) {
+ if(v->s.pict_type == AV_PICTURE_TYPE_B) {
v->bfraction_lut_index = get_vlc2(gb, ff_vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
v->bfraction = ff_vc1_bfraction_lut[v->bfraction_lut_index];
if(v->bfraction == 0) {
- v->s.pict_type = FF_BI_TYPE;
+ v->s.pict_type = AV_PICTURE_TYPE_BI;
}
}
- if(v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_BI_TYPE)
+ if(v->s.pict_type == AV_PICTURE_TYPE_I || v->s.pict_type == AV_PICTURE_TYPE_BI)
skip_bits(gb, 7); // skip buffer fullness
if(v->parse_only)
return 0;
/* calculate RND */
- if(v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_BI_TYPE)
+ if(v->s.pict_type == AV_PICTURE_TYPE_I || v->s.pict_type == AV_PICTURE_TYPE_BI)
v->rnd = 1;
- if(v->s.pict_type == FF_P_TYPE)
+ if(v->s.pict_type == AV_PICTURE_TYPE_P)
v->rnd ^= 1;
/* Quantizer stuff */
@@ -661,18 +635,18 @@ int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
v->range_x = 1 << (v->k_x - 1);
v->range_y = 1 << (v->k_y - 1);
- if (v->multires && v->s.pict_type != FF_B_TYPE) v->respic = get_bits(gb, 2);
+ if (v->multires && v->s.pict_type != AV_PICTURE_TYPE_B) v->respic = get_bits(gb, 2);
- if(v->res_x8 && (v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_BI_TYPE)){
+ if(v->res_x8 && (v->s.pict_type == AV_PICTURE_TYPE_I || v->s.pict_type == AV_PICTURE_TYPE_BI)){
v->x8_type = get_bits1(gb);
}else v->x8_type = 0;
//av_log(v->s.avctx, AV_LOG_INFO, "%c Frame: QP=[%i]%i (+%i/2) %i\n",
-// (v->s.pict_type == FF_P_TYPE) ? 'P' : ((v->s.pict_type == FF_I_TYPE) ? 'I' : 'B'), pqindex, v->pq, v->halfpq, v->rangeredfrm);
+// (v->s.pict_type == AV_PICTURE_TYPE_P) ? 'P' : ((v->s.pict_type == AV_PICTURE_TYPE_I) ? 'I' : 'B'), pqindex, v->pq, v->halfpq, v->rangeredfrm);
- if(v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_P_TYPE) v->use_ic = 0;
+ if(v->s.pict_type == AV_PICTURE_TYPE_I || v->s.pict_type == AV_PICTURE_TYPE_P) v->use_ic = 0;
switch(v->s.pict_type) {
- case FF_P_TYPE:
+ case AV_PICTURE_TYPE_P:
if (v->pq < 5) v->tt_index = 0;
else if(v->pq < 13) v->tt_index = 1;
else v->tt_index = 2;
@@ -755,7 +729,7 @@ int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
v->ttfrm = TT_8X8;
}
break;
- case FF_B_TYPE:
+ case AV_PICTURE_TYPE_B:
if (v->pq < 5) v->tt_index = 0;
else if(v->pq < 13) v->tt_index = 1;
else v->tt_index = 2;
@@ -801,7 +775,7 @@ int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
{
/* AC Syntax */
v->c_ac_table_index = decode012(gb);
- if (v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_BI_TYPE)
+ if (v->s.pict_type == AV_PICTURE_TYPE_I || v->s.pict_type == AV_PICTURE_TYPE_BI)
{
v->y_ac_table_index = decode012(gb);
}
@@ -809,8 +783,8 @@ int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
v->s.dc_table_index = get_bits1(gb);
}
- if(v->s.pict_type == FF_BI_TYPE) {
- v->s.pict_type = FF_B_TYPE;
+ if(v->s.pict_type == AV_PICTURE_TYPE_BI) {
+ v->s.pict_type = AV_PICTURE_TYPE_B;
v->bi_type = 1;
}
return 0;
@@ -833,19 +807,19 @@ int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
}
switch(get_unary(gb, 0, 4)) {
case 0:
- v->s.pict_type = FF_P_TYPE;
+ v->s.pict_type = AV_PICTURE_TYPE_P;
break;
case 1:
- v->s.pict_type = FF_B_TYPE;
+ v->s.pict_type = AV_PICTURE_TYPE_B;
break;
case 2:
- v->s.pict_type = FF_I_TYPE;
+ v->s.pict_type = AV_PICTURE_TYPE_I;
break;
case 3:
- v->s.pict_type = FF_BI_TYPE;
+ v->s.pict_type = AV_PICTURE_TYPE_BI;
break;
case 4:
- v->s.pict_type = FF_P_TYPE; // skipped pic
+ v->s.pict_type = AV_PICTURE_TYPE_P; // skipped pic
v->p_frame_skipped = 1;
return 0;
}
@@ -867,11 +841,11 @@ int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
if(v->interlace)
v->uvsamp = get_bits1(gb);
if(v->finterpflag) v->interpfrm = get_bits1(gb);
- if(v->s.pict_type == FF_B_TYPE) {
+ if(v->s.pict_type == AV_PICTURE_TYPE_B) {
v->bfraction_lut_index = get_vlc2(gb, ff_vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
v->bfraction = ff_vc1_bfraction_lut[v->bfraction_lut_index];
if(v->bfraction == 0) {
- v->s.pict_type = FF_BI_TYPE; /* XXX: should not happen here */
+ v->s.pict_type = AV_PICTURE_TYPE_BI; /* XXX: should not happen here */
}
}
pqindex = get_bits(gb, 5);
@@ -895,14 +869,14 @@ int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
if(v->postprocflag)
v->postproc = get_bits(gb, 2);
- if(v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_P_TYPE) v->use_ic = 0;
+ if(v->s.pict_type == AV_PICTURE_TYPE_I || v->s.pict_type == AV_PICTURE_TYPE_P) v->use_ic = 0;
if(v->parse_only)
return 0;
switch(v->s.pict_type) {
- case FF_I_TYPE:
- case FF_BI_TYPE:
+ case AV_PICTURE_TYPE_I:
+ case AV_PICTURE_TYPE_BI:
status = bitplane_decoding(v->acpred_plane, &v->acpred_is_raw, v);
if (status < 0) return -1;
av_log(v->s.avctx, AV_LOG_DEBUG, "ACPRED plane encoding: "
@@ -918,7 +892,7 @@ int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
}
}
break;
- case FF_P_TYPE:
+ case AV_PICTURE_TYPE_P:
if (v->extended_mv) v->mvrange = get_unary(gb, 0, 3);
else v->mvrange = 0;
v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
@@ -1007,7 +981,7 @@ int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
v->ttfrm = TT_8X8;
}
break;
- case FF_B_TYPE:
+ case AV_PICTURE_TYPE_B:
if (v->extended_mv) v->mvrange = get_unary(gb, 0, 3);
else v->mvrange = 0;
v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
@@ -1058,20 +1032,20 @@ int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
/* AC Syntax */
v->c_ac_table_index = decode012(gb);
- if (v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_BI_TYPE)
+ if (v->s.pict_type == AV_PICTURE_TYPE_I || v->s.pict_type == AV_PICTURE_TYPE_BI)
{
v->y_ac_table_index = decode012(gb);
}
/* DC Syntax */
v->s.dc_table_index = get_bits1(gb);
- if ((v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_BI_TYPE) && v->dquant) {
+ if ((v->s.pict_type == AV_PICTURE_TYPE_I || v->s.pict_type == AV_PICTURE_TYPE_BI) && v->dquant) {
av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
vop_dquant_decoding(v);
}
v->bi_type = 0;
- if(v->s.pict_type == FF_BI_TYPE) {
- v->s.pict_type = FF_B_TYPE;
+ if(v->s.pict_type == AV_PICTURE_TYPE_BI) {
+ v->s.pict_type = AV_PICTURE_TYPE_B;
v->bi_type = 1;
}
return 0;
diff --git a/libavcodec/vc1.h b/libavcodec/vc1.h
index 19be3c3452..6d4c0aa7a3 100644
--- a/libavcodec/vc1.h
+++ b/libavcodec/vc1.h
@@ -317,6 +317,8 @@ typedef struct VC1Context{
int bi_type;
int x8_type;
+ DCTELEM (*block)[6][64];
+ int n_allocated_blks, cur_blk_idx, left_blk_idx, topleft_blk_idx, top_blk_idx;
uint32_t *cbp_base, *cbp;
uint8_t *is_intra_base, *is_intra;
int16_t (*luma_mv_base)[2], (*luma_mv)[2];
diff --git a/libavcodec/vc1_parser.c b/libavcodec/vc1_parser.c
index 98caa2048d..4ea9c47076 100644
--- a/libavcodec/vc1_parser.c
+++ b/libavcodec/vc1_parser.c
@@ -67,9 +67,9 @@ static void vc1_extract_headers(AVCodecParserContext *s, AVCodecContext *avctx,
else
vc1_parse_frame_header_adv(&vpc->v, &gb);
- /* keep FF_BI_TYPE internal to VC1 */
- if (vpc->v.s.pict_type == FF_BI_TYPE)
- s->pict_type = FF_B_TYPE;
+ /* keep AV_PICTURE_TYPE_BI internal to VC1 */
+ if (vpc->v.s.pict_type == AV_PICTURE_TYPE_BI)
+ s->pict_type = AV_PICTURE_TYPE_B;
else
s->pict_type = vpc->v.s.pict_type;
diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c
index 27695f184a..922481406f 100644
--- a/libavcodec/vc1dec.c
+++ b/libavcodec/vc1dec.c
@@ -160,6 +160,72 @@ enum Imode {
/** @} */ //Bitplane group
+static void vc1_put_signed_blocks_clamped(VC1Context *v)
+{
+ MpegEncContext *s = &v->s;
+
+ /* The put pixels loop is always one MB row behind the decoding loop,
+ * because we can only put pixels when overlap filtering is done, and
+ * for filtering of the bottom edge of a MB, we need the next MB row
+ * present as well.
+ * Within the row, the put pixels loop is also one MB col behind the
+ * decoding loop. The reason for this is again, because for filtering
+ * of the right MB edge, we need the next MB present. */
+ if (!s->first_slice_line) {
+ if (s->mb_x) {
+ s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
+ s->dest[0] - 16 * s->linesize - 16,
+ s->linesize);
+ s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
+ s->dest[0] - 16 * s->linesize - 8,
+ s->linesize);
+ s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
+ s->dest[0] - 8 * s->linesize - 16,
+ s->linesize);
+ s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
+ s->dest[0] - 8 * s->linesize - 8,
+ s->linesize);
+ s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
+ s->dest[1] - 8 * s->uvlinesize - 8,
+ s->uvlinesize);
+ s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
+ s->dest[2] - 8 * s->uvlinesize - 8,
+ s->uvlinesize);
+ }
+ if (s->mb_x == s->mb_width - 1) {
+ s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
+ s->dest[0] - 16 * s->linesize,
+ s->linesize);
+ s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
+ s->dest[0] - 16 * s->linesize + 8,
+ s->linesize);
+ s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
+ s->dest[0] - 8 * s->linesize,
+ s->linesize);
+ s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
+ s->dest[0] - 8 * s->linesize + 8,
+ s->linesize);
+ s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
+ s->dest[1] - 8 * s->uvlinesize,
+ s->uvlinesize);
+ s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
+ s->dest[2] - 8 * s->uvlinesize,
+ s->uvlinesize);
+ }
+ }
+
+#define inc_blk_idx(idx) do { \
+ idx++; \
+ if (idx >= v->n_allocated_blks) \
+ idx = 0; \
+ } while (0)
+
+ inc_blk_idx(v->topleft_blk_idx);
+ inc_blk_idx(v->top_blk_idx);
+ inc_blk_idx(v->left_blk_idx);
+ inc_blk_idx(v->cur_blk_idx);
+}
+
static void vc1_loop_filter_iblk(VC1Context *v, int pq)
{
MpegEncContext *s = &v->s;
@@ -187,6 +253,151 @@ static void vc1_loop_filter_iblk(VC1Context *v, int pq)
}
}
+static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
+{
+ MpegEncContext *s = &v->s;
+ int j;
+
+ /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
+ * means it runs two rows/cols behind the decoding loop. */
+ if (!s->first_slice_line) {
+ if (s->mb_x) {
+ if (s->mb_y >= s->start_mb_y + 2) {
+ v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
+
+ if (s->mb_x >= 2)
+ v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
+ v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
+ for(j = 0; j < 2; j++) {
+ v->vc1dsp.vc1_v_loop_filter8(s->dest[j+1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
+ if (s->mb_x >= 2) {
+ v->vc1dsp.vc1_h_loop_filter8(s->dest[j+1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
+ }
+ }
+ }
+ v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
+ }
+
+ if (s->mb_x == s->mb_width - 1) {
+ if (s->mb_y >= s->start_mb_y + 2) {
+ v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
+
+ if (s->mb_x)
+ v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
+ v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
+ for(j = 0; j < 2; j++) {
+ v->vc1dsp.vc1_v_loop_filter8(s->dest[j+1] - 8 * s->uvlinesize, s->uvlinesize, pq);
+ if (s->mb_x >= 2) {
+ v->vc1dsp.vc1_h_loop_filter8(s->dest[j+1] - 16 * s->uvlinesize, s->uvlinesize, pq);
+ }
+ }
+ }
+ v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
+ }
+
+ if (s->mb_y == s->mb_height) {
+ if (s->mb_x) {
+ if (s->mb_x >= 2)
+ v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
+ v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
+ if (s->mb_x >= 2) {
+ for(j = 0; j < 2; j++) {
+ v->vc1dsp.vc1_h_loop_filter8(s->dest[j+1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
+ }
+ }
+ }
+
+ if (s->mb_x == s->mb_width - 1) {
+ if (s->mb_x)
+ v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
+ v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
+ if (s->mb_x) {
+ for(j = 0; j < 2; j++) {
+ v->vc1dsp.vc1_h_loop_filter8(s->dest[j+1] - 8 * s->uvlinesize, s->uvlinesize, pq);
+ }
+ }
+ }
+ }
+ }
+}
+
+static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
+{
+ MpegEncContext *s = &v->s;
+ int mb_pos;
+
+ if (v->condover == CONDOVER_NONE)
+ return;
+
+ mb_pos = s->mb_x + s->mb_y * s->mb_stride;
+
+ /* Within a MB, the horizontal overlap always runs before the vertical.
+ * To accomplish that, we run the H on left and internal borders of the
+ * currently decoded MB. Then, we wait for the next overlap iteration
+ * to do H overlap on the right edge of this MB, before moving over and
+ * running the V overlap. Therefore, the V overlap makes us trail by one
+ * MB col and the H overlap filter makes us trail by one MB row. This
+ * is reflected in the time at which we run the put_pixels loop. */
+ if(v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
+ if(s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
+ v->over_flags_plane[mb_pos - 1])) {
+ v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][1],
+ v->block[v->cur_blk_idx][0]);
+ v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][3],
+ v->block[v->cur_blk_idx][2]);
+ if(!(s->flags & CODEC_FLAG_GRAY)) {
+ v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][4],
+ v->block[v->cur_blk_idx][4]);
+ v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][5],
+ v->block[v->cur_blk_idx][5]);
+ }
+ }
+ v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][0],
+ v->block[v->cur_blk_idx][1]);
+ v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][2],
+ v->block[v->cur_blk_idx][3]);
+
+ if (s->mb_x == s->mb_width - 1) {
+ if(!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
+ v->over_flags_plane[mb_pos - s->mb_stride])) {
+ v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][2],
+ v->block[v->cur_blk_idx][0]);
+ v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][3],
+ v->block[v->cur_blk_idx][1]);
+ if(!(s->flags & CODEC_FLAG_GRAY)) {
+ v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][4],
+ v->block[v->cur_blk_idx][4]);
+ v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][5],
+ v->block[v->cur_blk_idx][5]);
+ }
+ }
+ v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][0],
+ v->block[v->cur_blk_idx][2]);
+ v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][1],
+ v->block[v->cur_blk_idx][3]);
+ }
+ }
+ if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
+ if(!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
+ v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
+ v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][2],
+ v->block[v->left_blk_idx][0]);
+ v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][3],
+ v->block[v->left_blk_idx][1]);
+ if(!(s->flags & CODEC_FLAG_GRAY)) {
+ v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][4],
+ v->block[v->left_blk_idx][4]);
+ v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][5],
+ v->block[v->left_blk_idx][5]);
+ }
+ }
+ v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][0],
+ v->block[v->left_blk_idx][2]);
+ v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][1],
+ v->block[v->left_blk_idx][3]);
+ }
+}
+
/** Do motion compensation over 1 macroblock
* Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
*/
@@ -203,7 +414,7 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
my = s->mv[dir][0][1];
// store motion vectors for further use in B frames
- if(s->pict_type == FF_P_TYPE) {
+ if(s->pict_type == AV_PICTURE_TYPE_P) {
s->current_picture.motion_val[1][s->block_index[0]][0] = mx;
s->current_picture.motion_val[1][s->block_index[0]][1] = my;
}
@@ -2016,7 +2227,8 @@ static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n, int mquan
if(i==1)
v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
else{
- v->vc1dsp.vc1_inv_trans_8x8_add(dst, linesize, block);
+ v->vc1dsp.vc1_inv_trans_8x8(block);
+ s->dsp.add_pixels_clamped(block, dst, linesize);
}
}
break;
@@ -2258,7 +2470,7 @@ static int vc1_decode_p_mb(VC1Context *v)
{
MpegEncContext *s = &v->s;
GetBitContext *gb = &s->gb;
- int i;
+ int i, j;
int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
int cbp; /* cbp decoding stuff */
int mqdiff, mquant; /* MB quantization */
@@ -2288,8 +2500,6 @@ static int vc1_decode_p_mb(VC1Context *v)
{
if (!skipped)
{
- vc1_idct_func idct8x8_fn;
-
GET_MVDATA(dmv_x, dmv_y);
if (s->mb_intra) {
@@ -2324,7 +2534,6 @@ static int vc1_decode_p_mb(VC1Context *v)
VC1_TTMB_VLC_BITS, 2);
if(!s->mb_intra) vc1_mc_1mv(v, 0);
dst_idx = 0;
- idct8x8_fn = v->vc1dsp.vc1_inv_trans_8x8_put_signed[!!v->rangeredfrm];
for (i=0; i<6; i++)
{
s->dc_val[0][s->block_index[i]] = 0;
@@ -2342,9 +2551,9 @@ static int vc1_decode_p_mb(VC1Context *v)
vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
- idct8x8_fn(s->dest[dst_idx] + off,
- i & 4 ? s->uvlinesize : s->linesize,
- s->block[i]);
+ v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
+ if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
+ s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
if(v->pq >= 9 && v->overlap) {
if(v->c_avail)
v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
@@ -2380,7 +2589,6 @@ static int vc1_decode_p_mb(VC1Context *v)
{
int intra_count = 0, coded_inter = 0;
int is_intra[6], is_coded[6];
- vc1_idct_func idct8x8_fn;
/* Get CBPCY */
cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
for (i=0; i<6; i++)
@@ -2431,7 +2639,6 @@ static int vc1_decode_p_mb(VC1Context *v)
}
if (!v->ttmbf && coded_inter)
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
- idct8x8_fn = v->vc1dsp.vc1_inv_trans_8x8_put_signed[!!v->rangeredfrm];
for (i=0; i<6; i++)
{
dst_idx += i >> 2;
@@ -2447,9 +2654,9 @@ static int vc1_decode_p_mb(VC1Context *v)
vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant, (i&4)?v->codingset2:v->codingset);
if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
- idct8x8_fn(s->dest[dst_idx] + off,
- (i&4)?s->uvlinesize:s->linesize,
- s->block[i]);
+ v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
+ if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
+ s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
if(v->pq >= 9 && v->overlap) {
if(v->c_avail)
v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
@@ -2497,7 +2704,7 @@ static void vc1_decode_b_mb(VC1Context *v)
{
MpegEncContext *s = &v->s;
GetBitContext *gb = &s->gb;
- int i;
+ int i, j;
int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
int cbp = 0; /* cbp decoding stuff */
int mqdiff, mquant; /* MB quantization */
@@ -2510,7 +2717,6 @@ static void vc1_decode_b_mb(VC1Context *v)
int skipped, direct;
int dmv_x[2], dmv_y[2];
int bmvtype = BMV_TYPE_BACKWARD;
- vc1_idct_func idct8x8_fn;
mquant = v->pq; /* Lossy initialization */
s->mb_intra = 0;
@@ -2608,7 +2814,6 @@ static void vc1_decode_b_mb(VC1Context *v)
}
}
dst_idx = 0;
- idct8x8_fn = v->vc1dsp.vc1_inv_trans_8x8_put_signed[!!v->rangeredfrm];
for (i=0; i<6; i++)
{
s->dc_val[0][s->block_index[i]] = 0;
@@ -2626,9 +2831,9 @@ static void vc1_decode_b_mb(VC1Context *v)
vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
- idct8x8_fn(s->dest[dst_idx] + off,
- i & 4 ? s->uvlinesize : s->linesize,
- s->block[i]);
+ v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
+ if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
+ s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
} else if(val) {
vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block, s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize, (i&4) && (s->flags & CODEC_FLAG_GRAY), NULL);
if(!v->ttmbf && ttmb < 8) ttmb = -1;
@@ -2641,12 +2846,11 @@ static void vc1_decode_b_mb(VC1Context *v)
*/
static void vc1_decode_i_blocks(VC1Context *v)
{
- int k;
+ int k, j;
MpegEncContext *s = &v->s;
int cbp, val;
uint8_t *coded_val;
int mb_pos;
- vc1_idct_func idct8x8_fn;
/* select codingmode used for VLC tables selection */
switch(v->y_ac_table_index){
@@ -2681,10 +2885,6 @@ static void vc1_decode_i_blocks(VC1Context *v)
s->mb_x = s->mb_y = 0;
s->mb_intra = 1;
s->first_slice_line = 1;
- if(v->pq >= 9 && v->overlap) {
- idct8x8_fn = v->vc1dsp.vc1_inv_trans_8x8_put_signed[!!v->rangeredfrm];
- } else
- idct8x8_fn = v->vc1dsp.vc1_inv_trans_8x8_put[!!v->rangeredfrm];
for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
s->mb_x = 0;
ff_init_block_index(s);
@@ -2721,9 +2921,14 @@ static void vc1_decode_i_blocks(VC1Context *v)
vc1_decode_i_block(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2);
if (k > 3 && (s->flags & CODEC_FLAG_GRAY)) continue;
- idct8x8_fn(dst[k],
- k & 4 ? s->uvlinesize : s->linesize,
- s->block[k]);
+ v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
+ if(v->pq >= 9 && v->overlap) {
+ if (v->rangeredfrm) for(j = 0; j < 64; j++) s->block[k][j] <<= 1;
+ s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
+ } else {
+ if (v->rangeredfrm) for(j = 0; j < 64; j++) s->block[k][j] = (s->block[k][j] - 64) << 1;
+ s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
+ }
}
if(v->pq >= 9 && v->overlap) {
@@ -2770,7 +2975,7 @@ static void vc1_decode_i_blocks(VC1Context *v)
/** Decode blocks of I-frame for advanced profile
*/
-static void vc1_decode_i_blocks_adv(VC1Context *v, int mby_start, int mby_end)
+static void vc1_decode_i_blocks_adv(VC1Context *v)
{
int k;
MpegEncContext *s = &v->s;
@@ -2779,9 +2984,7 @@ static void vc1_decode_i_blocks_adv(VC1Context *v, int mby_start, int mby_end)
int mb_pos;
int mquant = v->pq;
int mqdiff;
- int overlap;
GetBitContext *gb = &s->gb;
- vc1_idct_func idct8x8_fn;
/* select codingmode used for VLC tables selection */
switch(v->y_ac_table_index){
@@ -2812,27 +3015,20 @@ static void vc1_decode_i_blocks_adv(VC1Context *v, int mby_start, int mby_end)
s->mb_x = s->mb_y = 0;
s->mb_intra = 1;
s->first_slice_line = 1;
- s->mb_y = mby_start;
- if (mby_start) {
+ s->mb_y = s->start_mb_y;
+ if (s->start_mb_y) {
s->mb_x = 0;
ff_init_block_index(s);
memset(&s->coded_block[s->block_index[0]-s->b8_stride], 0,
s->b8_stride * sizeof(*s->coded_block));
}
- idct8x8_fn = v->vc1dsp.vc1_inv_trans_8x8_put_signed[0];
- for(; s->mb_y < mby_end; s->mb_y++) {
+ for(; s->mb_y < s->end_mb_y; s->mb_y++) {
s->mb_x = 0;
ff_init_block_index(s);
for(;s->mb_x < s->mb_width; s->mb_x++) {
- uint8_t *dst[6];
+ DCTELEM (*block)[64] = v->block[v->cur_blk_idx];
ff_update_block_index(s);
- dst[0] = s->dest[0];
- dst[1] = dst[0] + 8;
- dst[2] = s->dest[0] + s->linesize * 8;
- dst[3] = dst[2] + 8;
- dst[4] = s->dest[1];
- dst[5] = s->dest[2];
- s->dsp.clear_blocks(s->block[0]);
+ s->dsp.clear_blocks(block[0]);
mb_pos = s->mb_x + s->mb_y * s->mb_stride;
s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
@@ -2845,13 +3041,8 @@ static void vc1_decode_i_blocks_adv(VC1Context *v, int mby_start, int mby_end)
else
v->s.ac_pred = v->acpred_plane[mb_pos];
- if(v->condover == CONDOVER_SELECT) {
- if(v->overflg_is_raw)
- overlap = get_bits1(&v->s.gb);
- else
- overlap = v->over_flags_plane[mb_pos];
- } else
- overlap = (v->condover == CONDOVER_ALL);
+ if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
+ v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
GET_MQUANT();
@@ -2873,40 +3064,18 @@ static void vc1_decode_i_blocks_adv(VC1Context *v, int mby_start, int mby_end)
v->a_avail = !s->first_slice_line || (k==2 || k==3);
v->c_avail = !!s->mb_x || (k==1 || k==3);
- vc1_decode_i_block_adv(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2, mquant);
+ vc1_decode_i_block_adv(v, block[k], k, val, (k<4)? v->codingset : v->codingset2, mquant);
if (k > 3 && (s->flags & CODEC_FLAG_GRAY)) continue;
- idct8x8_fn(dst[k],
- k & 4 ? s->uvlinesize : s->linesize,
- s->block[k]);
+ v->vc1dsp.vc1_inv_trans_8x8(block[k]);
}
- if(overlap) {
- if(s->mb_x) {
- v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
- v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
- if(!(s->flags & CODEC_FLAG_GRAY)) {
- v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
- v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
- }
- }
- v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
- v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
- if(!s->first_slice_line) {
- v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
- v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
- if(!(s->flags & CODEC_FLAG_GRAY)) {
- v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
- v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
- }
- }
- v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
- v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
- }
- if(v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
+ vc1_smooth_overlap_filter_iblk(v);
+ vc1_put_signed_blocks_clamped(v);
+ if(v->s.loop_filter) vc1_loop_filter_iblk_delayed(v, v->pq);
if(get_bits_count(&s->gb) > v->bits) {
- ff_er_add_slice(s, 0, mby_start, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
+ ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
return;
}
@@ -2917,12 +3086,21 @@ static void vc1_decode_i_blocks_adv(VC1Context *v, int mby_start, int mby_end)
ff_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
s->first_slice_line = 0;
}
+
+ /* raw bottom MB row */
+ s->mb_x = 0;
+ ff_init_block_index(s);
+ for(;s->mb_x < s->mb_width; s->mb_x++) {
+ ff_update_block_index(s);
+ vc1_put_signed_blocks_clamped(v);
+ if(v->s.loop_filter) vc1_loop_filter_iblk_delayed(v, v->pq);
+ }
if (v->s.loop_filter)
ff_draw_horiz_band(s, (s->mb_height-1)*16, 16);
- ff_er_add_slice(s, 0, mby_start, s->mb_width - 1, mby_end - 1, (AC_END|DC_END|MV_END));
+ ff_er_add_slice(s, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, (AC_END|DC_END|MV_END));
}
-static void vc1_decode_p_blocks(VC1Context *v, int mby_start, int mby_end)
+static void vc1_decode_p_blocks(VC1Context *v)
{
MpegEncContext *s = &v->s;
int apply_loop_filter;
@@ -2955,17 +3133,17 @@ static void vc1_decode_p_blocks(VC1Context *v, int mby_start, int mby_end)
apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
s->first_slice_line = 1;
memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
- for(s->mb_y = mby_start; s->mb_y < mby_end; s->mb_y++) {
+ for(s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
s->mb_x = 0;
ff_init_block_index(s);
for(; s->mb_x < s->mb_width; s->mb_x++) {
ff_update_block_index(s);
vc1_decode_p_mb(v);
- if (s->mb_y != mby_start && apply_loop_filter)
+ if (s->mb_y != s->start_mb_y && apply_loop_filter)
vc1_apply_p_loop_filter(v);
if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
- ff_er_add_slice(s, 0, mby_start, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
+ ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
return;
}
@@ -2974,7 +3152,7 @@ static void vc1_decode_p_blocks(VC1Context *v, int mby_start, int mby_end)
memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0])*s->mb_stride);
memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0])*s->mb_stride);
- if (s->mb_y != mby_start) ff_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
+ if (s->mb_y != s->start_mb_y) ff_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
s->first_slice_line = 0;
}
if (apply_loop_filter) {
@@ -2985,12 +3163,12 @@ static void vc1_decode_p_blocks(VC1Context *v, int mby_start, int mby_end)
vc1_apply_p_loop_filter(v);
}
}
- if (mby_end >= mby_start)
- ff_draw_horiz_band(s, (mby_end-1) * 16, 16);
- ff_er_add_slice(s, 0, mby_start, s->mb_width - 1, mby_end - 1, (AC_END|DC_END|MV_END));
+ if (s->end_mb_y >= s->start_mb_y)
+ ff_draw_horiz_band(s, (s->end_mb_y-1) * 16, 16);
+ ff_er_add_slice(s, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, (AC_END|DC_END|MV_END));
}
-static void vc1_decode_b_blocks(VC1Context *v, int mby_start, int mby_end)
+static void vc1_decode_b_blocks(VC1Context *v)
{
MpegEncContext *s = &v->s;
@@ -3020,7 +3198,7 @@ static void vc1_decode_b_blocks(VC1Context *v, int mby_start, int mby_end)
}
s->first_slice_line = 1;
- for(s->mb_y = mby_start; s->mb_y < mby_end; s->mb_y++) {
+ for(s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
s->mb_x = 0;
ff_init_block_index(s);
for(; s->mb_x < s->mb_width; s->mb_x++) {
@@ -3028,7 +3206,7 @@ static void vc1_decode_b_blocks(VC1Context *v, int mby_start, int mby_end)
vc1_decode_b_mb(v);
if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
- ff_er_add_slice(s, 0, mby_start, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
+ ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
return;
}
@@ -3042,7 +3220,7 @@ static void vc1_decode_b_blocks(VC1Context *v, int mby_start, int mby_end)
}
if (v->s.loop_filter)
ff_draw_horiz_band(s, (s->mb_height-1)*16, 16);
- ff_er_add_slice(s, 0, mby_start, s->mb_width - 1, mby_end - 1, (AC_END|DC_END|MV_END));
+ ff_er_add_slice(s, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, (AC_END|DC_END|MV_END));
}
static void vc1_decode_skip_blocks(VC1Context *v)
@@ -3061,37 +3239,41 @@ static void vc1_decode_skip_blocks(VC1Context *v)
ff_draw_horiz_band(s, s->mb_y * 16, 16);
s->first_slice_line = 0;
}
- s->pict_type = FF_P_TYPE;
+ s->pict_type = AV_PICTURE_TYPE_P;
}
-static void vc1_decode_blocks(VC1Context *v, int mby_start, int mby_end)
+static void vc1_decode_blocks(VC1Context *v)
{
v->s.esc3_level_length = 0;
if(v->x8_type){
ff_intrax8_decode_picture(&v->x8, 2*v->pq+v->halfpq, v->pq*(!v->pquantizer) );
}else{
+ v->cur_blk_idx = 0;
+ v->left_blk_idx = -1;
+ v->topleft_blk_idx = 1;
+ v->top_blk_idx = 2;
switch(v->s.pict_type) {
- case FF_I_TYPE:
+ case AV_PICTURE_TYPE_I:
if(v->profile == PROFILE_ADVANCED)
- vc1_decode_i_blocks_adv(v, mby_start, mby_end);
+ vc1_decode_i_blocks_adv(v);
else
vc1_decode_i_blocks(v);
break;
- case FF_P_TYPE:
+ case AV_PICTURE_TYPE_P:
if(v->p_frame_skipped)
vc1_decode_skip_blocks(v);
else
- vc1_decode_p_blocks(v, mby_start, mby_end);
+ vc1_decode_p_blocks(v);
break;
- case FF_B_TYPE:
+ case AV_PICTURE_TYPE_B:
if(v->bi_type){
if(v->profile == PROFILE_ADVANCED)
- vc1_decode_i_blocks_adv(v, mby_start, mby_end);
+ vc1_decode_i_blocks_adv(v);
else
vc1_decode_i_blocks(v);
}else
- vc1_decode_b_blocks(v, mby_start, mby_end);
+ vc1_decode_b_blocks(v);
break;
}
}
@@ -3341,6 +3523,8 @@ static av_cold int vc1_decode_init(AVCodecContext *avctx)
v->acpred_plane = av_malloc(s->mb_stride * s->mb_height);
v->over_flags_plane = av_malloc(s->mb_stride * s->mb_height);
+ v->n_allocated_blks = s->mb_width + 2;
+ v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
v->cbp = v->cbp_base + s->mb_stride;
v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
@@ -3498,7 +3682,7 @@ static int vc1_decode_frame(AVCodecContext *avctx,
}
}
- if (v->res_sprite && s->pict_type!=FF_I_TYPE) {
+ if (v->res_sprite && s->pict_type!=AV_PICTURE_TYPE_I) {
av_log(v->s.avctx, AV_LOG_WARNING, "Sprite decoder: expected I-frame\n");
}
@@ -3513,18 +3697,18 @@ static int vc1_decode_frame(AVCodecContext *avctx,
// for skipping the frame
s->current_picture.pict_type= s->pict_type;
- s->current_picture.key_frame= s->pict_type == FF_I_TYPE;
+ s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
/* skip B-frames if we don't have reference frames */
- if(s->last_picture_ptr==NULL && (s->pict_type==FF_B_TYPE || s->dropable)){
+ if(s->last_picture_ptr==NULL && (s->pict_type==AV_PICTURE_TYPE_B || s->dropable)){
goto err;
}
#if FF_API_HURRY_UP
/* skip b frames if we are in a hurry */
if(avctx->hurry_up && s->pict_type==FF_B_TYPE) return -1;//buf_size;
#endif
- if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==FF_B_TYPE)
- || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=FF_I_TYPE)
+ if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==AV_PICTURE_TYPE_B)
+ || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=AV_PICTURE_TYPE_I)
|| avctx->skip_frame >= AVDISCARD_ALL) {
goto end;
}
@@ -3536,7 +3720,7 @@ static int vc1_decode_frame(AVCodecContext *avctx,
#endif
if(s->next_p_frame_damaged){
- if(s->pict_type==FF_B_TYPE)
+ if(s->pict_type==AV_PICTURE_TYPE_B)
goto end;
else
s->next_p_frame_damaged=0;
@@ -3566,8 +3750,9 @@ static int vc1_decode_frame(AVCodecContext *avctx,
for (i = 0; i <= n_slices; i++) {
if (i && get_bits1(&s->gb))
vc1_parse_frame_header_adv(v, &s->gb);
- vc1_decode_blocks(v, i == 0 ? 0 : FFMAX(0, slices[i-1].mby_start),
- i == n_slices ? s->mb_height : FFMIN(s->mb_height, slices[i].mby_start));
+ s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start);
+ s->end_mb_y = (i == n_slices) ? s->mb_height : FFMIN(s->mb_height, slices[i].mby_start);
+ vc1_decode_blocks(v);
if (i != n_slices) s->gb = slices[i].gb;
}
//av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), s->gb.size_in_bits);
@@ -3580,7 +3765,7 @@ static int vc1_decode_frame(AVCodecContext *avctx,
assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type);
assert(s->current_picture.pict_type == s->pict_type);
- if (s->pict_type == FF_B_TYPE || s->low_delay) {
+ if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
*pict= *(AVFrame*)s->current_picture_ptr;
} else if (s->last_picture_ptr != NULL) {
*pict= *(AVFrame*)s->last_picture_ptr;
@@ -3624,6 +3809,7 @@ static av_cold int vc1_decode_end(AVCodecContext *avctx)
av_freep(&v->acpred_plane);
av_freep(&v->over_flags_plane);
av_freep(&v->mb_type_base);
+ av_freep(&v->block);
av_freep(&v->cbp_base);
av_freep(&v->ttblk_base);
av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
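The vc1dec.c changes above stop threading the slice bounds through vc1_decode_blocks(): the caller now stores them in s->start_mb_y / s->end_mb_y and the per-picture decoders read them back from the MpegEncContext. A minimal sketch of that convention follows (decode_rows_sketch is a hypothetical name, not a function in the patch):

    static void decode_rows_sketch(VC1Context *v)
    {
        MpegEncContext *s = &v->s;
        int mb_y;

        /* slice bounds are picked up from the context rather than passed in */
        for (mb_y = s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
            /* decode one macroblock row of the current slice here */
        }
    }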
diff --git a/libavcodec/vc1dsp.c b/libavcodec/vc1dsp.c
index dbe2120829..2eaa47a05b 100644
--- a/libavcodec/vc1dsp.c
+++ b/libavcodec/vc1dsp.c
@@ -78,6 +78,58 @@ static void vc1_h_overlap_c(uint8_t* src, int stride)
}
}
+static void vc1_v_s_overlap_c(DCTELEM *top, DCTELEM *bottom)
+{
+ int i;
+ int a, b, c, d;
+ int d1, d2;
+ int rnd1 = 4, rnd2 = 3;
+ for(i = 0; i < 8; i++) {
+ a = top[48];
+ b = top[56];
+ c = bottom[0];
+ d = bottom[8];
+ d1 = a - d;
+ d2 = a - d + b - c;
+
+ top[48] = ((a << 3) - d1 + rnd1) >> 3;
+ top[56] = ((b << 3) - d2 + rnd2) >> 3;
+ bottom[0] = ((c << 3) + d2 + rnd1) >> 3;
+ bottom[8] = ((d << 3) + d1 + rnd2) >> 3;
+
+ bottom++;
+ top++;
+ rnd2 = 7 - rnd2;
+ rnd1 = 7 - rnd1;
+ }
+}
+
+static void vc1_h_s_overlap_c(DCTELEM *left, DCTELEM *right)
+{
+ int i;
+ int a, b, c, d;
+ int d1, d2;
+ int rnd1 = 4, rnd2 = 3;
+ for(i = 0; i < 8; i++) {
+ a = left[6];
+ b = left[7];
+ c = right[0];
+ d = right[1];
+ d1 = a - d;
+ d2 = a - d + b - c;
+
+ left[6] = ((a << 3) - d1 + rnd1) >> 3;
+ left[7] = ((b << 3) - d2 + rnd2) >> 3;
+ right[0] = ((c << 3) + d2 + rnd1) >> 3;
+ right[1] = ((d << 3) + d1 + rnd2) >> 3;
+
+ right += 8;
+ left += 8;
+ rnd2 = 7 - rnd2;
+ rnd1 = 7 - rnd1;
+ }
+}
+
/**
* VC-1 in-loop deblocking filter for one line
* @param src source block type
@@ -199,7 +251,7 @@ static void vc1_inv_trans_8x8_dc_c(uint8_t *dest, int linesize, DCTELEM *block)
}
}
-static av_always_inline void vc1_inv_trans_8x8_c(DCTELEM block[64], int shl, int sub)
+static void vc1_inv_trans_8x8_c(DCTELEM block[64])
{
int i;
register int t1,t2,t3,t4,t5,t6,t7,t8;
@@ -254,50 +306,20 @@ static av_always_inline void vc1_inv_trans_8x8_c(DCTELEM block[64], int shl, int
t3 = 9 * src[ 8] - 16 * src[24] + 4 * src[40] + 15 * src[56];
t4 = 4 * src[ 8] - 9 * src[24] + 15 * src[40] - 16 * src[56];
- dst[ 0] = (((t5 + t1 ) >> 7) - sub) << shl;
- dst[ 8] = (((t6 + t2 ) >> 7) - sub) << shl;
- dst[16] = (((t7 + t3 ) >> 7) - sub) << shl;
- dst[24] = (((t8 + t4 ) >> 7) - sub) << shl;
- dst[32] = (((t8 - t4 + 1) >> 7) - sub) << shl;
- dst[40] = (((t7 - t3 + 1) >> 7) - sub) << shl;
- dst[48] = (((t6 - t2 + 1) >> 7) - sub) << shl;
- dst[56] = (((t5 - t1 + 1) >> 7) - sub) << shl;
+ dst[ 0] = (t5 + t1) >> 7;
+ dst[ 8] = (t6 + t2) >> 7;
+ dst[16] = (t7 + t3) >> 7;
+ dst[24] = (t8 + t4) >> 7;
+ dst[32] = (t8 - t4 + 1) >> 7;
+ dst[40] = (t7 - t3 + 1) >> 7;
+ dst[48] = (t6 - t2 + 1) >> 7;
+ dst[56] = (t5 - t1 + 1) >> 7;
src++;
dst++;
}
}
-static void vc1_inv_trans_8x8_add_c(uint8_t *dest, int linesize, DCTELEM *block)
-{
- vc1_inv_trans_8x8_c(block, 0, 0);
- ff_add_pixels_clamped_c(block, dest, linesize);
-}
-
-static void vc1_inv_trans_8x8_put_signed_c(uint8_t *dest, int linesize, DCTELEM *block)
-{
- vc1_inv_trans_8x8_c(block, 0, 0);
- ff_put_signed_pixels_clamped_c(block, dest, linesize);
-}
-
-static void vc1_inv_trans_8x8_put_signed_rangered_c(uint8_t *dest, int linesize, DCTELEM *block)
-{
- vc1_inv_trans_8x8_c(block, 1, 0);
- ff_put_signed_pixels_clamped_c(block, dest, linesize);
-}
-
-static void vc1_inv_trans_8x8_put_c(uint8_t *dest, int linesize, DCTELEM *block)
-{
- vc1_inv_trans_8x8_c(block, 0, 0);
- ff_put_pixels_clamped_c(block, dest, linesize);
-}
-
-static void vc1_inv_trans_8x8_put_rangered_c(uint8_t *dest, int linesize, DCTELEM *block)
-{
- vc1_inv_trans_8x8_c(block, 1, 64);
- ff_put_pixels_clamped_c(block, dest, linesize);
-}
-
/** Do inverse transform on 8x4 part of block
*/
static void vc1_inv_trans_8x4_dc_c(uint8_t *dest, int linesize, DCTELEM *block)
@@ -692,11 +714,7 @@ static void avg_no_rnd_vc1_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*a
}
av_cold void ff_vc1dsp_init(VC1DSPContext* dsp) {
- dsp->vc1_inv_trans_8x8_add = vc1_inv_trans_8x8_add_c;
- dsp->vc1_inv_trans_8x8_put_signed[0] = vc1_inv_trans_8x8_put_signed_c;
- dsp->vc1_inv_trans_8x8_put_signed[1] = vc1_inv_trans_8x8_put_signed_rangered_c;
- dsp->vc1_inv_trans_8x8_put[0] = vc1_inv_trans_8x8_put_c;
- dsp->vc1_inv_trans_8x8_put[1] = vc1_inv_trans_8x8_put_rangered_c;
+ dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_c;
dsp->vc1_inv_trans_4x8 = vc1_inv_trans_4x8_c;
dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_c;
dsp->vc1_inv_trans_4x4 = vc1_inv_trans_4x4_c;
@@ -706,6 +724,8 @@ av_cold void ff_vc1dsp_init(VC1DSPContext* dsp) {
dsp->vc1_inv_trans_4x4_dc = vc1_inv_trans_4x4_dc_c;
dsp->vc1_h_overlap = vc1_h_overlap_c;
dsp->vc1_v_overlap = vc1_v_overlap_c;
+ dsp->vc1_h_s_overlap = vc1_h_s_overlap_c;
+ dsp->vc1_v_s_overlap = vc1_v_s_overlap_c;
dsp->vc1_v_loop_filter4 = vc1_v_loop_filter4_c;
dsp->vc1_h_loop_filter4 = vc1_h_loop_filter4_c;
dsp->vc1_v_loop_filter8 = vc1_v_loop_filter8_c;
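The new vc1_v_s_overlap / vc1_h_s_overlap entries operate on DCTELEM blocks rather than on pixels, and vc1_inv_trans_8x8 is reduced to a plain in-place transform now that the range-reduction and put/add wrappers are gone. A hedged sketch of how a caller wires the new function pointers (apply_s_overlap is a hypothetical helper; the real call sites are in vc1dec.c):

    static void apply_s_overlap(VC1DSPContext *dsp,
                                DCTELEM *top, DCTELEM *bottom,
                                DCTELEM *left, DCTELEM *right)
    {
        /* smooth the shared edge between vertically adjacent 8x8 blocks */
        dsp->vc1_v_s_overlap(top, bottom);
        /* smooth the shared edge between horizontally adjacent 8x8 blocks */
        dsp->vc1_h_s_overlap(left, right);
    }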
diff --git a/libavcodec/vc1dsp.h b/libavcodec/vc1dsp.h
index db9d892a23..93a9ea3858 100644
--- a/libavcodec/vc1dsp.h
+++ b/libavcodec/vc1dsp.h
@@ -30,13 +30,9 @@
#include "dsputil.h"
-typedef void (*vc1_idct_func)(uint8_t *dest, int line_size, DCTELEM *block);
-
typedef struct VC1DSPContext {
/* vc1 functions */
- vc1_idct_func vc1_inv_trans_8x8_add;
- vc1_idct_func vc1_inv_trans_8x8_put_signed[2];
- vc1_idct_func vc1_inv_trans_8x8_put[2];
+ void (*vc1_inv_trans_8x8)(DCTELEM *b);
void (*vc1_inv_trans_8x4)(uint8_t *dest, int line_size, DCTELEM *block);
void (*vc1_inv_trans_4x8)(uint8_t *dest, int line_size, DCTELEM *block);
void (*vc1_inv_trans_4x4)(uint8_t *dest, int line_size, DCTELEM *block);
@@ -44,8 +40,10 @@ typedef struct VC1DSPContext {
void (*vc1_inv_trans_8x4_dc)(uint8_t *dest, int line_size, DCTELEM *block);
void (*vc1_inv_trans_4x8_dc)(uint8_t *dest, int line_size, DCTELEM *block);
void (*vc1_inv_trans_4x4_dc)(uint8_t *dest, int line_size, DCTELEM *block);
- void (*vc1_v_overlap)(uint8_t* src, int stride);
- void (*vc1_h_overlap)(uint8_t* src, int stride);
+ void (*vc1_v_overlap)(uint8_t *src, int stride);
+ void (*vc1_h_overlap)(uint8_t *src, int stride);
+ void (*vc1_v_s_overlap)(DCTELEM *top, DCTELEM *bottom);
+ void (*vc1_h_s_overlap)(DCTELEM *left, DCTELEM *right);
void (*vc1_v_loop_filter4)(uint8_t *src, int stride, int pq);
void (*vc1_h_loop_filter4)(uint8_t *src, int stride, int pq);
void (*vc1_v_loop_filter8)(uint8_t *src, int stride, int pq);
diff --git a/libavcodec/vcr1.c b/libavcodec/vcr1.c
index 9a9c439881..0d59b7e7ec 100644
--- a/libavcodec/vcr1.c
+++ b/libavcodec/vcr1.c
@@ -61,7 +61,7 @@ static int decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
- p->pict_type= FF_I_TYPE;
+ p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
for(i=0; i<16; i++){
@@ -127,7 +127,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
int size;
*p = *pict;
- p->pict_type= FF_I_TYPE;
+ p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
emms_c();
@@ -146,6 +146,7 @@ static av_cold void common_init(AVCodecContext *avctx){
VCR1Context * const a = avctx->priv_data;
avctx->coded_frame= (AVFrame*)&a->picture;
+ avcodec_get_frame_defaults(&a->picture);
a->avctx= avctx;
}
diff --git a/libavcodec/vdpau.c b/libavcodec/vdpau.c
index 55b2110c02..19bd96bc15 100644
--- a/libavcodec/vdpau.c
+++ b/libavcodec/vdpau.c
@@ -225,12 +225,12 @@ void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf,
render->info.mpeg.backward_reference = VDP_INVALID_HANDLE;
switch(s->pict_type){
- case FF_B_TYPE:
+ case AV_PICTURE_TYPE_B:
next = (struct vdpau_render_state *)s->next_picture.data[0];
assert(next);
render->info.mpeg.backward_reference = next->surface;
// no break here, going to set forward prediction
- case FF_P_TYPE:
+ case AV_PICTURE_TYPE_P:
last = (struct vdpau_render_state *)s->last_picture.data[0];
if (!last) // FIXME: Does this test make sense?
last = render; // predict second field from the first
@@ -295,12 +295,12 @@ void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf,
render->info.vc1.picture_type = s->pict_type - 1 + s->pict_type / 3;
switch(s->pict_type){
- case FF_B_TYPE:
+ case AV_PICTURE_TYPE_B:
next = (struct vdpau_render_state *)s->next_picture.data[0];
assert(next);
render->info.vc1.backward_reference = next->surface;
// no break here, going to set forward prediction
- case FF_P_TYPE:
+ case AV_PICTURE_TYPE_P:
last = (struct vdpau_render_state *)s->last_picture.data[0];
if (!last) // FIXME: Does this test make sense?
last = render; // predict second field from the first
@@ -351,13 +351,13 @@ void ff_vdpau_mpeg4_decode_picture(MpegEncContext *s, const uint8_t *buf,
render->info.mpeg4.backward_reference = VDP_INVALID_HANDLE;
switch (s->pict_type) {
- case FF_B_TYPE:
+ case AV_PICTURE_TYPE_B:
next = (struct vdpau_render_state *)s->next_picture.data[0];
assert(next);
render->info.mpeg4.backward_reference = next->surface;
render->info.mpeg4.vop_coding_type = 2;
// no break here, going to set forward prediction
- case FF_P_TYPE:
+ case AV_PICTURE_TYPE_P:
last = (struct vdpau_render_state *)s->last_picture.data[0];
assert(last);
render->info.mpeg4.forward_reference = last->surface;
diff --git a/libavcodec/version.h b/libavcodec/version.h
index 9046632b94..434d7b93d7 100644
--- a/libavcodec/version.h
+++ b/libavcodec/version.h
@@ -21,7 +21,7 @@
#define AVCODEC_VERSION_H
#define LIBAVCODEC_VERSION_MAJOR 52
-#define LIBAVCODEC_VERSION_MINOR 120
+#define LIBAVCODEC_VERSION_MINOR 121
#define LIBAVCODEC_VERSION_MICRO 0
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
@@ -95,5 +95,8 @@
#ifndef FF_API_OLD_FF_PICT_TYPES
#define FF_API_OLD_FF_PICT_TYPES (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
+#ifndef FF_API_FLAC_GLOBAL_OPTS
+#define FF_API_FLAC_GLOBAL_OPTS (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
#endif /* AVCODEC_VERSION_H */
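FF_API_FLAC_GLOBAL_OPTS follows the existing FF_API_* convention: deprecated code is compiled only while the guard is true and drops out automatically at the next major bump. A small, hedged usage sketch (the function name is illustrative):

    #include "version.h"   /* provides FF_API_FLAC_GLOBAL_OPTS */

    #if FF_API_FLAC_GLOBAL_OPTS
    /* legacy FLAC global-option handling stays here and compiles out once
       LIBAVCODEC_VERSION_MAJOR reaches 54 */
    static void handle_legacy_flac_opts(void)
    {
    }
    #endif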
diff --git a/libavcodec/vmdav.c b/libavcodec/vmdav.c
index 86e3683725..8d8bc61e42 100644
--- a/libavcodec/vmdav.c
+++ b/libavcodec/vmdav.c
@@ -358,6 +358,9 @@ static av_cold int vmdvideo_decode_init(AVCodecContext *avctx)
palette32[i] = (r << 16) | (g << 8) | (b);
}
+ avcodec_get_frame_defaults(&s->frame);
+ avcodec_get_frame_defaults(&s->prev_frame);
+
return 0;
}
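Several decoders in this merge (vcr1, vmdav, vmnc, vqavideo, wnv1, vp56) gain an avcodec_get_frame_defaults() call in their init path so that AVFrames embedded in the private context start out with the documented default field values instead of plain zeroed memory. A minimal sketch of the pattern, with a hypothetical context struct:

    typedef struct {
        AVFrame frame;                 /* embedded, not allocated via get_buffer() */
    } MyContext;                       /* hypothetical */

    static av_cold int my_decode_init(AVCodecContext *avctx)
    {
        MyContext *c = avctx->priv_data;

        /* reset the embedded frame to its default field values before first use */
        avcodec_get_frame_defaults(&c->frame);
        return 0;
    }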
diff --git a/libavcodec/vmnc.c b/libavcodec/vmnc.c
index e13452dcc6..6455d86f77 100644
--- a/libavcodec/vmnc.c
+++ b/libavcodec/vmnc.c
@@ -301,7 +301,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
}
c->pic.key_frame = 0;
- c->pic.pict_type = FF_P_TYPE;
+ c->pic.pict_type = AV_PICTURE_TYPE_P;
//restore screen after cursor
if(c->screendta) {
@@ -374,7 +374,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
break;
case MAGIC_WMVi: // ServerInitialization struct
c->pic.key_frame = 1;
- c->pic.pict_type = FF_I_TYPE;
+ c->pic.pict_type = AV_PICTURE_TYPE_I;
depth = *src++;
if(depth != c->bpp) {
av_log(avctx, AV_LOG_INFO, "Depth mismatch. Container %i bpp, Frame data: %i bpp\n", c->bpp, depth);
@@ -470,6 +470,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
c->bpp = avctx->bits_per_coded_sample;
c->bpp2 = c->bpp/8;
+ avcodec_get_frame_defaults(&c->pic);
switch(c->bpp){
case 8:
diff --git a/libavcodec/vorbis.c b/libavcodec/vorbis.c
index 68acc77ab9..a805452eab 100644
--- a/libavcodec/vorbis.c
+++ b/libavcodec/vorbis.c
@@ -53,9 +53,7 @@ unsigned int ff_vorbis_nth_root(unsigned int x, unsigned int n)
// reasonable to check redundantly.
int ff_vorbis_len2vlc(uint8_t *bits, uint32_t *codes, unsigned num)
{
- uint_fast32_t exit_at_level[33] = {
- 404, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+ uint32_t exit_at_level[33] = { 404 };
unsigned i, j, p, code;
@@ -106,7 +104,7 @@ int ff_vorbis_len2vlc(uint8_t *bits, uint32_t *codes, unsigned num)
#ifdef V_DEBUG
av_log(NULL, AV_LOG_INFO, " %d. code len %d code %d - ", p, bits[p], codes[p]);
- init_get_bits(&gb, (uint_fast8_t *)&codes[p], bits[p]);
+ init_get_bits(&gb, (uint8_t *)&codes[p], bits[p]);
for (i = 0; i < bits[p]; ++i)
av_log(NULL, AV_LOG_INFO, "%s", get_bits1(&gb) ? "1" : "0");
av_log(NULL, AV_LOG_INFO, "\n");
@@ -206,7 +204,7 @@ static void render_line(int x0, int y0, int x1, int y1, float *buf)
}
void ff_vorbis_floor1_render_list(vorbis_floor1_entry * list, int values,
- uint_fast16_t *y_list, int *flag,
+ uint16_t *y_list, int *flag,
int multiplier, float *out, int samples)
{
int lx, ly, i;
diff --git a/libavcodec/vorbis.h b/libavcodec/vorbis.h
index 28c1bcf636..15b5d85b36 100644
--- a/libavcodec/vorbis.h
+++ b/libavcodec/vorbis.h
@@ -30,17 +30,17 @@ extern const uint8_t ff_vorbis_encoding_channel_layout_offsets[8][8];
extern const int64_t ff_vorbis_channel_layouts[9];
typedef struct {
- uint_fast16_t x;
- uint_fast16_t sort;
- uint_fast16_t low;
- uint_fast16_t high;
+ uint16_t x;
+ uint16_t sort;
+ uint16_t low;
+ uint16_t high;
} vorbis_floor1_entry;
void ff_vorbis_ready_floor1_list(vorbis_floor1_entry * list, int values);
unsigned int ff_vorbis_nth_root(unsigned int x, unsigned int n); // x^(1/n)
int ff_vorbis_len2vlc(uint8_t *bits, uint32_t *codes, unsigned num);
void ff_vorbis_floor1_render_list(vorbis_floor1_entry * list, int values,
- uint_fast16_t * y_list, int * flag,
+ uint16_t *y_list, int *flag,
int multiplier, float * out, int samples);
void vorbis_inverse_coupling(float *mag, float *ang, int blocksize);
diff --git a/libavcodec/vorbisdec.c b/libavcodec/vorbisdec.c
index 161c54d6bc..f4b743e8ab 100644
--- a/libavcodec/vorbisdec.c
+++ b/libavcodec/vorbisdec.c
@@ -49,11 +49,11 @@
#include <assert.h>
typedef struct {
- uint_fast8_t dimensions;
- uint_fast8_t lookup_type;
- uint_fast8_t maxdepth;
- VLC vlc;
- float *codevectors;
+ uint8_t dimensions;
+ uint8_t lookup_type;
+ uint8_t maxdepth;
+ VLC vlc;
+ float *codevectors;
unsigned int nb_bits;
} vorbis_codebook;
@@ -65,63 +65,63 @@ typedef
int (* vorbis_floor_decode_func)
(struct vorbis_context_s *, vorbis_floor_data *, float *);
typedef struct {
- uint_fast8_t floor_type;
+ uint8_t floor_type;
vorbis_floor_decode_func decode;
union vorbis_floor_u {
struct vorbis_floor0_s {
- uint_fast8_t order;
- uint_fast16_t rate;
- uint_fast16_t bark_map_size;
- int_fast32_t *map[2];
- uint_fast32_t map_size[2];
- uint_fast8_t amplitude_bits;
- uint_fast8_t amplitude_offset;
- uint_fast8_t num_books;
- uint_fast8_t *book_list;
+ uint8_t order;
+ uint16_t rate;
+ uint16_t bark_map_size;
+ int32_t *map[2];
+ uint32_t map_size[2];
+ uint8_t amplitude_bits;
+ uint8_t amplitude_offset;
+ uint8_t num_books;
+ uint8_t *book_list;
float *lsp;
} t0;
struct vorbis_floor1_s {
- uint_fast8_t partitions;
- uint8_t partition_class[32];
- uint_fast8_t class_dimensions[16];
- uint_fast8_t class_subclasses[16];
- uint_fast8_t class_masterbook[16];
- int_fast16_t subclass_books[16][8];
- uint_fast8_t multiplier;
- uint_fast16_t x_list_dim;
+ uint8_t partitions;
+ uint8_t partition_class[32];
+ uint8_t class_dimensions[16];
+ uint8_t class_subclasses[16];
+ uint8_t class_masterbook[16];
+ int16_t subclass_books[16][8];
+ uint8_t multiplier;
+ uint16_t x_list_dim;
vorbis_floor1_entry *list;
} t1;
} data;
} vorbis_floor;
typedef struct {
- uint_fast16_t type;
- uint_fast32_t begin;
- uint_fast32_t end;
+ uint16_t type;
+ uint32_t begin;
+ uint32_t end;
unsigned partition_size;
- uint_fast8_t classifications;
- uint_fast8_t classbook;
- int_fast16_t books[64][8];
- uint_fast8_t maxpass;
- uint_fast16_t ptns_to_read;
- uint8_t *classifs;
+ uint8_t classifications;
+ uint8_t classbook;
+ int16_t books[64][8];
+ uint8_t maxpass;
+ uint16_t ptns_to_read;
+ uint8_t *classifs;
} vorbis_residue;
typedef struct {
- uint_fast8_t submaps;
- uint_fast16_t coupling_steps;
- uint_fast8_t *magnitude;
- uint_fast8_t *angle;
- uint_fast8_t *mux;
- uint_fast8_t submap_floor[16];
- uint_fast8_t submap_residue[16];
+ uint8_t submaps;
+ uint16_t coupling_steps;
+ uint8_t *magnitude;
+ uint8_t *angle;
+ uint8_t *mux;
+ uint8_t submap_floor[16];
+ uint8_t submap_residue[16];
} vorbis_mapping;
typedef struct {
- uint_fast8_t blockflag;
- uint_fast16_t windowtype;
- uint_fast16_t transformtype;
- uint_fast8_t mapping;
+ uint8_t blockflag;
+ uint16_t windowtype;
+ uint16_t transformtype;
+ uint8_t mapping;
} vorbis_mode;
typedef struct vorbis_context_s {
@@ -131,27 +131,27 @@ typedef struct vorbis_context_s {
FmtConvertContext fmt_conv;
FFTContext mdct[2];
- uint_fast8_t first_frame;
- uint_fast32_t version;
- uint_fast8_t audio_channels;
- uint_fast32_t audio_samplerate;
- uint_fast32_t bitrate_maximum;
- uint_fast32_t bitrate_nominal;
- uint_fast32_t bitrate_minimum;
- uint_fast32_t blocksize[2];
+ uint8_t first_frame;
+ uint32_t version;
+ uint8_t audio_channels;
+ uint32_t audio_samplerate;
+ uint32_t bitrate_maximum;
+ uint32_t bitrate_nominal;
+ uint32_t bitrate_minimum;
+ uint32_t blocksize[2];
const float *win[2];
- uint_fast16_t codebook_count;
+ uint16_t codebook_count;
vorbis_codebook *codebooks;
- uint_fast8_t floor_count;
+ uint8_t floor_count;
vorbis_floor *floors;
- uint_fast8_t residue_count;
+ uint8_t residue_count;
vorbis_residue *residues;
- uint_fast8_t mapping_count;
+ uint8_t mapping_count;
vorbis_mapping *mappings;
- uint_fast8_t mode_count;
+ uint8_t mode_count;
vorbis_mode *modes;
- uint_fast8_t mode_number; // mode number for the current packet
- uint_fast8_t previous_window;
+ uint8_t mode_number; // mode number for the current packet
+ uint8_t previous_window;
float *channel_residues;
float *channel_floors;
float *saved;
@@ -241,7 +241,7 @@ static int vorbis_parse_setup_hdr_codebooks(vorbis_context *vc)
uint8_t *tmp_vlc_bits;
uint32_t *tmp_vlc_codes;
GetBitContext *gb = &vc->gb;
- uint_fast16_t *codebook_multiplicands;
+ uint16_t *codebook_multiplicands;
vc->codebook_count = get_bits(gb, 8) + 1;
@@ -359,7 +359,7 @@ static int vorbis_parse_setup_hdr_codebooks(vorbis_context *vc)
codebook_multiplicands[i] = get_bits(gb, codebook_value_bits);
AV_DEBUG(" multiplicands*delta+minmum : %e \n", (float)codebook_multiplicands[i]*codebook_delta_value+codebook_minimum_value);
- AV_DEBUG(" multiplicand %d \n", codebook_multiplicands[i]);
+ AV_DEBUG(" multiplicand %u\n", codebook_multiplicands[i]);
}
// Weed out unused vlcs and build codevector vector
@@ -544,7 +544,7 @@ static int vorbis_parse_setup_hdr_floors(vorbis_context *vc)
rangemax = (1 << rangebits);
if (rangemax > vc->blocksize[1] / 2) {
av_log(vc->avccontext, AV_LOG_ERROR,
- "Floor value is too large for blocksize: %u (%"PRIuFAST32")\n",
+ "Floor value is too large for blocksize: %u (%"PRIu32")\n",
rangemax, vc->blocksize[1] / 2);
return -1;
}
@@ -652,7 +652,7 @@ static int vorbis_parse_setup_hdr_residues(vorbis_context *vc)
for (i = 0; i < vc->residue_count; ++i) {
vorbis_residue *res_setup = &vc->residues[i];
- uint_fast8_t cascade[64];
+ uint8_t cascade[64];
unsigned high_bits, low_bits;
res_setup->type = get_bits(gb, 16);
@@ -666,7 +666,10 @@ static int vorbis_parse_setup_hdr_residues(vorbis_context *vc)
if (res_setup->begin>res_setup->end ||
res_setup->end > vc->avccontext->channels * vc->blocksize[1] / 2 ||
(res_setup->end-res_setup->begin) / res_setup->partition_size > V_MAX_PARTITIONS) {
- av_log(vc->avccontext, AV_LOG_ERROR, "partition out of bounds: type, begin, end, size, blocksize: %"PRIdFAST16", %"PRIdFAST32", %"PRIdFAST32", %u, %"PRIdFAST32"\n", res_setup->type, res_setup->begin, res_setup->end, res_setup->partition_size, vc->blocksize[1] / 2);
+ av_log(vc->avccontext, AV_LOG_ERROR,
+ "partition out of bounds: type, begin, end, size, blocksize: %"PRIu16", %"PRIu32", %"PRIu32", %u, %"PRIu32"\n",
+ res_setup->type, res_setup->begin, res_setup->end,
+ res_setup->partition_size, vc->blocksize[1] / 2);
return -1;
}
@@ -790,12 +793,12 @@ static void create_map(vorbis_context *vc, unsigned floor_number)
vorbis_floor0 *vf;
int idx;
int blockflag, n;
- int_fast32_t *map;
+ int32_t *map;
for (blockflag = 0; blockflag < 2; ++blockflag) {
n = vc->blocksize[blockflag] / 2;
floors[floor_number].data.t0.map[blockflag] =
- av_malloc((n+1) * sizeof(int_fast32_t)); // n + sentinel
+ av_malloc((n + 1) * sizeof(int32_t)); // n + sentinel
map = floors[floor_number].data.t0.map[blockflag];
vf = &floors[floor_number].data.t0;
@@ -1145,10 +1148,10 @@ static int vorbis_floor1_decode(vorbis_context *vc,
{
vorbis_floor1 *vf = &vfu->t1;
GetBitContext *gb = &vc->gb;
- uint_fast16_t range_v[4] = { 256, 128, 86, 64 };
- unsigned range = range_v[vf->multiplier-1];
- uint_fast16_t floor1_Y[258];
- uint_fast16_t floor1_Y_final[258];
+ uint16_t range_v[4] = { 256, 128, 86, 64 };
+ unsigned range = range_v[vf->multiplier - 1];
+ uint16_t floor1_Y[258];
+ uint16_t floor1_Y_final[258];
int floor1_flag[258];
unsigned partition_class, cdim, cbits, csub, cval, offset, i, j;
int book, adx, ady, dy, off, predicted, err;
@@ -1250,7 +1253,7 @@ static int vorbis_floor1_decode(vorbis_context *vc,
floor1_Y_final[i] = predicted;
}
- AV_DEBUG(" Decoded floor(%d) = %d / val %u\n",
+ AV_DEBUG(" Decoded floor(%d) = %u / val %u\n",
vf->list[i].x, floor1_Y_final[i], val);
}
@@ -1268,7 +1271,7 @@ static int vorbis_floor1_decode(vorbis_context *vc,
static av_always_inline int vorbis_residue_decode_internal(vorbis_context *vc,
vorbis_residue *vr,
unsigned ch,
- uint_fast8_t *do_not_decode,
+ uint8_t *do_not_decode,
float *vec,
unsigned vlen,
int vr_type)
@@ -1405,7 +1408,7 @@ static av_always_inline int vorbis_residue_decode_internal(vorbis_context *vc,
static inline int vorbis_residue_decode(vorbis_context *vc, vorbis_residue *vr,
unsigned ch,
- uint_fast8_t *do_not_decode,
+ uint8_t *do_not_decode,
float *vec, unsigned vlen)
{
if (vr->type == 2)
@@ -1453,12 +1456,12 @@ static int vorbis_parse_audio_packet(vorbis_context *vc)
unsigned previous_window = vc->previous_window;
unsigned mode_number, blockflag, blocksize;
int i, j;
- uint_fast8_t no_residue[255];
- uint_fast8_t do_not_decode[255];
+ uint8_t no_residue[255];
+ uint8_t do_not_decode[255];
vorbis_mapping *mapping;
float *ch_res_ptr = vc->channel_residues;
float *ch_floor_ptr = vc->channel_floors;
- uint_fast8_t res_chan[255];
+ uint8_t res_chan[255];
unsigned res_num = 0;
int retlen = 0;
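The vorbis decoder replaces its uint_fast*/int_fast* fields with exact-width types, and the log calls change with them, since the conversion specifier must match the type (PRIuFAST32 becomes PRIu32, and so on). A small, hedged illustration of the matching pair (the value is made up):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t rangemax = 1024;      /* hypothetical value */

        /* with uint32_t the portable specifier is PRIu32, not PRIuFAST32 */
        printf("Floor value is too large for blocksize: %"PRIu32"\n", rangemax);
        return 0;
    }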
diff --git a/libavcodec/vorbisenc.c b/libavcodec/vorbisenc.c
index d15d3454eb..617e2b7cc4 100644
--- a/libavcodec/vorbisenc.c
+++ b/libavcodec/vorbisenc.c
@@ -674,7 +674,7 @@ static float get_floor_average(vorbis_enc_floor * fc, float *coeffs, int i)
}
static void floor_fit(vorbis_enc_context *venc, vorbis_enc_floor *fc,
- float *coeffs, uint_fast16_t *posts, int samples)
+ float *coeffs, uint16_t *posts, int samples)
{
int range = 255 / fc->multiplier + 1;
int i;
@@ -706,7 +706,7 @@ static int render_point(int x0, int y0, int x1, int y1, int x)
}
static void floor_encode(vorbis_enc_context *venc, vorbis_enc_floor *fc,
- PutBitContext *pb, uint_fast16_t *posts,
+ PutBitContext *pb, uint16_t *posts,
float *floor, int samples)
{
int range = 255 / fc->multiplier + 1;
@@ -1010,7 +1010,7 @@ static int vorbis_encode_frame(AVCodecContext *avccontext,
for (i = 0; i < venc->channels; i++) {
vorbis_enc_floor *fc = &venc->floors[mapping->floor[mapping->mux[i]]];
- uint_fast16_t posts[MAX_FLOOR_VALUES];
+ uint16_t posts[MAX_FLOOR_VALUES];
floor_fit(venc, fc, &venc->coeffs[i * samples], posts, samples);
floor_encode(venc, fc, &pb, posts, &venc->floor[i * samples], samples);
}
diff --git a/libavcodec/vp3.c b/libavcodec/vp3.c
index 97e5d64351..148f1179e3 100644
--- a/libavcodec/vp3.c
+++ b/libavcodec/vp3.c
@@ -1876,7 +1876,7 @@ static int vp3_decode_frame(AVCodecContext *avctx,
return buf_size;
s->current_frame.reference = 3;
- s->current_frame.pict_type = s->keyframe ? FF_I_TYPE : FF_P_TYPE;
+ s->current_frame.pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
if (ff_thread_get_buffer(avctx, &s->current_frame) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
goto error;
@@ -1908,7 +1908,7 @@ static int vp3_decode_frame(AVCodecContext *avctx,
av_log(s->avctx, AV_LOG_WARNING, "vp3: first frame not a keyframe\n");
s->golden_frame.reference = 3;
- s->golden_frame.pict_type = FF_I_TYPE;
+ s->golden_frame.pict_type = AV_PICTURE_TYPE_I;
if (ff_thread_get_buffer(avctx, &s->golden_frame) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
goto error;
diff --git a/libavcodec/vp3_parser.c b/libavcodec/vp3_parser.c
index 4453447aaf..320f9870f9 100644
--- a/libavcodec/vp3_parser.c
+++ b/libavcodec/vp3_parser.c
@@ -26,9 +26,9 @@ static int parse(AVCodecParserContext *s,
const uint8_t *buf, int buf_size)
{
if(avctx->codec_id == CODEC_ID_THEORA)
- s->pict_type= (buf[0]&0x40) ? FF_P_TYPE : FF_I_TYPE;
+ s->pict_type= (buf[0]&0x40) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
else
- s->pict_type= (buf[0]&0x80) ? FF_P_TYPE : FF_I_TYPE;
+ s->pict_type= (buf[0]&0x80) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
*poutbuf = buf;
*poutbuf_size = buf_size;
diff --git a/libavcodec/vp56.c b/libavcodec/vp56.c
index 3412e3dce2..ad451c251f 100644
--- a/libavcodec/vp56.c
+++ b/libavcodec/vp56.c
@@ -526,12 +526,12 @@ int ff_vp56_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
}
if (p->key_frame) {
- p->pict_type = FF_I_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_I;
s->default_models_init(s);
for (block=0; block<s->mb_height*s->mb_width; block++)
s->macroblocks[block].type = VP56_MB_INTRA;
} else {
- p->pict_type = FF_P_TYPE;
+ p->pict_type = AV_PICTURE_TYPE_P;
vp56_parse_mb_type_models(s);
s->parse_vector_models(s);
s->mb_type = VP56_MB_INTER_NOVEC_PF;
@@ -653,8 +653,10 @@ av_cold void ff_vp56_init(AVCodecContext *avctx, int flip, int has_alpha)
ff_vp56dsp_init(&s->vp56dsp, avctx->codec->id);
ff_init_scantable(s->dsp.idct_permutation, &s->scantable,ff_zigzag_direct);
- for (i=0; i<4; i++)
+ for (i=0; i<4; i++) {
s->framep[i] = &s->frames[i];
+ avcodec_get_frame_defaults(&s->frames[i]);
+ }
s->framep[VP56_FRAME_UNUSED] = s->framep[VP56_FRAME_GOLDEN];
s->framep[VP56_FRAME_UNUSED2] = s->framep[VP56_FRAME_GOLDEN2];
s->edge_emu_buffer_alloc = NULL;
diff --git a/libavcodec/vp8.c b/libavcodec/vp8.c
index 760958ef84..6bef32973d 100644
--- a/libavcodec/vp8.c
+++ b/libavcodec/vp8.c
@@ -27,6 +27,7 @@
#include "vp8.h"
#include "vp8data.h"
#include "rectangle.h"
+#include "thread.h"
#if ARCH_ARM
# include "arm/vp8.h"
@@ -37,9 +38,11 @@ static void vp8_decode_flush(AVCodecContext *avctx)
VP8Context *s = avctx->priv_data;
int i;
- for (i = 0; i < 4; i++)
- if (s->frames[i].data[0])
- avctx->release_buffer(avctx, &s->frames[i]);
+ if (!avctx->is_copy) {
+ for (i = 0; i < 5; i++)
+ if (s->frames[i].data[0])
+ ff_thread_release_buffer(avctx, &s->frames[i]);
+ }
memset(s->framep, 0, sizeof(s->framep));
av_freep(&s->macroblocks_base);
@@ -55,12 +58,15 @@ static void vp8_decode_flush(AVCodecContext *avctx)
static int update_dimensions(VP8Context *s, int width, int height)
{
- if (av_image_check_size(width, height, 0, s->avctx))
- return AVERROR_INVALIDDATA;
+ if (width != s->avctx->width ||
+ height != s->avctx->height) {
+ if (av_image_check_size(width, height, 0, s->avctx))
+ return AVERROR_INVALIDDATA;
- vp8_decode_flush(s->avctx);
+ vp8_decode_flush(s->avctx);
- avcodec_set_dimensions(s->avctx, width, height);
+ avcodec_set_dimensions(s->avctx, width, height);
+ }
s->mb_width = (s->avctx->coded_width +15) / 16;
s->mb_height = (s->avctx->coded_height+15) / 16;
@@ -579,12 +585,14 @@ void decode_intra4x4_modes(VP8Context *s, VP56RangeCoder *c,
}
static av_always_inline
-void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, uint8_t *segment)
+void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, uint8_t *segment, uint8_t *ref)
{
VP56RangeCoder *c = &s->c;
if (s->segmentation.update_map)
*segment = vp8_rac_get_tree(c, vp8_segmentid_tree, s->prob->segmentid);
+ else
+ *segment = ref ? *ref : *segment;
s->segment = *segment;
mb->skip = s->mbskip_enabled ? vp56_rac_get_prob(c, s->prob->mbskip) : 0;
@@ -1043,11 +1051,13 @@ static const uint8_t subpel_idx[3][8] = {
* @param mc_func motion compensation function pointers (bilinear or sixtap MC)
*/
static av_always_inline
-void vp8_mc_luma(VP8Context *s, uint8_t *dst, uint8_t *src, const VP56mv *mv,
+void vp8_mc_luma(VP8Context *s, uint8_t *dst, AVFrame *ref, const VP56mv *mv,
int x_off, int y_off, int block_w, int block_h,
int width, int height, int linesize,
vp8_mc_func mc_func[3][3])
{
+ uint8_t *src = ref->data[0];
+
if (AV_RN32A(mv)) {
int mx = (mv->x << 1)&7, mx_idx = subpel_idx[0][mx];
@@ -1057,6 +1067,7 @@ void vp8_mc_luma(VP8Context *s, uint8_t *dst, uint8_t *src, const VP56mv *mv,
y_off += mv->y >> 2;
// edge emulation
+ ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 4, 0);
src += y_off * linesize + x_off;
if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
@@ -1066,16 +1077,20 @@ void vp8_mc_luma(VP8Context *s, uint8_t *dst, uint8_t *src, const VP56mv *mv,
src = s->edge_emu_buffer + mx_idx + linesize * my_idx;
}
mc_func[my_idx][mx_idx](dst, linesize, src, linesize, block_h, mx, my);
- } else
+ } else {
+ ff_thread_await_progress(ref, (3 + y_off + block_h) >> 4, 0);
mc_func[0][0](dst, linesize, src + y_off * linesize + x_off, linesize, block_h, 0, 0);
+ }
}
static av_always_inline
-void vp8_mc_chroma(VP8Context *s, uint8_t *dst1, uint8_t *dst2, uint8_t *src1,
- uint8_t *src2, const VP56mv *mv, int x_off, int y_off,
+void vp8_mc_chroma(VP8Context *s, uint8_t *dst1, uint8_t *dst2, AVFrame *ref,
+ const VP56mv *mv, int x_off, int y_off,
int block_w, int block_h, int width, int height, int linesize,
vp8_mc_func mc_func[3][3])
{
+ uint8_t *src1 = ref->data[1], *src2 = ref->data[2];
+
if (AV_RN32A(mv)) {
int mx = mv->x&7, mx_idx = subpel_idx[0][mx];
int my = mv->y&7, my_idx = subpel_idx[0][my];
@@ -1086,6 +1101,7 @@ void vp8_mc_chroma(VP8Context *s, uint8_t *dst1, uint8_t *dst2, uint8_t *src1,
// edge emulation
src1 += y_off * linesize + x_off;
src2 += y_off * linesize + x_off;
+ ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 3, 0);
if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
s->dsp.emulated_edge_mc(s->edge_emu_buffer, src1 - my_idx * linesize - mx_idx, linesize,
@@ -1104,6 +1120,7 @@ void vp8_mc_chroma(VP8Context *s, uint8_t *dst1, uint8_t *dst2, uint8_t *src1,
mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
}
} else {
+ ff_thread_await_progress(ref, (3 + y_off + block_h) >> 3, 0);
mc_func[0][0](dst1, linesize, src1 + y_off * linesize + x_off, linesize, block_h, 0, 0);
mc_func[0][0](dst2, linesize, src2 + y_off * linesize + x_off, linesize, block_h, 0, 0);
}
@@ -1120,7 +1137,7 @@ void vp8_mc_part(VP8Context *s, uint8_t *dst[3],
/* Y */
vp8_mc_luma(s, dst[0] + by_off * s->linesize + bx_off,
- ref_frame->data[0], mv, x_off + bx_off, y_off + by_off,
+ ref_frame, mv, x_off + bx_off, y_off + by_off,
block_w, block_h, width, height, s->linesize,
s->put_pixels_tab[block_w == 8]);
@@ -1134,8 +1151,8 @@ void vp8_mc_part(VP8Context *s, uint8_t *dst[3],
width >>= 1; height >>= 1;
block_w >>= 1; block_h >>= 1;
vp8_mc_chroma(s, dst[1] + by_off * s->uvlinesize + bx_off,
- dst[2] + by_off * s->uvlinesize + bx_off, ref_frame->data[1],
- ref_frame->data[2], &uvmv, x_off + bx_off, y_off + by_off,
+ dst[2] + by_off * s->uvlinesize + bx_off, ref_frame,
+ &uvmv, x_off + bx_off, y_off + by_off,
block_w, block_h, width, height, s->uvlinesize,
s->put_pixels_tab[1 + (block_w == 4)]);
}
@@ -1151,6 +1168,9 @@ static av_always_inline void prefetch_motion(VP8Context *s, VP8Macroblock *mb, i
int my = (mb->mv.y>>2) + y_off;
uint8_t **src= s->framep[ref]->data;
int off= mx + (my + (mb_x&3)*4)*s->linesize + 64;
+ /* For threading, a ff_thread_await_progress here might be useful, but
+ * it actually slows down the decoder. Since a bad prefetch doesn't
+ * generate bad decoder output, we don't run it here. */
s->dsp.prefetch(src[0]+off, s->linesize, 4);
off= (mx>>1) + ((my>>1) + (mb_x&7))*s->uvlinesize + 64;
s->dsp.prefetch(src[1]+off, src[2]-src[1], 2);
@@ -1182,7 +1202,7 @@ void inter_predict(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb,
for (y = 0; y < 4; y++) {
for (x = 0; x < 4; x++) {
vp8_mc_luma(s, dst[0] + 4*y*s->linesize + x*4,
- ref->data[0], &bmv[4*y + x],
+ ref, &bmv[4*y + x],
4*x + x_off, 4*y + y_off, 4, 4,
width, height, s->linesize,
s->put_pixels_tab[2]);
@@ -1208,8 +1228,7 @@ void inter_predict(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb,
uvmv.y &= ~7;
}
vp8_mc_chroma(s, dst[1] + 4*y*s->uvlinesize + x*4,
- dst[2] + 4*y*s->uvlinesize + x*4,
- ref->data[1], ref->data[2], &uvmv,
+ dst[2] + 4*y*s->uvlinesize + x*4, ref, &uvmv,
4*x + x_off, 4*y + y_off, 4, 4,
width, height, s->uvlinesize,
s->put_pixels_tab[2]);
@@ -1310,9 +1329,7 @@ static av_always_inline void filter_level_for_mb(VP8Context *s, VP8Macroblock *m
filter_level += s->lf_delta.mode[mb->mode];
}
-/* Like av_clip for inputs 0 and max, where max is equal to (2^n-1) */
-#define POW2CLIP(x,max) (((x) & ~max) ? (-(x))>>31 & max : (x));
- filter_level = POW2CLIP(filter_level, 63);
+ filter_level = av_clip_uintp2(filter_level, 6);
interior_limit = filter_level;
if (s->filter.sharpness) {
@@ -1427,13 +1444,13 @@ static av_always_inline void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8Fi
}
}
-static void filter_mb_row(VP8Context *s, int mb_y)
+static void filter_mb_row(VP8Context *s, AVFrame *curframe, int mb_y)
{
VP8FilterStrength *f = s->filter_strength;
uint8_t *dst[3] = {
- s->framep[VP56_FRAME_CURRENT]->data[0] + 16*mb_y*s->linesize,
- s->framep[VP56_FRAME_CURRENT]->data[1] + 8*mb_y*s->uvlinesize,
- s->framep[VP56_FRAME_CURRENT]->data[2] + 8*mb_y*s->uvlinesize
+ curframe->data[0] + 16*mb_y*s->linesize,
+ curframe->data[1] + 8*mb_y*s->uvlinesize,
+ curframe->data[2] + 8*mb_y*s->uvlinesize
};
int mb_x;
@@ -1446,10 +1463,10 @@ static void filter_mb_row(VP8Context *s, int mb_y)
}
}
-static void filter_mb_row_simple(VP8Context *s, int mb_y)
+static void filter_mb_row_simple(VP8Context *s, AVFrame *curframe, int mb_y)
{
VP8FilterStrength *f = s->filter_strength;
- uint8_t *dst = s->framep[VP56_FRAME_CURRENT]->data[0] + 16*mb_y*s->linesize;
+ uint8_t *dst = curframe->data[0] + 16*mb_y*s->linesize;
int mb_x;
for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
@@ -1465,7 +1482,7 @@ static int vp8_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
VP8Context *s = avctx->priv_data;
int ret, mb_x, mb_y, i, y, referenced;
enum AVDiscard skip_thresh;
- AVFrame *av_uninit(curframe);
+ AVFrame *av_uninit(curframe), *prev_frame = s->framep[VP56_FRAME_CURRENT];
if ((ret = decode_frame_header(s, avpkt->data, avpkt->size)) < 0)
return ret;
@@ -1482,24 +1499,60 @@ static int vp8_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
}
s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh;
- for (i = 0; i < 4; i++)
- if (&s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
+ // release no longer referenced frames
+ for (i = 0; i < 5; i++)
+ if (s->frames[i].data[0] &&
+ &s->frames[i] != prev_frame &&
+ &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
+ &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
+ &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2])
+ ff_thread_release_buffer(avctx, &s->frames[i]);
+
+ // find a free buffer
+ for (i = 0; i < 5; i++)
+ if (&s->frames[i] != prev_frame &&
+ &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
&s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
&s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) {
curframe = s->framep[VP56_FRAME_CURRENT] = &s->frames[i];
break;
}
+ if (i == 5) {
+ av_log(avctx, AV_LOG_FATAL, "Ran out of free frames!\n");
+ abort();
+ }
if (curframe->data[0])
- avctx->release_buffer(avctx, curframe);
+ ff_thread_release_buffer(avctx, curframe);
curframe->key_frame = s->keyframe;
- curframe->pict_type = s->keyframe ? FF_I_TYPE : FF_P_TYPE;
+ curframe->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
curframe->reference = referenced ? 3 : 0;
- if ((ret = avctx->get_buffer(avctx, curframe))) {
+ curframe->ref_index[0] = s->segmentation_map;
+ if ((ret = ff_thread_get_buffer(avctx, curframe))) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed!\n");
return ret;
}
+ // check if golden and altref are swapped
+ if (s->update_altref != VP56_FRAME_NONE) {
+ s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[s->update_altref];
+ } else {
+ s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[VP56_FRAME_GOLDEN2];
+ }
+ if (s->update_golden != VP56_FRAME_NONE) {
+ s->next_framep[VP56_FRAME_GOLDEN] = s->framep[s->update_golden];
+ } else {
+ s->next_framep[VP56_FRAME_GOLDEN] = s->framep[VP56_FRAME_GOLDEN];
+ }
+ if (s->update_last) {
+ s->next_framep[VP56_FRAME_PREVIOUS] = curframe;
+ } else {
+ s->next_framep[VP56_FRAME_PREVIOUS] = s->framep[VP56_FRAME_PREVIOUS];
+ }
+ s->next_framep[VP56_FRAME_CURRENT] = curframe;
+
+ ff_thread_finish_setup(avctx);
+
// Given that arithmetic probabilities are updated every frame, it's quite likely
// that the values we have on a random interframe are complete junk if we didn't
// start decode on a keyframe. So just don't display anything rather than junk.
@@ -1530,7 +1583,7 @@ static int vp8_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
if (s->keyframe)
memset(s->intra4x4_pred_mode_top, DC_PRED, s->mb_width*4);
- #define MARGIN (16 << 2)
+#define MARGIN (16 << 2)
s->mv_min.y = -MARGIN;
s->mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
@@ -1559,13 +1612,16 @@ static int vp8_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
s->mv_min.x = -MARGIN;
s->mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
+ if (prev_frame && s->segmentation.enabled && s->segmentation.update_map)
+ ff_thread_await_progress(prev_frame, mb_y, 0);
for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
/* Prefetch the current frame, 4 MBs ahead */
s->dsp.prefetch(dst[0] + (mb_x&3)*4*s->linesize + 64, s->linesize, 4);
s->dsp.prefetch(dst[1] + (mb_x&7)*s->uvlinesize + 64, dst[2] - dst[1], 2);
- decode_mb_mode(s, mb, mb_x, mb_y, s->segmentation_map + mb_xy);
+ decode_mb_mode(s, mb, mb_x, mb_y, s->segmentation_map + mb_xy,
+ prev_frame ? prev_frame->ref_index[0] + mb_xy : NULL);
prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_PREVIOUS);
@@ -1605,46 +1661,27 @@ static int vp8_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
}
if (s->deblock_filter) {
if (s->filter.simple)
- filter_mb_row_simple(s, mb_y);
+ filter_mb_row_simple(s, curframe, mb_y);
else
- filter_mb_row(s, mb_y);
+ filter_mb_row(s, curframe, mb_y);
}
s->mv_min.y -= 64;
s->mv_max.y -= 64;
+
+ ff_thread_report_progress(curframe, mb_y, 0);
}
+ ff_thread_report_progress(curframe, INT_MAX, 0);
skip_decode:
// if future frames don't use the updated probabilities,
// reset them to the values we saved
if (!s->update_probabilities)
s->prob[0] = s->prob[1];
- // check if golden and altref are swapped
- if (s->update_altref == VP56_FRAME_GOLDEN &&
- s->update_golden == VP56_FRAME_GOLDEN2)
- FFSWAP(AVFrame *, s->framep[VP56_FRAME_GOLDEN], s->framep[VP56_FRAME_GOLDEN2]);
- else {
- if (s->update_altref != VP56_FRAME_NONE)
- s->framep[VP56_FRAME_GOLDEN2] = s->framep[s->update_altref];
-
- if (s->update_golden != VP56_FRAME_NONE)
- s->framep[VP56_FRAME_GOLDEN] = s->framep[s->update_golden];
- }
-
- if (s->update_last) // move cur->prev
- s->framep[VP56_FRAME_PREVIOUS] = s->framep[VP56_FRAME_CURRENT];
-
- // release no longer referenced frames
- for (i = 0; i < 4; i++)
- if (s->frames[i].data[0] &&
- &s->frames[i] != s->framep[VP56_FRAME_CURRENT] &&
- &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
- &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
- &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2])
- avctx->release_buffer(avctx, &s->frames[i]);
+ memcpy(&s->framep[0], &s->next_framep[0], sizeof(s->framep[0]) * 4);
if (!s->invisible) {
- *(AVFrame*)data = *s->framep[VP56_FRAME_CURRENT];
+ *(AVFrame*)data = *curframe;
*data_size = sizeof(AVFrame);
}
@@ -1671,6 +1708,36 @@ static av_cold int vp8_decode_free(AVCodecContext *avctx)
return 0;
}
+static av_cold int vp8_decode_init_thread_copy(AVCodecContext *avctx)
+{
+ VP8Context *s = avctx->priv_data;
+
+ s->avctx = avctx;
+
+ return 0;
+}
+
+#define REBASE(pic) \
+ pic ? pic - &s_src->frames[0] + &s->frames[0] : NULL
+
+static int vp8_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
+{
+ VP8Context *s = dst->priv_data, *s_src = src->priv_data;
+
+ s->prob[0] = s_src->prob[!s_src->update_probabilities];
+ s->segmentation = s_src->segmentation;
+ s->lf_delta = s_src->lf_delta;
+ memcpy(s->sign_bias, s_src->sign_bias, sizeof(s->sign_bias));
+
+ memcpy(&s->frames, &s_src->frames, sizeof(s->frames));
+ s->framep[0] = REBASE(s_src->next_framep[0]);
+ s->framep[1] = REBASE(s_src->next_framep[1]);
+ s->framep[2] = REBASE(s_src->next_framep[2]);
+ s->framep[3] = REBASE(s_src->next_framep[3]);
+
+ return 0;
+}
+
AVCodec ff_vp8_decoder = {
"vp8",
AVMEDIA_TYPE_VIDEO,
@@ -1680,7 +1747,9 @@ AVCodec ff_vp8_decoder = {
NULL,
vp8_decode_free,
vp8_decode_frame,
- CODEC_CAP_DR1,
+ CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
.flush = vp8_decode_flush,
.long_name = NULL_IF_CONFIG_SMALL("On2 VP8"),
+ .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp8_decode_init_thread_copy),
+ .update_thread_context = ONLY_IF_THREADS_ENABLED(vp8_decode_update_thread_context),
};
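The VP8 changes convert the decoder to frame-level multithreading: buffers come from ff_thread_get_buffer(), the next-frame pointers are published before ff_thread_finish_setup(), each finished macroblock row is announced with ff_thread_report_progress(), and motion compensation waits on the reference frame with ff_thread_await_progress(). The sketch below shows the general report/await handshake with plain pthreads; it is illustrative only, not FFmpeg's actual implementation:

    #include <pthread.h>

    typedef struct {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int             progress;      /* highest completed macroblock row */
    } ProgressFrame;                   /* hypothetical type */

    /* decoder thread: call after finishing row 'row' of its frame */
    static void report_progress(ProgressFrame *f, int row)
    {
        pthread_mutex_lock(&f->lock);
        f->progress = row;
        pthread_cond_broadcast(&f->cond);
        pthread_mutex_unlock(&f->lock);
    }

    /* consumer thread: call before reading reference rows up to 'row' */
    static void await_progress(ProgressFrame *f, int row)
    {
        pthread_mutex_lock(&f->lock);
        while (f->progress < row)
            pthread_cond_wait(&f->cond, &f->lock);
        pthread_mutex_unlock(&f->lock);
    }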
diff --git a/libavcodec/vp8.h b/libavcodec/vp8.h
index 2db056f073..d706b34aa6 100644
--- a/libavcodec/vp8.h
+++ b/libavcodec/vp8.h
@@ -86,6 +86,7 @@ typedef struct {
typedef struct {
AVCodecContext *avctx;
AVFrame *framep[4];
+ AVFrame *next_framep[4];
uint8_t *edge_emu_buffer;
uint16_t mb_width; /* number of horizontal MB */
@@ -235,7 +236,7 @@ typedef struct {
VP8DSPContext vp8dsp;
H264PredContext hpc;
vp8_mc_func put_pixels_tab[3][3][3];
- AVFrame frames[4];
+ AVFrame frames[5];
} VP8Context;
#endif
diff --git a/libavcodec/vp8_parser.c b/libavcodec/vp8_parser.c
index e769e33abe..aebf667e73 100644
--- a/libavcodec/vp8_parser.c
+++ b/libavcodec/vp8_parser.c
@@ -25,7 +25,7 @@ static int parse(AVCodecParserContext *s,
const uint8_t **poutbuf, int *poutbuf_size,
const uint8_t *buf, int buf_size)
{
- s->pict_type= (buf[0]&0x01) ? FF_P_TYPE : FF_I_TYPE;
+ s->pict_type= (buf[0]&0x01) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
*poutbuf = buf;
*poutbuf_size = buf_size;
diff --git a/libavcodec/vqavideo.c b/libavcodec/vqavideo.c
index 109faab4fb..272d80906e 100644
--- a/libavcodec/vqavideo.c
+++ b/libavcodec/vqavideo.c
@@ -187,6 +187,7 @@ static av_cold int vqa_decode_init(AVCodecContext *avctx)
(s->height / s->vector_height) * 2;
s->decode_buffer = av_malloc(s->decode_buffer_size);
+ avcodec_get_frame_defaults(&s->frame);
s->frame.data[0] = NULL;
return 0;
diff --git a/libavcodec/wmavoice.c b/libavcodec/wmavoice.c
index cd640b01fc..3604eac782 100644
--- a/libavcodec/wmavoice.c
+++ b/libavcodec/wmavoice.c
@@ -315,7 +315,7 @@ static av_cold int decode_vbmtree(GetBitContext *gb, int8_t vbm_tree[25])
};
int cntr[8], n, res;
- memset(vbm_tree, 0xff, sizeof(vbm_tree));
+ memset(vbm_tree, 0xff, sizeof(vbm_tree[0]) * 25);
memset(cntr, 0, sizeof(cntr));
for (n = 0; n < 17; n++) {
res = get_bits(gb, 3);
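The wmavoice change fixes a classic sizeof pitfall: an array declared as a function parameter decays to a pointer, so sizeof(vbm_tree) yields the pointer size (4 or 8) rather than 25, and the old memset cleared only part of the table. A minimal, hedged illustration (init_tree is a hypothetical name):

    #include <stdint.h>
    #include <string.h>

    static void init_tree(int8_t vbm_tree[25])
    {
        /* wrong: sizeof(vbm_tree) == sizeof(int8_t *) here, not 25 */
        /* memset(vbm_tree, 0xff, sizeof(vbm_tree)); */

        /* right: spell out the element count explicitly */
        memset(vbm_tree, 0xff, sizeof(vbm_tree[0]) * 25);
    }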
diff --git a/libavcodec/wmv2dec.c b/libavcodec/wmv2dec.c
index 3214aae42b..21d1eeca3b 100644
--- a/libavcodec/wmv2dec.c
+++ b/libavcodec/wmv2dec.c
@@ -120,7 +120,7 @@ int ff_wmv2_decode_picture_header(MpegEncContext * s)
decode_ext_header(w);
s->pict_type = get_bits1(&s->gb) + 1;
- if(s->pict_type == FF_I_TYPE){
+ if(s->pict_type == AV_PICTURE_TYPE_I){
code = get_bits(&s->gb, 7);
av_log(s->avctx, AV_LOG_DEBUG, "I7:%X/\n", code);
}
@@ -135,7 +135,7 @@ int ff_wmv2_decode_secondary_picture_header(MpegEncContext * s)
{
Wmv2Context * const w= (Wmv2Context*)s;
- if (s->pict_type == FF_I_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_I) {
if(w->j_type_bit) w->j_type= get_bits1(&s->gb);
else w->j_type= 0; //FIXME check
@@ -342,7 +342,7 @@ int ff_wmv2_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
if(w->j_type) return 0;
- if (s->pict_type == FF_P_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_P) {
if(IS_SKIP(s->current_picture.mb_type[s->mb_y * s->mb_stride + s->mb_x])){
/* skip mb */
s->mb_intra = 0;
@@ -419,7 +419,7 @@ int ff_wmv2_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
}
}
} else {
-//if(s->pict_type==FF_P_TYPE)
+//if(s->pict_type==AV_PICTURE_TYPE_P)
// printf("%d%d ", s->inter_intra_pred, cbp);
//printf("I at %d %d %d %06X\n", s->mb_x, s->mb_y, ((cbp&3)? 1 : 0) +((cbp&0x3C)? 2 : 0), show_bits(&s->gb, 24));
s->ac_pred = get_bits1(&s->gb);
@@ -480,7 +480,6 @@ AVCodec ff_wmv2_decoder = {
wmv2_decode_end,
ff_h263_decode_frame,
CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
- .max_lowres = 3,
.long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 8"),
.pix_fmts= ff_pixfmt_list_420,
};
diff --git a/libavcodec/wmv2enc.c b/libavcodec/wmv2enc.c
index f2ab55b7fb..4a074e674c 100644
--- a/libavcodec/wmv2enc.c
+++ b/libavcodec/wmv2enc.c
@@ -72,7 +72,7 @@ int ff_wmv2_encode_picture_header(MpegEncContext * s, int picture_number)
Wmv2Context * const w= (Wmv2Context*)s;
put_bits(&s->pb, 1, s->pict_type - 1);
- if(s->pict_type == FF_I_TYPE){
+ if(s->pict_type == AV_PICTURE_TYPE_I){
put_bits(&s->pb, 7, 0);
}
put_bits(&s->pb, 5, s->qscale);
@@ -87,7 +87,7 @@ int ff_wmv2_encode_picture_header(MpegEncContext * s, int picture_number)
assert(s->flipflop_rounding);
- if (s->pict_type == FF_I_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_I) {
assert(s->no_rounding==1);
if(w->j_type_bit) put_bits(&s->pb, 1, w->j_type);
@@ -191,7 +191,7 @@ void ff_wmv2_encode_mb(MpegEncContext * s,
coded_cbp |= val << (5 - i);
}
- if (s->pict_type == FF_I_TYPE) {
+ if (s->pict_type == AV_PICTURE_TYPE_I) {
put_bits(&s->pb,
ff_msmp4_mb_i_table[coded_cbp][1], ff_msmp4_mb_i_table[coded_cbp][0]);
} else {
diff --git a/libavcodec/wnv1.c b/libavcodec/wnv1.c
index 15d90c1a89..197cf7985d 100644
--- a/libavcodec/wnv1.c
+++ b/libavcodec/wnv1.c
@@ -136,6 +136,7 @@ static av_cold int decode_init(AVCodecContext *avctx){
l->avctx = avctx;
avctx->pix_fmt = PIX_FMT_YUV422P;
+ avcodec_get_frame_defaults(&l->pic);
code_vlc.table = code_table;
code_vlc.table_allocated = 1 << CODE_VLC_BITS;
diff --git a/libavcodec/x86/Makefile b/libavcodec/x86/Makefile
index 6416e600a3..4775853697 100644
--- a/libavcodec/x86/Makefile
+++ b/libavcodec/x86/Makefile
@@ -9,6 +9,7 @@ YASM-OBJS-$(CONFIG_FFT) += x86/fft_mmx.o \
MMX-OBJS-$(CONFIG_H264DSP) += x86/h264dsp_mmx.o
YASM-OBJS-$(CONFIG_H264DSP) += x86/h264_deblock.o \
+ x86/h264_deblock_10bit.o \
x86/h264_weight.o \
x86/h264_idct.o \
diff --git a/libavcodec/x86/ac3dsp.asm b/libavcodec/x86/ac3dsp.asm
index 8b7e826a2d..b67f893f22 100644
--- a/libavcodec/x86/ac3dsp.asm
+++ b/libavcodec/x86/ac3dsp.asm
@@ -16,7 +16,7 @@
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
-;* 51, Inc., Foundation Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "x86inc.asm"
diff --git a/libavcodec/x86/deinterlace.asm b/libavcodec/x86/deinterlace.asm
index 76a096eff3..2997787beb 100644
--- a/libavcodec/x86/deinterlace.asm
+++ b/libavcodec/x86/deinterlace.asm
@@ -17,7 +17,7 @@
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
-;* 51, Inc., Foundation Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "x86inc.asm"
diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c
index 985a15d2f1..d867dc3e6a 100644
--- a/libavcodec/x86/dsputil_mmx.c
+++ b/libavcodec/x86/dsputil_mmx.c
@@ -43,6 +43,7 @@ DECLARE_ALIGNED(16, const uint64_t, ff_pdw_80000000)[2] =
{0x8000000080000000ULL, 0x8000000080000000ULL};
DECLARE_ALIGNED(8, const uint64_t, ff_pw_1 ) = 0x0001000100010001ULL;
+DECLARE_ALIGNED(16, const xmm_reg, ff_pw_2 ) = {0x0002000200020002ULL, 0x0002000200020002ULL};
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_3 ) = {0x0003000300030003ULL, 0x0003000300030003ULL};
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_4 ) = {0x0004000400040004ULL, 0x0004000400040004ULL};
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_5 ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
@@ -2322,7 +2323,7 @@ float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);
void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
int mm_flags = av_get_cpu_flags();
- const int h264_high_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
+ const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
if (avctx->dsp_mask) {
if (avctx->dsp_mask & AV_CPU_FLAG_FORCE)
@@ -2404,7 +2405,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->put_pixels_clamped = ff_put_pixels_clamped_mmx;
c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
c->add_pixels_clamped = ff_add_pixels_clamped_mmx;
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
c->clear_block = clear_block_mmx;
c->clear_blocks = clear_blocks_mmx;
if ((mm_flags & AV_CPU_FLAG_SSE) &&
@@ -2421,7 +2422,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
SET_HPEL_FUNCS(put, 0, 16, mmx);
SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
SET_HPEL_FUNCS(avg, 0, 16, mmx);
@@ -2436,13 +2437,13 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->gmc= gmc_mmx;
#endif
#if ARCH_X86_32 && HAVE_YASM
- if (!h264_high_depth)
+ if (!high_bit_depth)
c->emulated_edge_mc = emulated_edge_mc_mmx;
#endif
c->add_bytes= add_bytes_mmx;
- if (!h264_high_depth)
+ if (!high_bit_depth)
c->draw_edges = draw_edges_mmx;
if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
@@ -2451,7 +2452,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
}
#if HAVE_YASM
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_mmx_rnd;
c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_mmx;
}
@@ -2463,7 +2464,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
if (mm_flags & AV_CPU_FLAG_MMX2) {
c->prefetch = prefetch_mmx2;
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
@@ -2480,7 +2481,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
}
if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
@@ -2529,7 +2530,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2);
SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2);
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2);
SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2);
SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2);
@@ -2547,7 +2548,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->avg_rv40_chroma_pixels_tab[0]= ff_avg_rv40_chroma_mc8_mmx2;
c->avg_rv40_chroma_pixels_tab[1]= ff_avg_rv40_chroma_mc4_mmx2;
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_mmx2_rnd;
c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_mmx2;
c->avg_h264_chroma_pixels_tab[2]= ff_avg_h264_chroma_mc2_mmx2;
@@ -2564,7 +2565,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
} else if (mm_flags & AV_CPU_FLAG_3DNOW) {
c->prefetch = prefetch_3dnow;
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
@@ -2602,7 +2603,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow);
SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow);
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow);
SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow);
SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow);
@@ -2617,7 +2618,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow);
#if HAVE_YASM
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_3dnow_rnd;
c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_3dnow;
}
@@ -2635,7 +2636,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
if((mm_flags & AV_CPU_FLAG_SSE2) && !(mm_flags & AV_CPU_FLAG_3DNOW)){
// these functions are slower than mmx on AMD, but faster on Intel
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
c->put_pixels_tab[0][0] = put_pixels16_sse2;
c->put_no_rnd_pixels_tab[0][0] = put_pixels16_sse2;
c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
@@ -2643,7 +2644,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
}
}
if(mm_flags & AV_CPU_FLAG_SSE2){
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
H264_QPEL_FUNCS(0, 1, sse2);
H264_QPEL_FUNCS(0, 2, sse2);
H264_QPEL_FUNCS(0, 3, sse2);
@@ -2660,7 +2661,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
}
#if HAVE_SSSE3
if(mm_flags & AV_CPU_FLAG_SSSE3){
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
H264_QPEL_FUNCS(1, 0, ssse3);
H264_QPEL_FUNCS(1, 1, ssse3);
H264_QPEL_FUNCS(1, 2, ssse3);
@@ -2675,7 +2676,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
H264_QPEL_FUNCS(3, 3, ssse3);
}
#if HAVE_YASM
- if (!h264_high_depth) {
+ if (!high_bit_depth) {
c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_ssse3_rnd;
c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_ssse3_rnd;
c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_ssse3;
@@ -2737,7 +2738,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
}
}
- if (!h264_high_depth)
+ if (!high_bit_depth)
c->emulated_edge_mc = emulated_edge_mc_sse;
c->gmc= gmc_sse;
#endif
diff --git a/libavcodec/x86/dsputil_yasm.asm b/libavcodec/x86/dsputil_yasm.asm
index 4658e5e3fa..a41640b0a2 100644
--- a/libavcodec/x86/dsputil_yasm.asm
+++ b/libavcodec/x86/dsputil_yasm.asm
@@ -16,7 +16,7 @@
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
-;* 51, Inc., Foundation Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "x86inc.asm"
diff --git a/libavcodec/x86/dsputilenc_yasm.asm b/libavcodec/x86/dsputilenc_yasm.asm
index b2844a5c72..a4f2d0cf51 100644
--- a/libavcodec/x86/dsputilenc_yasm.asm
+++ b/libavcodec/x86/dsputilenc_yasm.asm
@@ -18,7 +18,7 @@
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
-;* 51, Inc., Foundation Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;*****************************************************************************
%include "x86inc.asm"
diff --git a/libavcodec/x86/fft_mmx.asm b/libavcodec/x86/fft_mmx.asm
index b923f1f9a3..02b638f144 100644
--- a/libavcodec/x86/fft_mmx.asm
+++ b/libavcodec/x86/fft_mmx.asm
@@ -20,7 +20,7 @@
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
-;* 51, Inc., Foundation Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
; These functions are not individually interchangeable with the C versions.
diff --git a/libavcodec/x86/fmtconvert.asm b/libavcodec/x86/fmtconvert.asm
index 6c744fc581..dc038dde73 100644
--- a/libavcodec/x86/fmtconvert.asm
+++ b/libavcodec/x86/fmtconvert.asm
@@ -16,7 +16,7 @@
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
-;* 51, Inc., Foundation Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "x86inc.asm"
diff --git a/libavcodec/x86/h264_chromamc.asm b/libavcodec/x86/h264_chromamc.asm
index 3bb5ed4855..5dae1cca85 100644
--- a/libavcodec/x86/h264_chromamc.asm
+++ b/libavcodec/x86/h264_chromamc.asm
@@ -17,7 +17,7 @@
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
-;* 51, Inc., Foundation Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "x86inc.asm"
diff --git a/libavcodec/x86/h264_deblock.asm b/libavcodec/x86/h264_deblock.asm
index fb9cacfd11..2a6da0fe90 100644
--- a/libavcodec/x86/h264_deblock.asm
+++ b/libavcodec/x86/h264_deblock.asm
@@ -1,10 +1,11 @@
;*****************************************************************************
-;* MMX/SSE2-optimized H.264 deblocking code
+;* MMX/SSE2/AVX-optimized H.264 deblocking code
;*****************************************************************************
-;* Copyright (C) 2005-2008 x264 project
+;* Copyright (C) 2005-2011 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;* Jason Garrett-Glaser <darkshikari@gmail.com>
+;* Oskar Arvidsson <oskar@irock.se>
;*
;* This file is part of FFmpeg.
;*
@@ -20,102 +21,100 @@
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
-;* 51, Inc., Foundation Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "x86inc.asm"
%include "x86util.asm"
-SECTION_RODATA
+SECTION .text
cextern pb_0
cextern pb_1
cextern pb_3
cextern pb_A1
-SECTION .text
-
; expands to [base],...,[base+7*stride]
%define PASS8ROWS(base, base3, stride, stride3) \
[base], [base+stride], [base+stride*2], [base3], \
[base3+stride], [base3+stride*2], [base3+stride3], [base3+stride*4]
-; in: 8 rows of 4 bytes in %1..%8
+%define PASS8ROWS(base, base3, stride, stride3, offset) \
+ PASS8ROWS(base+offset, base3+offset, stride, stride3)
+
+; in: 8 rows of 4 bytes in %4..%11
; out: 4 rows of 8 bytes in m0..m3
-%macro TRANSPOSE4x8_LOAD 8
- movd m0, %1
- movd m2, %2
- movd m1, %3
- movd m3, %4
- punpcklbw m0, m2
- punpcklbw m1, m3
- movq m2, m0
- punpcklwd m0, m1
- punpckhwd m2, m1
-
- movd m4, %5
- movd m6, %6
- movd m5, %7
- movd m7, %8
- punpcklbw m4, m6
- punpcklbw m5, m7
- movq m6, m4
- punpcklwd m4, m5
- punpckhwd m6, m5
-
- movq m1, m0
- movq m3, m2
- punpckldq m0, m4
- punpckhdq m1, m4
- punpckldq m2, m6
- punpckhdq m3, m6
+%macro TRANSPOSE4x8_LOAD 11
+ movh m0, %4
+ movh m2, %5
+ movh m1, %6
+ movh m3, %7
+ punpckl%1 m0, m2
+ punpckl%1 m1, m3
+ mova m2, m0
+ punpckl%2 m0, m1
+ punpckh%2 m2, m1
+
+ movh m4, %8
+ movh m6, %9
+ movh m5, %10
+ movh m7, %11
+ punpckl%1 m4, m6
+ punpckl%1 m5, m7
+ mova m6, m4
+ punpckl%2 m4, m5
+ punpckh%2 m6, m5
+
+ punpckh%3 m1, m0, m4
+ punpckh%3 m3, m2, m6
+ punpckl%3 m0, m4
+ punpckl%3 m2, m6
%endmacro
; in: 4 rows of 8 bytes in m0..m3
; out: 8 rows of 4 bytes in %1..%8
-%macro TRANSPOSE8x4_STORE 8
- movq m4, m0
- movq m5, m1
- movq m6, m2
- punpckhdq m4, m4
- punpckhdq m5, m5
- punpckhdq m6, m6
+%macro TRANSPOSE8x4B_STORE 8
+ punpckhdq m4, m0, m0
+ punpckhdq m5, m1, m1
+ punpckhdq m6, m2, m2
punpcklbw m0, m1
punpcklbw m2, m3
- movq m1, m0
- punpcklwd m0, m2
- punpckhwd m1, m2
- movd %1, m0
- punpckhdq m0, m0
- movd %2, m0
- movd %3, m1
+ punpcklwd m1, m0, m2
+ punpckhwd m0, m2
+ movh %1, m1
punpckhdq m1, m1
- movd %4, m1
+ movh %2, m1
+ movh %3, m0
+ punpckhdq m0, m0
+ movh %4, m0
punpckhdq m3, m3
punpcklbw m4, m5
punpcklbw m6, m3
- movq m5, m4
- punpcklwd m4, m6
- punpckhwd m5, m6
- movd %5, m4
- punpckhdq m4, m4
- movd %6, m4
- movd %7, m5
+ punpcklwd m5, m4, m6
+ punpckhwd m4, m6
+ movh %5, m5
punpckhdq m5, m5
- movd %8, m5
+ movh %6, m5
+ movh %7, m4
+ punpckhdq m4, m4
+ movh %8, m4
+%endmacro
+
+%macro TRANSPOSE4x8B_LOAD 8
+ TRANSPOSE4x8_LOAD bw, wd, dq, %1, %2, %3, %4, %5, %6, %7, %8
%endmacro
%macro SBUTTERFLY3 4
- movq %4, %2
+ punpckh%1 %4, %2, %3
punpckl%1 %2, %3
- punpckh%1 %4, %3
%endmacro
; in: 8 rows of 8 (only the middle 6 pels are used) in %1..%8
; out: 6 rows of 8 in [%9+0*16] .. [%9+5*16]
%macro TRANSPOSE6x8_MEM 9
+ RESET_MM_PERMUTATION
movq m0, %1
movq m1, %2
movq m2, %3
@@ -123,30 +122,32 @@ SECTION .text
movq m4, %5
movq m5, %6
movq m6, %7
- SBUTTERFLY3 bw, m0, m1, m7
- SBUTTERFLY3 bw, m2, m3, m1
- SBUTTERFLY3 bw, m4, m5, m3
- movq [%9+0x10], m1
- SBUTTERFLY3 bw, m6, %8, m5
- SBUTTERFLY3 wd, m0, m2, m1
- SBUTTERFLY3 wd, m4, m6, m2
+ SBUTTERFLY bw, 0, 1, 7
+ SBUTTERFLY bw, 2, 3, 7
+ SBUTTERFLY bw, 4, 5, 7
+ movq [%9+0x10], m3
+ SBUTTERFLY3 bw, m6, %8, m7
+ SBUTTERFLY wd, 0, 2, 3
+ SBUTTERFLY wd, 4, 6, 3
punpckhdq m0, m4
movq [%9+0x00], m0
- SBUTTERFLY3 wd, m7, [%9+0x10], m6
- SBUTTERFLY3 wd, m3, m5, m4
- SBUTTERFLY3 dq, m7, m3, m0
- SBUTTERFLY3 dq, m1, m2, m5
- punpckldq m6, m4
- movq [%9+0x10], m1
- movq [%9+0x20], m5
- movq [%9+0x30], m7
- movq [%9+0x40], m0
- movq [%9+0x50], m6
+ SBUTTERFLY3 wd, m1, [%9+0x10], m3
+ SBUTTERFLY wd, 5, 7, 0
+ SBUTTERFLY dq, 1, 5, 0
+ SBUTTERFLY dq, 2, 6, 0
+ punpckldq m3, m7
+ movq [%9+0x10], m2
+ movq [%9+0x20], m6
+ movq [%9+0x30], m1
+ movq [%9+0x40], m5
+ movq [%9+0x50], m3
+ RESET_MM_PERMUTATION
%endmacro
; in: 8 rows of 8 in %1..%8
; out: 8 rows of 8 in %9..%16
%macro TRANSPOSE8x8_MEM 16
+ RESET_MM_PERMUTATION
movq m0, %1
movq m1, %2
movq m2, %3
@@ -154,38 +155,44 @@ SECTION .text
movq m4, %5
movq m5, %6
movq m6, %7
- SBUTTERFLY3 bw, m0, m1, m7
- SBUTTERFLY3 bw, m2, m3, m1
- SBUTTERFLY3 bw, m4, m5, m3
- SBUTTERFLY3 bw, m6, %8, m5
- movq %9, m3
- SBUTTERFLY3 wd, m0, m2, m3
- SBUTTERFLY3 wd, m4, m6, m2
- SBUTTERFLY3 wd, m7, m1, m6
- movq %11, m2
- movq m2, %9
- SBUTTERFLY3 wd, m2, m5, m1
- SBUTTERFLY3 dq, m0, m4, m5
- SBUTTERFLY3 dq, m7, m2, m4
+ SBUTTERFLY bw, 0, 1, 7
+ SBUTTERFLY bw, 2, 3, 7
+ SBUTTERFLY bw, 4, 5, 7
+ SBUTTERFLY3 bw, m6, %8, m7
+ movq %9, m5
+ SBUTTERFLY wd, 0, 2, 5
+ SBUTTERFLY wd, 4, 6, 5
+ SBUTTERFLY wd, 1, 3, 5
+ movq %11, m6
+ movq m6, %9
+ SBUTTERFLY wd, 6, 7, 5
+ SBUTTERFLY dq, 0, 4, 5
+ SBUTTERFLY dq, 1, 6, 5
movq %9, m0
- movq %10, m5
- movq %13, m7
- movq %14, m4
- SBUTTERFLY3 dq, m3, %11, m0
- SBUTTERFLY3 dq, m6, m1, m5
- movq %11, m3
+ movq %10, m4
+ movq %13, m1
+ movq %14, m6
+ SBUTTERFLY3 dq, m2, %11, m0
+ SBUTTERFLY dq, 3, 7, 4
+ movq %11, m2
movq %12, m0
- movq %15, m6
- movq %16, m5
+ movq %15, m3
+ movq %16, m7
+ RESET_MM_PERMUTATION
%endmacro
; out: %4 = |%1-%2|>%3
; clobbers: %5
%macro DIFF_GT 5
+%if avx_enabled == 0
mova %5, %2
mova %4, %1
psubusb %5, %1
psubusb %4, %2
+%else
+ psubusb %5, %2, %1
+ psubusb %4, %1, %2
+%endif
por %4, %5
psubusb %4, %3
%endmacro
@@ -193,32 +200,28 @@ SECTION .text
; out: %4 = |%1-%2|>%3
; clobbers: %5
%macro DIFF_GT2 5
+%ifdef ARCH_X86_64
+ psubusb %5, %2, %1
+ psubusb %4, %1, %2
+%else
mova %5, %2
mova %4, %1
psubusb %5, %1
psubusb %4, %2
+%endif
psubusb %5, %3
psubusb %4, %3
pcmpeqb %4, %5
%endmacro
-%macro SPLATW 1
-%ifidn m0, xmm0
- pshuflw %1, %1, 0
- punpcklqdq %1, %1
-%else
- pshufw %1, %1, 0
-%endif
-%endmacro
-
; in: m0=p1 m1=p0 m2=q0 m3=q1 %1=alpha-1 %2=beta-1
; out: m5=beta-1, m7=mask, %3=alpha-1
; clobbers: m4,m6
%macro LOAD_MASK 2-3
movd m4, %1
movd m5, %2
- SPLATW m4
- SPLATW m5
+ SPLATW m4, m4
+ SPLATW m5, m5
packuswb m4, m4 ; 16x alpha-1
packuswb m5, m5 ; 16x beta-1
%if %0>2
@@ -237,8 +240,7 @@ SECTION .text
; out: m1=p0' m2=q0'
; clobbers: m0,3-6
%macro DEBLOCK_P0_Q0 0
- mova m5, m1
- pxor m5, m2 ; p0^q0
+ pxor m5, m1, m2 ; p0^q0
pand m5, [pb_1] ; (p0^q0)&1
pcmpeqb m4, m4
pxor m3, m4
@@ -264,14 +266,12 @@ SECTION .text
; out: [q1] = clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
; clobbers: q2, tmp, tc0
%macro LUMA_Q1 6
- mova %6, m1
- pavgb %6, m2
+ pavgb %6, m1, m2
pavgb %2, %6 ; avg(p2,avg(p0,q0))
pxor %6, %3
pand %6, [pb_1] ; (p2^avg(p0,q0))&1
psubusb %2, %6 ; (p2+((p0+q0+1)>>1))>>1
- mova %6, %1
- psubusb %6, %5
+ psubusb %6, %1, %5
paddusb %5, %1
pmaxub %2, %6
pminub %2, %5
@@ -280,10 +280,10 @@ SECTION .text
%ifdef ARCH_X86_64
;-----------------------------------------------------------------------------
-; void x264_deblock_v_luma_sse2( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
+; void deblock_v_luma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
-INIT_XMM
-cglobal x264_deblock_v_luma_sse2, 5,5,10
+%macro DEBLOCK_LUMA 1
+cglobal deblock_v_luma_8_%1, 5,5,10
movd m8, [r4] ; tc0
lea r4, [r1*3]
dec r2d ; alpha-1
@@ -307,8 +307,7 @@ cglobal x264_deblock_v_luma_sse2, 5,5,10
movdqa m3, [r4] ; p2
DIFF_GT2 m1, m3, m5, m6, m7 ; |p2-p0| > beta-1
pand m6, m9
- mova m7, m8
- psubb m7, m6
+ psubb m7, m8, m6
pand m6, m8
LUMA_Q1 m0, m3, [r4], [r4+r1], m6, m4
@@ -326,10 +325,10 @@ cglobal x264_deblock_v_luma_sse2, 5,5,10
RET
;-----------------------------------------------------------------------------
-; void x264_deblock_h_luma_sse2( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
+; void deblock_h_luma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
INIT_MMX
-cglobal x264_deblock_h_luma_sse2, 5,7
+cglobal deblock_h_luma_8_%1, 5,7
movsxd r10, r1d
lea r11, [r10+r10*2]
lea r6, [r0-4]
@@ -350,13 +349,13 @@ cglobal x264_deblock_h_luma_sse2, 5,7
; vertical filter
; alpha, beta, tc0 are still in r2d, r3d, r4
- ; don't backup r6, r5, r10, r11 because x264_deblock_v_luma_sse2 doesn't use them
+ ; don't backup r6, r5, r10, r11 because deblock_v_luma_sse2 doesn't use them
lea r0, [pix_tmp+0x30]
mov r1d, 0x10
%ifdef WIN64
mov [rsp+0x20], r4
%endif
- call x264_deblock_v_luma_sse2
+ call deblock_v_luma_8_%1
; transpose 16x4 -> original space (only the middle 4 rows were changed by the filter)
add r6, 2
@@ -365,7 +364,7 @@ cglobal x264_deblock_h_luma_sse2, 5,7
movq m1, [pix_tmp+0x28]
movq m2, [pix_tmp+0x38]
movq m3, [pix_tmp+0x48]
- TRANSPOSE8x4_STORE PASS8ROWS(r6, r5, r10, r11)
+ TRANSPOSE8x4B_STORE PASS8ROWS(r6, r5, r10, r11)
shl r10, 3
sub r6, r10
@@ -375,7 +374,7 @@ cglobal x264_deblock_h_luma_sse2, 5,7
movq m1, [pix_tmp+0x20]
movq m2, [pix_tmp+0x30]
movq m3, [pix_tmp+0x40]
- TRANSPOSE8x4_STORE PASS8ROWS(r6, r5, r10, r11)
+ TRANSPOSE8x4B_STORE PASS8ROWS(r6, r5, r10, r11)
%ifdef WIN64
add rsp, 0x98
@@ -383,14 +382,20 @@ cglobal x264_deblock_h_luma_sse2, 5,7
add rsp, 0x68
%endif
RET
+%endmacro
+
+INIT_XMM
+DEBLOCK_LUMA sse2
+INIT_AVX
+DEBLOCK_LUMA avx
%else
%macro DEBLOCK_LUMA 3
;-----------------------------------------------------------------------------
-; void x264_deblock_v8_luma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
+; void deblock_v8_luma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
-cglobal x264_deblock_%2_luma_%1, 5,5
+cglobal deblock_%2_luma_8_%1, 5,5
lea r4, [r1*3]
dec r2 ; alpha-1
neg r4
@@ -419,8 +424,7 @@ cglobal x264_deblock_%2_luma_%1, 5,5
DIFF_GT2 m1, m3, m5, m6, m7 ; |p2-p0| > beta-1
pand m6, m4
pand m4, [esp+%3] ; tc
- mova m7, m4
- psubb m7, m6
+ psubb m7, m4, m6
pand m6, m4
LUMA_Q1 m0, m3, [r4], [r4+r1], m6, m4
@@ -441,10 +445,10 @@ cglobal x264_deblock_%2_luma_%1, 5,5
RET
;-----------------------------------------------------------------------------
-; void x264_deblock_h_luma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
+; void deblock_h_luma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
INIT_MMX
-cglobal x264_deblock_h_luma_%1, 0,5
+cglobal deblock_h_luma_8_%1, 0,5
mov r0, r0mp
mov r3, r1m
lea r4, [r3*3]
@@ -467,11 +471,11 @@ cglobal x264_deblock_h_luma_%1, 0,5
PUSH dword r2m
PUSH dword 16
PUSH dword r0
- call x264_deblock_%2_luma_%1
+ call deblock_%2_luma_8_%1
%ifidn %2, v8
add dword [esp ], 8 ; pix_tmp+0x38
add dword [esp+16], 2 ; tc0+2
- call x264_deblock_%2_luma_%1
+ call deblock_%2_luma_8_%1
%endif
ADD esp, 20
@@ -484,7 +488,7 @@ cglobal x264_deblock_h_luma_%1, 0,5
movq m1, [pix_tmp+0x20]
movq m2, [pix_tmp+0x30]
movq m3, [pix_tmp+0x40]
- TRANSPOSE8x4_STORE PASS8ROWS(r0, r1, r3, r4)
+ TRANSPOSE8x4B_STORE PASS8ROWS(r0, r1, r3, r4)
lea r0, [r0+r3*8]
lea r1, [r1+r3*8]
@@ -492,7 +496,7 @@ cglobal x264_deblock_h_luma_%1, 0,5
movq m1, [pix_tmp+0x28]
movq m2, [pix_tmp+0x38]
movq m3, [pix_tmp+0x48]
- TRANSPOSE8x4_STORE PASS8ROWS(r0, r1, r3, r4)
+ TRANSPOSE8x4B_STORE PASS8ROWS(r0, r1, r3, r4)
ADD esp, pad
RET
@@ -502,22 +506,34 @@ INIT_MMX
DEBLOCK_LUMA mmxext, v8, 8
INIT_XMM
DEBLOCK_LUMA sse2, v, 16
+INIT_AVX
+DEBLOCK_LUMA avx, v, 16
%endif ; ARCH
%macro LUMA_INTRA_P012 4 ; p0..p3 in memory
+%ifdef ARCH_X86_64
+ pavgb t0, p2, p1
+ pavgb t1, p0, q0
+%else
mova t0, p2
mova t1, p0
pavgb t0, p1
pavgb t1, q0
+%endif
pavgb t0, t1 ; ((p2+p1+1)/2 + (p0+q0+1)/2 + 1)/2
mova t5, t1
+%ifdef ARCH_X86_64
+ paddb t2, p2, p1
+ paddb t3, p0, q0
+%else
mova t2, p2
mova t3, p0
paddb t2, p1
paddb t3, q0
+%endif
paddb t2, t3
mova t3, t2
mova t4, t2
@@ -527,10 +543,15 @@ DEBLOCK_LUMA sse2, v, 16
pand t2, mpb_1
psubb t0, t2 ; p1' = (p2+p1+p0+q0+2)/4;
+%ifdef ARCH_X86_64
+ pavgb t1, p2, q1
+ psubb t2, p2, q1
+%else
mova t1, p2
mova t2, p2
pavgb t1, q1
psubb t2, q1
+%endif
paddb t3, t3
psubb t3, t2 ; p2+2*p1+2*p0+2*q0+q1
pand t2, mpb_1
@@ -543,10 +564,8 @@ DEBLOCK_LUMA sse2, v, 16
pand t3, mpb_1
psubb t1, t3 ; p0'a = (p2+2*p1+2*p0+2*q0+q1+4)/8
- mova t3, p0
- mova t2, p0
- pxor t3, q1
- pavgb t2, q1
+ pxor t3, p0, q1
+ pavgb t2, p0, q1
pand t3, mpb_1
psubb t2, t3
pavgb t2, p1 ; p0'b = (2*p1+p0+q0+2)/4
@@ -560,9 +579,8 @@ DEBLOCK_LUMA sse2, v, 16
mova %1, t1 ; store p0
mova t1, %4 ; p3
- mova t2, t1
+ paddb t2, t1, p2
pavgb t1, p2
- paddb t2, p2
pavgb t1, t0 ; (p3+p2+1)/2 + (p2+p1+p0+q0+2)/4
paddb t2, t2
paddb t2, t4 ; 2*p3+3*p2+p1+p0+q0
@@ -624,9 +642,9 @@ DEBLOCK_LUMA sse2, v, 16
%endif
;-----------------------------------------------------------------------------
-; void x264_deblock_v_luma_intra_sse2( uint8_t *pix, int stride, int alpha, int beta )
+; void deblock_v_luma_intra( uint8_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
-cglobal x264_deblock_%2_luma_intra_%1, 4,6,16
+cglobal deblock_%2_luma_intra_8_%1, 4,6,16
%ifndef ARCH_X86_64
sub esp, 0x60
%endif
@@ -686,9 +704,9 @@ cglobal x264_deblock_%2_luma_intra_%1, 4,6,16
INIT_MMX
%ifdef ARCH_X86_64
;-----------------------------------------------------------------------------
-; void x264_deblock_h_luma_intra_sse2( uint8_t *pix, int stride, int alpha, int beta )
+; void deblock_h_luma_intra( uint8_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
-cglobal x264_deblock_h_luma_intra_%1, 4,7
+cglobal deblock_h_luma_intra_8_%1, 4,7
movsxd r10, r1d
lea r11, [r10*3]
lea r6, [r0-4]
@@ -704,7 +722,7 @@ cglobal x264_deblock_h_luma_intra_%1, 4,7
lea r0, [pix_tmp+0x40]
mov r1, 0x10
- call x264_deblock_v_luma_intra_%1
+ call deblock_v_luma_intra_8_%1
; transpose 16x6 -> original space (but we can't write only 6 pixels, so really 16x8)
lea r5, [r6+r11]
@@ -717,7 +735,7 @@ cglobal x264_deblock_h_luma_intra_%1, 4,7
add rsp, 0x88
RET
%else
-cglobal x264_deblock_h_luma_intra_%1, 2,4
+cglobal deblock_h_luma_intra_8_%1, 2,4
lea r3, [r1*3]
sub r0, 4
lea r2, [r0+r3]
@@ -736,10 +754,10 @@ cglobal x264_deblock_h_luma_intra_%1, 2,4
PUSH dword r2m
PUSH dword 16
PUSH r0
- call x264_deblock_%2_luma_intra_%1
+ call deblock_%2_luma_intra_8_%1
%ifidn %2, v8
add dword [rsp], 8 ; pix_tmp+8
- call x264_deblock_%2_luma_intra_%1
+ call deblock_%2_luma_intra_8_%1
%endif
ADD esp, 16
@@ -760,13 +778,13 @@ cglobal x264_deblock_h_luma_intra_%1, 2,4
INIT_XMM
DEBLOCK_LUMA_INTRA sse2, v
+INIT_AVX
+DEBLOCK_LUMA_INTRA avx , v
%ifndef ARCH_X86_64
INIT_MMX
DEBLOCK_LUMA_INTRA mmxext, v8
%endif
-
-
INIT_MMX
%macro CHROMA_V_START 0
@@ -790,23 +808,23 @@ INIT_MMX
%define t6 r6
;-----------------------------------------------------------------------------
-; void x264_deblock_v_chroma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
+; void ff_deblock_v_chroma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
-cglobal x264_deblock_v_chroma_mmxext, 5,6
+cglobal deblock_v_chroma_8_mmxext, 5,6
CHROMA_V_START
movq m0, [t5]
movq m1, [t5+r1]
movq m2, [r0]
movq m3, [r0+r1]
- call x264_chroma_inter_body_mmxext
+ call ff_chroma_inter_body_mmxext
movq [t5+r1], m1
movq [r0], m2
RET
;-----------------------------------------------------------------------------
-; void x264_deblock_h_chroma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
+; void ff_deblock_h_chroma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
-cglobal x264_deblock_h_chroma_mmxext, 5,7
+cglobal deblock_h_chroma_8_mmxext, 5,7
%ifdef ARCH_X86_64
%define buf0 [rsp-24]
%define buf1 [rsp-16]
@@ -815,17 +833,17 @@ cglobal x264_deblock_h_chroma_mmxext, 5,7
%define buf1 r2m
%endif
CHROMA_H_START
- TRANSPOSE4x8_LOAD PASS8ROWS(t5, r0, r1, t6)
+ TRANSPOSE4x8_LOAD bw, wd, dq, PASS8ROWS(t5, r0, r1, t6)
movq buf0, m0
movq buf1, m3
- call x264_chroma_inter_body_mmxext
+ call ff_chroma_inter_body_mmxext
movq m0, buf0
movq m3, buf1
- TRANSPOSE8x4_STORE PASS8ROWS(t5, r0, r1, t6)
+ TRANSPOSE8x4B_STORE PASS8ROWS(t5, r0, r1, t6)
RET
ALIGN 16
-x264_chroma_inter_body_mmxext:
+ff_chroma_inter_body_mmxext:
LOAD_MASK r2d, r3d
movd m6, [r4] ; tc0
punpcklbw m6, m6
@@ -850,31 +868,31 @@ x264_chroma_inter_body_mmxext:
%define t6 r5
;-----------------------------------------------------------------------------
-; void x264_deblock_v_chroma_intra( uint8_t *pix, int stride, int alpha, int beta )
+; void ff_deblock_v_chroma_intra( uint8_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
-cglobal x264_deblock_v_chroma_intra_mmxext, 4,5
+cglobal deblock_v_chroma_intra_8_mmxext, 4,5
CHROMA_V_START
movq m0, [t5]
movq m1, [t5+r1]
movq m2, [r0]
movq m3, [r0+r1]
- call x264_chroma_intra_body_mmxext
+ call ff_chroma_intra_body_mmxext
movq [t5+r1], m1
movq [r0], m2
RET
;-----------------------------------------------------------------------------
-; void x264_deblock_h_chroma_intra( uint8_t *pix, int stride, int alpha, int beta )
+; void ff_deblock_h_chroma_intra( uint8_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
-cglobal x264_deblock_h_chroma_intra_mmxext, 4,6
+cglobal deblock_h_chroma_intra_8_mmxext, 4,6
CHROMA_H_START
- TRANSPOSE4x8_LOAD PASS8ROWS(t5, r0, r1, t6)
- call x264_chroma_intra_body_mmxext
- TRANSPOSE8x4_STORE PASS8ROWS(t5, r0, r1, t6)
+ TRANSPOSE4x8_LOAD bw, wd, dq, PASS8ROWS(t5, r0, r1, t6)
+ call ff_chroma_intra_body_mmxext
+ TRANSPOSE8x4B_STORE PASS8ROWS(t5, r0, r1, t6)
RET
ALIGN 16
-x264_chroma_intra_body_mmxext:
+ff_chroma_intra_body_mmxext:
LOAD_MASK r2d, r3d
movq m5, m1
movq m6, m2
diff --git a/libavcodec/x86/h264_deblock_10bit.asm b/libavcodec/x86/h264_deblock_10bit.asm
new file mode 100644
index 0000000000..f5a13f1250
--- /dev/null
+++ b/libavcodec/x86/h264_deblock_10bit.asm
@@ -0,0 +1,910 @@
+;*****************************************************************************
+;* MMX/SSE2/AVX-optimized 10-bit H.264 deblocking code
+;*****************************************************************************
+;* Copyright (C) 2005-2011 x264 project
+;*
+;* Authors: Oskar Arvidsson <oskar@irock.se>
+;* Loren Merritt <lorenm@u.washington.edu>
+;* Jason Garrett-Glaser <darkshikari@gmail.com>
+;*
+;* This file is part of Libav.
+;*
+;* Libav is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* Libav is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with Libav; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "x86inc.asm"
+%include "x86util.asm"
+
+SECTION_RODATA
+
+pw_pixel_max: times 8 dw ((1 << 10)-1)
+
+SECTION .text
+
+cextern pw_2
+cextern pw_3
+cextern pw_4
+
+; out: %4 = |%1-%2|-%3
+; clobbers: %5
+%macro ABS_SUB 5
+ psubusw %5, %2, %1
+ psubusw %4, %1, %2
+ por %4, %5
+ psubw %4, %3
+%endmacro
+
+; out: %4 = |%1-%2|<%3
+%macro DIFF_LT 5
+ psubusw %4, %2, %1
+ psubusw %5, %1, %2
+ por %5, %4 ; |%1-%2|
+ pxor %4, %4
+ psubw %5, %3 ; |%1-%2|-%3
+ pcmpgtw %4, %5 ; 0 > |%1-%2|-%3
+%endmacro
+
+%macro LOAD_AB 4
+ movd %1, %3
+ movd %2, %4
+ SPLATW %1, %1
+ SPLATW %2, %2
+%endmacro
+
+; in: %2=tc reg
+; out: %1=splatted tc
+%macro LOAD_TC 2
+ movd %1, [%2]
+ punpcklbw %1, %1
+%if mmsize == 8
+ pshufw %1, %1, 0
+%else
+ pshuflw %1, %1, 01010000b
+ pshufd %1, %1, 01010000b
+%endif
+ psraw %1, 6
+%endmacro
+
+; in: %1=p1, %2=p0, %3=q0, %4=q1
+; %5=alpha, %6=beta, %7-%9=tmp
+; out: %7=mask
+%macro LOAD_MASK 9
+ ABS_SUB %2, %3, %5, %8, %7 ; |p0-q0| - alpha
+ ABS_SUB %1, %2, %6, %9, %7 ; |p1-p0| - beta
+ pand %8, %9
+ ABS_SUB %3, %4, %6, %9, %7 ; |q1-q0| - beta
+ pxor %7, %7
+ pand %8, %9
+ pcmpgtw %7, %8
+%endmacro
+
+; in: %1=p0, %2=q0, %3=p1, %4=q1, %5=mask, %6=tmp, %7=tmp
+; out: %1=p0', m2=q0'
+%macro DEBLOCK_P0_Q0 7
+ psubw %3, %4
+ pxor %7, %7
+ paddw %3, [pw_4]
+ psubw %7, %5
+ psubw %6, %2, %1
+ psllw %6, 2
+ paddw %3, %6
+ psraw %3, 3
+ mova %6, [pw_pixel_max]
+ CLIPW %3, %7, %5
+ pxor %7, %7
+ paddw %1, %3
+ psubw %2, %3
+ CLIPW %1, %7, %6
+ CLIPW %2, %7, %6
+%endmacro
+
+; in: %1=x2, %2=x1, %3=p0, %4=q0 %5=mask&tc, %6=tmp
+%macro LUMA_Q1 6
+ pavgw %6, %3, %4 ; (p0+q0+1)>>1
+ paddw %1, %6
+ pxor %6, %6
+ psraw %1, 1
+ psubw %6, %5
+ psubw %1, %2
+ CLIPW %1, %6, %5
+ paddw %1, %2
+%endmacro
+
+%macro LUMA_DEBLOCK_ONE 3
+ DIFF_LT m5, %1, bm, m4, m6
+ pxor m6, m6
+ mova %3, m4
+ pcmpgtw m6, tcm
+ pand m4, tcm
+ pandn m6, m7
+ pand m4, m6
+ LUMA_Q1 m5, %2, m1, m2, m4, m6
+%endmacro
+
+%macro LUMA_H_STORE 2
+%if mmsize == 8
+ movq [r0-4], m0
+ movq [r0+r1-4], m1
+ movq [r0+r1*2-4], m2
+ movq [r0+%2-4], m3
+%else
+ movq [r0-4], m0
+ movhps [r0+r1-4], m0
+ movq [r0+r1*2-4], m1
+ movhps [%1-4], m1
+ movq [%1+r1-4], m2
+ movhps [%1+r1*2-4], m2
+ movq [%1+%2-4], m3
+ movhps [%1+r1*4-4], m3
+%endif
+%endmacro
+
+%macro DEBLOCK_LUMA 1
+;-----------------------------------------------------------------------------
+; void deblock_v_luma( uint16_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
+;-----------------------------------------------------------------------------
+cglobal deblock_v_luma_10_%1, 5,5,8*(mmsize/16)
+ %assign pad 5*mmsize+12-(stack_offset&15)
+ %define tcm [rsp]
+ %define ms1 [rsp+mmsize]
+ %define ms2 [rsp+mmsize*2]
+ %define am [rsp+mmsize*3]
+ %define bm [rsp+mmsize*4]
+ SUB rsp, pad
+ shl r2d, 2
+ shl r3d, 2
+ LOAD_AB m4, m5, r2, r3
+ mov r3, 32/mmsize
+ mov r2, r0
+ sub r0, r1
+ mova am, m4
+ sub r0, r1
+ mova bm, m5
+ sub r0, r1
+.loop:
+ mova m0, [r0+r1]
+ mova m1, [r0+r1*2]
+ mova m2, [r2]
+ mova m3, [r2+r1]
+
+ LOAD_MASK m0, m1, m2, m3, am, bm, m7, m4, m6
+ LOAD_TC m6, r4
+ mova tcm, m6
+
+ mova m5, [r0]
+ LUMA_DEBLOCK_ONE m1, m0, ms1
+ mova [r0+r1], m5
+
+ mova m5, [r2+r1*2]
+ LUMA_DEBLOCK_ONE m2, m3, ms2
+ mova [r2+r1], m5
+
+ pxor m5, m5
+ mova m6, tcm
+ pcmpgtw m5, tcm
+ psubw m6, ms1
+ pandn m5, m7
+ psubw m6, ms2
+ pand m5, m6
+ DEBLOCK_P0_Q0 m1, m2, m0, m3, m5, m7, m6
+ mova [r0+r1*2], m1
+ mova [r2], m2
+
+ add r0, mmsize
+ add r2, mmsize
+ add r4, mmsize/8
+ dec r3
+ jg .loop
+ ADD rsp, pad
+ RET
+
+cglobal deblock_h_luma_10_%1, 5,6,8*(mmsize/16)
+ %assign pad 7*mmsize+12-(stack_offset&15)
+ %define tcm [rsp]
+ %define ms1 [rsp+mmsize]
+ %define ms2 [rsp+mmsize*2]
+ %define p1m [rsp+mmsize*3]
+ %define p2m [rsp+mmsize*4]
+ %define am [rsp+mmsize*5]
+ %define bm [rsp+mmsize*6]
+ SUB rsp, pad
+ shl r2d, 2
+ shl r3d, 2
+ LOAD_AB m4, m5, r2, r3
+ mov r3, r1
+ mova am, m4
+ add r3, r1
+ mov r5, 32/mmsize
+ mova bm, m5
+ add r3, r1
+%if mmsize == 16
+ mov r2, r0
+ add r2, r3
+%endif
+.loop:
+%if mmsize == 8
+ movq m2, [r0-8] ; y q2 q1 q0
+ movq m7, [r0+0]
+ movq m5, [r0+r1-8]
+ movq m3, [r0+r1+0]
+ movq m0, [r0+r1*2-8]
+ movq m6, [r0+r1*2+0]
+ movq m1, [r0+r3-8]
+ TRANSPOSE4x4W 2, 5, 0, 1, 4
+ SWAP 2, 7
+ movq m7, [r0+r3]
+ TRANSPOSE4x4W 2, 3, 6, 7, 4
+%else
+ movu m5, [r0-8] ; y q2 q1 q0 p0 p1 p2 x
+ movu m0, [r0+r1-8]
+ movu m2, [r0+r1*2-8]
+ movu m3, [r2-8]
+ TRANSPOSE4x4W 5, 0, 2, 3, 6
+ mova tcm, m3
+
+ movu m4, [r2+r1-8]
+ movu m1, [r2+r1*2-8]
+ movu m3, [r2+r3-8]
+ movu m7, [r2+r1*4-8]
+ TRANSPOSE4x4W 4, 1, 3, 7, 6
+
+ mova m6, tcm
+ punpcklqdq m6, m7
+ punpckhqdq m5, m4
+ SBUTTERFLY qdq, 0, 1, 7
+ SBUTTERFLY qdq, 2, 3, 7
+%endif
+
+ mova p2m, m6
+ LOAD_MASK m0, m1, m2, m3, am, bm, m7, m4, m6
+ LOAD_TC m6, r4
+ mova tcm, m6
+
+ LUMA_DEBLOCK_ONE m1, m0, ms1
+ mova p1m, m5
+
+ mova m5, p2m
+ LUMA_DEBLOCK_ONE m2, m3, ms2
+ mova p2m, m5
+
+ pxor m5, m5
+ mova m6, tcm
+ pcmpgtw m5, tcm
+ psubw m6, ms1
+ pandn m5, m7
+ psubw m6, ms2
+ pand m5, m6
+ DEBLOCK_P0_Q0 m1, m2, m0, m3, m5, m7, m6
+ mova m0, p1m
+ mova m3, p2m
+ TRANSPOSE4x4W 0, 1, 2, 3, 4
+ LUMA_H_STORE r2, r3
+
+ add r4, mmsize/8
+ lea r0, [r0+r1*(mmsize/2)]
+ lea r2, [r2+r1*(mmsize/2)]
+ dec r5
+ jg .loop
+ ADD rsp, pad
+ RET
+%endmacro
+
+INIT_XMM
+%ifdef ARCH_X86_64
+; in: m0=p1, m1=p0, m2=q0, m3=q1, m8=p2, m9=q2
+; m12=alpha, m13=beta
+; out: m0=p1', m3=q1', m1=p0', m2=q0'
+; clobbers: m4, m5, m6, m7, m10, m11, m14
+%macro DEBLOCK_LUMA_INTER_SSE2 0
+ LOAD_MASK m0, m1, m2, m3, m12, m13, m7, m4, m6
+ LOAD_TC m6, r4
+ DIFF_LT m8, m1, m13, m10, m4
+ DIFF_LT m9, m2, m13, m11, m4
+ pand m6, m7
+
+ mova m14, m6
+ pxor m4, m4
+ pcmpgtw m6, m4
+ pand m6, m14
+
+ mova m5, m10
+ pand m5, m6
+ LUMA_Q1 m8, m0, m1, m2, m5, m4
+
+ mova m5, m11
+ pand m5, m6
+ LUMA_Q1 m9, m3, m1, m2, m5, m4
+
+ pxor m4, m4
+ psubw m6, m10
+ pcmpgtw m4, m14
+ pandn m4, m7
+ psubw m6, m11
+ pand m4, m6
+ DEBLOCK_P0_Q0 m1, m2, m0, m3, m4, m5, m6
+
+ SWAP 0, 8
+ SWAP 3, 9
+%endmacro
+
+%macro DEBLOCK_LUMA_64 1
+cglobal deblock_v_luma_10_%1, 5,5,15
+ %define p2 m8
+ %define p1 m0
+ %define p0 m1
+ %define q0 m2
+ %define q1 m3
+ %define q2 m9
+ %define mask0 m7
+ %define mask1 m10
+ %define mask2 m11
+ shl r2d, 2
+ shl r3d, 2
+ LOAD_AB m12, m13, r2, r3
+ mov r2, r0
+ sub r0, r1
+ sub r0, r1
+ sub r0, r1
+ mov r3, 2
+.loop:
+ mova p2, [r0]
+ mova p1, [r0+r1]
+ mova p0, [r0+r1*2]
+ mova q0, [r2]
+ mova q1, [r2+r1]
+ mova q2, [r2+r1*2]
+ DEBLOCK_LUMA_INTER_SSE2
+ mova [r0+r1], p1
+ mova [r0+r1*2], p0
+ mova [r2], q0
+ mova [r2+r1], q1
+ add r0, mmsize
+ add r2, mmsize
+ add r4, 2
+ dec r3
+ jg .loop
+ REP_RET
+
+cglobal deblock_h_luma_10_%1, 5,7,15
+ shl r2d, 2
+ shl r3d, 2
+ LOAD_AB m12, m13, r2, r3
+ mov r2, r1
+ add r2, r1
+ add r2, r1
+ mov r5, r0
+ add r5, r2
+ mov r6, 2
+.loop:
+ movu m8, [r0-8] ; y q2 q1 q0 p0 p1 p2 x
+ movu m0, [r0+r1-8]
+ movu m2, [r0+r1*2-8]
+ movu m9, [r5-8]
+ movu m5, [r5+r1-8]
+ movu m1, [r5+r1*2-8]
+ movu m3, [r5+r2-8]
+ movu m7, [r5+r1*4-8]
+
+ TRANSPOSE4x4W 8, 0, 2, 9, 10
+ TRANSPOSE4x4W 5, 1, 3, 7, 10
+
+ punpckhqdq m8, m5
+ SBUTTERFLY qdq, 0, 1, 10
+ SBUTTERFLY qdq, 2, 3, 10
+ punpcklqdq m9, m7
+
+ DEBLOCK_LUMA_INTER_SSE2
+
+ TRANSPOSE4x4W 0, 1, 2, 3, 4
+ LUMA_H_STORE r5, r2
+ add r4, 2
+ lea r0, [r0+r1*8]
+ lea r5, [r5+r1*8]
+ dec r6
+ jg .loop
+ REP_RET
+%endmacro
+
+INIT_XMM
+DEBLOCK_LUMA_64 sse2
+INIT_AVX
+DEBLOCK_LUMA_64 avx
+%endif
+
+%macro SWAPMOVA 2
+%ifid %1
+ SWAP %1, %2
+%else
+ mova %1, %2
+%endif
+%endmacro
+
+; in: t0-t2: tmp registers
+; %1=p0 %2=p1 %3=p2 %4=p3 %5=q0 %6=q1 %7=mask0
+; %8=mask1p %9=2 %10=p0' %11=p1' %12=p2'
+%macro LUMA_INTRA_P012 12 ; p0..p3 in memory
+%ifdef ARCH_X86_64
+ paddw t0, %3, %2
+ mova t2, %4
+ paddw t2, %3
+%else
+ mova t0, %3
+ mova t2, %4
+ paddw t0, %2
+ paddw t2, %3
+%endif
+ paddw t0, %1
+ paddw t2, t2
+ paddw t0, %5
+ paddw t2, %9
+ paddw t0, %9 ; (p2 + p1 + p0 + q0 + 2)
+ paddw t2, t0 ; (2*p3 + 3*p2 + p1 + p0 + q0 + 4)
+
+ psrlw t2, 3
+ psrlw t1, t0, 2
+ psubw t2, %3
+ psubw t1, %2
+ pand t2, %8
+ pand t1, %8
+ paddw t2, %3
+ paddw t1, %2
+ SWAPMOVA %11, t1
+
+ psubw t1, t0, %3
+ paddw t0, t0
+ psubw t1, %5
+ psubw t0, %3
+ paddw t1, %6
+ paddw t1, %2
+ paddw t0, %6
+ psrlw t1, 2 ; (2*p1 + p0 + q1 + 2)/4
+ psrlw t0, 3 ; (p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4)>>3
+
+ pxor t0, t1
+ pxor t1, %1
+ pand t0, %8
+ pand t1, %7
+ pxor t0, t1
+ pxor t0, %1
+ SWAPMOVA %10, t0
+ SWAPMOVA %12, t2
+%endmacro
+
+%macro LUMA_INTRA_INIT 1
+ %xdefine pad %1*mmsize+((gprsize*3) % mmsize)-(stack_offset&15)
+ %define t0 m4
+ %define t1 m5
+ %define t2 m6
+ %define t3 m7
+ %assign i 4
+%rep %1
+ CAT_XDEFINE t, i, [rsp+mmsize*(i-4)]
+ %assign i i+1
+%endrep
+ SUB rsp, pad
+%endmacro
+
+; in: %1-%3=tmp, %4=p2, %5=q2
+%macro LUMA_INTRA_INTER 5
+ LOAD_AB t0, t1, r2d, r3d
+ mova %1, t0
+ LOAD_MASK m0, m1, m2, m3, %1, t1, t0, t2, t3
+%ifdef ARCH_X86_64
+ mova %2, t0 ; mask0
+ psrlw t3, %1, 2
+%else
+ mova t3, %1
+ mova %2, t0 ; mask0
+ psrlw t3, 2
+%endif
+ paddw t3, [pw_2] ; alpha/4+2
+ DIFF_LT m1, m2, t3, t2, t0 ; t2 = |p0-q0| < alpha/4+2
+ pand t2, %2
+ mova t3, %5 ; q2
+ mova %1, t2 ; mask1
+ DIFF_LT t3, m2, t1, t2, t0 ; t2 = |q2-q0| < beta
+ pand t2, %1
+ mova t3, %4 ; p2
+ mova %3, t2 ; mask1q
+ DIFF_LT t3, m1, t1, t2, t0 ; t2 = |p2-p0| < beta
+ pand t2, %1
+ mova %1, t2 ; mask1p
+%endmacro
+
+%macro LUMA_H_INTRA_LOAD 0
+%if mmsize == 8
+ movu t0, [r0-8]
+ movu t1, [r0+r1-8]
+ movu m0, [r0+r1*2-8]
+ movu m1, [r0+r4-8]
+ TRANSPOSE4x4W 4, 5, 0, 1, 2
+ mova t4, t0 ; p3
+ mova t5, t1 ; p2
+
+ movu m2, [r0]
+ movu m3, [r0+r1]
+ movu t0, [r0+r1*2]
+ movu t1, [r0+r4]
+ TRANSPOSE4x4W 2, 3, 4, 5, 6
+ mova t6, t0 ; q2
+ mova t7, t1 ; q3
+%else
+ movu t0, [r0-8]
+ movu t1, [r0+r1-8]
+ movu m0, [r0+r1*2-8]
+ movu m1, [r0+r5-8]
+ movu m2, [r4-8]
+ movu m3, [r4+r1-8]
+ movu t2, [r4+r1*2-8]
+ movu t3, [r4+r5-8]
+ TRANSPOSE8x8W 4, 5, 0, 1, 2, 3, 6, 7, t4, t5
+ mova t4, t0 ; p3
+ mova t5, t1 ; p2
+ mova t6, t2 ; q2
+ mova t7, t3 ; q3
+%endif
+%endmacro
+
+; in: %1=q3 %2=q2' %3=q1' %4=q0' %5=p0' %6=p1' %7=p2' %8=p3 %9=tmp
+%macro LUMA_H_INTRA_STORE 9
+%if mmsize == 8
+ TRANSPOSE4x4W %1, %2, %3, %4, %9
+ movq [r0-8], m%1
+ movq [r0+r1-8], m%2
+ movq [r0+r1*2-8], m%3
+ movq [r0+r4-8], m%4
+ movq m%1, %8
+ TRANSPOSE4x4W %5, %6, %7, %1, %9
+ movq [r0], m%5
+ movq [r0+r1], m%6
+ movq [r0+r1*2], m%7
+ movq [r0+r4], m%1
+%else
+ TRANSPOSE2x4x4W %1, %2, %3, %4, %9
+ movq [r0-8], m%1
+ movq [r0+r1-8], m%2
+ movq [r0+r1*2-8], m%3
+ movq [r0+r5-8], m%4
+ movhps [r4-8], m%1
+ movhps [r4+r1-8], m%2
+ movhps [r4+r1*2-8], m%3
+ movhps [r4+r5-8], m%4
+%ifnum %8
+ SWAP %1, %8
+%else
+ mova m%1, %8
+%endif
+ TRANSPOSE2x4x4W %5, %6, %7, %1, %9
+ movq [r0], m%5
+ movq [r0+r1], m%6
+ movq [r0+r1*2], m%7
+ movq [r0+r5], m%1
+ movhps [r4], m%5
+ movhps [r4+r1], m%6
+ movhps [r4+r1*2], m%7
+ movhps [r4+r5], m%1
+%endif
+%endmacro
+
+%ifdef ARCH_X86_64
+;-----------------------------------------------------------------------------
+; void deblock_v_luma_intra( uint16_t *pix, int stride, int alpha, int beta )
+;-----------------------------------------------------------------------------
+%macro DEBLOCK_LUMA_INTRA_64 1
+cglobal deblock_v_luma_intra_10_%1, 4,7,16
+ %define t0 m1
+ %define t1 m2
+ %define t2 m4
+ %define p2 m8
+ %define p1 m9
+ %define p0 m10
+ %define q0 m11
+ %define q1 m12
+ %define q2 m13
+ %define aa m5
+ %define bb m14
+ lea r4, [r1*4]
+ lea r5, [r1*3] ; 3*stride
+ neg r4
+ add r4, r0 ; pix-4*stride
+ mov r6, 2
+ mova m0, [pw_2]
+ shl r2d, 2
+ shl r3d, 2
+ LOAD_AB aa, bb, r2d, r3d
+.loop
+ mova p2, [r4+r1]
+ mova p1, [r4+2*r1]
+ mova p0, [r4+r5]
+ mova q0, [r0]
+ mova q1, [r0+r1]
+ mova q2, [r0+2*r1]
+
+ LOAD_MASK p1, p0, q0, q1, aa, bb, m3, t0, t1
+ mova t2, aa
+ psrlw t2, 2
+ paddw t2, m0 ; alpha/4+2
+ DIFF_LT p0, q0, t2, m6, t0 ; m6 = |p0-q0| < alpha/4+2
+ DIFF_LT p2, p0, bb, t1, t0 ; m7 = |p2-p0| < beta
+ DIFF_LT q2, q0, bb, m7, t0 ; t1 = |q2-q0| < beta
+ pand m6, m3
+ pand m7, m6
+ pand m6, t1
+ LUMA_INTRA_P012 p0, p1, p2, [r4], q0, q1, m3, m6, m0, [r4+r5], [r4+2*r1], [r4+r1]
+ LUMA_INTRA_P012 q0, q1, q2, [r0+r5], p0, p1, m3, m7, m0, [r0], [r0+r1], [r0+2*r1]
+ add r0, mmsize
+ add r4, mmsize
+ dec r6
+ jg .loop
+ REP_RET
+
+;-----------------------------------------------------------------------------
+; void deblock_h_luma_intra( uint16_t *pix, int stride, int alpha, int beta )
+;-----------------------------------------------------------------------------
+cglobal deblock_h_luma_intra_10_%1, 4,7,16
+ %define t0 m15
+ %define t1 m14
+ %define t2 m2
+ %define q3 m5
+ %define q2 m8
+ %define q1 m9
+ %define q0 m10
+ %define p0 m11
+ %define p1 m12
+ %define p2 m13
+ %define p3 m4
+ %define spill [rsp]
+ %assign pad 24-(stack_offset&15)
+ SUB rsp, pad
+ lea r4, [r1*4]
+ lea r5, [r1*3] ; 3*stride
+ add r4, r0 ; pix+4*stride
+ mov r6, 2
+ mova m0, [pw_2]
+ shl r2d, 2
+ shl r3d, 2
+.loop
+ movu q3, [r0-8]
+ movu q2, [r0+r1-8]
+ movu q1, [r0+r1*2-8]
+ movu q0, [r0+r5-8]
+ movu p0, [r4-8]
+ movu p1, [r4+r1-8]
+ movu p2, [r4+r1*2-8]
+ movu p3, [r4+r5-8]
+ TRANSPOSE8x8W 5, 8, 9, 10, 11, 12, 13, 4, 1
+
+ LOAD_AB m1, m2, r2d, r3d
+ LOAD_MASK q1, q0, p0, p1, m1, m2, m3, t0, t1
+ psrlw m1, 2
+ paddw m1, m0 ; alpha/4+2
+ DIFF_LT p0, q0, m1, m6, t0 ; m6 = |p0-q0| < alpha/4+2
+ DIFF_LT q2, q0, m2, t1, t0 ; t1 = |q2-q0| < beta
+ DIFF_LT p0, p2, m2, m7, t0 ; m7 = |p2-p0| < beta
+ pand m6, m3
+ pand m7, m6
+ pand m6, t1
+
+ mova spill, q3
+ LUMA_INTRA_P012 q0, q1, q2, q3, p0, p1, m3, m6, m0, m5, m1, q2
+ LUMA_INTRA_P012 p0, p1, p2, p3, q0, q1, m3, m7, m0, p0, m6, p2
+ mova m7, spill
+
+ LUMA_H_INTRA_STORE 7, 8, 1, 5, 11, 6, 13, 4, 14
+
+ lea r0, [r0+r1*8]
+ lea r4, [r4+r1*8]
+ dec r6
+ jg .loop
+ ADD rsp, pad
+ RET
+%endmacro
+
+INIT_XMM
+DEBLOCK_LUMA_INTRA_64 sse2
+INIT_AVX
+DEBLOCK_LUMA_INTRA_64 avx
+
+%endif
+
+%macro DEBLOCK_LUMA_INTRA 1
+;-----------------------------------------------------------------------------
+; void deblock_v_luma_intra( uint16_t *pix, int stride, int alpha, int beta )
+;-----------------------------------------------------------------------------
+cglobal deblock_v_luma_intra_10_%1, 4,7,8*(mmsize/16)
+ LUMA_INTRA_INIT 3
+ lea r4, [r1*4]
+ lea r5, [r1*3]
+ neg r4
+ add r4, r0
+ mov r6, 32/mmsize
+ shl r2d, 2
+ shl r3d, 2
+.loop:
+ mova m0, [r4+r1*2] ; p1
+ mova m1, [r4+r5] ; p0
+ mova m2, [r0] ; q0
+ mova m3, [r0+r1] ; q1
+ LUMA_INTRA_INTER t4, t5, t6, [r4+r1], [r0+r1*2]
+ LUMA_INTRA_P012 m1, m0, t3, [r4], m2, m3, t5, t4, [pw_2], [r4+r5], [r4+2*r1], [r4+r1]
+ mova t3, [r0+r1*2] ; q2
+ LUMA_INTRA_P012 m2, m3, t3, [r0+r5], m1, m0, t5, t6, [pw_2], [r0], [r0+r1], [r0+2*r1]
+ add r0, mmsize
+ add r4, mmsize
+ dec r6
+ jg .loop
+ ADD rsp, pad
+ RET
+
+;-----------------------------------------------------------------------------
+; void deblock_h_luma_intra( uint16_t *pix, int stride, int alpha, int beta )
+;-----------------------------------------------------------------------------
+cglobal deblock_h_luma_intra_10_%1, 4,7,8*(mmsize/16)
+ LUMA_INTRA_INIT 8
+%if mmsize == 8
+ lea r4, [r1*3]
+ mov r5, 32/mmsize
+%else
+ lea r4, [r1*4]
+ lea r5, [r1*3] ; 3*stride
+ add r4, r0 ; pix+4*stride
+ mov r6, 32/mmsize
+%endif
+ shl r2d, 2
+ shl r3d, 2
+.loop:
+ LUMA_H_INTRA_LOAD
+ LUMA_INTRA_INTER t8, t9, t10, t5, t6
+
+ LUMA_INTRA_P012 m1, m0, t3, t4, m2, m3, t9, t8, [pw_2], t8, t5, t11
+ mova t3, t6 ; q2
+ LUMA_INTRA_P012 m2, m3, t3, t7, m1, m0, t9, t10, [pw_2], m4, t6, m5
+
+ mova m2, t4
+ mova m0, t11
+ mova m1, t5
+ mova m3, t8
+ mova m6, t6
+
+ LUMA_H_INTRA_STORE 2, 0, 1, 3, 4, 6, 5, t7, 7
+
+ lea r0, [r0+r1*(mmsize/2)]
+%if mmsize == 8
+ dec r5
+%else
+ lea r4, [r4+r1*(mmsize/2)]
+ dec r6
+%endif
+ jg .loop
+ ADD rsp, pad
+ RET
+%endmacro
+
+%ifndef ARCH_X86_64
+INIT_MMX
+DEBLOCK_LUMA mmxext
+DEBLOCK_LUMA_INTRA mmxext
+INIT_XMM
+DEBLOCK_LUMA sse2
+DEBLOCK_LUMA_INTRA sse2
+INIT_AVX
+DEBLOCK_LUMA avx
+DEBLOCK_LUMA_INTRA avx
+%endif
+
+; in: %1=p0, %2=q0, %3=p1, %4=q1, %5=mask, %6=tmp, %7=tmp
+; out: %1=p0', %2=q0'
+%macro CHROMA_DEBLOCK_P0_Q0_INTRA 7
+ mova %6, [pw_2]
+ paddw %6, %3
+ paddw %6, %4
+ paddw %7, %6, %2
+ paddw %6, %1
+ paddw %6, %3
+ paddw %7, %4
+ psraw %6, 2
+ psraw %7, 2
+ psubw %6, %1
+ psubw %7, %2
+ pand %6, %5
+ pand %7, %5
+ paddw %1, %6
+ paddw %2, %7
+%endmacro
+
+%macro CHROMA_V_LOAD 1
+ mova m0, [r0] ; p1
+ mova m1, [r0+r1] ; p0
+ mova m2, [%1] ; q0
+ mova m3, [%1+r1] ; q1
+%endmacro
+
+%macro CHROMA_V_STORE 0
+ mova [r0+1*r1], m1
+ mova [r0+2*r1], m2
+%endmacro
+
+%macro DEBLOCK_CHROMA 1
+;-----------------------------------------------------------------------------
+; void deblock_v_chroma( uint16_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
+;-----------------------------------------------------------------------------
+cglobal deblock_v_chroma_10_%1, 5,7-(mmsize/16),8*(mmsize/16)
+ mov r5, r0
+ sub r0, r1
+ sub r0, r1
+ shl r2d, 2
+ shl r3d, 2
+%if mmsize < 16
+ mov r6, 16/mmsize
+.loop:
+%endif
+ CHROMA_V_LOAD r5
+ LOAD_AB m4, m5, r2, r3
+ LOAD_MASK m0, m1, m2, m3, m4, m5, m7, m6, m4
+ pxor m4, m4
+ LOAD_TC m6, r4
+ psubw m6, [pw_3]
+ pmaxsw m6, m4
+ pand m7, m6
+ DEBLOCK_P0_Q0 m1, m2, m0, m3, m7, m5, m6
+ CHROMA_V_STORE
+%if mmsize < 16
+ add r0, mmsize
+ add r5, mmsize
+ add r4, mmsize/8
+ dec r6
+ jg .loop
+ REP_RET
+%else
+ RET
+%endif
+
+;-----------------------------------------------------------------------------
+; void deblock_v_chroma_intra( uint16_t *pix, int stride, int alpha, int beta )
+;-----------------------------------------------------------------------------
+cglobal deblock_v_chroma_intra_10_%1, 4,6-(mmsize/16),8*(mmsize/16)
+ mov r4, r0
+ sub r0, r1
+ sub r0, r1
+ shl r2d, 2
+ shl r3d, 2
+%if mmsize < 16
+ mov r5, 16/mmsize
+.loop:
+%endif
+ CHROMA_V_LOAD r4
+ LOAD_AB m4, m5, r2, r3
+ LOAD_MASK m0, m1, m2, m3, m4, m5, m7, m6, m4
+ CHROMA_DEBLOCK_P0_Q0_INTRA m1, m2, m0, m3, m7, m5, m6
+ CHROMA_V_STORE
+%if mmsize < 16
+ add r0, mmsize
+ add r4, mmsize
+ dec r5
+ jg .loop
+ REP_RET
+%else
+ RET
+%endif
+%endmacro
+
+%ifndef ARCH_X86_64
+INIT_MMX
+DEBLOCK_CHROMA mmxext
+%endif
+INIT_XMM
+DEBLOCK_CHROMA sse2
+INIT_AVX
+DEBLOCK_CHROMA avx
diff --git a/libavcodec/x86/h264_idct.asm b/libavcodec/x86/h264_idct.asm
index fdb35003a8..e90b0b1186 100644
--- a/libavcodec/x86/h264_idct.asm
+++ b/libavcodec/x86/h264_idct.asm
@@ -23,7 +23,7 @@
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
-;* 51, Inc., Foundation Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;*****************************************************************************
%include "x86inc.asm"
diff --git a/libavcodec/x86/h264_intrapred.asm b/libavcodec/x86/h264_intrapred.asm
index 28c2f399c3..191eb8ddf2 100644
--- a/libavcodec/x86/h264_intrapred.asm
+++ b/libavcodec/x86/h264_intrapred.asm
@@ -19,7 +19,7 @@
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
-;* 51, Inc., Foundation Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "x86inc.asm"
diff --git a/libavcodec/x86/h264_weight.asm b/libavcodec/x86/h264_weight.asm
index 53aa210473..87544ae288 100644
--- a/libavcodec/x86/h264_weight.asm
+++ b/libavcodec/x86/h264_weight.asm
@@ -18,7 +18,7 @@
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
-;* 51, Inc., Foundation Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "x86inc.asm"
diff --git a/libavcodec/x86/h264dsp_mmx.c b/libavcodec/x86/h264dsp_mmx.c
index 7657a85890..b331f94b5e 100644
--- a/libavcodec/x86/h264dsp_mmx.c
+++ b/libavcodec/x86/h264dsp_mmx.c
@@ -218,41 +218,57 @@ static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40]
);
}
-#define LF_FUNC(DIR, TYPE, OPT) \
-void ff_x264_deblock_ ## DIR ## _ ## TYPE ## _ ## OPT (uint8_t *pix, int stride, \
- int alpha, int beta, int8_t *tc0);
-#define LF_IFUNC(DIR, TYPE, OPT) \
-void ff_x264_deblock_ ## DIR ## _ ## TYPE ## _ ## OPT (uint8_t *pix, int stride, \
- int alpha, int beta);
-
-LF_FUNC (h, chroma, mmxext)
-LF_IFUNC(h, chroma_intra, mmxext)
-LF_FUNC (v, chroma, mmxext)
-LF_IFUNC(v, chroma_intra, mmxext)
-
-LF_FUNC (h, luma, mmxext)
-LF_IFUNC(h, luma_intra, mmxext)
-#if HAVE_YASM && ARCH_X86_32
-LF_FUNC (v8, luma, mmxext)
-static void ff_x264_deblock_v_luma_mmxext(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
+#define LF_FUNC(DIR, TYPE, DEPTH, OPT) \
+void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *pix, int stride, \
+ int alpha, int beta, int8_t *tc0);
+#define LF_IFUNC(DIR, TYPE, DEPTH, OPT) \
+void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *pix, int stride, \
+ int alpha, int beta);
+
+#define LF_FUNCS(type, depth)\
+LF_FUNC (h, chroma, depth, mmxext)\
+LF_IFUNC(h, chroma_intra, depth, mmxext)\
+LF_FUNC (v, chroma, depth, mmxext)\
+LF_IFUNC(v, chroma_intra, depth, mmxext)\
+LF_FUNC (h, luma, depth, mmxext)\
+LF_IFUNC(h, luma_intra, depth, mmxext)\
+LF_FUNC (h, luma, depth, sse2)\
+LF_IFUNC(h, luma_intra, depth, sse2)\
+LF_FUNC (v, luma, depth, sse2)\
+LF_IFUNC(v, luma_intra, depth, sse2)\
+LF_FUNC (h, chroma, depth, sse2)\
+LF_IFUNC(h, chroma_intra, depth, sse2)\
+LF_FUNC (v, chroma, depth, sse2)\
+LF_IFUNC(v, chroma_intra, depth, sse2)\
+LF_FUNC (h, luma, depth, avx)\
+LF_IFUNC(h, luma_intra, depth, avx)\
+LF_FUNC (v, luma, depth, avx)\
+LF_IFUNC(v, luma_intra, depth, avx)\
+LF_FUNC (h, chroma, depth, avx)\
+LF_IFUNC(h, chroma_intra, depth, avx)\
+LF_FUNC (v, chroma, depth, avx)\
+LF_IFUNC(v, chroma_intra, depth, avx)
+
+LF_FUNCS( uint8_t, 8)
+LF_FUNCS(uint16_t, 10)
+
+LF_FUNC (v8, luma, 8, mmxext)
+static void ff_deblock_v_luma_8_mmxext(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
if((tc0[0] & tc0[1]) >= 0)
- ff_x264_deblock_v8_luma_mmxext(pix+0, stride, alpha, beta, tc0);
+ ff_deblock_v8_luma_8_mmxext(pix+0, stride, alpha, beta, tc0);
if((tc0[2] & tc0[3]) >= 0)
- ff_x264_deblock_v8_luma_mmxext(pix+8, stride, alpha, beta, tc0+2);
+ ff_deblock_v8_luma_8_mmxext(pix+8, stride, alpha, beta, tc0+2);
}
-LF_IFUNC(v8, luma_intra, mmxext)
-static void ff_x264_deblock_v_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta)
+LF_IFUNC(v8, luma_intra, 8, mmxext)
+static void ff_deblock_v_luma_intra_8_mmxext(uint8_t *pix, int stride, int alpha, int beta)
{
- ff_x264_deblock_v8_luma_intra_mmxext(pix+0, stride, alpha, beta);
- ff_x264_deblock_v8_luma_intra_mmxext(pix+8, stride, alpha, beta);
+ ff_deblock_v8_luma_intra_8_mmxext(pix+0, stride, alpha, beta);
+ ff_deblock_v8_luma_intra_8_mmxext(pix+8, stride, alpha, beta);
}
-#endif
-LF_FUNC (h, luma, sse2)
-LF_IFUNC(h, luma_intra, sse2)
-LF_FUNC (v, luma, sse2)
-LF_IFUNC(v, luma_intra, sse2)
+LF_FUNC (v, luma, 10, mmxext)
+LF_IFUNC(v, luma_intra, 10, mmxext)
/***********************************/
/* weighted prediction */
@@ -314,15 +330,15 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth)
c->h264_idct_add8 = ff_h264_idct_add8_mmx2;
c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx2;
- c->h264_v_loop_filter_chroma= ff_x264_deblock_v_chroma_mmxext;
- c->h264_h_loop_filter_chroma= ff_x264_deblock_h_chroma_mmxext;
- c->h264_v_loop_filter_chroma_intra= ff_x264_deblock_v_chroma_intra_mmxext;
- c->h264_h_loop_filter_chroma_intra= ff_x264_deblock_h_chroma_intra_mmxext;
+ c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_8_mmxext;
+ c->h264_h_loop_filter_chroma= ff_deblock_h_chroma_8_mmxext;
+ c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_8_mmxext;
+ c->h264_h_loop_filter_chroma_intra= ff_deblock_h_chroma_intra_8_mmxext;
#if ARCH_X86_32
- c->h264_v_loop_filter_luma= ff_x264_deblock_v_luma_mmxext;
- c->h264_h_loop_filter_luma= ff_x264_deblock_h_luma_mmxext;
- c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_mmxext;
- c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_mmxext;
+ c->h264_v_loop_filter_luma= ff_deblock_v_luma_8_mmxext;
+ c->h264_h_loop_filter_luma= ff_deblock_h_luma_8_mmxext;
+ c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_mmxext;
+ c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_mmxext;
#endif
c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
@@ -360,10 +376,10 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth)
c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_sse2;
#if HAVE_ALIGNED_STACK
- c->h264_v_loop_filter_luma = ff_x264_deblock_v_luma_sse2;
- c->h264_h_loop_filter_luma = ff_x264_deblock_h_luma_sse2;
- c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_sse2;
- c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_sse2;
+ c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_sse2;
+ c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_sse2;
+ c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_sse2;
+ c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_sse2;
#endif
c->h264_idct_add16 = ff_h264_idct_add16_sse2;
@@ -377,6 +393,49 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth)
c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_ssse3;
c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_ssse3;
}
+ if (mm_flags&AV_CPU_FLAG_AVX) {
+#if HAVE_ALIGNED_STACK
+ c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_avx;
+ c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_avx;
+ c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_avx;
+ c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_avx;
+#endif
+ }
+ }
+ }
+#endif
+ } else if (bit_depth == 10) {
+#if HAVE_YASM
+ if (mm_flags & AV_CPU_FLAG_MMX) {
+ if (mm_flags & AV_CPU_FLAG_MMX2) {
+#if ARCH_X86_32
+ c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_10_mmxext;
+ c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_10_mmxext;
+ c->h264_v_loop_filter_luma= ff_deblock_v_luma_10_mmxext;
+ c->h264_h_loop_filter_luma= ff_deblock_h_luma_10_mmxext;
+ c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_mmxext;
+ c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_mmxext;
+#endif
+ if (mm_flags&AV_CPU_FLAG_SSE2) {
+ c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_10_sse2;
+ c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_10_sse2;
+#if HAVE_ALIGNED_STACK
+ c->h264_v_loop_filter_luma = ff_deblock_v_luma_10_sse2;
+ c->h264_h_loop_filter_luma = ff_deblock_h_luma_10_sse2;
+ c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_sse2;
+ c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_sse2;
+#endif
+ }
+ if (mm_flags&AV_CPU_FLAG_AVX) {
+ c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_10_avx;
+ c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_10_avx;
+#if HAVE_ALIGNED_STACK
+ c->h264_v_loop_filter_luma = ff_deblock_v_luma_10_avx;
+ c->h264_h_loop_filter_luma = ff_deblock_h_luma_10_avx;
+ c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_avx;
+ c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_avx;
+#endif
+ }
}
}
#endif
diff --git a/libavcodec/x86/vc1dsp_yasm.asm b/libavcodec/x86/vc1dsp_yasm.asm
index 3ea9d8db47..a353c5f9ed 100644
--- a/libavcodec/x86/vc1dsp_yasm.asm
+++ b/libavcodec/x86/vc1dsp_yasm.asm
@@ -16,7 +16,7 @@
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
-;* 51, Inc., Foundation Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "x86inc.asm"
diff --git a/libavcodec/x86/vp3dsp.asm b/libavcodec/x86/vp3dsp.asm
index f2b0af3266..328f94ca60 100644
--- a/libavcodec/x86/vp3dsp.asm
+++ b/libavcodec/x86/vp3dsp.asm
@@ -16,7 +16,7 @@
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
-;* 51, Inc., Foundation Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "x86inc.asm"
diff --git a/libavcodec/x86/vp56dsp.asm b/libavcodec/x86/vp56dsp.asm
index 0543ba00ce..6da3ce30e3 100644
--- a/libavcodec/x86/vp56dsp.asm
+++ b/libavcodec/x86/vp56dsp.asm
@@ -17,7 +17,7 @@
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
-;* 51, Inc., Foundation Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "x86inc.asm"
diff --git a/libavcodec/x86/vp8dsp.asm b/libavcodec/x86/vp8dsp.asm
index bc5ccc8e3a..9b175c1488 100644
--- a/libavcodec/x86/vp8dsp.asm
+++ b/libavcodec/x86/vp8dsp.asm
@@ -17,7 +17,7 @@
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
-;* 51, Inc., Foundation Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "x86inc.asm"
diff --git a/libavcodec/x86/x86util.asm b/libavcodec/x86/x86util.asm
index b28a6198f7..7e5b67419a 100644
--- a/libavcodec/x86/x86util.asm
+++ b/libavcodec/x86/x86util.asm
@@ -20,20 +20,24 @@
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
-;* 51, Inc., Foundation Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%macro SBUTTERFLY 4
+%if avx_enabled == 0
mova m%4, m%2
punpckl%1 m%2, m%3
punpckh%1 m%4, m%3
+%else
+ punpckh%1 m%4, m%2, m%3
+ punpckl%1 m%2, m%3
+%endif
SWAP %3, %4
%endmacro
%macro SBUTTERFLY2 4
- mova m%4, m%2
- punpckh%1 m%2, m%3
- punpckl%1 m%4, m%3
+ punpckl%1 m%4, m%2, m%3
+ punpckh%1 m%2, m%2, m%3
SWAP %2, %4, %3
%endmacro
@@ -444,3 +448,17 @@
%macro PMINUB_MMXEXT 3 ; dst, src, ignored
pminub %1, %2
%endmacro
+
+%macro SPLATW 2-3 0
+%if mmsize == 16
+ pshuflw %1, %2, (%3)*0x55
+ punpcklqdq %1, %1
+%else
+ pshufw %1, %2, (%3)*0x55
+%endif
+%endmacro
+
+%macro CLIPW 3 ;(dst, min, max)
+ pmaxsw %1, %2
+ pminsw %1, %3
+%endmacro
diff --git a/libavcodec/xan.c b/libavcodec/xan.c
index 520331634c..f5d1812aec 100644
--- a/libavcodec/xan.c
+++ b/libavcodec/xan.c
@@ -91,6 +91,8 @@ static av_cold int xan_decode_init(AVCodecContext *avctx)
av_freep(&s->buffer1);
return AVERROR(ENOMEM);
}
+ avcodec_get_frame_defaults(&s->last_frame);
+ avcodec_get_frame_defaults(&s->current_frame);
return 0;
}
diff --git a/libavcodec/xl.c b/libavcodec/xl.c
index 7fbe626d58..7f3b0775c0 100644
--- a/libavcodec/xl.c
+++ b/libavcodec/xl.c
@@ -60,7 +60,7 @@ static int decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
- p->pict_type= FF_I_TYPE;
+ p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
Y = a->pic.data[0];
@@ -121,8 +121,9 @@ static int decode_frame(AVCodecContext *avctx,
}
static av_cold int decode_init(AVCodecContext *avctx){
-// VideoXLContext * const a = avctx->priv_data;
+ VideoXLContext * const a = avctx->priv_data;
+ avcodec_get_frame_defaults(&a->pic);
avctx->pix_fmt= PIX_FMT_YUV411P;
return 0;
diff --git a/libavcodec/xsubdec.c b/libavcodec/xsubdec.c
index d7babe4cd2..9289a2554e 100644
--- a/libavcodec/xsubdec.c
+++ b/libavcodec/xsubdec.c
@@ -51,7 +51,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
AVSubtitle *sub = data;
const uint8_t *buf_end = buf + buf_size;
uint8_t *bitmap;
- int w, h, x, y, rlelen, i;
+ int w, h, x, y, i;
int64_t packet_time = 0;
GetBitContext gb;
int has_alpha = avctx->codec_tag == MKTAG('D','X','S','A');
@@ -83,7 +83,11 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
// skip bottom right position, it gives no new information
bytestream_get_le16(&buf);
bytestream_get_le16(&buf);
- rlelen = bytestream_get_le16(&buf);
+ // The following value is supposed to indicate the start offset
+ // (relative to the palette) of the data for the second field,
+ // however there are files where it has a bogus value and thus
+ // we just ignore it
+ bytestream_get_le16(&buf);
// allocate sub and set values
sub->rects = av_mallocz(sizeof(*sub->rects));
@@ -105,8 +109,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
((uint32_t*)sub->rects[0]->pict.data[1])[i] |= (has_alpha ? *buf++ : (i ? 0xff : 0)) << 24;
// process RLE-compressed data
- rlelen = FFMIN(rlelen, buf_end - buf);
- init_get_bits(&gb, buf, rlelen * 8);
+ init_get_bits(&gb, buf, (buf_end - buf) * 8);
bitmap = sub->rects[0]->pict.data[0];
for (y = 0; y < h; y++) {
// interlaced: do odd lines
diff --git a/libavcodec/xsubenc.c b/libavcodec/xsubenc.c
index c448a2271c..a7e3a891d4 100644
--- a/libavcodec/xsubenc.c
+++ b/libavcodec/xsubenc.c
@@ -129,7 +129,7 @@ static int xsub_encode(AVCodecContext *avctx, unsigned char *buf,
}
// TODO: support multiple rects
- if (h->num_rects > 1)
+ if (h->num_rects != 1)
av_log(avctx, AV_LOG_WARNING, "Only single rects supported (%d in subtitle.)\n", h->num_rects);
// TODO: render text-based subtitles into bitmaps
diff --git a/libavcodec/yop.c b/libavcodec/yop.c
index a117e2d791..45a3344b9e 100644
--- a/libavcodec/yop.c
+++ b/libavcodec/yop.c
@@ -92,6 +92,7 @@ static av_cold int yop_decode_init(AVCodecContext *avctx)
avctx->pix_fmt = PIX_FMT_PAL8;
+ avcodec_get_frame_defaults(&s->frame);
s->num_pal_colors = avctx->extradata[0];
s->first_color[0] = avctx->extradata[1];
s->first_color[1] = avctx->extradata[2];
diff --git a/libavcodec/zmbv.c b/libavcodec/zmbv.c
index f660cd4693..4bd159cc44 100644
--- a/libavcodec/zmbv.c
+++ b/libavcodec/zmbv.c
@@ -500,11 +500,11 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
}
if(c->flags & ZMBV_KEYFRAME) {
c->pic.key_frame = 1;
- c->pic.pict_type = FF_I_TYPE;
+ c->pic.pict_type = AV_PICTURE_TYPE_I;
c->decode_intra(c);
} else {
c->pic.key_frame = 0;
- c->pic.pict_type = FF_P_TYPE;
+ c->pic.pict_type = AV_PICTURE_TYPE_P;
if(c->decomp_len)
c->decode_xor(c);
}
@@ -599,6 +599,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
c->width = avctx->width;
c->height = avctx->height;
+ avcodec_get_frame_defaults(&c->pic);
c->bpp = avctx->bits_per_coded_sample;
diff --git a/libavcodec/zmbvenc.c b/libavcodec/zmbvenc.c
index b830bb4a8e..55aa7b936b 100644
--- a/libavcodec/zmbvenc.c
+++ b/libavcodec/zmbvenc.c
@@ -134,7 +134,7 @@ static int encode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size, void
if(c->curfrm == c->keyint)
c->curfrm = 0;
*p = *pict;
- p->pict_type= keyframe ? FF_I_TYPE : FF_P_TYPE;
+ p->pict_type= keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
p->key_frame= keyframe;
chpal = !keyframe && memcmp(p->data[1], c->pal2, 1024);
diff --git a/libavdevice/bktr.c b/libavdevice/bktr.c
index fb5c45c7b6..6378ce7873 100644
--- a/libavdevice/bktr.c
+++ b/libavdevice/bktr.c
@@ -24,10 +24,6 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#define _BSD_SOURCE 1
-#define _NETBSD_SOURCE
-#define _XOPEN_SOURCE 600
-
#include "libavformat/avformat.h"
#if HAVE_DEV_BKTR_IOCTL_METEOR_H && HAVE_DEV_BKTR_IOCTL_BT848_H
# include <dev/bktr/ioctl_meteor.h>
diff --git a/libavdevice/dv1394.c b/libavdevice/dv1394.c
index 588f35662b..a9bc058dc9 100644
--- a/libavdevice/dv1394.c
+++ b/libavdevice/dv1394.c
@@ -202,7 +202,7 @@ restart_poll:
size = dv_produce_packet(dv->dv_demux, pkt,
dv->ring + (dv->index * DV1394_PAL_FRAME_SIZE),
- DV1394_PAL_FRAME_SIZE);
+ DV1394_PAL_FRAME_SIZE, -1);
dv->index = (dv->index + 1) % DV1394_RING_FRAMES;
dv->done++; dv->avail--;
diff --git a/libavdevice/x11grab.c b/libavdevice/x11grab.c
index fdc8a6d22f..090af4930f 100644
--- a/libavdevice/x11grab.c
+++ b/libavdevice/x11grab.c
@@ -35,8 +35,6 @@
* and Edouard Gomez <ed.gomez@free.fr>.
*/
-#define _XOPEN_SOURCE 600
-
#include "config.h"
#include "libavformat/avformat.h"
#include <time.h>
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index bf28f9aa54..de34089468 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -6,7 +6,7 @@ FFLIBS-$(CONFIG_MOVIE_FILTER) += avformat avcodec
FFLIBS-$(CONFIG_SCALE_FILTER) += swscale
FFLIBS-$(CONFIG_MP_FILTER) += avcodec
-HEADERS = avfilter.h avfiltergraph.h
+HEADERS = avcodec.h avfilter.h avfiltergraph.h
OBJS = allfilters.o \
avfilter.o \
@@ -16,6 +16,8 @@ OBJS = allfilters.o \
formats.o \
graphparser.o \
+OBJS-$(CONFIG_AVCODEC) += avcodec.o
+
OBJS-$(CONFIG_ANULL_FILTER) += af_anull.o
OBJS-$(CONFIG_ANULLSRC_FILTER) += asrc_anullsrc.o
diff --git a/libavfilter/avcodec.c b/libavfilter/avcodec.c
new file mode 100644
index 0000000000..c2f8651106
--- /dev/null
+++ b/libavfilter/avcodec.c
@@ -0,0 +1,42 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * libavcodec/libavfilter gluing utilities
+ */
+
+#include "avcodec.h"
+
+void avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src)
+{
+ dst->pts = src->pts;
+ dst->pos = src->pkt_pos;
+ dst->format = src->format;
+
+ switch (dst->type) {
+ case AVMEDIA_TYPE_VIDEO:
+ dst->video->w = src->width;
+ dst->video->h = src->height;
+ dst->video->sample_aspect_ratio = src->sample_aspect_ratio;
+ dst->video->interlaced = src->interlaced_frame;
+ dst->video->top_field_first = src->top_field_first;
+ dst->video->key_frame = src->key_frame;
+ dst->video->pict_type = src->pict_type;
+ }
+}
diff --git a/libavfilter/avcodec.h b/libavfilter/avcodec.h
new file mode 100644
index 0000000000..f438860d0b
--- /dev/null
+++ b/libavfilter/avcodec.h
@@ -0,0 +1,40 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_AVCODEC_H
+#define AVFILTER_AVCODEC_H
+
+/**
+ * @file
+ * libavcodec/libavfilter gluing utilities
+ *
+ * This should be included in an application ONLY if the installed
+ * libavfilter has been compiled with libavcodec support, otherwise
+ * symbols defined below will not be available.
+ */
+
+#include "libavcodec/avcodec.h" // AVFrame
+#include "avfilter.h"
+
+/**
+ * Copy the frame properties of src to dst, without copying the actual
+ * image data.
+ */
+void avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src);
+
+#endif /* AVFILTER_AVCODEC_H */
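
A hedged usage sketch for the new helper; frame_to_ref and the surrounding code are illustrative, only avfilter_copy_frame_props() comes from the patch, and the same call sequence appears in the vsrc_movie.c hunk further down. The idea is to copy a decoded frame's pixels into a buffer on a link and carry its properties across in one call.

#include "libavfilter/avcodec.h"
#include "libavutil/imgutils.h"

static AVFilterBufferRef *frame_to_ref(AVFilterLink *link, AVFrame *frame)
{
    AVFilterBufferRef *ref = avfilter_get_video_buffer(link, AV_PERM_WRITE,
                                                       link->w, link->h);
    if (!ref)
        return NULL;
    av_image_copy(ref->data, ref->linesize,
                  frame->data, frame->linesize,
                  ref->format, link->w, link->h);
    avfilter_copy_frame_props(ref, frame);  /* pts, pos, SAR, interlacing, key_frame, pict_type */
    return ref;
}
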
diff --git a/libavfilter/avfilter.c b/libavfilter/avfilter.c
index 443562b2f4..72e0a87f8e 100644
--- a/libavfilter/avfilter.c
+++ b/libavfilter/avfilter.c
@@ -25,6 +25,7 @@
#include "libavutil/rational.h"
#include "libavutil/audioconvert.h"
#include "libavutil/imgutils.h"
+#include "libavutil/avassert.h"
#include "avfilter.h"
#include "internal.h"
@@ -69,14 +70,47 @@ AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask)
return ret;
}
+static void store_in_pool(AVFilterBufferRef *ref)
+{
+ int i;
+ AVFilterPool *pool= ref->buf->priv;
+
+ av_assert0(ref->buf->data[0]);
+
+ if(pool->count == POOL_SIZE){
+ AVFilterBufferRef *ref1= pool->pic[0];
+ av_freep(&ref1->video);
+ av_freep(&ref1->audio);
+ av_freep(&ref1->buf->data[0]);
+ av_freep(&ref1->buf);
+ av_free(ref1);
+ memmove(&pool->pic[0], &pool->pic[1], sizeof(void*)*(POOL_SIZE-1));
+ pool->count--;
+ pool->pic[POOL_SIZE-1] = NULL;
+ }
+
+ for(i=0; i<POOL_SIZE; i++){
+ if(!pool->pic[i]){
+ pool->pic[i]= ref;
+ pool->count++;
+ break;
+ }
+ }
+}
+
void avfilter_unref_buffer(AVFilterBufferRef *ref)
{
if (!ref)
return;
- if (!(--ref->buf->refcount))
+ if (!(--ref->buf->refcount)){
+ if(!ref->buf->free){
+ store_in_pool(ref);
+ return;
+ }
ref->buf->free(ref->buf);
- av_free(ref->video);
- av_free(ref->audio);
+ }
+ av_freep(&ref->video);
+ av_freep(&ref->audio);
av_free(ref);
}
@@ -238,7 +272,7 @@ static void ff_dlog_ref(void *ctx, AVFilterBufferRef *ref, int end)
if (ref->video) {
av_dlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
- ref->video->pixel_aspect.num, ref->video->pixel_aspect.den,
+ ref->video->sample_aspect_ratio.num, ref->video->sample_aspect_ratio.den,
ref->video->w, ref->video->h,
!ref->video->interlaced ? 'P' : /* Progressive */
ref->video->top_field_first ? 'T' : 'B', /* Top / Bottom */
@@ -585,28 +619,53 @@ int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *in
return AVERROR(EINVAL);
ret = av_mallocz(sizeof(AVFilterContext));
+ if (!ret)
+ return AVERROR(ENOMEM);
ret->av_class = &avfilter_class;
ret->filter = filter;
ret->name = inst_name ? av_strdup(inst_name) : NULL;
- ret->priv = av_mallocz(filter->priv_size);
+ if (filter->priv_size) {
+ ret->priv = av_mallocz(filter->priv_size);
+ if (!ret->priv)
+ goto err;
+ }
ret->input_count = pad_count(filter->inputs);
if (ret->input_count) {
ret->input_pads = av_malloc(sizeof(AVFilterPad) * ret->input_count);
+ if (!ret->input_pads)
+ goto err;
memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->input_count);
ret->inputs = av_mallocz(sizeof(AVFilterLink*) * ret->input_count);
+ if (!ret->inputs)
+ goto err;
}
ret->output_count = pad_count(filter->outputs);
if (ret->output_count) {
ret->output_pads = av_malloc(sizeof(AVFilterPad) * ret->output_count);
+ if (!ret->output_pads)
+ goto err;
memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->output_count);
ret->outputs = av_mallocz(sizeof(AVFilterLink*) * ret->output_count);
+ if (!ret->outputs)
+ goto err;
}
*filter_ctx = ret;
return 0;
+
+err:
+ av_freep(&ret->inputs);
+ av_freep(&ret->input_pads);
+ ret->input_count = 0;
+ av_freep(&ret->outputs);
+ av_freep(&ret->output_pads);
+ ret->output_count = 0;
+ av_freep(&ret->priv);
+ av_free(ret);
+ return AVERROR(ENOMEM);
}
void avfilter_free(AVFilterContext *filter)
diff --git a/libavfilter/avfilter.h b/libavfilter/avfilter.h
index d65d48d901..8251f2bf0e 100644
--- a/libavfilter/avfilter.h
+++ b/libavfilter/avfilter.h
@@ -26,7 +26,7 @@
#include "libavutil/samplefmt.h"
#define LIBAVFILTER_VERSION_MAJOR 1
-#define LIBAVFILTER_VERSION_MINOR 77
+#define LIBAVFILTER_VERSION_MINOR 78
#define LIBAVFILTER_VERSION_MICRO 0
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
@@ -112,7 +112,7 @@ typedef struct AVFilterBufferRefAudioProps {
typedef struct AVFilterBufferRefVideoProps {
int w; ///< image width
int h; ///< image height
- AVRational pixel_aspect; ///< pixel aspect ratio
+ AVRational sample_aspect_ratio; ///< sample aspect ratio
int interlaced; ///< is frame interlaced
int top_field_first; ///< field order
enum AVPictureType pict_type; ///< picture type of the frame
@@ -619,6 +619,8 @@ struct AVFilterLink {
* input link is assumed to be an unchangeable property.
*/
AVRational time_base;
+
+ struct AVFilterPool *pool;
};
/**
diff --git a/libavfilter/defaults.c b/libavfilter/defaults.c
index 1da2630471..9ee23e57b7 100644
--- a/libavfilter/defaults.c
+++ b/libavfilter/defaults.c
@@ -25,7 +25,6 @@
#include "avfilter.h"
#include "internal.h"
-/* TODO: buffer pool. see comment for avfilter_default_get_video_buffer() */
void ff_avfilter_default_free_buffer(AVFilterBuffer *ptr)
{
av_free(ptr->data[0]);
@@ -39,10 +38,30 @@ AVFilterBufferRef *avfilter_default_get_video_buffer(AVFilterLink *link, int per
{
int linesize[4];
uint8_t *data[4];
+ int i;
AVFilterBufferRef *picref = NULL;
+ AVFilterPool *pool= link->pool;
+
+ if(pool) for(i=0; i<POOL_SIZE; i++){
+ picref= pool->pic[i];
+ if(picref && picref->buf->format == link->format && picref->buf->w == w && picref->buf->h == h){
+ AVFilterBuffer *pic= picref->buf;
+ pool->pic[i]= NULL;
+ pool->count--;
+ picref->video->w = w;
+ picref->video->h = h;
+ picref->perms = perms | AV_PERM_READ;
+ picref->format= link->format;
+ pic->refcount = 1;
+ memcpy(picref->data, pic->data, sizeof(picref->data));
+ memcpy(picref->linesize, pic->linesize, sizeof(picref->linesize));
+ return picref;
+ }
+ }else
+ pool = link->pool = av_mallocz(sizeof(AVFilterPool));
// +2 is needed for swscaler, +16 to be SIMD-friendly
- if (av_image_alloc(data, linesize, w, h, link->format, 16) < 0)
+ if ((i=av_image_alloc(data, linesize, w, h, link->format, 16)) < 0)
return NULL;
picref = avfilter_get_video_buffer_ref_from_arrays(data, linesize,
@@ -51,6 +70,10 @@ AVFilterBufferRef *avfilter_default_get_video_buffer(AVFilterLink *link, int per
av_free(data[0]);
return NULL;
}
+ memset(data[0], 128, i);
+
+ picref->buf->priv= pool;
+ picref->buf->free= NULL;
return picref;
}
diff --git a/libavfilter/internal.h b/libavfilter/internal.h
index 188da87099..159e979168 100644
--- a/libavfilter/internal.h
+++ b/libavfilter/internal.h
@@ -27,6 +27,12 @@
#include "avfilter.h"
#include "avfiltergraph.h"
+#define POOL_SIZE 32
+typedef struct AVFilterPool {
+ AVFilterBufferRef *pic[POOL_SIZE];
+ int count;
+}AVFilterPool;
+
/**
* Check for the validity of graph.
*
diff --git a/libavfilter/libmpcodecs/mp_image.h b/libavfilter/libmpcodecs/mp_image.h
index 3d566af693..50d3fa19a6 100644
--- a/libavfilter/libmpcodecs/mp_image.h
+++ b/libavfilter/libmpcodecs/mp_image.h
@@ -33,6 +33,7 @@
#undef rand
#undef srand
#undef printf
+#undef strncpy
#define ASMALIGN(ZEROBITS) ".p2align " #ZEROBITS "\n\t"
diff --git a/libavfilter/vf_aspect.c b/libavfilter/vf_aspect.c
index 95900d15a9..3b4a57cf58 100644
--- a/libavfilter/vf_aspect.c
+++ b/libavfilter/vf_aspect.c
@@ -65,7 +65,7 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
{
AspectContext *aspect = link->dst->priv;
- picref->video->pixel_aspect = aspect->aspect;
+ picref->video->sample_aspect_ratio = aspect->aspect;
avfilter_start_frame(link->dst->outputs[0], picref);
}
diff --git a/libavfilter/vf_drawtext.c b/libavfilter/vf_drawtext.c
index 99045b7b4b..b26029bb8f 100644
--- a/libavfilter/vf_drawtext.c
+++ b/libavfilter/vf_drawtext.c
@@ -45,17 +45,13 @@
#include FT_FREETYPE_H
#include FT_GLYPH_H
-#define MAX_EXPANDED_TEXT_SIZE 2048
-
typedef struct {
const AVClass *class;
- char *fontfile; ///< font to be used
- char *text; ///< text to be drawn
+ uint8_t *fontfile; ///< font to be used
+ uint8_t *text; ///< text to be drawn
+ uint8_t *text_priv; ///< used to detect whether text changed
int ft_load_flags; ///< flags used for loading fonts, see FT_LOAD_*
- /** buffer containing the text expanded by strftime */
- char expanded_text[MAX_EXPANDED_TEXT_SIZE];
- /** positions for each element in the text */
- FT_Vector positions[MAX_EXPANDED_TEXT_SIZE];
+ FT_Vector *positions; ///< positions for each element in the text
char *textfile; ///< file with text to be drawn
unsigned int x; ///< x position to start drawing text
unsigned int y; ///< y position to start drawing text
@@ -157,9 +153,10 @@ typedef struct {
int bitmap_top;
} Glyph;
-static int glyph_cmp(const Glyph *a, const Glyph *b)
+static int glyph_cmp(void *key, const void *b)
{
- int64_t diff = (int64_t)a->code - (int64_t)b->code;
+ const Glyph *a = key, *bb = b;
+ int64_t diff = (int64_t)a->code - (int64_t)bb->code;
return diff > 0 ? 1 : diff < 0 ? -1 : 0;
}
@@ -169,21 +166,26 @@ static int glyph_cmp(const Glyph *a, const Glyph *b)
static int load_glyph(AVFilterContext *ctx, Glyph **glyph_ptr, uint32_t code)
{
DrawTextContext *dtext = ctx->priv;
- Glyph *glyph = av_mallocz(sizeof(Glyph));
+ Glyph *glyph;
struct AVTreeNode *node = NULL;
int ret;
/* load glyph into dtext->face->glyph */
- ret = FT_Load_Char(dtext->face, code, dtext->ft_load_flags);
- if (ret)
+ if (FT_Load_Char(dtext->face, code, dtext->ft_load_flags))
return AVERROR(EINVAL);
/* save glyph */
+ if (!(glyph = av_mallocz(sizeof(*glyph))) ||
+ !(glyph->glyph = av_mallocz(sizeof(*glyph->glyph)))) {
+ ret = AVERROR(ENOMEM);
+ goto error;
+ }
glyph->code = code;
- glyph->glyph = av_mallocz(sizeof(FT_Glyph));
- ret = FT_Get_Glyph(dtext->face->glyph, glyph->glyph);
- if (ret)
- return AVERROR(EINVAL);
+
+ if (FT_Get_Glyph(dtext->face->glyph, glyph->glyph)) {
+ ret = AVERROR(EINVAL);
+ goto error;
+ }
glyph->bitmap = dtext->face->glyph->bitmap;
glyph->bitmap_left = dtext->face->glyph->bitmap_left;
@@ -194,19 +196,29 @@ static int load_glyph(AVFilterContext *ctx, Glyph **glyph_ptr, uint32_t code)
FT_Glyph_Get_CBox(*glyph->glyph, ft_glyph_bbox_pixels, &glyph->bbox);
/* cache the newly created glyph */
- if (!node)
- node = av_mallocz(av_tree_node_size);
- av_tree_insert(&dtext->glyphs, glyph, (void *)glyph_cmp, &node);
+ if (!(node = av_mallocz(av_tree_node_size))) {
+ ret = AVERROR(ENOMEM);
+ goto error;
+ }
+ av_tree_insert(&dtext->glyphs, glyph, glyph_cmp, &node);
if (glyph_ptr)
*glyph_ptr = glyph;
return 0;
+
+error:
+ if (glyph)
+ av_freep(&glyph->glyph);
+ av_freep(&glyph);
+ av_freep(&node);
+ return ret;
}
static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
{
int err;
DrawTextContext *dtext = ctx->priv;
+ Glyph *glyph;
dtext->class = &drawtext_class;
av_opt_set_defaults2(dtext, 0, 0);
@@ -294,14 +306,15 @@ static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
/* load the fallback glyph with code 0 */
load_glyph(ctx, NULL, 0);
+ /* set the tabsize in pixels */
+ if ((err = load_glyph(ctx, &glyph, ' ')) < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Could not set tabsize.\n");
+ return err;
+ }
+ dtext->tabsize *= glyph->advance;
+
#if !HAVE_LOCALTIME_R
av_log(ctx, AV_LOG_WARNING, "strftime() expansion unavailable!\n");
-#else
- if (strlen(dtext->text) >= MAX_EXPANDED_TEXT_SIZE) {
- av_log(ctx, AV_LOG_ERROR,
- "Impossible to print text, string is too big\n");
- return AVERROR(EINVAL);
- }
#endif
return 0;
@@ -338,6 +351,7 @@ static av_cold void uninit(AVFilterContext *ctx)
av_freep(&dtext->text);
av_freep(&dtext->fontcolor_string);
av_freep(&dtext->boxcolor_string);
+ av_freep(&dtext->positions);
av_freep(&dtext->shadowcolor_string);
av_tree_enumerate(dtext->glyphs, NULL, NULL, glyph_enu_free);
av_tree_destroy(dtext->glyphs);
@@ -393,7 +407,7 @@ static int config_input(AVFilterLink *inlink)
luma_pos = ((x) ) + ((y) ) * picref->linesize[0]; \
alpha = yuva_color[3] * (val) * 129; \
picref->data[0][luma_pos] = (alpha * yuva_color[0] + (255*255*129 - alpha) * picref->data[0][luma_pos] ) >> 23; \
- if(((x) & ((1<<(hsub))-1))==0 && ((y) & ((1<<(vsub))-1))==0){\
+ if (((x) & ((1<<(hsub)) - 1)) == 0 && ((y) & ((1<<(vsub)) - 1)) == 0) {\
chroma_pos1 = ((x) >> (hsub)) + ((y) >> (vsub)) * picref->linesize[1]; \
chroma_pos2 = ((x) >> (hsub)) + ((y) >> (vsub)) * picref->linesize[2]; \
picref->data[1][chroma_pos1] = (alpha * yuva_color[1] + (255*255*129 - alpha) * picref->data[1][chroma_pos1]) >> 23; \
@@ -403,7 +417,7 @@ static int config_input(AVFilterLink *inlink)
static inline int draw_glyph_yuv(AVFilterBufferRef *picref, FT_Bitmap *bitmap, unsigned int x,
unsigned int y, unsigned int width, unsigned int height,
- unsigned char yuva_color[4], int hsub, int vsub)
+ const uint8_t yuva_color[4], int hsub, int vsub)
{
int r, c, alpha;
unsigned int luma_pos, chroma_pos1, chroma_pos2;
@@ -439,7 +453,7 @@ static inline int draw_glyph_yuv(AVFilterBufferRef *picref, FT_Bitmap *bitmap, u
static inline int draw_glyph_rgb(AVFilterBufferRef *picref, FT_Bitmap *bitmap,
unsigned int x, unsigned int y,
unsigned int width, unsigned int height, int pixel_step,
- unsigned char rgba_color[4], uint8_t rgba_map[4])
+ const uint8_t rgba_color[4], const uint8_t rgba_map[4])
{
int r, c, alpha;
uint8_t *p;
@@ -495,10 +509,15 @@ static inline void drawbox(AVFilterBufferRef *picref, unsigned int x, unsigned i
}
}
+static inline int is_newline(uint32_t c)
+{
+ return (c == '\n' || c == '\r' || c == '\f' || c == '\v');
+}
+
static int draw_glyphs(DrawTextContext *dtext, AVFilterBufferRef *picref,
int width, int height, const uint8_t rgbcolor[4], const uint8_t yuvcolor[4], int x, int y)
{
- char *text = HAVE_LOCALTIME_R ? dtext->expanded_text : dtext->text;
+ char *text = dtext->text;
uint32_t code = 0;
int i;
uint8_t *p;
@@ -537,44 +556,53 @@ static int draw_text(AVFilterContext *ctx, AVFilterBufferRef *picref,
int width, int height)
{
DrawTextContext *dtext = ctx->priv;
- char *text = dtext->text;
uint32_t code = 0, prev_code = 0;
int x = 0, y = 0, i = 0, ret;
int text_height, baseline;
uint8_t *p;
- int str_w, str_w_max;
+ int str_w = 0;
int y_min = 32000, y_max = -32000;
FT_Vector delta;
Glyph *glyph = NULL, *prev_glyph = NULL;
Glyph dummy = { 0 };
+ if (dtext->text != dtext->text_priv) {
#if HAVE_LOCALTIME_R
- time_t now = time(0);
- struct tm ltime;
- size_t expanded_text_len;
-
- dtext->expanded_text[0] = '\1';
- expanded_text_len = strftime(dtext->expanded_text, MAX_EXPANDED_TEXT_SIZE,
- text, localtime_r(&now, &ltime));
- text = dtext->expanded_text;
- if (expanded_text_len == 0 && dtext->expanded_text[0] != '\0') {
- av_log(ctx, AV_LOG_ERROR,
- "Impossible to print text, string is too big\n");
- return AVERROR(EINVAL);
- }
+ time_t now = time(0);
+ struct tm ltime;
+ uint8_t *buf = NULL;
+ int buflen = 2*strlen(dtext->text) + 1, len;
+
+ localtime_r(&now, &ltime);
+
+ while ((buf = av_realloc(buf, buflen))) {
+ *buf = 1;
+ if ((len = strftime(buf, buflen, dtext->text, &ltime)) != 0 || *buf == 0)
+ break;
+ buflen *= 2;
+ }
+ if (!buf)
+ return AVERROR(ENOMEM);
+ av_freep(&dtext->text);
+ dtext->text = dtext->text_priv = buf;
+#else
+ dtext->text_priv = dtext->text;
#endif
+ if (!(dtext->positions = av_realloc(dtext->positions,
+ strlen(dtext->text)*sizeof(*dtext->positions))))
+ return AVERROR(ENOMEM);
+ }
- str_w = str_w_max = 0;
x = dtext->x;
y = dtext->y;
/* load and cache glyphs */
- for (i = 0, p = text; *p; i++) {
+ for (i = 0, p = dtext->text; *p; i++) {
GET_UTF8(code, *p++, continue;);
/* get glyph */
dummy.code = code;
- glyph = av_tree_find(dtext->glyphs, &dummy, (void *)glyph_cmp, NULL);
+ glyph = av_tree_find(dtext->glyphs, &dummy, glyph_cmp, NULL);
if (!glyph)
load_glyph(ctx, &glyph, code);
@@ -586,17 +614,25 @@ static int draw_text(AVFilterContext *ctx, AVFilterBufferRef *picref,
/* compute and save position for each glyph */
glyph = NULL;
- for (i = 0, p = text; *p; i++) {
+ for (i = 0, p = dtext->text; *p; i++) {
GET_UTF8(code, *p++, continue;);
/* skip the \n in the sequence \r\n */
if (prev_code == '\r' && code == '\n')
continue;
+ prev_code = code;
+ if (is_newline(code)) {
+ str_w = FFMAX(str_w, x - dtext->x);
+ y += text_height;
+ x = dtext->x;
+ continue;
+ }
+
/* get glyph */
prev_glyph = glyph;
dummy.code = code;
- glyph = av_tree_find(dtext->glyphs, &dummy, (void *)glyph_cmp, NULL);
+ glyph = av_tree_find(dtext->glyphs, &dummy, glyph_cmp, NULL);
/* kerning */
if (dtext->use_kerning && prev_glyph && glyph->code) {
@@ -605,9 +641,8 @@ static int draw_text(AVFilterContext *ctx, AVFilterBufferRef *picref,
x += delta.x >> 6;
}
- if (x + glyph->advance >= width || code == '\r' || code == '\n') {
- if (x + glyph->advance >= width)
- str_w_max = width - dtext->x - 1;
+ if (x + glyph->bbox.xMax >= width) {
+ str_w = FFMAX(str_w, x - dtext->x);
y += text_height;
x = dtext->x;
}
@@ -615,38 +650,27 @@ static int draw_text(AVFilterContext *ctx, AVFilterBufferRef *picref,
/* save position */
dtext->positions[i].x = x + glyph->bitmap_left;
dtext->positions[i].y = y - glyph->bitmap_top + baseline;
- if (code != '\n' && code != '\r') {
- int advance = glyph->advance;
- if (code == '\t')
- advance *= dtext->tabsize;
- x += advance;
- str_w += advance;
- }
- prev_code = code;
+ if (code == '\t') x = (x / dtext->tabsize + 1)*dtext->tabsize;
+ else x += glyph->advance;
}
- y += text_height;
- if (str_w_max == 0)
- str_w_max = str_w;
+ str_w = FFMIN(width - dtext->x - 1, FFMAX(str_w, x - dtext->x));
+ y = FFMIN(y + text_height, height - 1);
/* draw box */
- if (dtext->draw_box) {
- /* check if it doesn't pass the limits */
- str_w_max = FFMIN(str_w_max, width - dtext->x - 1);
- y = FFMIN(y, height - 1);
-
- /* draw background */
- drawbox(picref, dtext->x, dtext->y, str_w_max, y-dtext->y,
+ if (dtext->draw_box)
+ drawbox(picref, dtext->x, dtext->y, str_w, y-dtext->y,
dtext->box_line, dtext->pixel_step, dtext->boxcolor,
dtext->hsub, dtext->vsub, dtext->is_packed_rgb, dtext->rgba_map);
- }
- if(dtext->shadowx || dtext->shadowy){
- if((ret=draw_glyphs(dtext, picref, width, height, dtext->shadowcolor_rgba, dtext->shadowcolor, dtext->shadowx, dtext->shadowy))<0)
+ if (dtext->shadowx || dtext->shadowy) {
+ if ((ret = draw_glyphs(dtext, picref, width, height, dtext->shadowcolor_rgba,
+ dtext->shadowcolor, dtext->shadowx, dtext->shadowy)) < 0)
return ret;
}
- if((ret=draw_glyphs(dtext, picref, width, height, dtext->fontcolor_rgba, dtext->fontcolor, 0, 0))<0)
+ if ((ret = draw_glyphs(dtext, picref, width, height, dtext->fontcolor_rgba,
+ dtext->fontcolor, 0, 0)) < 0)
return ret;
return 0;
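
The strftime() expansion above replaces the fixed MAX_EXPANDED_TEXT_SIZE buffer with a grow-until-it-fits loop. Below is a standalone restatement of that loop using only libc; expand_strftime is an illustrative name. strftime() signals "buffer too small" by returning 0, but 0 is also a legal result for an empty expansion, so a sentinel byte is written first and checked afterwards, exactly as in the hunk.

#include <stdlib.h>
#include <string.h>
#include <time.h>

static char *expand_strftime(const char *fmt)
{
    time_t now = time(NULL);
    struct tm ltime;
    size_t buflen = 2 * strlen(fmt) + 1;
    char *buf = NULL;

    localtime_r(&now, &ltime);
    while ((buf = realloc(buf, buflen))) {
        *buf = 1;                                       /* sentinel */
        if (strftime(buf, buflen, fmt, &ltime) != 0 ||  /* expansion fit */
            *buf == 0)                                  /* legitimately empty result */
            break;
        buflen *= 2;                                    /* too small, grow and retry */
    }
    return buf;  /* NULL only if an allocation failed */
}
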
diff --git a/libavfilter/vf_frei0r.c b/libavfilter/vf_frei0r.c
index adccccb95b..0cb5fd30b5 100644
--- a/libavfilter/vf_frei0r.c
+++ b/libavfilter/vf_frei0r.c
@@ -430,7 +430,7 @@ static int source_request_frame(AVFilterLink *outlink)
{
Frei0rContext *frei0r = outlink->src->priv;
AVFilterBufferRef *picref = avfilter_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
- picref->video->pixel_aspect = (AVRational) {1, 1};
+ picref->video->sample_aspect_ratio = (AVRational) {1, 1};
picref->pts = frei0r->pts++;
picref->pos = -1;
diff --git a/libavfilter/vf_scale.c b/libavfilter/vf_scale.c
index 27214a6a2f..9ff93bd411 100644
--- a/libavfilter/vf_scale.c
+++ b/libavfilter/vf_scale.c
@@ -214,14 +214,18 @@ static int config_props(AVFilterLink *outlink)
scale->input_is_pal = av_pix_fmt_descriptors[inlink->format].flags & PIX_FMT_PAL;
- if(scale->sws)
+ if (scale->sws)
sws_freeContext(scale->sws);
scale->sws = sws_getContext(inlink ->w, inlink ->h, inlink ->format,
outlink->w, outlink->h, outlink->format,
scale->flags, NULL, NULL, NULL);
+ if (scale->isws[0])
+ sws_freeContext(scale->isws[0]);
scale->isws[0] = sws_getContext(inlink ->w, inlink ->h/2, inlink ->format,
outlink->w, outlink->h/2, outlink->format,
scale->flags, NULL, NULL, NULL);
+ if (scale->isws[1])
+ sws_freeContext(scale->isws[1]);
scale->isws[1] = sws_getContext(inlink ->w, inlink ->h/2, inlink ->format,
outlink->w, outlink->h/2, outlink->format,
scale->flags, NULL, NULL, NULL);
@@ -252,9 +256,9 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
outlink->out_buf = outpicref;
- av_reduce(&outpicref->video->pixel_aspect.num, &outpicref->video->pixel_aspect.den,
- (int64_t)picref->video->pixel_aspect.num * outlink->h * link->w,
- (int64_t)picref->video->pixel_aspect.den * outlink->w * link->h,
+ av_reduce(&outpicref->video->sample_aspect_ratio.num, &outpicref->video->sample_aspect_ratio.den,
+ (int64_t)picref->video->sample_aspect_ratio.num * outlink->h * link->w,
+ (int64_t)picref->video->sample_aspect_ratio.den * outlink->w * link->h,
INT_MAX);
scale->slice_y = 0;
diff --git a/libavfilter/vf_showinfo.c b/libavfilter/vf_showinfo.c
index d2483d5a7f..d512199602 100644
--- a/libavfilter/vf_showinfo.c
+++ b/libavfilter/vf_showinfo.c
@@ -59,7 +59,7 @@ static void end_frame(AVFilterLink *inlink)
showinfo->frame,
picref->pts, picref ->pts * av_q2d(inlink->time_base), picref->pos,
av_pix_fmt_descriptors[picref->format].name,
- picref->video->pixel_aspect.num, picref->video->pixel_aspect.den,
+ picref->video->sample_aspect_ratio.num, picref->video->sample_aspect_ratio.den,
picref->video->w, picref->video->h,
!picref->video->interlaced ? 'P' : /* Progressive */
picref->video->top_field_first ? 'T' : 'B', /* Top / Bottom */
diff --git a/libavfilter/vf_transpose.c b/libavfilter/vf_transpose.c
index ed91aaade5..a5247c9753 100644
--- a/libavfilter/vf_transpose.c
+++ b/libavfilter/vf_transpose.c
@@ -122,11 +122,11 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
outlink->w, outlink->h);
outlink->out_buf->pts = picref->pts;
- if (picref->video->pixel_aspect.num == 0) {
- outlink->out_buf->video->pixel_aspect = picref->video->pixel_aspect;
+ if (picref->video->sample_aspect_ratio.num == 0) {
+ outlink->out_buf->video->sample_aspect_ratio = picref->video->sample_aspect_ratio;
} else {
- outlink->out_buf->video->pixel_aspect.num = picref->video->pixel_aspect.den;
- outlink->out_buf->video->pixel_aspect.den = picref->video->pixel_aspect.num;
+ outlink->out_buf->video->sample_aspect_ratio.num = picref->video->sample_aspect_ratio.den;
+ outlink->out_buf->video->sample_aspect_ratio.den = picref->video->sample_aspect_ratio.num;
}
avfilter_start_frame(outlink, avfilter_ref_buffer(outlink->out_buf, ~0));
diff --git a/libavfilter/vsrc_buffer.c b/libavfilter/vsrc_buffer.c
index c683d51e6f..84f3b33c3f 100644
--- a/libavfilter/vsrc_buffer.c
+++ b/libavfilter/vsrc_buffer.c
@@ -24,23 +24,21 @@
*/
#include "avfilter.h"
+#include "avcodec.h"
#include "vsrc_buffer.h"
#include "libavutil/imgutils.h"
typedef struct {
- int64_t pts;
AVFrame frame;
int has_frame;
int h, w;
enum PixelFormat pix_fmt;
AVRational time_base; ///< time_base to set in the output link
- AVRational pixel_aspect;
+ AVRational sample_aspect_ratio;
char sws_param[256];
} BufferSourceContext;
int av_vsrc_buffer_add_frame2(AVFilterContext *buffer_filter, AVFrame *frame,
- int64_t pts, AVRational pixel_aspect, int width,
- int height, enum PixelFormat pix_fmt,
const char *sws_param)
{
BufferSourceContext *c = buffer_filter->priv;
@@ -58,12 +56,14 @@ int av_vsrc_buffer_add_frame2(AVFilterContext *buffer_filter, AVFrame *frame,
snprintf(c->sws_param, 255, "%d:%d:%s", c->w, c->h, sws_param);
}
- if(width != c->w || height != c->h || pix_fmt != c->pix_fmt){
+ if (frame->width != c->w || frame->height != c->h || frame->format != c->pix_fmt) {
AVFilterContext *scale= buffer_filter->outputs[0]->dst;
AVFilterLink *link;
- av_log(buffer_filter, AV_LOG_INFO, "Changing filter graph input to accept %dx%d %d (%d %d)\n",
- width,height,pix_fmt, c->pix_fmt, scale && scale->outputs ? scale->outputs[0]->format : -123);
+ av_log(buffer_filter, AV_LOG_INFO,
+ "Buffer video input changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
+ c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
+ frame->width, frame->height, av_pix_fmt_descriptors[frame->format].name);
if(!scale || strcmp(scale->filter->name,"scale")){
AVFilter *f= avfilter_get_by_name("scale");
@@ -89,36 +89,26 @@ int av_vsrc_buffer_add_frame2(AVFilterContext *buffer_filter, AVFrame *frame,
scale->filter->init(scale, c->sws_param, NULL);
}
- c->pix_fmt= scale->inputs[0]->format= pix_fmt;
- c->w= scale->inputs[0]->w= width;
- c->h= scale->inputs[0]->h= height;
+ c->pix_fmt = scale->inputs[0]->format = frame->format;
+ c->w = scale->inputs[0]->w = frame->width;
+ c->h = scale->inputs[0]->h = frame->height;
link= scale->outputs[0];
if ((ret = link->srcpad->config_props(link)) < 0)
return ret;
}
+ c->frame = *frame;
memcpy(c->frame.data , frame->data , sizeof(frame->data));
memcpy(c->frame.linesize, frame->linesize, sizeof(frame->linesize));
- c->frame.interlaced_frame= frame->interlaced_frame;
- c->frame.top_field_first = frame->top_field_first;
- c->frame.key_frame = frame->key_frame;
- c->frame.pict_type = frame->pict_type;
- c->pts = pts;
- c->pixel_aspect = pixel_aspect;
c->has_frame = 1;
return 0;
}
-int av_vsrc_buffer_add_frame(AVFilterContext *buffer_filter, AVFrame *frame,
- int64_t pts, AVRational pixel_aspect)
+int av_vsrc_buffer_add_frame(AVFilterContext *buffer_filter, AVFrame *frame)
{
- BufferSourceContext *c = buffer_filter->priv;
-
- return av_vsrc_buffer_add_frame2(buffer_filter, frame,
- pts, pixel_aspect, c->w,
- c->h, c->pix_fmt, "");
+ return av_vsrc_buffer_add_frame2(buffer_filter, frame, "");
}
static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
@@ -130,7 +120,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
if (!args ||
(n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d", &c->w, &c->h, pix_fmt_str,
&c->time_base.num, &c->time_base.den,
- &c->pixel_aspect.num, &c->pixel_aspect.den)) != 7) {
+ &c->sample_aspect_ratio.num, &c->sample_aspect_ratio.den)) != 7) {
av_log(ctx, AV_LOG_ERROR, "Expected 7 arguments, but only %d found in '%s'\n", n, args);
return AVERROR(EINVAL);
}
@@ -143,7 +133,10 @@ static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
}
}
- av_log(ctx, AV_LOG_INFO, "w:%d h:%d pixfmt:%s\n", c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name);
+ av_log(ctx, AV_LOG_INFO, "w:%d h:%d pixfmt:%s tb:%d/%d sar:%d/%d\n",
+ c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
+ c->time_base.num, c->time_base.den,
+ c->sample_aspect_ratio.num, c->sample_aspect_ratio.den);
return 0;
}
@@ -162,7 +155,7 @@ static int config_props(AVFilterLink *link)
link->w = c->w;
link->h = c->h;
- link->sample_aspect_ratio = c->pixel_aspect;
+ link->sample_aspect_ratio = c->sample_aspect_ratio;
link->time_base = c->time_base;
return 0;
@@ -188,13 +181,8 @@ static int request_frame(AVFilterLink *link)
av_image_copy(picref->data, picref->linesize,
c->frame.data, c->frame.linesize,
picref->format, link->w, link->h);
+ avfilter_copy_frame_props(picref, &c->frame);
- picref->pts = c->pts;
- picref->video->pixel_aspect = c->pixel_aspect;
- picref->video->interlaced = c->frame.interlaced_frame;
- picref->video->top_field_first = c->frame.top_field_first;
- picref->video->key_frame = c->frame.key_frame;
- picref->video->pict_type = c->frame.pict_type;
avfilter_start_frame(link, avfilter_ref_buffer(picref, ~0));
avfilter_draw_slice(link, 0, link->h, 1);
avfilter_end_frame(link);
diff --git a/libavfilter/vsrc_buffer.h b/libavfilter/vsrc_buffer.h
index 79a9908c69..2dda546e01 100644
--- a/libavfilter/vsrc_buffer.h
+++ b/libavfilter/vsrc_buffer.h
@@ -30,12 +30,9 @@
#include "libavcodec/avcodec.h" /* AVFrame */
#include "avfilter.h"
-int av_vsrc_buffer_add_frame(AVFilterContext *buffer_filter, AVFrame *frame,
- int64_t pts, AVRational pixel_aspect);
+int av_vsrc_buffer_add_frame(AVFilterContext *buffer_filter, AVFrame *frame);
int av_vsrc_buffer_add_frame2(AVFilterContext *buffer_filter, AVFrame *frame,
- int64_t pts, AVRational pixel_aspect, int width,
- int height, enum PixelFormat pix_fmt,
const char *sws_param);
#endif /* AVFILTER_VSRC_BUFFER_H */
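
A hedged migration sketch for callers of the old av_vsrc_buffer_add_frame(ctx, frame, pts, pixel_aspect) signature; push_decoded_frame is an illustrative name. The timestamp and aspect ratio now travel inside the AVFrame itself and are copied into the graph by avfilter_copy_frame_props() in the buffer source's request_frame().

#include "libavfilter/vsrc_buffer.h"

static int push_decoded_frame(AVFilterContext *buffer_ctx, AVFrame *frame,
                              int64_t pts, AVRational sar)
{
    frame->pts                 = pts;
    frame->sample_aspect_ratio = sar;
    return av_vsrc_buffer_add_frame(buffer_ctx, frame);
}
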
diff --git a/libavfilter/vsrc_color.c b/libavfilter/vsrc_color.c
index 3fab260a2f..dc73e1bafa 100644
--- a/libavfilter/vsrc_color.c
+++ b/libavfilter/vsrc_color.c
@@ -132,7 +132,7 @@ static int color_request_frame(AVFilterLink *link)
{
ColorContext *color = link->src->priv;
AVFilterBufferRef *picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, color->w, color->h);
- picref->video->pixel_aspect = (AVRational) {1, 1};
+ picref->video->sample_aspect_ratio = (AVRational) {1, 1};
picref->pts = av_rescale_q(color->pts++, color->time_base, AV_TIME_BASE_Q);
picref->pos = 0;
diff --git a/libavfilter/vsrc_movie.c b/libavfilter/vsrc_movie.c
index da601bb11f..e36412f480 100644
--- a/libavfilter/vsrc_movie.c
+++ b/libavfilter/vsrc_movie.c
@@ -35,6 +35,7 @@
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "libavformat/avformat.h"
+#include "avcodec.h"
#include "avfilter.h"
typedef struct {
@@ -57,12 +58,12 @@ typedef struct {
#define OFFSET(x) offsetof(MovieContext, x)
static const AVOption movie_options[]= {
-{"format_name", "set format name", OFFSET(format_name), FF_OPT_TYPE_STRING, 0, CHAR_MIN, CHAR_MAX },
-{"f", "set format name", OFFSET(format_name), FF_OPT_TYPE_STRING, 0, CHAR_MIN, CHAR_MAX },
-{"stream_index", "set stream index", OFFSET(stream_index), FF_OPT_TYPE_INT, -1, -1, INT_MAX },
-{"si", "set stream index", OFFSET(stream_index), FF_OPT_TYPE_INT, -1, -1, INT_MAX },
-{"seek_point", "set seekpoint (seconds)", OFFSET(seek_point_d), FF_OPT_TYPE_DOUBLE, 0, 0, (INT64_MAX-1) / 1000000 },
-{"sp", "set seekpoint (seconds)", OFFSET(seek_point_d), FF_OPT_TYPE_DOUBLE, 0, 0, (INT64_MAX-1) / 1000000 },
+{"format_name", "set format name", OFFSET(format_name), FF_OPT_TYPE_STRING, {.str = 0}, CHAR_MIN, CHAR_MAX },
+{"f", "set format name", OFFSET(format_name), FF_OPT_TYPE_STRING, {.str = 0}, CHAR_MIN, CHAR_MAX },
+{"stream_index", "set stream index", OFFSET(stream_index), FF_OPT_TYPE_INT, {.dbl = -1}, -1, INT_MAX },
+{"si", "set stream index", OFFSET(stream_index), FF_OPT_TYPE_INT, {.dbl = -1}, -1, INT_MAX },
+{"seek_point", "set seekpoint (seconds)", OFFSET(seek_point_d), FF_OPT_TYPE_DOUBLE, {.dbl = 0}, 0, (INT64_MAX-1) / 1000000 },
+{"sp", "set seekpoint (seconds)", OFFSET(seek_point_d), FF_OPT_TYPE_DOUBLE, {.dbl = 0}, 0, (INT64_MAX-1) / 1000000 },
{NULL},
};
@@ -230,7 +231,6 @@ static int movie_get_frame(AVFilterLink *outlink)
while ((ret = av_read_frame(movie->format_ctx, &pkt)) >= 0) {
// Is this a packet from the video stream?
if (pkt.stream_index == movie->stream_index) {
- movie->codec_ctx->reordered_opaque = pkt.pos;
avcodec_decode_video2(movie->codec_ctx, movie->frame, &frame_decoded, &pkt);
if (frame_decoded) {
@@ -240,26 +240,22 @@ static int movie_get_frame(AVFilterLink *outlink)
av_image_copy(movie->picref->data, movie->picref->linesize,
movie->frame->data, movie->frame->linesize,
movie->picref->format, outlink->w, outlink->h);
+ avfilter_copy_frame_props(movie->picref, movie->frame);
/* FIXME: use a PTS correction mechanism as that in
* ffplay.c when some API will be available for that */
/* use pkt_dts if pkt_pts is not available */
movie->picref->pts = movie->frame->pkt_pts == AV_NOPTS_VALUE ?
movie->frame->pkt_dts : movie->frame->pkt_pts;
-
- movie->picref->pos = movie->frame->reordered_opaque;
- movie->picref->video->pixel_aspect = st->sample_aspect_ratio.num ?
- st->sample_aspect_ratio : movie->codec_ctx->sample_aspect_ratio;
- movie->picref->video->interlaced = movie->frame->interlaced_frame;
- movie->picref->video->top_field_first = movie->frame->top_field_first;
- movie->picref->video->key_frame = movie->frame->key_frame;
- movie->picref->video->pict_type = movie->frame->pict_type;
+ if (!movie->frame->sample_aspect_ratio.num)
+ movie->picref->video->sample_aspect_ratio = st->sample_aspect_ratio;
av_dlog(outlink->src,
"movie_get_frame(): file:'%s' pts:%"PRId64" time:%lf pos:%"PRId64" aspect:%d/%d\n",
movie->file_name, movie->picref->pts,
(double)movie->picref->pts * av_q2d(st->time_base),
movie->picref->pos,
- movie->picref->video->pixel_aspect.num, movie->picref->video->pixel_aspect.den);
+ movie->picref->video->sample_aspect_ratio.num,
+ movie->picref->video->sample_aspect_ratio.den);
// We got it. Free the packet since we are returning
av_free_packet(&pkt);
diff --git a/libavformat/Makefile b/libavformat/Makefile
index e278136579..cbaa4d6840 100644
--- a/libavformat/Makefile
+++ b/libavformat/Makefile
@@ -49,6 +49,7 @@ OBJS-$(CONFIG_BFI_DEMUXER) += bfi.o
OBJS-$(CONFIG_BINK_DEMUXER) += bink.o
OBJS-$(CONFIG_C93_DEMUXER) += c93.o vocdec.o voc.o
OBJS-$(CONFIG_CAF_DEMUXER) += cafdec.o caf.o mov.o riff.o isom.o
+OBJS-$(CONFIG_CAF_MUXER) += cafenc.o caf.o riff.o isom.o
OBJS-$(CONFIG_CAVSVIDEO_DEMUXER) += cavsvideodec.o rawdec.o
OBJS-$(CONFIG_CAVSVIDEO_MUXER) += rawenc.o
OBJS-$(CONFIG_CDG_DEMUXER) += cdg.o
@@ -300,7 +301,8 @@ OBJS-$(CONFIG_WEBM_MUXER) += matroskaenc.o matroska.o \
flacenc_header.o avlanguage.o
OBJS-$(CONFIG_WSAUD_DEMUXER) += westwood.o
OBJS-$(CONFIG_WSVQA_DEMUXER) += westwood.o
-OBJS-$(CONFIG_WTV_DEMUXER) += wtv.o asf.o asfdec.o mpegts.o riff.o
+OBJS-$(CONFIG_WTV_DEMUXER) += wtvdec.o wtv.o asfdec.o asf.o asfcrypt.o \
+ avlanguage.o mpegts.o isom.o riff.o
OBJS-$(CONFIG_WV_DEMUXER) += wv.o apetag.o
OBJS-$(CONFIG_XA_DEMUXER) += xa.o
OBJS-$(CONFIG_XWMA_DEMUXER) += xwma.o riff.o
diff --git a/libavformat/aiffdec.c b/libavformat/aiffdec.c
index cc2631448b..e691bba29d 100644
--- a/libavformat/aiffdec.c
+++ b/libavformat/aiffdec.c
@@ -68,19 +68,20 @@ static int get_tag(AVIOContext *pb, uint32_t * tag)
static void get_meta(AVFormatContext *s, const char *key, int size)
{
uint8_t *str = av_malloc(size+1);
- int res;
- if (!str) {
- avio_skip(s->pb, size);
- return;
- }
-
- res = avio_read(s->pb, str, size);
- if (res < 0)
- return;
+ if (str) {
+ int res = avio_read(s->pb, str, size);
+ if (res < 0){
+ av_free(str);
+ return;
+ }
+ size += (size&1)-res;
+ str[res] = 0;
+ av_metadata_set2(&s->metadata, key, str, AV_METADATA_DONT_STRDUP_VAL);
+ }else
+ size+= size&1;
- str[res] = 0;
- av_metadata_set2(&s->metadata, key, str, AV_METADATA_DONT_STRDUP_VAL);
+ avio_skip(s->pb, size);
}
/* Returns the number of sound data frames or negative on error */
diff --git a/libavformat/allformats.c b/libavformat/allformats.c
index d08f2f742a..a9fa117243 100644
--- a/libavformat/allformats.c
+++ b/libavformat/allformats.c
@@ -71,7 +71,7 @@ void av_register_all(void)
REGISTER_DEMUXER (BFI, bfi);
REGISTER_DEMUXER (BINK, bink);
REGISTER_DEMUXER (C93, c93);
- REGISTER_DEMUXER (CAF, caf);
+ REGISTER_MUXDEMUX (CAF, caf);
REGISTER_MUXDEMUX (CAVSVIDEO, cavsvideo);
REGISTER_DEMUXER (CDG, cdg);
REGISTER_MUXER (CRC, crc);
diff --git a/libavformat/ape.c b/libavformat/ape.c
index d63b2ac40d..4b37c7972d 100644
--- a/libavformat/ape.c
+++ b/libavformat/ape.c
@@ -251,7 +251,7 @@ static int ape_read_header(AVFormatContext * s, AVFormatParameters * ap)
return -1;
}
if (ape->seektablelength && (ape->seektablelength / sizeof(*ape->seektable)) < ape->totalframes) {
- av_log(s, AV_LOG_ERROR, "Number of seek entries is less than number of frames: %d vs. %d\n",
+ av_log(s, AV_LOG_ERROR, "Number of seek entries is less than number of frames: %zd vs. %d\n",
ape->seektablelength / sizeof(*ape->seektable), ape->totalframes);
return AVERROR_INVALIDDATA;
}
diff --git a/libavformat/applehttp.c b/libavformat/applehttp.c
index a6cdad1f29..d0de6bbc3e 100644
--- a/libavformat/applehttp.c
+++ b/libavformat/applehttp.c
@@ -25,7 +25,6 @@
* http://tools.ietf.org/html/draft-pantos-http-live-streaming
*/
-#define _XOPEN_SOURCE 600
#include "libavutil/avstring.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
diff --git a/libavformat/applehttpproto.c b/libavformat/applehttpproto.c
index 4871cccd71..e0bc6077c5 100644
--- a/libavformat/applehttpproto.c
+++ b/libavformat/applehttpproto.c
@@ -25,7 +25,6 @@
* http://tools.ietf.org/html/draft-pantos-http-live-streaming
*/
-#define _XOPEN_SOURCE 600
#include "libavutil/avstring.h"
#include "avformat.h"
#include "internal.h"
diff --git a/libavformat/asfdec.c b/libavformat/asfdec.c
index 5bcfe98ce0..4a4f1d927c 100644
--- a/libavformat/asfdec.c
+++ b/libavformat/asfdec.c
@@ -843,11 +843,21 @@ static int asf_read_frame_header(AVFormatContext *s, AVIOContext *pb){
av_log(s, AV_LOG_ERROR, "unexpected packet_replic_size of %d\n", asf->packet_replic_size);
return -1;
}
+ if (rsize > asf->packet_size_left) {
+ av_log(s, AV_LOG_ERROR, "packet_replic_size is invalid\n");
+ return -1;
+ }
if (asf->packet_flags & 0x01) {
DO_2BITS(asf->packet_segsizetype >> 6, asf->packet_frag_size, 0); // 0 is illegal
if(asf->packet_frag_size > asf->packet_size_left - rsize){
- av_log(s, AV_LOG_ERROR, "packet_frag_size is invalid\n");
- return -1;
+ if (asf->packet_frag_size > asf->packet_size_left - rsize + asf->packet_padsize) {
+ av_log(s, AV_LOG_ERROR, "packet_frag_size is invalid (%d-%d)\n", asf->packet_size_left, rsize);
+ return -1;
+ } else {
+ int diff = asf->packet_frag_size - (asf->packet_size_left - rsize);
+ asf->packet_size_left += diff;
+ asf->packet_padsize -= diff;
+ }
}
//printf("Fragsize %d\n", asf->packet_frag_size);
} else {
diff --git a/libavformat/avidec.c b/libavformat/avidec.c
index c388298fef..39686491e2 100644
--- a/libavformat/avidec.c
+++ b/libavformat/avidec.c
@@ -356,7 +356,7 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
AVIContext *avi = s->priv_data;
AVIOContext *pb = s->pb;
unsigned int tag, tag1, handler;
- int codec_type, stream_index, frame_period, bit_rate;
+ int codec_type, stream_index, frame_period;
unsigned int size;
int i;
AVStream *st;
@@ -428,7 +428,7 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
/* AVI header */
/* using frame_period is bad idea */
frame_period = avio_rl32(pb);
- bit_rate = avio_rl32(pb) * 8;
+ avio_rl32(pb); /* max. bytes per second */
avio_rl32(pb);
avi->non_interleaved |= avio_rl32(pb) & AVIF_MUSTUSEINDEX;
@@ -970,7 +970,7 @@ resync:
if (CONFIG_DV_DEMUXER && avi->dv_demux) {
dstr = pkt->destruct;
size = dv_produce_packet(avi->dv_demux, pkt,
- pkt->data, pkt->size);
+ pkt->data, pkt->size, pkt->pos);
pkt->destruct = dstr;
pkt->flags |= AV_PKT_FLAG_KEY;
if (size < 0)
@@ -999,6 +999,23 @@ resync:
e= &st->index_entries[index];
if(index >= 0 && e->timestamp == ast->frame_offset){
+ if (index == st->nb_index_entries-1){
+ int key=1;
+ int i;
+ uint32_t state=-1;
+ for(i=0; i<FFMIN(size,256); i++){
+ if(st->codec->codec_id == CODEC_ID_MPEG4){
+ if(state == 0x1B6){
+ key= !(pkt->data[i]&0xC0);
+ break;
+ }
+ }else
+ break;
+ state= (state<<8) + pkt->data[i];
+ }
+ if(!key)
+ e->flags &= ~AVINDEX_KEYFRAME;
+ }
if (e->flags & AVINDEX_KEYFRAME)
pkt->flags |= AV_PKT_FLAG_KEY;
}
@@ -1347,6 +1364,22 @@ static int avi_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp
index=0;
ast2->seek_pos= st2->index_entries[index].pos;
pos_min= FFMIN(pos_min,ast2->seek_pos);
+ }
+ for(i = 0; i < s->nb_streams; i++) {
+ AVStream *st2 = s->streams[i];
+ AVIStream *ast2 = st2->priv_data;
+
+ if (ast2->sub_ctx || st2->nb_index_entries <= 0)
+ continue;
+
+ index = av_index_search_timestamp(
+ st2,
+ av_rescale_q(timestamp, st->time_base, st2->time_base) * FFMAX(ast2->sample_size, 1),
+ flags | AVSEEK_FLAG_BACKWARD);
+ if(index<0)
+ index=0;
+ while(index>0 && st2->index_entries[index-1].pos >= pos_min)
+ index--;
ast2->frame_offset = st2->index_entries[index].timestamp;
}
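
One of the avidec.c hunks above re-derives the keyframe flag for MPEG-4 packets instead of trusting the last index entry. A standalone version of that scan, under the same assumptions as the hunk: only the first 256 bytes are examined, and the existing keyframe status is kept when no VOP header is found.

#include <stdint.h>
#include "libavutil/common.h"

/* Look for the MPEG-4 VOP start code (00 00 01 B6); the two most
 * significant bits of the byte that follows are vop_coding_type,
 * and 0 means an intra (key) frame. */
static int mpeg4_pkt_is_keyframe(const uint8_t *data, int size)
{
    uint32_t state = -1;
    int i;

    for (i = 0; i < FFMIN(size, 256); i++) {
        if (state == 0x1B6)
            return !(data[i] & 0xC0);
        state = (state << 8) + data[i];
    }
    return 1;
}
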
diff --git a/libavformat/avienc.c b/libavformat/avienc.c
index ff74c3b492..9f488d9600 100644
--- a/libavformat/avienc.c
+++ b/libavformat/avienc.c
@@ -518,7 +518,7 @@ static int avi_write_packet(AVFormatContext *s, AVPacket *pkt)
AVCodecContext *enc= s->streams[stream_index]->codec;
int size= pkt->size;
-// av_log(s, AV_LOG_DEBUG, "%"PRId64" %d %d\n", pkt->dts, avi->packet_count[stream_index], stream_index);
+// av_log(s, AV_LOG_DEBUG, "%"PRId64" %d %d\n", pkt->dts, avist->packet_count, stream_index);
while(enc->block_align==0 && pkt->dts != AV_NOPTS_VALUE && pkt->dts > avist->packet_count){
AVPacket empty_packet;
@@ -527,7 +527,7 @@ static int avi_write_packet(AVFormatContext *s, AVPacket *pkt)
empty_packet.data= NULL;
empty_packet.stream_index= stream_index;
avi_write_packet(s, &empty_packet);
-// av_log(s, AV_LOG_DEBUG, "dup %"PRId64" %d\n", pkt->dts, avi->packet_count[stream_index]);
+// av_log(s, AV_LOG_DEBUG, "dup %"PRId64" %d\n", pkt->dts, avist->packet_count);
}
avist->packet_count++;
diff --git a/libavformat/avio.c b/libavformat/avio.c
index dfbd5869e4..16f8c2f2dd 100644
--- a/libavformat/avio.c
+++ b/libavformat/avio.c
@@ -19,9 +19,8 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-/* needed for usleep() */
-#define _XOPEN_SOURCE 600
#include <unistd.h>
+
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "os_support.h"
@@ -41,8 +40,12 @@ static const char *urlcontext_to_name(void *ptr)
else return "NULL";
}
static const AVOption options[] = {{NULL}};
-static const AVClass urlcontext_class =
- { "URLContext", urlcontext_to_name, options, LIBAVUTIL_VERSION_INT };
+static const AVClass urlcontext_class = {
+ .class_name = "URLContext",
+ .item_name = urlcontext_to_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
/*@}*/
#endif
diff --git a/libavformat/caf.c b/libavformat/caf.c
index fd916746d8..907562a082 100644
--- a/libavformat/caf.c
+++ b/libavformat/caf.c
@@ -39,6 +39,8 @@ const AVCodecTag ff_codec_caf_tags[] = {
/*{ CODEC_ID_DVAUDIO, MKBETAG('v','d','v','a') },*/
/*{ CODEC_ID_DVAUDIO, MKBETAG('d','v','c','a') },*/
{ CODEC_ID_ADPCM_IMA_QT, MKBETAG('i','m','a','4') },
+ { CODEC_ID_AMR_NB, MKBETAG('s','a','m','r') },
+ { CODEC_ID_GSM, MKBETAG('a','g','s','m') },
{ CODEC_ID_MACE3, MKBETAG('M','A','C','3') },
{ CODEC_ID_MACE6, MKBETAG('M','A','C','6') },
{ CODEC_ID_MP3, MKBETAG('.','m','p','3') },
diff --git a/libavformat/cafenc.c b/libavformat/cafenc.c
new file mode 100644
index 0000000000..0f33c6b592
--- /dev/null
+++ b/libavformat/cafenc.c
@@ -0,0 +1,182 @@
+/*
+ * Core Audio Format muxer
+ * Copyright (c) 2011 Carl Eugen Hoyos
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avformat.h"
+#include "caf.h"
+#include "riff.h"
+#include "isom.h"
+#include "avio_internal.h"
+
+typedef struct {
+ int64_t data;
+} CAFContext;
+
+static uint32_t codec_flags(enum CodecID codec_id) {
+ switch (codec_id) {
+ case CODEC_ID_PCM_F32BE:
+ case CODEC_ID_PCM_F64BE:
+ return 1; //< kCAFLinearPCMFormatFlagIsFloat
+ case CODEC_ID_PCM_S16LE:
+ case CODEC_ID_PCM_S24LE:
+ case CODEC_ID_PCM_S32LE:
+ return 2; //< kCAFLinearPCMFormatFlagIsLittleEndian
+ case CODEC_ID_PCM_F32LE:
+ case CODEC_ID_PCM_F64LE:
+ return 3; //< kCAFLinearPCMFormatFlagIsFloat | kCAFLinearPCMFormatFlagIsLittleEndian
+ default:
+ return 0;
+ }
+}
+
+static uint32_t samples_per_packet(enum CodecID codec_id) {
+ switch (codec_id) {
+ case CODEC_ID_PCM_S8:
+ case CODEC_ID_PCM_S16LE:
+ case CODEC_ID_PCM_S16BE:
+ case CODEC_ID_PCM_S24LE:
+ case CODEC_ID_PCM_S24BE:
+ case CODEC_ID_PCM_S32LE:
+ case CODEC_ID_PCM_S32BE:
+ case CODEC_ID_PCM_F32LE:
+ case CODEC_ID_PCM_F32BE:
+ case CODEC_ID_PCM_F64LE:
+ case CODEC_ID_PCM_F64BE:
+ case CODEC_ID_PCM_ALAW:
+ case CODEC_ID_PCM_MULAW:
+ return 1;
+ case CODEC_ID_MACE3:
+ case CODEC_ID_MACE6:
+ return 6;
+ case CODEC_ID_ADPCM_IMA_QT:
+ return 64;
+ case CODEC_ID_AMR_NB:
+ case CODEC_ID_GSM:
+ case CODEC_ID_QCELP:
+ return 160;
+ case CODEC_ID_MP1:
+ return 384;
+ case CODEC_ID_MP2:
+ case CODEC_ID_MP3:
+ return 1152;
+ case CODEC_ID_AC3:
+ return 1536;
+ case CODEC_ID_ALAC:
+ case CODEC_ID_QDM2:
+ return 4096;
+ default:
+ return 0;
+ }
+}
+
+static int caf_write_header(AVFormatContext *s)
+{
+ AVIOContext *pb = s->pb;
+ AVCodecContext *enc = s->streams[0]->codec;
+ CAFContext *caf = s->priv_data;
+ unsigned int codec_tag = ff_codec_get_tag(ff_codec_caf_tags, enc->codec_id);
+
+ switch (enc->codec_id) {
+ case CODEC_ID_PCM_S8:
+ case CODEC_ID_PCM_S16LE:
+ case CODEC_ID_PCM_S16BE:
+ case CODEC_ID_PCM_S24LE:
+ case CODEC_ID_PCM_S24BE:
+ case CODEC_ID_PCM_S32LE:
+ case CODEC_ID_PCM_S32BE:
+ case CODEC_ID_PCM_F32LE:
+ case CODEC_ID_PCM_F32BE:
+ case CODEC_ID_PCM_F64LE:
+ case CODEC_ID_PCM_F64BE:
+ case CODEC_ID_PCM_ALAW:
+ case CODEC_ID_PCM_MULAW:
+ codec_tag = MKBETAG('l','p','c','m');
+ }
+
+ if (!codec_tag) {
+ av_log(s, AV_LOG_ERROR, "unsupported codec\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ if (!enc->block_align) {
+ av_log(s, AV_LOG_ERROR, "muxing with unknown or variable packet size not yet supported\n");
+ return AVERROR_PATCHWELCOME;
+ }
+
+ ffio_wfourcc(pb, "caff"); //< mFileType
+ avio_wb16(pb, 1); //< mFileVersion
+ avio_wb16(pb, 0); //< mFileFlags
+
+ ffio_wfourcc(pb, "desc"); //< Audio Description chunk
+ avio_wb64(pb, 32); //< mChunkSize
+ avio_wb64(pb, av_dbl2int(enc->sample_rate)); //< mSampleRate
+ avio_wb32(pb, codec_tag); //< mFormatID
+ avio_wb32(pb, codec_flags(enc->codec_id)); //< mFormatFlags
+ avio_wb32(pb, enc->block_align); //< mBytesPerPacket
+ avio_wb32(pb, samples_per_packet(enc->codec_id)); //< mFramesPerPacket
+ avio_wb32(pb, enc->channels); //< mChannelsPerFrame
+ avio_wb32(pb, enc->bits_per_coded_sample); //< mBitsPerChannel
+
+ ff_mov_write_chan(s, enc->channel_layout, "chan");
+
+ ffio_wfourcc(pb, "data"); //< Audio Data chunk
+ caf->data = avio_tell(pb);
+ avio_wb64(pb, -1); //< mChunkSize
+ avio_wb32(pb, 0); //< mEditCount
+
+ avio_flush(pb);
+ return 0;
+}
+
+static int caf_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ avio_write(s->pb, pkt->data, pkt->size);
+ return 0;
+}
+
+static int caf_write_trailer(AVFormatContext *s)
+{
+ AVIOContext *pb = s->pb;
+
+ if (pb->seekable) {
+ CAFContext *caf = s->priv_data;
+ int64_t file_size = avio_tell(pb);
+
+ avio_seek(pb, caf->data, SEEK_SET);
+ avio_wb64(pb, file_size - caf->data - 8);
+ avio_seek(pb, file_size, SEEK_SET);
+ avio_flush(pb);
+ }
+ return 0;
+}
+
+AVOutputFormat ff_caf_muxer = {
+ "caf",
+ NULL_IF_CONFIG_SMALL("Apple Core Audio Format"),
+ "audio/x-caf",
+ "caf",
+ sizeof(CAFContext),
+ CODEC_ID_PCM_S16BE,
+ CODEC_ID_NONE,
+ caf_write_header,
+ caf_write_packet,
+ caf_write_trailer,
+ .codec_tag= (const AVCodecTag* const []){ff_codec_caf_tags, 0},
+};
diff --git a/libavformat/crypto.c b/libavformat/crypto.c
index f1e548a0ca..c83bf92895 100644
--- a/libavformat/crypto.c
+++ b/libavformat/crypto.c
@@ -52,7 +52,10 @@ static const AVOption options[] = {
};
static const AVClass crypto_class = {
- "crypto", av_default_item_name, options, LIBAVUTIL_VERSION_INT
+ .class_name = "crypto",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
};
static int crypto_open(URLContext *h, const char *uri, int flags)
diff --git a/libavformat/cutils.c b/libavformat/cutils.c
index e6578df2ce..76aba56150 100644
--- a/libavformat/cutils.c
+++ b/libavformat/cutils.c
@@ -21,27 +21,6 @@
#include "avformat.h"
#include "internal.h"
-/* add one element to a dynamic array */
-void ff_dynarray_add(intptr_t **tab_ptr, int *nb_ptr, intptr_t elem)
-{
- /* see similar ffmpeg.c:grow_array() */
- int nb, nb_alloc;
- intptr_t *tab;
-
- nb = *nb_ptr;
- tab = *tab_ptr;
- if ((nb & (nb - 1)) == 0) {
- if (nb == 0)
- nb_alloc = 1;
- else
- nb_alloc = nb * 2;
- tab = av_realloc(tab, nb_alloc * sizeof(intptr_t));
- *tab_ptr = tab;
- }
- tab[nb++] = elem;
- *nb_ptr = nb;
-}
-
#define ISLEAP(y) (((y) % 4 == 0) && (((y) % 100) != 0 || ((y) % 400) == 0))
#define LEAPS_COUNT(y) ((y)/4 - (y)/100 + (y)/400)
diff --git a/libavformat/dv.c b/libavformat/dv.c
index 1e32125c23..750c950df8 100644
--- a/libavformat/dv.c
+++ b/libavformat/dv.c
@@ -316,7 +316,7 @@ int dv_get_packet(DVDemuxContext *c, AVPacket *pkt)
}
int dv_produce_packet(DVDemuxContext *c, AVPacket *pkt,
- uint8_t* buf, int buf_size)
+ uint8_t* buf, int buf_size, int64_t pos)
{
int size, i;
uint8_t *ppcm[4] = {0};
@@ -331,6 +331,7 @@ int dv_produce_packet(DVDemuxContext *c, AVPacket *pkt,
/* FIXME: in case of no audio/bad audio we have to do something */
size = dv_extract_audio_info(c, buf);
for (i = 0; i < c->ach; i++) {
+ c->audio_pkt[i].pos = pos;
c->audio_pkt[i].size = size;
c->audio_pkt[i].pts = c->abytes * 30000*8 / c->ast[i]->codec->bit_rate;
ppcm[i] = c->audio_buf[i];
@@ -354,6 +355,7 @@ int dv_produce_packet(DVDemuxContext *c, AVPacket *pkt,
size = dv_extract_video_info(c, buf);
av_init_packet(pkt);
pkt->data = buf;
+ pkt->pos = pos;
pkt->size = size;
pkt->flags |= AV_PKT_FLAG_KEY;
pkt->stream_index = c->vst->id;
@@ -452,13 +454,14 @@ static int dv_read_packet(AVFormatContext *s, AVPacket *pkt)
size = dv_get_packet(c->dv_demux, pkt);
if (size < 0) {
+ int64_t pos = avio_tell(s->pb);
if (!c->dv_demux->sys)
return AVERROR(EIO);
size = c->dv_demux->sys->frame_size;
if (avio_read(s->pb, c->buf, size) <= 0)
return AVERROR(EIO);
- size = dv_produce_packet(c->dv_demux, pkt, c->buf, size);
+ size = dv_produce_packet(c->dv_demux, pkt, c->buf, size, pos);
}
return size;
diff --git a/libavformat/dv.h b/libavformat/dv.h
index b8b43f1444..ce240c072c 100644
--- a/libavformat/dv.h
+++ b/libavformat/dv.h
@@ -33,7 +33,7 @@
typedef struct DVDemuxContext DVDemuxContext;
DVDemuxContext* dv_init_demux(AVFormatContext* s);
int dv_get_packet(DVDemuxContext*, AVPacket *);
-int dv_produce_packet(DVDemuxContext*, AVPacket*, uint8_t*, int);
+int dv_produce_packet(DVDemuxContext*, AVPacket*, uint8_t*, int, int64_t);
void dv_offset_reset(DVDemuxContext *c, int64_t frame_offset);
typedef struct DVMuxContext DVMuxContext;
diff --git a/libavformat/file.c b/libavformat/file.c
index 9d28a89327..88b5527521 100644
--- a/libavformat/file.c
+++ b/libavformat/file.c
@@ -51,6 +51,20 @@ static int file_get_handle(URLContext *h)
return (intptr_t) h->priv_data;
}
+static int file_check(URLContext *h, int mask)
+{
+ struct stat st;
+ int ret = stat(h->filename, &st);
+ if (ret < 0)
+ return AVERROR(errno);
+
+ ret |= st.st_mode&S_IRUSR ? mask&AVIO_RDONLY : 0;
+ ret |= st.st_mode&S_IWUSR ? mask&AVIO_WRONLY : 0;
+ ret |= st.st_mode&S_IWUSR && st.st_mode&S_IRUSR ? mask&AVIO_RDWR : 0;
+
+ return ret;
+}
+
#if CONFIG_FILE_PROTOCOL
static int file_open(URLContext *h, const char *filename, int flags)
@@ -95,20 +109,6 @@ static int file_close(URLContext *h)
return close(fd);
}
-static int file_check(URLContext *h, int mask)
-{
- struct stat st;
- int ret = stat(h->filename, &st);
- if (ret < 0)
- return AVERROR(errno);
-
- ret |= st.st_mode&S_IRUSR ? mask&AVIO_RDONLY : 0;
- ret |= st.st_mode&S_IWUSR ? mask&AVIO_WRONLY : 0;
- ret |= st.st_mode&S_IWUSR && st.st_mode&S_IRUSR ? mask&AVIO_RDWR : 0;
-
- return ret;
-}
-
URLProtocol ff_file_protocol = {
.name = "file",
.url_open = file_open,
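
file_check() is hoisted above the CONFIG_FILE_PROTOCOL guard so it can be shared within file.c. A hedged caller-side sketch, assuming the avio_check() entry point added to avio.h earlier in 2011; can_read is an illustrative name. The call returns the subset of the requested access flags that are actually available, or a negative AVERROR code.

#include "libavformat/avio.h"

static int can_read(const char *url)
{
    int ret = avio_check(url, AVIO_RDONLY);
    if (ret < 0)
        return ret;                  /* e.g. stat() failed: no such file */
    return !!(ret & AVIO_RDONLY);    /* 1 if readable, 0 otherwise */
}
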
diff --git a/libavformat/gxfenc.c b/libavformat/gxfenc.c
index 08270c8f66..3f7d7851f7 100644
--- a/libavformat/gxfenc.c
+++ b/libavformat/gxfenc.c
@@ -834,10 +834,10 @@ static int gxf_write_media_preamble(AVFormatContext *s, AVPacket *pkt, int size)
avio_wb16(pb, size / 2);
} else if (st->codec->codec_id == CODEC_ID_MPEG2VIDEO) {
int frame_type = gxf_parse_mpeg_frame(sc, pkt->data, pkt->size);
- if (frame_type == FF_I_TYPE) {
+ if (frame_type == AV_PICTURE_TYPE_I) {
avio_w8(pb, 0x0d);
sc->iframes++;
- } else if (frame_type == FF_B_TYPE) {
+ } else if (frame_type == AV_PICTURE_TYPE_B) {
avio_w8(pb, 0x0f);
sc->bframes++;
} else {
diff --git a/libavformat/h264dec.c b/libavformat/h264dec.c
index 3f5ed5eabb..cec8f85275 100644
--- a/libavformat/h264dec.c
+++ b/libavformat/h264dec.c
@@ -54,7 +54,7 @@ static int h264_probe(AVProbeData *p)
case 1: sli++; break;
case 5: idr++; break;
case 7:
- if(p->buf[i+2]&0x0F)
+ if(p->buf[i+2]&0x03)
return 0;
sps++;
break;
diff --git a/libavformat/http.c b/libavformat/http.c
index 086a19ff81..95246366bc 100644
--- a/libavformat/http.c
+++ b/libavformat/http.c
@@ -54,11 +54,14 @@ typedef struct {
#define OFFSET(x) offsetof(HTTPContext, x)
static const AVOption options[] = {
-{"chunksize", "use chunked transfer-encoding for posts, -1 disables it, 0 enables it", OFFSET(chunksize), FF_OPT_TYPE_INT64, 0, -1, 0 }, /* Default to 0, for chunked POSTs */
+{"chunksize", "use chunked transfer-encoding for posts, -1 disables it, 0 enables it", OFFSET(chunksize), FF_OPT_TYPE_INT64, {.dbl = 0}, -1, 0 }, /* Default to 0, for chunked POSTs */
{NULL}
};
static const AVClass httpcontext_class = {
- "HTTP", av_default_item_name, options, LIBAVUTIL_VERSION_INT
+ .class_name = "HTTP",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
};
static int http_connect(URLContext *h, const char *path, const char *hoststr,
diff --git a/libavformat/img2.c b/libavformat/img2.c
index 7a8f756254..19292c39ec 100644
--- a/libavformat/img2.c
+++ b/libavformat/img2.c
@@ -75,6 +75,7 @@ static const IdStrMap img_tags[] = {
{ CODEC_ID_SUNRAST , "im8"},
{ CODEC_ID_SUNRAST , "im24"},
{ CODEC_ID_SUNRAST , "sunras"},
+ { CODEC_ID_JPEG2000 , "j2k"},
{ CODEC_ID_JPEG2000 , "jp2"},
{ CODEC_ID_DPX , "dpx"},
{ CODEC_ID_PICTOR , "pic"},
diff --git a/libavformat/internal.h b/libavformat/internal.h
index 20a52a6958..d75e0676e5 100644
--- a/libavformat/internal.h
+++ b/libavformat/internal.h
@@ -31,20 +31,18 @@ typedef struct AVCodecTag {
unsigned int tag;
} AVCodecTag;
-void ff_dynarray_add(intptr_t **tab_ptr, int *nb_ptr, intptr_t elem);
-
#ifdef __GNUC__
#define dynarray_add(tab, nb_ptr, elem)\
do {\
__typeof__(tab) _tab = (tab);\
__typeof__(elem) _elem = (elem);\
(void)sizeof(**_tab == _elem); /* check that types are compatible */\
- ff_dynarray_add((intptr_t **)_tab, nb_ptr, (intptr_t)_elem);\
+ av_dynarray_add(_tab, nb_ptr, _elem);\
} while(0)
#else
#define dynarray_add(tab, nb_ptr, elem)\
do {\
- ff_dynarray_add((intptr_t **)(tab), nb_ptr, (intptr_t)(elem));\
+ av_dynarray_add((tab), nb_ptr, (elem));\
} while(0)
#endif
diff --git a/libavformat/isom.c b/libavformat/isom.c
index 76a7082148..3259128d3a 100644
--- a/libavformat/isom.c
+++ b/libavformat/isom.c
@@ -27,6 +27,7 @@
#include "internal.h"
#include "isom.h"
#include "riff.h"
+#include "avio_internal.h"
#include "libavcodec/mpeg4audio.h"
#include "libavcodec/mpegaudiodata.h"
@@ -81,8 +82,10 @@ const AVCodecTag codec_movvideo_tags[] = {
{ CODEC_ID_RAWVIDEO, MKTAG('A', 'B', 'G', 'R') },
{ CODEC_ID_RAWVIDEO, MKTAG('b', '1', '6', 'g') },
{ CODEC_ID_RAWVIDEO, MKTAG('b', '4', '8', 'r') },
+ { CODEC_ID_RAWVIDEO, MKTAG('D', 'V', 'O', 'O') }, /* Digital Voodoo SD 8 Bit */
{ CODEC_ID_R10K, MKTAG('R', '1', '0', 'k') }, /* UNCOMPRESSED 10BIT RGB */
+ { CODEC_ID_R10K, MKTAG('R', '1', '0', 'g') }, /* UNCOMPRESSED 10BIT RGB */
{ CODEC_ID_R210, MKTAG('r', '2', '1', '0') }, /* UNCOMPRESSED 10BIT RGB */
{ CODEC_ID_V210, MKTAG('v', '2', '1', '0') }, /* UNCOMPRESSED 10BIT 4:2:2 */
@@ -442,6 +445,7 @@ static const MovChannelLayout mov_channel_layout[] = {
{ AV_CH_LAYOUT_5POINT1, (121<<16) | 6}, //< kCAFChannelLayoutTag_MPEG_5_1_A
{ AV_CH_LAYOUT_7POINT1, (128<<16) | 8}, //< kCAFChannelLayoutTag_MPEG_7_1_C
{ AV_CH_LAYOUT_7POINT1_WIDE, (126<<16) | 8}, //< kCAFChannelLayoutTag_MPEG_7_1_A
+ { AV_CH_LAYOUT_5POINT1_BACK|AV_CH_LAYOUT_STEREO_DOWNMIX, (130<<16) | 8}, //< kCAFChannelLayoutTag_SMPTE_DTV
{ AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY, (133<<16) | 3}, //< kCAFChannelLayoutTag_DVD_4
{ AV_CH_LAYOUT_2_1|AV_CH_LOW_FREQUENCY, (134<<16) | 4}, //< kCAFChannelLayoutTag_DVD_5
{ AV_CH_LAYOUT_QUAD|AV_CH_LOW_FREQUENCY, (135<<16) | 4}, //< kCAFChannelLayoutTag_DVD_6
@@ -480,3 +484,31 @@ void ff_mov_read_chan(AVFormatContext *s, int64_t size, AVCodecContext *codec)
avio_skip(pb, 8);
}
+void ff_mov_write_chan(AVFormatContext *s, int64_t channel_layout,
+ const char *chunk_type)
+{
+ AVIOContext *pb = s->pb;
+ const MovChannelLayout *layouts;
+ uint32_t layout_tag = 0;
+
+ if (!channel_layout)
+ return;
+
+ for (layouts = mov_channel_layout; layouts->channel_layout; layouts++)
+ if (channel_layout == layouts->channel_layout) {
+ layout_tag = layouts->layout_tag;
+ break;
+ }
+
+ ffio_wfourcc(pb, chunk_type);
+ avio_wb64(pb, 12); //< mChunkSize
+ if (layout_tag) {
+ avio_wb32(pb, layout_tag); //< mChannelLayoutTag
+ avio_wb32(pb, 0); //< mChannelBitmap
+ } else {
+ avio_wb32(pb, 0x10000); //< kCAFChannelLayoutTag_UseChannelBitmap
+ avio_wb32(pb, channel_layout);
+ }
+ avio_wb32(pb, 0); //< mNumberChannelDescriptions
+}
+
diff --git a/libavformat/isom.h b/libavformat/isom.h
index 6848a7d4cc..6e42a1f841 100644
--- a/libavformat/isom.h
+++ b/libavformat/isom.h
@@ -109,7 +109,7 @@ typedef struct MOVStreamContext {
unsigned int keyframe_count;
int *keyframes;
int time_scale;
- int time_offset; ///< time offset of the first edit list entry
+ int64_t time_offset; ///< time offset of the first edit list entry
int current_sample;
unsigned int bytes_per_frame;
unsigned int samples_per_frame;
@@ -155,5 +155,7 @@ enum CodecID ff_mov_get_lpcm_codec_id(int bps, int flags);
int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries);
void ff_mov_read_chan(AVFormatContext *s, int64_t size, AVCodecContext *codec);
+void ff_mov_write_chan(AVFormatContext *s, int64_t channel_layout,
+ const char *chunk_type);
#endif /* AVFORMAT_ISOM_H */
diff --git a/libavformat/matroskaenc.c b/libavformat/matroskaenc.c
index 5e4552a09c..387cead8cd 100644
--- a/libavformat/matroskaenc.c
+++ b/libavformat/matroskaenc.c
@@ -403,8 +403,6 @@ static int64_t mkv_write_cues(AVIOContext *pb, mkv_cues *cues, int num_tracks)
}
end_ebml_master(pb, cues_element);
- av_free(cues->entries);
- av_free(cues);
return currentpos;
}
@@ -618,7 +616,7 @@ static int mkv_write_tracks(AVFormatContext *s)
put_ebml_uint(pb, MATROSKA_ID_TRACKTYPE, MATROSKA_TRACK_TYPE_SUBTITLE);
if (!native_id) {
av_log(s, AV_LOG_ERROR, "Subtitle codec %d is not supported.\n", codec->codec_id);
- return AVERROR(EINVAL);
+ return AVERROR(ENOSYS);
}
break;
default:
@@ -1164,6 +1162,8 @@ static int mkv_write_trailer(AVFormatContext *s)
end_ebml_master(pb, mkv->segment);
av_free(mkv->tracks);
+ av_freep(&mkv->cues->entries);
+ av_freep(&mkv->cues);
av_destruct_packet(&mkv->cur_audio_pkt);
avio_flush(pb);
return 0;
diff --git a/libavformat/mov.c b/libavformat/mov.c
index 41e23a0ad4..c16add7713 100644
--- a/libavformat/mov.c
+++ b/libavformat/mov.c
@@ -2005,6 +2005,7 @@ static int mov_read_trun(MOVContext *c, AVIOContext *pb, MOVAtom atom)
MOVFragment *frag = &c->fragment;
AVStream *st = NULL;
MOVStreamContext *sc;
+ MOVStts *ctts_data;
uint64_t offset;
int64_t dts;
int data_offset = 0;
@@ -2028,18 +2029,33 @@ static int mov_read_trun(MOVContext *c, AVIOContext *pb, MOVAtom atom)
flags = avio_rb24(pb);
entries = avio_rb32(pb);
av_dlog(c->fc, "flags 0x%x entries %d\n", flags, entries);
- if (flags & 0x001) data_offset = avio_rb32(pb);
- if (flags & 0x004) first_sample_flags = avio_rb32(pb);
- if (flags & 0x800) {
- MOVStts *ctts_data;
- if ((uint64_t)entries+sc->ctts_count >= UINT_MAX/sizeof(*sc->ctts_data))
- return -1;
- ctts_data = av_realloc(sc->ctts_data,
- (entries+sc->ctts_count)*sizeof(*sc->ctts_data));
+
+ /* Always assume the presence of composition time offsets.
+ * Without this assumption, for instance, we cannot deal with a track in fragmented movies that meets all of the following:
+ * 1) in the initial movie, there are no samples.
+ * 2) in the first movie fragment, there is only one sample without composition time offset.
+ * 3) in the subsequent movie fragments, there are samples with composition time offset. */
+ if (!sc->ctts_count && sc->sample_count)
+ {
+ /* Complement ctts table if moov atom doesn't have ctts atom. */
+ ctts_data = av_malloc(sizeof(*sc->ctts_data));
if (!ctts_data)
return AVERROR(ENOMEM);
sc->ctts_data = ctts_data;
+ sc->ctts_data[sc->ctts_count].count = sc->sample_count;
+ sc->ctts_data[sc->ctts_count].duration = 0;
+ sc->ctts_count++;
}
+ if ((uint64_t)entries+sc->ctts_count >= UINT_MAX/sizeof(*sc->ctts_data))
+ return -1;
+ ctts_data = av_realloc(sc->ctts_data,
+ (entries+sc->ctts_count)*sizeof(*sc->ctts_data));
+ if (!ctts_data)
+ return AVERROR(ENOMEM);
+ sc->ctts_data = ctts_data;
+
+ if (flags & 0x001) data_offset = avio_rb32(pb);
+ if (flags & 0x004) first_sample_flags = avio_rb32(pb);
dts = st->duration;
offset = frag->base_data_offset + data_offset;
distance = 0;
@@ -2053,11 +2069,9 @@ static int mov_read_trun(MOVContext *c, AVIOContext *pb, MOVAtom atom)
if (flags & 0x100) sample_duration = avio_rb32(pb);
if (flags & 0x200) sample_size = avio_rb32(pb);
if (flags & 0x400) sample_flags = avio_rb32(pb);
- if (flags & 0x800) {
- sc->ctts_data[sc->ctts_count].count = 1;
- sc->ctts_data[sc->ctts_count].duration = avio_rb32(pb);
- sc->ctts_count++;
- }
+ sc->ctts_data[sc->ctts_count].count = 1;
+ sc->ctts_data[sc->ctts_count].duration = (flags & 0x800) ? avio_rb32(pb) : 0;
+ sc->ctts_count++;
if ((keyframe = st->codec->codec_type == AVMEDIA_TYPE_AUDIO ||
(flags & 0x004 && !i && !sample_flags) || sample_flags & 0x2000000))
distance = 0;
@@ -2153,13 +2167,13 @@ free_and_return:
static int mov_read_elst(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
MOVStreamContext *sc;
- int i, edit_count;
+ int i, edit_count, version;
if (c->fc->nb_streams < 1)
return 0;
sc = c->fc->streams[c->fc->nb_streams-1]->priv_data;
- avio_r8(pb); /* version */
+ version = avio_r8(pb); /* version */
avio_rb24(pb); /* flags */
edit_count = avio_rb32(pb); /* entries */
@@ -2167,9 +2181,15 @@ static int mov_read_elst(MOVContext *c, AVIOContext *pb, MOVAtom atom)
return -1;
for(i=0; i<edit_count; i++){
- int time;
- int duration = avio_rb32(pb); /* Track duration */
- time = avio_rb32(pb); /* Media time */
+ int64_t time;
+ int64_t duration;
+ if (version == 1) {
+ duration = avio_rb64(pb);
+ time = avio_rb64(pb);
+ } else {
+ duration = avio_rb32(pb); /* segment duration */
+ time = avio_rb32(pb); /* media time */
+ }
avio_rb32(pb); /* Media rate */
if (i == 0 && time >= -1) {
sc->time_offset = time != -1 ? time : -duration;
@@ -2448,7 +2468,7 @@ static int mov_read_packet(AVFormatContext *s, AVPacket *pkt)
return ret;
#if CONFIG_DV_DEMUXER
if (mov->dv_demux && sc->dv_audio_container) {
- dv_produce_packet(mov->dv_demux, pkt, pkt->data, pkt->size);
+ dv_produce_packet(mov->dv_demux, pkt, pkt->data, pkt->size, pkt->pos);
av_free(pkt->data);
pkt->size = 0;
ret = dv_get_packet(mov->dv_demux, pkt);
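
The mov_read_elst hunk above widens the edit-list fields to 64 bits and branches on the box version. A minimal sketch of that read pattern, not part of the patch, with a made-up helper name and assuming an AVIOContext positioned at the first entry:

/* Illustrative only: read one edit-list entry according to the elst version
 * (ISO/IEC 14496-12); version 1 stores 64-bit fields, version 0 32-bit ones. */
static void read_one_elst_entry(AVIOContext *pb, int version,
                                int64_t *duration, int64_t *time)
{
    if (version == 1) {
        *duration = avio_rb64(pb);          /* segment_duration */
        *time     = avio_rb64(pb);          /* media_time */
    } else {
        *duration = avio_rb32(pb);          /* segment_duration */
        *time     = (int32_t)avio_rb32(pb); /* media_time, may be -1 for an empty edit */
    }
    avio_rb32(pb);                          /* media_rate, not used here */
}
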
diff --git a/libavformat/movenc.c b/libavformat/movenc.c
index eec61fed66..1b61706da3 100644
--- a/libavformat/movenc.c
+++ b/libavformat/movenc.c
@@ -829,7 +829,7 @@ static int mov_write_video_tag(AVIOContext *pb, MOVTrack *track)
memset(compressor_name,0,32);
/* FIXME not sure, ISO 14496-1 draft where it shall be set to 0 */
if (track->mode == MODE_MOV && track->enc->codec && track->enc->codec->name)
- strncpy(compressor_name,track->enc->codec->name,31);
+ av_strlcpy(compressor_name,track->enc->codec->name,32);
avio_w8(pb, strlen(compressor_name));
avio_write(pb, compressor_name, 31);
diff --git a/libavformat/mp3enc.c b/libavformat/mp3enc.c
index a19769bbb0..082450779f 100644
--- a/libavformat/mp3enc.c
+++ b/libavformat/mp3enc.c
@@ -24,15 +24,19 @@
#include "id3v1.h"
#include "id3v2.h"
#include "rawenc.h"
+#include "libavutil/avstring.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
+#include "libavcodec/mpegaudiodata.h"
+#include "libavcodec/mpegaudiodecheader.h"
+#include "libavformat/avio_internal.h"
static int id3v1_set_string(AVFormatContext *s, const char *key,
uint8_t *buf, int buf_size)
{
AVMetadataTag *tag;
if ((tag = av_metadata_get(s->metadata, key, NULL, 0)))
- strncpy(buf, tag->value, buf_size);
+ av_strlcpy(buf, tag->value, buf_size);
return !!tag;
}
@@ -126,7 +130,7 @@ static int id3v2_put_ttag(AVFormatContext *s, const char *str1, const char *str2
return len + ID3v2_HEADER_SIZE;
}
-static int mp3_write_trailer(struct AVFormatContext *s)
+static int mp2_write_trailer(struct AVFormatContext *s)
{
uint8_t buf[ID3v1_TAG_SIZE];
@@ -149,27 +153,36 @@ AVOutputFormat ff_mp2_muxer = {
CODEC_ID_NONE,
NULL,
ff_raw_write_packet,
- mp3_write_trailer,
+ mp2_write_trailer,
};
#endif
#if CONFIG_MP3_MUXER
+#define VBR_NUM_BAGS 400
+#define VBR_TOC_SIZE 100
typedef struct MP3Context {
const AVClass *class;
int id3v2_version;
+ int64_t frames_offset;
+ int32_t frames;
+ int32_t size;
+ uint32_t want;
+ uint32_t seen;
+ uint32_t pos;
+ uint64_t bag[VBR_NUM_BAGS];
} MP3Context;
static const AVOption options[] = {
{ "id3v2_version", "Select ID3v2 version to write. Currently 3 and 4 are supported.",
- offsetof(MP3Context, id3v2_version), FF_OPT_TYPE_INT, 4, 3, 4, AV_OPT_FLAG_ENCODING_PARAM},
+ offsetof(MP3Context, id3v2_version), FF_OPT_TYPE_INT, {.dbl = 4}, 3, 4, AV_OPT_FLAG_ENCODING_PARAM},
{ NULL },
};
static const AVClass mp3_muxer_class = {
- "MP3 muxer",
- av_default_item_name,
- options,
- LIBAVUTIL_VERSION_INT,
+ .class_name = "MP3 muxer",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
};
static int id3v2_check_write_tag(AVFormatContext *s, AVMetadataTag *t, const char table[][4],
@@ -187,6 +200,143 @@ static int id3v2_check_write_tag(AVFormatContext *s, AVMetadataTag *t, const cha
return -1;
}
+static const int64_t xing_offtbl[2][2] = {{32, 17}, {17,9}};
+
+/*
+ * Write an empty XING header and initialize respective data.
+ */
+static int mp3_write_xing(AVFormatContext *s)
+{
+ AVCodecContext *codec = s->streams[0]->codec;
+ MP3Context *mp3 = s->priv_data;
+ int bitrate_idx = 3;
+ int64_t xing_offset;
+ int32_t mask, header;
+ MPADecodeHeader c;
+ int srate_idx, i, channels;
+ int needed;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(ff_mpa_freq_tab); i++)
+ if (ff_mpa_freq_tab[i] == codec->sample_rate) {
+ srate_idx = i;
+ break;
+ }
+ if (i == FF_ARRAY_ELEMS(ff_mpa_freq_tab)) {
+ av_log(s, AV_LOG_ERROR, "Unsupported sample rate.\n");
+ return -1;
+ }
+
+ switch (codec->channels) {
+ case 1: channels = MPA_MONO; break;
+ case 2: channels = MPA_STEREO; break;
+ default: av_log(s, AV_LOG_ERROR, "Unsupported number of channels.\n"); return -1;
+ }
+
+ /* dummy MPEG audio header */
+ header = 0xff << 24; // sync
+ header |= (0x7 << 5 | 0x3 << 3 | 0x1 << 1 | 0x1) << 16; // sync/mpeg-1/layer 3/no crc
+ header |= (srate_idx << 2) << 8;
+ header |= channels << 6;
+
+ for (;;) {
+ if (15 == bitrate_idx)
+ return -1;
+
+ mask = (bitrate_idx << 4) << 8;
+ header |= mask;
+ ff_mpegaudio_decode_header(&c, header);
+ xing_offset=xing_offtbl[c.lsf == 1][c.nb_channels == 1];
+ needed = 4 // header
+ + xing_offset
+ + 4 // xing tag
+ + 4 // frames/size/toc flags
+ + 4 // frames
+ + 4 // size
+ + VBR_TOC_SIZE; // toc
+
+ if (needed <= c.frame_size)
+ break;
+
+ header &= ~mask;
+ ++bitrate_idx;
+ }
+
+ avio_wb32(s->pb, header);
+ ffio_fill(s->pb, 0, xing_offset);
+ avio_wb32(s->pb, MKBETAG('X', 'i', 'n', 'g'));
+ avio_wb32(s->pb, 0x01 | 0x02 | 0x04); // frames/size/toc
+
+ mp3->frames_offset = avio_tell(s->pb);
+ mp3->size = c.frame_size;
+ mp3->want=1;
+ mp3->seen=0;
+ mp3->pos=0;
+
+ avio_wb32(s->pb, 0); // frames
+ avio_wb32(s->pb, 0); // size
+
+ // toc
+ for (i = 0; i < VBR_TOC_SIZE; ++i)
+ avio_w8(s->pb, (uint8_t)(255 * i / VBR_TOC_SIZE));
+
+ ffio_fill(s->pb, 0, c.frame_size - needed);
+ avio_flush(s->pb);
+
+ return 0;
+}
+
+/*
+ * Add a frame to XING data.
+ * Following lame's "VbrTag.c".
+ */
+static void mp3_xing_add_frame(AVFormatContext *s, AVPacket *pkt)
+{
+ MP3Context *mp3 = s->priv_data;
+ int i;
+
+ ++mp3->frames;
+ mp3->size += pkt->size;
+
+ if (mp3->want == ++mp3->seen) {
+ mp3->bag[mp3->pos] = mp3->size;
+
+ if (VBR_NUM_BAGS == ++mp3->pos) {
+ /* shrink table to half size by throwing away every second bag. */
+ for (i = 1; i < VBR_NUM_BAGS; i += 2)
+ mp3->bag[i >> 1] = mp3->bag[i];
+
+ /* double wanted amount per bag. */
+ mp3->want <<= 1;
+ /* adjust current position to half of table size. */
+ mp3->pos >>= 1;
+ }
+
+ mp3->seen = 0;
+ }
+}
+
+static void mp3_fix_xing(AVFormatContext *s)
+{
+ MP3Context *mp3 = s->priv_data;
+ int i;
+
+ avio_flush(s->pb);
+ avio_seek(s->pb, mp3->frames_offset, SEEK_SET);
+ avio_wb32(s->pb, mp3->frames);
+ avio_wb32(s->pb, mp3->size);
+
+ avio_w8(s->pb, 0); // first toc entry has to be zero.
+
+ for (i = 1; i < VBR_TOC_SIZE; ++i) {
+ int j = i * mp3->pos / VBR_TOC_SIZE;
+ int seek_point = 256LL * mp3->bag[j] / mp3->size;
+ avio_w8(s->pb, FFMIN(seek_point, 255));
+ }
+
+ avio_flush(s->pb);
+ avio_seek(s->pb, 0, SEEK_END);
+}
+
/**
* Write an ID3v2 header at beginning of stream
*/
@@ -235,6 +385,59 @@ static int mp3_write_header(struct AVFormatContext *s)
id3v2_put_size(s, totlen);
avio_seek(s->pb, cur_pos, SEEK_SET);
+ if (s->pb->seekable)
+ mp3_write_xing(s);
+
+ return 0;
+}
+
+static int mp3_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ if (! pkt || ! pkt->data || pkt->size < 4)
+ return ff_raw_write_packet(s, pkt);
+ else {
+ MP3Context *mp3 = s->priv_data;
+#ifdef FILTER_VBR_HEADERS
+ MPADecodeHeader c;
+ int base;
+
+ ff_mpegaudio_decode_header(&c, AV_RB32(pkt->data));
+
+ /* filter out XING and INFO headers. */
+ base = 4 + xing_offtbl[c.lsf == 1][c.nb_channels == 1];
+
+ if (base + 4 <= pkt->size) {
+ uint32_t v = AV_RB32(pkt->data + base);
+
+ if (MKBETAG('X','i','n','g') == v || MKBETAG('I','n','f','o') == v)
+ return 0;
+ }
+
+ /* filter out VBRI headers. */
+ base = 4 + 32;
+
+ if (base + 4 <= pkt->size && MKBETAG('V','B','R','I') == AV_RB32(pkt->data + base))
+ return 0;
+#endif
+
+ if (mp3->frames_offset)
+ mp3_xing_add_frame(s, pkt);
+
+ return ff_raw_write_packet(s, pkt);
+ }
+}
+
+static int mp3_write_trailer(AVFormatContext *s)
+{
+ MP3Context *mp3 = s->priv_data;
+ int ret=mp2_write_trailer(s);
+
+ if (ret < 0)
+ return ret;
+
+ if (mp3->frames_offset)
+ mp3_fix_xing(s);
+
return 0;
}
@@ -247,7 +450,7 @@ AVOutputFormat ff_mp3_muxer = {
CODEC_ID_MP3,
CODEC_ID_NONE,
mp3_write_header,
- ff_raw_write_packet,
+ mp3_write_packet,
mp3_write_trailer,
AVFMT_NOTIMESTAMPS,
.priv_class = &mp3_muxer_class,
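
The new Xing/VBR machinery in mp3enc.c keeps a fixed-size table of cumulative byte offsets ("bags") and, whenever the table fills up, drops every second entry and doubles the sampling interval, so the table always spans the whole stream. The trailer then turns that table into the 100-entry TOC, where each entry is a byte position expressed in 1/256ths of the file size. A standalone sketch of that bookkeeping, not part of the patch (table sizes shrunk so the output stays readable):

#include <stdint.h>
#include <stdio.h>

#define NUM_BAGS 8   /* the muxer uses 400 */
#define TOC_SIZE 4   /* the muxer writes 100 TOC entries */

int main(void)
{
    uint64_t bag[NUM_BAGS];
    uint32_t want = 1, seen = 0, pos = 0;
    uint64_t size = 0;
    int frame, i;

    for (frame = 0; frame < 100; frame++) {
        size += 418;                      /* pretend every frame is 418 bytes */
        if (++seen == want) {
            bag[pos] = size;              /* record the cumulative offset */
            if (++pos == NUM_BAGS) {      /* table full: halve it */
                for (i = 1; i < NUM_BAGS; i += 2)
                    bag[i >> 1] = bag[i];
                want <<= 1;               /* sample half as often from now on */
                pos  >>= 1;
            }
            seen = 0;
        }
    }

    /* first TOC entry is always zero, the rest map duration to file position */
    printf("0");
    for (i = 1; i < TOC_SIZE; i++) {
        int j = i * pos / TOC_SIZE;
        uint64_t seek_point = 256 * bag[j] / size;
        printf(" %d", (int)(seek_point > 255 ? 255 : seek_point));
    }
    printf("\n");
    return 0;
}

With 400 bags and this halving scheme the table covers the whole stream at a roughly uniform density no matter how many frames are eventually written.
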
diff --git a/libavformat/mpegts.c b/libavformat/mpegts.c
index eb53486aeb..6452861e38 100644
--- a/libavformat/mpegts.c
+++ b/libavformat/mpegts.c
@@ -524,6 +524,7 @@ static const StreamType MISC_types[] = {
static const StreamType REGD_types[] = {
{ MKTAG('d','r','a','c'), AVMEDIA_TYPE_VIDEO, CODEC_ID_DIRAC },
{ MKTAG('A','C','-','3'), AVMEDIA_TYPE_AUDIO, CODEC_ID_AC3 },
+ { MKTAG('B','S','S','D'), AVMEDIA_TYPE_AUDIO, CODEC_ID_S302M },
{ 0 },
};
@@ -1455,7 +1456,7 @@ static int mpegts_read_header(AVFormatContext *s,
{
MpegTSContext *ts = s->priv_data;
AVIOContext *pb = s->pb;
- uint8_t buf[5*1024];
+ uint8_t buf[8*1024];
int len;
int64_t pos;
@@ -1473,8 +1474,10 @@ static int mpegts_read_header(AVFormatContext *s,
if (len != sizeof(buf))
goto fail;
ts->raw_packet_size = get_packet_size(buf, sizeof(buf));
- if (ts->raw_packet_size <= 0)
- goto fail;
+ if (ts->raw_packet_size <= 0) {
+ av_log(s, AV_LOG_WARNING, "Could not detect TS packet size, defaulting to non-FEC/DVHS\n");
+ ts->raw_packet_size = TS_PACKET_SIZE;
+ }
ts->stream = s;
ts->auto_guess = 0;
diff --git a/libavformat/mpegtsenc.c b/libavformat/mpegtsenc.c
index 3ca743a2ba..e7d70015aa 100644
--- a/libavformat/mpegtsenc.c
+++ b/libavformat/mpegtsenc.c
@@ -76,23 +76,23 @@ typedef struct MpegTSWrite {
static const AVOption options[] = {
{ "mpegts_transport_stream_id", "Set transport_stream_id field.",
- offsetof(MpegTSWrite, transport_stream_id), FF_OPT_TYPE_INT, 0x0001, 0x0001, 0xffff, AV_OPT_FLAG_ENCODING_PARAM},
+ offsetof(MpegTSWrite, transport_stream_id), FF_OPT_TYPE_INT, {.dbl = 0x0001 }, 0x0001, 0xffff, AV_OPT_FLAG_ENCODING_PARAM},
{ "mpegts_original_network_id", "Set original_network_id field.",
- offsetof(MpegTSWrite, original_network_id), FF_OPT_TYPE_INT, 0x0001, 0x0001, 0xffff, AV_OPT_FLAG_ENCODING_PARAM},
+ offsetof(MpegTSWrite, original_network_id), FF_OPT_TYPE_INT, {.dbl = 0x0001 }, 0x0001, 0xffff, AV_OPT_FLAG_ENCODING_PARAM},
{ "mpegts_service_id", "Set service_id field.",
- offsetof(MpegTSWrite, service_id), FF_OPT_TYPE_INT, 0x0001, 0x0001, 0xffff, AV_OPT_FLAG_ENCODING_PARAM},
+ offsetof(MpegTSWrite, service_id), FF_OPT_TYPE_INT, {.dbl = 0x0001 }, 0x0001, 0xffff, AV_OPT_FLAG_ENCODING_PARAM},
{ "mpegts_pmt_start_pid", "Set the first pid of the PMT.",
- offsetof(MpegTSWrite, pmt_start_pid), FF_OPT_TYPE_INT, 0x1000, 0x1000, 0x1f00, AV_OPT_FLAG_ENCODING_PARAM},
+ offsetof(MpegTSWrite, pmt_start_pid), FF_OPT_TYPE_INT, {.dbl = 0x1000 }, 0x1000, 0x1f00, AV_OPT_FLAG_ENCODING_PARAM},
{ "mpegts_start_pid", "Set the first pid.",
- offsetof(MpegTSWrite, start_pid), FF_OPT_TYPE_INT, 0x0100, 0x0100, 0x0f00, AV_OPT_FLAG_ENCODING_PARAM},
+ offsetof(MpegTSWrite, start_pid), FF_OPT_TYPE_INT, {.dbl = 0x0100 }, 0x0100, 0x0f00, AV_OPT_FLAG_ENCODING_PARAM},
{ NULL },
};
static const AVClass mpegts_muxer_class = {
- "MPEGTS muxer",
- av_default_item_name,
- options,
- LIBAVUTIL_VERSION_INT,
+ .class_name = "MPEGTS muxer",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
};
/* NOTE: 4 bytes must be left at the end for the crc32 */
@@ -588,7 +588,7 @@ static int mpegts_write_header(AVFormatContext *s)
av_free(pids);
for(i = 0;i < s->nb_streams; i++) {
st = s->streams[i];
- av_free(st->priv_data);
+ av_freep(&st->priv_data);
}
return -1;
}
diff --git a/libavformat/nsvdec.c b/libavformat/nsvdec.c
index 4b294d3591..300a8fd7f8 100644
--- a/libavformat/nsvdec.c
+++ b/libavformat/nsvdec.c
@@ -737,6 +737,9 @@ static int nsv_read_close(AVFormatContext *s)
static int nsv_probe(AVProbeData *p)
{
int i;
+ int score;
+ int vsize, asize, auxcount;
+ score = 0;
av_dlog(NULL, "nsv_probe(), buf_size %d\n", p->buf_size);
/* check file header */
/* streamed files might not have any header */
@@ -749,14 +752,25 @@ static int nsv_probe(AVProbeData *p)
/* sometimes even the first header is at 9KB or something :^) */
for (i = 1; i < p->buf_size - 3; i++) {
if (p->buf[i+0] == 'N' && p->buf[i+1] == 'S' &&
- p->buf[i+2] == 'V' && p->buf[i+3] == 's')
- return AVPROBE_SCORE_MAX-20;
+ p->buf[i+2] == 'V' && p->buf[i+3] == 's') {
+ score = AVPROBE_SCORE_MAX/5;
+ /* Get the chunk size and check if at the end we are getting 0xBEEF */
+ auxcount = p->buf[i+19];
+ vsize = p->buf[i+20] | p->buf[i+21] << 8;
+ asize = p->buf[i+22] | p->buf[i+23] << 8;
+ vsize = (vsize << 4) | (auxcount >> 4);
+ if ((asize + vsize + i + 23) < p->buf_size - 2) {
+ if (p->buf[i+23+asize+vsize+1] == 0xEF &&
+ p->buf[i+23+asize+vsize+2] == 0xBE)
+ return AVPROBE_SCORE_MAX-20;
+ }
+ }
}
/* so we'll have more luck on extension... */
if (av_match_ext(p->filename, "nsv"))
return AVPROBE_SCORE_MAX/2;
/* FIXME: add mime-type check */
- return 0;
+ return score;
}
AVInputFormat ff_nsv_demuxer = {
diff --git a/libavformat/nutenc.c b/libavformat/nutenc.c
index 47eaa2c81c..4756ca2a1a 100644
--- a/libavformat/nutenc.c
+++ b/libavformat/nutenc.c
@@ -589,6 +589,12 @@ static int write_header(AVFormatContext *s){
nut->chapter = av_mallocz(sizeof(ChapterContext)*s->nb_chapters);
nut->time_base= av_mallocz(sizeof(AVRational )*(s->nb_streams +
s->nb_chapters));
+ if (!nut->stream || !nut->chapter || !nut->time_base) {
+ av_freep(&nut->stream);
+ av_freep(&nut->chapter);
+ av_freep(&nut->time_base);
+ return AVERROR(ENOMEM);
+ }
for(i=0; i<s->nb_streams; i++){
AVStream *st= s->streams[i];
diff --git a/libavformat/oggdec.c b/libavformat/oggdec.c
index 29e4907e86..c799ce5205 100644
--- a/libavformat/oggdec.c
+++ b/libavformat/oggdec.c
@@ -56,8 +56,7 @@ static const struct ogg_codec * const ogg_codecs[] = {
};
//FIXME We could avoid some structure duplication
-static int
-ogg_save (AVFormatContext * s)
+static int ogg_save(AVFormatContext *s)
{
struct ogg *ogg = s->priv_data;
struct ogg_state *ost =
@@ -81,8 +80,7 @@ ogg_save (AVFormatContext * s)
return 0;
}
-static int
-ogg_restore (AVFormatContext * s, int discard)
+static int ogg_restore(AVFormatContext *s, int discard)
{
struct ogg *ogg = s->priv_data;
AVIOContext *bc = s->pb;
@@ -110,8 +108,7 @@ ogg_restore (AVFormatContext * s, int discard)
return 0;
}
-static int
-ogg_reset (struct ogg * ogg)
+static int ogg_reset(struct ogg *ogg)
{
int i;
@@ -135,8 +132,7 @@ ogg_reset (struct ogg * ogg)
return 0;
}
-static const struct ogg_codec *
-ogg_find_codec (uint8_t * buf, int size)
+static const struct ogg_codec *ogg_find_codec(uint8_t *buf, int size)
{
int i;
@@ -148,8 +144,7 @@ ogg_find_codec (uint8_t * buf, int size)
return NULL;
}
-static int
-ogg_new_stream (AVFormatContext * s, uint32_t serial)
+static int ogg_new_stream(AVFormatContext *s, uint32_t serial, int new_avstream)
{
struct ogg *ogg = s->priv_data;
@@ -166,17 +161,18 @@ ogg_new_stream (AVFormatContext * s, uint32_t serial)
os->buf = av_malloc(os->bufsize);
os->header = -1;
- st = av_new_stream (s, idx);
- if (!st)
- return AVERROR(ENOMEM);
+ if (new_avstream) {
+ st = av_new_stream(s, idx);
+ if (!st)
+ return AVERROR(ENOMEM);
- av_set_pts_info(st, 64, 1, 1000000);
+ av_set_pts_info(st, 64, 1, 1000000);
+ }
return idx;
}
-static int
-ogg_new_buf(struct ogg *ogg, int idx)
+static int ogg_new_buf(struct ogg *ogg, int idx)
{
struct ogg_stream *os = ogg->streams + idx;
uint8_t *nb = av_malloc(os->bufsize);
@@ -192,8 +188,7 @@ ogg_new_buf(struct ogg *ogg, int idx)
return 0;
}
-static int
-ogg_read_page (AVFormatContext * s, int *str)
+static int ogg_read_page(AVFormatContext *s, int *str)
{
AVIOContext *bc = s->pb;
struct ogg *ogg = s->priv_data;
@@ -251,8 +246,10 @@ ogg_read_page (AVFormatContext * s, int *str)
}
ogg->curidx = -1;
ogg->nstreams = 0;
+ idx = ogg_new_stream(s, serial, 0);
+ } else {
+ idx = ogg_new_stream(s, serial, 1);
}
- idx = ogg_new_stream (s, serial);
if (idx < 0)
return -1;
}
@@ -308,8 +305,8 @@ ogg_read_page (AVFormatContext * s, int *str)
return 0;
}
-static int
-ogg_packet (AVFormatContext * s, int *str, int *dstart, int *dsize, int64_t *fpos)
+static int ogg_packet(AVFormatContext *s, int *str, int *dstart, int *dsize,
+ int64_t *fpos)
{
struct ogg *ogg = s->priv_data;
int idx, i;
@@ -439,8 +436,7 @@ ogg_packet (AVFormatContext * s, int *str, int *dstart, int *dsize, int64_t *fpo
return 0;
}
-static int
-ogg_get_headers (AVFormatContext * s)
+static int ogg_get_headers(AVFormatContext *s)
{
struct ogg *ogg = s->priv_data;
@@ -456,8 +452,7 @@ ogg_get_headers (AVFormatContext * s)
return 0;
}
-static int
-ogg_get_length (AVFormatContext * s)
+static int ogg_get_length(AVFormatContext *s)
{
struct ogg *ogg = s->priv_data;
int i;
@@ -490,12 +485,22 @@ ogg_get_length (AVFormatContext * s)
ogg_restore (s, 0);
+ ogg_save (s);
+ avio_seek (s->pb, 0, SEEK_SET);
+ while (!ogg_read_page (s, &i)){
+ if (ogg->streams[i].granule != -1 && ogg->streams[i].granule != 0 &&
+ ogg->streams[i].codec) {
+ s->streams[i]->duration -=
+ ogg_gptopts (s, i, ogg->streams[i].granule, NULL);
+ break;
+ }
+ }
+ ogg_restore (s, 0);
+
return 0;
}
-
-static int
-ogg_read_header (AVFormatContext * s, AVFormatParameters * ap)
+static int ogg_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
struct ogg *ogg = s->priv_data;
int i;
@@ -546,8 +551,7 @@ static int64_t ogg_calc_pts(AVFormatContext *s, int idx, int64_t *dts)
return pts;
}
-static int
-ogg_read_packet (AVFormatContext * s, AVPacket * pkt)
+static int ogg_read_packet(AVFormatContext *s, AVPacket *pkt)
{
struct ogg *ogg;
struct ogg_stream *os;
@@ -587,9 +591,7 @@ retry:
return psize;
}
-
-static int
-ogg_read_close (AVFormatContext * s)
+static int ogg_read_close(AVFormatContext *s)
{
struct ogg *ogg = s->priv_data;
int i;
@@ -602,10 +604,8 @@ ogg_read_close (AVFormatContext * s)
return 0;
}
-
-static int64_t
-ogg_read_timestamp (AVFormatContext * s, int stream_index, int64_t * pos_arg,
- int64_t pos_limit)
+static int64_t ogg_read_timestamp(AVFormatContext *s, int stream_index,
+ int64_t *pos_arg, int64_t pos_limit)
{
struct ogg *ogg = s->priv_data;
struct ogg_stream *os = ogg->streams + stream_index;
@@ -628,7 +628,8 @@ ogg_read_timestamp (AVFormatContext * s, int stream_index, int64_t * pos_arg,
return pts;
}
-static int ogg_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
+static int ogg_read_seek(AVFormatContext *s, int stream_index,
+ int64_t timestamp, int flags)
{
struct ogg *ogg = s->priv_data;
struct ogg_stream *os = ogg->streams + stream_index;
@@ -648,24 +649,21 @@ static int ogg_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp
static int ogg_probe(AVProbeData *p)
{
- if (p->buf[0] == 'O' && p->buf[1] == 'g' &&
- p->buf[2] == 'g' && p->buf[3] == 'S' &&
- p->buf[4] == 0x0 && p->buf[5] <= 0x7 )
+ if (!memcmp("OggS", p->buf, 5) && p->buf[5] <= 0x7)
return AVPROBE_SCORE_MAX;
- else
- return 0;
+ return 0;
}
AVInputFormat ff_ogg_demuxer = {
- "ogg",
- NULL_IF_CONFIG_SMALL("Ogg"),
- sizeof (struct ogg),
- ogg_probe,
- ogg_read_header,
- ogg_read_packet,
- ogg_read_close,
- ogg_read_seek,
- ogg_read_timestamp,
- .extensions = "ogg",
- .flags = AVFMT_GENERIC_INDEX,
+ .name = "ogg",
+ .long_name = NULL_IF_CONFIG_SMALL("Ogg"),
+ .priv_data_size = sizeof(struct ogg),
+ .read_probe = ogg_probe,
+ .read_header = ogg_read_header,
+ .read_packet = ogg_read_packet,
+ .read_close = ogg_read_close,
+ .read_seek = ogg_read_seek,
+ .read_timestamp = ogg_read_timestamp,
+ .extensions = "ogg",
+ .flags = AVFMT_GENERIC_INDEX,
};
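
One detail of the simplified ogg_probe above: memcmp("OggS", p->buf, 5) compares five bytes, so the string literal's terminating '\0' takes over the old explicit p->buf[4] == 0x0 test (the page's stream_structure_version). A tiny standalone check of that equivalence, for illustration only:

#include <assert.h>
#include <string.h>

int main(void)
{
    /* first six bytes of a plausible Ogg page header */
    static const unsigned char page[6] = { 'O', 'g', 'g', 'S', 0x00, 0x02 };

    /* the 5-byte compare covers page[4] == 0 via the literal's trailing '\0' */
    assert(!memcmp("OggS", page, 5) && page[5] <= 0x7);
    return 0;
}
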
diff --git a/libavformat/oggenc.c b/libavformat/oggenc.c
index f0251a6455..45e677fb08 100644
--- a/libavformat/oggenc.c
+++ b/libavformat/oggenc.c
@@ -20,6 +20,7 @@
*/
#include "libavutil/crc.h"
+#include "libavutil/opt.h"
#include "libavutil/random_seed.h"
#include "libavcodec/xiph.h"
#include "libavcodec/bytestream.h"
@@ -62,9 +63,26 @@ typedef struct OGGPageList {
} OGGPageList;
typedef struct {
+ const AVClass *class;
OGGPageList *page_list;
+ int pref_size; ///< preferred page size (0 => fill all segments)
} OGGContext;
+
+static const AVOption options[] = {
+ { "oggpagesize", "Set preferred Ogg page size.",
+ offsetof(OGGContext, pref_size), FF_OPT_TYPE_INT, {.dbl=0}, 0, MAX_PAGE_SIZE, AV_OPT_FLAG_ENCODING_PARAM},
+ { NULL },
+};
+
+static const AVClass ogg_muxer_class = {
+ "Ogg muxer",
+ av_default_item_name,
+ options,
+ LIBAVUTIL_VERSION_INT,
+};
+
+
static void ogg_update_checksum(AVFormatContext *s, AVIOContext *pb, int64_t crc_offset)
{
int64_t pos = avio_tell(pb);
@@ -174,6 +192,7 @@ static int ogg_buffer_data(AVFormatContext *s, AVStream *st,
uint8_t *data, unsigned size, int64_t granule)
{
OGGStreamContext *oggstream = st->priv_data;
+ OGGContext *ogg = s->priv_data;
int total_segments = size / 255 + 1;
uint8_t *p = data;
int i, segments, len, flush = 0;
@@ -209,8 +228,9 @@ static int ogg_buffer_data(AVFormatContext *s, AVStream *st,
if (i == total_segments)
page->granule = granule;
- if (page->segments_count == 255) {
- ogg_buffer_page(s, oggstream);
+ if(page->segments_count == 255 ||
+ (ogg->pref_size > 0 && page->size >= ogg->pref_size)) {
+ ogg_buffer_page(s, oggstream);
}
}
@@ -514,4 +534,5 @@ AVOutputFormat ff_ogg_muxer = {
ogg_write_header,
ogg_write_packet,
ogg_write_trailer,
+ .priv_class = &ogg_muxer_class,
};
diff --git a/libavformat/options.c b/libavformat/options.c
index 26e2bcebe4..a6f78817f4 100644
--- a/libavformat/options.c
+++ b/libavformat/options.c
@@ -40,28 +40,24 @@ static const char* format_to_name(void* ptr)
#define D AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[]={
-{"probesize", "set probing size", OFFSET(probesize), FF_OPT_TYPE_INT, 5000000, 32, INT_MAX, D},
-{"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
-{"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
-{"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
-{"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
-{"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
-{"nofillin", "do not fill in missing values that can be exactly calculated", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_NOFILLIN, INT_MIN, INT_MAX, D, "fflags"},
-{"noparse", "disable AVParsers, this needs nofillin too", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_NOPARSE, INT_MIN, INT_MAX, D, "fflags"},
-{"igndts", "ignore dts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNDTS, INT_MIN, INT_MAX, D, "fflags"},
-{"rtphint", "add rtp hinting", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_RTP_HINT, INT_MIN, INT_MAX, E, "fflags"},
-{"sortdts", "try to interleave outputted packets by dts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_SORT_DTS, INT_MIN, INT_MAX, D, "fflags"},
-#if FF_API_OLD_METADATA
-{"track", " set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
-{"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
-#endif
-{"analyzeduration", "how many microseconds are analyzed to estimate duration", OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, 5*AV_TIME_BASE, 0, INT_MAX, D},
-{"cryptokey", "decryption key", OFFSET(key), FF_OPT_TYPE_BINARY, 0, 0, 0, D},
-{"indexmem", "max memory used for timestamp index (per stream)", OFFSET(max_index_size), FF_OPT_TYPE_INT, 1<<20, 0, INT_MAX, D},
-{"rtbufsize", "max memory used for buffering real-time frames", OFFSET(max_picture_buffer), FF_OPT_TYPE_INT, 3041280, 0, INT_MAX, D}, /* defaults to 1s of 15fps 352x288 YUYV422 video */
-{"fdebug", "print specific debug info", OFFSET(debug), FF_OPT_TYPE_FLAGS, DEFAULT, 0, INT_MAX, E|D, "fdebug"},
-{"ts", NULL, 0, FF_OPT_TYPE_CONST, FF_FDEBUG_TS, INT_MIN, INT_MAX, E|D, "fdebug"},
-{"max_delay", "maximum muxing or demuxing delay in microseconds", OFFSET(max_delay), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E|D},
+{"probesize", "set probing size", OFFSET(probesize), FF_OPT_TYPE_INT, {.dbl = 5000000 }, 32, INT_MAX, D},
+{"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, 0, INT_MAX, E},
+{"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, 0, INT_MAX, E},
+{"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, {.dbl = DEFAULT }, INT_MIN, INT_MAX, D|E, "fflags"},
+{"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, {.dbl = AVFMT_FLAG_IGNIDX }, INT_MIN, INT_MAX, D, "fflags"},
+{"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, {.dbl = AVFMT_FLAG_GENPTS }, INT_MIN, INT_MAX, D, "fflags"},
+{"nofillin", "do not fill in missing values that can be exactly calculated", 0, FF_OPT_TYPE_CONST, {.dbl = AVFMT_FLAG_NOFILLIN }, INT_MIN, INT_MAX, D, "fflags"},
+{"noparse", "disable AVParsers, this needs nofillin too", 0, FF_OPT_TYPE_CONST, {.dbl = AVFMT_FLAG_NOPARSE }, INT_MIN, INT_MAX, D, "fflags"},
+{"igndts", "ignore dts", 0, FF_OPT_TYPE_CONST, {.dbl = AVFMT_FLAG_IGNDTS }, INT_MIN, INT_MAX, D, "fflags"},
+{"rtphint", "add rtp hinting", 0, FF_OPT_TYPE_CONST, {.dbl = AVFMT_FLAG_RTP_HINT }, INT_MIN, INT_MAX, E, "fflags"},
+{"sortdts", "try to interleave outputted packets by dts", 0, FF_OPT_TYPE_CONST, {.dbl = AVFMT_FLAG_SORT_DTS }, INT_MIN, INT_MAX, D, "fflags"},
+{"analyzeduration", "how many microseconds are analyzed to estimate duration", OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, {.dbl = 5*AV_TIME_BASE }, 0, INT_MAX, D},
+{"cryptokey", "decryption key", OFFSET(key), FF_OPT_TYPE_BINARY, {.dbl = 0}, 0, 0, D},
+{"indexmem", "max memory used for timestamp index (per stream)", OFFSET(max_index_size), FF_OPT_TYPE_INT, {.dbl = 1<<20 }, 0, INT_MAX, D},
+{"rtbufsize", "max memory used for buffering real-time frames", OFFSET(max_picture_buffer), FF_OPT_TYPE_INT, {.dbl = 3041280 }, 0, INT_MAX, D}, /* defaults to 1s of 15fps 352x288 YUYV422 video */
+{"fdebug", "print specific debug info", OFFSET(debug), FF_OPT_TYPE_FLAGS, {.dbl = DEFAULT }, 0, INT_MAX, E|D, "fdebug"},
+{"ts", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_FDEBUG_TS }, INT_MIN, INT_MAX, E|D, "fdebug"},
+{"max_delay", "maximum muxing or demuxing delay in microseconds", OFFSET(max_delay), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, 0, INT_MAX, E|D},
{NULL},
};
@@ -69,7 +65,12 @@ static const AVOption options[]={
#undef D
#undef DEFAULT
-static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options, LIBAVUTIL_VERSION_INT };
+static const AVClass av_format_context_class = {
+ .class_name = "AVFormatContext",
+ .item_name = format_to_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
static void avformat_get_context_defaults(AVFormatContext *s)
{
@@ -86,7 +87,6 @@ AVFormatContext *avformat_alloc_context(void)
ic = av_malloc(sizeof(AVFormatContext));
if (!ic) return ic;
avformat_get_context_defaults(ic);
- ic->av_class = &av_format_context_class;
return ic;
}
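
The options.c conversion above is representative of the pattern applied throughout this merge: AVOption defaults now go through the default_val union ({.dbl = ...}) and AVClass instances use designated initializers, so later additions to either struct cannot silently shift positional values. A minimal sketch of a private context declared in that style, not taken from the patch (FooContext and foo_size are made-up names):

#include <stddef.h>
#include <limits.h>
#include "libavutil/opt.h"
#include "libavutil/log.h"

typedef struct FooContext {
    const AVClass *class;   /* first member, required for av_log()/AVOption */
    int foo_size;
} FooContext;

static const AVOption foo_options[] = {
    { "foo_size", "set foo buffer size", offsetof(FooContext, foo_size),
      FF_OPT_TYPE_INT, {.dbl = 1024}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { NULL },
};

static const AVClass foo_class = {
    .class_name = "foo muxer",
    .item_name  = av_default_item_name,
    .option     = foo_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
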
diff --git a/libavformat/os_support.c b/libavformat/os_support.c
index 8afa628515..ac9086c3c3 100644
--- a/libavformat/os_support.c
+++ b/libavformat/os_support.c
@@ -22,7 +22,6 @@
/* needed by inet_aton() */
#define _SVID_SOURCE
-#define _DARWIN_C_SOURCE
#include "config.h"
#include "avformat.h"
diff --git a/libavformat/riff.c b/libavformat/riff.c
index 48a8df4e18..833a6bf9e8 100644
--- a/libavformat/riff.c
+++ b/libavformat/riff.c
@@ -33,6 +33,7 @@ const AVCodecTag ff_codec_bmp_tags[] = {
{ CODEC_ID_H264, MKTAG('X', '2', '6', '4') },
{ CODEC_ID_H264, MKTAG('x', '2', '6', '4') },
{ CODEC_ID_H264, MKTAG('a', 'v', 'c', '1') },
+ { CODEC_ID_H264, MKTAG('D', 'A', 'V', 'C') },
{ CODEC_ID_H264, MKTAG('V', 'S', 'S', 'H') },
{ CODEC_ID_H263, MKTAG('H', '2', '6', '3') },
{ CODEC_ID_H263, MKTAG('X', '2', '6', '3') },
@@ -442,7 +443,7 @@ int ff_put_wav_header(AVIOContext *pb, AVCodecContext *enc)
if(waveformatextensible) { /* write WAVEFORMATEXTENSIBLE extensions */
hdrsize += 22;
avio_wl16(pb, riff_extradata - riff_extradata_start + 22); /* 22 is WAVEFORMATEXTENSIBLE size */
- avio_wl16(pb, enc->bits_per_coded_sample); /* ValidBitsPerSample || SamplesPerBlock || Reserved */
+ avio_wl16(pb, bps); /* ValidBitsPerSample || SamplesPerBlock || Reserved */
avio_wl32(pb, enc->channel_layout); /* dwChannelMask */
avio_wl32(pb, enc->codec_tag); /* GUID + next 3 */
avio_wl32(pb, 0x00100000);
@@ -603,6 +604,7 @@ void ff_parse_specific_params(AVCodecContext *stream, int *au_rate, int *au_ssiz
*au_scale=stream->frame_size;
*au_rate= stream->sample_rate;
}else if(stream->codec_type == AVMEDIA_TYPE_VIDEO ||
+ stream->codec_type == AVMEDIA_TYPE_DATA ||
stream->codec_type == AVMEDIA_TYPE_SUBTITLE){
*au_scale= stream->time_base.num;
*au_rate = stream->time_base.den;
diff --git a/libavformat/rtpdec.c b/libavformat/rtpdec.c
index db728ad2cb..0a9a8bc8d6 100644
--- a/libavformat/rtpdec.c
+++ b/libavformat/rtpdec.c
@@ -19,9 +19,6 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-/* needed for gethostname() */
-#define _XOPEN_SOURCE 600
-
#include "libavcodec/get_bits.h"
#include "avformat.h"
#include "mpegts.h"
diff --git a/libavformat/rtpdec_qdm2.c b/libavformat/rtpdec_qdm2.c
index 1c1d3219be..c4314ec2c4 100644
--- a/libavformat/rtpdec_qdm2.c
+++ b/libavformat/rtpdec_qdm2.c
@@ -266,6 +266,8 @@ static int qdm2_parse_packet(AVFormatContext *s, PayloadContext *qdm,
* to the decoder that it is OK to initialize. */
st->codec->codec_id = CODEC_ID_QDM2;
}
+ if (st->codec->codec_id == CODEC_ID_NONE)
+ return AVERROR(EAGAIN);
/* subpackets */
while (end - p >= 4) {
diff --git a/libavformat/rtpproto.c b/libavformat/rtpproto.c
index d620cf9264..8b23f25c46 100644
--- a/libavformat/rtpproto.c
+++ b/libavformat/rtpproto.c
@@ -138,15 +138,13 @@ static int rtp_open(URLContext *h, const char *uri, int flags)
{
RTPContext *s;
int rtp_port, rtcp_port,
- is_output, ttl, connect,
+ ttl, connect,
local_rtp_port, local_rtcp_port, max_packet_size;
char hostname[256];
char buf[1024];
char path[1024];
const char *p;
- is_output = (flags & AVIO_WRONLY);
-
s = av_mallocz(sizeof(RTPContext));
if (!s)
return AVERROR(ENOMEM);
diff --git a/libavformat/rtsp.c b/libavformat/rtsp.c
index 01e1bc1a04..74e4b31f43 100644
--- a/libavformat/rtsp.c
+++ b/libavformat/rtsp.c
@@ -808,6 +808,10 @@ void ff_rtsp_parse_line(RTSPMessageHeader *reply, const char *buf,
p += strspn(p, SPACE_CHARS);
if (method && !strcmp(method, "PLAY"))
rtsp_parse_rtp_info(rt, p);
+ } else if (av_stristart(p, "Public:", &p) && rt) {
+ if (strstr(p, "GET_PARAMETER") &&
+ method && !strcmp(method, "OPTIONS"))
+ rt->get_parameter_supported = 1;
}
}
diff --git a/libavformat/rtsp.h b/libavformat/rtsp.h
index 0fec3cc991..56160cefc2 100644
--- a/libavformat/rtsp.h
+++ b/libavformat/rtsp.h
@@ -331,6 +331,11 @@ typedef struct RTSPState {
* Polling array for udp
*/
struct pollfd *p;
+
+ /**
+ * Whether the server supports the GET_PARAMETER method.
+ */
+ int get_parameter_supported;
} RTSPState;
/**
diff --git a/libavformat/rtspdec.c b/libavformat/rtspdec.c
index 5833a5209a..454a31c3f9 100644
--- a/libavformat/rtspdec.c
+++ b/libavformat/rtspdec.c
@@ -341,7 +341,9 @@ retry:
/* send dummy request to keep TCP connection alive */
if ((av_gettime() - rt->last_cmd_time) / 1000000 >= rt->timeout / 2) {
- if (rt->server_type != RTSP_SERVER_REAL) {
+ if (rt->server_type == RTSP_SERVER_WMS ||
+ (rt->server_type != RTSP_SERVER_REAL &&
+ rt->get_parameter_supported)) {
ff_rtsp_send_cmd_async(s, "GET_PARAMETER", rt->control_uri, NULL);
} else {
ff_rtsp_send_cmd_async(s, "OPTIONS", "*", NULL);
diff --git a/libavformat/spdifenc.c b/libavformat/spdifenc.c
index f6d4ec160e..3c170bd7cc 100644
--- a/libavformat/spdifenc.c
+++ b/libavformat/spdifenc.c
@@ -86,14 +86,19 @@ typedef struct IEC61937Context {
} IEC61937Context;
static const AVOption options[] = {
-{ "spdif_flags", "IEC 61937 encapsulation flags", offsetof(IEC61937Context, spdif_flags), FF_OPT_TYPE_FLAGS, 0, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "spdif_flags" },
-{ "be", "output in big-endian format (for use as s16be)", 0, FF_OPT_TYPE_CONST, SPDIF_FLAG_BIGENDIAN, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "spdif_flags" },
-{ "dtshd_rate", "mux complete DTS frames in HD mode at the specified IEC958 rate (in Hz, default 0=disabled)", offsetof(IEC61937Context, dtshd_rate), FF_OPT_TYPE_INT, 0, 0, 768000, AV_OPT_FLAG_ENCODING_PARAM },
-{ "dtshd_fallback_time", "min secs to strip HD for after an overflow (-1: till the end, default 60)", offsetof(IEC61937Context, dtshd_fallback), FF_OPT_TYPE_INT, 60, -1, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
+{ "spdif_flags", "IEC 61937 encapsulation flags", offsetof(IEC61937Context, spdif_flags), FF_OPT_TYPE_FLAGS, {.dbl = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "spdif_flags" },
+{ "be", "output in big-endian format (for use as s16be)", 0, FF_OPT_TYPE_CONST, {.dbl = SPDIF_FLAG_BIGENDIAN}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "spdif_flags" },
+{ "dtshd_rate", "mux complete DTS frames in HD mode at the specified IEC958 rate (in Hz, default 0=disabled)", offsetof(IEC61937Context, dtshd_rate), FF_OPT_TYPE_INT, {.dbl = 0}, 0, 768000, AV_OPT_FLAG_ENCODING_PARAM },
+{ "dtshd_fallback_time", "min secs to strip HD for after an overflow (-1: till the end, default 60)", offsetof(IEC61937Context, dtshd_fallback), FF_OPT_TYPE_INT, {.dbl = 60}, -1, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
{ NULL },
};
-static const AVClass class = { "spdif", av_default_item_name, options, LIBAVUTIL_VERSION_INT };
+static const AVClass class = {
+ .class_name = "spdif",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
static int spdif_header_ac3(AVFormatContext *s, AVPacket *pkt)
{
diff --git a/libavformat/udp.c b/libavformat/udp.c
index 6f22277e23..021f529d64 100644
--- a/libavformat/udp.c
+++ b/libavformat/udp.c
@@ -25,15 +25,17 @@
*/
#define _BSD_SOURCE /* Needed for using struct ip_mreq with recent glibc */
-#define _DARWIN_C_SOURCE /* Needed for using IP_MULTICAST_TTL on OS X */
+
#include "avformat.h"
#include "avio_internal.h"
#include "libavutil/parseutils.h"
+#include "libavutil/fifo.h"
#include <unistd.h>
#include "internal.h"
#include "network.h"
#include "os_support.h"
#include "url.h"
+#include <pthread.h>
#include <sys/time.h>
#ifndef IPV6_ADD_MEMBERSHIP
@@ -51,6 +53,13 @@ typedef struct {
struct sockaddr_storage dest_addr;
int dest_addr_len;
int is_connected;
+
+ /* Circular Buffer variables for use in UDP receive code */
+ int circular_buffer_size;
+ AVFifoBuffer *fifo;
+ int circular_buffer_available_max;
+ int circular_buffer_error;
+ pthread_t circular_buffer_thread;
} UDPContext;
#define UDP_TX_BUF_SIZE 32768
@@ -301,6 +310,66 @@ int udp_get_file_handle(URLContext *h)
return s->udp_fd;
}
+static void *circular_buffer_task( void *_URLContext)
+{
+ URLContext *h = _URLContext;
+ UDPContext *s = h->priv_data;
+ fd_set rfds;
+ struct timeval tv;
+
+ for(;;) {
+ int left;
+ int ret;
+ int len;
+
+ if (url_interrupt_cb()) {
+ s->circular_buffer_error = EINTR;
+ return NULL;
+ }
+
+ FD_ZERO(&rfds);
+ FD_SET(s->udp_fd, &rfds);
+ tv.tv_sec = 1;
+ tv.tv_usec = 0;
+ ret = select(s->udp_fd + 1, &rfds, NULL, NULL, &tv);
+ if (ret < 0) {
+ if (ff_neterrno() == AVERROR(EINTR))
+ continue;
+ s->circular_buffer_error = EIO;
+ return NULL;
+ }
+
+ if (!(ret > 0 && FD_ISSET(s->udp_fd, &rfds)))
+ continue;
+
+ /* How much do we have left to the end of the buffer */
+ /* What's the minimum we can read so that we don't completely fill the buffer */
+ left = av_fifo_space(s->fifo);
+ left = FFMIN(left, s->fifo->end - s->fifo->wptr);
+
+ /* No space left: buffer overrun, give up */
+ if( !left) {
+ av_log(h, AV_LOG_ERROR, "circular_buffer: OVERRUN\n");
+ s->circular_buffer_error = EIO;
+ return NULL;
+ }
+
+ len = recv(s->udp_fd, s->fifo->wptr, left, 0);
+ if (len < 0) {
+ if (ff_neterrno() != AVERROR(EAGAIN) && ff_neterrno() != AVERROR(EINTR)) {
+ s->circular_buffer_error = EIO;
+ return NULL;
+ }
+ }
+ s->fifo->wptr += len;
+ if (s->fifo->wptr >= s->fifo->end)
+ s->fifo->wptr = s->fifo->buffer;
+ s->fifo->wndx += len;
+ }
+
+ return NULL;
+}
+
/* put it in UDP context */
/* return non zero if error */
static int udp_open(URLContext *h, const char *uri, int flags)
@@ -328,10 +397,12 @@ static int udp_open(URLContext *h, const char *uri, int flags)
s->ttl = 16;
s->buffer_size = is_output ? UDP_TX_BUF_SIZE : UDP_MAX_PKT_SIZE;
+ s->circular_buffer_size = 7*188*4096;
+
p = strchr(uri, '?');
if (p) {
if (av_find_info_tag(buf, sizeof(buf), "reuse", p)) {
- const char *endptr=NULL;
+ char *endptr=NULL;
s->reuse_socket = strtol(buf, &endptr, 10);
/* assume if no digits were found it is a request to enable it */
if (buf == endptr)
@@ -353,6 +424,9 @@ static int udp_open(URLContext *h, const char *uri, int flags)
if (av_find_info_tag(buf, sizeof(buf), "connect", p)) {
s->is_connected = strtol(buf, NULL, 10);
}
+ if (av_find_info_tag(buf, sizeof(buf), "buf_size", p)) {
+ s->circular_buffer_size = strtol(buf, NULL, 10)*188;
+ }
}
/* fill the dest addr */
@@ -434,10 +508,21 @@ static int udp_open(URLContext *h, const char *uri, int flags)
}
s->udp_fd = udp_fd;
+
+ if (!is_output && s->circular_buffer_size) {
+ /* start the task going */
+ s->fifo = av_fifo_alloc(s->circular_buffer_size);
+ if (pthread_create(&s->circular_buffer_thread, NULL, circular_buffer_task, h)) {
+ av_log(h, AV_LOG_ERROR, "pthread_create failed\n");
+ goto fail;
+ }
+ }
+
return 0;
fail:
if (udp_fd >= 0)
closesocket(udp_fd);
+ av_fifo_free(s->fifo);
av_free(s);
return AVERROR(EIO);
}
@@ -446,6 +531,33 @@ static int udp_read(URLContext *h, uint8_t *buf, int size)
{
UDPContext *s = h->priv_data;
int ret;
+ int avail;
+ int left;
+ fd_set rfds;
+ struct timeval tv;
+
+ if (s->fifo) {
+
+ do {
+ avail = av_fifo_size(s->fifo);
+ if (avail) { // >=size) {
+
+ // Maximum amount available
+ size = FFMIN( avail, size);
+ av_fifo_generic_read(s->fifo, buf, size, NULL);
+ return size;
+ }
+ else {
+ FD_ZERO(&rfds);
+ FD_SET(s->udp_fd, &rfds);
+ tv.tv_sec = 1;
+ tv.tv_usec = 0;
+ ret = select(s->udp_fd + 1, &rfds, NULL, NULL, &tv);
+ if (ret<0)
+ return ret;
+ }
+ } while( 1);
+ }
if (!(h->flags & AVIO_FLAG_NONBLOCK)) {
ret = ff_network_wait_fd(s->udp_fd, 0);
@@ -453,6 +565,7 @@ static int udp_read(URLContext *h, uint8_t *buf, int size)
return ret;
}
ret = recv(s->udp_fd, buf, size, 0);
+
return ret < 0 ? ff_neterrno() : ret;
}
@@ -484,6 +597,8 @@ static int udp_close(URLContext *h)
if (s->is_multicast && !(h->flags & AVIO_WRONLY))
udp_leave_multicast_group(s->udp_fd, (struct sockaddr *)&s->dest_addr);
closesocket(s->udp_fd);
+ av_log( h, AV_LOG_INFO, "circular_buffer_info max:%d%%\r\n", (s->circular_buffer_available_max*100)/s->circular_buffer_size);
+ av_fifo_free(s->fifo);
av_free(s);
return 0;
}
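
The udp.c changes above add a background thread that drains the socket into an AVFifoBuffer so datagrams are not lost while the caller is busy elsewhere. Stripped of the select() loop and error handling, the producer/consumer shape looks roughly like the sketch below; fill_fifo() and drain_fifo() are made-up helper names, not functions from the patch:

#include <stdint.h>
#include "libavutil/fifo.h"

/* producer side: append one received datagram if the fifo has room */
static int fill_fifo(AVFifoBuffer *fifo, uint8_t *pkt, int len)
{
    if (av_fifo_space(fifo) < len)
        return -1;                        /* overrun: caller reports an error */
    return av_fifo_generic_write(fifo, pkt, len, NULL);
}

/* consumer side: hand out at most "size" buffered bytes, as udp_read() does */
static int drain_fifo(AVFifoBuffer *fifo, uint8_t *buf, int size)
{
    int avail = av_fifo_size(fifo);
    if (!avail)
        return 0;                         /* nothing buffered yet */
    if (size > avail)
        size = avail;
    av_fifo_generic_read(fifo, buf, size, NULL);
    return size;
}

In the patch itself the receive thread writes straight into fifo->wptr to avoid an extra copy, and the buf_size URL option is interpreted in 188-byte transport packets (strtol(buf, NULL, 10)*188).
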
diff --git a/libavformat/utils.c b/libavformat/utils.c
index f99adab712..16297af16a 100644
--- a/libavformat/utils.c
+++ b/libavformat/utils.c
@@ -967,7 +967,7 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
pkt->dts= AV_NOPTS_VALUE;
- if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == FF_B_TYPE)
+ if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
//FIXME Set low_delay = 0 when has_b_frames = 1
st->codec->has_b_frames = 1;
@@ -983,7 +983,7 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
/* XXX: need has_b_frame, but cannot get it if the codec is
not initialized */
if (delay &&
- pc && pc->pict_type != FF_B_TYPE)
+ pc && pc->pict_type != AV_PICTURE_TYPE_B)
presentation_delayed = 1;
if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
@@ -1111,7 +1111,7 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
/* keyframe computation */
if (pc->key_frame == 1)
pkt->flags |= AV_PKT_FLAG_KEY;
- else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE)
+ else if (pc->key_frame == -1 && pc->pict_type == AV_PICTURE_TYPE_I)
pkt->flags |= AV_PKT_FLAG_KEY;
}
if (pc)
@@ -1173,8 +1173,9 @@ static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
compute_pkt_fields(s, st, st->parser, pkt);
if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
+ int64_t pos= (st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) ? pkt->pos : st->parser->frame_offset;
ff_reduce_index(s, st->index);
- av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
+ av_add_index_entry(st, pos, pkt->dts,
0, 0, AVINDEX_KEYFRAME);
}
@@ -1867,18 +1868,23 @@ static int av_has_duration(AVFormatContext *ic)
*/
static void av_update_stream_timings(AVFormatContext *ic)
{
- int64_t start_time, start_time1, end_time, end_time1;
+ int64_t start_time, start_time1, start_time_text, end_time, end_time1;
int64_t duration, duration1;
int i;
AVStream *st;
start_time = INT64_MAX;
+ start_time_text = INT64_MAX;
end_time = INT64_MIN;
duration = INT64_MIN;
for(i = 0;i < ic->nb_streams; i++) {
st = ic->streams[i];
if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
+ if (st->codec->codec_id == CODEC_ID_DVB_TELETEXT) {
+ if (start_time1 < start_time_text)
+ start_time_text = start_time1;
+ } else
if (start_time1 < start_time)
start_time = start_time1;
if (st->duration != AV_NOPTS_VALUE) {
@@ -1894,6 +1900,8 @@ static void av_update_stream_timings(AVFormatContext *ic)
duration = duration1;
}
}
+ if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
+ start_time = start_time_text;
if (start_time != INT64_MAX) {
ic->start_time = start_time;
if (end_time != INT64_MIN) {
@@ -2814,8 +2822,6 @@ AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int6
int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
{
- int ret;
-
if (s->oformat->priv_data_size > 0) {
s->priv_data = av_mallocz(s->oformat->priv_data_size);
if (!s->priv_data)
@@ -3144,12 +3150,12 @@ static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacke
{
AVStream *st = s->streams[ pkt ->stream_index];
AVStream *st2= s->streams[ next->stream_index];
- int64_t a= st2->time_base.num * (int64_t)st ->time_base.den;
- int64_t b= st ->time_base.num * (int64_t)st2->time_base.den;
- int64_t dts1 = av_rescale_rnd(pkt->dts, b, a, AV_ROUND_DOWN);
- if (dts1==next->dts && dts1==av_rescale_rnd(pkt->dts, b, a, AV_ROUND_UP))
+ int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
+ st->time_base);
+
+ if (comp == 0)
return pkt->stream_index < next->stream_index;
- return dts1 < next->dts;
+ return comp > 0;
}
int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
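
The ff_interleave_compare_dts change above replaces the hand-rolled rescale-and-round comparison with av_compare_ts(), which orders two timestamps that live in different time bases without risking overflow in the cross products. A small self-contained illustration of the helper, assuming libavutil is available:

#include <stdio.h>
#include "libavutil/mathematics.h"

int main(void)
{
    AVRational tb_90k = { 1, 90000 };   /* typical MPEG time base */
    AVRational tb_ms  = { 1, 1000 };    /* millisecond time base  */

    /* 4500/90000 s equals 50/1000 s, so the positions compare equal */
    printf("%d\n", av_compare_ts(4500, tb_90k, 50, tb_ms));  /* prints 0  */
    printf("%d\n", av_compare_ts(4500, tb_90k, 49, tb_ms));  /* prints 1  */
    printf("%d\n", av_compare_ts(4500, tb_90k, 51, tb_ms));  /* prints -1 */
    return 0;
}
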
diff --git a/libavformat/wtv.c b/libavformat/wtv.c
index 795ea39bf9..926f924520 100644
--- a/libavformat/wtv.c
+++ b/libavformat/wtv.c
@@ -1,5 +1,5 @@
/*
- * Windows Television (WTV) demuxer
+ * Windows Television (WTV)
* Copyright (c) 2010-2011 Peter Ross <pross@xvid.org>
*
* This file is part of FFmpeg.
@@ -19,1061 +19,27 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-/**
- * @file
- * Windows Television (WTV) demuxer
- * @author Peter Ross <pross@xvid.org>
- */
-
-#include "libavutil/intreadwrite.h"
-#include "libavutil/intfloat_readwrite.h"
-#include "avformat.h"
-#include "internal.h"
-#include "riff.h"
-#include "asf.h"
-#include "mpegts.h"
-#include <strings.h>
-
-/* Macros for formating GUIDs */
-#define PRI_PRETTY_GUID \
- "%08x-%04x-%04x-%02x%02x%02x%02x%02x%02x%02x%02x"
-#define ARG_PRETTY_GUID(g) \
- AV_RL32(g),AV_RL16(g+4),AV_RL16(g+6),g[8],g[9],g[10],g[11],g[12],g[13],g[14],g[15]
-#define LEN_PRETTY_GUID 34
-
-/*
- *
- * File system routines
- *
- */
-
-#define WTV_SECTOR_BITS 12
-#define WTV_SECTOR_SIZE (1 << WTV_SECTOR_BITS)
-#define WTV_BIGSECTOR_BITS 18
-
-typedef struct {
- AVIOContext *pb_filesystem; /** file system (AVFormatContext->pb) */
-
- int sector_bits; /** sector shift bits; used to convert sector number into pb_filesystem offset */
- uint32_t *sectors; /** file allocation table */
- int nb_sectors; /** number of sectors */
-
- int error;
- int64_t position;
- int64_t length;
-} WtvFile;
-
-/**
- * @return bytes read, 0 on end of file, or <0 on error
- */
-static int wtvfile_read_packet(void *opaque, uint8_t *buf, int buf_size)
-{
- WtvFile *wf = opaque;
- AVIOContext *pb = wf->pb_filesystem;
- int nread = 0;
-
- if (wf->error || pb->error)
- return -1;
- if (wf->position >= wf->length || url_feof(pb))
- return 0;
-
- buf_size = FFMIN(buf_size, wf->length - wf->position);
- while(nread < buf_size) {
- int n;
- int remaining_in_sector = (1 << wf->sector_bits) - (wf->position & ((1 << wf->sector_bits) - 1));
- int read_request = FFMIN(buf_size - nread, remaining_in_sector);
-
- n = avio_read(pb, buf, read_request);
- if (n <= 0)
- break;
- nread += n;
- buf += n;
- wf->position += n;
- if (n == remaining_in_sector) {
- int i = wf->position >> wf->sector_bits;
- if (i >= wf->nb_sectors ||
- (wf->sectors[i] != wf->sectors[i - 1] + (1 << (wf->sector_bits - WTV_SECTOR_BITS)) &&
- avio_seek(pb, (int64_t)wf->sectors[i] << WTV_SECTOR_BITS, SEEK_SET) < 0)) {
- wf->error = 1;
- break;
- }
- }
- }
- return nread;
-}
-
-/**
- * @return position (or file length)
- */
-static int64_t wtvfile_seek(void *opaque, int64_t offset, int whence)
-{
- WtvFile *wf = opaque;
- AVIOContext *pb = wf->pb_filesystem;
-
- if (whence == AVSEEK_SIZE)
- return wf->length;
- else if (whence == SEEK_CUR)
- offset = wf->position + offset;
- else if (whence == SEEK_END)
- offset = wf->length;
-
- wf->error = offset < 0 || offset >= wf->length ||
- avio_seek(pb, ((int64_t)wf->sectors[offset >> wf->sector_bits] << WTV_SECTOR_BITS)
- + (offset & ((1 << wf->sector_bits) - 1)), SEEK_SET) < 0;
- wf->position = offset;
- return offset;
-}
-
-/**
- * read non-zero integers (le32) from input stream
- * @param pb
- * @param[out] data destination
- * @param count maximum number of integers to read
- * @return total number of integers read
- */
-static int read_ints(AVIOContext *pb, uint32_t *data, int count)
-{
- int i, total = 0;
- for (i = 0; i < count; i++) {
- if ((data[total] = avio_rl32(pb)))
- total++;
- }
- return total;
-}
-
-/**
- * Open file
- * @param first_sector First sector
- * @param length Length of file (bytes)
- * @param depth File allocation table depth
- * @return NULL on error
- */
-static AVIOContext * wtvfile_open_sector(int first_sector, uint64_t length, int depth, AVFormatContext *s)
-{
- AVIOContext *pb;
- WtvFile *wf;
- uint8_t *buffer;
-
- if (avio_seek(s->pb, first_sector << WTV_SECTOR_BITS, SEEK_SET) < 0)
- return NULL;
-
- wf = av_mallocz(sizeof(WtvFile));
- if (!wf)
- return NULL;
-
- if (depth == 0) {
- wf->sectors = av_malloc(sizeof(uint32_t));
- if (!wf->sectors) {
- av_free(wf);
- return NULL;
- }
- wf->sectors[0] = first_sector;
- wf->nb_sectors = 1;
- wf->sector_bits = WTV_SECTOR_BITS;
- } else if (depth == 1) {
- wf->sectors = av_malloc(WTV_SECTOR_SIZE);
- if (!wf->sectors) {
- av_free(wf);
- return NULL;
- }
- wf->nb_sectors = read_ints(s->pb, wf->sectors, WTV_SECTOR_SIZE / 4);
- wf->sector_bits = length & (1ULL<<63) ? WTV_SECTOR_BITS : WTV_BIGSECTOR_BITS;
- } else if (depth == 2) {
- uint32_t sectors1[WTV_SECTOR_SIZE / 4];
- int nb_sectors1 = read_ints(s->pb, sectors1, WTV_SECTOR_SIZE / 4);
- int i;
-
- wf->sectors = av_malloc(nb_sectors1 << WTV_SECTOR_BITS);
- if (!wf->sectors) {
- av_free(wf);
- return NULL;
- }
- wf->nb_sectors = 0;
- for (i = 0; i < nb_sectors1; i++) {
- if (avio_seek(s->pb, (int64_t)sectors1[i] << WTV_SECTOR_BITS, SEEK_SET) < 0)
- break;
- wf->nb_sectors += read_ints(s->pb, wf->sectors + i * WTV_SECTOR_SIZE / 4, WTV_SECTOR_SIZE / 4);
- }
- wf->sector_bits = length & (1ULL<<63) ? WTV_SECTOR_BITS : WTV_BIGSECTOR_BITS;
- } else {
- av_log(s, AV_LOG_ERROR, "unsupported file allocation table depth (0x%x)\n", depth);
- av_free(wf);
- return NULL;
- }
-
- if (!wf->nb_sectors) {
- av_free(wf->sectors);
- av_free(wf);
- return NULL;
- }
-
- /* check length */
- length &= 0xFFFFFFFFFFFF;
- if (length > ((int64_t)wf->nb_sectors << wf->sector_bits)) {
- av_log(s, AV_LOG_WARNING, "reported file length (0x%"PRIx64") exceeds number of available sectors (0x%"PRIx64")\n", length, (int64_t)wf->nb_sectors << wf->sector_bits);
- length = (int64_t)wf->nb_sectors << wf->sector_bits;
- }
- wf->length = length;
+#include "wtv.h"
- /* seek to intial sector */
- wf->position = 0;
- if (avio_seek(s->pb, (int64_t)wf->sectors[0] << WTV_SECTOR_BITS, SEEK_SET) < 0) {
- av_free(wf->sectors);
- av_free(wf);
- return NULL;
- }
-
- wf->pb_filesystem = s->pb;
- buffer = av_malloc(1 << wf->sector_bits);
- if (!buffer) {
- av_free(wf->sectors);
- av_free(wf);
- return NULL;
- }
-
- pb = avio_alloc_context(buffer, 1 << wf->sector_bits, 0, wf,
- wtvfile_read_packet, NULL, wtvfile_seek);
- if (!pb) {
- av_free(buffer);
- av_free(wf->sectors);
- av_free(wf);
- }
- return pb;
-}
-
-static const ff_asf_guid dir_entry_guid =
+/* WTV GUIDs */
+const ff_asf_guid ff_dir_entry_guid =
{0x92,0xB7,0x74,0x91,0x59,0x70,0x70,0x44,0x88,0xDF,0x06,0x3B,0x82,0xCC,0x21,0x3D};
-
-/**
- * Open file using filename
- * @param[in] buf directory buffer
- * @param buf_size directory buffer size
- * @param[in] filename
- * @param filename_size size of filename
- * @return NULL on error
- */
-static AVIOContext * wtvfile_open2(AVFormatContext *s, const uint8_t *buf, int buf_size, const uint8_t *filename, int filename_size)
-{
- const uint8_t *buf_end = buf + buf_size;
-
- while(buf + 48 <= buf_end) {
- int dir_length, name_size, first_sector, depth;
- uint64_t file_length;
- const uint8_t *name;
- if (ff_guidcmp(buf, dir_entry_guid)) {
- av_log(s, AV_LOG_ERROR, "unknown guid "FF_PRI_GUID", expected dir_entry_guid; "
- "remaining directory entries ignored\n", FF_ARG_GUID(buf));
- break;
- }
- dir_length = AV_RL16(buf + 16);
- file_length = AV_RL64(buf + 24);
- name_size = 2 * AV_RL32(buf + 32);
- if (buf + 48 + name_size > buf_end) {
- av_log(s, AV_LOG_ERROR, "filename exceeds buffer size; remaining directory entries ignored\n");
- break;
- }
- first_sector = AV_RL32(buf + 40 + name_size);
- depth = AV_RL32(buf + 44 + name_size);
-
- /* compare file name; test optional null terminator */
- name = buf + 40;
- if (name_size >= filename_size &&
- !memcmp(name, filename, filename_size) &&
- (name_size < filename_size + 2 || !AV_RN16(name + filename_size)))
- return wtvfile_open_sector(first_sector, file_length, depth, s);
-
- buf += dir_length;
- }
- return 0;
-}
-
-#define wtvfile_open(s, buf, buf_size, filename) \
- wtvfile_open2(s, buf, buf_size, filename, sizeof(filename))
-
-/**
- * Close file opened with wtvfile_open_sector(), or wtv_open()
- */
-static void wtvfile_close(AVIOContext *pb)
-{
- WtvFile *wf = pb->opaque;
- av_free(wf->sectors);
- av_free(pb);
-}
-
-/*
- *
- * Main demuxer
- *
- */
-
-typedef struct {
- int seen_data;
-} WtvStream;
-
-typedef struct {
- AVIOContext *pb; /** timeline file */
- int64_t epoch;
- int64_t pts; /** pts for next data chunk */
- int64_t last_valid_pts; /** latest valid pts, used for interative seeking */
-
- /* maintain private seek index, as the AVIndexEntry->pos is relative to the
- start of the 'timeline' file, not the file system (AVFormatContext->pb) */
- AVIndexEntry *index_entries;
- int nb_index_entries;
- unsigned int index_entries_allocated_size;
-} WtvContext;
-
-/* WTV GUIDs */
-static const ff_asf_guid wtv_guid =
+const ff_asf_guid ff_wtv_guid =
{0xB7,0xD8,0x00,0x20,0x37,0x49,0xDA,0x11,0xA6,0x4E,0x00,0x07,0xE9,0x5E,0xAD,0x8D};
-static const ff_asf_guid metadata_guid =
- {0x5A,0xFE,0xD7,0x6D,0xC8,0x1D,0x8F,0x4A,0x99,0x22,0xFA,0xB1,0x1C,0x38,0x14,0x53};
-static const ff_asf_guid timestamp_guid =
+const ff_asf_guid ff_timestamp_guid =
{0x5B,0x05,0xE6,0x1B,0x97,0xA9,0x49,0x43,0x88,0x17,0x1A,0x65,0x5A,0x29,0x8A,0x97};
-static const ff_asf_guid data_guid =
+const ff_asf_guid ff_data_guid =
{0x95,0xC3,0xD2,0xC2,0x7E,0x9A,0xDA,0x11,0x8B,0xF7,0x00,0x07,0xE9,0x5E,0xAD,0x8D};
-static const ff_asf_guid stream_guid =
+const ff_asf_guid ff_stream_guid =
{0xED,0xA4,0x13,0x23,0x2D,0xBF,0x4F,0x45,0xAD,0x8A,0xD9,0x5B,0xA7,0xF9,0x1F,0xEE};
-static const ff_asf_guid stream2_guid =
- {0xA2,0xC3,0xD2,0xC2,0x7E,0x9A,0xDA,0x11,0x8B,0xF7,0x00,0x07,0xE9,0x5E,0xAD,0x8D};
-static const ff_asf_guid EVENTID_SubtitleSpanningEvent =
- {0x48,0xC0,0xCE,0x5D,0xB9,0xD0,0x63,0x41,0x87,0x2C,0x4F,0x32,0x22,0x3B,0xE8,0x8A};
-static const ff_asf_guid EVENTID_LanguageSpanningEvent =
- {0x6D,0x66,0x92,0xE2,0x02,0x9C,0x8D,0x44,0xAA,0x8D,0x78,0x1A,0x93,0xFD,0xC3,0x95};
-static const ff_asf_guid EVENTID_AudioDescriptorSpanningEvent =
- {0x1C,0xD4,0x7B,0x10,0xDA,0xA6,0x91,0x46,0x83,0x69,0x11,0xB2,0xCD,0xAA,0x28,0x8E};
-static const ff_asf_guid EVENTID_CtxADescriptorSpanningEvent =
- {0xE6,0xA2,0xB4,0x3A,0x47,0x42,0x34,0x4B,0x89,0x6C,0x30,0xAF,0xA5,0xD2,0x1C,0x24};
-static const ff_asf_guid EVENTID_CSDescriptorSpanningEvent =
- {0xD9,0x79,0xE7,0xEf,0xF0,0x97,0x86,0x47,0x80,0x0D,0x95,0xCF,0x50,0x5D,0xDC,0x66};
-static const ff_asf_guid EVENTID_DVBScramblingControlSpanningEvent =
- {0xC4,0xE1,0xD4,0x4B,0xA1,0x90,0x09,0x41,0x82,0x36,0x27,0xF0,0x0E,0x7D,0xCC,0x5B};
-static const ff_asf_guid EVENTID_StreamIDSpanningEvent =
- {0x68,0xAB,0xF1,0xCA,0x53,0xE1,0x41,0x4D,0xA6,0xB3,0xA7,0xC9,0x98,0xDB,0x75,0xEE};
-static const ff_asf_guid EVENTID_TeletextSpanningEvent =
- {0x50,0xD9,0x99,0x95,0x33,0x5F,0x17,0x46,0xAF,0x7C,0x1E,0x54,0xB5,0x10,0xDA,0xA3};
-static const ff_asf_guid EVENTID_AudioTypeSpanningEvent =
- {0xBE,0xBF,0x1C,0x50,0x49,0xB8,0xCE,0x42,0x9B,0xE9,0x3D,0xB8,0x69,0xFB,0x82,0xB3};
-
-/* Windows media GUIDs */
-
-/* Media types */
-static const ff_asf_guid mediatype_audio =
+const ff_asf_guid ff_mediatype_audio =
{'a','u','d','s',FF_MEDIASUBTYPE_BASE_GUID};
-static const ff_asf_guid mediatype_video =
+const ff_asf_guid ff_mediatype_video =
{'v','i','d','s',FF_MEDIASUBTYPE_BASE_GUID};
-static const ff_asf_guid mediasubtype_mpeg1payload =
- {0x81,0xEB,0x36,0xE4,0x4F,0x52,0xCE,0x11,0x9F,0x53,0x00,0x20,0xAF,0x0B,0xA7,0x70};
-static const ff_asf_guid mediatype_mpeg2_sections =
- {0x6C,0x17,0x5F,0x45,0x06,0x4B,0xCE,0x47,0x9A,0xEF,0x8C,0xAE,0xF7,0x3D,0xF7,0xB5};
-static const ff_asf_guid mediatype_mpeg2_pes =
- {0x20,0x80,0x6D,0xE0,0x46,0xDB,0xCF,0x11,0xB4,0xD1,0x00,0x80,0x5F,0x6C,0xBB,0xEA};
-static const ff_asf_guid mediatype_mstvcaption =
- {0x89,0x8A,0x8B,0xB8,0x49,0xB0,0x80,0x4C,0xAD,0xCF,0x58,0x98,0x98,0x5E,0x22,0xC1};
-
-/* Media subtypes */
-static const ff_asf_guid mediasubtype_cpfilters_processed =
- {0x28,0xBD,0xAD,0x46,0xD0,0x6F,0x96,0x47,0x93,0xB2,0x15,0x5C,0x51,0xDC,0x04,0x8D};
-static const ff_asf_guid mediasubtype_dvb_subtitle =
- {0xC3,0xCB,0xFF,0x34,0xB3,0xD5,0x71,0x41,0x90,0x02,0xD4,0xC6,0x03,0x01,0x69,0x7F};
-static const ff_asf_guid mediasubtype_teletext =
- {0xE3,0x76,0x2A,0xF7,0x0A,0xEB,0xD0,0x11,0xAC,0xE4,0x00,0x00,0xC0,0xCC,0x16,0xBA};
-static const ff_asf_guid mediasubtype_dtvccdata =
- {0xAA,0xDD,0x2A,0xF5,0xF0,0x36,0xF5,0x43,0x95,0xEA,0x6D,0x86,0x64,0x84,0x26,0x2A};
-static const ff_asf_guid mediasubtype_mpeg2_sections =
- {0x79,0x85,0x9F,0x4A,0xF8,0x6B,0x92,0x43,0x8A,0x6D,0xD2,0xDD,0x09,0xFA,0x78,0x61};
-
-/* Formats */
-static const ff_asf_guid format_cpfilters_processed =
- {0x6F,0xB3,0x39,0x67,0x5F,0x1D,0xC2,0x4A,0x81,0x92,0x28,0xBB,0x0E,0x73,0xD1,0x6A};
-static const ff_asf_guid format_waveformatex =
- {0x81,0x9F,0x58,0x05,0x56,0xC3,0xCE,0x11,0xBF,0x01,0x00,0xAA,0x00,0x55,0x59,0x5A};
-static const ff_asf_guid format_videoinfo2 =
- {0xA0,0x76,0x2A,0xF7,0x0A,0xEB,0xD0,0x11,0xAC,0xE4,0x00,0x00,0xC0,0xCC,0x16,0xBA};
-static const ff_asf_guid format_mpeg2_video =
- {0xE3,0x80,0x6D,0xE0,0x46,0xDB,0xCF,0x11,0xB4,0xD1,0x00,0x80,0x5F,0x6C,0xBB,0xEA};
-static const ff_asf_guid format_none =
+const ff_asf_guid ff_format_none =
{0xD6,0x17,0x64,0x0F,0x18,0xC3,0xD0,0x11,0xA4,0x3F,0x00,0xA0,0xC9,0x22,0x31,0x96};
-static const AVCodecGuid video_guids[] = {
+const AVCodecGuid ff_video_guids[] = {
{CODEC_ID_MPEG2VIDEO, {0x26,0x80,0x6D,0xE0,0x46,0xDB,0xCF,0x11,0xB4,0xD1,0x00,0x80,0x5F,0x6C,0xBB,0xEA}},
{CODEC_ID_NONE}
};
-
-static int read_probe(AVProbeData *p)
-{
- return ff_guidcmp(p->buf, wtv_guid) ? 0 : AVPROBE_SCORE_MAX;
-}
-
-/**
- * Convert win32 FILETIME to ISO-8601 string
- */
-static void filetime_to_iso8601(char *buf, int buf_size, int64_t value)
-{
- time_t t = (value / 10000000LL) - 11644473600LL;
- strftime(buf, buf_size, "%Y-%m-%d %H:%M:%S", gmtime(&t));
-}
-
-/**
- * Convert crazy time (100ns since 1 Jan 0001) to ISO-8601 string
- */
-static void crazytime_to_iso8601(char *buf, int buf_size, int64_t value)
-{
- time_t t = (value / 10000000LL) - 719162LL*86400LL;
- strftime(buf, buf_size, "%Y-%m-%d %H:%M:%S", gmtime(&t));
-}
-
-/**
- * Convert OLE DATE to ISO-8601 string
- */
-static void oledate_to_iso8601(char *buf, int buf_size, int64_t value)
-{
- time_t t = 631112400LL + 86400*av_int2dbl(value);
- strftime(buf, buf_size, "%Y-%m-%d %H:%M:%S", gmtime(&t));
-}
-
-static void get_attachment(AVFormatContext *s, AVIOContext *pb, int length)
-{
- char mime[1024];
- char description[1024];
- unsigned int filesize;
- AVStream *st;
- int64_t pos = avio_tell(pb);
-
- avio_get_str16le(pb, INT_MAX, mime, sizeof(mime));
- if (strcmp(mime, "image/jpeg"))
- goto done;
-
- avio_r8(pb);
- avio_get_str16le(pb, INT_MAX, description, sizeof(description));
- filesize = avio_rl32(pb);
- if (!filesize)
- goto done;
-
- st = av_new_stream(s, 0);
- if (!st)
- goto done;
- av_metadata_set2(&st->metadata, "title", description, 0);
- st->codec->codec_id = CODEC_ID_MJPEG;
- st->codec->codec_type = AVMEDIA_TYPE_ATTACHMENT;
- st->codec->extradata = av_mallocz(filesize);
- if (!st->codec->extradata)
- goto done;
- st->codec->extradata_size = filesize;
- avio_read(pb, st->codec->extradata, filesize);
-done:
- avio_seek(pb, pos + length, SEEK_SET);
-}
-
-static void get_tag(AVFormatContext *s, AVIOContext *pb, const char *key, int type, int length)
-{
- int buf_size = FFMAX(2*length, LEN_PRETTY_GUID) + 1;
- char *buf = av_malloc(buf_size);
- if (!buf)
- return;
-
- if (type == 0 && length == 4) {
- snprintf(buf, buf_size, "%"PRIi32, avio_rl32(pb));
- } else if (type == 1) {
- avio_get_str16le(pb, length, buf, buf_size);
- if (!strlen(buf)) {
- av_free(buf);
- return;
- }
- } else if (type == 3 && length == 4) {
- strcpy(buf, avio_rl32(pb) ? "true" : "false");
- } else if (type == 4 && length == 8) {
- int64_t num = avio_rl64(pb);
- if (!strcmp(key, "WM/EncodingTime") ||
- !strcmp(key, "WM/MediaOriginalBroadcastDateTime"))
- filetime_to_iso8601(buf, buf_size, num);
- else if (!strcmp(key, "WM/WMRVEncodeTime") ||
- !strcmp(key, "WM/WMRVEndTime"))
- crazytime_to_iso8601(buf, buf_size, num);
- else if (!strcmp(key, "WM/WMRVExpirationDate"))
- oledate_to_iso8601(buf, buf_size, num);
- else if (!strcmp(key, "WM/WMRVBitrate"))
- snprintf(buf, buf_size, "%f", av_int2dbl(num));
- else
- snprintf(buf, buf_size, "%"PRIi64, num);
- } else if (type == 5 && length == 2) {
- snprintf(buf, buf_size, "%"PRIi16, avio_rl16(pb));
- } else if (type == 6 && length == 16) {
- ff_asf_guid guid;
- avio_read(pb, guid, 16);
- snprintf(buf, buf_size, PRI_PRETTY_GUID, ARG_PRETTY_GUID(guid));
- } else if (type == 2 && !strcmp(key, "WM/Picture")) {
- get_attachment(s, pb, length);
- av_freep(&buf);
- return;
- } else {
- av_freep(&buf);
- av_log(s, AV_LOG_WARNING, "unsupported metadata entry; key:%s, type:%d, length:0x%x\n", key, type, length);
- avio_skip(pb, length);
- return;
- }
-
- av_metadata_set2(&s->metadata, key, buf, 0);
- av_freep(&buf);
-}
-
-/**
- * Parse metadata entries
- */
-static void parse_legacy_attrib(AVFormatContext *s, AVIOContext *pb)
-{
- ff_asf_guid guid;
- int length, type;
- while(!url_feof(pb)) {
- char key[1024];
- ff_get_guid(pb, &guid);
- type = avio_rl32(pb);
- length = avio_rl32(pb);
- if (!length)
- break;
- if (ff_guidcmp(&guid, metadata_guid)) {
- av_log(s, AV_LOG_WARNING, "unknown guid "FF_PRI_GUID", expected metadata_guid; "
- "remaining metadata entries ignored\n", FF_ARG_GUID(guid));
- break;
- }
- avio_get_str16le(pb, INT_MAX, key, sizeof(key));
- get_tag(s, pb, key, type, length);
- }
-
- ff_metadata_conv(&s->metadata, NULL, ff_asf_metadata_conv);
-}
-
-/**
- * parse VIDEOINFOHEADER2 structure
- * @return bytes consumed
- */
-static int parse_videoinfoheader2(AVFormatContext *s, AVStream *st)
-{
- WtvContext *wtv = s->priv_data;
- AVIOContext *pb = wtv->pb;
-
- avio_skip(pb, 72); // picture aspect ratio is unreliable
- ff_get_bmp_header(pb, st);
-
- return 72 + 40;
-}
-
-/**
- * Parse MPEG1WAVEFORMATEX extradata structure
- */
-static void parse_mpeg1waveformatex(AVStream *st)
-{
- /* fwHeadLayer */
- switch (AV_RL16(st->codec->extradata)) {
- case 0x0001 : st->codec->codec_id = CODEC_ID_MP1; break;
- case 0x0002 : st->codec->codec_id = CODEC_ID_MP2; break;
- case 0x0004 : st->codec->codec_id = CODEC_ID_MP3; break;
- }
-
- st->codec->bit_rate = AV_RL32(st->codec->extradata + 2); /* dwHeadBitrate */
-
- /* dwHeadMode */
- switch (AV_RL16(st->codec->extradata + 6)) {
- case 1 : case 2 : case 4 : st->codec->channels = 2; break;
- case 8 : st->codec->channels = 1; break;
- }
-}
-
-/**
- * Initialise stream
- * @param st Stream to initialise, or NULL to create and initialise new stream
- * @return NULL on error
- */
-static AVStream * new_stream(AVFormatContext *s, AVStream *st, int sid, int codec_type)
-{
- if (st) {
- if (st->codec->extradata) {
- av_freep(&st->codec->extradata);
- st->codec->extradata_size = 0;
- }
- } else {
- WtvStream *wst = av_mallocz(sizeof(WtvStream));
- if (!wst)
- return NULL;
- st = av_new_stream(s, sid);
- if (!st)
- return NULL;
- st->priv_data = wst;
- }
- st->codec->codec_type = codec_type;
- st->need_parsing = AVSTREAM_PARSE_FULL;
- av_set_pts_info(st, 64, 1, 10000000);
- return st;
-}
-
-/**
- * parse Media Type structure and populate stream
- * @param st Stream, or NULL to create new stream
- * @param mediatype Mediatype GUID
- * @param subtype Subtype GUID
- * @param formattype Format GUID
- * @param size Size of format buffer
- * @return NULL on error
- */
-static AVStream * parse_media_type(AVFormatContext *s, AVStream *st, int sid,
- ff_asf_guid mediatype, ff_asf_guid subtype,
- ff_asf_guid formattype, int size)
-{
- WtvContext *wtv = s->priv_data;
- AVIOContext *pb = wtv->pb;
- if (!ff_guidcmp(subtype, mediasubtype_cpfilters_processed) &&
- !ff_guidcmp(formattype, format_cpfilters_processed)) {
- ff_asf_guid actual_subtype;
- ff_asf_guid actual_formattype;
-
- if (size < 32) {
- av_log(s, AV_LOG_WARNING, "format buffer size underflow\n");
- avio_skip(pb, size);
- return NULL;
- }
-
- avio_skip(pb, size - 32);
- ff_get_guid(pb, &actual_subtype);
- ff_get_guid(pb, &actual_formattype);
- avio_seek(pb, -size, SEEK_CUR);
-
- st = parse_media_type(s, st, sid, mediatype, actual_subtype, actual_formattype, size - 32);
- avio_skip(pb, 32);
- return st;
- } else if (!ff_guidcmp(mediatype, mediatype_audio)) {
- st = new_stream(s, st, sid, AVMEDIA_TYPE_AUDIO);
- if (!st)
- return NULL;
- if (!ff_guidcmp(formattype, format_waveformatex)) {
- int ret = ff_get_wav_header(pb, st->codec, size);
- if (ret < 0)
- return NULL;
- } else {
- if (ff_guidcmp(formattype, format_none))
- av_log(s, AV_LOG_WARNING, "unknown formattype:"FF_PRI_GUID"\n", FF_ARG_GUID(formattype));
- avio_skip(pb, size);
- }
-
- if (!memcmp(subtype + 4, (const uint8_t[]){FF_MEDIASUBTYPE_BASE_GUID}, 12)) {
- st->codec->codec_id = ff_wav_codec_get_id(AV_RL32(subtype), st->codec->bits_per_coded_sample);
- } else if (!ff_guidcmp(subtype, mediasubtype_mpeg1payload)) {
- if (st->codec->extradata && st->codec->extradata_size >= 22)
- parse_mpeg1waveformatex(st);
- else
- av_log(s, AV_LOG_WARNING, "MPEG1WAVEFORMATEX underflow\n");
- } else {
- st->codec->codec_id = ff_codec_guid_get_id(ff_codec_wav_guids, subtype);
- if (st->codec->codec_id == CODEC_ID_NONE)
- av_log(s, AV_LOG_WARNING, "unknown subtype:"FF_PRI_GUID"\n", FF_ARG_GUID(subtype));
- }
- return st;
- } else if (!ff_guidcmp(mediatype, mediatype_video)) {
- st = new_stream(s, st, sid, AVMEDIA_TYPE_VIDEO);
- if (!st)
- return NULL;
- if (!ff_guidcmp(formattype, format_videoinfo2)) {
- int consumed = parse_videoinfoheader2(s, st);
- avio_skip(pb, FFMAX(size - consumed, 0));
- } else if (!ff_guidcmp(formattype, format_mpeg2_video)) {
- int consumed = parse_videoinfoheader2(s, st);
- avio_skip(pb, FFMAX(size - consumed, 0));
- } else {
- if (ff_guidcmp(formattype, format_none))
- av_log(s, AV_LOG_WARNING, "unknown formattype:"FF_PRI_GUID"\n", FF_ARG_GUID(formattype));
- avio_skip(pb, size);
- }
-
- if (!memcmp(subtype + 4, (const uint8_t[]){FF_MEDIASUBTYPE_BASE_GUID}, 12)) {
- st->codec->codec_id = ff_codec_get_id(ff_codec_bmp_tags, AV_RL32(subtype));
- } else {
- st->codec->codec_id = ff_codec_guid_get_id(video_guids, subtype);
- }
- if (st->codec->codec_id == CODEC_ID_NONE)
- av_log(s, AV_LOG_WARNING, "unknown subtype:"FF_PRI_GUID"\n", FF_ARG_GUID(subtype));
- return st;
- } else if (!ff_guidcmp(mediatype, mediatype_mpeg2_pes) &&
- !ff_guidcmp(subtype, mediasubtype_dvb_subtitle)) {
- st = new_stream(s, st, sid, AVMEDIA_TYPE_SUBTITLE);
- if (!st)
- return NULL;
- if (ff_guidcmp(formattype, format_none))
- av_log(s, AV_LOG_WARNING, "unknown formattype:"FF_PRI_GUID"\n", FF_ARG_GUID(formattype));
- avio_skip(pb, size);
- st->codec->codec_id = CODEC_ID_DVB_SUBTITLE;
- return st;
- } else if (!ff_guidcmp(mediatype, mediatype_mstvcaption) &&
- (!ff_guidcmp(subtype, mediasubtype_teletext) || !ff_guidcmp(subtype, mediasubtype_dtvccdata))) {
- st = new_stream(s, st, sid, AVMEDIA_TYPE_SUBTITLE);
- if (!st)
- return NULL;
- if (ff_guidcmp(formattype, format_none))
- av_log(s, AV_LOG_WARNING, "unknown formattype:"FF_PRI_GUID"\n", FF_ARG_GUID(formattype));
- avio_skip(pb, size);
- st->codec->codec_id = CODEC_ID_DVB_TELETEXT;
- return st;
- } else if (!ff_guidcmp(mediatype, mediatype_mpeg2_sections) &&
- !ff_guidcmp(subtype, mediasubtype_mpeg2_sections)) {
- if (ff_guidcmp(formattype, format_none))
- av_log(s, AV_LOG_WARNING, "unknown formattype:"FF_PRI_GUID"\n", FF_ARG_GUID(formattype));
- avio_skip(pb, size);
- return NULL;
- }
-
- av_log(s, AV_LOG_WARNING, "unknown media type, mediatype:"FF_PRI_GUID
- ", subtype:"FF_PRI_GUID", formattype:"FF_PRI_GUID"\n",
- FF_ARG_GUID(mediatype), FF_ARG_GUID(subtype), FF_ARG_GUID(formattype));
- avio_skip(pb, size);
- return NULL;
-}
-
-enum {
- SEEK_TO_DATA = 0,
- SEEK_TO_PTS,
-};
-
-/**
- * Parse WTV chunks
- * @param mode SEEK_TO_DATA or SEEK_TO_PTS
- * @param seekts timestamp
- * @param[out] len Length of data chunk
- * @return stream index of data chunk, or <0 on error
- */
-static int parse_chunks(AVFormatContext *s, int mode, int64_t seekts, int *len_ptr)
-{
- WtvContext *wtv = s->priv_data;
- AVIOContext *pb = wtv->pb;
- while (!url_feof(pb)) {
- ff_asf_guid g;
- int len, sid, consumed;
-
- ff_get_guid(pb, &g);
- len = avio_rl32(pb);
- if (len < 32)
- break;
- sid = avio_rl32(pb) & 0x7FFF;
- avio_skip(pb, 8);
- consumed = 32;
-
- if (!ff_guidcmp(g, stream_guid)) {
- if (ff_find_stream_index(s, sid) < 0) {
- ff_asf_guid mediatype, subtype, formattype;
- int size;
- avio_skip(pb, 28);
- ff_get_guid(pb, &mediatype);
- ff_get_guid(pb, &subtype);
- avio_skip(pb, 12);
- ff_get_guid(pb, &formattype);
- size = avio_rl32(pb);
- parse_media_type(s, 0, sid, mediatype, subtype, formattype, size);
- consumed += 92 + size;
- }
- } else if (!ff_guidcmp(g, stream2_guid)) {
- int stream_index = ff_find_stream_index(s, sid);
- if (stream_index >= 0 && !((WtvStream*)s->streams[stream_index]->priv_data)->seen_data) {
- ff_asf_guid mediatype, subtype, formattype;
- int size;
- avio_skip(pb, 12);
- ff_get_guid(pb, &mediatype);
- ff_get_guid(pb, &subtype);
- avio_skip(pb, 12);
- ff_get_guid(pb, &formattype);
- size = avio_rl32(pb);
- parse_media_type(s, s->streams[stream_index], sid, mediatype, subtype, formattype, size);
- consumed += 76 + size;
- }
- } else if (!ff_guidcmp(g, EVENTID_AudioDescriptorSpanningEvent) ||
- !ff_guidcmp(g, EVENTID_CtxADescriptorSpanningEvent) ||
- !ff_guidcmp(g, EVENTID_CSDescriptorSpanningEvent) ||
- !ff_guidcmp(g, EVENTID_StreamIDSpanningEvent) ||
- !ff_guidcmp(g, EVENTID_SubtitleSpanningEvent) ||
- !ff_guidcmp(g, EVENTID_TeletextSpanningEvent)) {
- int stream_index = ff_find_stream_index(s, sid);
- if (stream_index >= 0) {
- AVStream *st = s->streams[stream_index];
- uint8_t buf[258];
- const uint8_t *pbuf = buf;
- int buf_size;
-
- avio_skip(pb, 8);
- consumed += 8;
- if (!ff_guidcmp(g, EVENTID_CtxADescriptorSpanningEvent) ||
- !ff_guidcmp(g, EVENTID_CSDescriptorSpanningEvent)) {
- avio_skip(pb, 6);
- consumed += 6;
- }
-
- buf_size = FFMIN(len - consumed, sizeof(buf));
- avio_read(pb, buf, buf_size);
- consumed += buf_size;
- ff_parse_mpeg2_descriptor(s, st, 0, &pbuf, buf + buf_size, 0, 0, 0, 0);
- }
- } else if (!ff_guidcmp(g, EVENTID_AudioTypeSpanningEvent)) {
- int stream_index = ff_find_stream_index(s, sid);
- if (stream_index >= 0) {
- AVStream *st = s->streams[stream_index];
- int audio_type;
- avio_skip(pb, 8);
- audio_type = avio_r8(pb);
- if (audio_type == 2)
- st->disposition |= AV_DISPOSITION_HEARING_IMPAIRED;
- else if (audio_type == 3)
- st->disposition |= AV_DISPOSITION_VISUAL_IMPAIRED;
- consumed += 9;
- }
- } else if (!ff_guidcmp(g, EVENTID_DVBScramblingControlSpanningEvent)) {
- int stream_index = ff_find_stream_index(s, sid);
- if (stream_index >= 0) {
- avio_skip(pb, 12);
- if (avio_rl32(pb))
- av_log(s, AV_LOG_WARNING, "DVB scrambled stream detected (st:%d), decoding will likely fail\n", stream_index);
- consumed += 16;
- }
- } else if (!ff_guidcmp(g, EVENTID_LanguageSpanningEvent)) {
- int stream_index = ff_find_stream_index(s, sid);
- if (stream_index >= 0) {
- AVStream *st = s->streams[stream_index];
- uint8_t language[4];
- avio_skip(pb, 12);
- avio_read(pb, language, 3);
- if (language[0]) {
- language[3] = 0;
- av_metadata_set2(&st->metadata, "language", language, 0);
- if (!strcmp(language, "nar") || !strcmp(language, "NAR"))
- st->disposition |= AV_DISPOSITION_VISUAL_IMPAIRED;
- }
- consumed += 15;
- }
- } else if (!ff_guidcmp(g, timestamp_guid)) {
- int stream_index = ff_find_stream_index(s, sid);
- if (stream_index >= 0) {
- avio_skip(pb, 8);
- wtv->pts = avio_rl64(pb);
- consumed += 16;
- if (wtv->pts == -1)
- wtv->pts = AV_NOPTS_VALUE;
- else {
- wtv->last_valid_pts = wtv->pts;
- if (wtv->epoch == AV_NOPTS_VALUE || wtv->pts < wtv->epoch)
- wtv->epoch = wtv->pts;
- if (mode == SEEK_TO_PTS && wtv->pts >= seekts) {
-#define WTV_PAD8(x) (((x) + 7) & ~7)
- avio_skip(pb, WTV_PAD8(len) - consumed);
- return 0;
- }
- }
- }
- } else if (!ff_guidcmp(g, data_guid)) {
- int stream_index = ff_find_stream_index(s, sid);
- if (mode == SEEK_TO_DATA && stream_index >= 0 && len > 32) {
- WtvStream *wst = s->streams[stream_index]->priv_data;
- wst->seen_data = 1;
- if (len_ptr) {
- *len_ptr = len;
- }
- return stream_index;
- }
- } else if (
- !ff_guidcmp(g, /* DSATTRIB_CAPTURE_STREAMTIME */ (const ff_asf_guid){0x14,0x56,0x1A,0x0C,0xCD,0x30,0x40,0x4F,0xBC,0xBF,0xD0,0x3E,0x52,0x30,0x62,0x07}) ||
- !ff_guidcmp(g, /* DSATTRIB_PicSampleSeq */ (const ff_asf_guid){0x02,0xAE,0x5B,0x2F,0x8F,0x7B,0x60,0x4F,0x82,0xD6,0xE4,0xEA,0x2F,0x1F,0x4C,0x99}) ||
- !ff_guidcmp(g, /* DSATTRIB_TRANSPORT_PROPERTIES */ (const ff_asf_guid){0x12,0xF6,0x22,0xB6,0xAD,0x47,0x71,0x46,0xAD,0x6C,0x05,0xA9,0x8E,0x65,0xDE,0x3A}) ||
- !ff_guidcmp(g, /* dvr_ms_vid_frame_rep_data */ (const ff_asf_guid){0xCC,0x32,0x64,0xDD,0x29,0xE2,0xDB,0x40,0x80,0xF6,0xD2,0x63,0x28,0xD2,0x76,0x1F}) ||
- !ff_guidcmp(g, /* EVENTID_ChannelChangeSpanningEvent */ (const ff_asf_guid){0xE5,0xC5,0x67,0x90,0x5C,0x4C,0x05,0x42,0x86,0xC8,0x7A,0xFE,0x20,0xFE,0x1E,0xFA}) ||
- !ff_guidcmp(g, /* EVENTID_ChannelInfoSpanningEvent */ (const ff_asf_guid){0x80,0x6D,0xF3,0x41,0x32,0x41,0xC2,0x4C,0xB1,0x21,0x01,0xA4,0x32,0x19,0xD8,0x1B}) ||
- !ff_guidcmp(g, /* EVENTID_ChannelTypeSpanningEvent */ (const ff_asf_guid){0x51,0x1D,0xAB,0x72,0xD2,0x87,0x9B,0x48,0xBA,0x11,0x0E,0x08,0xDC,0x21,0x02,0x43}) ||
- !ff_guidcmp(g, /* EVENTID_PIDListSpanningEvent */ (const ff_asf_guid){0x65,0x8F,0xFC,0x47,0xBB,0xE2,0x34,0x46,0x9C,0xEF,0xFD,0xBF,0xE6,0x26,0x1D,0x5C}) ||
- !ff_guidcmp(g, /* EVENTID_SignalAndServiceStatusSpanningEvent */ (const ff_asf_guid){0xCB,0xC5,0x68,0x80,0x04,0x3C,0x2B,0x49,0xB4,0x7D,0x03,0x08,0x82,0x0D,0xCE,0x51}) ||
- !ff_guidcmp(g, /* EVENTID_StreamTypeSpanningEvent */ (const ff_asf_guid){0xBC,0x2E,0xAF,0x82,0xA6,0x30,0x64,0x42,0xA8,0x0B,0xAD,0x2E,0x13,0x72,0xAC,0x60}) ||
- !ff_guidcmp(g, (const ff_asf_guid){0x1E,0xBE,0xC3,0xC5,0x43,0x92,0xDC,0x11,0x85,0xE5,0x00,0x12,0x3F,0x6F,0x73,0xB9}) ||
- !ff_guidcmp(g, (const ff_asf_guid){0x3B,0x86,0xA2,0xB1,0xEB,0x1E,0xC3,0x44,0x8C,0x88,0x1C,0xA3,0xFF,0xE3,0xE7,0x6A}) ||
- !ff_guidcmp(g, (const ff_asf_guid){0x4E,0x7F,0x4C,0x5B,0xC4,0xD0,0x38,0x4B,0xA8,0x3E,0x21,0x7F,0x7B,0xBF,0x52,0xE7}) ||
- !ff_guidcmp(g, (const ff_asf_guid){0x63,0x36,0xEB,0xFE,0xA1,0x7E,0xD9,0x11,0x83,0x08,0x00,0x07,0xE9,0x5E,0xAD,0x8D}) ||
- !ff_guidcmp(g, (const ff_asf_guid){0x70,0xE9,0xF1,0xF8,0x89,0xA4,0x4C,0x4D,0x83,0x73,0xB8,0x12,0xE0,0xD5,0xF8,0x1E}) ||
- !ff_guidcmp(g, (const ff_asf_guid){0x96,0xC3,0xD2,0xC2,0x7E,0x9A,0xDA,0x11,0x8B,0xF7,0x00,0x07,0xE9,0x5E,0xAD,0x8D}) ||
- !ff_guidcmp(g, (const ff_asf_guid){0x97,0xC3,0xD2,0xC2,0x7E,0x9A,0xDA,0x11,0x8B,0xF7,0x00,0x07,0xE9,0x5E,0xAD,0x8D}) ||
- !ff_guidcmp(g, (const ff_asf_guid){0xA1,0xC3,0xD2,0xC2,0x7E,0x9A,0xDA,0x11,0x8B,0xF7,0x00,0x07,0xE9,0x5E,0xAD,0x8D})) {
- //ignore known guids
- } else
- av_log(s, AV_LOG_WARNING, "unsupported chunk:"FF_PRI_GUID"\n", FF_ARG_GUID(g));
-
- avio_skip(pb, WTV_PAD8(len) - consumed);
- }
- return AVERROR_EOF;
-}
-
-/* declare utf16le strings */
-#define _ , 0,
-static const uint8_t timeline_le16[] =
- {'t'_'i'_'m'_'e'_'l'_'i'_'n'_'e', 0};
-static const uint8_t table_0_entries_legacy_attrib_le16[] =
- {'t'_'a'_'b'_'l'_'e'_'.'_'0'_'.'_'e'_'n'_'t'_'r'_'i'_'e'_'s'_'.'_'l'_'e'_'g'_'a'_'c'_'y'_'_'_'a'_'t'_'t'_'r'_'i'_'b', 0};
-static const uint8_t table_0_entries_time_le16[] =
- {'t'_'a'_'b'_'l'_'e'_'.'_'0'_'.'_'e'_'n'_'t'_'r'_'i'_'e'_'s'_'.'_'t'_'i'_'m'_'e', 0};
-static const uint8_t timeline_table_0_entries_Events_le16[] =
- {'t'_'i'_'m'_'e'_'l'_'i'_'n'_'e'_'.'_'t'_'a'_'b'_'l'_'e'_'.'_'0'_'.'_'e'_'n'_'t'_'r'_'i'_'e'_'s'_'.'_'E'_'v'_'e'_'n'_'t'_'s', 0};
-#undef _
-
-static int read_header(AVFormatContext *s, AVFormatParameters *ap)
-{
- WtvContext *wtv = s->priv_data;
- int root_sector, root_size;
- uint8_t root[WTV_SECTOR_SIZE];
- AVIOContext *pb;
- int64_t timeline_pos;
- int ret;
-
- wtv->epoch =
- wtv->pts =
- wtv->last_valid_pts = AV_NOPTS_VALUE;
-
- /* read root directory sector */
- avio_skip(s->pb, 0x30);
- root_size = avio_rl32(s->pb);
- if (root_size > sizeof(root)) {
- av_log(s, AV_LOG_ERROR, "root directory size exceeds sector size\n");
- return AVERROR_INVALIDDATA;
- }
- avio_skip(s->pb, 4);
- root_sector = avio_rl32(s->pb);
-
- avio_seek(s->pb, root_sector << WTV_SECTOR_BITS, SEEK_SET);
- root_size = avio_read(s->pb, root, root_size);
- if (root_size < 0)
- return AVERROR_INVALIDDATA;
-
- /* parse chunks up until first data chunk */
- wtv->pb = wtvfile_open(s, root, root_size, timeline_le16);
- if (!wtv->pb) {
- av_log(s, AV_LOG_ERROR, "timeline data missing\n");
- return AVERROR_INVALIDDATA;
- }
-
- ret = parse_chunks(s, SEEK_TO_DATA, 0, 0);
- if (ret < 0)
- return ret;
- avio_seek(wtv->pb, -32, SEEK_CUR);
-
- timeline_pos = avio_tell(s->pb); // save before opening another file
-
- /* read metadata */
- pb = wtvfile_open(s, root, root_size, table_0_entries_legacy_attrib_le16);
- if (pb) {
- parse_legacy_attrib(s, pb);
- wtvfile_close(pb);
- }
-
- /* read seek index */
- if (s->nb_streams) {
- AVStream *st = s->streams[0];
- pb = wtvfile_open(s, root, root_size, table_0_entries_time_le16);
- if (pb) {
- while(1) {
- uint64_t timestamp = avio_rl64(pb);
- uint64_t frame_nb = avio_rl64(pb);
- if (url_feof(pb))
- break;
- ff_add_index_entry(&wtv->index_entries, &wtv->nb_index_entries, &wtv->index_entries_allocated_size,
- 0, timestamp, frame_nb, 0, AVINDEX_KEYFRAME);
- }
- wtvfile_close(pb);
-
- if (wtv->nb_index_entries) {
- pb = wtvfile_open(s, root, root_size, timeline_table_0_entries_Events_le16);
- if (pb) {
- int i;
- while (1) {
- uint64_t frame_nb = avio_rl64(pb);
- uint64_t position = avio_rl64(pb);
- if (url_feof(pb))
- break;
- for (i = wtv->nb_index_entries - 1; i >= 0; i--) {
- AVIndexEntry *e = wtv->index_entries + i;
- if (frame_nb > e->size)
- break;
- if (position > e->pos)
- e->pos = position;
- }
- }
- wtvfile_close(pb);
- st->duration = wtv->index_entries[wtv->nb_index_entries - 1].timestamp;
- }
- }
- }
- }
-
- avio_seek(s->pb, timeline_pos, SEEK_SET);
- return 0;
-}
-
-static int read_packet(AVFormatContext *s, AVPacket *pkt)
-{
- WtvContext *wtv = s->priv_data;
- AVIOContext *pb = wtv->pb;
- int stream_index, len, ret;
-
- stream_index = parse_chunks(s, SEEK_TO_DATA, 0, &len);
- if (stream_index < 0)
- return stream_index;
-
- ret = av_get_packet(pb, pkt, len - 32);
- if (ret < 0)
- return ret;
- pkt->stream_index = stream_index;
- pkt->pts = wtv->pts;
- avio_skip(pb, WTV_PAD8(len) - len);
- return 0;
-}
-
-static int read_seek(AVFormatContext *s, int stream_index,
- int64_t ts, int flags)
-{
- WtvContext *wtv = s->priv_data;
- AVIOContext *pb = wtv->pb;
- AVStream *st = s->streams[0];
- int64_t ts_relative;
- int i;
-
- if ((flags & AVSEEK_FLAG_FRAME) || (flags & AVSEEK_FLAG_BYTE))
- return AVERROR(ENOSYS);
-
- /* timestamp adjustment is required because wtv->pts values are absolute,
- * whereas AVIndexEntry->timestamp values are relative to epoch. */
- ts_relative = ts;
- if (wtv->epoch != AV_NOPTS_VALUE)
- ts_relative -= wtv->epoch;
-
- i = ff_index_search_timestamp(wtv->index_entries, wtv->nb_index_entries, ts_relative, flags);
- if (i < 0) {
- if (wtv->last_valid_pts == AV_NOPTS_VALUE || ts < wtv->last_valid_pts)
- avio_seek(pb, 0, SEEK_SET);
- else if (st->duration != AV_NOPTS_VALUE && ts_relative > st->duration && wtv->nb_index_entries)
- avio_seek(pb, wtv->index_entries[wtv->nb_index_entries - 1].pos, SEEK_SET);
- if (parse_chunks(s, SEEK_TO_PTS, ts, 0) < 0)
- return AVERROR(ERANGE);
- return 0;
- }
- wtv->pts = wtv->index_entries[i].timestamp;
- if (wtv->epoch != AV_NOPTS_VALUE)
- wtv->pts += wtv->epoch;
- wtv->last_valid_pts = wtv->pts;
- avio_seek(pb, wtv->index_entries[i].pos, SEEK_SET);
- return 0;
-}
-
-static int read_close(AVFormatContext *s)
-{
- WtvContext *wtv = s->priv_data;
- wtvfile_close(wtv->pb);
- return 0;
-}
-
-AVInputFormat ff_wtv_demuxer = {
- .name = "wtv",
- .long_name = NULL_IF_CONFIG_SMALL("Windows Television (WTV)"),
- .priv_data_size = sizeof(WtvContext),
- .read_probe = read_probe,
- .read_header = read_header,
- .read_packet = read_packet,
- .read_seek = read_seek,
- .read_close = read_close,
- .flags = AVFMT_SHOW_IDS,
-};
diff --git a/libavformat/wtv.h b/libavformat/wtv.h
new file mode 100644
index 0000000000..252804d602
--- /dev/null
+++ b/libavformat/wtv.h
@@ -0,0 +1,41 @@
+/*
+ * Windows Television (WTV)
+ * Copyright (c) 2010-2011 Peter Ross <pross@xvid.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFORMAT_WTV_H
+#define AVFORMAT_WTV_H
+
+#include "riff.h"
+#include "asf.h"
+
+#define WTV_SECTOR_BITS 12
+#define WTV_SECTOR_SIZE (1 << WTV_SECTOR_BITS)
+#define WTV_BIGSECTOR_BITS 18
+
+extern const ff_asf_guid ff_dir_entry_guid;
+extern const ff_asf_guid ff_wtv_guid;
+extern const ff_asf_guid ff_timestamp_guid;
+extern const ff_asf_guid ff_data_guid;
+extern const ff_asf_guid ff_stream_guid;
+extern const ff_asf_guid ff_mediatype_audio;
+extern const ff_asf_guid ff_mediatype_video;
+extern const ff_asf_guid ff_format_none;
+extern const AVCodecGuid ff_video_guids[];
+#endif /* AVFORMAT_WTV_H */
diff --git a/libavformat/wtvdec.c b/libavformat/wtvdec.c
new file mode 100644
index 0000000000..5cbec0576a
--- /dev/null
+++ b/libavformat/wtvdec.c
@@ -0,0 +1,1060 @@
+/*
+ * Windows Television (WTV) demuxer
+ * Copyright (c) 2010-2011 Peter Ross <pross@xvid.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Windows Television (WTV) demuxer
+ * @author Peter Ross <pross@xvid.org>
+ */
+
+#include "libavutil/intreadwrite.h"
+#include "libavutil/intfloat_readwrite.h"
+#include "avformat.h"
+#include "internal.h"
+#include "wtv.h"
+#include "mpegts.h"
+#include <strings.h>
+
+/* Macros for formatting GUIDs */
+#define PRI_PRETTY_GUID \
+ "%08x-%04x-%04x-%02x%02x%02x%02x%02x%02x%02x%02x"
+#define ARG_PRETTY_GUID(g) \
+ AV_RL32(g),AV_RL16(g+4),AV_RL16(g+6),g[8],g[9],g[10],g[11],g[12],g[13],g[14],g[15]
+#define LEN_PRETTY_GUID 34
+
+/*
+ *
+ * File system routines
+ *
+ */
+
+typedef struct {
+ AVIOContext *pb_filesystem; /** file system (AVFormatContext->pb) */
+
+ int sector_bits; /** sector shift bits; used to convert sector number into pb_filesystem offset */
+ uint32_t *sectors; /** file allocation table */
+ int nb_sectors; /** number of sectors */
+
+ int error;
+ int64_t position;
+ int64_t length;
+} WtvFile;
+
+/**
+ * @return bytes read, 0 on end of file, or <0 on error
+ */
+static int wtvfile_read_packet(void *opaque, uint8_t *buf, int buf_size)
+{
+ WtvFile *wf = opaque;
+ AVIOContext *pb = wf->pb_filesystem;
+ int nread = 0;
+
+ if (wf->error || pb->error)
+ return -1;
+ if (wf->position >= wf->length || url_feof(pb))
+ return 0;
+
+ buf_size = FFMIN(buf_size, wf->length - wf->position);
+ while(nread < buf_size) {
+ int n;
+ int remaining_in_sector = (1 << wf->sector_bits) - (wf->position & ((1 << wf->sector_bits) - 1));
+ int read_request = FFMIN(buf_size - nread, remaining_in_sector);
+
+ n = avio_read(pb, buf, read_request);
+ if (n <= 0)
+ break;
+ nread += n;
+ buf += n;
+ wf->position += n;
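+        /* reached a sector boundary: fail if there is no next sector; otherwise
+           seek to it unless it is physically contiguous with the previous one */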
+ if (n == remaining_in_sector) {
+ int i = wf->position >> wf->sector_bits;
+ if (i >= wf->nb_sectors ||
+ (wf->sectors[i] != wf->sectors[i - 1] + (1 << (wf->sector_bits - WTV_SECTOR_BITS)) &&
+ avio_seek(pb, (int64_t)wf->sectors[i] << WTV_SECTOR_BITS, SEEK_SET) < 0)) {
+ wf->error = 1;
+ break;
+ }
+ }
+ }
+ return nread;
+}
+
+/**
+ * @return position (or file length)
+ */
+static int64_t wtvfile_seek(void *opaque, int64_t offset, int whence)
+{
+ WtvFile *wf = opaque;
+ AVIOContext *pb = wf->pb_filesystem;
+
+ if (whence == AVSEEK_SIZE)
+ return wf->length;
+ else if (whence == SEEK_CUR)
+ offset = wf->position + offset;
+ else if (whence == SEEK_END)
+ offset = wf->length;
+
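+    /* translate the logical offset into its physical file-system position using the sector table */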
+ wf->error = offset < 0 || offset >= wf->length ||
+ avio_seek(pb, ((int64_t)wf->sectors[offset >> wf->sector_bits] << WTV_SECTOR_BITS)
+ + (offset & ((1 << wf->sector_bits) - 1)), SEEK_SET) < 0;
+ wf->position = offset;
+ return offset;
+}
+
+/**
+ * read non-zero integers (le32) from input stream
+ * @param pb input stream
+ * @param[out] data destination
+ * @param count maximum number of integers to read
+ * @return total number of integers read
+ */
+static int read_ints(AVIOContext *pb, uint32_t *data, int count)
+{
+ int i, total = 0;
+ for (i = 0; i < count; i++) {
+ if ((data[total] = avio_rl32(pb)))
+ total++;
+ }
+ return total;
+}
+
+/**
+ * Open file
+ * @param first_sector First sector
+ * @param length Length of file (bytes)
+ * @param depth File allocation table depth
+ * @return NULL on error
+ */
+static AVIOContext * wtvfile_open_sector(int first_sector, uint64_t length, int depth, AVFormatContext *s)
+{
+ AVIOContext *pb;
+ WtvFile *wf;
+ uint8_t *buffer;
+
+ if (avio_seek(s->pb, first_sector << WTV_SECTOR_BITS, SEEK_SET) < 0)
+ return NULL;
+
+ wf = av_mallocz(sizeof(WtvFile));
+ if (!wf)
+ return NULL;
+
+ if (depth == 0) {
+ wf->sectors = av_malloc(sizeof(uint32_t));
+ if (!wf->sectors) {
+ av_free(wf);
+ return NULL;
+ }
+ wf->sectors[0] = first_sector;
+ wf->nb_sectors = 1;
+ wf->sector_bits = WTV_SECTOR_BITS;
+ } else if (depth == 1) {
+ wf->sectors = av_malloc(WTV_SECTOR_SIZE);
+ if (!wf->sectors) {
+ av_free(wf);
+ return NULL;
+ }
+ wf->nb_sectors = read_ints(s->pb, wf->sectors, WTV_SECTOR_SIZE / 4);
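+        /* bit 63 of the length field selects small (4 KiB) sectors; otherwise 256 KiB big sectors are used */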
+ wf->sector_bits = length & (1ULL<<63) ? WTV_SECTOR_BITS : WTV_BIGSECTOR_BITS;
+ } else if (depth == 2) {
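+        /* two-level table: the first sector lists table sectors, each of which lists data sectors */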
+ uint32_t sectors1[WTV_SECTOR_SIZE / 4];
+ int nb_sectors1 = read_ints(s->pb, sectors1, WTV_SECTOR_SIZE / 4);
+ int i;
+
+ wf->sectors = av_malloc(nb_sectors1 << WTV_SECTOR_BITS);
+ if (!wf->sectors) {
+ av_free(wf);
+ return NULL;
+ }
+ wf->nb_sectors = 0;
+ for (i = 0; i < nb_sectors1; i++) {
+ if (avio_seek(s->pb, (int64_t)sectors1[i] << WTV_SECTOR_BITS, SEEK_SET) < 0)
+ break;
+ wf->nb_sectors += read_ints(s->pb, wf->sectors + i * WTV_SECTOR_SIZE / 4, WTV_SECTOR_SIZE / 4);
+ }
+ wf->sector_bits = length & (1ULL<<63) ? WTV_SECTOR_BITS : WTV_BIGSECTOR_BITS;
+ } else {
+ av_log(s, AV_LOG_ERROR, "unsupported file allocation table depth (0x%x)\n", depth);
+ av_free(wf);
+ return NULL;
+ }
+
+ if (!wf->nb_sectors) {
+ av_free(wf->sectors);
+ av_free(wf);
+ return NULL;
+ }
+
+    /* check length: keep only the low 48 bits (bit 63 is the sector-size flag) */
+ length &= 0xFFFFFFFFFFFF;
+ if (length > ((int64_t)wf->nb_sectors << wf->sector_bits)) {
+ av_log(s, AV_LOG_WARNING, "reported file length (0x%"PRIx64") exceeds number of available sectors (0x%"PRIx64")\n", length, (int64_t)wf->nb_sectors << wf->sector_bits);
+ length = (int64_t)wf->nb_sectors << wf->sector_bits;
+ }
+ wf->length = length;
+
+    /* seek to initial sector */
+ wf->position = 0;
+ if (avio_seek(s->pb, (int64_t)wf->sectors[0] << WTV_SECTOR_BITS, SEEK_SET) < 0) {
+ av_free(wf->sectors);
+ av_free(wf);
+ return NULL;
+ }
+
+ wf->pb_filesystem = s->pb;
+ buffer = av_malloc(1 << wf->sector_bits);
+ if (!buffer) {
+ av_free(wf->sectors);
+ av_free(wf);
+ return NULL;
+ }
+
+ pb = avio_alloc_context(buffer, 1 << wf->sector_bits, 0, wf,
+ wtvfile_read_packet, NULL, wtvfile_seek);
+ if (!pb) {
+ av_free(buffer);
+ av_free(wf->sectors);
+ av_free(wf);
+ }
+ return pb;
+}
+
+/**
+ * Open file using filename
+ * @param[in] buf directory buffer
+ * @param buf_size directory buffer size
+ * @param[in] filename
+ * @param filename_size size of filename
+ * @return NULL on error
+ */
+static AVIOContext * wtvfile_open2(AVFormatContext *s, const uint8_t *buf, int buf_size, const uint8_t *filename, int filename_size)
+{
+ const uint8_t *buf_end = buf + buf_size;
+
+ while(buf + 48 <= buf_end) {
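+        /* directory entry layout: GUID, entry length at +16, file length at +24,
+           name length at +32, UTF-16LE name at +40, then first sector and table depth */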
+ int dir_length, name_size, first_sector, depth;
+ uint64_t file_length;
+ const uint8_t *name;
+ if (ff_guidcmp(buf, ff_dir_entry_guid)) {
+ av_log(s, AV_LOG_ERROR, "unknown guid "FF_PRI_GUID", expected dir_entry_guid; "
+ "remaining directory entries ignored\n", FF_ARG_GUID(buf));
+ break;
+ }
+ dir_length = AV_RL16(buf + 16);
+ file_length = AV_RL64(buf + 24);
+ name_size = 2 * AV_RL32(buf + 32);
+ if (buf + 48 + name_size > buf_end) {
+ av_log(s, AV_LOG_ERROR, "filename exceeds buffer size; remaining directory entries ignored\n");
+ break;
+ }
+ first_sector = AV_RL32(buf + 40 + name_size);
+ depth = AV_RL32(buf + 44 + name_size);
+
+ /* compare file name; test optional null terminator */
+ name = buf + 40;
+ if (name_size >= filename_size &&
+ !memcmp(name, filename, filename_size) &&
+ (name_size < filename_size + 2 || !AV_RN16(name + filename_size)))
+ return wtvfile_open_sector(first_sector, file_length, depth, s);
+
+ buf += dir_length;
+ }
+ return 0;
+}
+
+#define wtvfile_open(s, buf, buf_size, filename) \
+ wtvfile_open2(s, buf, buf_size, filename, sizeof(filename))
+
+/**
+ * Close file opened with wtvfile_open_sector(), or wtvfile_open()
+ */
+static void wtvfile_close(AVIOContext *pb)
+{
+ WtvFile *wf = pb->opaque;
+ av_free(wf->sectors);
+ av_free(pb);
+}
+
+/*
+ *
+ * Main demuxer
+ *
+ */
+
+typedef struct {
+ int seen_data;
+} WtvStream;
+
+typedef struct {
+ AVIOContext *pb; /** timeline file */
+ int64_t epoch;
+ int64_t pts; /** pts for next data chunk */
+    int64_t last_valid_pts; /** latest valid pts, used for interactive seeking */
+
+ /* maintain private seek index, as the AVIndexEntry->pos is relative to the
+ start of the 'timeline' file, not the file system (AVFormatContext->pb) */
+ AVIndexEntry *index_entries;
+ int nb_index_entries;
+ unsigned int index_entries_allocated_size;
+} WtvContext;
+
+/* WTV GUIDs */
+static const ff_asf_guid metadata_guid =
+ {0x5A,0xFE,0xD7,0x6D,0xC8,0x1D,0x8F,0x4A,0x99,0x22,0xFA,0xB1,0x1C,0x38,0x14,0x53};
+static const ff_asf_guid stream2_guid =
+ {0xA2,0xC3,0xD2,0xC2,0x7E,0x9A,0xDA,0x11,0x8B,0xF7,0x00,0x07,0xE9,0x5E,0xAD,0x8D};
+static const ff_asf_guid EVENTID_SubtitleSpanningEvent =
+ {0x48,0xC0,0xCE,0x5D,0xB9,0xD0,0x63,0x41,0x87,0x2C,0x4F,0x32,0x22,0x3B,0xE8,0x8A};
+static const ff_asf_guid EVENTID_LanguageSpanningEvent =
+ {0x6D,0x66,0x92,0xE2,0x02,0x9C,0x8D,0x44,0xAA,0x8D,0x78,0x1A,0x93,0xFD,0xC3,0x95};
+static const ff_asf_guid EVENTID_AudioDescriptorSpanningEvent =
+ {0x1C,0xD4,0x7B,0x10,0xDA,0xA6,0x91,0x46,0x83,0x69,0x11,0xB2,0xCD,0xAA,0x28,0x8E};
+static const ff_asf_guid EVENTID_CtxADescriptorSpanningEvent =
+ {0xE6,0xA2,0xB4,0x3A,0x47,0x42,0x34,0x4B,0x89,0x6C,0x30,0xAF,0xA5,0xD2,0x1C,0x24};
+static const ff_asf_guid EVENTID_CSDescriptorSpanningEvent =
+ {0xD9,0x79,0xE7,0xEf,0xF0,0x97,0x86,0x47,0x80,0x0D,0x95,0xCF,0x50,0x5D,0xDC,0x66};
+static const ff_asf_guid EVENTID_DVBScramblingControlSpanningEvent =
+ {0xC4,0xE1,0xD4,0x4B,0xA1,0x90,0x09,0x41,0x82,0x36,0x27,0xF0,0x0E,0x7D,0xCC,0x5B};
+static const ff_asf_guid EVENTID_StreamIDSpanningEvent =
+ {0x68,0xAB,0xF1,0xCA,0x53,0xE1,0x41,0x4D,0xA6,0xB3,0xA7,0xC9,0x98,0xDB,0x75,0xEE};
+static const ff_asf_guid EVENTID_TeletextSpanningEvent =
+ {0x50,0xD9,0x99,0x95,0x33,0x5F,0x17,0x46,0xAF,0x7C,0x1E,0x54,0xB5,0x10,0xDA,0xA3};
+static const ff_asf_guid EVENTID_AudioTypeSpanningEvent =
+ {0xBE,0xBF,0x1C,0x50,0x49,0xB8,0xCE,0x42,0x9B,0xE9,0x3D,0xB8,0x69,0xFB,0x82,0xB3};
+
+/* Windows media GUIDs */
+
+/* Media types */
+static const ff_asf_guid mediasubtype_mpeg1payload =
+ {0x81,0xEB,0x36,0xE4,0x4F,0x52,0xCE,0x11,0x9F,0x53,0x00,0x20,0xAF,0x0B,0xA7,0x70};
+static const ff_asf_guid mediatype_mpeg2_sections =
+ {0x6C,0x17,0x5F,0x45,0x06,0x4B,0xCE,0x47,0x9A,0xEF,0x8C,0xAE,0xF7,0x3D,0xF7,0xB5};
+static const ff_asf_guid mediatype_mpeg2_pes =
+ {0x20,0x80,0x6D,0xE0,0x46,0xDB,0xCF,0x11,0xB4,0xD1,0x00,0x80,0x5F,0x6C,0xBB,0xEA};
+static const ff_asf_guid mediatype_mstvcaption =
+ {0x89,0x8A,0x8B,0xB8,0x49,0xB0,0x80,0x4C,0xAD,0xCF,0x58,0x98,0x98,0x5E,0x22,0xC1};
+
+/* Media subtypes */
+static const ff_asf_guid mediasubtype_cpfilters_processed =
+ {0x28,0xBD,0xAD,0x46,0xD0,0x6F,0x96,0x47,0x93,0xB2,0x15,0x5C,0x51,0xDC,0x04,0x8D};
+static const ff_asf_guid mediasubtype_dvb_subtitle =
+ {0xC3,0xCB,0xFF,0x34,0xB3,0xD5,0x71,0x41,0x90,0x02,0xD4,0xC6,0x03,0x01,0x69,0x7F};
+static const ff_asf_guid mediasubtype_teletext =
+ {0xE3,0x76,0x2A,0xF7,0x0A,0xEB,0xD0,0x11,0xAC,0xE4,0x00,0x00,0xC0,0xCC,0x16,0xBA};
+static const ff_asf_guid mediasubtype_dtvccdata =
+ {0xAA,0xDD,0x2A,0xF5,0xF0,0x36,0xF5,0x43,0x95,0xEA,0x6D,0x86,0x64,0x84,0x26,0x2A};
+static const ff_asf_guid mediasubtype_mpeg2_sections =
+ {0x79,0x85,0x9F,0x4A,0xF8,0x6B,0x92,0x43,0x8A,0x6D,0xD2,0xDD,0x09,0xFA,0x78,0x61};
+
+/* Formats */
+static const ff_asf_guid format_cpfilters_processed =
+ {0x6F,0xB3,0x39,0x67,0x5F,0x1D,0xC2,0x4A,0x81,0x92,0x28,0xBB,0x0E,0x73,0xD1,0x6A};
+static const ff_asf_guid format_waveformatex =
+ {0x81,0x9F,0x58,0x05,0x56,0xC3,0xCE,0x11,0xBF,0x01,0x00,0xAA,0x00,0x55,0x59,0x5A};
+static const ff_asf_guid format_videoinfo2 =
+ {0xA0,0x76,0x2A,0xF7,0x0A,0xEB,0xD0,0x11,0xAC,0xE4,0x00,0x00,0xC0,0xCC,0x16,0xBA};
+static const ff_asf_guid format_mpeg2_video =
+ {0xE3,0x80,0x6D,0xE0,0x46,0xDB,0xCF,0x11,0xB4,0xD1,0x00,0x80,0x5F,0x6C,0xBB,0xEA};
+
+static int read_probe(AVProbeData *p)
+{
+ return ff_guidcmp(p->buf, ff_wtv_guid) ? 0 : AVPROBE_SCORE_MAX;
+}
+
+/**
+ * Convert win32 FILETIME to ISO-8601 string
+ */
+static void filetime_to_iso8601(char *buf, int buf_size, int64_t value)
+{
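+    /* FILETIME counts 100 ns units since 1601-01-01; 11644473600 seconds separate that epoch from 1970-01-01 */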
+ time_t t = (value / 10000000LL) - 11644473600LL;
+ strftime(buf, buf_size, "%Y-%m-%d %H:%M:%S", gmtime(&t));
+}
+
+/**
+ * Convert crazy time (100ns since 1 Jan 0001) to ISO-8601 string
+ */
+static void crazytime_to_iso8601(char *buf, int buf_size, int64_t value)
+{
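+    /* 719162 is the number of days from 0001-01-01 to 1970-01-01 */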
+ time_t t = (value / 10000000LL) - 719162LL*86400LL;
+ strftime(buf, buf_size, "%Y-%m-%d %H:%M:%S", gmtime(&t));
+}
+
+/**
+ * Convert OLE DATE to ISO-8601 string
+ * @return <0 on error
+ */
+static int oledate_to_iso8601(char *buf, int buf_size, int64_t value)
+{
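+    /* OLE dates count days since 1899-12-30; 25569 days later is the Unix epoch (1970-01-01) */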
+ time_t t = (av_int2dbl(value) - 25569.0) * 86400;
+ struct tm *result= gmtime(&t);
+ if (!result)
+ return -1;
+ strftime(buf, buf_size, "%Y-%m-%d %H:%M:%S", result);
+ return 0;
+}
+
+static void get_attachment(AVFormatContext *s, AVIOContext *pb, int length)
+{
+ char mime[1024];
+ char description[1024];
+ unsigned int filesize;
+ AVStream *st;
+ int64_t pos = avio_tell(pb);
+
+ avio_get_str16le(pb, INT_MAX, mime, sizeof(mime));
+ if (strcmp(mime, "image/jpeg"))
+ goto done;
+
+ avio_r8(pb);
+ avio_get_str16le(pb, INT_MAX, description, sizeof(description));
+ filesize = avio_rl32(pb);
+ if (!filesize)
+ goto done;
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ goto done;
+ av_metadata_set2(&st->metadata, "title", description, 0);
+ st->codec->codec_id = CODEC_ID_MJPEG;
+ st->codec->codec_type = AVMEDIA_TYPE_ATTACHMENT;
+ st->codec->extradata = av_mallocz(filesize);
+ if (!st->codec->extradata)
+ goto done;
+ st->codec->extradata_size = filesize;
+ avio_read(pb, st->codec->extradata, filesize);
+done:
+ avio_seek(pb, pos + length, SEEK_SET);
+}
+
+static void get_tag(AVFormatContext *s, AVIOContext *pb, const char *key, int type, int length)
+{
+ int buf_size = FFMAX(2*length, LEN_PRETTY_GUID) + 1;
+ char *buf = av_malloc(buf_size);
+ if (!buf)
+ return;
+
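+    /* metadata attribute types handled below: 0 = int32, 1 = UTF-16 string, 2 = binary,
+       3 = boolean, 4 = int64, 5 = int16, 6 = GUID */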
+ if (type == 0 && length == 4) {
+ snprintf(buf, buf_size, "%"PRIi32, avio_rl32(pb));
+ } else if (type == 1) {
+ avio_get_str16le(pb, length, buf, buf_size);
+ if (!strlen(buf)) {
+ av_free(buf);
+ return;
+ }
+ } else if (type == 3 && length == 4) {
+ strcpy(buf, avio_rl32(pb) ? "true" : "false");
+ } else if (type == 4 && length == 8) {
+ int64_t num = avio_rl64(pb);
+ if (!strcmp(key, "WM/EncodingTime") ||
+ !strcmp(key, "WM/MediaOriginalBroadcastDateTime"))
+ filetime_to_iso8601(buf, buf_size, num);
+ else if (!strcmp(key, "WM/WMRVEncodeTime") ||
+ !strcmp(key, "WM/WMRVEndTime"))
+ crazytime_to_iso8601(buf, buf_size, num);
+ else if (!strcmp(key, "WM/WMRVExpirationDate")) {
+            if (oledate_to_iso8601(buf, buf_size, num) < 0) {
+ av_free(buf);
+ return;
+ }
+ } else if (!strcmp(key, "WM/WMRVBitrate"))
+ snprintf(buf, buf_size, "%f", av_int2dbl(num));
+ else
+ snprintf(buf, buf_size, "%"PRIi64, num);
+ } else if (type == 5 && length == 2) {
+ snprintf(buf, buf_size, "%"PRIi16, avio_rl16(pb));
+ } else if (type == 6 && length == 16) {
+ ff_asf_guid guid;
+ avio_read(pb, guid, 16);
+ snprintf(buf, buf_size, PRI_PRETTY_GUID, ARG_PRETTY_GUID(guid));
+ } else if (type == 2 && !strcmp(key, "WM/Picture")) {
+ get_attachment(s, pb, length);
+ av_freep(&buf);
+ return;
+ } else {
+ av_freep(&buf);
+ av_log(s, AV_LOG_WARNING, "unsupported metadata entry; key:%s, type:%d, length:0x%x\n", key, type, length);
+ avio_skip(pb, length);
+ return;
+ }
+
+ av_metadata_set2(&s->metadata, key, buf, 0);
+ av_freep(&buf);
+}
+
+/**
+ * Parse metadata entries
+ */
+static void parse_legacy_attrib(AVFormatContext *s, AVIOContext *pb)
+{
+ ff_asf_guid guid;
+ int length, type;
+ while(!url_feof(pb)) {
+ char key[1024];
+ ff_get_guid(pb, &guid);
+ type = avio_rl32(pb);
+ length = avio_rl32(pb);
+ if (!length)
+ break;
+ if (ff_guidcmp(&guid, metadata_guid)) {
+ av_log(s, AV_LOG_WARNING, "unknown guid "FF_PRI_GUID", expected metadata_guid; "
+ "remaining metadata entries ignored\n", FF_ARG_GUID(guid));
+ break;
+ }
+ avio_get_str16le(pb, INT_MAX, key, sizeof(key));
+ get_tag(s, pb, key, type, length);
+ }
+
+ ff_metadata_conv(&s->metadata, NULL, ff_asf_metadata_conv);
+}
+
+/**
+ * parse VIDEOINFOHEADER2 structure
+ * @return bytes consumed
+ */
+static int parse_videoinfoheader2(AVFormatContext *s, AVStream *st)
+{
+ WtvContext *wtv = s->priv_data;
+ AVIOContext *pb = wtv->pb;
+
+ avio_skip(pb, 72); // picture aspect ratio is unreliable
+ ff_get_bmp_header(pb, st);
+
+ return 72 + 40;
+}
+
+/**
+ * Parse MPEG1WAVEFORMATEX extradata structure
+ */
+static void parse_mpeg1waveformatex(AVStream *st)
+{
+ /* fwHeadLayer */
+ switch (AV_RL16(st->codec->extradata)) {
+ case 0x0001 : st->codec->codec_id = CODEC_ID_MP1; break;
+ case 0x0002 : st->codec->codec_id = CODEC_ID_MP2; break;
+ case 0x0004 : st->codec->codec_id = CODEC_ID_MP3; break;
+ }
+
+ st->codec->bit_rate = AV_RL32(st->codec->extradata + 2); /* dwHeadBitrate */
+
+ /* dwHeadMode */
+ switch (AV_RL16(st->codec->extradata + 6)) {
+ case 1 : case 2 : case 4 : st->codec->channels = 2; break;
+ case 8 : st->codec->channels = 1; break;
+ }
+}
+
+/**
+ * Initialise stream
+ * @param st Stream to initialise, or NULL to create and initialise new stream
+ * @return NULL on error
+ */
+static AVStream * new_stream(AVFormatContext *s, AVStream *st, int sid, int codec_type)
+{
+ if (st) {
+ if (st->codec->extradata) {
+ av_freep(&st->codec->extradata);
+ st->codec->extradata_size = 0;
+ }
+ } else {
+ WtvStream *wst = av_mallocz(sizeof(WtvStream));
+ if (!wst)
+ return NULL;
+ st = av_new_stream(s, sid);
+ if (!st)
+ return NULL;
+ st->priv_data = wst;
+ }
+ st->codec->codec_type = codec_type;
+ st->need_parsing = AVSTREAM_PARSE_FULL;
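+    /* WTV timestamps use a 100 ns (1/10000000 second) time base */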
+ av_set_pts_info(st, 64, 1, 10000000);
+ return st;
+}
+
+/**
+ * parse Media Type structure and populate stream
+ * @param st Stream, or NULL to create new stream
+ * @param mediatype Mediatype GUID
+ * @param subtype Subtype GUID
+ * @param formattype Format GUID
+ * @param size Size of format buffer
+ * @return NULL on error
+ */
+static AVStream * parse_media_type(AVFormatContext *s, AVStream *st, int sid,
+ ff_asf_guid mediatype, ff_asf_guid subtype,
+ ff_asf_guid formattype, int size)
+{
+ WtvContext *wtv = s->priv_data;
+ AVIOContext *pb = wtv->pb;
+ if (!ff_guidcmp(subtype, mediasubtype_cpfilters_processed) &&
+ !ff_guidcmp(formattype, format_cpfilters_processed)) {
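+        /* the actual subtype and formattype GUIDs are stored in the last 32 bytes
+           of the format buffer; read them and re-parse with the remaining size */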
+ ff_asf_guid actual_subtype;
+ ff_asf_guid actual_formattype;
+
+ if (size < 32) {
+ av_log(s, AV_LOG_WARNING, "format buffer size underflow\n");
+ avio_skip(pb, size);
+ return NULL;
+ }
+
+ avio_skip(pb, size - 32);
+ ff_get_guid(pb, &actual_subtype);
+ ff_get_guid(pb, &actual_formattype);
+ avio_seek(pb, -size, SEEK_CUR);
+
+ st = parse_media_type(s, st, sid, mediatype, actual_subtype, actual_formattype, size - 32);
+ avio_skip(pb, 32);
+ return st;
+ } else if (!ff_guidcmp(mediatype, ff_mediatype_audio)) {
+ st = new_stream(s, st, sid, AVMEDIA_TYPE_AUDIO);
+ if (!st)
+ return NULL;
+ if (!ff_guidcmp(formattype, format_waveformatex)) {
+ int ret = ff_get_wav_header(pb, st->codec, size);
+ if (ret < 0)
+ return NULL;
+ } else {
+ if (ff_guidcmp(formattype, ff_format_none))
+ av_log(s, AV_LOG_WARNING, "unknown formattype:"FF_PRI_GUID"\n", FF_ARG_GUID(formattype));
+ avio_skip(pb, size);
+ }
+
+ if (!memcmp(subtype + 4, (const uint8_t[]){FF_MEDIASUBTYPE_BASE_GUID}, 12)) {
+ st->codec->codec_id = ff_wav_codec_get_id(AV_RL32(subtype), st->codec->bits_per_coded_sample);
+ } else if (!ff_guidcmp(subtype, mediasubtype_mpeg1payload)) {
+ if (st->codec->extradata && st->codec->extradata_size >= 22)
+ parse_mpeg1waveformatex(st);
+ else
+ av_log(s, AV_LOG_WARNING, "MPEG1WAVEFORMATEX underflow\n");
+ } else {
+ st->codec->codec_id = ff_codec_guid_get_id(ff_codec_wav_guids, subtype);
+ if (st->codec->codec_id == CODEC_ID_NONE)
+ av_log(s, AV_LOG_WARNING, "unknown subtype:"FF_PRI_GUID"\n", FF_ARG_GUID(subtype));
+ }
+ return st;
+ } else if (!ff_guidcmp(mediatype, ff_mediatype_video)) {
+ st = new_stream(s, st, sid, AVMEDIA_TYPE_VIDEO);
+ if (!st)
+ return NULL;
+ if (!ff_guidcmp(formattype, format_videoinfo2)) {
+ int consumed = parse_videoinfoheader2(s, st);
+ avio_skip(pb, FFMAX(size - consumed, 0));
+ } else if (!ff_guidcmp(formattype, format_mpeg2_video)) {
+ int consumed = parse_videoinfoheader2(s, st);
+ avio_skip(pb, FFMAX(size - consumed, 0));
+ } else {
+ if (ff_guidcmp(formattype, ff_format_none))
+ av_log(s, AV_LOG_WARNING, "unknown formattype:"FF_PRI_GUID"\n", FF_ARG_GUID(formattype));
+ avio_skip(pb, size);
+ }
+
+ if (!memcmp(subtype + 4, (const uint8_t[]){FF_MEDIASUBTYPE_BASE_GUID}, 12)) {
+ st->codec->codec_id = ff_codec_get_id(ff_codec_bmp_tags, AV_RL32(subtype));
+ } else {
+ st->codec->codec_id = ff_codec_guid_get_id(ff_video_guids, subtype);
+ }
+ if (st->codec->codec_id == CODEC_ID_NONE)
+ av_log(s, AV_LOG_WARNING, "unknown subtype:"FF_PRI_GUID"\n", FF_ARG_GUID(subtype));
+ return st;
+ } else if (!ff_guidcmp(mediatype, mediatype_mpeg2_pes) &&
+ !ff_guidcmp(subtype, mediasubtype_dvb_subtitle)) {
+ st = new_stream(s, st, sid, AVMEDIA_TYPE_SUBTITLE);
+ if (!st)
+ return NULL;
+ if (ff_guidcmp(formattype, ff_format_none))
+ av_log(s, AV_LOG_WARNING, "unknown formattype:"FF_PRI_GUID"\n", FF_ARG_GUID(formattype));
+ avio_skip(pb, size);
+ st->codec->codec_id = CODEC_ID_DVB_SUBTITLE;
+ return st;
+ } else if (!ff_guidcmp(mediatype, mediatype_mstvcaption) &&
+ (!ff_guidcmp(subtype, mediasubtype_teletext) || !ff_guidcmp(subtype, mediasubtype_dtvccdata))) {
+ st = new_stream(s, st, sid, AVMEDIA_TYPE_SUBTITLE);
+ if (!st)
+ return NULL;
+ if (ff_guidcmp(formattype, ff_format_none))
+ av_log(s, AV_LOG_WARNING, "unknown formattype:"FF_PRI_GUID"\n", FF_ARG_GUID(formattype));
+ avio_skip(pb, size);
+ st->codec->codec_id = CODEC_ID_DVB_TELETEXT;
+ return st;
+ } else if (!ff_guidcmp(mediatype, mediatype_mpeg2_sections) &&
+ !ff_guidcmp(subtype, mediasubtype_mpeg2_sections)) {
+ if (ff_guidcmp(formattype, ff_format_none))
+ av_log(s, AV_LOG_WARNING, "unknown formattype:"FF_PRI_GUID"\n", FF_ARG_GUID(formattype));
+ avio_skip(pb, size);
+ return NULL;
+ }
+
+ av_log(s, AV_LOG_WARNING, "unknown media type, mediatype:"FF_PRI_GUID
+ ", subtype:"FF_PRI_GUID", formattype:"FF_PRI_GUID"\n",
+ FF_ARG_GUID(mediatype), FF_ARG_GUID(subtype), FF_ARG_GUID(formattype));
+ avio_skip(pb, size);
+ return NULL;
+}
+
+enum {
+ SEEK_TO_DATA = 0,
+ SEEK_TO_PTS,
+};
+
+/**
+ * Parse WTV chunks
+ * @param mode SEEK_TO_DATA or SEEK_TO_PTS
+ * @param seekts timestamp
+ * @param[out] len Length of data chunk
+ * @return stream index of data chunk, or <0 on error
+ */
+static int parse_chunks(AVFormatContext *s, int mode, int64_t seekts, int *len_ptr)
+{
+ WtvContext *wtv = s->priv_data;
+ AVIOContext *pb = wtv->pb;
+ while (!url_feof(pb)) {
+ ff_asf_guid g;
+ int len, sid, consumed;
+
+ ff_get_guid(pb, &g);
+ len = avio_rl32(pb);
+ if (len < 32)
+ break;
+ sid = avio_rl32(pb) & 0x7FFF;
+ avio_skip(pb, 8);
+ consumed = 32;
+
+ if (!ff_guidcmp(g, ff_stream_guid)) {
+ if (ff_find_stream_index(s, sid) < 0) {
+ ff_asf_guid mediatype, subtype, formattype;
+ int size;
+ avio_skip(pb, 28);
+ ff_get_guid(pb, &mediatype);
+ ff_get_guid(pb, &subtype);
+ avio_skip(pb, 12);
+ ff_get_guid(pb, &formattype);
+ size = avio_rl32(pb);
+ parse_media_type(s, 0, sid, mediatype, subtype, formattype, size);
+ consumed += 92 + size;
+ }
+ } else if (!ff_guidcmp(g, stream2_guid)) {
+ int stream_index = ff_find_stream_index(s, sid);
+ if (stream_index >= 0 && !((WtvStream*)s->streams[stream_index]->priv_data)->seen_data) {
+ ff_asf_guid mediatype, subtype, formattype;
+ int size;
+ avio_skip(pb, 12);
+ ff_get_guid(pb, &mediatype);
+ ff_get_guid(pb, &subtype);
+ avio_skip(pb, 12);
+ ff_get_guid(pb, &formattype);
+ size = avio_rl32(pb);
+ parse_media_type(s, s->streams[stream_index], sid, mediatype, subtype, formattype, size);
+ consumed += 76 + size;
+ }
+ } else if (!ff_guidcmp(g, EVENTID_AudioDescriptorSpanningEvent) ||
+ !ff_guidcmp(g, EVENTID_CtxADescriptorSpanningEvent) ||
+ !ff_guidcmp(g, EVENTID_CSDescriptorSpanningEvent) ||
+ !ff_guidcmp(g, EVENTID_StreamIDSpanningEvent) ||
+ !ff_guidcmp(g, EVENTID_SubtitleSpanningEvent) ||
+ !ff_guidcmp(g, EVENTID_TeletextSpanningEvent)) {
+ int stream_index = ff_find_stream_index(s, sid);
+ if (stream_index >= 0) {
+ AVStream *st = s->streams[stream_index];
+ uint8_t buf[258];
+ const uint8_t *pbuf = buf;
+ int buf_size;
+
+ avio_skip(pb, 8);
+ consumed += 8;
+ if (!ff_guidcmp(g, EVENTID_CtxADescriptorSpanningEvent) ||
+ !ff_guidcmp(g, EVENTID_CSDescriptorSpanningEvent)) {
+ avio_skip(pb, 6);
+ consumed += 6;
+ }
+
+ buf_size = FFMIN(len - consumed, sizeof(buf));
+ avio_read(pb, buf, buf_size);
+ consumed += buf_size;
+ ff_parse_mpeg2_descriptor(s, st, 0, &pbuf, buf + buf_size, 0, 0, 0, 0);
+ }
+ } else if (!ff_guidcmp(g, EVENTID_AudioTypeSpanningEvent)) {
+ int stream_index = ff_find_stream_index(s, sid);
+ if (stream_index >= 0) {
+ AVStream *st = s->streams[stream_index];
+ int audio_type;
+ avio_skip(pb, 8);
+ audio_type = avio_r8(pb);
+ if (audio_type == 2)
+ st->disposition |= AV_DISPOSITION_HEARING_IMPAIRED;
+ else if (audio_type == 3)
+ st->disposition |= AV_DISPOSITION_VISUAL_IMPAIRED;
+ consumed += 9;
+ }
+ } else if (!ff_guidcmp(g, EVENTID_DVBScramblingControlSpanningEvent)) {
+ int stream_index = ff_find_stream_index(s, sid);
+ if (stream_index >= 0) {
+ avio_skip(pb, 12);
+ if (avio_rl32(pb))
+ av_log(s, AV_LOG_WARNING, "DVB scrambled stream detected (st:%d), decoding will likely fail\n", stream_index);
+ consumed += 16;
+ }
+ } else if (!ff_guidcmp(g, EVENTID_LanguageSpanningEvent)) {
+ int stream_index = ff_find_stream_index(s, sid);
+ if (stream_index >= 0) {
+ AVStream *st = s->streams[stream_index];
+ uint8_t language[4];
+ avio_skip(pb, 12);
+ avio_read(pb, language, 3);
+ if (language[0]) {
+ language[3] = 0;
+ av_metadata_set2(&st->metadata, "language", language, 0);
+ if (!strcmp(language, "nar") || !strcmp(language, "NAR"))
+ st->disposition |= AV_DISPOSITION_VISUAL_IMPAIRED;
+ }
+ consumed += 15;
+ }
+ } else if (!ff_guidcmp(g, ff_timestamp_guid)) {
+ int stream_index = ff_find_stream_index(s, sid);
+ if (stream_index >= 0) {
+ avio_skip(pb, 8);
+ wtv->pts = avio_rl64(pb);
+ consumed += 16;
+ if (wtv->pts == -1)
+ wtv->pts = AV_NOPTS_VALUE;
+ else {
+ wtv->last_valid_pts = wtv->pts;
+ if (wtv->epoch == AV_NOPTS_VALUE || wtv->pts < wtv->epoch)
+ wtv->epoch = wtv->pts;
+ if (mode == SEEK_TO_PTS && wtv->pts >= seekts) {
+#define WTV_PAD8(x) (((x) + 7) & ~7)
+ avio_skip(pb, WTV_PAD8(len) - consumed);
+ return 0;
+ }
+ }
+ }
+ } else if (!ff_guidcmp(g, ff_data_guid)) {
+ int stream_index = ff_find_stream_index(s, sid);
+ if (mode == SEEK_TO_DATA && stream_index >= 0 && len > 32) {
+ WtvStream *wst = s->streams[stream_index]->priv_data;
+ wst->seen_data = 1;
+ if (len_ptr) {
+ *len_ptr = len;
+ }
+ return stream_index;
+ }
+ } else if (
+ !ff_guidcmp(g, /* DSATTRIB_CAPTURE_STREAMTIME */ (const ff_asf_guid){0x14,0x56,0x1A,0x0C,0xCD,0x30,0x40,0x4F,0xBC,0xBF,0xD0,0x3E,0x52,0x30,0x62,0x07}) ||
+ !ff_guidcmp(g, /* DSATTRIB_PicSampleSeq */ (const ff_asf_guid){0x02,0xAE,0x5B,0x2F,0x8F,0x7B,0x60,0x4F,0x82,0xD6,0xE4,0xEA,0x2F,0x1F,0x4C,0x99}) ||
+ !ff_guidcmp(g, /* DSATTRIB_TRANSPORT_PROPERTIES */ (const ff_asf_guid){0x12,0xF6,0x22,0xB6,0xAD,0x47,0x71,0x46,0xAD,0x6C,0x05,0xA9,0x8E,0x65,0xDE,0x3A}) ||
+ !ff_guidcmp(g, /* dvr_ms_vid_frame_rep_data */ (const ff_asf_guid){0xCC,0x32,0x64,0xDD,0x29,0xE2,0xDB,0x40,0x80,0xF6,0xD2,0x63,0x28,0xD2,0x76,0x1F}) ||
+ !ff_guidcmp(g, /* EVENTID_ChannelChangeSpanningEvent */ (const ff_asf_guid){0xE5,0xC5,0x67,0x90,0x5C,0x4C,0x05,0x42,0x86,0xC8,0x7A,0xFE,0x20,0xFE,0x1E,0xFA}) ||
+ !ff_guidcmp(g, /* EVENTID_ChannelInfoSpanningEvent */ (const ff_asf_guid){0x80,0x6D,0xF3,0x41,0x32,0x41,0xC2,0x4C,0xB1,0x21,0x01,0xA4,0x32,0x19,0xD8,0x1B}) ||
+ !ff_guidcmp(g, /* EVENTID_ChannelTypeSpanningEvent */ (const ff_asf_guid){0x51,0x1D,0xAB,0x72,0xD2,0x87,0x9B,0x48,0xBA,0x11,0x0E,0x08,0xDC,0x21,0x02,0x43}) ||
+ !ff_guidcmp(g, /* EVENTID_PIDListSpanningEvent */ (const ff_asf_guid){0x65,0x8F,0xFC,0x47,0xBB,0xE2,0x34,0x46,0x9C,0xEF,0xFD,0xBF,0xE6,0x26,0x1D,0x5C}) ||
+ !ff_guidcmp(g, /* EVENTID_SignalAndServiceStatusSpanningEvent */ (const ff_asf_guid){0xCB,0xC5,0x68,0x80,0x04,0x3C,0x2B,0x49,0xB4,0x7D,0x03,0x08,0x82,0x0D,0xCE,0x51}) ||
+ !ff_guidcmp(g, /* EVENTID_StreamTypeSpanningEvent */ (const ff_asf_guid){0xBC,0x2E,0xAF,0x82,0xA6,0x30,0x64,0x42,0xA8,0x0B,0xAD,0x2E,0x13,0x72,0xAC,0x60}) ||
+ !ff_guidcmp(g, (const ff_asf_guid){0x1E,0xBE,0xC3,0xC5,0x43,0x92,0xDC,0x11,0x85,0xE5,0x00,0x12,0x3F,0x6F,0x73,0xB9}) ||
+ !ff_guidcmp(g, (const ff_asf_guid){0x3B,0x86,0xA2,0xB1,0xEB,0x1E,0xC3,0x44,0x8C,0x88,0x1C,0xA3,0xFF,0xE3,0xE7,0x6A}) ||
+ !ff_guidcmp(g, (const ff_asf_guid){0x4E,0x7F,0x4C,0x5B,0xC4,0xD0,0x38,0x4B,0xA8,0x3E,0x21,0x7F,0x7B,0xBF,0x52,0xE7}) ||
+ !ff_guidcmp(g, (const ff_asf_guid){0x63,0x36,0xEB,0xFE,0xA1,0x7E,0xD9,0x11,0x83,0x08,0x00,0x07,0xE9,0x5E,0xAD,0x8D}) ||
+ !ff_guidcmp(g, (const ff_asf_guid){0x70,0xE9,0xF1,0xF8,0x89,0xA4,0x4C,0x4D,0x83,0x73,0xB8,0x12,0xE0,0xD5,0xF8,0x1E}) ||
+ !ff_guidcmp(g, (const ff_asf_guid){0x96,0xC3,0xD2,0xC2,0x7E,0x9A,0xDA,0x11,0x8B,0xF7,0x00,0x07,0xE9,0x5E,0xAD,0x8D}) ||
+ !ff_guidcmp(g, (const ff_asf_guid){0x97,0xC3,0xD2,0xC2,0x7E,0x9A,0xDA,0x11,0x8B,0xF7,0x00,0x07,0xE9,0x5E,0xAD,0x8D}) ||
+ !ff_guidcmp(g, (const ff_asf_guid){0xA1,0xC3,0xD2,0xC2,0x7E,0x9A,0xDA,0x11,0x8B,0xF7,0x00,0x07,0xE9,0x5E,0xAD,0x8D})) {
+ //ignore known guids
+ } else
+ av_log(s, AV_LOG_WARNING, "unsupported chunk:"FF_PRI_GUID"\n", FF_ARG_GUID(g));
+
+ avio_skip(pb, WTV_PAD8(len) - consumed);
+ }
+ return AVERROR_EOF;
+}
+
+/* declare utf16le strings */
+#define _ , 0,
+static const uint8_t timeline_le16[] =
+ {'t'_'i'_'m'_'e'_'l'_'i'_'n'_'e', 0};
+static const uint8_t table_0_entries_legacy_attrib_le16[] =
+ {'t'_'a'_'b'_'l'_'e'_'.'_'0'_'.'_'e'_'n'_'t'_'r'_'i'_'e'_'s'_'.'_'l'_'e'_'g'_'a'_'c'_'y'_'_'_'a'_'t'_'t'_'r'_'i'_'b', 0};
+static const uint8_t table_0_entries_time_le16[] =
+ {'t'_'a'_'b'_'l'_'e'_'.'_'0'_'.'_'e'_'n'_'t'_'r'_'i'_'e'_'s'_'.'_'t'_'i'_'m'_'e', 0};
+static const uint8_t timeline_table_0_entries_Events_le16[] =
+ {'t'_'i'_'m'_'e'_'l'_'i'_'n'_'e'_'.'_'t'_'a'_'b'_'l'_'e'_'.'_'0'_'.'_'e'_'n'_'t'_'r'_'i'_'e'_'s'_'.'_'E'_'v'_'e'_'n'_'t'_'s', 0};
+#undef _
+
+static int read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ WtvContext *wtv = s->priv_data;
+ int root_sector, root_size;
+ uint8_t root[WTV_SECTOR_SIZE];
+ AVIOContext *pb;
+ int64_t timeline_pos;
+ int ret;
+
+ wtv->epoch =
+ wtv->pts =
+ wtv->last_valid_pts = AV_NOPTS_VALUE;
+
+ /* read root directory sector */
+ avio_skip(s->pb, 0x30);
+ root_size = avio_rl32(s->pb);
+ if (root_size > sizeof(root)) {
+ av_log(s, AV_LOG_ERROR, "root directory size exceeds sector size\n");
+ return AVERROR_INVALIDDATA;
+ }
+ avio_skip(s->pb, 4);
+ root_sector = avio_rl32(s->pb);
+
+ avio_seek(s->pb, root_sector << WTV_SECTOR_BITS, SEEK_SET);
+ root_size = avio_read(s->pb, root, root_size);
+ if (root_size < 0)
+ return AVERROR_INVALIDDATA;
+
+ /* parse chunks up until first data chunk */
+ wtv->pb = wtvfile_open(s, root, root_size, timeline_le16);
+ if (!wtv->pb) {
+ av_log(s, AV_LOG_ERROR, "timeline data missing\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ ret = parse_chunks(s, SEEK_TO_DATA, 0, 0);
+ if (ret < 0)
+ return ret;
+ avio_seek(wtv->pb, -32, SEEK_CUR);
+
+ timeline_pos = avio_tell(s->pb); // save before opening another file
+
+ /* read metadata */
+ pb = wtvfile_open(s, root, root_size, table_0_entries_legacy_attrib_le16);
+ if (pb) {
+ parse_legacy_attrib(s, pb);
+ wtvfile_close(pb);
+ }
+
+ /* read seek index */
+ if (s->nb_streams) {
+ AVStream *st = s->streams[0];
+ pb = wtvfile_open(s, root, root_size, table_0_entries_time_le16);
+ if (pb) {
+ while(1) {
+ uint64_t timestamp = avio_rl64(pb);
+ uint64_t frame_nb = avio_rl64(pb);
+ if (url_feof(pb))
+ break;
+ ff_add_index_entry(&wtv->index_entries, &wtv->nb_index_entries, &wtv->index_entries_allocated_size,
+ 0, timestamp, frame_nb, 0, AVINDEX_KEYFRAME);
+ }
+ wtvfile_close(pb);
+
+ if (wtv->nb_index_entries) {
+ pb = wtvfile_open(s, root, root_size, timeline_table_0_entries_Events_le16);
+ if (pb) {
+ int i;
+ while (1) {
+ uint64_t frame_nb = avio_rl64(pb);
+ uint64_t position = avio_rl64(pb);
+ if (url_feof(pb))
+ break;
+ for (i = wtv->nb_index_entries - 1; i >= 0; i--) {
+ AVIndexEntry *e = wtv->index_entries + i;
+ if (frame_nb > e->size)
+ break;
+ if (position > e->pos)
+ e->pos = position;
+ }
+ }
+ wtvfile_close(pb);
+ st->duration = wtv->index_entries[wtv->nb_index_entries - 1].timestamp;
+ }
+ }
+ }
+ }
+
+ avio_seek(s->pb, timeline_pos, SEEK_SET);
+ return 0;
+}
+
+static int read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ WtvContext *wtv = s->priv_data;
+ AVIOContext *pb = wtv->pb;
+ int stream_index, len, ret;
+
+ stream_index = parse_chunks(s, SEEK_TO_DATA, 0, &len);
+ if (stream_index < 0)
+ return stream_index;
+
+ ret = av_get_packet(pb, pkt, len - 32);
+ if (ret < 0)
+ return ret;
+ pkt->stream_index = stream_index;
+ pkt->pts = wtv->pts;
+ avio_skip(pb, WTV_PAD8(len) - len);
+ return 0;
+}
+
+static int read_seek(AVFormatContext *s, int stream_index,
+ int64_t ts, int flags)
+{
+ WtvContext *wtv = s->priv_data;
+ AVIOContext *pb = wtv->pb;
+ AVStream *st = s->streams[0];
+ int64_t ts_relative;
+ int i;
+
+ if ((flags & AVSEEK_FLAG_FRAME) || (flags & AVSEEK_FLAG_BYTE))
+ return AVERROR(ENOSYS);
+
+ /* timestamp adjustment is required because wtv->pts values are absolute,
+ * whereas AVIndexEntry->timestamp values are relative to epoch. */
+ ts_relative = ts;
+ if (wtv->epoch != AV_NOPTS_VALUE)
+ ts_relative -= wtv->epoch;
+
+ i = ff_index_search_timestamp(wtv->index_entries, wtv->nb_index_entries, ts_relative, flags);
+ if (i < 0) {
+ if (wtv->last_valid_pts == AV_NOPTS_VALUE || ts < wtv->last_valid_pts)
+ avio_seek(pb, 0, SEEK_SET);
+ else if (st->duration != AV_NOPTS_VALUE && ts_relative > st->duration && wtv->nb_index_entries)
+ avio_seek(pb, wtv->index_entries[wtv->nb_index_entries - 1].pos, SEEK_SET);
+ if (parse_chunks(s, SEEK_TO_PTS, ts, 0) < 0)
+ return AVERROR(ERANGE);
+ return 0;
+ }
+ wtv->pts = wtv->index_entries[i].timestamp;
+ if (wtv->epoch != AV_NOPTS_VALUE)
+ wtv->pts += wtv->epoch;
+ wtv->last_valid_pts = wtv->pts;
+ avio_seek(pb, wtv->index_entries[i].pos, SEEK_SET);
+ return 0;
+}
+
+static int read_close(AVFormatContext *s)
+{
+ WtvContext *wtv = s->priv_data;
+ wtvfile_close(wtv->pb);
+ return 0;
+}
+
+AVInputFormat ff_wtv_demuxer = {
+ .name = "wtv",
+ .long_name = NULL_IF_CONFIG_SMALL("Windows Television (WTV)"),
+ .priv_data_size = sizeof(WtvContext),
+ .read_probe = read_probe,
+ .read_header = read_header,
+ .read_packet = read_packet,
+ .read_seek = read_seek,
+ .read_close = read_close,
+ .flags = AVFMT_SHOW_IDS,
+};
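
Note for readers following the demuxer above: every WTV chunk begins with a 16-byte GUID, a 32-bit length, a 32-bit stream id and 8 further bytes (the 32 header bytes accounted for by consumed = 32 in parse_chunks()), and chunks are padded out to 8-byte boundaries, which is what WTV_PAD8() rounds up to. A minimal standalone sketch of that alignment, with illustrative values:

    #include <assert.h>

    /* same rounding as the WTV_PAD8 macro defined inside parse_chunks() */
    #define WTV_PAD8(x) (((x) + 7) & ~7)

    int main(void)
    {
        assert(WTV_PAD8(32) == 32);  /* bare 32-byte chunk header, already aligned  */
        assert(WTV_PAD8(33) == 40);  /* 1 payload byte -> 7 bytes of padding follow */
        assert(WTV_PAD8(57) == 64);
        /* parse_chunks() advances by WTV_PAD8(len) - consumed to land on the
         * next chunk header, regardless of how much payload it actually read. */
        return 0;
    }
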
diff --git a/libavutil/Makefile b/libavutil/Makefile
index baee496e7c..6fd261489e 100644
--- a/libavutil/Makefile
+++ b/libavutil/Makefile
@@ -76,7 +76,7 @@ OBJS-$(ARCH_ARM) += arm/cpu.o
OBJS-$(ARCH_PPC) += ppc/cpu.o
OBJS-$(ARCH_X86) += x86/cpu.o
-TESTPROGS = adler32 aes base64 cpu crc des lls md5 pca sha softfloat tree
+TESTPROGS = adler32 aes base64 cpu crc des lls md5 pca sha tree
TESTPROGS-$(HAVE_LZO1X_999_COMPRESS) += lzo
DIRS = arm bfin sh4 x86
diff --git a/libavutil/avutil.h b/libavutil/avutil.h
index 5d712e0db2..3f2c839e3e 100644
--- a/libavutil/avutil.h
+++ b/libavutil/avutil.h
@@ -40,8 +40,8 @@
#define AV_VERSION(a, b, c) AV_VERSION_DOT(a, b, c)
#define LIBAVUTIL_VERSION_MAJOR 50
-#define LIBAVUTIL_VERSION_MINOR 40
-#define LIBAVUTIL_VERSION_MICRO 1
+#define LIBAVUTIL_VERSION_MINOR 41
+#define LIBAVUTIL_VERSION_MICRO 0
#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
LIBAVUTIL_VERSION_MINOR, \
diff --git a/libavutil/common.h b/libavutil/common.h
index 5814297cf2..1cd2de2909 100644
--- a/libavutil/common.h
+++ b/libavutil/common.h
@@ -47,6 +47,8 @@
#define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b))
/* assume b>0 */
#define ROUNDED_DIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b))
+#define FFUDIV(a,b) (((a)>0 ?(a):(a)-(b)+1) / (b))
+#define FFUMOD(a,b) ((a)-(b)*FFUDIV(a,b))
#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))
#define FFSIGN(a) ((a) > 0 ? 1 : -1)
@@ -170,6 +172,18 @@ static av_always_inline av_const int32_t av_clipl_int32_c(int64_t a)
}
/**
+ * Clip a signed integer to an unsigned power of two range.
+ * @param a value to clip
+ * @param p bit position to clip at
+ * @return clipped value
+ */
+static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p)
+{
+ if (a & ~((1<<p) - 1)) return -a >> 31 & ((1<<p) - 1);
+ else return a;
+}
+
+/**
* Clip a float value into the amin-amax range.
* @param a value to clip
* @param amin minimum value of the clip range
@@ -362,6 +376,9 @@ static av_always_inline av_const int av_popcount_c(uint32_t x)
#ifndef av_clipl_int32
# define av_clipl_int32 av_clipl_int32_c
#endif
+#ifndef av_clip_uintp2
+# define av_clip_uintp2 av_clip_uintp2_c
+#endif
#ifndef av_clipf
# define av_clipf av_clipf_c
#endif
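
Two of the additions to common.h are easy to misread at a glance: FFUDIV/FFUMOD give floor division and a non-negative remainder for positive divisors (unlike C's '/' and '%', which truncate toward zero), and av_clip_uintp2() clips a signed value into [0, 2^p - 1]. A small self-contained check; the local clip_uintp2() mirrors av_clip_uintp2_c() above purely for illustration:

    #include <assert.h>

    #define FFUDIV(a,b) (((a)>0 ?(a):(a)-(b)+1) / (b))
    #define FFUMOD(a,b) ((a)-(b)*FFUDIV(a,b))

    static unsigned clip_uintp2(int a, int p)
    {
        if (a & ~((1 << p) - 1)) return -a >> 31 & ((1 << p) - 1);
        else                     return a;
    }

    int main(void)
    {
        /* floor division / wrapped remainder, even for negative dividends */
        assert(FFUDIV(-1, 8) == -1 && FFUMOD(-1, 8) == 7);
        assert(FFUDIV( 9, 8) ==  1 && FFUMOD( 9, 8) == 1);

        /* clip into [0, 2^p - 1] */
        assert(clip_uintp2( 300, 8) == 255);
        assert(clip_uintp2(  -5, 8) ==   0);
        assert(clip_uintp2( 200, 8) == 200);
        return 0;
    }
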
diff --git a/libavutil/eval.c b/libavutil/eval.c
index 494b936c4b..98b4e0ac52 100644
--- a/libavutil/eval.c
+++ b/libavutil/eval.c
@@ -122,6 +122,7 @@ struct AVExpr {
e_mod, e_max, e_min, e_eq, e_gt, e_gte,
e_pow, e_mul, e_div, e_add,
e_last, e_st, e_while, e_floor, e_ceil, e_trunc,
+ e_sqrt,
} type;
double value; // is sign in other types
union {
@@ -148,6 +149,7 @@ static double eval_expr(Parser *p, AVExpr *e)
case e_floor: return e->value * floor(eval_expr(p, e->param[0]));
case e_ceil : return e->value * ceil (eval_expr(p, e->param[0]));
case e_trunc: return e->value * trunc(eval_expr(p, e->param[0]));
+ case e_sqrt: return e->value * sqrt (eval_expr(p, e->param[0]));
case e_while: {
double d = NAN;
while (eval_expr(p, e->param[0]))
@@ -282,6 +284,7 @@ static int parse_primary(AVExpr **e, Parser *p)
else if (strmatch(next, "floor" )) d->type = e_floor;
else if (strmatch(next, "ceil" )) d->type = e_ceil;
else if (strmatch(next, "trunc" )) d->type = e_trunc;
+ else if (strmatch(next, "sqrt" )) d->type = e_sqrt;
else {
for (i=0; p->func1_names && p->func1_names[i]; i++) {
if (strmatch(next, p->func1_names[i])) {
@@ -449,6 +452,7 @@ static int verify_expr(AVExpr *e)
case e_floor:
case e_ceil:
case e_trunc:
+ case e_sqrt:
return verify_expr(e->param[0]);
default: return verify_expr(e->param[0]) && verify_expr(e->param[1]);
}
@@ -628,6 +632,8 @@ int main(void)
"trunc(-123.123)",
"ceil(123.123)",
"ceil(-123.123)",
+ "sqrt(1764)",
+ "sqrt(-1)",
NULL
};
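
The eval.c hunk teaches the expression evaluator the sqrt() function; the two new self-test strings evaluate to 42 and NAN respectively. A minimal caller sketch, assuming the public av_expr_parse_and_eval() entry point of this libavutil version (the exact signature is recalled from memory, so treat it as an assumption):

    #include <stdio.h>
    #include "libavutil/eval.h"

    int main(void)
    {
        double v;
        /* "sqrt(1764)" -> 42, "sqrt(-1)" -> nan, matching the new test vectors */
        if (av_expr_parse_and_eval(&v, "sqrt(1764)",
                                   NULL, NULL,   /* no constants              */
                                   NULL, NULL,   /* no custom 1-arg functions */
                                   NULL, NULL,   /* no custom 2-arg functions */
                                   NULL, 0, NULL) >= 0)
            printf("sqrt(1764) = %f\n", v);
        return 0;
    }
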
diff --git a/libavutil/fifo.c b/libavutil/fifo.c
index cfb716e53b..d10198972d 100644
--- a/libavutil/fifo.c
+++ b/libavutil/fifo.c
@@ -38,7 +38,7 @@ AVFifoBuffer *av_fifo_alloc(unsigned int size)
void av_fifo_free(AVFifoBuffer *f)
{
if(f){
- av_free(f->buffer);
+ av_freep(&f->buffer);
av_free(f);
}
}
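
The fifo change swaps av_free() for av_freep(), which takes the address of the pointer, frees the buffer and sets the pointer to NULL, so a stale f->buffer cannot be dereferenced or double-freed later. The difference in isolation:

    #include <stdint.h>
    #include "libavutil/mem.h"

    void example(void)
    {
        uint8_t *buf = av_malloc(1024);
        av_free(buf);    /* buf still holds the freed address (dangling)  */
        buf = NULL;      /* ...unless the caller remembers to clear it    */

        buf = av_malloc(1024);
        av_freep(&buf);  /* frees the buffer and sets buf to NULL in one call */
    }
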
diff --git a/libavutil/file.c b/libavutil/file.c
index c79f68aab3..0704080c5b 100644
--- a/libavutil/file.c
+++ b/libavutil/file.c
@@ -76,7 +76,7 @@ int av_file_map(const char *filename, uint8_t **bufptr, size_t *size,
#if HAVE_MMAP
ptr = mmap(NULL, *size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
- if ((int)(ptr) == -1) {
+ if (ptr == MAP_FAILED) {
err = AVERROR(errno);
av_strerror(err, errbuf, sizeof(errbuf));
av_log(&file_log_ctx, AV_LOG_ERROR, "Error occurred in mmap(): %s\n", errbuf);
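
The file.c fix matters on 64-bit systems: casting the mmap() return value to int truncates the pointer, so a valid mapping whose low 32 bits are all ones would be mistaken for a failure. The portable idiom is to compare against the documented sentinel:

    #include <stdio.h>
    #include <sys/mman.h>

    void *map_private(int fd, size_t size)
    {
        void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
        if (ptr == MAP_FAILED) {   /* never compare against casts of -1 */
            perror("mmap");
            return NULL;
        }
        return ptr;
    }
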
diff --git a/libavutil/internal.h b/libavutil/internal.h
index 0bd842f4c6..b5cc2a6cd1 100644
--- a/libavutil/internal.h
+++ b/libavutil/internal.h
@@ -137,6 +137,8 @@
#define sprintf sprintf_is_forbidden_due_to_security_issues_use_snprintf
#undef strcat
#define strcat strcat_is_forbidden_due_to_security_issues_use_av_strlcat
+#undef strncpy
+#define strncpy strncpy_is_forbidden_due_to_security_issues_use_av_strlcpy
#undef exit
#define exit exit_is_forbidden
#ifndef LIBAVFORMAT_BUILD
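
strncpy() joins the poisoned list because it neither guarantees NUL termination when the source fills the buffer nor reports truncation; the in-tree replacement (used in the libpostproc hunk further down) is av_strlcpy(), which always terminates the destination and returns the source length so truncation can be detected:

    #include <stdio.h>
    #include "libavutil/avstring.h"

    void set_name(const char *name)
    {
        char buf[16];
        size_t n = av_strlcpy(buf, name, sizeof(buf)); /* buf is always NUL-terminated */
        if (n >= sizeof(buf))
            printf("'%s' truncated to '%s'\n", name, buf);
    }
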
diff --git a/libavutil/log.c b/libavutil/log.c
index abf939d65e..4e9cfe9c01 100644
--- a/libavutil/log.c
+++ b/libavutil/log.c
@@ -114,7 +114,7 @@ void av_log_default_callback(void* ptr, int level, const char* fmt, va_list vl)
vsnprintf(line + strlen(line), sizeof(line) - strlen(line), fmt, vl);
- print_prefix= line[strlen(line)-1] == '\n';
+ print_prefix = strlen(line) && line[strlen(line)-1] == '\n';
#if HAVE_ISATTY
if(!is_atty) is_atty= isatty(2) ? 1 : -1;
diff --git a/libavutil/log.h b/libavutil/log.h
index 0bcf2c4982..c87125d2d5 100644
--- a/libavutil/log.h
+++ b/libavutil/log.h
@@ -143,7 +143,7 @@ const char* av_default_item_name(void* ctx);
#ifdef DEBUG
# define av_dlog(pctx, ...) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__)
#else
-# define av_dlog(pctx, ...)
+# define av_dlog(pctx, ...) do { if (0) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__); } while (0)
#endif
/**
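
The av_dlog() change replaces an empty expansion with a statement that is compiled but never executed: the format string and arguments remain type-checked in release builds, variables referenced only in debug logging no longer trigger unused-variable warnings, and the `if (0)` lets the optimizer drop the call entirely. The same idiom in isolation (dbg_log is an illustrative name):

    #include <stdio.h>

    /* compiled-but-dead variant: arguments are parsed and type-checked,
     * then the whole call is eliminated as unreachable code */
    #define dbg_log(...) do { if (0) fprintf(stderr, __VA_ARGS__); } while (0)

    int main(void)
    {
        int frames = 25;
        dbg_log("decoded %d frames\n", frames); /* prints nothing, costs nothing */
        return 0;
    }
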
diff --git a/libavutil/mathematics.c b/libavutil/mathematics.c
index c6851cb755..cfe8fbc9eb 100644
--- a/libavutil/mathematics.c
+++ b/libavutil/mathematics.c
@@ -27,6 +27,7 @@
#include <stdint.h>
#include <limits.h>
#include "mathematics.h"
+#include "libavutil/common.h"
const uint8_t ff_sqrt_tab[256]={
0, 16, 23, 28, 32, 36, 40, 43, 46, 48, 51, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 77, 79, 80, 82, 84, 85, 87, 88, 90,
@@ -139,6 +140,8 @@ int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq){
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b){
int64_t a= tb_a.num * (int64_t)tb_b.den;
int64_t b= tb_b.num * (int64_t)tb_a.den;
+ if((FFABS(ts_a)|a|FFABS(ts_b)|b)<=INT_MAX)
+ return (ts_a*a > ts_b*b) - (ts_a*a < ts_b*b);
if (av_rescale_rnd(ts_a, a, b, AV_ROUND_DOWN) < ts_b) return -1;
if (av_rescale_rnd(ts_b, b, a, AV_ROUND_DOWN) < ts_a) return 1;
return 0;
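
The av_compare_ts() addition is a fast path: when the timestamps and the cross-multiplied time bases all fit in 32 bits, the two 64-bit products can be compared directly instead of going through two av_rescale_rnd() calls. The comparison it performs, with hypothetical timestamps and time bases:

    #include <assert.h>
    #include <stdint.h>

    /* ts_a*num_a/den_a <?> ts_b*num_b/den_b
     * <=> ts_a*(num_a*den_b) <?> ts_b*(num_b*den_a), when nothing overflows */
    static int compare_ts_small(int64_t ts_a, int num_a, int den_a,
                                int64_t ts_b, int num_b, int den_b)
    {
        int64_t a = num_a * (int64_t)den_b;
        int64_t b = num_b * (int64_t)den_a;
        return (ts_a * a > ts_b * b) - (ts_a * a < ts_b * b);
    }

    int main(void)
    {
        /* 40 ticks of 1/25 s (1.6 s) vs 90000 ticks of 1/90000 s (1.0 s) */
        assert(compare_ts_small(40, 1, 25, 90000, 1, 90000) == 1);
        /* equal instants compare as 0 */
        assert(compare_ts_small(25, 1, 25, 90000, 1, 90000) == 0);
        return 0;
    }
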
diff --git a/libavutil/mem.c b/libavutil/mem.c
index de3a3635e7..de77e73ddb 100644
--- a/libavutil/mem.c
+++ b/libavutil/mem.c
@@ -24,6 +24,8 @@
* default memory allocator for libavutil
*/
+#define _XOPEN_SOURCE 600
+
#include "config.h"
#include <limits.h>
@@ -57,6 +59,8 @@ void free(void *ptr);
#endif /* MALLOC_PREFIX */
+#define ALIGN (HAVE_AVX ? 32 : 16)
+
/* You can redefine av_malloc and av_free in your project to use your
memory allocator. You do not need to suppress this file because the
linker will do it automatically. */
@@ -71,21 +75,19 @@ void *av_malloc(FF_INTERNAL_MEM_TYPE size)
/* let's disallow possible ambiguous cases */
if(size > (INT_MAX-32) )
return NULL;
- else if(!size)
- size= 1;
#if CONFIG_MEMALIGN_HACK
- ptr = malloc(size+32);
+ ptr = malloc(size+ALIGN);
if(!ptr)
return ptr;
- diff= ((-(long)ptr - 1)&31) + 1;
+ diff= ((-(long)ptr - 1)&(ALIGN-1)) + 1;
ptr = (char*)ptr + diff;
((char*)ptr)[-1]= diff;
#elif HAVE_POSIX_MEMALIGN
- if (posix_memalign(&ptr,32,size))
+ if (posix_memalign(&ptr,ALIGN,size))
ptr = NULL;
#elif HAVE_MEMALIGN
- ptr = memalign(32,size);
+ ptr = memalign(ALIGN,size);
/* Why 64?
Indeed, we should align it:
on 4 for 386
@@ -113,6 +115,8 @@ void *av_malloc(FF_INTERNAL_MEM_TYPE size)
#else
ptr = malloc(size);
#endif
+ if(!ptr && !size)
+ ptr= av_malloc(1);
return ptr;
}
@@ -132,7 +136,7 @@ void *av_realloc(void *ptr, FF_INTERNAL_MEM_TYPE size)
diff= ((char*)ptr)[-1];
return (char*)realloc((char*)ptr - diff, size + diff) + diff;
#else
- return realloc(ptr, size);
+ return realloc(ptr, size + !size);
#endif
}
@@ -173,3 +177,23 @@ char *av_strdup(const char *s)
return ptr;
}
+/* add one element to a dynamic array */
+void av_dynarray_add(void *tab_ptr, int *nb_ptr, void *elem)
+{
+ /* see similar ffmpeg.c:grow_array() */
+ int nb, nb_alloc;
+ intptr_t *tab;
+
+ nb = *nb_ptr;
+ tab = *(intptr_t**)tab_ptr;
+ if ((nb & (nb - 1)) == 0) {
+ if (nb == 0)
+ nb_alloc = 1;
+ else
+ nb_alloc = nb * 2;
+ tab = av_realloc(tab, nb_alloc * sizeof(intptr_t));
+ *(intptr_t**)tab_ptr = tab;
+ }
+ tab[nb++] = (intptr_t)elem;
+ *nb_ptr = nb;
+}
diff --git a/libavutil/mem.h b/libavutil/mem.h
index 2bfbd0eac5..b4059dc32c 100644
--- a/libavutil/mem.h
+++ b/libavutil/mem.h
@@ -131,4 +131,13 @@ char *av_strdup(const char *s) av_malloc_attrib;
*/
void av_freep(void *ptr);
+/**
+ * Add an element to a dynamic array.
+ *
+ * @param tab_ptr Pointer to the array.
+ * @param nb_ptr Pointer to the number of elements in the array.
+ * @param elem Element to be added.
+ */
+void av_dynarray_add(void *tab_ptr, int *nb_ptr, void *elem);
+
#endif /* AVUTIL_MEM_H */
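
av_dynarray_add() grows the backing array in power-of-two steps and stores one pointer-sized element per call; typical usage keeps a pointer array plus a count. A hypothetical caller (Item is an illustrative type; note that this early version does not report allocation failure to the caller):

    #include "libavutil/mem.h"

    typedef struct Item { int id; } Item;

    void collect_items(void)
    {
        Item **items    = NULL;
        int    nb_items = 0;
        int    i;

        for (i = 0; i < 10; i++) {
            Item *it = av_mallocz(sizeof(*it));
            it->id   = i;
            av_dynarray_add(&items, &nb_items, it); /* reallocates when nb_items hits a power of two */
        }

        for (i = 0; i < nb_items; i++)
            av_free(items[i]);
        av_free(items);
    }
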
diff --git a/libavutil/opt.c b/libavutil/opt.c
index e6cf34081c..720ad99e8e 100644
--- a/libavutil/opt.c
+++ b/libavutil/opt.c
@@ -33,10 +33,9 @@
//FIXME order them and do a bin search
const AVOption *av_find_opt(void *v, const char *name, const char *unit, int mask, int flags)
{
- AVClass *c= *(AVClass**)v; //FIXME silly way of storing AVClass
- const AVOption *o= c->option;
+ const AVOption *o = NULL;
- for (; o && o->name; o++) {
+ while ((o = av_next_option(v, o))) {
if (!strcmp(o->name, name) && (!unit || (o->unit && !strcmp(o->unit, unit))) && (o->flags & mask) == flags)
return o;
}
@@ -120,7 +119,7 @@ int av_set_string3(void *obj, const char *name, const char *val, int alloc, cons
*o_out = o;
if (!o)
return AVERROR_OPTION_NOT_FOUND;
- if (!val || o->offset<=0)
+ if ((!val && o->type != FF_OPT_TYPE_STRING) || o->offset<=0)
return AVERROR(EINVAL);
if (o->type == FF_OPT_TYPE_BINARY) {
@@ -164,8 +163,8 @@ int av_set_string3(void *obj, const char *name, const char *val, int alloc, cons
{
const AVOption *o_named= av_find_opt(obj, buf, o->unit, 0, 0);
if (o_named && o_named->type == FF_OPT_TYPE_CONST)
- d= o_named->default_val;
- else if (!strcmp(buf, "default")) d= o->default_val;
+ d= o_named->default_val.dbl;
+ else if (!strcmp(buf, "default")) d= o->default_val.dbl;
else if (!strcmp(buf, "max" )) d= o->max;
else if (!strcmp(buf, "min" )) d= o->min;
else if (!strcmp(buf, "none" )) d= 0;
@@ -417,25 +416,25 @@ void av_opt_set_defaults2(void *s, int mask, int flags)
case FF_OPT_TYPE_FLAGS:
case FF_OPT_TYPE_INT: {
int val;
- val = opt->default_val;
+ val = opt->default_val.dbl;
av_set_int(s, opt->name, val);
}
break;
case FF_OPT_TYPE_INT64:
- if ((double)(opt->default_val+0.6) == opt->default_val)
+ if ((double)(opt->default_val.dbl+0.6) == opt->default_val.dbl)
av_log(s, AV_LOG_DEBUG, "loss of precision in default of %s\n", opt->name);
- av_set_int(s, opt->name, opt->default_val);
+ av_set_int(s, opt->name, opt->default_val.dbl);
break;
case FF_OPT_TYPE_DOUBLE:
case FF_OPT_TYPE_FLOAT: {
double val;
- val = opt->default_val;
+ val = opt->default_val.dbl;
av_set_double(s, opt->name, val);
}
break;
case FF_OPT_TYPE_RATIONAL: {
AVRational val;
- val = av_d2q(opt->default_val, INT_MAX);
+ val = av_d2q(opt->default_val.dbl, INT_MAX);
av_set_q(s, opt->name, val);
}
break;
diff --git a/libavutil/opt.h b/libavutil/opt.h
index 67c2dd7c12..b04c7905d6 100644
--- a/libavutil/opt.h
+++ b/libavutil/opt.h
@@ -64,69 +64,23 @@ typedef struct AVOption {
/**
* the default value for scalar options
*/
- double default_val;
- double min; ///< minimum valid value for the option
- double max; ///< maximum valid value for the option
-
- int flags;
-#define AV_OPT_FLAG_ENCODING_PARAM 1 ///< a generic parameter which can be set by the user for muxing or encoding
-#define AV_OPT_FLAG_DECODING_PARAM 2 ///< a generic parameter which can be set by the user for demuxing or decoding
-#define AV_OPT_FLAG_METADATA 4 ///< some data extracted or inserted into the file like title, comment, ...
-#define AV_OPT_FLAG_AUDIO_PARAM 8
-#define AV_OPT_FLAG_VIDEO_PARAM 16
-#define AV_OPT_FLAG_SUBTITLE_PARAM 32
-//FIXME think about enc-audio, ... style flags
-
- /**
- * The logical unit to which the option belongs. Non-constant
- * options and corresponding named constants share the same
- * unit. May be NULL.
- */
- const char *unit;
-} AVOption;
-
-/**
- * AVOption2.
- * THIS IS NOT PART OF THE API/ABI YET!
- * This is identical to AVOption except that default_val was replaced by
- * an union, it should be compatible with AVOption on normal platforms.
- */
-typedef struct AVOption2 {
- const char *name;
-
- /**
- * short English help text
- * @todo What about other languages?
- */
- const char *help;
-
- /**
- * The offset relative to the context structure where the option
- * value is stored. It should be 0 for named constants.
- */
- int offset;
- enum AVOptionType type;
-
- /**
- * the default value for scalar options
- */
union {
double dbl;
const char *str;
+ /* TODO those are unused now */
+ int64_t i64;
+ AVRational q;
} default_val;
-
double min; ///< minimum valid value for the option
double max; ///< maximum valid value for the option
int flags;
-/*
#define AV_OPT_FLAG_ENCODING_PARAM 1 ///< a generic parameter which can be set by the user for muxing or encoding
#define AV_OPT_FLAG_DECODING_PARAM 2 ///< a generic parameter which can be set by the user for demuxing or decoding
#define AV_OPT_FLAG_METADATA 4 ///< some data extracted or inserted into the file like title, comment, ...
#define AV_OPT_FLAG_AUDIO_PARAM 8
#define AV_OPT_FLAG_VIDEO_PARAM 16
#define AV_OPT_FLAG_SUBTITLE_PARAM 32
-*/
//FIXME think about enc-audio, ... style flags
/**
@@ -135,8 +89,7 @@ typedef struct AVOption2 {
* unit. May be NULL.
*/
const char *unit;
-} AVOption2;
-
+} AVOption;
/**
* Look for an option in obj. Look only for the options which
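
The practical consequence of folding AVOption2 back into AVOption is that option tables must now initialize the default through the union, which is exactly what the libswscale/options.c hunk further down does. A hypothetical table entry before and after the change:

    /* old layout: default_val was a plain double */
    { "srcw", "source width", OFFSET(srcW), FF_OPT_TYPE_INT, 16,          1, INT_MAX, VE },

    /* new layout: default_val is a union, initialized through its .dbl member */
    { "srcw", "source width", OFFSET(srcW), FF_OPT_TYPE_INT, {.dbl = 16}, 1, INT_MAX, VE },
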
diff --git a/libavutil/pixdesc.c b/libavutil/pixdesc.c
index dae6dc264b..f8f99289f9 100644
--- a/libavutil/pixdesc.c
+++ b/libavutil/pixdesc.c
@@ -809,6 +809,29 @@ const AVPixFmtDescriptor av_pix_fmt_descriptors[PIX_FMT_NB] = {
},
.flags = PIX_FMT_BE,
},
+ [PIX_FMT_YUV422P10LE] = {
+ .name = "yuv422p10le",
+ .nb_components= 3,
+ .log2_chroma_w= 1,
+ .log2_chroma_h= 0,
+ .comp = {
+ {0,1,1,0,9}, /* Y */
+ {1,1,1,0,9}, /* U */
+ {2,1,1,0,9}, /* V */
+ },
+ },
+ [PIX_FMT_YUV422P10BE] = {
+ .name = "yuv422p10be",
+ .nb_components= 3,
+ .log2_chroma_w= 1,
+ .log2_chroma_h= 0,
+ .comp = {
+ {0,1,1,0,9}, /* Y */
+ {1,1,1,0,9}, /* U */
+ {2,1,1,0,9}, /* V */
+ },
+ .flags = PIX_FMT_BE,
+ },
[PIX_FMT_YUV422P16LE] = {
.name = "yuv422p16le",
.nb_components= 3,
diff --git a/libavutil/pixfmt.h b/libavutil/pixfmt.h
index f781967a7d..95972f9373 100644
--- a/libavutil/pixfmt.h
+++ b/libavutil/pixfmt.h
@@ -136,11 +136,13 @@ enum PixelFormat {
PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian
PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian
- //the following 4 formats are deprecated and should be replaced by PIX_FMT_YUV420P16* with the bpp stored seperately
+ //the following 6 formats are deprecated and should be replaced by PIX_FMT_YUV420P16* with the bpp stored separately
PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
};
@@ -170,6 +172,7 @@ enum PixelFormat {
#define PIX_FMT_YUV420P9 PIX_FMT_NE(YUV420P9BE , YUV420P9LE)
#define PIX_FMT_YUV420P10 PIX_FMT_NE(YUV420P10BE, YUV420P10LE)
+#define PIX_FMT_YUV422P10 PIX_FMT_NE(YUV422P10BE, YUV422P10LE)
#define PIX_FMT_YUV420P16 PIX_FMT_NE(YUV420P16BE, YUV420P16LE)
#define PIX_FMT_YUV422P16 PIX_FMT_NE(YUV422P16BE, YUV422P16LE)
#define PIX_FMT_YUV444P16 PIX_FMT_NE(YUV444P16BE, YUV444P16LE)
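
PIX_FMT_YUV422P10 is a native-endian alias: PIX_FMT_NE() resolves to the big-endian constant on big-endian builds and to the little-endian one otherwise. The per-component depth of the new formats can be read back from the descriptor table added above (depth_minus1 is 9, i.e. 10 bits); a small sketch, assuming the public av_pix_fmt_descriptors[] array of this libavutil version:

    #include <stdio.h>
    #include "libavutil/pixdesc.h"

    int main(void)
    {
        enum PixelFormat fmt = PIX_FMT_YUV422P10; /* ..._10BE or ..._10LE, chosen at compile time */
        const AVPixFmtDescriptor *d = &av_pix_fmt_descriptors[fmt];
        printf("%s: %d bits per component\n", d->name, d->comp[0].depth_minus1 + 1);
        return 0;
    }
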
diff --git a/libavutil/ppc/cpu.c b/libavutil/ppc/cpu.c
index 8db27391c0..fc38be6f65 100644
--- a/libavutil/ppc/cpu.c
+++ b/libavutil/ppc/cpu.c
@@ -17,10 +17,8 @@
*/
#ifdef __APPLE__
-#undef _POSIX_C_SOURCE
#include <sys/sysctl.h>
#elif defined(__OpenBSD__)
-#undef _POSIX_C_SOURCE
#include <sys/param.h>
#include <sys/sysctl.h>
#include <machine/cpu.h>
diff --git a/libpostproc/postprocess.c b/libpostproc/postprocess.c
index 02d7eae3f1..03e5f194d7 100644
--- a/libpostproc/postprocess.c
+++ b/libpostproc/postprocess.c
@@ -86,6 +86,7 @@ try to unroll inner for(x=0 ... loop to avoid these damn if(x ... checks
//#define DEBUG_BRIGHTNESS
#include "postprocess.h"
#include "postprocess_internal.h"
+#include "libavutil/avstring.h"
unsigned postproc_version(void)
{
@@ -766,7 +767,7 @@ pp_mode *pp_get_mode_by_name_and_quality(const char *name, int quality)
ppMode->maxClippedThreshold= 0.01;
ppMode->error=0;
- strncpy(temp, name, GET_MODE_BUFFER_SIZE);
+ av_strlcpy(temp, name, GET_MODE_BUFFER_SIZE);
av_log(NULL, AV_LOG_DEBUG, "pp: %s\n", name);
@@ -1103,4 +1104,3 @@ void pp_postprocess(const uint8_t * src[3], const int srcStride[3],
}
}
}
-
diff --git a/libswscale/options.c b/libswscale/options.c
index 5817d3faae..d3cd0a3190 100644
--- a/libswscale/options.c
+++ b/libswscale/options.c
@@ -34,40 +34,40 @@ static const char * sws_context_to_name(void * ptr)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
- { "sws_flags", "scaler/cpu flags", OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, 0, UINT_MAX, VE, "sws_flags" },
- { "fast_bilinear", "fast bilinear", 0, FF_OPT_TYPE_CONST, SWS_FAST_BILINEAR, INT_MIN, INT_MAX, VE, "sws_flags" },
- { "bilinear", "bilinear", 0, FF_OPT_TYPE_CONST, SWS_BILINEAR, INT_MIN, INT_MAX, VE, "sws_flags" },
- { "bicubic", "bicubic", 0, FF_OPT_TYPE_CONST, SWS_BICUBIC, INT_MIN, INT_MAX, VE, "sws_flags" },
- { "experimental", "experimental", 0, FF_OPT_TYPE_CONST, SWS_X, INT_MIN, INT_MAX, VE, "sws_flags" },
- { "neighbor", "nearest neighbor", 0, FF_OPT_TYPE_CONST, SWS_POINT, INT_MIN, INT_MAX, VE, "sws_flags" },
- { "area", "averaging area", 0, FF_OPT_TYPE_CONST, SWS_AREA, INT_MIN, INT_MAX, VE, "sws_flags" },
- { "bicublin", "luma bicubic, chroma bilinear", 0, FF_OPT_TYPE_CONST, SWS_BICUBLIN, INT_MIN, INT_MAX, VE, "sws_flags" },
- { "gauss", "gaussian", 0, FF_OPT_TYPE_CONST, SWS_GAUSS, INT_MIN, INT_MAX, VE, "sws_flags" },
- { "sinc", "sinc", 0, FF_OPT_TYPE_CONST, SWS_SINC, INT_MIN, INT_MAX, VE, "sws_flags" },
- { "lanczos", "lanczos", 0, FF_OPT_TYPE_CONST, SWS_LANCZOS, INT_MIN, INT_MAX, VE, "sws_flags" },
- { "spline", "natural bicubic spline", 0, FF_OPT_TYPE_CONST, SWS_SPLINE, INT_MIN, INT_MAX, VE, "sws_flags" },
- { "print_info", "print info", 0, FF_OPT_TYPE_CONST, SWS_PRINT_INFO, INT_MIN, INT_MAX, VE, "sws_flags" },
- { "accurate_rnd", "accurate rounding", 0, FF_OPT_TYPE_CONST, SWS_ACCURATE_RND, INT_MIN, INT_MAX, VE, "sws_flags" },
- { "mmx", "MMX SIMD acceleration", 0, FF_OPT_TYPE_CONST, SWS_CPU_CAPS_MMX, INT_MIN, INT_MAX, VE, "sws_flags" },
- { "mmx2", "MMX2 SIMD acceleration", 0, FF_OPT_TYPE_CONST, SWS_CPU_CAPS_MMX2, INT_MIN, INT_MAX, VE, "sws_flags" },
- { "sse2", "SSE2 SIMD acceleration", 0, FF_OPT_TYPE_CONST, SWS_CPU_CAPS_SSE2, INT_MIN, INT_MAX, VE, "sws_flags" },
- { "3dnow", "3DNOW SIMD acceleration", 0, FF_OPT_TYPE_CONST, SWS_CPU_CAPS_3DNOW, INT_MIN, INT_MAX, VE, "sws_flags" },
- { "altivec", "AltiVec SIMD acceleration", 0, FF_OPT_TYPE_CONST, SWS_CPU_CAPS_ALTIVEC, INT_MIN, INT_MAX, VE, "sws_flags" },
- { "bfin", "Blackfin SIMD acceleration", 0, FF_OPT_TYPE_CONST, SWS_CPU_CAPS_BFIN, INT_MIN, INT_MAX, VE, "sws_flags" },
- { "full_chroma_int", "full chroma interpolation", 0 , FF_OPT_TYPE_CONST, SWS_FULL_CHR_H_INT, INT_MIN, INT_MAX, VE, "sws_flags" },
- { "full_chroma_inp", "full chroma input", 0 , FF_OPT_TYPE_CONST, SWS_FULL_CHR_H_INP, INT_MIN, INT_MAX, VE, "sws_flags" },
- { "bitexact", "", 0 , FF_OPT_TYPE_CONST, SWS_BITEXACT, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "sws_flags", "scaler/cpu flags", OFFSET(flags), FF_OPT_TYPE_FLAGS, {.dbl = DEFAULT }, 0, UINT_MAX, VE, "sws_flags" },
+ { "fast_bilinear", "fast bilinear", 0, FF_OPT_TYPE_CONST, {.dbl = SWS_FAST_BILINEAR }, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "bilinear", "bilinear", 0, FF_OPT_TYPE_CONST, {.dbl = SWS_BILINEAR }, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "bicubic", "bicubic", 0, FF_OPT_TYPE_CONST, {.dbl = SWS_BICUBIC }, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "experimental", "experimental", 0, FF_OPT_TYPE_CONST, {.dbl = SWS_X }, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "neighbor", "nearest neighbor", 0, FF_OPT_TYPE_CONST, {.dbl = SWS_POINT }, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "area", "averaging area", 0, FF_OPT_TYPE_CONST, {.dbl = SWS_AREA }, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "bicublin", "luma bicubic, chroma bilinear", 0, FF_OPT_TYPE_CONST, {.dbl = SWS_BICUBLIN }, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "gauss", "gaussian", 0, FF_OPT_TYPE_CONST, {.dbl = SWS_GAUSS }, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "sinc", "sinc", 0, FF_OPT_TYPE_CONST, {.dbl = SWS_SINC }, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "lanczos", "lanczos", 0, FF_OPT_TYPE_CONST, {.dbl = SWS_LANCZOS }, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "spline", "natural bicubic spline", 0, FF_OPT_TYPE_CONST, {.dbl = SWS_SPLINE }, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "print_info", "print info", 0, FF_OPT_TYPE_CONST, {.dbl = SWS_PRINT_INFO }, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "accurate_rnd", "accurate rounding", 0, FF_OPT_TYPE_CONST, {.dbl = SWS_ACCURATE_RND }, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "mmx", "MMX SIMD acceleration", 0, FF_OPT_TYPE_CONST, {.dbl = SWS_CPU_CAPS_MMX }, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "mmx2", "MMX2 SIMD acceleration", 0, FF_OPT_TYPE_CONST, {.dbl = SWS_CPU_CAPS_MMX2 }, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "sse2", "SSE2 SIMD acceleration", 0, FF_OPT_TYPE_CONST, {.dbl = SWS_CPU_CAPS_SSE2 }, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "3dnow", "3DNOW SIMD acceleration", 0, FF_OPT_TYPE_CONST, {.dbl = SWS_CPU_CAPS_3DNOW }, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "altivec", "AltiVec SIMD acceleration", 0, FF_OPT_TYPE_CONST, {.dbl = SWS_CPU_CAPS_ALTIVEC }, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "bfin", "Blackfin SIMD acceleration", 0, FF_OPT_TYPE_CONST, {.dbl = SWS_CPU_CAPS_BFIN }, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "full_chroma_int", "full chroma interpolation", 0 , FF_OPT_TYPE_CONST, {.dbl = SWS_FULL_CHR_H_INT }, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "full_chroma_inp", "full chroma input", 0 , FF_OPT_TYPE_CONST, {.dbl = SWS_FULL_CHR_H_INP }, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "bitexact", "", 0 , FF_OPT_TYPE_CONST, {.dbl = SWS_BITEXACT }, INT_MIN, INT_MAX, VE, "sws_flags" },
- { "srcw", "source width" , OFFSET(srcW), FF_OPT_TYPE_INT, 16, 1, INT_MAX, VE },
- { "srch", "source height" , OFFSET(srcH), FF_OPT_TYPE_INT, 16, 1, INT_MAX, VE },
- { "dstw", "destination width" , OFFSET(dstW), FF_OPT_TYPE_INT, 16, 1, INT_MAX, VE },
- { "dsth", "destination height", OFFSET(dstH), FF_OPT_TYPE_INT, 16, 1, INT_MAX, VE },
- { "src_format", "source format" , OFFSET(srcFormat), FF_OPT_TYPE_INT, DEFAULT, 0, PIX_FMT_NB-1, VE },
- { "dst_format", "destination format", OFFSET(dstFormat), FF_OPT_TYPE_INT, DEFAULT, 0, PIX_FMT_NB-1, VE },
- { "src_range" , "source range" , OFFSET(srcRange) , FF_OPT_TYPE_INT, DEFAULT, 0, 1, VE },
- { "dst_range" , "destination range" , OFFSET(dstRange) , FF_OPT_TYPE_INT, DEFAULT, 0, 1, VE },
- { "param0" , "scaler param 0" , OFFSET(param[0]) , FF_OPT_TYPE_DOUBLE, SWS_PARAM_DEFAULT, INT_MIN, INT_MAX, VE },
- { "param1" , "scaler param 1" , OFFSET(param[1]) , FF_OPT_TYPE_DOUBLE, SWS_PARAM_DEFAULT, INT_MIN, INT_MAX, VE },
+ { "srcw", "source width" , OFFSET(srcW), FF_OPT_TYPE_INT, {.dbl = 16 }, 1, INT_MAX, VE },
+ { "srch", "source height" , OFFSET(srcH), FF_OPT_TYPE_INT, {.dbl = 16 }, 1, INT_MAX, VE },
+ { "dstw", "destination width" , OFFSET(dstW), FF_OPT_TYPE_INT, {.dbl = 16 }, 1, INT_MAX, VE },
+ { "dsth", "destination height", OFFSET(dstH), FF_OPT_TYPE_INT, {.dbl = 16 }, 1, INT_MAX, VE },
+ { "src_format", "source format" , OFFSET(srcFormat), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, 0, PIX_FMT_NB-1, VE },
+ { "dst_format", "destination format", OFFSET(dstFormat), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, 0, PIX_FMT_NB-1, VE },
+ { "src_range" , "source range" , OFFSET(srcRange) , FF_OPT_TYPE_INT, {.dbl = DEFAULT }, 0, 1, VE },
+ { "dst_range" , "destination range" , OFFSET(dstRange) , FF_OPT_TYPE_INT, {.dbl = DEFAULT }, 0, 1, VE },
+ { "param0" , "scaler param 0" , OFFSET(param[0]) , FF_OPT_TYPE_DOUBLE, {.dbl = SWS_PARAM_DEFAULT}, INT_MIN, INT_MAX, VE },
+ { "param1" , "scaler param 1" , OFFSET(param[1]) , FF_OPT_TYPE_DOUBLE, {.dbl = SWS_PARAM_DEFAULT}, INT_MIN, INT_MAX, VE },
{ NULL }
};
diff --git a/libswscale/ppc/swscale_altivec_template.c b/libswscale/ppc/swscale_altivec_template.c
index 299fe51b28..c7aa0fd2e6 100644
--- a/libswscale/ppc/swscale_altivec_template.c
+++ b/libswscale/ppc/swscale_altivec_template.c
@@ -161,12 +161,12 @@ yuv2yuvX_altivec_real(const int16_t *lumFilter, const int16_t **lumSrc, int lumF
perm = vec_lvsl(0, chrSrc[j]);
l1 = vec_ld(0, chrSrc[j]);
- l1_V = vec_ld(2048 << 1, chrSrc[j]);
+ l1_V = vec_ld(VOFW << 1, chrSrc[j]);
for (i = 0; i < (chrDstW - 7); i+=8) {
int offset = i << 2;
vector signed short l2 = vec_ld((i << 1) + 16, chrSrc[j]);
- vector signed short l2_V = vec_ld(((i + 2048) << 1) + 16, chrSrc[j]);
+ vector signed short l2_V = vec_ld(((i + VOFW) << 1) + 16, chrSrc[j]);
vector signed int v1 = vec_ld(offset, u);
vector signed int v2 = vec_ld(offset + 16, u);
@@ -174,7 +174,7 @@ yuv2yuvX_altivec_real(const int16_t *lumFilter, const int16_t **lumSrc, int lumF
vector signed int v2_V = vec_ld(offset + 16, v);
vector signed short ls = vec_perm(l1, l2, perm); // chrSrc[j][i] ... chrSrc[j][i+7]
- vector signed short ls_V = vec_perm(l1_V, l2_V, perm); // chrSrc[j][i+2048] ... chrSrc[j][i+2055]
+ vector signed short ls_V = vec_perm(l1_V, l2_V, perm); // chrSrc[j][i+VOFW] ... chrSrc[j][i+VOFW+7]
vector signed int i1 = vec_mule(vChrFilter, ls);
vector signed int i2 = vec_mulo(vChrFilter, ls);
@@ -201,7 +201,7 @@ yuv2yuvX_altivec_real(const int16_t *lumFilter, const int16_t **lumSrc, int lumF
}
for ( ; i < chrDstW; i++) {
u[i] += chrSrc[j][i] * chrFilter[j];
- v[i] += chrSrc[j][i + 2048] * chrFilter[j];
+ v[i] += chrSrc[j][i + VOFW] * chrFilter[j];
}
}
altivec_packIntArrayToCharArray(u, uDest, chrDstW);
diff --git a/libswscale/ppc/yuv2rgb_altivec.c b/libswscale/ppc/yuv2rgb_altivec.c
index bf605bc757..2b58eb27c9 100644
--- a/libswscale/ppc/yuv2rgb_altivec.c
+++ b/libswscale/ppc/yuv2rgb_altivec.c
@@ -817,7 +817,7 @@ ff_yuv2packedX_altivec(SwsContext *c,
for (j=0; j<chrFilterSize; j++) {
X = vec_ld (0, &chrSrc[j][i/2]);
U = vec_mradds (X, CCoeffs[j], U);
- X = vec_ld (0, &chrSrc[j][i/2+2048]);
+ X = vec_ld (0, &chrSrc[j][i/2+VOFW]);
V = vec_mradds (X, CCoeffs[j], V);
}
@@ -895,7 +895,7 @@ ff_yuv2packedX_altivec(SwsContext *c,
for (j=0; j<chrFilterSize; j++) {
X = vec_ld (0, &chrSrc[j][i/2]);
U = vec_mradds (X, CCoeffs[j], U);
- X = vec_ld (0, &chrSrc[j][i/2+2048]);
+ X = vec_ld (0, &chrSrc[j][i/2+VOFW]);
V = vec_mradds (X, CCoeffs[j], V);
}
diff --git a/libswscale/rgb2rgb_template.c b/libswscale/rgb2rgb_template.c
index 9293c460d5..9af0eaa366 100644
--- a/libswscale/rgb2rgb_template.c
+++ b/libswscale/rgb2rgb_template.c
@@ -2264,6 +2264,9 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_
ydst += lumStride;
src += srcStride;
+ if(y+1 == height)
+ break;
+
for (i=0; i<chromWidth; i++) {
unsigned int b = src[6*i+0];
unsigned int g = src[6*i+1];
diff --git a/libswscale/swscale.c b/libswscale/swscale.c
index b72ed79ae9..d53af2771d 100644
--- a/libswscale/swscale.c
+++ b/libswscale/swscale.c
@@ -266,14 +266,122 @@ DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[8][8]={
};
#endif
+DECLARE_ALIGNED(8, const uint8_t, dithers)[8][8][8]={
+{
+ { 0, 1, 0, 1, 0, 1, 0, 1,},
+ { 1, 0, 1, 0, 1, 0, 1, 0,},
+ { 0, 1, 0, 1, 0, 1, 0, 1,},
+ { 1, 0, 1, 0, 1, 0, 1, 0,},
+ { 0, 1, 0, 1, 0, 1, 0, 1,},
+ { 1, 0, 1, 0, 1, 0, 1, 0,},
+ { 0, 1, 0, 1, 0, 1, 0, 1,},
+ { 1, 0, 1, 0, 1, 0, 1, 0,},
+},{
+ { 1, 2, 1, 2, 1, 2, 1, 2,},
+ { 3, 0, 3, 0, 3, 0, 3, 0,},
+ { 1, 2, 1, 2, 1, 2, 1, 2,},
+ { 3, 0, 3, 0, 3, 0, 3, 0,},
+ { 1, 2, 1, 2, 1, 2, 1, 2,},
+ { 3, 0, 3, 0, 3, 0, 3, 0,},
+ { 1, 2, 1, 2, 1, 2, 1, 2,},
+ { 3, 0, 3, 0, 3, 0, 3, 0,},
+},{
+ { 2, 4, 3, 5, 2, 4, 3, 5,},
+ { 6, 0, 7, 1, 6, 0, 7, 1,},
+ { 3, 5, 2, 4, 3, 5, 2, 4,},
+ { 7, 1, 6, 0, 7, 1, 6, 0,},
+ { 2, 4, 3, 5, 2, 4, 3, 5,},
+ { 6, 0, 7, 1, 6, 0, 7, 1,},
+ { 3, 5, 2, 4, 3, 5, 2, 4,},
+ { 7, 1, 6, 0, 7, 1, 6, 0,},
+},{
+ { 4, 8, 7, 11, 4, 8, 7, 11,},
+ { 12, 0, 15, 3, 12, 0, 15, 3,},
+ { 6, 10, 5, 9, 6, 10, 5, 9,},
+ { 14, 2, 13, 1, 14, 2, 13, 1,},
+ { 4, 8, 7, 11, 4, 8, 7, 11,},
+ { 12, 0, 15, 3, 12, 0, 15, 3,},
+ { 6, 10, 5, 9, 6, 10, 5, 9,},
+ { 14, 2, 13, 1, 14, 2, 13, 1,},
+},{
+ { 9, 17, 15, 23, 8, 16, 14, 22,},
+ { 25, 1, 31, 7, 24, 0, 30, 6,},
+ { 13, 21, 11, 19, 12, 20, 10, 18,},
+ { 29, 5, 27, 3, 28, 4, 26, 2,},
+ { 8, 16, 14, 22, 9, 17, 15, 23,},
+ { 24, 0, 30, 6, 25, 1, 31, 7,},
+ { 12, 20, 10, 18, 13, 21, 11, 19,},
+ { 28, 4, 26, 2, 29, 5, 27, 3,},
+},{
+ { 18, 34, 30, 46, 17, 33, 29, 45,},
+ { 50, 2, 62, 14, 49, 1, 61, 13,},
+ { 26, 42, 22, 38, 25, 41, 21, 37,},
+ { 58, 10, 54, 6, 57, 9, 53, 5,},
+ { 16, 32, 28, 44, 19, 35, 31, 47,},
+ { 48, 0, 60, 12, 51, 3, 63, 15,},
+ { 24, 40, 20, 36, 27, 43, 23, 39,},
+ { 56, 8, 52, 4, 59, 11, 55, 7,},
+},{
+ { 18, 34, 30, 46, 17, 33, 29, 45,},
+ { 50, 2, 62, 14, 49, 1, 61, 13,},
+ { 26, 42, 22, 38, 25, 41, 21, 37,},
+ { 58, 10, 54, 6, 57, 9, 53, 5,},
+ { 16, 32, 28, 44, 19, 35, 31, 47,},
+ { 48, 0, 60, 12, 51, 3, 63, 15,},
+ { 24, 40, 20, 36, 27, 43, 23, 39,},
+ { 56, 8, 52, 4, 59, 11, 55, 7,},
+},{
+ { 36, 68, 60, 92, 34, 66, 58, 90,},
+ { 100, 4,124, 28, 98, 2,122, 26,},
+ { 52, 84, 44, 76, 50, 82, 42, 74,},
+ { 116, 20,108, 12,114, 18,106, 10,},
+ { 32, 64, 56, 88, 38, 70, 62, 94,},
+ { 96, 0,120, 24,102, 6,126, 30,},
+ { 48, 80, 40, 72, 54, 86, 46, 78,},
+ { 112, 16,104, 8,118, 22,110, 14,},
+}};
+
+uint16_t dither_scale[15][16]={
+{ 2, 3, 3, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,},
+{ 2, 3, 7, 7, 13, 13, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,},
+{ 3, 3, 4, 15, 15, 29, 57, 57, 57, 113, 113, 113, 113, 113, 113, 113,},
+{ 3, 4, 4, 5, 31, 31, 61, 121, 241, 241, 241, 241, 481, 481, 481, 481,},
+{ 3, 4, 5, 5, 6, 63, 63, 125, 249, 497, 993, 993, 993, 993, 993, 1985,},
+{ 3, 5, 6, 6, 6, 7, 127, 127, 253, 505, 1009, 2017, 4033, 4033, 4033, 4033,},
+{ 3, 5, 6, 7, 7, 7, 8, 255, 255, 509, 1017, 2033, 4065, 8129,16257,16257,},
+{ 3, 5, 6, 8, 8, 8, 8, 9, 511, 511, 1021, 2041, 4081, 8161,16321,32641,},
+{ 3, 5, 7, 8, 9, 9, 9, 9, 10, 1023, 1023, 2045, 4089, 8177,16353,32705,},
+{ 3, 5, 7, 8, 10, 10, 10, 10, 10, 11, 2047, 2047, 4093, 8185,16369,32737,},
+{ 3, 5, 7, 8, 10, 11, 11, 11, 11, 11, 12, 4095, 4095, 8189,16377,32753,},
+{ 3, 5, 7, 9, 10, 12, 12, 12, 12, 12, 12, 13, 8191, 8191,16381,32761,},
+{ 3, 5, 7, 9, 10, 12, 13, 13, 13, 13, 13, 13, 14,16383,16383,32765,},
+{ 3, 5, 7, 9, 10, 12, 14, 14, 14, 14, 14, 14, 14, 15,32767,32767,},
+{ 3, 5, 7, 9, 11, 12, 14, 15, 15, 15, 15, 15, 15, 15, 16,65535,},
+};
+
static av_always_inline void yuv2yuvX16inC_template(const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
const int16_t **alpSrc, uint16_t *dest, uint16_t *uDest, uint16_t *vDest, uint16_t *aDest,
- int dstW, int chrDstW, int big_endian)
+ int dstW, int chrDstW, int big_endian, int output_bits)
{
//FIXME Optimize (just quickly written not optimized..)
int i;
-
+ int shift = 11 + 16 - output_bits;
+
+#define output_pixel(pos, val) \
+ if (big_endian) { \
+ if (output_bits == 16) { \
+ AV_WB16(pos, av_clip_uint16(val >> shift)); \
+ } else { \
+ AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \
+ } \
+ } else { \
+ if (output_bits == 16) { \
+ AV_WL16(pos, av_clip_uint16(val >> shift)); \
+ } else { \
+ AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \
+ } \
+ }
for (i = 0; i < dstW; i++) {
int val = 1 << 10;
int j;
@@ -281,11 +389,7 @@ static av_always_inline void yuv2yuvX16inC_template(const int16_t *lumFilter, co
for (j = 0; j < lumFilterSize; j++)
val += lumSrc[j][i] * lumFilter[j];
- if (big_endian) {
- AV_WB16(&dest[i], av_clip_uint16(val >> 11));
- } else {
- AV_WL16(&dest[i], av_clip_uint16(val >> 11));
- }
+ output_pixel(&dest[i], val);
}
if (uDest) {
@@ -299,13 +403,8 @@ static av_always_inline void yuv2yuvX16inC_template(const int16_t *lumFilter, co
v += chrSrc[j][i + VOFW] * chrFilter[j];
}
- if (big_endian) {
- AV_WB16(&uDest[i], av_clip_uint16(u >> 11));
- AV_WB16(&vDest[i], av_clip_uint16(v >> 11));
- } else {
- AV_WL16(&uDest[i], av_clip_uint16(u >> 11));
- AV_WL16(&vDest[i], av_clip_uint16(v >> 11));
- }
+ output_pixel(&uDest[i], u);
+ output_pixel(&vDest[i], v);
}
}
@@ -317,10 +416,50 @@ static av_always_inline void yuv2yuvX16inC_template(const int16_t *lumFilter, co
for (j = 0; j < lumFilterSize; j++)
val += alpSrc[j][i] * lumFilter[j];
+ output_pixel(&aDest[i], val);
+ }
+ }
+}
+
+static av_always_inline void yuv2yuvXNinC_template(const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
+ const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
+ const int16_t **alpSrc, uint16_t *dest, uint16_t *uDest, uint16_t *vDest, uint16_t *aDest,
+ int dstW, int chrDstW, int big_endian, int depth)
+{
+ //FIXME Optimize (just quickly written not optimized..)
+ int i;
+
+ for (i = 0; i < dstW; i++) {
+ int val = 1 << (26-depth);
+ int j;
+
+ for (j = 0; j < lumFilterSize; j++)
+ val += lumSrc[j][i] * lumFilter[j];
+
+ if (big_endian) {
+ AV_WB16(&dest[i], av_clip(val >> (27-depth), 0, (1<<depth)-1));
+ } else {
+ AV_WL16(&dest[i], av_clip(val >> (27-depth), 0, (1<<depth)-1));
+ }
+ }
+
+ if (uDest) {
+ for (i = 0; i < chrDstW; i++) {
+ int u = 1 << (26-depth);
+ int v = 1 << (26-depth);
+ int j;
+
+ for (j = 0; j < chrFilterSize; j++) {
+ u += chrSrc[j][i ] * chrFilter[j];
+ v += chrSrc[j][i + VOFW] * chrFilter[j];
+ }
+
if (big_endian) {
- AV_WB16(&aDest[i], av_clip_uint16(val >> 11));
+ AV_WB16(&uDest[i], av_clip(u >> (27-depth), 0, (1<<depth)-1));
+ AV_WB16(&vDest[i], av_clip(v >> (27-depth), 0, (1<<depth)-1));
} else {
- AV_WL16(&aDest[i], av_clip_uint16(val >> 11));
+ AV_WL16(&uDest[i], av_clip(u >> (27-depth), 0, (1<<depth)-1));
+ AV_WL16(&vDest[i], av_clip(v >> (27-depth), 0, (1<<depth)-1));
}
}
}
@@ -331,18 +470,27 @@ static inline void yuv2yuvX16inC(const int16_t *lumFilter, const int16_t **lumSr
const int16_t **alpSrc, uint16_t *dest, uint16_t *uDest, uint16_t *vDest, uint16_t *aDest, int dstW, int chrDstW,
enum PixelFormat dstFormat)
{
- if (isBE(dstFormat)) {
- yuv2yuvX16inC_template(lumFilter, lumSrc, lumFilterSize,
- chrFilter, chrSrc, chrFilterSize,
- alpSrc,
- dest, uDest, vDest, aDest,
- dstW, chrDstW, 1);
+ if (isNBPS(dstFormat)) {
+ const int depth = av_pix_fmt_descriptors[dstFormat].comp[0].depth_minus1+1;
+ yuv2yuvXNinC_template(lumFilter, lumSrc, lumFilterSize,
+ chrFilter, chrSrc, chrFilterSize,
+ alpSrc,
+ dest, uDest, vDest, aDest,
+ dstW, chrDstW, isBE(dstFormat), depth);
} else {
- yuv2yuvX16inC_template(lumFilter, lumSrc, lumFilterSize,
- chrFilter, chrSrc, chrFilterSize,
- alpSrc,
- dest, uDest, vDest, aDest,
- dstW, chrDstW, 0);
+ if (isBE(dstFormat)) {
+ yuv2yuvX16inC_template(lumFilter, lumSrc, lumFilterSize,
+ chrFilter, chrSrc, chrFilterSize,
+ alpSrc,
+ dest, uDest, vDest, aDest,
+ dstW, chrDstW, 1, 16);
+ } else {
+ yuv2yuvX16inC_template(lumFilter, lumSrc, lumFilterSize,
+ chrFilter, chrSrc, chrFilterSize,
+ alpSrc,
+ dest, uDest, vDest, aDest,
+ dstW, chrDstW, 0, 16);
+ }
}
}
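
The reworked yuv2yuvX16inC_template() derives its shift from the requested output depth: the accumulated filter sum needs a right shift of 11 to produce 16-bit samples, so narrower outputs shift by an extra 16 - output_bits and clip to their own range (av_clip_uint16 for 16-bit, av_clip_uintp2 otherwise). A distilled sketch of the shift/clip step in output_pixel(), with illustrative inputs:

    #include <assert.h>

    /* shift/clip performed per sample for an output_bits-deep plane */
    static unsigned emit(int val, int output_bits)
    {
        int shift = 11 + 16 - output_bits;
        int v     = val >> shift;
        int max   = (1 << output_bits) - 1;
        return v < 0 ? 0 : v > max ? max : v;
    }

    int main(void)
    {
        assert(emit(1 << 27, 16) == 65535); /* shift by 11, clip to 16-bit range */
        assert(emit(1 << 27, 10) ==  1023); /* shift by 17, clip to 10-bit range */
        assert(emit(-4096,   10) ==     0); /* negative overshoot clips to 0     */
        return 0;
    }
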
@@ -1657,7 +1805,7 @@ static int rgbToRgbWrapper(SwsContext *c, const uint8_t* src[], int srcStride[],
if ((dstFormat == PIX_FMT_RGB32_1 || dstFormat == PIX_FMT_BGR32_1) && !isRGBA32(srcFormat))
dstPtr += ALT32_CORR;
- if (dstStride[0]*srcBpp == srcStride[0]*dstBpp && srcStride[0] > 0)
+ if (dstStride[0]*srcBpp == srcStride[0]*dstBpp && srcStride[0] > 0 && !(srcStride[0]%srcBpp))
conv(srcPtr, dstPtr + dstStride[0]*srcSliceY, srcSliceH*srcStride[0]);
else {
int i;
@@ -1729,6 +1877,28 @@ static int packedCopyWrapper(SwsContext *c, const uint8_t* src[], int srcStride[
return srcSliceH;
}
+#define DITHER_COPY(dst, dstStride, src, srcStride, bswap, dbswap)\
+ uint16_t scale= dither_scale[dst_depth-1][src_depth-1];\
+ int shift= src_depth-dst_depth + dither_scale[src_depth-2][dst_depth-1];\
+ for (i = 0; i < height; i++) {\
+ uint8_t *dither= dithers[src_depth-9][i&7];\
+ for (j = 0; j < length-7; j+=8){\
+ dst[j+0] = dbswap((bswap(src[j+0]) + dither[0])*scale>>shift);\
+ dst[j+1] = dbswap((bswap(src[j+1]) + dither[1])*scale>>shift);\
+ dst[j+2] = dbswap((bswap(src[j+2]) + dither[2])*scale>>shift);\
+ dst[j+3] = dbswap((bswap(src[j+3]) + dither[3])*scale>>shift);\
+ dst[j+4] = dbswap((bswap(src[j+4]) + dither[4])*scale>>shift);\
+ dst[j+5] = dbswap((bswap(src[j+5]) + dither[5])*scale>>shift);\
+ dst[j+6] = dbswap((bswap(src[j+6]) + dither[6])*scale>>shift);\
+ dst[j+7] = dbswap((bswap(src[j+7]) + dither[7])*scale>>shift);\
+ }\
+ for (; j < length; j++)\
+ dst[j] = dbswap((bswap(src[j]) + dither[j&7])*scale>>shift);\
+ dst += dstStride;\
+ src += srcStride;\
+ }
+
+
static int planarCopyWrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dst[], int dstStride[])
{
@@ -1748,42 +1918,72 @@ static int planarCopyWrapper(SwsContext *c, const uint8_t* src[], int srcStride[
length*=2;
fillPlane(dst[plane], dstStride[plane], length, height, y, (plane==3) ? 255 : 128);
} else {
- if(isNBPS(c->srcFormat)) {
- const int depth = av_pix_fmt_descriptors[c->srcFormat].comp[plane].depth_minus1+1;
- uint16_t *srcPtr2 = (uint16_t*)srcPtr;
-
- if (is16BPS(c->dstFormat)) {
- uint16_t *dstPtr2 = (uint16_t*)dstPtr;
+ if(isNBPS(c->srcFormat) || isNBPS(c->dstFormat)
+ || (is16BPS(c->srcFormat) != is16BPS(c->dstFormat))
+ ) {
+ const int src_depth = av_pix_fmt_descriptors[c->srcFormat].comp[plane].depth_minus1+1;
+ const int dst_depth = av_pix_fmt_descriptors[c->dstFormat].comp[plane].depth_minus1+1;
+ const uint16_t *srcPtr2 = (const uint16_t*)srcPtr;
+ uint16_t *dstPtr2 = (uint16_t*)dstPtr;
+
+ if (dst_depth == 8) {
+ if(isBE(c->srcFormat) == HAVE_BIGENDIAN){
+ DITHER_COPY(dstPtr, dstStride[plane], srcPtr2, srcStride[plane]/2, , )
+ } else {
+ DITHER_COPY(dstPtr, dstStride[plane], srcPtr2, srcStride[plane]/2, av_bswap16, )
+ }
+ } else if (src_depth == 8) {
for (i = 0; i < height; i++) {
- for (j = 0; j < length; j++)
- dstPtr2[j] = (srcPtr2[j]<<(16-depth)) | (srcPtr2[j]>>(2*depth-16));
+ if(isBE(c->dstFormat)){
+ for (j = 0; j < length; j++)
+ AV_WB16(&dstPtr2[j], (srcPtr[j]<<(dst_depth-8)) |
+ (srcPtr[j]>>(2*8-dst_depth)));
+ } else {
+ for (j = 0; j < length; j++)
+ AV_WL16(&dstPtr2[j], (srcPtr[j]<<(dst_depth-8)) |
+ (srcPtr[j]>>(2*8-dst_depth)));
+ }
dstPtr2 += dstStride[plane]/2;
- srcPtr2 += srcStride[plane]/2;
+ srcPtr += srcStride[plane];
}
- } else {
- // FIXME Maybe dither instead.
+ } else if (src_depth <= dst_depth) {
for (i = 0; i < height; i++) {
- for (j = 0; j < length; j++)
- dstPtr[j] = srcPtr2[j]>>(depth-8);
- dstPtr += dstStride[plane];
+#define COPY_UP(r,w) \
+ for (j = 0; j < length; j++){ \
+ unsigned int v= r(&srcPtr2[j]);\
+ w(&dstPtr2[j], (v<<(dst_depth-src_depth)) | \
+ (v>>(2*src_depth-dst_depth)));\
+ }
+ if(isBE(c->srcFormat)){
+ if(isBE(c->dstFormat)){
+ COPY_UP(AV_RB16, AV_WB16)
+ } else {
+ COPY_UP(AV_RB16, AV_WL16)
+ }
+ } else {
+ if(isBE(c->dstFormat)){
+ COPY_UP(AV_RL16, AV_WB16)
+ } else {
+ COPY_UP(AV_RL16, AV_WL16)
+ }
+ }
+ dstPtr2 += dstStride[plane]/2;
srcPtr2 += srcStride[plane]/2;
}
- }
- } else if(is16BPS(c->srcFormat) && !is16BPS(c->dstFormat)) {
- if (!isBE(c->srcFormat)) srcPtr++;
- for (i=0; i<height; i++) {
- for (j=0; j<length; j++) dstPtr[j] = srcPtr[j<<1];
- srcPtr+= srcStride[plane];
- dstPtr+= dstStride[plane];
- }
- } else if(!is16BPS(c->srcFormat) && is16BPS(c->dstFormat)) {
- for (i=0; i<height; i++) {
- for (j=0; j<length; j++) {
- dstPtr[ j<<1 ] = srcPtr[j];
- dstPtr[(j<<1)+1] = srcPtr[j];
+ } else {
+ if(isBE(c->srcFormat) == HAVE_BIGENDIAN){
+ if(isBE(c->dstFormat) == HAVE_BIGENDIAN){
+ DITHER_COPY(dstPtr2, dstStride[plane]/2, srcPtr2, srcStride[plane]/2, , )
+ } else {
+ DITHER_COPY(dstPtr2, dstStride[plane]/2, srcPtr2, srcStride[plane]/2, , av_bswap16)
+ }
+ }else{
+ if(isBE(c->dstFormat) == HAVE_BIGENDIAN){
+ DITHER_COPY(dstPtr2, dstStride[plane]/2, srcPtr2, srcStride[plane]/2, av_bswap16, )
+ } else {
+ DITHER_COPY(dstPtr2, dstStride[plane]/2, srcPtr2, srcStride[plane]/2, av_bswap16, av_bswap16)
+ }
}
- srcPtr+= srcStride[plane];
- dstPtr+= dstStride[plane];
}
} else if(is16BPS(c->srcFormat) && is16BPS(c->dstFormat)
&& isBE(c->srcFormat) != isBE(c->dstFormat)) {
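
DITHER_COPY reduces bit depth with an ordered dither: each sample gets a position-dependent offset from the 8x8 matrix for its source depth, then is rescaled with a multiplier/shift pair from dither_scale[] that approximates (2^dst_depth - 1) / (2^src_depth - 1), so full-scale input maps to full-scale output and the rounding error is spread spatially instead of showing up as banding. A simplified single-row 10-bit to 8-bit version using the table values above (scale 511, shift 11, dither row {1,2,...}):

    #include <assert.h>
    #include <stdint.h>

    /* single row of DITHER_COPY for src_depth 10, dst_depth 8, no byte swapping */
    static void dither_10_to_8(uint8_t *dst, const uint16_t *src, int n)
    {
        static const uint8_t dither_row[8] = { 1, 2, 1, 2, 1, 2, 1, 2 };
        int j;
        for (j = 0; j < n; j++)
            dst[j] = (src[j] + dither_row[j & 7]) * 511 >> 11;
    }

    int main(void)
    {
        uint16_t src[4] = { 0, 512, 1023, 1023 };
        uint8_t  dst[4];
        dither_10_to_8(dst, src, 4);
        assert(dst[0] == 0 && dst[2] == 255 && dst[3] == 255); /* black stays black, white stays white */
        return 0;
    }
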
diff --git a/libswscale/swscale_internal.h b/libswscale/swscale_internal.h
index de7859e3c0..03c5bf9736 100644
--- a/libswscale/swscale_internal.h
+++ b/libswscale/swscale_internal.h
@@ -35,11 +35,7 @@
#define MAX_FILTER_SIZE 256
-#if ARCH_X86
#define VOFW 21504
-#else
-#define VOFW 2048 // faster on PPC and not tested on others
-#endif
#define VOF (VOFW*2)
@@ -359,7 +355,10 @@ const char *sws_format_name(enum PixelFormat format);
|| (x)==PIX_FMT_YUV420P9BE \
|| (x)==PIX_FMT_YUV420P10LE \
|| (x)==PIX_FMT_YUV420P10BE \
+ || (x)==PIX_FMT_YUV422P10LE \
+ || (x)==PIX_FMT_YUV422P10BE \
)
+#define is9_OR_10BPS isNBPS //for ronald
#define isBE(x) ((x)&1)
#define isPlanar8YUV(x) ( \
(x)==PIX_FMT_YUV410P \
@@ -377,11 +376,13 @@ const char *sws_format_name(enum PixelFormat format);
|| (x)==PIX_FMT_YUV420P9LE \
|| (x)==PIX_FMT_YUV420P10LE \
|| (x)==PIX_FMT_YUV420P16LE \
+ || (x)==PIX_FMT_YUV422P10LE \
|| (x)==PIX_FMT_YUV422P16LE \
|| (x)==PIX_FMT_YUV444P16LE \
|| (x)==PIX_FMT_YUV420P9BE \
|| (x)==PIX_FMT_YUV420P10BE \
|| (x)==PIX_FMT_YUV420P16BE \
+ || (x)==PIX_FMT_YUV422P10BE \
|| (x)==PIX_FMT_YUV422P16BE \
|| (x)==PIX_FMT_YUV444P16BE \
)
diff --git a/libswscale/swscale_template.c b/libswscale/swscale_template.c
index 864bafc85c..e53cfc0752 100644
--- a/libswscale/swscale_template.c
+++ b/libswscale/swscale_template.c
@@ -951,7 +951,7 @@ static inline void RENAME(yuv2yuv1)(SwsContext *c, const int16_t *lumSrc, const
#if COMPILE_TEMPLATE_MMX
if(!(c->flags & SWS_BITEXACT)) {
long p= 4;
- const uint8_t *src[4]= {alpSrc + dstW, lumSrc + dstW, chrSrc + chrDstW, chrSrc + VOFW + chrDstW};
+ const int16_t *src[4]= {alpSrc + dstW, lumSrc + dstW, chrSrc + chrDstW, chrSrc + VOFW + chrDstW};
uint8_t *dst[4]= {aDest, dest, uDest, vDest};
x86_reg counter[4]= {dstW, dstW, chrDstW, chrDstW};
@@ -1233,8 +1233,8 @@ static inline void RENAME(yuv2packed2)(SwsContext *c, const uint16_t *buf0, cons
: "%r8"
);
#else
- *(const uint16_t **)(&c->u_temp)=abuf0;
- *(const uint16_t **)(&c->v_temp)=abuf1;
+ c->u_temp=(intptr_t)abuf0;
+ c->v_temp=(intptr_t)abuf1;
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
@@ -1678,6 +1678,8 @@ static inline void RENAME(LEToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *s
);
#else
int i;
+ // FIXME I don't think this code is right for YUV444/422, since then h is not subsampled so
+ // we need to skip every second pixel. Same for BEToUV.
for (i=0; i<width; i++) {
dstU[i]= src1[2*i + 1];
dstV[i]= src2[2*i + 1];
@@ -1827,27 +1829,31 @@ static inline void RENAME(nv21ToUV)(uint8_t *dstU, uint8_t *dstV,
}
// FIXME Maybe dither instead.
-#define YUV_NBPS(depth) \
-static inline void RENAME(yuv ## depth ## ToUV)(uint8_t *dstU, uint8_t *dstV, \
- const uint16_t *srcU, const uint16_t *srcV, \
- long width, uint32_t *unused) \
+#ifndef YUV_NBPS
+#define YUV_NBPS(depth, endianness, rfunc) \
+static inline void endianness ## depth ## ToUV_c(uint8_t *dstU, uint8_t *dstV, \
+ const uint16_t *srcU, const uint16_t *srcV, \
+ long width, uint32_t *unused) \
{ \
int i; \
for (i = 0; i < width; i++) { \
- dstU[i] = srcU[i]>>(depth-8); \
- dstV[i] = srcV[i]>>(depth-8); \
+ dstU[i] = rfunc(&srcU[i])>>(depth-8); \
+ dstV[i] = rfunc(&srcV[i])>>(depth-8); \
} \
} \
\
-static inline void RENAME(yuv ## depth ## ToY)(uint8_t *dstY, const uint16_t *srcY, long width, uint32_t *unused) \
+static inline void endianness ## depth ## ToY_c(uint8_t *dstY, const uint16_t *srcY, long width, uint32_t *unused) \
{ \
int i; \
for (i = 0; i < width; i++) \
- dstY[i] = srcY[i]>>(depth-8); \
+ dstY[i] = rfunc(&srcY[i])>>(depth-8); \
} \
-YUV_NBPS( 9)
-YUV_NBPS(10)
+YUV_NBPS( 9, LE, AV_RL16)
+YUV_NBPS( 9, BE, AV_RB16)
+YUV_NBPS(10, LE, AV_RL16)
+YUV_NBPS(10, BE, AV_RB16)
+#endif // YUV_NBPS
#if COMPILE_TEMPLATE_MMX
static inline void RENAME(bgr24ToY_mmx)(uint8_t *dst, const uint8_t *src, long width, enum PixelFormat srcFormat)
@@ -2406,7 +2412,7 @@ static inline void RENAME(hyscale)(SwsContext *c, uint16_t *dst, long dstWidth,
uint32_t *pal, int isAlpha)
{
void (*toYV12)(uint8_t *, const uint8_t *, long, uint32_t *) = isAlpha ? c->alpToYV12 : c->lumToYV12;
- void (*convertRange)(uint16_t *, int) = isAlpha ? NULL : c->lumConvertRange;
+ void (*convertRange)(int16_t *, int) = isAlpha ? NULL : c->lumConvertRange;
src += isAlpha ? c->alpSrcOffset : c->lumSrcOffset;
@@ -2817,7 +2823,7 @@ static int RENAME(swScale)(SwsContext *c, const uint8_t* src[], int srcStride[],
} else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) { //YV12 like
const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
- if (is16BPS(dstFormat)) {
+ if (is16BPS(dstFormat) || isNBPS(dstFormat)) {
yuv2yuvX16inC(
vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
@@ -2894,7 +2900,7 @@ static int RENAME(swScale)(SwsContext *c, const uint8_t* src[], int srcStride[],
} else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) { //YV12
const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
- if (is16BPS(dstFormat)) {
+ if (is16BPS(dstFormat) || isNBPS(dstFormat)) {
yuv2yuvX16inC(
vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
@@ -2978,8 +2984,12 @@ static void RENAME(sws_init_swScale)(SwsContext *c)
case PIX_FMT_PAL8 :
case PIX_FMT_BGR4_BYTE:
case PIX_FMT_RGB4_BYTE: c->chrToYV12 = palToUV; break;
- case PIX_FMT_YUV420P9 : c->chrToYV12 = (void*)RENAME(yuv9ToUV ); break;
- case PIX_FMT_YUV420P10: c->chrToYV12 = (void*)RENAME(yuv10ToUV); break;
+ case PIX_FMT_YUV420P9BE: c->chrToYV12 = BE9ToUV_c; break;
+ case PIX_FMT_YUV420P9LE: c->chrToYV12 = LE9ToUV_c; break;
+ case PIX_FMT_YUV422P10BE:
+ case PIX_FMT_YUV420P10BE: c->chrToYV12 = BE10ToUV_c; break;
+ case PIX_FMT_YUV422P10LE:
+ case PIX_FMT_YUV420P10LE: c->chrToYV12 = LE10ToUV_c; break;
case PIX_FMT_YUV420P16BE:
case PIX_FMT_YUV422P16BE:
case PIX_FMT_YUV444P16BE: c->chrToYV12 = RENAME(BEToUV); break;
@@ -3026,8 +3036,12 @@ static void RENAME(sws_init_swScale)(SwsContext *c)
c->lumToYV12 = NULL;
c->alpToYV12 = NULL;
switch (srcFormat) {
- case PIX_FMT_YUV420P9 : c->lumToYV12 = (void*)RENAME(yuv9ToY ); break;
- case PIX_FMT_YUV420P10: c->lumToYV12 = (void*)RENAME(yuv10ToY); break;
+ case PIX_FMT_YUV420P9BE: c->lumToYV12 = BE9ToY_c; break;
+ case PIX_FMT_YUV420P9LE: c->lumToYV12 = LE9ToY_c; break;
+ case PIX_FMT_YUV422P10BE:
+ case PIX_FMT_YUV420P10BE: c->lumToYV12 = BE10ToY_c; break;
+ case PIX_FMT_YUV422P10LE:
+ case PIX_FMT_YUV420P10LE: c->lumToYV12 = LE10ToY_c; break;
case PIX_FMT_YUYV422 :
case PIX_FMT_YUV420P16BE:
case PIX_FMT_YUV422P16BE:
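
Editorial note on the YUV_NBPS rework above: the macro now takes an endianness tag and a read function, so each instantiation emits endian-aware 9/10-bit to 8-bit converters instead of assuming native-endian loads. As a rough illustration (not part of the patch), this is approximately what YUV_NBPS(10, LE, AV_RL16) expands to; AV_RL16 is the little-endian 16-bit reader from libavutil/intreadwrite.h:

    #include <stdint.h>
    #include "libavutil/intreadwrite.h"   /* AV_RL16() / AV_RB16() */

    /* Sketch of the macro expansion for depth=10, endianness=LE, rfunc=AV_RL16:
     * each 10-bit sample is read endian-safely and shifted down by depth-8 = 2
     * bits to yield an 8-bit value. The real functions are generated by the
     * YUV_NBPS() instantiations in the hunk above. */
    static inline void LE10ToY_c(uint8_t *dstY, const uint16_t *srcY,
                                 long width, uint32_t *unused)
    {
        int i;
        for (i = 0; i < width; i++)
            dstY[i] = AV_RL16(&srcY[i]) >> 2;
    }

The ToUV variant is analogous, applying the same shift to the srcU and srcV planes.
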
diff --git a/libswscale/utils.c b/libswscale/utils.c
index d96cc57ccf..ea44190ace 100644
--- a/libswscale/utils.c
+++ b/libswscale/utils.c
@@ -107,14 +107,17 @@ const char *swscale_license(void)
|| (x)==PIX_FMT_YUV440P \
|| (x)==PIX_FMT_MONOWHITE \
|| (x)==PIX_FMT_MONOBLACK \
+ || (x)==PIX_FMT_YUV420P9LE \
+ || (x)==PIX_FMT_YUV420P10LE \
|| (x)==PIX_FMT_YUV420P16LE \
|| (x)==PIX_FMT_YUV422P16LE \
|| (x)==PIX_FMT_YUV444P16LE \
+ || (x)==PIX_FMT_YUV420P9BE \
+ || (x)==PIX_FMT_YUV420P10BE \
|| (x)==PIX_FMT_YUV420P16BE \
|| (x)==PIX_FMT_YUV422P16BE \
|| (x)==PIX_FMT_YUV444P16BE \
- || (x)==PIX_FMT_YUV420P9 \
- || (x)==PIX_FMT_YUV420P10 \
+ || (x)==PIX_FMT_YUV422P10 \
)
int sws_isSupportedInput(enum PixelFormat pix_fmt)
@@ -142,9 +145,14 @@ int sws_isSupportedInput(enum PixelFormat pix_fmt)
|| (x)==PIX_FMT_GRAY8 \
|| (x)==PIX_FMT_YUV410P \
|| (x)==PIX_FMT_YUV440P \
+ || (x)==PIX_FMT_YUV422P10 \
+ || (x)==PIX_FMT_YUV420P9LE \
+ || (x)==PIX_FMT_YUV420P10LE \
|| (x)==PIX_FMT_YUV420P16LE \
|| (x)==PIX_FMT_YUV422P16LE \
|| (x)==PIX_FMT_YUV444P16LE \
+ || (x)==PIX_FMT_YUV420P9BE \
+ || (x)==PIX_FMT_YUV420P10BE \
|| (x)==PIX_FMT_YUV420P16BE \
|| (x)==PIX_FMT_YUV422P16BE \
|| (x)==PIX_FMT_YUV444P16BE \
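
Editorial note on the utils.c hunks above: the endian-neutral PIX_FMT_YUV420P9/P10 entries are replaced by their explicit LE/BE variants, and PIX_FMT_YUV422P10 joins both the input and output support lists. A minimal sketch (assuming a tree from around this merge; not part of the patch) that queries the public API to see the newly listed formats reported as supported:

    #include <stdio.h>
    #include "libswscale/swscale.h"   /* sws_isSupportedInput(), sws_isSupportedOutput() */

    int main(void)
    {
        /* A non-zero return means libswscale can read/write the format. */
        printf("yuv420p10le in : %d\n", sws_isSupportedInput (PIX_FMT_YUV420P10LE));
        printf("yuv420p9be  in : %d\n", sws_isSupportedInput (PIX_FMT_YUV420P9BE));
        printf("yuv422p10   out: %d\n", sws_isSupportedOutput(PIX_FMT_YUV422P10));
        return 0;
    }
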
diff --git a/tests/codec-regression.sh b/tests/codec-regression.sh
index 2525e6cc1a..5f4e539381 100755
--- a/tests/codec-regression.sh
+++ b/tests/codec-regression.sh
@@ -24,323 +24,333 @@ fi
if [ -n "$do_mpeg" ] ; then
# mpeg1
-do_video_encoding mpeg1.mpg "-qscale 10" "-f mpeg1video"
+do_video_encoding mpeg1.mpg "-qscale 10 -f mpeg1video"
do_video_decoding
fi
if [ -n "$do_mpeg2" ] ; then
# mpeg2
-do_video_encoding mpeg2.mpg "-qscale 10" "-vcodec mpeg2video -f mpeg1video"
+do_video_encoding mpeg2.mpg "-qscale 10 -vcodec mpeg2video -f mpeg1video"
do_video_decoding
# mpeg2 encoding intra vlc qprd
-do_video_encoding mpeg2ivlc-qprd.mpg "-vb 500k -bf 2 -trellis 1 -flags +qprd+mv0 -flags2 +ivlc -cmp 2 -subcmp 2 -mbd rd" "-vcodec mpeg2video -f mpeg2video"
+do_video_encoding mpeg2ivlc-qprd.mpg "-vb 500k -bf 2 -trellis 1 -flags +qprd+mv0 -flags2 +ivlc -cmp 2 -subcmp 2 -mbd rd -vcodec mpeg2video -f mpeg2video"
do_video_decoding
#mpeg2 4:2:2 encoding
-do_video_encoding mpeg2_422.mpg "-vb 1000k -bf 2 -trellis 1 -flags +qprd+mv0+ildct+ilme -flags2 +ivlc -mbd rd" "-vcodec mpeg2video -pix_fmt yuv422p -f mpeg2video"
+do_video_encoding mpeg2_422.mpg "-vb 1000k -bf 2 -trellis 1 -flags +qprd+mv0+ildct+ilme -flags2 +ivlc -mbd rd -vcodec mpeg2video -pix_fmt yuv422p -f mpeg2video"
do_video_decoding
# mpeg2
-do_video_encoding mpeg2.mpg "-qscale 10" "-vcodec mpeg2video -idct int -dct int -f mpeg1video"
+do_video_encoding mpeg2.mpg "-qscale 10 -vcodec mpeg2video -idct int -dct int -f mpeg1video"
do_video_decoding "-idct int"
# mpeg2 encoding interlaced
-do_video_encoding mpeg2i.mpg "-qscale 10" "-vcodec mpeg2video -f mpeg1video -flags +ildct+ilme"
+do_video_encoding mpeg2i.mpg "-qscale 10 -vcodec mpeg2video -f mpeg1video -flags +ildct+ilme"
do_video_decoding
fi
if [ -n "$do_mpeg2thread" ] ; then
# mpeg2 encoding interlaced
-do_video_encoding mpeg2thread.mpg "-qscale 10" "-vcodec mpeg2video -f mpeg1video -bf 2 -flags +ildct+ilme -threads 2"
+do_video_encoding mpeg2thread.mpg "-qscale 10 -vcodec mpeg2video -f mpeg1video -bf 2 -flags +ildct+ilme -threads 2"
do_video_decoding
# mpeg2 encoding interlaced using intra vlc
-do_video_encoding mpeg2threadivlc.mpg "-qscale 10" "-vcodec mpeg2video -f mpeg1video -bf 2 -flags +ildct+ilme -flags2 +ivlc -threads 2"
+do_video_encoding mpeg2threadivlc.mpg "-qscale 10 -vcodec mpeg2video -f mpeg1video -bf 2 -flags +ildct+ilme -flags2 +ivlc -threads 2"
do_video_decoding
# mpeg2 encoding interlaced
file=${outfile}mpeg2reuse.mpg
-do_ffmpeg $file -sameq -me_threshold 256 -mb_threshold 1024 -i ${target_path}/${outfile}mpeg2thread.mpg -vcodec mpeg2video -f mpeg1video -bf 2 -flags +ildct+ilme -threads 4
+do_ffmpeg $file $DEC_OPTS -me_threshold 256 -i ${target_path}/${outfile}mpeg2thread.mpg $ENC_OPTS -sameq -me_threshold 256 -mb_threshold 1024 -vcodec mpeg2video -f mpeg1video -bf 2 -flags +ildct+ilme -threads 4
do_video_decoding
fi
if [ -n "$do_msmpeg4v2" ] ; then
-do_video_encoding msmpeg4v2.avi "-qscale 10" "-an -vcodec msmpeg4v2"
+do_video_encoding msmpeg4v2.avi "-qscale 10 -an -vcodec msmpeg4v2"
do_video_decoding
fi
if [ -n "$do_msmpeg4" ] ; then
-do_video_encoding msmpeg4.avi "-qscale 10" "-an -vcodec msmpeg4"
+do_video_encoding msmpeg4.avi "-qscale 10 -an -vcodec msmpeg4"
do_video_decoding
fi
+if [ -n "$do_msvideo1" ] ; then
+do_video_encoding msvideo1.avi "-an -vcodec msvideo1"
+do_video_decoding "" "-pix_fmt yuv420p"
+fi
+
if [ -n "$do_wmv1" ] ; then
-do_video_encoding wmv1.avi "-qscale 10" "-an -vcodec wmv1"
+do_video_encoding wmv1.avi "-qscale 10 -an -vcodec wmv1"
do_video_decoding
fi
if [ -n "$do_wmv2" ] ; then
-do_video_encoding wmv2.avi "-qscale 10" "-an -vcodec wmv2"
+do_video_encoding wmv2.avi "-qscale 10 -an -vcodec wmv2"
do_video_decoding
fi
if [ -n "$do_h261" ] ; then
-do_video_encoding h261.avi "-qscale 11" "-s 352x288 -an -vcodec h261"
+do_video_encoding h261.avi "-qscale 11 -s 352x288 -an -vcodec h261"
do_video_decoding
fi
if [ -n "$do_h263" ] ; then
-do_video_encoding h263.avi "-qscale 10" "-s 352x288 -an -vcodec h263"
+do_video_encoding h263.avi "-qscale 10 -s 352x288 -an -vcodec h263"
do_video_decoding
fi
if [ -n "$do_h263p" ] ; then
-do_video_encoding h263p.avi "-qscale 2 -flags +umv+aiv+aic" "-s 352x288 -an -vcodec h263p -ps 300"
+do_video_encoding h263p.avi "-qscale 2 -flags +umv+aiv+aic -s 352x288 -an -vcodec h263p -ps 300"
do_video_decoding
fi
if [ -n "$do_mpeg4" ] ; then
-do_video_encoding odivx.mp4 "-flags +mv4 -mbd bits -qscale 10" "-an -vcodec mpeg4"
+do_video_encoding odivx.mp4 "-flags +mv4 -mbd bits -qscale 10 -an -vcodec mpeg4"
do_video_decoding
fi
if [ -n "$do_huffyuv" ] ; then
-do_video_encoding huffyuv.avi "" "-an -vcodec huffyuv -pix_fmt yuv422p -sws_flags neighbor+bitexact"
+do_video_encoding huffyuv.avi "-an -vcodec huffyuv -pix_fmt yuv422p -sws_flags neighbor+bitexact"
do_video_decoding "" "-strict -2 -pix_fmt yuv420p -sws_flags neighbor+bitexact"
fi
if [ -n "$do_rc" ] ; then
-do_video_encoding mpeg4-rc.avi "-b 400k -bf 2" "-an -vcodec mpeg4"
+do_video_encoding mpeg4-rc.avi "-b 400k -bf 2 -an -vcodec mpeg4"
do_video_decoding
fi
if [ -n "$do_mpeg4adv" ] ; then
-do_video_encoding mpeg4-adv.avi "-qscale 9 -flags +mv4+part+aic -trellis 1 -mbd bits -ps 200" "-an -vcodec mpeg4"
+do_video_encoding mpeg4-adv.avi "-qscale 9 -flags +mv4+part+aic -trellis 1 -mbd bits -ps 200 -an -vcodec mpeg4"
do_video_decoding
-do_video_encoding mpeg4-qprd.avi "-b 450k -bf 2 -trellis 1 -flags +mv4+qprd+mv0 -cmp 2 -subcmp 2 -mbd rd" "-an -vcodec mpeg4"
+do_video_encoding mpeg4-qprd.avi "-b 450k -bf 2 -trellis 1 -flags +mv4+qprd+mv0 -cmp 2 -subcmp 2 -mbd rd -an -vcodec mpeg4"
do_video_decoding
-do_video_encoding mpeg4-adap.avi "-b 550k -bf 2 -flags +mv4+mv0 -trellis 1 -cmp 1 -subcmp 2 -mbd rd -scplx_mask 0.3" "-an -vcodec mpeg4"
+do_video_encoding mpeg4-adap.avi "-b 550k -bf 2 -flags +mv4+mv0 -trellis 1 -cmp 1 -subcmp 2 -mbd rd -scplx_mask 0.3 -an -vcodec mpeg4"
do_video_decoding
-do_video_encoding mpeg4-Q.avi "-qscale 7 -flags +mv4+qpel -mbd 2 -bf 2 -cmp 1 -subcmp 2" "-an -vcodec mpeg4"
+do_video_encoding mpeg4-Q.avi "-qscale 7 -flags +mv4+qpel -mbd 2 -bf 2 -cmp 1 -subcmp 2 -an -vcodec mpeg4"
do_video_decoding
fi
if [ -n "$do_mpeg4thread" ] ; then
-do_video_encoding mpeg4-thread.avi "-b 500k -flags +mv4+part+aic -trellis 1 -mbd bits -ps 200 -bf 2" "-an -vcodec mpeg4 -threads 2"
+do_video_encoding mpeg4-thread.avi "-b 500k -flags +mv4+part+aic -trellis 1 -mbd bits -ps 200 -bf 2 -an -vcodec mpeg4 -threads 2"
do_video_decoding
fi
if [ -n "$do_error" ] ; then
-do_video_encoding error-mpeg4-adv.avi "-qscale 7 -flags +mv4+part+aic -mbd rd -ps 250 -error 10" "-an -vcodec mpeg4"
+do_video_encoding error-mpeg4-adv.avi "-qscale 7 -flags +mv4+part+aic -mbd rd -ps 250 -error 10 -an -vcodec mpeg4"
do_video_decoding
fi
if [ -n "$do_mpeg4nr" ] ; then
-do_video_encoding mpeg4-nr.avi "-qscale 8 -flags +mv4 -mbd rd -nr 200" "-an -vcodec mpeg4"
+do_video_encoding mpeg4-nr.avi "-qscale 8 -flags +mv4 -mbd rd -nr 200 -an -vcodec mpeg4"
do_video_decoding
fi
if [ -n "$do_mpeg1b" ] ; then
-do_video_encoding mpeg1b.mpg "-qscale 8 -bf 3 -ps 200" "-an -vcodec mpeg1video -f mpeg1video"
+do_video_encoding mpeg1b.mpg "-qscale 8 -bf 3 -ps 200 -an -vcodec mpeg1video -f mpeg1video"
do_video_decoding
fi
if [ -n "$do_mjpeg" ] ; then
-do_video_encoding mjpeg.avi "-qscale 9" "-an -vcodec mjpeg -pix_fmt yuvj420p"
+do_video_encoding mjpeg.avi "-qscale 9 -an -vcodec mjpeg -pix_fmt yuvj420p"
do_video_decoding "" "-pix_fmt yuv420p"
fi
if [ -n "$do_ljpeg" ] ; then
-do_video_encoding ljpeg.avi "" "-an -vcodec ljpeg -strict -1"
+do_video_encoding ljpeg.avi "-an -vcodec ljpeg -strict -1"
do_video_decoding
fi
if [ -n "$do_jpegls" ] ; then
-do_video_encoding jpegls.avi "" "-an -vcodec jpegls -vtag MJPG -sws_flags neighbor+full_chroma_int+accurate_rnd+bitexact"
+do_video_encoding jpegls.avi "-an -vcodec jpegls -vtag MJPG -sws_flags neighbor+full_chroma_int+accurate_rnd+bitexact"
do_video_decoding "" "-pix_fmt yuv420p -sws_flags area+bitexact"
fi
if [ -n "$do_rv10" ] ; then
-do_video_encoding rv10.rm "-qscale 10" "-an"
+do_video_encoding rv10.rm "-qscale 10 -an"
do_video_decoding
fi
if [ -n "$do_rv20" ] ; then
-do_video_encoding rv20.rm "-qscale 10" "-vcodec rv20 -an"
+do_video_encoding rv20.rm "-qscale 10 -vcodec rv20 -an"
do_video_decoding
fi
if [ -n "$do_asv1" ] ; then
-do_video_encoding asv1.avi "-qscale 10" "-an -vcodec asv1"
+do_video_encoding asv1.avi "-qscale 10 -an -vcodec asv1"
do_video_decoding
fi
if [ -n "$do_asv2" ] ; then
-do_video_encoding asv2.avi "-qscale 10" "-an -vcodec asv2"
+do_video_encoding asv2.avi "-qscale 10 -an -vcodec asv2"
do_video_decoding
fi
if [ -n "$do_flv" ] ; then
-do_video_encoding flv.flv "-qscale 10" "-an -vcodec flv"
+do_video_encoding flv.flv "-qscale 10 -an -vcodec flv"
do_video_decoding
fi
if [ -n "$do_ffv1" ] ; then
-do_video_encoding ffv1.avi "-strict -2" "-an -vcodec ffv1"
+do_video_encoding ffv1.avi "-strict -2 -an -vcodec ffv1"
do_video_decoding
fi
if [ -n "$do_snow" ] ; then
-do_video_encoding snow.avi "-strict -2" "-an -vcodec snow -qscale 2 -flags +qpel -me_method iter -dia_size 2 -cmp 12 -subcmp 12 -s 128x64"
+do_video_encoding snow.avi "-strict -2 -an -vcodec snow -qscale 2 -flags +qpel -me_method iter -dia_size 2 -cmp 12 -subcmp 12 -s 128x64"
do_video_decoding "" "-s 352x288"
fi
if [ -n "$do_snowll" ] ; then
-do_video_encoding snow53.avi "-strict -2" "-an -vcodec snow -qscale .001 -pred 1 -flags +mv4+qpel"
+do_video_encoding snow53.avi "-strict -2 -an -vcodec snow -qscale .001 -pred 1 -flags +mv4+qpel"
do_video_decoding
fi
if [ -n "$do_dv" ] ; then
-do_video_encoding dv.dv "-dct int" "-s pal -an"
+do_video_encoding dv.dv "-dct int -s pal -an"
do_video_decoding "" "-s cif"
-do_video_encoding dv411.dv "-dct int" "-s pal -an -pix_fmt yuv411p -sws_flags area+accurate_rnd+bitexact"
+do_video_encoding dv411.dv "-dct int -s pal -an -pix_fmt yuv411p -sws_flags area+accurate_rnd+bitexact"
do_video_decoding "" "-s cif -sws_flags area+accurate_rnd+bitexact"
fi
if [ -n "$do_dv50" ] ; then
-do_video_encoding dv50.dv "-dct int" "-s pal -pix_fmt yuv422p -an -sws_flags neighbor+bitexact"
+do_video_encoding dv50.dv "-dct int -s pal -pix_fmt yuv422p -an -sws_flags neighbor+bitexact"
do_video_decoding "" "-s cif -pix_fmt yuv420p -sws_flags neighbor+bitexact"
fi
if [ -n "$do_dnxhd_1080i" ] ; then
# FIXME: interlaced raw DNxHD decoding is broken
-do_video_encoding dnxhd-1080i.mov "" "-vcodec dnxhd -flags +ildct -s hd1080 -b 120M -pix_fmt yuv422p -vframes 5 -an"
+do_video_encoding dnxhd-1080i.mov "-vcodec dnxhd -flags +ildct -s hd1080 -b 120M -pix_fmt yuv422p -vframes 5 -an"
do_video_decoding "-r 25" "-s cif -pix_fmt yuv420p"
fi
if [ -n "$do_dnxhd_720p" ] ; then
-do_video_encoding dnxhd-720p.dnxhd "" "-s hd720 -b 90M -pix_fmt yuv422p -vframes 5 -an"
+do_video_encoding dnxhd-720p.dnxhd "-s hd720 -b 90M -pix_fmt yuv422p -vframes 5 -an"
do_video_decoding "-r 25" "-s cif -pix_fmt yuv420p"
fi
if [ -n "$do_dnxhd_720p_rd" ] ; then
-do_video_encoding dnxhd-720p-rd.dnxhd "" "-threads 4 -mbd rd -s hd720 -b 90M -pix_fmt yuv422p -vframes 5 -an"
+do_video_encoding dnxhd-720p-rd.dnxhd "-threads 4 -mbd rd -s hd720 -b 90M -pix_fmt yuv422p -vframes 5 -an"
do_video_decoding "-r 25" "-s cif -pix_fmt yuv420p"
fi
if [ -n "$do_svq1" ] ; then
-do_video_encoding svq1.mov "" "-an -vcodec svq1 -qscale 3 -pix_fmt yuv410p"
+do_video_encoding svq1.mov "-an -vcodec svq1 -qscale 3 -pix_fmt yuv410p"
do_video_decoding "" "-pix_fmt yuv420p"
fi
if [ -n "$do_flashsv" ] ; then
-do_video_encoding flashsv.flv "" "-an -vcodec flashsv -sws_flags neighbor+full_chroma_int+accurate_rnd+bitexact"
+do_video_encoding flashsv.flv "-an -vcodec flashsv -sws_flags neighbor+full_chroma_int+accurate_rnd+bitexact"
do_video_decoding "" "-pix_fmt yuv420p -sws_flags area+accurate_rnd+bitexact"
fi
if [ -n "$do_flashsv2" ] ; then
-do_video_encoding flashsv2.flv "" "-an -vcodec flashsv2 -sws_flags neighbor+full_chroma_int+accurate_rnd+bitexact -strict experimental"
+do_video_encoding flashsv2.flv "-an -vcodec flashsv2 -sws_flags neighbor+full_chroma_int+accurate_rnd+bitexact -strict experimental"
#do_video_decoding "" "-pix_fmt yuv420p -sws_flags area+accurate_rnd+bitexact"
fi
if [ -n "$do_roq" ] ; then
-do_video_encoding roqav.roq "" "-vframes 5"
+do_video_encoding roqav.roq "-vframes 5"
do_video_decoding "" "-pix_fmt yuv420p"
fi
if [ -n "$do_qtrle" ] ; then
-do_video_encoding qtrle.mov "" "-an -vcodec qtrle"
+do_video_encoding qtrle.mov "-an -vcodec qtrle"
+do_video_decoding "" "-pix_fmt yuv420p"
+fi
+
+if [ -n "$do_qtrlegray" ] ; then
+do_video_encoding qtrlegray.mov "-an -vcodec qtrle -pix_fmt gray"
do_video_decoding "" "-pix_fmt yuv420p"
fi
if [ -n "$do_rgb" ] ; then
-do_video_encoding rgb.avi "" "-an -vcodec rawvideo -pix_fmt bgr24"
+do_video_encoding rgb.avi "-an -vcodec rawvideo -pix_fmt bgr24"
do_video_decoding "" "-pix_fmt yuv420p"
fi
if [ -n "$do_yuv" ] ; then
-do_video_encoding yuv.avi "" "-an -vcodec rawvideo -pix_fmt yuv420p"
+do_video_encoding yuv.avi "-an -vcodec rawvideo -pix_fmt yuv420p"
do_video_decoding "" "-pix_fmt yuv420p"
fi
if [ -n "$do_mp2" ] ; then
-do_audio_encoding mp2.mp2 "-ar 44100"
+do_audio_encoding mp2.mp2
do_audio_decoding
$tiny_psnr $pcm_dst $pcm_ref 2 1924 >> $logfile
fi
if [ -n "$do_ac3_fixed" ] ; then
-do_audio_encoding ac3.rm "" "-vn -acodec ac3_fixed"
+do_audio_encoding ac3.rm "-vn -acodec ac3_fixed"
# binaries configured with --disable-sse decode ac3 differently
#do_audio_decoding
#$tiny_psnr $pcm_dst $pcm_ref 2 1024 >> $logfile
fi
if [ -n "$do_g726" ] ; then
-do_audio_encoding g726.wav "-ar 44100" "-ab 32k -ac 1 -ar 8000 -acodec g726"
+do_audio_encoding g726.wav "-ab 32k -ac 1 -ar 8000 -acodec g726"
do_audio_decoding
fi
if [ -n "$do_adpcm_ima_wav" ] ; then
-do_audio_encoding adpcm_ima.wav "-ar 44100" "-acodec adpcm_ima_wav"
+do_audio_encoding adpcm_ima.wav "-acodec adpcm_ima_wav"
do_audio_decoding
fi
if [ -n "$do_adpcm_ima_qt" ] ; then
-do_audio_encoding adpcm_qt.aiff "-ar 44100" "-acodec adpcm_ima_qt"
+do_audio_encoding adpcm_qt.aiff "-acodec adpcm_ima_qt"
do_audio_decoding
fi
if [ -n "$do_adpcm_ms" ] ; then
-do_audio_encoding adpcm_ms.wav "-ar 44100" "-acodec adpcm_ms"
+do_audio_encoding adpcm_ms.wav "-acodec adpcm_ms"
do_audio_decoding
fi
if [ -n "$do_adpcm_yam" ] ; then
-do_audio_encoding adpcm_yam.wav "-ar 44100" "-acodec adpcm_yamaha"
+do_audio_encoding adpcm_yam.wav "-acodec adpcm_yamaha"
do_audio_decoding
fi
if [ -n "$do_adpcm_swf" ] ; then
-do_audio_encoding adpcm_swf.flv "-ar 44100" "-acodec adpcm_swf"
+do_audio_encoding adpcm_swf.flv "-acodec adpcm_swf"
do_audio_decoding
fi
if [ -n "$do_alac" ] ; then
-do_audio_encoding alac.m4a "-ar 44100" "-acodec alac -compression_level 1"
+do_audio_encoding alac.m4a "-acodec alac -compression_level 1"
do_audio_decoding
fi
if [ -n "$do_flac" ] ; then
-do_audio_encoding flac.flac "-ar 44100" "-acodec flac -compression_level 2"
+do_audio_encoding flac.flac "-acodec flac -compression_level 2"
do_audio_decoding
fi
if [ -n "$do_wmav1" ] ; then
-do_audio_encoding wmav1.asf "-ar 44100" "-acodec wmav1"
-do_ffmpeg_nomd5 $pcm_dst -i $target_path/$file -f wav
+do_audio_encoding wmav1.asf "-acodec wmav1"
+do_ffmpeg_nomd5 $pcm_dst $DEC_OPTS -i $target_path/$file -f wav
$tiny_psnr $pcm_dst $pcm_ref 2 8192 >> $logfile
fi
if [ -n "$do_wmav2" ] ; then
-do_audio_encoding wmav2.asf "-ar 44100" "-acodec wmav2"
-do_ffmpeg_nomd5 $pcm_dst -i $target_path/$file -f wav
+do_audio_encoding wmav2.asf "-acodec wmav2"
+do_ffmpeg_nomd5 $pcm_dst $DEC_OPTS -i $target_path/$file -f wav
$tiny_psnr $pcm_dst $pcm_ref 2 8192 >> $logfile
fi
#if [ -n "$do_vorbis" ] ; then
# vorbis
#disabled because it is broken
-#do_audio_encoding vorbis.asf "-ar 44100" "-acodec vorbis"
+#do_audio_encoding vorbis.asf "-acodec vorbis"
#do_audio_decoding
#fi
do_audio_enc_dec() {
- do_audio_encoding $3.$1 "" "$4 -sample_fmt $2 -acodec $3"
+ do_audio_encoding $3.$1 "$4 -sample_fmt $2 -acodec $3"
do_audio_decoding
}
diff --git a/tests/fate/aac.mak b/tests/fate/aac.mak
index 41df4f619f..6701e149d2 100644
--- a/tests/fate/aac.mak
+++ b/tests/fate/aac.mak
@@ -22,6 +22,10 @@ FATE_AAC += fate-aac-latm_000000001180bc60
fate-aac-latm_000000001180bc60: CMD = pcm -i $(SAMPLES)/aac/latm_000000001180bc60.mpg
fate-aac-latm_000000001180bc60: REF = $(SAMPLES)/aac/latm_000000001180bc60.s16
+FATE_AAC += fate-aac-ap05_48
+fate-aac-ap05_48: CMD = pcm -i $(SAMPLES)/aac/ap05_48.mp4
+fate-aac-ap05_48: REF = $(SAMPLES)/aac/ap05_48.s16
+
FATE_TESTS += $(FATE_AAC)
fate-aac: $(FATE_AAC)
$(FATE_AAC): CMP = oneoff
diff --git a/tests/fate/h264.mak b/tests/fate/h264.mak
index 259e1e0ea0..0f18be4888 100644
--- a/tests/fate/h264.mak
+++ b/tests/fate/h264.mak
@@ -127,6 +127,12 @@ FATE_H264 = aud_mw_e \
frext-hpcvflnl_bcrm_a \
frext-hpcvmolq_brcm_b \
frext-hpcvnl_brcm_a \
+ frext-pph10i1_panasonic_a \
+ frext-pph10i2_panasonic_a \
+ frext-pph10i3_panasonic_a \
+ frext-pph10i5_panasonic_a \
+ frext-pph10i6_panasonic_a \
+ frext-pph10i7_panasonic_a \
hcbp2_hhi_a \
hcmp1_hhi_a \
ls_sva_d \
@@ -301,6 +307,12 @@ fate-h264-conformance-frext-hpcvfl_bcrm_a: CMD = framecrc -i $(SAMPLES)/h264-co
fate-h264-conformance-frext-hpcvflnl_bcrm_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/HPCVFLNL_BRCM_A.264 -vsync 0
fate-h264-conformance-frext-hpcvmolq_brcm_b: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/HPCVMOLQ_BRCM_B.264
fate-h264-conformance-frext-hpcvnl_brcm_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/HPCVNL_BRCM_A.264
+fate-h264-conformance-frext-pph10i1_panasonic_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/PPH10I1_Panasonic_A.264 -pix_fmt yuv420p10le
+fate-h264-conformance-frext-pph10i2_panasonic_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/PPH10I2_Panasonic_A.264 -pix_fmt yuv420p10le
+fate-h264-conformance-frext-pph10i3_panasonic_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/PPH10I3_Panasonic_A.264 -pix_fmt yuv420p10le
+fate-h264-conformance-frext-pph10i5_panasonic_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/PPH10I5_Panasonic_A.264 -pix_fmt yuv420p10le
+fate-h264-conformance-frext-pph10i6_panasonic_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/PPH10I6_Panasonic_A.264 -pix_fmt yuv420p10le
+fate-h264-conformance-frext-pph10i7_panasonic_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/PPH10I7_Panasonic_A.264 -pix_fmt yuv420p10le
fate-h264-conformance-hcbp2_hhi_a: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/HCBP2_HHI_A.264
fate-h264-conformance-hcmp1_hhi_a: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/HCMP1_HHI_A.264
fate-h264-conformance-ls_sva_d: CMD = framecrc -i $(SAMPLES)/h264-conformance/LS_SVA_D.264
diff --git a/tests/lavf-regression.sh b/tests/lavf-regression.sh
index b14a58efcd..28f53f78b0 100755
--- a/tests/lavf-regression.sh
+++ b/tests/lavf-regression.sh
@@ -14,15 +14,15 @@ eval do_$test=y
do_lavf()
{
file=${outfile}lavf.$1
- do_ffmpeg $file -t 1 -qscale 10 -f image2 -vcodec pgmyuv -i $raw_src -f s16le -i $pcm_src $2
- do_ffmpeg_crc $file -i $target_path/$file $3
+ do_ffmpeg $file $DEC_OPTS -f image2 -vcodec pgmyuv -i $raw_src $DEC_OPTS -f s16le -i $pcm_src $ENC_OPTS -t 1 -qscale 10 $2
+ do_ffmpeg_crc $file $DEC_OPTS -i $target_path/$file $3
}
do_streamed_images()
{
file=${outfile}${1}pipe.$1
- do_ffmpeg $file -t 1 -qscale 10 -f image2 -vcodec pgmyuv -i $raw_src -f image2pipe
- do_ffmpeg_crc $file -f image2pipe -i $target_path/$file
+ do_ffmpeg $file $DEC_OPTS -f image2 -vcodec pgmyuv -i $raw_src -f image2pipe $ENC_OPTS -t 1 -qscale 10
+ do_ffmpeg_crc $file $DEC_OPTS -f image2pipe -i $target_path/$file
}
do_image_formats()
@@ -30,18 +30,17 @@ do_image_formats()
outfile="$datadir/images/$1/"
mkdir -p "$outfile"
file=${outfile}%02d.$1
- $echov $ffmpeg -t 0.5 -y -qscale 10 -f image2 -vcodec pgmyuv -i $raw_src $2 $3 -flags +bitexact -sws_flags +accurate_rnd+bitexact $target_path/$file
- $ffmpeg -t 0.5 -y -qscale 10 -f image2 -vcodec pgmyuv -i $raw_src $2 $3 -flags +bitexact -sws_flags +accurate_rnd+bitexact $target_path/$file
+ run_ffmpeg $DEC_OPTS -f image2 -vcodec pgmyuv -i $raw_src $2 $ENC_OPTS $3 -t 0.5 -y -qscale 10 $target_path/$file
do_md5sum ${outfile}02.$1 >> $logfile
- do_ffmpeg_crc $file $3 -i $target_path/$file
+ do_ffmpeg_crc $file $DEC_OPTS $3 -i $target_path/$file
wc -c ${outfile}02.$1 >> $logfile
}
do_audio_only()
{
file=${outfile}lavf.$1
- do_ffmpeg $file -t 1 -qscale 10 $2 -f s16le -i $pcm_src $3
- do_ffmpeg_crc $file -i $target_path/$file
+ do_ffmpeg $file $DEC_OPTS $2 -f s16le -i $pcm_src $ENC_OPTS -t 1 -qscale 10 $3
+ do_ffmpeg_crc $file $DEC_OPTS -i $target_path/$file
}
rm -f "$logfile"
@@ -57,7 +56,7 @@ fi
if [ -n "$do_rm" ] ; then
file=${outfile}lavf.rm
-do_ffmpeg $file -t 1 -qscale 10 -f image2 -vcodec pgmyuv -i $raw_src -f s16le -i $pcm_src -acodec ac3_fixed
+do_ffmpeg $file $DEC_OPTS -f image2 -vcodec pgmyuv -i $raw_src $DEC_OPTS -f s16le -i $pcm_src $ENC_OPTS -t 1 -qscale 10 -acodec ac3_fixed
# broken
#do_ffmpeg_crc $file -i $target_path/$file
fi
@@ -128,13 +127,13 @@ fi
if [ -n "$do_gif" ] ; then
file=${outfile}lavf.gif
-do_ffmpeg $file -t 1 -qscale 10 -f image2 -vcodec pgmyuv -i $raw_src -pix_fmt rgb24
-do_ffmpeg_crc $file -i $target_path/$file -pix_fmt rgb24
+do_ffmpeg $file $DEC_OPTS -f image2 -vcodec pgmyuv -i $raw_src $ENC_OPTS -t 1 -qscale 10 -pix_fmt rgb24
+do_ffmpeg_crc $file $DEC_OPTS -i $target_path/$file -pix_fmt rgb24
fi
if [ -n "$do_yuv4mpeg" ] ; then
file=${outfile}lavf.y4m
-do_ffmpeg $file -t 1 -qscale 10 -f image2 -vcodec pgmyuv -i $raw_src
+do_ffmpeg $file $DEC_OPTS -f image2 -vcodec pgmyuv -i $raw_src $ENC_OPTS -t 1 -qscale 10
#do_ffmpeg_crc $file -i $target_path/$file
fi
@@ -169,7 +168,7 @@ do_image_formats sgi
fi
if [ -n "$do_jpg" ] ; then
-do_image_formats jpg "-flags +bitexact -dct fastint -idct simple -pix_fmt yuvj420p" "-f image2"
+do_image_formats jpg "-pix_fmt yuvj420p" "-f image2"
fi
if [ -n "$do_pcx" ] ; then
@@ -228,9 +227,9 @@ conversions="yuv420p yuv422p yuv444p yuyv422 yuv410p yuv411p yuvj420p \
monob yuv440p yuvj440p"
for pix_fmt in $conversions ; do
file=${outfile}${pix_fmt}.yuv
- do_ffmpeg_nocheck $file -r 1 -t 1 -f image2 -vcodec pgmyuv -i $raw_src \
- -f rawvideo -s 352x288 -pix_fmt $pix_fmt $target_path/$raw_dst
- do_ffmpeg $file -f rawvideo -s 352x288 -pix_fmt $pix_fmt -i $target_path/$raw_dst \
- -f rawvideo -s 352x288 -pix_fmt yuv444p
+ do_ffmpeg_nocheck $file $DEC_OPTS -r 1 -t 1 -f image2 -vcodec pgmyuv -i $raw_src \
+ $ENC_OPTS -f rawvideo -s 352x288 -pix_fmt $pix_fmt $target_path/$raw_dst
+ do_ffmpeg $file $DEC_OPTS -f rawvideo -s 352x288 -pix_fmt $pix_fmt -i $target_path/$raw_dst \
+ $ENC_OPTS -f rawvideo -s 352x288 -pix_fmt yuv444p
done
fi
diff --git a/tests/lavfi-regression.sh b/tests/lavfi-regression.sh
index ed13f7050a..129358090e 100755
--- a/tests/lavfi-regression.sh
+++ b/tests/lavfi-regression.sh
@@ -19,8 +19,8 @@ do_video_filter() {
filters=$2
shift 2
printf '%-20s' $label >>$logfile
- run_ffmpeg -f image2 -vcodec pgmyuv -i $raw_src \
- -vf "$filters" -vcodec rawvideo $* -f nut md5: >>$logfile
+ run_ffmpeg $DEC_OPTS -f image2 -vcodec pgmyuv -i $raw_src \
+ $ENC_OPTS -vf "$filters" -vcodec rawvideo $* -f nut md5: >>$logfile
}
do_lavfi() {
@@ -55,7 +55,7 @@ do_lavfi_pixfmts(){
$ffmpeg -pix_fmts list 2>/dev/null | sed -ne '9,$p' | grep '^\..\.' | cut -d' ' -f2 | sort >$exclude_fmts
$showfiltfmts scale | awk -F '[ \r]' '/^OUTPUT/{ print $3 }' | sort | comm -23 - $exclude_fmts >$out_fmts
- pix_fmts=$($showfiltfmts $filter | awk -F '[ \r]' '/^INPUT/{ print $3 }' | sort | comm -12 - $out_fmts)
+ pix_fmts=$($showfiltfmts $filter $filter_args | awk -F '[ \r]' '/^INPUT/{ print $3 }' | sort | comm -12 - $out_fmts)
for pix_fmt in $pix_fmts; do
do_video_filter $pix_fmt "slicify=random,format=$pix_fmt,$filter=$filter_args" -pix_fmt $pix_fmt
done
diff --git a/tests/ref/acodec/adpcm_ima_qt b/tests/ref/acodec/adpcm_ima_qt
index 6e4415660e..cdd60e06b9 100644
--- a/tests/ref/acodec/adpcm_ima_qt
+++ b/tests/ref/acodec/adpcm_ima_qt
@@ -1,4 +1,4 @@
-3c06fd2f7831e3e8735b936e23ca220c *./tests/data/acodec/adpcm_qt.aiff
+057d27978b35888776512e4e9669a63b *./tests/data/acodec/adpcm_qt.aiff
281252 ./tests/data/acodec/adpcm_qt.aiff
-9580492803ba1c1a3746367b24b751c8 *./tests/data/adpcm_ima_qt.acodec.out.wav
-stddev: 914.65 PSNR: 37.10 MAXDIFF:34026 bytes: 1058560/ 1058400
+3890343c0c20934e014d7ac93f5d65bd *./tests/data/adpcm_ima_qt.acodec.out.wav
+stddev: 918.61 PSNR: 37.07 MAXDIFF:34029 bytes: 1058560/ 1058400
diff --git a/tests/ref/fate/h264-conformance-frext-pph10i1_panasonic_a b/tests/ref/fate/h264-conformance-frext-pph10i1_panasonic_a
new file mode 100644
index 0000000000..1cfc313e32
--- /dev/null
+++ b/tests/ref/fate/h264-conformance-frext-pph10i1_panasonic_a
@@ -0,0 +1,10 @@
+0, 0, 2764800, 0xcc4df07d
+0, 3600, 2764800, 0x85f9e6d4
+0, 7200, 2764800, 0x23ffe90d
+0, 10800, 2764800, 0xf0a6d453
+0, 14400, 2764800, 0x913a6392
+0, 18000, 2764800, 0xcc5f9736
+0, 21600, 2764800, 0x43f9f9ce
+0, 25200, 2764800, 0xc874b44e
+0, 28800, 2764800, 0x83b665e6
+0, 32400, 2764800, 0x5ea2e31e
diff --git a/tests/ref/fate/h264-conformance-frext-pph10i2_panasonic_a b/tests/ref/fate/h264-conformance-frext-pph10i2_panasonic_a
new file mode 100644
index 0000000000..274bdaf2b1
--- /dev/null
+++ b/tests/ref/fate/h264-conformance-frext-pph10i2_panasonic_a
@@ -0,0 +1,10 @@
+0, 0, 2764800, 0x4f710132
+0, 3600, 2764800, 0x57e5b713
+0, 7200, 2764800, 0xcca01477
+0, 10800, 2764800, 0xa19a95cd
+0, 14400, 2764800, 0x700a757d
+0, 18000, 2764800, 0xd8c6f60f
+0, 21600, 2764800, 0x95a1bbc7
+0, 25200, 2764800, 0x0582077a
+0, 28800, 2764800, 0x91595f91
+0, 32400, 2764800, 0xf5fe034a
diff --git a/tests/ref/fate/h264-conformance-frext-pph10i3_panasonic_a b/tests/ref/fate/h264-conformance-frext-pph10i3_panasonic_a
new file mode 100644
index 0000000000..195e45a67b
--- /dev/null
+++ b/tests/ref/fate/h264-conformance-frext-pph10i3_panasonic_a
@@ -0,0 +1,10 @@
+0, 0, 2764800, 0xda69f69e
+0, 3600, 2764800, 0x29ed832f
+0, 7200, 2764800, 0xb3244cc4
+0, 10800, 2764800, 0xe41a312c
+0, 14400, 2764800, 0xac0b344b
+0, 18000, 2764800, 0xc585aa20
+0, 21600, 2764800, 0x0952054c
+0, 25200, 2764800, 0xd1a02f87
+0, 28800, 2764800, 0xfcbfe87c
+0, 32400, 2764800, 0xe4e9b8a2
diff --git a/tests/ref/fate/h264-conformance-frext-pph10i4_panasonic_a b/tests/ref/fate/h264-conformance-frext-pph10i4_panasonic_a
new file mode 100644
index 0000000000..d351a7eb1f
--- /dev/null
+++ b/tests/ref/fate/h264-conformance-frext-pph10i4_panasonic_a
@@ -0,0 +1,19 @@
+0, 0, 6220800, 0xca2a2a5e
+0, 3600, 6220800, 0x8009a65e
+0, 7200, 6220800, 0x63e72b3b
+0, 10800, 6220800, 0x7459a1cc
+0, 14400, 6220800, 0x02191aa9
+0, 18000, 6220800, 0x88dca590
+0, 21600, 6220800, 0x56dd150a
+0, 25200, 6220800, 0x5f56a56f
+0, 28800, 6220800, 0x67ada4b7
+0, 32400, 6220800, 0x88dca590
+0, 36000, 6220800, 0xd3b09fe5
+0, 39600, 6220800, 0x2223998c
+0, 43200, 6220800, 0x5e5b2da5
+0, 46800, 6220800, 0x88dca590
+0, 50400, 6220800, 0x5e5b2da5
+0, 54000, 6220800, 0x88dca590
+0, 57600, 6220800, 0x5e5b2da5
+0, 61200, 6220800, 0x88dca590
+0, 64800, 6220800, 0x26e1ec8b
diff --git a/tests/ref/fate/h264-conformance-frext-pph10i5_panasonic_a b/tests/ref/fate/h264-conformance-frext-pph10i5_panasonic_a
new file mode 100644
index 0000000000..1afbac01e7
--- /dev/null
+++ b/tests/ref/fate/h264-conformance-frext-pph10i5_panasonic_a
@@ -0,0 +1,10 @@
+0, 0, 6220800, 0x1df58ce9
+0, 3600, 6220800, 0x8f2859ce
+0, 7200, 6220800, 0x229cc7ff
+0, 10800, 6220800, 0x73e86984
+0, 14400, 6220800, 0xb6d4504b
+0, 18000, 6220800, 0x4e7d4883
+0, 21600, 6220800, 0xbec3f0f7
+0, 25200, 6220800, 0x1d9af065
+0, 28800, 6220800, 0x44851549
+0, 32400, 6220800, 0xfcf8728e
diff --git a/tests/ref/fate/h264-conformance-frext-pph10i6_panasonic_a b/tests/ref/fate/h264-conformance-frext-pph10i6_panasonic_a
new file mode 100644
index 0000000000..6d105466c9
--- /dev/null
+++ b/tests/ref/fate/h264-conformance-frext-pph10i6_panasonic_a
@@ -0,0 +1,10 @@
+0, 0, 6220800, 0x408daf70
+0, 3600, 6220800, 0x59b254a3
+0, 7200, 6220800, 0x4cf4279c
+0, 10800, 6220800, 0x5c9437ae
+0, 14400, 6220800, 0x986c3eb8
+0, 18000, 6220800, 0x23fd883e
+0, 21600, 6220800, 0x84f222fe
+0, 25200, 6220800, 0xe7f91107
+0, 28800, 6220800, 0xb544b31e
+0, 32400, 6220800, 0x1ebdde56
diff --git a/tests/ref/fate/h264-conformance-frext-pph10i7_panasonic_a b/tests/ref/fate/h264-conformance-frext-pph10i7_panasonic_a
new file mode 100644
index 0000000000..28825446f9
--- /dev/null
+++ b/tests/ref/fate/h264-conformance-frext-pph10i7_panasonic_a
@@ -0,0 +1,10 @@
+0, 0, 6220800, 0xf81873fe
+0, 3600, 6220800, 0x7b96fbdc
+0, 7200, 6220800, 0x75dbafc4
+0, 10800, 6220800, 0x7524301e
+0, 14400, 6220800, 0x0f3621ab
+0, 18000, 6220800, 0xa5e25b35
+0, 21600, 6220800, 0x063a8116
+0, 25200, 6220800, 0x48ebc8ff
+0, 28800, 6220800, 0x1f635df8
+0, 32400, 6220800, 0xe282c8bd
diff --git a/tests/ref/fate/qt-ima4-mono b/tests/ref/fate/qt-ima4-mono
index 66767d5d30..b8fc5f99de 100644
--- a/tests/ref/fate/qt-ima4-mono
+++ b/tests/ref/fate/qt-ima4-mono
@@ -1 +1 @@
-721b51fd66c3bb3dc49dd88d404188eb
+e178ed520edf2f46492ae740d88f5815
diff --git a/tests/ref/fate/qt-ima4-stereo b/tests/ref/fate/qt-ima4-stereo
index 5e6b1237d5..84c9f46d02 100644
--- a/tests/ref/fate/qt-ima4-stereo
+++ b/tests/ref/fate/qt-ima4-stereo
@@ -1 +1 @@
-c9e4c21fb62eca34a533f3a9ad2e394a
+d22be0e193dcbba1068a1ca6ab04cf77
diff --git a/tests/ref/fate/v210 b/tests/ref/fate/v210
index 8cbf7df787..6a551a56b5 100644
--- a/tests/ref/fate/v210
+++ b/tests/ref/fate/v210
@@ -1 +1 @@
-0, 0, 3686400, 0x8d5c3847
+0, 0, 3686400, 0x75ee1dde
diff --git a/tests/ref/fate/vc1 b/tests/ref/fate/vc1
index 69e9b4ad64..901f81a97f 100644
--- a/tests/ref/fate/vc1
+++ b/tests/ref/fate/vc1
@@ -1,15 +1,15 @@
-0, 0, 38016, 0xf4715db5
-0, 3600, 38016, 0xf4715db5
-0, 7200, 38016, 0xf4715db5
-0, 10800, 38016, 0xf46af0e1
-0, 14400, 38016, 0x9c1c2cf1
-0, 18000, 38016, 0xff12d87f
-0, 21600, 38016, 0x7408432b
-0, 25200, 38016, 0x7408432b
-0, 28800, 38016, 0x8d11479a
-0, 32400, 38016, 0x8d11479a
-0, 36000, 38016, 0xc4a121ab
-0, 39600, 38016, 0xc4a121ab
-0, 43200, 38016, 0xc4a121ab
-0, 46800, 38016, 0xc4a121ab
-0, 50400, 38016, 0xc4a121ab
+0, 0, 38016, 0xa6f15db5
+0, 3600, 38016, 0xa6f15db5
+0, 7200, 38016, 0xa6f15db5
+0, 10800, 38016, 0x5c4ef0e7
+0, 14400, 38016, 0x53a42d1d
+0, 18000, 38016, 0x68f7d89e
+0, 21600, 38016, 0xc15f4368
+0, 25200, 38016, 0xc15f4368
+0, 28800, 38016, 0xd1bd47a8
+0, 32400, 38016, 0xd1bd47a8
+0, 36000, 38016, 0xe1e821ca
+0, 39600, 38016, 0xe1e821ca
+0, 43200, 38016, 0xe1e821ca
+0, 46800, 38016, 0xe1e821ca
+0, 50400, 38016, 0xe1e821ca
diff --git a/tests/ref/lavfi/pixdesc_be b/tests/ref/lavfi/pixdesc_be
index c5e849d28d..afea41d1a1 100644
--- a/tests/ref/lavfi/pixdesc_be
+++ b/tests/ref/lavfi/pixdesc_be
@@ -27,9 +27,14 @@ uyvy422 adcf64516a19fce44df77082bdb16291
yuv410p 2d9225153c83ee1132397d619d94d1b3
yuv411p 8b298af3e43348ca1b11eb8a3252ac6c
yuv420p eba2f135a08829387e2f698ff72a2939
+yuv420p10be 7605e266c088d0fcf68c7b27c3ceff5f
+yuv420p10le 4228ee628c6deec123a13b9784516cc7
yuv420p16be 16c009a235cd52b74791a895423152a3
yuv420p16le 2d59c4f1d0314a5a957a7cfc4b6fabcc
+yuv420p9be ce880fa07830e5297c22acf6e20555ce
+yuv420p9le 16543fda8f87d94a6cf857d2e8d4461a
yuv422p c9bba4529821d796a6ab09f6a5fd355a
+yuv422p10be bdc13b630fd668b34c6fe1aae28dfc71
yuv422p16be 5499502e1c29534a158a1fe60e889f60
yuv422p16le e3d61fde6978591596bc36b914386623
yuv440p 5a064afe2b453bb52cdb3f176b1aa1cf
diff --git a/tests/ref/lavfi/pixdesc_le b/tests/ref/lavfi/pixdesc_le
index 30adb2dd1d..a96635d508 100644
--- a/tests/ref/lavfi/pixdesc_le
+++ b/tests/ref/lavfi/pixdesc_le
@@ -27,9 +27,14 @@ uyvy422 adcf64516a19fce44df77082bdb16291
yuv410p 2d9225153c83ee1132397d619d94d1b3
yuv411p 8b298af3e43348ca1b11eb8a3252ac6c
yuv420p eba2f135a08829387e2f698ff72a2939
+yuv420p10be 7605e266c088d0fcf68c7b27c3ceff5f
+yuv420p10le 4228ee628c6deec123a13b9784516cc7
yuv420p16be 16c009a235cd52b74791a895423152a3
yuv420p16le 2d59c4f1d0314a5a957a7cfc4b6fabcc
+yuv420p9be ce880fa07830e5297c22acf6e20555ce
+yuv420p9le 16543fda8f87d94a6cf857d2e8d4461a
yuv422p c9bba4529821d796a6ab09f6a5fd355a
+yuv422p10le d0607c260a45c973e6639f4e449730ad
yuv422p16be 5499502e1c29534a158a1fe60e889f60
yuv422p16le e3d61fde6978591596bc36b914386623
yuv440p 5a064afe2b453bb52cdb3f176b1aa1cf
diff --git a/tests/ref/lavfi/pixfmts_copy_le b/tests/ref/lavfi/pixfmts_copy_le
index 30adb2dd1d..a96635d508 100644
--- a/tests/ref/lavfi/pixfmts_copy_le
+++ b/tests/ref/lavfi/pixfmts_copy_le
@@ -27,9 +27,14 @@ uyvy422 adcf64516a19fce44df77082bdb16291
yuv410p 2d9225153c83ee1132397d619d94d1b3
yuv411p 8b298af3e43348ca1b11eb8a3252ac6c
yuv420p eba2f135a08829387e2f698ff72a2939
+yuv420p10be 7605e266c088d0fcf68c7b27c3ceff5f
+yuv420p10le 4228ee628c6deec123a13b9784516cc7
yuv420p16be 16c009a235cd52b74791a895423152a3
yuv420p16le 2d59c4f1d0314a5a957a7cfc4b6fabcc
+yuv420p9be ce880fa07830e5297c22acf6e20555ce
+yuv420p9le 16543fda8f87d94a6cf857d2e8d4461a
yuv422p c9bba4529821d796a6ab09f6a5fd355a
+yuv422p10le d0607c260a45c973e6639f4e449730ad
yuv422p16be 5499502e1c29534a158a1fe60e889f60
yuv422p16le e3d61fde6978591596bc36b914386623
yuv440p 5a064afe2b453bb52cdb3f176b1aa1cf
diff --git a/tests/ref/lavfi/pixfmts_null_le b/tests/ref/lavfi/pixfmts_null_le
index 30adb2dd1d..a96635d508 100644
--- a/tests/ref/lavfi/pixfmts_null_le
+++ b/tests/ref/lavfi/pixfmts_null_le
@@ -27,9 +27,14 @@ uyvy422 adcf64516a19fce44df77082bdb16291
yuv410p 2d9225153c83ee1132397d619d94d1b3
yuv411p 8b298af3e43348ca1b11eb8a3252ac6c
yuv420p eba2f135a08829387e2f698ff72a2939
+yuv420p10be 7605e266c088d0fcf68c7b27c3ceff5f
+yuv420p10le 4228ee628c6deec123a13b9784516cc7
yuv420p16be 16c009a235cd52b74791a895423152a3
yuv420p16le 2d59c4f1d0314a5a957a7cfc4b6fabcc
+yuv420p9be ce880fa07830e5297c22acf6e20555ce
+yuv420p9le 16543fda8f87d94a6cf857d2e8d4461a
yuv422p c9bba4529821d796a6ab09f6a5fd355a
+yuv422p10le d0607c260a45c973e6639f4e449730ad
yuv422p16be 5499502e1c29534a158a1fe60e889f60
yuv422p16le e3d61fde6978591596bc36b914386623
yuv440p 5a064afe2b453bb52cdb3f176b1aa1cf
diff --git a/tests/ref/lavfi/pixfmts_scale_le b/tests/ref/lavfi/pixfmts_scale_le
index fc8a07f4af..5c32a363c8 100644
--- a/tests/ref/lavfi/pixfmts_scale_le
+++ b/tests/ref/lavfi/pixfmts_scale_le
@@ -27,9 +27,14 @@ uyvy422 314bd486277111a95d9369b944fa0400
yuv410p 7df8f6d69b56a8dcb6c7ee908e5018b5
yuv411p 1143e7c5cc28fe0922b051b17733bc4c
yuv420p fdad2d8df8985e3d17e73c71f713cb14
+yuv420p10be 6d335e75b553da590135cf8bb999610c
+yuv420p10le d510ddbabefd03ef39ec943fcb51b709
yuv420p16be 29a0265764530070f5cd3251cc01f66a
yuv420p16le 6f3a265b084a78baec229238d9f7945f
+yuv420p9be ec4983b7a949c0472110a7a2c58e278a
+yuv420p9le c136dce5913a722eee44ab72cff664b2
yuv422p 918e37701ee7377d16a8a6c119c56a40
+yuv422p10le aeb0ef08a883f43429ca9d886d8fc095
yuv422p16be ef3e865fc1d0c68977c735323c50af6e
yuv422p16le 428a9b96214c09cb5a983ce36d6961ff
yuv440p 461503fdb9b90451020aa3b25ddf041c
diff --git a/tests/ref/lavfi/pixfmts_vflip_le b/tests/ref/lavfi/pixfmts_vflip_le
index dc2e7402f2..3029d2d550 100644
--- a/tests/ref/lavfi/pixfmts_vflip_le
+++ b/tests/ref/lavfi/pixfmts_vflip_le
@@ -27,9 +27,14 @@ uyvy422 ffbd36720c77398d9a0d03ce2625928f
yuv410p 7bfb39d7afb49d6a6173e6b23ae321eb
yuv411p 4a90048cc3a65fac150e53289700efe1
yuv420p 2e6d6062e8cad37fb3ab2c433b55f382
+yuv420p10be df97d20b3b4a10c174d4360552c4160d
+yuv420p10le 4b5249208602b941332945c926f80ae9
yuv420p16be 539076782902664a8acf381bf4f713e8
yuv420p16le 0f609e588e5a258644ef85170d70e030
+yuv420p9be be40ec975fb2873891643cbbbddbc3b0
+yuv420p9le 7e606310d3f5ff12badf911e8f333471
yuv422p d7f5cb44d9b0210d66d6a8762640ab34
+yuv422p10le 11b57f2bd9661024153f3973b9090cdb
yuv422p16be 9bd8f8c961822b586fa4cf992be54acc
yuv422p16le 9c4a1239605c7952b736ac3130163f14
yuv440p 876385e96165acf51271b20e5d85a416
diff --git a/tests/ref/seek/dv411_dv b/tests/ref/seek/dv411_dv
index baaeefb2cf..d318794157 100644
--- a/tests/ref/seek/dv411_dv
+++ b/tests/ref/seek/dv411_dv
@@ -1,53 +1,53 @@
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
ret: 0 st:-1 flags:0 ts:-1.000000
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
ret: 0 st:-1 flags:1 ts: 1.894167
-ret: 0 st: 0 flags:1 dts: 1.880000 pts: 1.880000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 1.880000 pts: 1.880000 pos:6768000 size:144000
ret: 0 st: 0 flags:0 ts: 0.800000
-ret: 0 st: 0 flags:1 dts: 0.800000 pts: 0.800000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.800000 pts: 0.800000 pos:2880000 size:144000
ret: 0 st: 0 flags:1 ts:-0.320000
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
ret: 0 st:-1 flags:0 ts: 2.576668
-ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos:7056000 size:144000
ret: 0 st:-1 flags:1 ts: 1.470835
-ret: 0 st: 0 flags:1 dts: 1.480000 pts: 1.480000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 1.480000 pts: 1.480000 pos:5328000 size:144000
ret: 0 st: 0 flags:0 ts: 0.360000
-ret: 0 st: 0 flags:1 dts: 0.360000 pts: 0.360000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.360000 pts: 0.360000 pos:1296000 size:144000
ret: 0 st: 0 flags:1 ts:-0.760000
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
ret: 0 st:-1 flags:0 ts: 2.153336
-ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos:7056000 size:144000
ret: 0 st:-1 flags:1 ts: 1.047503
-ret: 0 st: 0 flags:1 dts: 1.040000 pts: 1.040000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 1.040000 pts: 1.040000 pos:3744000 size:144000
ret: 0 st: 0 flags:0 ts:-0.040000
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
ret: 0 st: 0 flags:1 ts: 2.840000
-ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos:7056000 size:144000
ret: 0 st:-1 flags:0 ts: 1.730004
-ret: 0 st: 0 flags:1 dts: 1.720000 pts: 1.720000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 1.720000 pts: 1.720000 pos:6192000 size:144000
ret: 0 st:-1 flags:1 ts: 0.624171
-ret: 0 st: 0 flags:1 dts: 0.640000 pts: 0.640000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.640000 pts: 0.640000 pos:2304000 size:144000
ret: 0 st: 0 flags:0 ts:-0.480000
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
ret: 0 st: 0 flags:1 ts: 2.400000
-ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos:7056000 size:144000
ret: 0 st:-1 flags:0 ts: 1.306672
-ret: 0 st: 0 flags:1 dts: 1.320000 pts: 1.320000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 1.320000 pts: 1.320000 pos:4752000 size:144000
ret: 0 st:-1 flags:1 ts: 0.200839
-ret: 0 st: 0 flags:1 dts: 0.200000 pts: 0.200000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.200000 pts: 0.200000 pos: 720000 size:144000
ret: 0 st: 0 flags:0 ts:-0.920000
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
ret: 0 st: 0 flags:1 ts: 2.000000
-ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos:7056000 size:144000
ret: 0 st:-1 flags:0 ts: 0.883340
-ret: 0 st: 0 flags:1 dts: 0.880000 pts: 0.880000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.880000 pts: 0.880000 pos:3168000 size:144000
ret: 0 st:-1 flags:1 ts:-0.222493
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
ret: 0 st: 0 flags:0 ts: 2.680000
-ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos:7056000 size:144000
ret: 0 st: 0 flags:1 ts: 1.560000
-ret: 0 st: 0 flags:1 dts: 1.560000 pts: 1.560000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 1.560000 pts: 1.560000 pos:5616000 size:144000
ret: 0 st:-1 flags:0 ts: 0.460008
-ret: 0 st: 0 flags:1 dts: 0.480000 pts: 0.480000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.480000 pts: 0.480000 pos:1728000 size:144000
ret: 0 st:-1 flags:1 ts:-0.645825
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
diff --git a/tests/ref/seek/dv50_dv b/tests/ref/seek/dv50_dv
index 1f07e5b71f..fae6d1b225 100644
--- a/tests/ref/seek/dv50_dv
+++ b/tests/ref/seek/dv50_dv
@@ -1,53 +1,53 @@
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:288000
ret: 0 st:-1 flags:0 ts:-1.000000
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:288000
ret: 0 st:-1 flags:1 ts: 1.894167
-ret: 0 st: 0 flags:1 dts: 1.880000 pts: 1.880000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 1.880000 pts: 1.880000 pos:13536000 size:288000
ret: 0 st: 0 flags:0 ts: 0.800000
-ret: 0 st: 0 flags:1 dts: 0.800000 pts: 0.800000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 0.800000 pts: 0.800000 pos:5760000 size:288000
ret: 0 st: 0 flags:1 ts:-0.320000
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:288000
ret: 0 st:-1 flags:0 ts: 2.576668
-ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos:14112000 size:288000
ret: 0 st:-1 flags:1 ts: 1.470835
-ret: 0 st: 0 flags:1 dts: 1.480000 pts: 1.480000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 1.480000 pts: 1.480000 pos:10656000 size:288000
ret: 0 st: 0 flags:0 ts: 0.360000
-ret: 0 st: 0 flags:1 dts: 0.360000 pts: 0.360000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 0.360000 pts: 0.360000 pos:2592000 size:288000
ret: 0 st: 0 flags:1 ts:-0.760000
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:288000
ret: 0 st:-1 flags:0 ts: 2.153336
-ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos:14112000 size:288000
ret: 0 st:-1 flags:1 ts: 1.047503
-ret: 0 st: 0 flags:1 dts: 1.040000 pts: 1.040000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 1.040000 pts: 1.040000 pos:7488000 size:288000
ret: 0 st: 0 flags:0 ts:-0.040000
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:288000
ret: 0 st: 0 flags:1 ts: 2.840000
-ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos:14112000 size:288000
ret: 0 st:-1 flags:0 ts: 1.730004
-ret: 0 st: 0 flags:1 dts: 1.720000 pts: 1.720000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 1.720000 pts: 1.720000 pos:12384000 size:288000
ret: 0 st:-1 flags:1 ts: 0.624171
-ret: 0 st: 0 flags:1 dts: 0.640000 pts: 0.640000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 0.640000 pts: 0.640000 pos:4608000 size:288000
ret: 0 st: 0 flags:0 ts:-0.480000
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:288000
ret: 0 st: 0 flags:1 ts: 2.400000
-ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos:14112000 size:288000
ret: 0 st:-1 flags:0 ts: 1.306672
-ret: 0 st: 0 flags:1 dts: 1.320000 pts: 1.320000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 1.320000 pts: 1.320000 pos:9504000 size:288000
ret: 0 st:-1 flags:1 ts: 0.200839
-ret: 0 st: 0 flags:1 dts: 0.200000 pts: 0.200000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 0.200000 pts: 0.200000 pos:1440000 size:288000
ret: 0 st: 0 flags:0 ts:-0.920000
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:288000
ret: 0 st: 0 flags:1 ts: 2.000000
-ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos:14112000 size:288000
ret: 0 st:-1 flags:0 ts: 0.883340
-ret: 0 st: 0 flags:1 dts: 0.880000 pts: 0.880000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 0.880000 pts: 0.880000 pos:6336000 size:288000
ret: 0 st:-1 flags:1 ts:-0.222493
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:288000
ret: 0 st: 0 flags:0 ts: 2.680000
-ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos:14112000 size:288000
ret: 0 st: 0 flags:1 ts: 1.560000
-ret: 0 st: 0 flags:1 dts: 1.560000 pts: 1.560000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 1.560000 pts: 1.560000 pos:11232000 size:288000
ret: 0 st:-1 flags:0 ts: 0.460008
-ret: 0 st: 0 flags:1 dts: 0.480000 pts: 0.480000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 0.480000 pts: 0.480000 pos:3456000 size:288000
ret: 0 st:-1 flags:1 ts:-0.645825
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:288000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:288000
diff --git a/tests/ref/seek/dv_dv b/tests/ref/seek/dv_dv
index baaeefb2cf..d318794157 100644
--- a/tests/ref/seek/dv_dv
+++ b/tests/ref/seek/dv_dv
@@ -1,53 +1,53 @@
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
ret: 0 st:-1 flags:0 ts:-1.000000
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
ret: 0 st:-1 flags:1 ts: 1.894167
-ret: 0 st: 0 flags:1 dts: 1.880000 pts: 1.880000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 1.880000 pts: 1.880000 pos:6768000 size:144000
ret: 0 st: 0 flags:0 ts: 0.800000
-ret: 0 st: 0 flags:1 dts: 0.800000 pts: 0.800000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.800000 pts: 0.800000 pos:2880000 size:144000
ret: 0 st: 0 flags:1 ts:-0.320000
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
ret: 0 st:-1 flags:0 ts: 2.576668
-ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos:7056000 size:144000
ret: 0 st:-1 flags:1 ts: 1.470835
-ret: 0 st: 0 flags:1 dts: 1.480000 pts: 1.480000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 1.480000 pts: 1.480000 pos:5328000 size:144000
ret: 0 st: 0 flags:0 ts: 0.360000
-ret: 0 st: 0 flags:1 dts: 0.360000 pts: 0.360000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.360000 pts: 0.360000 pos:1296000 size:144000
ret: 0 st: 0 flags:1 ts:-0.760000
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
ret: 0 st:-1 flags:0 ts: 2.153336
-ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos:7056000 size:144000
ret: 0 st:-1 flags:1 ts: 1.047503
-ret: 0 st: 0 flags:1 dts: 1.040000 pts: 1.040000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 1.040000 pts: 1.040000 pos:3744000 size:144000
ret: 0 st: 0 flags:0 ts:-0.040000
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
ret: 0 st: 0 flags:1 ts: 2.840000
-ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos:7056000 size:144000
ret: 0 st:-1 flags:0 ts: 1.730004
-ret: 0 st: 0 flags:1 dts: 1.720000 pts: 1.720000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 1.720000 pts: 1.720000 pos:6192000 size:144000
ret: 0 st:-1 flags:1 ts: 0.624171
-ret: 0 st: 0 flags:1 dts: 0.640000 pts: 0.640000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.640000 pts: 0.640000 pos:2304000 size:144000
ret: 0 st: 0 flags:0 ts:-0.480000
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
ret: 0 st: 0 flags:1 ts: 2.400000
-ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos:7056000 size:144000
ret: 0 st:-1 flags:0 ts: 1.306672
-ret: 0 st: 0 flags:1 dts: 1.320000 pts: 1.320000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 1.320000 pts: 1.320000 pos:4752000 size:144000
ret: 0 st:-1 flags:1 ts: 0.200839
-ret: 0 st: 0 flags:1 dts: 0.200000 pts: 0.200000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.200000 pts: 0.200000 pos: 720000 size:144000
ret: 0 st: 0 flags:0 ts:-0.920000
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
ret: 0 st: 0 flags:1 ts: 2.000000
-ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos:7056000 size:144000
ret: 0 st:-1 flags:0 ts: 0.883340
-ret: 0 st: 0 flags:1 dts: 0.880000 pts: 0.880000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.880000 pts: 0.880000 pos:3168000 size:144000
ret: 0 st:-1 flags:1 ts:-0.222493
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
ret: 0 st: 0 flags:0 ts: 2.680000
-ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 1.960000 pts: 1.960000 pos:7056000 size:144000
ret: 0 st: 0 flags:1 ts: 1.560000
-ret: 0 st: 0 flags:1 dts: 1.560000 pts: 1.560000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 1.560000 pts: 1.560000 pos:5616000 size:144000
ret: 0 st:-1 flags:0 ts: 0.460008
-ret: 0 st: 0 flags:1 dts: 0.480000 pts: 0.480000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.480000 pts: 0.480000 pos:1728000 size:144000
ret: 0 st:-1 flags:1 ts:-0.645825
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
diff --git a/tests/ref/seek/lavf_dv b/tests/ref/seek/lavf_dv
index 3c49749a6b..0000ff5abe 100644
--- a/tests/ref/seek/lavf_dv
+++ b/tests/ref/seek/lavf_dv
@@ -1,53 +1,53 @@
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
ret: 0 st:-1 flags:0 ts:-1.000000
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
ret: 0 st:-1 flags:1 ts: 1.894167
-ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos:3456000 size:144000
ret: 0 st: 0 flags:0 ts: 0.800000
-ret: 0 st: 0 flags:1 dts: 0.800000 pts: 0.800000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.800000 pts: 0.800000 pos:2880000 size:144000
ret: 0 st: 0 flags:1 ts:-0.320000
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
ret: 0 st: 1 flags:0 ts: 2.576667
-ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos:3456000 size:144000
ret: 0 st: 1 flags:1 ts: 1.470833
-ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos:3456000 size:144000
ret: 0 st:-1 flags:0 ts: 0.365002
-ret: 0 st: 0 flags:1 dts: 0.360000 pts: 0.360000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.360000 pts: 0.360000 pos:1296000 size:144000
ret: 0 st:-1 flags:1 ts:-0.740831
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
ret: 0 st: 0 flags:0 ts: 2.160000
-ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos:3456000 size:144000
ret: 0 st: 0 flags:1 ts: 1.040000
-ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos:3456000 size:144000
ret: 0 st: 1 flags:0 ts:-0.058333
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
ret: 0 st: 1 flags:1 ts: 2.835833
-ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos:3456000 size:144000
ret: 0 st:-1 flags:0 ts: 1.730004
-ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos:3456000 size:144000
ret: 0 st:-1 flags:1 ts: 0.624171
-ret: 0 st: 0 flags:1 dts: 0.640000 pts: 0.640000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.640000 pts: 0.640000 pos:2304000 size:144000
ret: 0 st: 0 flags:0 ts:-0.480000
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
ret: 0 st: 0 flags:1 ts: 2.400000
-ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos:3456000 size:144000
ret: 0 st: 1 flags:0 ts: 1.306667
-ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos:3456000 size:144000
ret: 0 st: 1 flags:1 ts: 0.200833
-ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos:3456000 size:144000
ret: 0 st:-1 flags:0 ts:-0.904994
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
ret: 0 st:-1 flags:1 ts: 1.989173
-ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos:3456000 size:144000
ret: 0 st: 0 flags:0 ts: 0.880000
-ret: 0 st: 0 flags:1 dts: 0.880000 pts: 0.880000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.880000 pts: 0.880000 pos:3168000 size:144000
ret: 0 st: 0 flags:1 ts:-0.240000
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
ret: 0 st: 1 flags:0 ts: 2.671667
-ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos:3456000 size:144000
ret: 0 st: 1 flags:1 ts: 1.565833
-ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.960000 pts: 0.960000 pos:3456000 size:144000
ret: 0 st:-1 flags:0 ts: 0.460008
-ret: 0 st: 0 flags:1 dts: 0.480000 pts: 0.480000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.480000 pts: 0.480000 pos:1728000 size:144000
ret: 0 st:-1 flags:1 ts:-0.645825
-ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: -1 size:144000
+ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 0 size:144000
diff --git a/tests/ref/vsynth1/error b/tests/ref/vsynth1/error
index c3543f9d36..4d1e9e5b0c 100644
--- a/tests/ref/vsynth1/error
+++ b/tests/ref/vsynth1/error
@@ -1,4 +1,4 @@
7416dfd319f04044d4575dc9d1b406e1 *./tests/data/vsynth1/error-mpeg4-adv.avi
756836 ./tests/data/vsynth1/error-mpeg4-adv.avi
-ef8bfcd6e0883daba95d0f32486ebe2d *./tests/data/error.vsynth1.out.yuv
-stddev: 18.05 PSNR: 23.00 MAXDIFF: 245 bytes: 7603200/ 7603200
+79e94ba32b37759397362cbcb479d4d3 *./tests/data/error.vsynth1.out.yuv
+stddev: 18.36 PSNR: 22.85 MAXDIFF: 243 bytes: 7603200/ 7603200
diff --git a/tests/ref/vsynth1/msvideo1 b/tests/ref/vsynth1/msvideo1
new file mode 100644
index 0000000000..479234d846
--- /dev/null
+++ b/tests/ref/vsynth1/msvideo1
@@ -0,0 +1,4 @@
+267a152a73cbc5ac4694a6e3b254be34 *./tests/data/vsynth1/msvideo1.avi
+2162264 ./tests/data/vsynth1/msvideo1.avi
+cf15dd12b8347567ae350383bf4ef4bb *./tests/data/msvideo1.vsynth1.out.yuv
+stddev: 11.81 PSNR: 26.68 MAXDIFF: 151 bytes: 7603200/ 7603200
diff --git a/tests/ref/vsynth1/qtrlegray b/tests/ref/vsynth1/qtrlegray
new file mode 100644
index 0000000000..686611394e
--- /dev/null
+++ b/tests/ref/vsynth1/qtrlegray
@@ -0,0 +1,4 @@
+0544350c00f33f21e29b5edd965c3f03 *./tests/data/vsynth1/qtrlegray.mov
+5113428 ./tests/data/vsynth1/qtrlegray.mov
+29def4aed035ed65d3a89f7d382fccbe *./tests/data/qtrlegray.vsynth1.out.yuv
+stddev: 25.95 PSNR: 19.85 MAXDIFF: 122 bytes: 7603200/ 7603200
diff --git a/tests/ref/vsynth2/error b/tests/ref/vsynth2/error
index 4181b2d299..a6bfcd4fe2 100644
--- a/tests/ref/vsynth2/error
+++ b/tests/ref/vsynth2/error
@@ -1,4 +1,4 @@
90e65096aa9ebafa3fe3f44a5a47cdc4 *./tests/data/vsynth2/error-mpeg4-adv.avi
176588 ./tests/data/vsynth2/error-mpeg4-adv.avi
-9fe1082179f80179439953c7397a46ef *./tests/data/error.vsynth2.out.yuv
-stddev: 9.00 PSNR: 29.04 MAXDIFF: 168 bytes: 7603200/ 7603200
+96baa9e4c24c837a3ba5abd8dd2cdd30 *./tests/data/error.vsynth2.out.yuv
+stddev: 8.98 PSNR: 29.06 MAXDIFF: 184 bytes: 7603200/ 7603200
diff --git a/tests/ref/vsynth2/msvideo1 b/tests/ref/vsynth2/msvideo1
new file mode 100644
index 0000000000..b630b159e4
--- /dev/null
+++ b/tests/ref/vsynth2/msvideo1
@@ -0,0 +1,4 @@
+5dddbbd6616d9be4bc0fd0c9650bd9e3 *./tests/data/vsynth2/msvideo1.avi
+1259308 ./tests/data/vsynth2/msvideo1.avi
+e2e7a952135f6307a74f2e178dc0df20 *./tests/data/msvideo1.vsynth2.out.yuv
+stddev: 7.42 PSNR: 30.71 MAXDIFF: 123 bytes: 7603200/ 7603200
diff --git a/tests/ref/vsynth2/qtrlegray b/tests/ref/vsynth2/qtrlegray
new file mode 100644
index 0000000000..8c7b98a4bd
--- /dev/null
+++ b/tests/ref/vsynth2/qtrlegray
@@ -0,0 +1,4 @@
+55c6e5af44ece0621d1d4c91b282a544 *./tests/data/vsynth2/qtrlegray.mov
+5111417 ./tests/data/vsynth2/qtrlegray.mov
+f63b5ebdfdba750e547c25131b0a3fd1 *./tests/data/qtrlegray.vsynth2.out.yuv
+stddev: 19.42 PSNR: 22.36 MAXDIFF: 72 bytes: 7603200/ 7603200
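(Editorial note, not part of the patch.) In these vsynth reference files the stddev and PSNR columns are linked by PSNR = 20 * log10(255 / stddev) for 8-bit video; for example, the updated vsynth1/error entry above gives 20 * log10(255 / 18.36) ~= 22.85 dB. A minimal check of that relation, with the sample value taken from that entry:

/* Editorial sketch: relation between the stddev and PSNR columns
 * (8-bit samples, peak value 255).  Value taken from vsynth1/error above. */
#include <math.h>
#include <stdio.h>

int main(void)
{
    double stddev = 18.36;                          /* RMS error from the ref file */
    double psnr   = 20.0 * log10(255.0 / stddev);   /* ~22.85 dB                   */
    printf("stddev %.2f -> PSNR %.2f dB\n", stddev, psnr);
    return 0;
}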
diff --git a/tests/regression-funcs.sh b/tests/regression-funcs.sh
index 4093656736..0e4ea44f46 100755
--- a/tests/regression-funcs.sh
+++ b/tests/regression-funcs.sh
@@ -53,7 +53,10 @@ echov(){
. $(dirname $0)/md5.sh
-FFMPEG_OPTS="-v 0 -threads $threads -y -flags +bitexact -dct fastint -idct simple -sws_flags +accurate_rnd+bitexact"
+FFMPEG_OPTS="-v 0 -y"
+COMMON_OPTS="-flags +bitexact -idct simple -sws_flags +accurate_rnd+bitexact"
+DEC_OPTS="$COMMON_OPTS -threads $threads"
+ENC_OPTS="$COMMON_OPTS -dct fastint"
run_ffmpeg()
{
@@ -115,22 +118,22 @@ do_ffmpeg_nocheck()
do_video_decoding()
{
- do_ffmpeg $raw_dst $1 -i $target_path/$file -f rawvideo $2
+ do_ffmpeg $raw_dst $DEC_OPTS $1 -i $target_path/$file -f rawvideo $ENC_OPTS $2
}
do_video_encoding()
{
file=${outfile}$1
- do_ffmpeg $file $2 -f image2 -vcodec pgmyuv -i $raw_src $3
+ do_ffmpeg $file $DEC_OPTS -f image2 -vcodec pgmyuv -i $raw_src $ENC_OPTS $2
}
do_audio_encoding()
{
file=${outfile}$1
- do_ffmpeg $file -ab 128k -ac 2 -f s16le -i $pcm_src $3
+ do_ffmpeg $file $DEC_OPTS -ac 2 -f s16le -i $pcm_src -ab 128k $ENC_OPTS $2
}
do_audio_decoding()
{
- do_ffmpeg $pcm_dst -i $target_path/$file -sample_fmt s16 -f wav
+ do_ffmpeg $pcm_dst $DEC_OPTS -i $target_path/$file -sample_fmt s16 -f wav
}
diff --git a/tests/rotozoom.c b/tests/rotozoom.c
index 47da1b0526..822c2bce5e 100644
--- a/tests/rotozoom.c
+++ b/tests/rotozoom.c
@@ -24,47 +24,52 @@
#include <stdio.h>
#include <inttypes.h>
-#define FIXP (1<<16)
-#define MY_PI 205887 //(M_PI*FIX)
+#define FIXP (1 << 16)
+#define MY_PI 205887 //(M_PI * FIX)
-static int64_t int_pow(int64_t a, int p){
- int64_t v= FIXP;
+static int64_t int_pow(int64_t a, int p)
+{
+ int64_t v = FIXP;
- for(; p; p--){
- v*= a;
- v/= FIXP;
+ for (; p; p--) {
+ v *= a;
+ v /= FIXP;
}
return v;
}
-static int64_t int_sin(int64_t a){
- if(a<0) a= MY_PI-a; // 0..inf
- a %= 2*MY_PI; // 0..2PI
+static int64_t int_sin(int64_t a)
+{
+ if (a < 0)
+ a = MY_PI - a; // 0..inf
+ a %= 2 * MY_PI; // 0..2PI
- if(a>=MY_PI*3/2) a -= 2*MY_PI; // -PI/2 .. 3PI/2
- if(a>=MY_PI/2 ) a = MY_PI - a; // -PI/2 .. PI/2
+ if (a >= MY_PI * 3 / 2)
+ a -= 2 * MY_PI; // -PI / 2 .. 3PI / 2
+ if (a >= MY_PI /2)
+ a = MY_PI - a; // -PI / 2 .. PI / 2
- return a - int_pow(a, 3)/6 + int_pow(a, 5)/120 - int_pow(a, 7)/5040;
+ return a - int_pow(a, 3) / 6 + int_pow(a, 5) / 120 - int_pow(a, 7) / 5040;
}
#define SCALEBITS 8
#define ONE_HALF (1 << (SCALEBITS - 1))
-#define FIX(x) ((int) ((x) * (1L<<SCALEBITS) + 0.5))
-typedef unsigned char UINT8;
+#define FIX(x) ((int) ((x) * (1L << SCALEBITS) + 0.5))
-static void rgb24_to_yuv420p(UINT8 *lum, UINT8 *cb, UINT8 *cr,
- UINT8 *src, int width, int height)
+static void rgb24_to_yuv420p(unsigned char *lum, unsigned char *cb,
+ unsigned char *cr, unsigned char *src,
+ int width, int height)
{
int wrap, wrap3, x, y;
int r, g, b, r1, g1, b1;
- UINT8 *p;
+ unsigned char *p;
- wrap = width;
+ wrap = width;
wrap3 = width * 3;
p = src;
- for(y=0;y<height;y+=2) {
- for(x=0;x<width;x+=2) {
+ for (y = 0; y < height; y += 2) {
+ for (x = 0; x < width; x += 2) {
r = p[0];
g = p[1];
b = p[2];
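(Editorial aside on the hunk above, not part of the patch.) FIXP is a 16.16 fixed-point scale factor and MY_PI = 205887 is round(M_PI * FIXP), so int_sin(a) evaluates FIXP * sin(a / FIXP) using the 7th-order Taylor polynomial a - a^3/6 + a^5/120 - a^7/5040 after reducing a into (-pi/2, pi/2]. A self-contained sanity check follows; int_pow()/int_sin() are copied from rotozoom.c, while the main() driver comparing against libm is an illustration only.

/* Editorial sketch: quick check of the fixed-point sine from rotozoom.c. */
#include <inttypes.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>

#define FIXP  (1 << 16)   /* 16.16 fixed-point scale factor */
#define MY_PI 205887      /* round(M_PI * FIXP)             */

static int64_t int_pow(int64_t a, int p)
{
    int64_t v = FIXP;
    for (; p; p--) {
        v *= a;
        v /= FIXP;
    }
    return v;
}

static int64_t int_sin(int64_t a)
{
    if (a < 0)
        a = MY_PI - a;          /* sin(a) = sin(pi - a) for a < 0 */
    a %= 2 * MY_PI;             /* reduce to [0, 2pi)             */
    if (a >= MY_PI * 3 / 2)
        a -= 2 * MY_PI;
    if (a >= MY_PI / 2)
        a = MY_PI - a;          /* now a is in (-pi/2, pi/2]      */
    /* 7th-order Taylor polynomial: a - a^3/6 + a^5/120 - a^7/5040 */
    return a - int_pow(a, 3) / 6 + int_pow(a, 5) / 120 - int_pow(a, 7) / 5040;
}

int main(void)
{
    int deg;
    for (deg = 0; deg < 360; deg += 45) {
        int64_t a = 2 * deg * (int64_t)MY_PI / 360;   /* angle scaled by FIXP */
        printf("%3d deg: int_sin = %6"PRId64"   FIXP*sin = %8.0f\n",
               deg, int_sin(a), FIXP * sin(deg * M_PI / 180.0));
    }
    return 0;
}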
@@ -81,7 +86,7 @@ static void rgb24_to_yuv420p(UINT8 *lum, UINT8 *cb, UINT8 *cr,
b1 += b;
lum[1] = (FIX(0.29900) * r + FIX(0.58700) * g +
FIX(0.11400) * b + ONE_HALF) >> SCALEBITS;
- p += wrap3;
+ p += wrap3;
lum += wrap;
r = p[0];
@@ -104,14 +109,14 @@ static void rgb24_to_yuv420p(UINT8 *lum, UINT8 *cb, UINT8 *cr,
cb[0] = ((- FIX(0.16874) * r1 - FIX(0.33126) * g1 +
FIX(0.50000) * b1 + 4 * ONE_HALF - 1) >> (SCALEBITS + 2)) + 128;
cr[0] = ((FIX(0.50000) * r1 - FIX(0.41869) * g1 -
- FIX(0.08131) * b1 + 4 * ONE_HALF - 1) >> (SCALEBITS + 2)) + 128;
+ FIX(0.08131) * b1 + 4 * ONE_HALF - 1) >> (SCALEBITS + 2)) + 128;
cb++;
cr++;
- p += -wrap3 + 2 * 3;
- lum += -wrap + 2;
+ p += -wrap3 + 2 * 3;
+ lum += -wrap + 2;
}
- p += wrap3;
+ p += wrap3;
lum += wrap;
}
}
@@ -119,7 +124,7 @@ static void rgb24_to_yuv420p(UINT8 *lum, UINT8 *cb, UINT8 *cr,
/* cif format */
#define DEFAULT_WIDTH 352
#define DEFAULT_HEIGHT 288
-#define DEFAULT_NB_PICT 50
+#define DEFAULT_NB_PICT 50
static void pgmyuv_save(const char *filename, int w, int h,
unsigned char *rgb_tab)
@@ -130,19 +135,19 @@ static void pgmyuv_save(const char *filename, int w, int h,
unsigned char *lum_tab, *cb_tab, *cr_tab;
lum_tab = malloc(w * h);
- cb_tab = malloc((w * h) / 4);
- cr_tab = malloc((w * h) / 4);
+ cb_tab = malloc(w * h / 4);
+ cr_tab = malloc(w * h / 4);
rgb24_to_yuv420p(lum_tab, cb_tab, cr_tab, rgb_tab, w, h);
- f = fopen(filename,"wb");
- fprintf(f, "P5\n%d %d\n%d\n", w, (h * 3) / 2, 255);
+ f = fopen(filename, "wb");
+ fprintf(f, "P5\n%d %d\n%d\n", w, h * 3 / 2, 255);
fwrite(lum_tab, 1, w * h, f);
h2 = h / 2;
w2 = w / 2;
cb = cb_tab;
cr = cr_tab;
- for(i=0;i<h2;i++) {
+ for (i = 0; i < h2; i++) {
fwrite(cb, 1, w2, f);
fwrite(cr, 1, w2, f);
cb += w2;
@@ -172,104 +177,100 @@ static void put_pixel(int x, int y, int r, int g, int b)
p[2] = b;
}
-unsigned char tab_r[256*256];
-unsigned char tab_g[256*256];
-unsigned char tab_b[256*256];
+unsigned char tab_r[256 * 256];
+unsigned char tab_g[256 * 256];
+unsigned char tab_b[256 * 256];
int h_cos [360];
int h_sin [360];
-static int ipol(uint8_t *src, int x, int y){
- int int_x= x>>16;
- int int_y= y>>16;
- int frac_x= x&0xFFFF;
- int frac_y= y&0xFFFF;
- int s00= src[ ( int_x &255) + 256*( int_y &255) ];
- int s01= src[ ((int_x+1)&255) + 256*( int_y &255) ];
- int s10= src[ ( int_x &255) + 256*((int_y+1)&255) ];
- int s11= src[ ((int_x+1)&255) + 256*((int_y+1)&255) ];
- int s0= (((1<<16) - frac_x)*s00 + frac_x*s01)>>8;
- int s1= (((1<<16) - frac_x)*s10 + frac_x*s11)>>8;
-
- return (((1<<16) - frac_y)*s0 + frac_y*s1)>>24;
+static int ipol(uint8_t *src, int x, int y)
+{
+ int int_x = x >> 16;
+ int int_y = y >> 16;
+ int frac_x = x & 0xFFFF;
+ int frac_y = y & 0xFFFF;
+ int s00 = src[( int_x & 255) + 256 * ( int_y & 255)];
+ int s01 = src[((int_x + 1) & 255) + 256 * ( int_y & 255)];
+ int s10 = src[( int_x & 255) + 256 * ((int_y + 1) & 255)];
+ int s11 = src[((int_x + 1) & 255) + 256 * ((int_y + 1) & 255)];
+ int s0 = (((1 << 16) - frac_x) * s00 + frac_x * s01) >> 8;
+ int s1 = (((1 << 16) - frac_x) * s10 + frac_x * s11) >> 8;
+
+ return (((1 << 16) - frac_y) * s0 + frac_y * s1) >> 24;
}
static void gen_image(int num, int w, int h)
{
- const int c = h_cos [num % 360];
- const int s = h_sin [num % 360];
+ const int c = h_cos [num % 360];
+ const int s = h_sin [num % 360];
- const int xi = -(w/2) * c;
- const int yi = (w/2) * s;
+ const int xi = -(w / 2) * c;
+ const int yi = (w / 2) * s;
- const int xj = -(h/2) * s;
- const int yj = -(h/2) * c;
- int i,j;
+ const int xj = -(h / 2) * s;
+ const int yj = -(h / 2) * c;
+ int i, j;
- int x,y;
- int xprime = xj;
- int yprime = yj;
+ int x, y;
+ int xprime = xj;
+ int yprime = yj;
+ for (j = 0; j < h; j++) {
+ x = xprime + xi + FIXP * w / 2;
+ xprime += s;
- for (j=0;j<h;j++) {
+ y = yprime + yi + FIXP * h / 2;
+ yprime += c;
- x = xprime + xi + FIXP*w/2;
- xprime += s;
-
- y = yprime + yi + FIXP*h/2;
- yprime += c;
-
- for ( i=0 ; i<w ; i++ ) {
- x += c;
- y -= s;
-#if 1
- put_pixel(i, j, ipol(tab_r, x, y), ipol(tab_g, x, y), ipol(tab_b, x, y));
-#else
- {
- unsigned dep;
- dep = ((x>>16)&255) + (((y>>16)&255)<<8);
- put_pixel(i, j, tab_r[dep], tab_g[dep], tab_b[dep]);
- }
-#endif
+ for (i = 0; i < w; i++ ) {
+ x += c;
+ y -= s;
+ put_pixel(i, j, ipol(tab_r, x, y), ipol(tab_g, x, y), ipol(tab_b, x, y));
+ }
}
- }
}
#define W 256
#define H 256
-static void init_demo(const char *filename) {
- int i,j;
- int h;
- int radian;
- char line[3 * W];
-
- FILE *fichier;
-
- fichier = fopen(filename,"rb");
- if (!fichier) {
- perror(filename);
- exit(1);
- }
-
- fread(line, 1, 15, fichier);
- for (i=0;i<H;i++) {
- fread(line,1,3*W,fichier);
- for (j=0;j<W;j++) {
- tab_r[W*i+j] = line[3*j ];
- tab_g[W*i+j] = line[3*j + 1];
- tab_b[W*i+j] = line[3*j + 2];
+static int init_demo(const char *filename)
+{
+ int i, j;
+ int h;
+ int radian;
+ char line[3 * W];
+
+ FILE *input_file;
+
+ input_file = fopen(filename, "rb");
+ if (!input_file) {
+ perror(filename);
+ return 1;
}
- }
- fclose(fichier);
-
- /* tables sin/cos */
- for (i=0;i<360;i++) {
- radian = 2*i*MY_PI/360;
- h = 2*FIXP + int_sin (radian);
- h_cos[i] = ( h * int_sin (radian + MY_PI/2) )/2/FIXP;
- h_sin[i] = ( h * int_sin (radian ) )/2/FIXP;
- }
+
+ if (fread(line, 1, 15, input_file) != 15)
+ return 1;
+ for (i = 0; i < H; i++) {
+ if (fread(line, 1, 3 * W, input_file) != 3 * W)
+ return 1;
+ for (j = 0; j < W; j++) {
+ tab_r[W * i + j] = line[3 * j ];
+ tab_g[W * i + j] = line[3 * j + 1];
+ tab_b[W * i + j] = line[3 * j + 2];
+ }
+ }
+ fclose(input_file);
+
+ /* tables sin/cos */
+ for (i = 0; i < 360; i++) {
+ radian = 2 * i * MY_PI / 360;
+ h = 2 * FIXP + int_sin (radian);
+ h_cos[i] = h * int_sin(radian + MY_PI / 2) / 2 / FIXP;
+ h_sin[i] = h * int_sin(radian) / 2 / FIXP;
+ }
+
+ return 0;
}
int main(int argc, char **argv)
@@ -280,20 +281,21 @@ int main(int argc, char **argv)
if (argc != 3) {
printf("usage: %s directory/ image.pnm\n"
"generate a test video stream\n", argv[0]);
- exit(1);
+ return 1;
}
w = DEFAULT_WIDTH;
h = DEFAULT_HEIGHT;
rgb_tab = malloc(w * h * 3);
- wrap = w * 3;
- width = w;
- height = h;
+ wrap = w * 3;
+ width = w;
+ height = h;
- init_demo(argv[2]);
+ if (init_demo(argv[2]))
+ return 1;
- for(i=0;i<DEFAULT_NB_PICT;i++) {
+ for (i = 0; i < DEFAULT_NB_PICT; i++) {
snprintf(buf, sizeof(buf), "%s%02d.pgm", argv[1], i);
gen_image(i, w, h);
pgmyuv_save(buf, w, h, rgb_tab);