author     Michael Niedermayer <michaelni@gmx.at>      2014-06-23 20:59:48 +0200
committer  Michael Niedermayer <michaelni@gmx.at>      2014-06-23 21:10:10 +0200
commit     d7463c681363d6057601e705d0bf775738529841 (patch)
tree       5aaab61d15d220df649ead3e432c36f72219ab2a /libavcodec/x86
parent     32cf26cc6a0dc1425ed60b8ac99552fd858fab9b (diff)
parent     fab9df63a3156ffe1f9490aafaea41e03ef60ddf (diff)
download   ffmpeg-d7463c681363d6057601e705d0bf775738529841.tar.gz
Merge commit 'fab9df63a3156ffe1f9490aafaea41e03ef60ddf'
* commit 'fab9df63a3156ffe1f9490aafaea41e03ef60ddf':
dsputil: Split off global motion compensation bits into a separate context
Conflicts:
libavcodec/dsputil.c
libavcodec/dsputil.h
libavcodec/ppc/dsputil_altivec.h
libavcodec/x86/dsputil_init.c
libavcodec/x86/dsputil_mmx.c
libavcodec/x86/dsputil_x86.h
Merged-by: Michael Niedermayer <michaelni@gmx.at>
Diffstat (limited to 'libavcodec/x86')
-rw-r--r--   libavcodec/x86/Makefile           3
-rw-r--r--   libavcodec/x86/dsputil_init.c     3
-rw-r--r--   libavcodec/x86/dsputil_mmx.c    123
-rw-r--r--   libavcodec/x86/dsputil_x86.h      5
-rw-r--r--   libavcodec/x86/mpegvideodsp.c   161
5 files changed, 163 insertions, 132 deletions
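
Orientation note (not part of the commit): after this merge, GMC is selected through the new MpegVideoDSPContext rather than DSPContext. The sketch below shows how a caller is expected to pick up the split-out code path. Only ff_mpegvideodsp_init_x86(), the ->gmc member, and the ff_gmc_c fallback are visible in the diff; ff_mpegvideodsp_init() is assumed to be the matching generic entry point from libavcodec/mpegvideodsp.h, and the wrapper names here are hypothetical.

/* Sketch only, assuming the generic ff_mpegvideodsp_init() entry point. */
#include <stdint.h>
#include "libavcodec/mpegvideodsp.h"

/* One-time setup: the generic init is expected to install the C fallback
 * (ff_gmc_c) and then let ff_mpegvideodsp_init_x86() replace ->gmc with
 * gmc_mmx when inline MMX is usable (see the new file in the diff below). */
static void init_gmc(MpegVideoDSPContext *gdsp)
{
    ff_mpegvideodsp_init(gdsp);
}

/* Per-block call, same argument order the removed DSPContext.gmc had:
 * ox/oy and the four deltas are affine warp parameters in (16 + shift)-bit
 * fixed point, r is the rounding term added before the final >> (2 * shift),
 * and width/height bound the source plane. */
static void warp_8x8_block(MpegVideoDSPContext *gdsp,
                           uint8_t *dst, uint8_t *src, int stride,
                           int ox, int oy, int dxx, int dxy, int dyx, int dyy,
                           int shift, int r, int width, int height)
{
    gdsp->gmc(dst, src, stride, 8, ox, oy, dxx, dxy, dyx, dyy,
              shift, r, width, height);
}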
diff --git a/libavcodec/x86/Makefile b/libavcodec/x86/Makefile
index 61d73ffcb4..efb371eb59 100644
--- a/libavcodec/x86/Makefile
+++ b/libavcodec/x86/Makefile
@@ -26,7 +26,8 @@ OBJS-$(CONFIG_HUFFYUVDSP)              += x86/huffyuvdsp_init.o
 OBJS-$(CONFIG_HUFFYUVENCDSP)           += x86/huffyuvencdsp_mmx.o
 OBJS-$(CONFIG_LPC)                     += x86/lpc.o
 OBJS-$(CONFIG_MPEGAUDIODSP)            += x86/mpegaudiodsp.o
-OBJS-$(CONFIG_MPEGVIDEO)               += x86/mpegvideo.o
+OBJS-$(CONFIG_MPEGVIDEO)               += x86/mpegvideo.o               \
+                                          x86/mpegvideodsp.o
 OBJS-$(CONFIG_MPEGVIDEOENC)            += x86/mpegvideoenc.o
 OBJS-$(CONFIG_QPELDSP)                 += x86/qpeldsp_init.o
 OBJS-$(CONFIG_VIDEODSP)                += x86/videodsp_init.o
diff --git a/libavcodec/x86/dsputil_init.c b/libavcodec/x86/dsputil_init.c
index 63eea89a32..f2e312eb53 100644
--- a/libavcodec/x86/dsputil_init.c
+++ b/libavcodec/x86/dsputil_init.c
@@ -58,9 +58,6 @@ static av_cold void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx,
         }
     }

-#if CONFIG_VIDEODSP
-    c->gmc = ff_gmc_mmx;
-#endif
 #endif /* HAVE_MMX_INLINE */

 #if HAVE_MMX_EXTERNAL
diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c
index 310039957f..19c03d8586 100644
--- a/libavcodec/x86/dsputil_mmx.c
+++ b/libavcodec/x86/dsputil_mmx.c
@@ -247,127 +247,4 @@ void ff_draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
     }
 }

-#if CONFIG_VIDEODSP
-void ff_gmc_mmx(uint8_t *dst, uint8_t *src,
-                int stride, int h, int ox, int oy,
-                int dxx, int dxy, int dyx, int dyy,
-                int shift, int r, int width, int height)
-{
-    const int w    = 8;
-    const int ix   = ox >> (16 + shift);
-    const int iy   = oy >> (16 + shift);
-    const int oxs  = ox >> 4;
-    const int oys  = oy >> 4;
-    const int dxxs = dxx >> 4;
-    const int dxys = dxy >> 4;
-    const int dyxs = dyx >> 4;
-    const int dyys = dyy >> 4;
-    const uint16_t r4[4]   = { r, r, r, r };
-    const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
-    const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
-    const uint64_t shift2  = 2 * shift;
-#define MAX_STRIDE 4096U
-#define MAX_H 8U
-    uint8_t edge_buf[(MAX_H + 1) * MAX_STRIDE];
-    int x, y;
-
-    const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
-    const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
-    const int dxh = dxy * (h - 1);
-    const int dyw = dyx * (w - 1);
-    int need_emu  = (unsigned) ix >= width  - w ||
-                    (unsigned) iy >= height - h;
-
-    if ( // non-constant fullpel offset (3% of blocks)
-        ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
-         (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift) ||
-        // uses more than 16 bits of subpel mv (only at huge resolution)
-        (dxx | dxy | dyx | dyy) & 15 ||
-        (need_emu && (h > MAX_H || stride > MAX_STRIDE))) {
-        // FIXME could still use mmx for some of the rows
-        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
-                 shift, r, width, height);
-        return;
-    }
-
-    src += ix + iy * stride;
-    if (need_emu) {
-        ff_emulated_edge_mc_8(edge_buf, src, stride, stride, w + 1, h + 1, ix, iy, width, height);
-        src = edge_buf;
-    }
-
-    __asm__ volatile (
-        "movd         %0, %%mm6    \n\t"
-        "pxor      %%mm7, %%mm7    \n\t"
-        "punpcklwd %%mm6, %%mm6    \n\t"
-        "punpcklwd %%mm6, %%mm6    \n\t"
-        :: "r" (1 << shift));
-
-    for (x = 0; x < w; x += 4) {
-        uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
-                            oxs - dxys + dxxs * (x + 1),
-                            oxs - dxys + dxxs * (x + 2),
-                            oxs - dxys + dxxs * (x + 3) };
-        uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
-                            oys - dyys + dyxs * (x + 1),
-                            oys - dyys + dyxs * (x + 2),
-                            oys - dyys + dyxs * (x + 3) };
-
-        for (y = 0; y < h; y++) {
-            __asm__ volatile (
-                "movq      %0, %%mm4    \n\t"
-                "movq      %1, %%mm5    \n\t"
-                "paddw     %2, %%mm4    \n\t"
-                "paddw     %3, %%mm5    \n\t"
-                "movq   %%mm4, %0       \n\t"
-                "movq   %%mm5, %1       \n\t"
-                "psrlw    $12, %%mm4    \n\t"
-                "psrlw    $12, %%mm5    \n\t"
-                : "+m" (*dx4), "+m" (*dy4)
-                : "m" (*dxy4), "m" (*dyy4));
-
-            __asm__ volatile (
-                "movq      %%mm6, %%mm2 \n\t"
-                "movq      %%mm6, %%mm1 \n\t"
-                "psubw     %%mm4, %%mm2 \n\t"
-                "psubw     %%mm5, %%mm1 \n\t"
-                "movq      %%mm2, %%mm0 \n\t"
-                "movq      %%mm4, %%mm3 \n\t"
-                "pmullw    %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
-                "pmullw    %%mm5, %%mm3 \n\t" // dx * dy
-                "pmullw    %%mm5, %%mm2 \n\t" // (s - dx) * dy
-                "pmullw    %%mm4, %%mm1 \n\t" // dx * (s - dy)
-
-                "movd         %4, %%mm5 \n\t"
-                "movd         %3, %%mm4 \n\t"
-                "punpcklbw %%mm7, %%mm5 \n\t"
-                "punpcklbw %%mm7, %%mm4 \n\t"
-                "pmullw    %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
-                "pmullw    %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy
-
-                "movd         %2, %%mm5 \n\t"
-                "movd         %1, %%mm4 \n\t"
-                "punpcklbw %%mm7, %%mm5 \n\t"
-                "punpcklbw %%mm7, %%mm4 \n\t"
-                "pmullw    %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
-                "pmullw    %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
-                "paddw        %5, %%mm1 \n\t"
-                "paddw     %%mm3, %%mm2 \n\t"
-                "paddw     %%mm1, %%mm0 \n\t"
-                "paddw     %%mm2, %%mm0 \n\t"
-
-                "psrlw        %6, %%mm0 \n\t"
-                "packuswb  %%mm0, %%mm0 \n\t"
-                "movd      %%mm0, %0    \n\t"
-
-                : "=m" (dst[x + y * stride])
-                : "m" (src[0]), "m" (src[1]),
-                  "m" (src[stride]), "m" (src[stride + 1]),
-                  "m" (*r4), "m" (shift2));
-            src += stride;
-        }
-        src += 4 - h * stride;
-    }
-}
-#endif
 #endif /* HAVE_INLINE_ASM */
diff --git a/libavcodec/x86/dsputil_x86.h b/libavcodec/x86/dsputil_x86.h
index 14b7482cb9..ee5a9d4bef 100644
--- a/libavcodec/x86/dsputil_x86.h
+++ b/libavcodec/x86/dsputil_x86.h
@@ -43,12 +43,7 @@ void ff_put_signed_pixels_clamped_sse2(const int16_t *block, uint8_t *pixels,
 void ff_draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
                        int w, int h, int sides);

-void ff_gmc_mmx(uint8_t *dst, uint8_t *src,
-                int stride, int h, int ox, int oy,
-                int dxx, int dxy, int dyx, int dyy,
-                int shift, int r, int width, int height);
 void ff_mmx_idct(int16_t *block);
 void ff_mmxext_idct(int16_t *block);

-
 #endif /* AVCODEC_X86_DSPUTIL_X86_H */
diff --git a/libavcodec/x86/mpegvideodsp.c b/libavcodec/x86/mpegvideodsp.c
new file mode 100644
index 0000000000..941a8e2e4c
--- /dev/null
+++ b/libavcodec/x86/mpegvideodsp.c
@@ -0,0 +1,161 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/x86/asm.h"
+#include "libavutil/x86/cpu.h"
+#include "libavcodec/mpegvideodsp.h"
+#include "libavcodec/videodsp.h"
+
+#if HAVE_INLINE_ASM
+
+static void gmc_mmx(uint8_t *dst, uint8_t *src,
+                    int stride, int h, int ox, int oy,
+                    int dxx, int dxy, int dyx, int dyy,
+                    int shift, int r, int width, int height)
+{
+    const int w    = 8;
+    const int ix   = ox >> (16 + shift);
+    const int iy   = oy >> (16 + shift);
+    const int oxs  = ox >> 4;
+    const int oys  = oy >> 4;
+    const int dxxs = dxx >> 4;
+    const int dxys = dxy >> 4;
+    const int dyxs = dyx >> 4;
+    const int dyys = dyy >> 4;
+    const uint16_t r4[4]   = { r, r, r, r };
+    const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
+    const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
+    const uint64_t shift2  = 2 * shift;
+#define MAX_STRIDE 4096U
+#define MAX_H 8U
+    uint8_t edge_buf[(MAX_H + 1) * MAX_STRIDE];
+    int x, y;
+
+    const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
+    const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
+    const int dxh = dxy * (h - 1);
+    const int dyw = dyx * (w - 1);
+    int need_emu  = (unsigned) ix >= width  - w ||
+                    (unsigned) iy >= height - h;
+
+    if ( // non-constant fullpel offset (3% of blocks)
+        ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
+         (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift) ||
+        // uses more than 16 bits of subpel mv (only at huge resolution)
+        (dxx | dxy | dyx | dyy) & 15 ||
+        (need_emu && (h > MAX_H || stride > MAX_STRIDE))) {
+        // FIXME could still use mmx for some of the rows
+        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
+                 shift, r, width, height);
+        return;
+    }
+
+    src += ix + iy * stride;
+    if (need_emu) {
+        ff_emulated_edge_mc_8(edge_buf, src, stride, stride, w + 1, h + 1, ix, iy, width, height);
+        src = edge_buf;
+    }
+
+    __asm__ volatile (
+        "movd         %0, %%mm6    \n\t"
+        "pxor      %%mm7, %%mm7    \n\t"
+        "punpcklwd %%mm6, %%mm6    \n\t"
+        "punpcklwd %%mm6, %%mm6    \n\t"
+        :: "r" (1 << shift));
+
+    for (x = 0; x < w; x += 4) {
+        uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
+                            oxs - dxys + dxxs * (x + 1),
+                            oxs - dxys + dxxs * (x + 2),
+                            oxs - dxys + dxxs * (x + 3) };
+        uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
+                            oys - dyys + dyxs * (x + 1),
+                            oys - dyys + dyxs * (x + 2),
+                            oys - dyys + dyxs * (x + 3) };
+
+        for (y = 0; y < h; y++) {
+            __asm__ volatile (
+                "movq      %0, %%mm4    \n\t"
+                "movq      %1, %%mm5    \n\t"
+                "paddw     %2, %%mm4    \n\t"
+                "paddw     %3, %%mm5    \n\t"
+                "movq   %%mm4, %0       \n\t"
+                "movq   %%mm5, %1       \n\t"
+                "psrlw    $12, %%mm4    \n\t"
+                "psrlw    $12, %%mm5    \n\t"
+                : "+m" (*dx4), "+m" (*dy4)
+                : "m" (*dxy4), "m" (*dyy4));
+
+            __asm__ volatile (
+                "movq      %%mm6, %%mm2 \n\t"
+                "movq      %%mm6, %%mm1 \n\t"
+                "psubw     %%mm4, %%mm2 \n\t"
+                "psubw     %%mm5, %%mm1 \n\t"
+                "movq      %%mm2, %%mm0 \n\t"
+                "movq      %%mm4, %%mm3 \n\t"
+                "pmullw    %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
+                "pmullw    %%mm5, %%mm3 \n\t" // dx * dy
+                "pmullw    %%mm5, %%mm2 \n\t" // (s - dx) * dy
+                "pmullw    %%mm4, %%mm1 \n\t" // dx * (s - dy)
+
+                "movd         %4, %%mm5 \n\t"
+                "movd         %3, %%mm4 \n\t"
+                "punpcklbw %%mm7, %%mm5 \n\t"
+                "punpcklbw %%mm7, %%mm4 \n\t"
+                "pmullw    %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
+                "pmullw    %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy
+
+                "movd         %2, %%mm5 \n\t"
+                "movd         %1, %%mm4 \n\t"
+                "punpcklbw %%mm7, %%mm5 \n\t"
+                "punpcklbw %%mm7, %%mm4 \n\t"
+                "pmullw    %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
+                "pmullw    %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
+                "paddw        %5, %%mm1 \n\t"
+                "paddw     %%mm3, %%mm2 \n\t"
+                "paddw     %%mm1, %%mm0 \n\t"
+                "paddw     %%mm2, %%mm0 \n\t"
+
+                "psrlw        %6, %%mm0 \n\t"
+                "packuswb  %%mm0, %%mm0 \n\t"
+                "movd      %%mm0, %0    \n\t"
+
+                : "=m" (dst[x + y * stride])
+                : "m" (src[0]), "m" (src[1]),
+                  "m" (src[stride]), "m" (src[stride + 1]),
+                  "m" (*r4), "m" (shift2));
+            src += stride;
+        }
+        src += 4 - h * stride;
+    }
+}
+
+#endif /* HAVE_INLINE_ASM */
+
+av_cold void ff_mpegvideodsp_init_x86(MpegVideoDSPContext *c)
+{
+#if HAVE_INLINE_ASM
+    int cpu_flags = av_get_cpu_flags();
+
+    if (INLINE_MMX(cpu_flags))
+        c->gmc = gmc_mmx;
+#endif /* HAVE_INLINE_ASM */
+}