author     Ronald S. Bultje <rsbultje@gmail.com>         2013-03-08 20:40:16 -0800
committer  Martin Storsjö <martin@martin.st>             2013-04-08 12:38:33 +0300
commit     610b18e2e3d8ef5eca3e78f33a0625689b8d2bb9 (patch)
tree       a29b386fff4c4e642332b4d5178a4c548fe3a5b8 /libavcodec
parent     0f598457089023f86829b34fef582f3810c491b2 (diff)
download   ffmpeg-610b18e2e3d8ef5eca3e78f33a0625689b8d2bb9.tar.gz
x86: qpel: Move fullpel and l2 functions to a separate file
This way, they can be shared between mpeg4qpel and h264qpel without
requiring either one to be compiled unconditionally.
Signed-off-by: Martin Storsjö <martin@martin.st>
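For context, the "l2" kernels being moved compute a rounded per-byte average of two source blocks (the second source is read as a packed buffer that advances by the block width per row), and the avg_ variants additionally average that result into the destination. The following scalar C sketch illustrates that behaviour; the function names and prototypes here are illustrative only, not the symbols exported by libavcodec:

#include <stdint.h>

/* Rounded byte average, matching the semantics of the PAVGB instruction. */
static inline uint8_t avg2(uint8_t a, uint8_t b)
{
    return (uint8_t)((a + b + 1) >> 1);
}

/* put_pixels*_l2: each output pixel is the rounded average of the two
 * sources; src2 advances by the block width per row, as in the assembly
 * in the diff below. */
static void put_pixels_l2_c(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
                            int dst_stride, int src1_stride, int width, int h)
{
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < width; x++)
            dst[x] = avg2(src1[x], src2[x]);
        dst  += dst_stride;
        src1 += src1_stride;
        src2 += width;
    }
}

/* avg_pixels*_l2: same, but the result is then averaged with what is
 * already in dst (op_avg / op_avgh in the macros below). */
static void avg_pixels_l2_c(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
                            int dst_stride, int src1_stride, int width, int h)
{
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < width; x++)
            dst[x] = avg2(avg2(src1[x], src2[x]), dst[x]);
        dst  += dst_stride;
        src1 += src1_stride;
        src2 += width;
    }
}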
Diffstat (limited to 'libavcodec')
-rw-r--r--  libavcodec/x86/Makefile     |   1
-rw-r--r--  libavcodec/x86/dsputil.asm  | 192
-rw-r--r--  libavcodec/x86/hpeldsp.asm  |  41
-rw-r--r--  libavcodec/x86/qpel.asm     | 256
4 files changed, 257 insertions, 233 deletions
diff --git a/libavcodec/x86/Makefile b/libavcodec/x86/Makefile
index a759e6e9e0..cc60f2f0b3 100644
--- a/libavcodec/x86/Makefile
+++ b/libavcodec/x86/Makefile
@@ -76,3 +76,4 @@ YASM-OBJS += x86/dsputil.o \
                x86/fmtconvert.o \
                x86/hpeldsp.o \
                x86/mpeg4qpel.o \
+               x86/qpel.o \
diff --git a/libavcodec/x86/dsputil.asm b/libavcodec/x86/dsputil.asm
index 078f58c8d4..5d73ff8ee4 100644
--- a/libavcodec/x86/dsputil.asm
+++ b/libavcodec/x86/dsputil.asm
@@ -648,195 +648,3 @@ BSWAP32_BUF
 
 INIT_XMM ssse3
 BSWAP32_BUF
-
-
-; FIXME: All of the code below should be put back in h264_qpel_8bit.asm.
-; Unfortunately it is unconditionally used from dsputil_mmx.c since 71155d7 ..
-
-%macro op_avgh 3
-    movh   %3, %2
-    pavgb  %1, %3
-    movh   %2, %1
-%endmacro
-
-%macro op_avg 2
-    pavgb  %1, %2
-    mova   %2, %1
-%endmacro
-
-%macro op_puth 2-3
-    movh   %2, %1
-%endmacro
-
-%macro op_put 2
-    mova   %2, %1
-%endmacro
-
-; void pixels4_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
-%macro PIXELS4_L2 1
-%define OP op_%1h
-cglobal %1_pixels4_l2, 6,6
-    movsxdifnidn r3, r3d
-    movsxdifnidn r4, r4d
-    test         r5d, 1
-    je           .loop
-    movd         m0, [r1]
-    movd         m1, [r2]
-    add          r1, r4
-    add          r2, 4
-    pavgb        m0, m1
-    OP           m0, [r0], m3
-    add          r0, r3
-    dec          r5d
-.loop:
-    mova         m0, [r1]
-    mova         m1, [r1+r4]
-    lea          r1, [r1+2*r4]
-    pavgb        m0, [r2]
-    pavgb        m1, [r2+4]
-    OP           m0, [r0], m3
-    OP           m1, [r0+r3], m3
-    lea          r0, [r0+2*r3]
-    mova         m0, [r1]
-    mova         m1, [r1+r4]
-    lea          r1, [r1+2*r4]
-    pavgb        m0, [r2+8]
-    pavgb        m1, [r2+12]
-    OP           m0, [r0], m3
-    OP           m1, [r0+r3], m3
-    lea          r0, [r0+2*r3]
-    add          r2, 16
-    sub          r5d, 4
-    jne          .loop
-    REP_RET
-%endmacro
-
-INIT_MMX mmxext
-PIXELS4_L2 put
-PIXELS4_L2 avg
-
-; void pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
-%macro PIXELS8_L2 1
-%define OP op_%1
-cglobal %1_pixels8_l2, 6,6
-    movsxdifnidn r3, r3d
-    movsxdifnidn r4, r4d
-    test         r5d, 1
-    je           .loop
-    mova         m0, [r1]
-    mova         m1, [r2]
-    add          r1, r4
-    add          r2, 8
-    pavgb        m0, m1
-    OP           m0, [r0]
-    add          r0, r3
-    dec          r5d
-.loop:
-    mova         m0, [r1]
-    mova         m1, [r1+r4]
-    lea          r1, [r1+2*r4]
-    pavgb        m0, [r2]
-    pavgb        m1, [r2+8]
-    OP           m0, [r0]
-    OP           m1, [r0+r3]
-    lea          r0, [r0+2*r3]
-    mova         m0, [r1]
-    mova         m1, [r1+r4]
-    lea          r1, [r1+2*r4]
-    pavgb        m0, [r2+16]
-    pavgb        m1, [r2+24]
-    OP           m0, [r0]
-    OP           m1, [r0+r3]
-    lea          r0, [r0+2*r3]
-    add          r2, 32
-    sub          r5d, 4
-    jne          .loop
-    REP_RET
-%endmacro
-
-INIT_MMX mmxext
-PIXELS8_L2 put
-PIXELS8_L2 avg
-
-; void pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
-%macro PIXELS16_L2 1
-%define OP op_%1
-cglobal %1_pixels16_l2, 6,6
-    movsxdifnidn r3, r3d
-    movsxdifnidn r4, r4d
-    test         r5d, 1
-    je           .loop
-    mova         m0, [r1]
-    mova         m1, [r1+8]
-    pavgb        m0, [r2]
-    pavgb        m1, [r2+8]
-    add          r1, r4
-    add          r2, 16
-    OP           m0, [r0]
-    OP           m1, [r0+8]
-    add          r0, r3
-    dec          r5d
-.loop:
-    mova         m0, [r1]
-    mova         m1, [r1+8]
-    add          r1, r4
-    pavgb        m0, [r2]
-    pavgb        m1, [r2+8]
-    OP           m0, [r0]
-    OP           m1, [r0+8]
-    add          r0, r3
-    mova         m0, [r1]
-    mova         m1, [r1+8]
-    add          r1, r4
-    pavgb        m0, [r2+16]
-    pavgb        m1, [r2+24]
-    OP           m0, [r0]
-    OP           m1, [r0+8]
-    add          r0, r3
-    add          r2, 32
-    sub          r5d, 2
-    jne          .loop
-    REP_RET
-%endmacro
-
-INIT_MMX mmxext
-PIXELS16_L2 put
-PIXELS16_L2 avg
-
-INIT_MMX mmxext
-; void pixels(uint8_t *block, const uint8_t *pixels, int line_size, int h)
-%macro PIXELS48 2
-%if %2 == 4
-%define OP movh
-%else
-%define OP mova
-%endif
-cglobal %1_pixels%2, 4,5
-    movsxdifnidn r2, r2d
-    lea          r4, [r2*3]
-.loop:
-    OP           m0, [r1]
-    OP           m1, [r1+r2]
-    OP           m2, [r1+r2*2]
-    OP           m3, [r1+r4]
-    lea          r1, [r1+r2*4]
-%ifidn %1, avg
-    pavgb        m0, [r0]
-    pavgb        m1, [r0+r2]
-    pavgb        m2, [r0+r2*2]
-    pavgb        m3, [r0+r4]
-%endif
-    OP           [r0], m0
-    OP           [r0+r2], m1
-    OP           [r0+r2*2], m2
-    OP           [r0+r4], m3
-    sub          r3d, 4
-    lea          r0, [r0+r2*4]
-    jne          .loop
-    RET
-%endmacro
-
-PIXELS48 put, 4
-PIXELS48 avg, 4
-PIXELS48 put, 8
-PIXELS48 avg, 8
diff --git a/libavcodec/x86/hpeldsp.asm b/libavcodec/x86/hpeldsp.asm
index d38186c857..ec04d99e77 100644
--- a/libavcodec/x86/hpeldsp.asm
+++ b/libavcodec/x86/hpeldsp.asm
@@ -452,44 +452,3 @@ INIT_MMX mmxext
 AVG_PIXELS8_XY2
 INIT_MMX 3dnow
 AVG_PIXELS8_XY2
-
-INIT_XMM sse2
-; void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
-cglobal put_pixels16, 4,5,4
-    lea          r4, [r2*3]
-.loop:
-    movu         m0, [r1]
-    movu         m1, [r1+r2]
-    movu         m2, [r1+r2*2]
-    movu         m3, [r1+r4]
-    lea          r1, [r1+r2*4]
-    mova         [r0], m0
-    mova         [r0+r2], m1
-    mova         [r0+r2*2], m2
-    mova         [r0+r4], m3
-    sub          r3d, 4
-    lea          r0, [r0+r2*4]
-    jnz          .loop
-    REP_RET
-
-; void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
-cglobal avg_pixels16, 4,5,4
-    lea          r4, [r2*3]
-.loop:
-    movu         m0, [r1]
-    movu         m1, [r1+r2]
-    movu         m2, [r1+r2*2]
-    movu         m3, [r1+r4]
-    lea          r1, [r1+r2*4]
-    pavgb        m0, [r0]
-    pavgb        m1, [r0+r2]
-    pavgb        m2, [r0+r2*2]
-    pavgb        m3, [r0+r4]
-    mova         [r0], m0
-    mova         [r0+r2], m1
-    mova         [r0+r2*2], m2
-    mova         [r0+r4], m3
-    sub          r3d, 4
-    lea          r0, [r0+r2*4]
-    jnz          .loop
-    REP_RET
diff --git a/libavcodec/x86/qpel.asm b/libavcodec/x86/qpel.asm
new file mode 100644
index 0000000000..ba97e2aec8
--- /dev/null
+++ b/libavcodec/x86/qpel.asm
@@ -0,0 +1,256 @@
+;******************************************************************************
+;* MMX optimized DSP utils
+;* Copyright (c) 2008 Loren Merritt
+;* Copyright (c) 2003-2013 Michael Niedermayer
+;* Copyright (c) 2013 Daniel Kang
+;*
+;* This file is part of Libav.
+;*
+;* Libav is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* Libav is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with Libav; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION .text
+
+%macro op_avgh 3
+    movh   %3, %2
+    pavgb  %1, %3
+    movh   %2, %1
+%endmacro
+
+%macro op_avg 2
+    pavgb  %1, %2
+    mova   %2, %1
+%endmacro
+
+%macro op_puth 2-3
+    movh   %2, %1
+%endmacro
+
+%macro op_put 2
+    mova   %2, %1
+%endmacro
+
+; void pixels4_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
+%macro PIXELS4_L2 1
+%define OP op_%1h
+cglobal %1_pixels4_l2, 6,6
+    movsxdifnidn r3, r3d
+    movsxdifnidn r4, r4d
+    test         r5d, 1
+    je           .loop
+    movd         m0, [r1]
+    movd         m1, [r2]
+    add          r1, r4
+    add          r2, 4
+    pavgb        m0, m1
+    OP           m0, [r0], m3
+    add          r0, r3
+    dec          r5d
+.loop:
+    mova         m0, [r1]
+    mova         m1, [r1+r4]
+    lea          r1, [r1+2*r4]
+    pavgb        m0, [r2]
+    pavgb        m1, [r2+4]
+    OP           m0, [r0], m3
+    OP           m1, [r0+r3], m3
+    lea          r0, [r0+2*r3]
+    mova         m0, [r1]
+    mova         m1, [r1+r4]
+    lea          r1, [r1+2*r4]
+    pavgb        m0, [r2+8]
+    pavgb        m1, [r2+12]
+    OP           m0, [r0], m3
+    OP           m1, [r0+r3], m3
+    lea          r0, [r0+2*r3]
+    add          r2, 16
+    sub          r5d, 4
+    jne          .loop
+    REP_RET
+%endmacro
+
+INIT_MMX mmxext
+PIXELS4_L2 put
+PIXELS4_L2 avg
+
+; void pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
+%macro PIXELS8_L2 1
+%define OP op_%1
+cglobal %1_pixels8_l2, 6,6
+    movsxdifnidn r3, r3d
+    movsxdifnidn r4, r4d
+    test         r5d, 1
+    je           .loop
+    mova         m0, [r1]
+    mova         m1, [r2]
+    add          r1, r4
+    add          r2, 8
+    pavgb        m0, m1
+    OP           m0, [r0]
+    add          r0, r3
+    dec          r5d
+.loop:
+    mova         m0, [r1]
+    mova         m1, [r1+r4]
+    lea          r1, [r1+2*r4]
+    pavgb        m0, [r2]
+    pavgb        m1, [r2+8]
+    OP           m0, [r0]
+    OP           m1, [r0+r3]
+    lea          r0, [r0+2*r3]
+    mova         m0, [r1]
+    mova         m1, [r1+r4]
+    lea          r1, [r1+2*r4]
+    pavgb        m0, [r2+16]
+    pavgb        m1, [r2+24]
+    OP           m0, [r0]
+    OP           m1, [r0+r3]
+    lea          r0, [r0+2*r3]
+    add          r2, 32
+    sub          r5d, 4
+    jne          .loop
+    REP_RET
+%endmacro
+
+INIT_MMX mmxext
+PIXELS8_L2 put
+PIXELS8_L2 avg
+
+; void pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
+%macro PIXELS16_L2 1
+%define OP op_%1
+cglobal %1_pixels16_l2, 6,6
+    movsxdifnidn r3, r3d
+    movsxdifnidn r4, r4d
+    test         r5d, 1
+    je           .loop
+    mova         m0, [r1]
+    mova         m1, [r1+8]
+    pavgb        m0, [r2]
+    pavgb        m1, [r2+8]
+    add          r1, r4
+    add          r2, 16
+    OP           m0, [r0]
+    OP           m1, [r0+8]
+    add          r0, r3
+    dec          r5d
+.loop:
+    mova         m0, [r1]
+    mova         m1, [r1+8]
+    add          r1, r4
+    pavgb        m0, [r2]
+    pavgb        m1, [r2+8]
+    OP           m0, [r0]
+    OP           m1, [r0+8]
+    add          r0, r3
+    mova         m0, [r1]
+    mova         m1, [r1+8]
+    add          r1, r4
+    pavgb        m0, [r2+16]
+    pavgb        m1, [r2+24]
+    OP           m0, [r0]
+    OP           m1, [r0+8]
+    add          r0, r3
+    add          r2, 32
+    sub          r5d, 2
+    jne          .loop
+    REP_RET
+%endmacro
+
+INIT_MMX mmxext
+PIXELS16_L2 put
+PIXELS16_L2 avg
+
+INIT_MMX mmxext
+; void pixels(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+%macro PIXELS48 2
+%if %2 == 4
+%define OP movh
+%else
+%define OP mova
+%endif
+cglobal %1_pixels%2, 4,5
+    movsxdifnidn r2, r2d
+    lea          r4, [r2*3]
+.loop:
+    OP           m0, [r1]
+    OP           m1, [r1+r2]
+    OP           m2, [r1+r2*2]
+    OP           m3, [r1+r4]
+    lea          r1, [r1+r2*4]
+%ifidn %1, avg
+    pavgb        m0, [r0]
+    pavgb        m1, [r0+r2]
+    pavgb        m2, [r0+r2*2]
+    pavgb        m3, [r0+r4]
+%endif
+    OP           [r0], m0
+    OP           [r0+r2], m1
+    OP           [r0+r2*2], m2
+    OP           [r0+r4], m3
+    sub          r3d, 4
+    lea          r0, [r0+r2*4]
+    jne          .loop
+    RET
+%endmacro
+
+PIXELS48 put, 4
+PIXELS48 avg, 4
+PIXELS48 put, 8
+PIXELS48 avg, 8
+
+
+INIT_XMM sse2
+; void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
+cglobal put_pixels16, 4,5,4
+    lea          r4, [r2*3]
+.loop:
+    movu         m0, [r1]
+    movu         m1, [r1+r2]
+    movu         m2, [r1+r2*2]
+    movu         m3, [r1+r4]
+    lea          r1, [r1+r2*4]
+    mova         [r0], m0
+    mova         [r0+r2], m1
+    mova         [r0+r2*2], m2
+    mova         [r0+r4], m3
+    sub          r3d, 4
+    lea          r0, [r0+r2*4]
+    jnz          .loop
+    REP_RET
+
+; void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
+cglobal avg_pixels16, 4,5,4
+    lea          r4, [r2*3]
+.loop:
+    movu         m0, [r1]
+    movu         m1, [r1+r2]
+    movu         m2, [r1+r2*2]
+    movu         m3, [r1+r4]
+    lea          r1, [r1+r2*4]
+    pavgb        m0, [r0]
+    pavgb        m1, [r0+r2]
+    pavgb        m2, [r0+r2*2]
+    pavgb        m3, [r0+r4]
+    mova         [r0], m0
+    mova         [r0+r2], m1
+    mova         [r0+r2*2], m2
+    mova         [r0+r4], m3
+    sub          r3d, 4
+    lea          r0, [r0+r2*4]
+    jnz          .loop
+    REP_RET
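For reference, the fullpel copies that now live in qpel.asm (the PIXELS48 macro and the SSE2 pixels16 versions above) have straightforward scalar equivalents: put_ copies a width x h block, avg_ averages it into the destination, and the same line_size is used for both pointers, as in the assembly prototypes. The sketch below is illustrative C only, not the actual libavcodec entry points:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Rounded byte average, matching PAVGB. */
static inline uint8_t avg2(uint8_t a, uint8_t b)
{
    return (uint8_t)((a + b + 1) >> 1);
}

static void put_pixels_c(uint8_t *block, const uint8_t *pixels,
                         ptrdiff_t line_size, int width, int h)
{
    for (int y = 0; y < h; y++) {
        memcpy(block, pixels, (size_t)width);   /* plain block copy */
        block  += line_size;
        pixels += line_size;
    }
}

static void avg_pixels_c(uint8_t *block, const uint8_t *pixels,
                         ptrdiff_t line_size, int width, int h)
{
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < width; x++)
            block[x] = avg2(block[x], pixels[x]);   /* average into destination */
        block  += line_size;
        pixels += line_size;
    }
}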