author     Michael Niedermayer <michaelni@gmx.at>  2014-12-27 23:17:21 +0100
committer  Michael Niedermayer <michaelni@gmx.at>  2014-12-27 23:17:29 +0100
commit     17dde95ec52e45b063776f4303d93ea2329e29f0 (patch)
tree       f70c7f66ab6be789dc87674a65303acaf6931057 /libavcodec/x86
parent     035180901de9bbea873001b82d96dd2b7a45d76a (diff)
parent     3aefca68cae603aac77a826de20d94ce24c7ec8f (diff)
Merge remote-tracking branch 'rbultje/vp9-32bit-lpf'
* rbultje/vp9-32bit-lpf:
vp9/x86: add myself to copyright holders for loopfilter assembly.
vp9/x86: make filter_16_h work on 32-bit.
vp9/x86: make filter_48/84/88_h work on 32-bit.
vp9/x86: make filter_44_h work on 32-bit.
vp9/x86: make filter_16_v work on 32-bit.
vp9/x86: make filter_48/84_v work on 32-bit.
vp9/x86: make filter_88_v work on 32-bit.
vp9/x86: make filter_44_v work on 32-bit.
vp9/x86: save one register in SIGN_ADD/SUB.
vp9/x86: store unpacked intermediates for filter6/14 on stack.
vp9/x86: move variable assigned inside macro branch.
vp9/x86: simplify ABSSUB_CMP by inverting the comparison meaning.
vp9/x86: remove unused register from ABSSUB_CMP macro.
vp9/x86: slightly simplify 44/48/84/88 h stores.
vp9/x86: make cglobal statement more conservative in register allocation.
vp9/x86: save one register in loopfilter surface coverage.
Merged-by: Michael Niedermayer <michaelni@gmx.at>
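The central obstacle for the 32-bit port is register pressure: x86-64 offers sixteen XMM registers, x86-32 only eight. The series handles this with a pair of helpers, shown here condensed from the vp9lpf.asm hunk below; on 64-bit, parking a value is a free register rename (SWAP), while on 32-bit the same macro degrades to a spill into stack space reserved by the cglobal declaration:

    %macro SCRATCH 3 ; reg number, x86-64 swap target, x86-32 stack slot
    %if ARCH_X86_64
        SWAP %1, %2        ; rename into one of the upper eight XMM regs
    %else
        mova [%3], m%1     ; spill: only m0..m7 exist on x86-32
    %endif
    %endmacro

    %macro UNSCRATCH 3
    %if ARCH_X86_64
        SWAP %1, %2
    %else
        mova m%1, [%3]     ; reload the spilled value
    %endif
    %endmacro

The same idea drives the rp3/rq3-style defines in the diff: on 64-bit they name registers, on 32-bit they name memory locations directly, so the filter code reads identically on both targets.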
Diffstat (limited to 'libavcodec/x86')
-rw-r--r--  libavcodec/x86/vp9dsp_init.c |  22
-rw-r--r--  libavcodec/x86/vp9lpf.asm    | 973
2 files changed, 658 insertions, 337 deletions
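One space-saving transformation shows up throughout the diff below: the old ABSSUB_CMP/CMP_LTE helpers computed abs(a-b) <= cmp directly, costing three instructions per test (pcmpgtb, pcmpeqb, por) plus a pand to accumulate the masks. The rewrite computes the inverted test abs(a-b) > cmp with a single pcmpgtb, accumulates with por, and flips the combined mask once at the end (pxor with the new pb_ff constant). Both helpers as they appear in the diff, with CMP_LTE's optional pb_80 pre-xor dropped for brevity:

    ; old: %1 = %1 <= %2 (three ops per test, results combined with pand)
    %macro CMP_LTE 3 ; src/dst, cmp, tmp
        pcmpgtb %3, %2, %1 ; cmp > src?
        pcmpeqb %1, %2     ; cmp == src?
        por %1, %3         ; cmp >= src?
    %endmacro

    ; new: %1 = %1 > %2 (one op per test, results combined with por,
    ; then inverted once at the end with pxor against pb_ff)
    %macro CMP_GT 2 ; src/dst, cmp
        pcmpgtb %1, %2
    %endmacro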
diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c
index 722b525426..3a306428de 100644
--- a/libavcodec/x86/vp9dsp_init.c
+++ b/libavcodec/x86/vp9dsp_init.c
@@ -342,18 +342,16 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp)
     init_subpel2(4, idx, 4, type, opt)
 
 #define init_lpf(opt) do { \
-    if (ARCH_X86_64) { \
-        dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_##opt; \
-        dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_##opt; \
-        dsp->loop_filter_mix2[0][0][0] = ff_vp9_loop_filter_h_44_16_##opt; \
-        dsp->loop_filter_mix2[0][0][1] = ff_vp9_loop_filter_v_44_16_##opt; \
-        dsp->loop_filter_mix2[0][1][0] = ff_vp9_loop_filter_h_48_16_##opt; \
-        dsp->loop_filter_mix2[0][1][1] = ff_vp9_loop_filter_v_48_16_##opt; \
-        dsp->loop_filter_mix2[1][0][0] = ff_vp9_loop_filter_h_84_16_##opt; \
-        dsp->loop_filter_mix2[1][0][1] = ff_vp9_loop_filter_v_84_16_##opt; \
-        dsp->loop_filter_mix2[1][1][0] = ff_vp9_loop_filter_h_88_16_##opt; \
-        dsp->loop_filter_mix2[1][1][1] = ff_vp9_loop_filter_v_88_16_##opt; \
-    } \
+    dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_##opt; \
+    dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_##opt; \
+    dsp->loop_filter_mix2[0][0][0] = ff_vp9_loop_filter_h_44_16_##opt; \
+    dsp->loop_filter_mix2[0][0][1] = ff_vp9_loop_filter_v_44_16_##opt; \
+    dsp->loop_filter_mix2[0][1][0] = ff_vp9_loop_filter_h_48_16_##opt; \
+    dsp->loop_filter_mix2[0][1][1] = ff_vp9_loop_filter_v_48_16_##opt; \
+    dsp->loop_filter_mix2[1][0][0] = ff_vp9_loop_filter_h_84_16_##opt; \
+    dsp->loop_filter_mix2[1][0][1] = ff_vp9_loop_filter_v_84_16_##opt; \
+    dsp->loop_filter_mix2[1][1][0] = ff_vp9_loop_filter_h_88_16_##opt; \
+    dsp->loop_filter_mix2[1][1][1] = ff_vp9_loop_filter_v_88_16_##opt; \
 } while (0)
 
 #define init_ipred(sz, opt, t, e) \
diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm
index 416f08f090..2c4fe214da 100644
--- a/libavcodec/x86/vp9lpf.asm
+++ b/libavcodec/x86/vp9lpf.asm
@@ -2,6 +2,7 @@
 ;* VP9 loop filter SIMD optimizations
 ;*
 ;* Copyright (C) 2013-2014 Clément Bœsch <u pkh me>
+;* Copyright (C) 2014 Ronald S. Bultje <rsbultje@gmail.com>
 ;*
 ;* This file is part of FFmpeg.
 ;*
@@ -22,8 +23,6 @@
 
 %include "libavutil/x86/x86util.asm"
 
-%if ARCH_X86_64
-
 SECTION_RODATA
 
 cextern pb_3
@@ -35,6 +34,7 @@ pb_40: times 16 db 0x40
 pb_81: times 16 db 0x81
 pb_f8: times 16 db 0xf8
 pb_fe: times 16 db 0xfe
+pb_ff: times 16 db 0xff
 
 cextern pw_4
 cextern pw_8
@@ -51,27 +51,48 @@ mask_mix48: times 8 db 0x00
 
 SECTION .text
 
+%macro SCRATCH 3
+%if ARCH_X86_64
+    SWAP %1, %2
+%else
+    mova [%3], m%1
+%endif
+%endmacro
+
+%macro UNSCRATCH 3
+%if ARCH_X86_64
+    SWAP %1, %2
+%else
+    mova m%1, [%3]
+%endif
+%endmacro
+
 ; %1 = abs(%2-%3)
 %macro ABSSUB 4 ; dst, src1 (RO), src2 (RO), tmp
+%if ARCH_X86_64
     psubusb %1, %3, %2
     psubusb %4, %2, %3
+%else
+    mova %1, %3
+    mova %4, %2
+    psubusb %1, %2
+    psubusb %4, %3
+%endif
     por %1, %4
 %endmacro
 
-; %1 = %1<=%2
-%macro CMP_LTE 3-4 ; src/dst, cmp, tmp, pb_80
-%if %0 == 4
-    pxor %1, %4
+; %1 = %1>%2
+%macro CMP_GT 2-3 ; src/dst, cmp, pb_80
+%if %0 == 3
    pxor %1, %3
 %endif
-    pcmpgtb %3, %2, %1    ; cmp > src?
-    pcmpeqb %1, %2        ; cmp == src? XXX: avoid this with a -1/+1 well placed?
-    por %1, %3            ; cmp >= src?
+    pcmpgtb %1, %2
 %endmacro
 
-; %1 = abs(%2-%3) <= %4
-%macro ABSSUB_CMP 6-7 [pb_80]; dst, src1, src2, cmp, tmp1, tmp2, [pb_80]
-    ABSSUB %1, %2, %3, %6    ; dst = abs(src1-src2)
-    CMP_LTE %1, %4, %6, %7   ; dst <= cmp
+; %1 = abs(%2-%3) > %4
+%macro ABSSUB_GT 5-6 [pb_80]; dst, src1, src2, cmp, tmp, [pb_80]
+    ABSSUB %1, %2, %3, %5    ; dst = abs(src1-src2)
+    CMP_GT %1, %4, %6        ; dst > cmp
 %endmacro
 
 %macro MASK_APPLY 4 ; %1=new_data/dst %2=old_data %3=mask %4=tmp
@@ -80,39 +101,59 @@ SECTION .text
     por %1, %4    ; new&mask | old&~mask
 %endmacro
 
-%macro FILTER_SUBx2_ADDx2 8 ; %1=dst %2=h/l %3=cache %4=sub1 %5=sub2 %6=add1 %7=add2 %8=rshift
-    punpck%2bw %3, %4, m0
-    psubw %1, %3
-    punpck%2bw %3, %5, m0
-    psubw %1, %3
-    punpck%2bw %3, %6, m0
-    paddw %1, %3
-    punpck%2bw %3, %7, m0
+%macro UNPACK 4
+%if ARCH_X86_64
+    punpck%1bw %2, %3, %4
+%else
+    mova %2, %3
+    punpck%1bw %2, %4
+%endif
+%endmacro
+
+%macro FILTER_SUBx2_ADDx2 11 ; %1=dst %2=h/l %3=cache %4=stack_off %5=sub1 %6=sub2 %7=add1
+                             ; %8=add2 %9=rshift, [unpack], [unpack_is_mem_on_x86_32]
+    psubw %3, [rsp+%4+%5*32]
+    psubw %3, [rsp+%4+%6*32]
+    paddw %3, [rsp+%4+%7*32]
+%ifnidn %10, ""
+%if %11 == 0
+    punpck%2bw %1, %10, m0
+%else
+    UNPACK %2, %1, %10, m0
+%endif
+    mova [rsp+%4+%8*32], %1
     paddw %3, %1
-    psraw %1, %3, %8
+%else
+    paddw %3, [rsp+%4+%8*32]
+%endif
+    psraw %1, %3, %9
 %endmacro
 
-%macro FILTER_INIT 8 ; tmp1, tmp2, cacheL, cacheH, dstp, filterid, mask, source
-    FILTER%6_INIT %1, l, %3
-    FILTER%6_INIT %2, h, %4
+; FIXME interleave l/h better (for instruction pairing)
+%macro FILTER_INIT 9 ; tmp1, tmp2, cacheL, cacheH, dstp, stack_off, filterid, mask, source
+    FILTER%7_INIT %1, l, %3, %6 + 0
+    FILTER%7_INIT %2, h, %4, %6 + 16
     packuswb %1, %2
-    MASK_APPLY %1, %8, %7, %2
+    MASK_APPLY %1, %9, %8, %2
     mova %5, %1
 %endmacro
 
-%macro FILTER_UPDATE 11-14 ; tmp1, tmp2, cacheL, cacheH, dstp, -, -, +, +, rshift, mask, [source], [preload reg + value]
-%if %0 == 13 ; no source + preload
-    mova %12, %13
-%elif %0 == 14 ; source + preload
-    mova %13, %14
+
+%macro FILTER_UPDATE 12-16 "", "", "", 0 ; tmp1, tmp2, cacheL, cacheH, dstp, stack_off, -, -, +, +, rshift,
+                                         ; mask, [source], [unpack + src], [unpack_is_mem_on_x86_32]
+; FIXME interleave this properly with the subx2/addx2
+%ifnidn %15, ""
+%if %16 == 0 || ARCH_X86_64
+    mova %14, %15
 %endif
-    FILTER_SUBx2_ADDx2 %1, l, %3, %6, %7, %8, %9, %10
-    FILTER_SUBx2_ADDx2 %2, h, %4, %6, %7, %8, %9, %10
+%endif
+    FILTER_SUBx2_ADDx2 %1, l, %3, %6 + 0, %7, %8, %9, %10, %11, %14, %16
+    FILTER_SUBx2_ADDx2 %2, h, %4, %6 + 16, %7, %8, %9, %10, %11, %14, %16
     packuswb %1, %2
-%if %0 == 12 || %0 == 14
-    MASK_APPLY %1, %12, %11, %2
+%ifnidn %13, ""
+    MASK_APPLY %1, %13, %12, %2
 %else
-    MASK_APPLY %1, %5, %11, %2
+    MASK_APPLY %1, %5, %12, %2
 %endif
     mova %5, %1
 %endmacro
@@ -139,57 +180,61 @@ SECTION .text
 %endmacro
 
 ; clip_u8(u8 + i8)
-%macro SIGN_ADD 5 ; dst, u8, i8, tmp1, tmp2
-    EXTRACT_POS_NEG %3, %4, %5
-    psubusb %1, %2, %4    ; sub the negatives
-    paddusb %1, %5        ; add the positives
+%macro SIGN_ADD 4 ; dst, u8, i8, tmp1
+    EXTRACT_POS_NEG %3, %4, %1
+    paddusb %1, %2        ; add the positives
+    psubusb %1, %4        ; sub the negatives
 %endmacro
 
 ; clip_u8(u8 - i8)
-%macro SIGN_SUB 5 ; dst, u8, i8, tmp1, tmp2
-    EXTRACT_POS_NEG %3, %4, %5
-    psubusb %1, %2, %5    ; sub the positives
-    paddusb %1, %4        ; add the negatives
+%macro SIGN_SUB 4 ; dst, u8, i8, tmp1
+    EXTRACT_POS_NEG %3, %1, %4
+    paddusb %1, %2        ; add the negatives
+    psubusb %1, %4        ; sub the positives
 %endmacro
 
-%macro FILTER6_INIT 3 ; %1=dst %2=h/l %3=cache
-    punpck%2bw %1, m14, m0          ; p3: B->W
+%macro FILTER6_INIT 4 ; %1=dst %2=h/l %3=cache, %4=stack_off
+    UNPACK %2, %1, rp3, m0          ; p3: B->W
+    mova [rsp+%4+0*32], %1
     paddw %3, %1, %1                ; p3*2
     paddw %3, %1                    ; p3*3
-    punpck%2bw %1, m15, m0          ; p2: B->W
+    punpck%2bw %1, m1, m0           ; p2: B->W
+    mova [rsp+%4+1*32], %1
     paddw %3, %1                    ; p3*3 + p2
     paddw %3, %1                    ; p3*3 + p2*2
-    punpck%2bw %1, m10, m0          ; p1: B->W
+    UNPACK %2, %1, rp1, m0          ; p1: B->W
+    mova [rsp+%4+2*32], %1
     paddw %3, %1                    ; p3*3 + p2*2 + p1
-    punpck%2bw %1, m11, m0          ; p0: B->W
+    UNPACK %2, %1, rp0, m0          ; p0: B->W
+    mova [rsp+%4+3*32], %1
     paddw %3, %1                    ; p3*3 + p2*2 + p1 + p0
-    punpck%2bw %1, m12, m0          ; q0: B->W
+    UNPACK %2, %1, rq0, m0          ; q0: B->W
+    mova [rsp+%4+4*32], %1
     paddw %3, %1                    ; p3*3 + p2*2 + p1 + p0 + q0
     paddw %3, [pw_4]                ; p3*3 + p2*2 + p1 + p0 + q0 + 4
    psraw %1, %3, 3                 ; (p3*3 + p2*2 + p1 + p0 + q0 + 4) >> 3
 %endmacro
 
-%macro FILTER14_INIT 3 ; %1=dst %2=h/l %3=cache
+%macro FILTER14_INIT 4 ; %1=dst %2=h/l %3=cache, %4=stack_off
     punpck%2bw %1, m2, m0           ; p7: B->W
+    mova [rsp+%4+ 8*32], %1
     psllw %3, %1, 3                 ; p7*8
     psubw %3, %1                    ; p7*7
     punpck%2bw %1, m3, m0           ; p6: B->W
+    mova [rsp+%4+ 9*32], %1
     paddw %3, %1                    ; p7*7 + p6
     paddw %3, %1                    ; p7*7 + p6*2
-    punpck%2bw %1, m8, m0           ; p5: B->W
+    UNPACK %2, %1, rp5, m0          ; p5: B->W
+    mova [rsp+%4+10*32], %1
     paddw %3, %1                    ; p7*7 + p6*2 + p5
-    punpck%2bw %1, m9, m0           ; p4: B->W
+    UNPACK %2, %1, rp4, m0          ; p4: B->W
+    mova [rsp+%4+11*32], %1
     paddw %3, %1                    ; p7*7 + p6*2 + p5 + p4
-    punpck%2bw %1, m14, m0          ; p3: B->W
-    paddw %3, %1                    ; p7*7 + p6*2 + p5 + p4 + p3
-    punpck%2bw %1, m15, m0          ; p2: B->W
-    paddw %3, %1                    ; p7*7 + p6*2 + p5 + .. + p2
-    punpck%2bw %1, m10, m0          ; p1: B->W
-    paddw %3, %1                    ; p7*7 + p6*2 + p5 + .. + p1
-    punpck%2bw %1, m11, m0          ; p0: B->W
-    paddw %3, %1                    ; p7*7 + p6*2 + p5 + .. + p0
-    punpck%2bw %1, m12, m0          ; q0: B->W
-    paddw %3, %1                    ; p7*7 + p6*2 + p5 + .. + p0 + q0
+    paddw %3, [rsp+%4+ 0*32]        ; p7*7 + p6*2 + p5 + p4 + p3
+    paddw %3, [rsp+%4+ 1*32]        ; p7*7 + p6*2 + p5 + .. + p2
+    paddw %3, [rsp+%4+ 2*32]        ; p7*7 + p6*2 + p5 + .. + p1
+    paddw %3, [rsp+%4+ 3*32]        ; p7*7 + p6*2 + p5 + .. + p0
+    paddw %3, [rsp+%4+ 4*32]        ; p7*7 + p6*2 + p5 + .. + p0 + q0
     paddw %3, [pw_8]                ; p7*7 + p6*2 + p5 + .. + p0 + q0 + 8
     psraw %1, %3, 4                 ; (p7*7 + p6*2 + p5 + .. + p0 + q0 + 8) >> 4
 %endmacro
@@ -245,55 +290,66 @@ SECTION .text
     SWAP %12, %14
 %endmacro
 
-; transpose 16 half lines (high part) to 8 full centered lines
-%macro TRANSPOSE16x8B 16
-    punpcklbw m%1, m%2
-    punpcklbw m%3, m%4
-    punpcklbw m%5, m%6
-    punpcklbw m%7, m%8
-    punpcklbw m%9, m%10
-    punpcklbw m%11, m%12
-    punpcklbw m%13, m%14
-    punpcklbw m%15, m%16
-    SBUTTERFLY wd, %1, %3, %2
-    SBUTTERFLY wd, %5, %7, %2
-    SBUTTERFLY wd, %9, %11, %2
-    SBUTTERFLY wd, %13, %15, %2
-    SBUTTERFLY dq, %1, %5, %2
-    SBUTTERFLY dq, %3, %7, %2
-    SBUTTERFLY dq, %9, %13, %2
-    SBUTTERFLY dq, %11, %15, %2
-    SBUTTERFLY qdq, %1, %9, %2
-    SBUTTERFLY qdq, %3, %11, %2
-    SBUTTERFLY qdq, %5, %13, %2
-    SBUTTERFLY qdq, %7, %15, %2
-    SWAP %5, %1
-    SWAP %6, %9
-    SWAP %7, %1
-    SWAP %8, %13
-    SWAP %9, %3
-    SWAP %10, %11
-    SWAP %11, %1
-    SWAP %12, %15
+%macro TRANSPOSE8x8B 13
+    SBUTTERFLY bw, %1, %2, %7
+    movdq%10 m%7, %9
+    movdqa %11, m%2
+    SBUTTERFLY bw, %3, %4, %2
+    SBUTTERFLY bw, %5, %6, %2
+    SBUTTERFLY bw, %7, %8, %2
+    SBUTTERFLY wd, %1, %3, %2
+    movdqa m%2, %11
+    movdqa %11, m%3
+    SBUTTERFLY wd, %2, %4, %3
+    SBUTTERFLY wd, %5, %7, %3
+    SBUTTERFLY wd, %6, %8, %3
+    SBUTTERFLY dq, %1, %5, %3
+    SBUTTERFLY dq, %2, %6, %3
+    movdqa m%3, %11
+    movh %12, m%2
+    movhps %13, m%2
+    SBUTTERFLY dq, %3, %7, %2
+    SBUTTERFLY dq, %4, %8, %2
+    SWAP %2, %5
+    SWAP %4, %7
 %endmacro
 
 %macro DEFINE_REAL_P7_TO_Q7 0-1 0
-%define P7 dst1q + 2*mstrideq + %1
-%define P6 dst1q +   mstrideq + %1
-%define P5 dst1q              + %1
-%define P4 dst1q +    strideq + %1
-%define P3 dstq  + 4*mstrideq + %1
-%define P2 dstq  +  mstride3q + %1
-%define P1 dstq  + 2*mstrideq + %1
-%define P0 dstq  +   mstrideq + %1
-%define Q0 dstq               + %1
-%define Q1 dstq  +    strideq + %1
-%define Q2 dstq  + 2* strideq + %1
-%define Q3 dstq  +   stride3q + %1
-%define Q4 dstq  + 4* strideq + %1
-%define Q5 dst2q +   mstrideq + %1
-%define Q6 dst2q              + %1
-%define Q7 dst2q +    strideq + %1
+%define P7 dstq  + 4*mstrideq + %1
+%define P6 dstq  +  mstride3q + %1
+%define P5 dstq  + 2*mstrideq + %1
+%define P4 dstq  +   mstrideq + %1
+%define P3 dstq               + %1
+%define P2 dstq  +    strideq + %1
+%define P1 dstq  + 2* strideq + %1
+%define P0 dstq  +   stride3q + %1
+%define Q0 dstq  + 4* strideq + %1
+%define Q1 dst2q +  mstride3q + %1
+%define Q2 dst2q + 2*mstrideq + %1
+%define Q3 dst2q +   mstrideq + %1
+%define Q4 dst2q              + %1
+%define Q5 dst2q +    strideq + %1
+%define Q6 dst2q + 2* strideq + %1
+%define Q7 dst2q +   stride3q + %1
+%endmacro
+
+%macro DEFINE_TRANSPOSED_P7_TO_Q7 0-1 0
+%define P3 rsp +   0 + %1
+%define P2 rsp +  16 + %1
+%define P1 rsp +  32 + %1
+%define P0 rsp +  48 + %1
+%define Q0 rsp +  64 + %1
+%define Q1 rsp +  80 + %1
+%define Q2 rsp +  96 + %1
+%define Q3 rsp + 112 + %1
+%define P7 rsp + 128 + %1
+%define P6 rsp + 144 + %1
+%define P5 rsp + 160 + %1
+%define P4 rsp + 176 + %1
+%define Q4 rsp + 192 + %1
+%define Q5 rsp + 208 + %1
+%define Q6 rsp + 224 + %1
+%define Q7 rsp + 240 + %1
 %endmacro
 
 ; ..............AB -> AAAAAAAABBBBBBBB
@@ -307,27 +363,40 @@ SECTION .text
 %endif
 %endmacro
 
-%macro LOOPFILTER 2 ; %1=v/h %2=size1
-    lea mstrideq, [strideq]
-    neg mstrideq
+%macro LOOPFILTER 5 ; %1=v/h %2=size1 %3+%4=stack, %5=32bit stack only
+%if UNIX64
+cglobal vp9_loop_filter_%1_%2_16, 5, 9, 16, %3 + %4, dst, stride, E, I, H, mstride, dst2, stride3, mstride3
+%else
+%if WIN64
+cglobal vp9_loop_filter_%1_%2_16, 4, 8, 16, %3 + %4, dst, stride, E, I, mstride, dst2, stride3, mstride3
+%else
+cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride, dst2, stride3, mstride3
+%define Ed dword r2m
+%define Id dword r3m
+%endif
+%define Hd dword r4m
+%endif
 
-    lea stride3q, [strideq+2*strideq]
-    mov mstride3q, stride3q
-    neg mstride3q
+    mov mstrideq, strideq
+    neg mstrideq
+
+    lea stride3q, [strideq*3]
+    lea mstride3q, [mstrideq*3]
 
 %ifidn %1, h
 %if %2 > 16
%define movx movh
-    lea dstq, [dstq + 8*strideq - 4]
+    lea dstq, [dstq + 4*strideq - 4]
 %else
 %define movx movu
-    lea dstq, [dstq + 8*strideq - 8]    ; go from top center (h pos) to center left (v pos)
+    lea dstq, [dstq + 4*strideq - 8]    ; go from top center (h pos) to center left (v pos)
 %endif
+    lea dst2q, [dstq + 8*strideq]
+%else
+    lea dstq, [dstq + 4*mstrideq]
+    lea dst2q, [dstq + 8*strideq]
 %endif
 
-    lea dst1q, [dstq + 2*mstride3q]     ; dst1q = &dst[stride * -6]
-    lea dst2q, [dstq + 2* stride3q]     ; dst2q = &dst[stride * +6]
-
     DEFINE_REAL_P7_TO_Q7
 
 %ifidn %1, h
@@ -337,8 +406,11 @@ SECTION .text
     movx m3, [P4]
     movx m4, [P3]
     movx m5, [P2]
+%if ARCH_X86_64 || %2 != 16
     movx m6, [P1]
+%endif
     movx m7, [P0]
+%if ARCH_X86_64
     movx m8, [Q0]
     movx m9, [Q1]
     movx m10, [Q2]
@@ -347,32 +419,32 @@ SECTION .text
     movx m13, [Q5]
     movx m14, [Q6]
     movx m15, [Q7]
-%define P7 rsp +   0
-%define P6 rsp +  16
-%define P5 rsp +  32
-%define P4 rsp +  48
-%define P3 rsp +  64
-%define P2 rsp +  80
-%define P1 rsp +  96
-%define P0 rsp + 112
-%define Q0 rsp + 128
-%define Q1 rsp + 144
-%define Q2 rsp + 160
-%define Q3 rsp + 176
-%define Q4 rsp + 192
-%define Q5 rsp + 208
-%define Q6 rsp + 224
-%define Q7 rsp + 240
-
+    DEFINE_TRANSPOSED_P7_TO_Q7
 %if %2 == 16
     TRANSPOSE16x16B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, [rsp]
     mova [P7], m0
     mova [P6], m1
     mova [P5], m2
     mova [P4], m3
-%else
-    TRANSPOSE16x8B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
-%endif
+%else ; %2 == 44/48/84/88
+    ; 8x16 transpose
+    punpcklbw m0, m1
+    punpcklbw m2, m3
+    punpcklbw m4, m5
+    punpcklbw m6, m7
+    punpcklbw m8, m9
+    punpcklbw m10, m11
+    punpcklbw m12, m13
+    punpcklbw m14, m15
+    TRANSPOSE8x8W 0, 2, 4, 6, 8, 10, 12, 14, 15
+    SWAP 0, 4
+    SWAP 2, 5
+    SWAP 0, 6
+    SWAP 0, 7
+    SWAP 10, 9
+    SWAP 12, 10
+    SWAP 14, 11
+%endif ; %2
     mova [P3], m4
     mova [P2], m5
     mova [P1], m6
@@ -386,8 +458,80 @@ SECTION .text
     mova [Q5], m13
     mova [Q6], m14
     mova [Q7], m15
-%endif
-%endif
+%endif ; %2
+%else ; x86-32
+%if %2 == 16
+    TRANSPOSE8x8B 0, 1, 2, 3, 4, 5, 6, 7, [P1], u, [rsp+%3+%4], [rsp+64], [rsp+80]
+    DEFINE_TRANSPOSED_P7_TO_Q7
+    movh [P7], m0
+    movh [P5], m1
+    movh [P3], m2
+    movh [P1], m3
+    movh [Q2], m5
+    movh [Q4], m6
+    movh [Q6], m7
+    movhps [P6], m0
+    movhps [P4], m1
+    movhps [P2], m2
+    movhps [P0], m3
+    movhps [Q3], m5
+    movhps [Q5], m6
+    movhps [Q7], m7
+    DEFINE_REAL_P7_TO_Q7
+    movx m0, [Q0]
+    movx m1, [Q1]
+    movx m2, [Q2]
+    movx m3, [Q3]
+    movx m4, [Q4]
+    movx m5, [Q5]
+    movx m7, [Q7]
+    TRANSPOSE8x8B 0, 1, 2, 3, 4, 5, 6, 7, [Q6], u, [rsp+%3+%4], [rsp+72], [rsp+88]
+    DEFINE_TRANSPOSED_P7_TO_Q7 8
+    movh [P7], m0
+    movh [P5], m1
+    movh [P3], m2
+    movh [P1], m3
+    movh [Q2], m5
+    movh [Q4], m6
+    movh [Q6], m7
+    movhps [P6], m0
+    movhps [P4], m1
+    movhps [P2], m2
+    movhps [P0], m3
+    movhps [Q3], m5
+    movhps [Q5], m6
+    movhps [Q7], m7
+    DEFINE_TRANSPOSED_P7_TO_Q7
+%else ; %2 == 44/48/84/88
+    punpcklbw m0, m1
+    punpcklbw m2, m3
+    punpcklbw m4, m5
+    punpcklbw m6, m7
+    movx m1, [Q0]
+    movx m3, [Q1]
+    movx m5, [Q2]
+    movx m7, [Q3]
+    punpcklbw m1, m3
+    punpcklbw m5, m7
+    movx m3, [Q4]
+    movx m7, [Q5]
+    punpcklbw m3, m7
+    mova [rsp], m3
+    movx m3, [Q6]
+    movx m7, [Q7]
+    punpcklbw m3, m7
+    DEFINE_TRANSPOSED_P7_TO_Q7
+    TRANSPOSE8x8W 0, 2, 4, 6, 1, 5, 7, 3, [rsp], [Q0], 1
+    mova [P3], m0
+    mova [P2], m2
+    mova [P1], m4
+    mova [P0], m6
+    mova [Q1], m5
+    mova [Q2], m7
+    mova [Q3], m3
+%endif ; %2
+%endif ; x86-32/64
+%endif ; %1 == h
 
 ; calc fm mask
 %if %2 == 16
@@ -408,6 +552,7 @@ SECTION .text
     mova m0, [pb_80]
     pxor m2, m0
     pxor m3, m0
+%if ARCH_X86_64
 %ifidn %1, v
     mova m8, [P3]
     mova m9, [P2]
@@ -425,38 +570,61 @@ SECTION .text
     SWAP 10, 6, 14
     SWAP 11, 7, 15
 %endif
-    ABSSUB_CMP m5, m8, m9, m2, m6, m7, m0    ; m5 = abs(p3-p2) <= I
-    ABSSUB_CMP m1, m9, m10, m2, m6, m7, m0   ; m1 = abs(p2-p1) <= I
-    pand m5, m1
-    ABSSUB_CMP m1, m10, m11, m2, m6, m7, m0  ; m1 = abs(p1-p0) <= I
-    pand m5, m1
-    ABSSUB_CMP m1, m12, m13, m2, m6, m7, m0  ; m1 = abs(q1-q0) <= I
-    pand m5, m1
-    ABSSUB_CMP m1, m13, m14, m2, m6, m7, m0  ; m1 = abs(q2-q1) <= I
-    pand m5, m1
-    ABSSUB_CMP m1, m14, m15, m2, m6, m7, m0  ; m1 = abs(q3-q2) <= I
-    pand m5, m1
-    ABSSUB m1, m11, m12, m7                  ; abs(p0-q0)
+%define rp3 m8
+%define rp2 m9
+%define rp1 m10
+%define rp0 m11
+%define rq0 m12
+%define rq1 m13
+%define rq2 m14
+%define rq3 m15
+%else
+%define rp3 [P3]
+%define rp2 [P2]
+%define rp1 [P1]
+%define rp0 [P0]
+%define rq0 [Q0]
+%define rq1 [Q1]
+%define rq2 [Q2]
+%define rq3 [Q3]
+%endif
+    ABSSUB_GT m5, rp3, rp2, m2, m7, m0       ; m5 = abs(p3-p2) <= I
+    ABSSUB_GT m1, rp2, rp1, m2, m7, m0       ; m1 = abs(p2-p1) <= I
+    por m5, m1
+    ABSSUB_GT m1, rp1, rp0, m2, m7, m0       ; m1 = abs(p1-p0) <= I
+    por m5, m1
+    ABSSUB_GT m1, rq0, rq1, m2, m7, m0       ; m1 = abs(q1-q0) <= I
+    por m5, m1
+    ABSSUB_GT m1, rq1, rq2, m2, m7, m0       ; m1 = abs(q2-q1) <= I
+    por m5, m1
+    ABSSUB_GT m1, rq2, rq3, m2, m7, m0       ; m1 = abs(q3-q2) <= I
+    por m5, m1
+    ABSSUB m1, rp0, rq0, m7                  ; abs(p0-q0)
     paddusb m1, m1                           ; abs(p0-q0) * 2
-    ABSSUB m2, m10, m13, m7                  ; abs(p1-q1)
+    ABSSUB m2, rp1, rq1, m7                  ; abs(p1-q1)
     pand m2, [pb_fe]                         ; drop lsb so shift can work
     psrlq m2, 1                              ; abs(p1-q1)/2
     paddusb m1, m2                           ; abs(p0-q0)*2 + abs(p1-q1)/2
     pxor m1, m0
-    pcmpgtb m4, m3, m1                       ; E > X?
-    pcmpeqb m3, m1                           ; E == X?
-    por m3, m4                               ; E >= X?
-    pand m3, m5                              ; fm final value
+    pcmpgtb m1, m3
+    por m1, m5                               ; fm final value
+    SWAP 1, 3
+    pxor m3, [pb_ff]
 
 ; (m3: fm, m8..15: p3 p2 p1 p0 q0 q1 q2 q3)
 ; calc flat8in (if not 44_16) and hev masks
-    mova m6, [pb_81]                         ; [1 1 1 1 ...] ^ 0x80
 %if %2 != 44
-    ABSSUB_CMP m2, m8, m11, m6, m4, m5       ; abs(p3 - p0) <= 1
+    mova m6, [pb_81]                         ; [1 1 1 1 ...] ^ 0x80
+    ABSSUB_GT m2, rp3, rp0, m6, m5           ; abs(p3 - p0) <= 1
+%if ARCH_X86_64
     mova m8, [pb_80]
-    ABSSUB_CMP m1, m9, m11, m6, m4, m5, m8   ; abs(p2 - p0) <= 1
-    pand m2, m1
-    ABSSUB m4, m10, m11, m5                  ; abs(p1 - p0)
+%define rb80 m8
+%else
+%define rb80 [pb_80]
+%endif
+    ABSSUB_GT m1, rp2, rp0, m6, m5, rb80     ; abs(p2 - p0) <= 1
+    por m2, m1
+    ABSSUB m4, rp1, rp0, m5                  ; abs(p1 - p0)
 %if %2 == 16
 %if cpuflag(ssse3)
     pxor m0, m0
@@ -466,21 +634,22 @@ SECTION .text
     movd m7, Hd
     SPLATB_MIX m7
 %endif
-    pxor m7, m8
-    pxor m4, m8
+    pxor m7, rb80
+    pxor m4, rb80
     pcmpgtb m0, m4, m7                       ; abs(p1 - p0) > H (1/2 hev condition)
-    CMP_LTE m4, m6, m5                       ; abs(p1 - p0) <= 1
-    pand m2, m4                              ; (flat8in)
-    ABSSUB m4, m13, m12, m1                  ; abs(q1 - q0)
-    pxor m4, m8
+    CMP_GT m4, m6                            ; abs(p1 - p0) <= 1
+    por m2, m4                               ; (flat8in)
+    ABSSUB m4, rq1, rq0, m1                  ; abs(q1 - q0)
+    pxor m4, rb80
     pcmpgtb m5, m4, m7                       ; abs(q1 - q0) > H (2/2 hev condition)
     por m0, m5                               ; hev final value
-    CMP_LTE m4, m6, m5                       ; abs(q1 - q0) <= 1
-    pand m2, m4                              ; (flat8in)
-    ABSSUB_CMP m1, m14, m12, m6, m4, m5, m8  ; abs(q2 - q0) <= 1
-    pand m2, m1
-    ABSSUB_CMP m1, m15, m12, m6, m4, m5, m8  ; abs(q3 - q0) <= 1
-    pand m2, m1                              ; flat8in final value
+    CMP_GT m4, m6                            ; abs(q1 - q0) <= 1
+    por m2, m4                               ; (flat8in)
+    ABSSUB_GT m1, rq2, rq0, m6, m5, rb80     ; abs(q2 - q0) <= 1
+    por m2, m1
+    ABSSUB_GT m1, rq3, rq0, m6, m5, rb80     ; abs(q3 - q0) <= 1
+    por m2, m1                               ; flat8in final value
+    pxor m2, [pb_ff]
 %if %2 == 84 || %2 == 48
     pand m2, [mask_mix%2]
 %endif
@@ -489,10 +658,10 @@ SECTION .text
     movd m7, Hd
     SPLATB_MIX m7
     pxor m7, m6
-    ABSSUB m4, m10, m11, m1                  ; abs(p1 - p0)
+    ABSSUB m4, rp1, rp0, m1                  ; abs(p1 - p0)
     pxor m4, m6
     pcmpgtb m0, m4, m7                       ; abs(p1 - p0) > H (1/2 hev condition)
-    ABSSUB m4, m13, m12, m1                  ; abs(q1 - q0)
+    ABSSUB m4, rq1, rq0, m1                  ; abs(q1 - q0)
     pxor m4, m6
     pcmpgtb m5, m4, m7                       ; abs(q1 - q0) > H (2/2 hev condition)
     por m0, m5                               ; hev final value
@@ -501,29 +670,58 @@ SECTION .text
 %if %2 == 16
 ; (m0: hev, m2: flat8in, m3: fm, m6: pb_81, m9..15: p2 p1 p0 q0 q1 q2 q3)
 ; calc flat8out mask
+%if ARCH_X86_64
     mova m8, [P7]
     mova m9, [P6]
-    ABSSUB_CMP m1, m8, m11, m6, m4, m5       ; abs(p7 - p0) <= 1
-    ABSSUB_CMP m7, m9, m11, m6, m4, m5       ; abs(p6 - p0) <= 1
-    pand m1, m7
+%define rp7 m8
+%define rp6 m9
+%else
+%define rp7 [P7]
+%define rp6 [P6]
+%endif
+    ABSSUB_GT m1, rp7, rp0, m6, m5           ; abs(p7 - p0) <= 1
+    ABSSUB_GT m7, rp6, rp0, m6, m5           ; abs(p6 - p0) <= 1
+    por m1, m7
+%if ARCH_X86_64
     mova m8, [P5]
     mova m9, [P4]
-    ABSSUB_CMP m7, m8, m11, m6, m4, m5       ; abs(p5 - p0) <= 1
-    pand m1, m7
-    ABSSUB_CMP m7, m9, m11, m6, m4, m5       ; abs(p4 - p0) <= 1
-    pand m1, m7
+%define rp5 m8
+%define rp4 m9
+%else
+%define rp5 [P5]
+%define rp4 [P4]
+%endif
+    ABSSUB_GT m7, rp5, rp0, m6, m5           ; abs(p5 - p0) <= 1
+    por m1, m7
+    ABSSUB_GT m7, rp4, rp0, m6, m5           ; abs(p4 - p0) <= 1
+    por m1, m7
+%if ARCH_X86_64
     mova m14, [Q4]
     mova m15, [Q5]
-    ABSSUB_CMP m7, m14, m12, m6, m4, m5      ; abs(q4 - q0) <= 1
-    pand m1, m7
-    ABSSUB_CMP m7, m15, m12, m6, m4, m5      ; abs(q5 - q0) <= 1
-    pand m1, m7
+%define rq4 m14
+%define rq5 m15
+%else
+%define rq4 [Q4]
+%define rq5 [Q5]
+%endif
+    ABSSUB_GT m7, rq4, rq0, m6, m5           ; abs(q4 - q0) <= 1
+    por m1, m7
+    ABSSUB_GT m7, rq5, rq0, m6, m5           ; abs(q5 - q0) <= 1
+    por m1, m7
+%if ARCH_X86_64
     mova m14, [Q6]
     mova m15, [Q7]
-    ABSSUB_CMP m7, m14, m12, m6, m4, m5      ; abs(q4 - q0) <= 1
-    pand m1, m7
-    ABSSUB_CMP m7, m15, m12, m6, m4, m5      ; abs(q5 - q0) <= 1
-    pand m1, m7                              ; flat8out final value
+%define rq6 m14
+%define rq7 m15
+%else
+%define rq6 [Q6]
+%define rq7 [Q7]
+%endif
+    ABSSUB_GT m7, rq6, rq0, m6, m5           ; abs(q4 - q0) <= 1
+    por m1, m7
+    ABSSUB_GT m7, rq7, rq0, m6, m5           ; abs(q5 - q0) <= 1
+    por m1, m7                               ; flat8out final value
+    pxor m1, [pb_ff]
 %endif
 
 ; if (fm) {
@@ -542,66 +740,85 @@ SECTION .text
 ; filter2()
 %if %2 != 44
     mova m6, [pb_80]                         ; already in m6 if 44_16
+    SCRATCH 2, 15, rsp+%3+%4
+%if %2 == 16
+    SCRATCH 1, 8, rsp+%3+%4+16
 %endif
-    pxor m15, m12, m6                        ; q0 ^ 0x80
-    pxor m14, m11, m6                        ; p0 ^ 0x80
-    psubsb m15, m14                          ; (signed) q0 - p0
-    pxor m4, m10, m6                         ; p1 ^ 0x80
-    pxor m5, m13, m6                         ; q1 ^ 0x80
+%endif
+    pxor m2, m6, rq0                         ; q0 ^ 0x80
+    pxor m4, m6, rp0                         ; p0 ^ 0x80
+    psubsb m2, m4                            ; (signed) q0 - p0
+    pxor m4, m6, rp1                         ; p1 ^ 0x80
+    pxor m5, m6, rq1                         ; q1 ^ 0x80
     psubsb m4, m5                            ; (signed) p1 - q1
-    paddsb m4, m15                           ; (q0 - p0) + (p1 - q1)
-    paddsb m4, m15                           ; 2*(q0 - p0) + (p1 - q1)
-    paddsb m4, m15                           ; 3*(q0 - p0) + (p1 - q1)
+    paddsb m4, m2                            ; (q0 - p0) + (p1 - q1)
+    paddsb m4, m2                            ; 2*(q0 - p0) + (p1 - q1)
+    paddsb m4, m2                            ; 3*(q0 - p0) + (p1 - q1)
     paddsb m6, m4, [pb_4]                    ; m6: f1 = clip(f + 4, 127)
     paddsb m4, [pb_3]                        ; m4: f2 = clip(f + 3, 127)
+%if ARCH_X86_64
     mova m14, [pb_10]                        ; will be reused in filter4()
-    SRSHIFT3B_2X m6, m4, m14, m7             ; f1 and f2 sign byte shift by 3
-    SIGN_SUB m7, m12, m6, m5, m9             ; m7 = q0 - f1
-    SIGN_ADD m8, m11, m4, m5, m9             ; m8 = p0 + f2
+%define rb10 m14
+%else
+%define rb10 [pb_10]
+%endif
+    SRSHIFT3B_2X m6, m4, rb10, m7            ; f1 and f2 sign byte shift by 3
+    SIGN_SUB m7, rq0, m6, m5                 ; m7 = q0 - f1
+    SIGN_ADD m1, rp0, m4, m5                 ; m1 = p0 + f2
 %if %2 != 44
-    pandn m6, m2, m3                         ; ~mask(in) & mask(fm)
+%if ARCH_X86_64
+    pandn m6, m15, m3                        ; ~mask(in) & mask(fm)
+%else
+    mova m6, [rsp+%3+%4]
+    pandn m6, m3
+%endif
     pand m6, m0                              ; (~mask(in) & mask(fm)) & mask(hev)
 %else
     pand m6, m3, m0
 %endif
-    MASK_APPLY m7, m12, m6, m5               ; m7 = filter2(q0) & mask / we write it in filter4()
-    MASK_APPLY m8, m11, m6, m5               ; m8 = filter2(p0) & mask / we write it in filter4()
+    MASK_APPLY m7, rq0, m6, m5               ; m7 = filter2(q0) & mask / we write it in filter4()
+    MASK_APPLY m1, rp0, m6, m5               ; m1 = filter2(p0) & mask / we write it in filter4()
 
-; (m0: hev, [m1: flat8out], [m2: flat8in], m3: fm, m7..m8: q0' p0', m10..13: p1 p0 q0 q1, m14: pb_10, m15: q0-p0)
+; (m0: hev, m1: p0', m2: q0-p0, m3: fm, m7: q0', [m8: flat8out], m10..13: p1 p0 q0 q1, m14: pb_10, [m15: flat8in], )
 ; filter4()
-    mova m4, m15
-    paddsb m15, m4                           ; 2 * (q0 - p0)
-    paddsb m15, m4                           ; 3 * (q0 - p0)
-    paddsb m6, m15, [pb_4]                   ; m6: f1 = clip(f + 4, 127)
-    paddsb m15, [pb_3]                       ; m15: f2 = clip(f + 3, 127)
-    SRSHIFT3B_2X m6, m15, m14, m9            ; f1 and f2 sign byte shift by 3
+    mova m4, m2
+    paddsb m2, m4                            ; 2 * (q0 - p0)
+    paddsb m2, m4                            ; 3 * (q0 - p0)
+    paddsb m6, m2, [pb_4]                    ; m6: f1 = clip(f + 4, 127)
+    paddsb m2, [pb_3]                        ; m2: f2 = clip(f + 3, 127)
+    SRSHIFT3B_2X m6, m2, rb10, m4            ; f1 and f2 sign byte shift by 3
 %if %2 != 44
-%define p0tmp m7
-%define q0tmp m9
-    pandn m5, m2, m3                         ; ~mask(in) & mask(fm)
+%if ARCH_X86_64
+    pandn m5, m15, m3                        ; ~mask(in) & mask(fm)
+%else
+    mova m5, [rsp+%3+%4]
+    pandn m5, m3
+%endif
    pandn m0, m5                             ; ~mask(hev) & (~mask(in) & mask(fm))
 %else
-%define p0tmp m1
-%define q0tmp m2
     pandn m0, m3
 %endif
-    SIGN_SUB q0tmp, m12, m6, m4, m14         ; q0 - f1
-    MASK_APPLY q0tmp, m7, m0, m5             ; filter4(q0) & mask
-    mova [Q0], q0tmp
-    SIGN_ADD p0tmp, m11, m15, m4, m14        ; p0 + f2
-    MASK_APPLY p0tmp, m8, m0, m5             ; filter4(p0) & mask
-    mova [P0], p0tmp
+    SIGN_SUB m5, rq0, m6, m4                 ; q0 - f1
+    MASK_APPLY m5, m7, m0, m4                ; filter4(q0) & mask
+    mova [Q0], m5
+    SIGN_ADD m7, rp0, m2, m4                 ; p0 + f2
+    MASK_APPLY m7, m1, m0, m4                ; filter4(p0) & mask
+    mova [P0], m7
     paddb m6, [pb_80]                        ;
-    pxor m8, m8                              ; f=(f1+1)>>1
-    pavgb m6, m8                             ;
+    pxor m1, m1                              ; f=(f1+1)>>1
+    pavgb m6, m1                             ;
     psubb m6, [pb_40]                        ;
-    SIGN_ADD m7, m10, m6, m8, m9             ; p1 + f
-    SIGN_SUB m4, m13, m6, m8, m9             ; q1 - f
-    MASK_APPLY m7, m10, m0, m14              ; m7 = filter4(p1)
-    MASK_APPLY m4, m13, m0, m14              ; m4 = filter4(q1)
-    mova [P1], m7
+    SIGN_ADD m1, rp1, m6, m2                 ; p1 + f
+    SIGN_SUB m4, rq1, m6, m2                 ; q1 - f
+    MASK_APPLY m1, rp1, m0, m2               ; m1 = filter4(p1)
+    MASK_APPLY m4, rq1, m0, m2               ; m4 = filter4(q1)
+    mova [P1], m1
     mova [Q1], m4
+%if %2 != 44
+    UNSCRATCH 2, 15, rsp+%3+%4
+%endif
 
 ; ([m1: flat8out], m2: flat8in, m3: fm, m10..13: p1 p0 q0 q1)
 ; filter6()
 %if %2 != 44
@@ -610,18 +827,34 @@ SECTION .text
     pand m3, m2
 %else
     pand m2, m3                              ; mask(fm) & mask(in)
-    pandn m3, m1, m2                         ; ~mask(out) & (mask(fm) & mask(in))
+%if ARCH_X86_64
    pandn m3, m8, m2                         ; ~mask(out) & (mask(fm) & mask(in))
+%else
+    mova m3, [rsp+%3+%4+16]
+    pandn m3, m2
+%endif
 %endif
+%if ARCH_X86_64
     mova m14, [P3]
-    mova m15, [P2]
-    mova m8, [Q2]
     mova m9, [Q3]
-    FILTER_INIT m4, m5, m6, m7, [P2], 6, m3, m15                     ; [p2]
-    FILTER_UPDATE m6, m7, m4, m5, [P1], m14, m15, m10, m13, 3, m3    ; [p1] -p3 -p2 +p1 +q1
-    FILTER_UPDATE m4, m5, m6, m7, [P0], m14, m10, m11, m8, 3, m3     ; [p0] -p3 -p1 +p0 +q2
-    FILTER_UPDATE m6, m7, m4, m5, [Q0], m14, m11, m12, m9, 3, m3     ; [q0] -p3 -p0 +q0 +q3
-    FILTER_UPDATE m4, m5, m6, m7, [Q1], m15, m12, m13, m9, 3, m3     ; [q1] -p2 -q0 +q1 +q3
-    FILTER_UPDATE m6, m7, m4, m5, [Q2], m10, m13, m8, m9, 3, m3, m8  ; [q2] -p1 -q1 +q2 +q3
+%define rp3 m14
+%define rq3 m9
+%else
+%define rp3 [P3]
+%define rq3 [Q3]
+%endif
+    mova m1, [P2]
+    FILTER_INIT m4, m5, m6, m7, [P2], %4, 6, m3, m1                              ; [p2]
+    mova m1, [Q2]
+    FILTER_UPDATE m4, m5, m6, m7, [P1], %4, 0, 1, 2, 5, 3, m3, "", rq1, "", 1    ; [p1] -p3 -p2 +p1 +q1
+    FILTER_UPDATE m4, m5, m6, m7, [P0], %4, 0, 2, 3, 6, 3, m3, "", m1            ; [p0] -p3 -p1 +p0 +q2
+    FILTER_UPDATE m4, m5, m6, m7, [Q0], %4, 0, 3, 4, 7, 3, m3, "", rq3, "", 1    ; [q0] -p3 -p0 +q0 +q3
+    FILTER_UPDATE m4, m5, m6, m7, [Q1], %4, 1, 4, 5, 7, 3, m3, ""                ; [q1] -p2 -q0 +q1 +q3
+    FILTER_UPDATE m4, m5, m6, m7, [Q2], %4, 2, 5, 6, 7, 3, m3, m1                ; [q2] -p1 -q1 +q2 +q3
+%endif
+
+%if %2 == 16
+    UNSCRATCH 1, 8, rsp+%3+%4+16
 %endif
 
 ; (m0: 0, [m1: flat8out], m2: fm & flat8in, m8..15: q2 q3 p1 p0 q0 q1 p3 p2)
@@ -649,22 +882,49 @@ SECTION .text
     pand m1, m2                              ; mask(out) & (mask(fm) & mask(in))
     mova m2, [P7]
     mova m3, [P6]
+%if ARCH_X86_64
     mova m8, [P5]
     mova m9, [P4]
-    FILTER_INIT m4, m5, m6, m7, [P6], 14, m1, m3
-    FILTER_UPDATE m6, m7, m4, m5, [P5], m2, m3, m8, m13, 4, m1, m8                ; [p5] -p7 -p6 +p5 +q1
-    FILTER_UPDATE m4, m5, m6, m7, [P4], m2, m8, m9, m13, 4, m1, m9, m13, [Q2]     ; [p4] -p7 -p5 +p4 +q2
-    FILTER_UPDATE m6, m7, m4, m5, [P3], m2, m9, m14, m13, 4, m1, m14, m13, [Q3]   ; [p3] -p7 -p4 +p3 +q3
-    FILTER_UPDATE m4, m5, m6, m7, [P2], m2, m14, m15, m13, 4, m1, m13, [Q4]       ; [p2] -p7 -p3 +p2 +q4
-    FILTER_UPDATE m6, m7, m4, m5, [P1], m2, m15, m10, m13, 4, m1, m13, [Q5]       ; [p1] -p7 -p2 +p1 +q5
-    FILTER_UPDATE m4, m5, m6, m7, [P0], m2, m10, m11, m13, 4, m1, m13, [Q6]       ; [p0] -p7 -p1 +p0 +q6
-    FILTER_UPDATE m6, m7, m4, m5, [Q0], m2, m11, m12, m13, 4, m1, m13, [Q7]       ; [q0] -p7 -p0 +q0 +q7
-    FILTER_UPDATE m4, m5, m6, m7, [Q1], m3, m12, m2, m13, 4, m1, m2, [Q1]         ; [q1] -p6 -q0 +q1 +q7
-    FILTER_UPDATE m6, m7, m4, m5, [Q2], m8, m2, m3, m13, 4, m1, m3, [Q2]          ; [q2] -p5 -q1 +q2 +q7
-    FILTER_UPDATE m4, m5, m6, m7, [Q3], m9, m3, m8, m13, 4, m1, m8, m8, [Q3]      ; [q3] -p4 -q2 +q3 +q7
-    FILTER_UPDATE m6, m7, m4, m5, [Q4], m14, m8, m9, m13, 4, m1, m9, m9, [Q4]     ; [q4] -p3 -q3 +q4 +q7
-    FILTER_UPDATE m4, m5, m6, m7, [Q5], m15, m9, m14, m13, 4, m1, m14, m14, [Q5]  ; [q5] -p2 -q4 +q5 +q7
-    FILTER_UPDATE m6, m7, m4, m5, [Q6], m10, m14, m15, m13, 4, m1, m15, m15, [Q6] ; [q6] -p1 -q5 +q6 +q7
+%define rp5 m8
+%define rp4 m9
+%define rp5s m8
+%define rp4s m9
+%define rp3s m14
+%define rq4 m8
+%define rq5 m9
+%define rq6 m14
+%define rq7 m15
+%define rq4s m8
+%define rq5s m9
+%define rq6s m14
+%else
+%define rp5 [P5]
+%define rp4 [P4]
+%define rp5s ""
+%define rp4s ""
+%define rp3s ""
+%define rq4 [Q4]
+%define rq5 [Q5]
+%define rq6 [Q6]
+%define rq7 [Q7]
+%define rq4s ""
+%define rq5s ""
+%define rq6s ""
+%endif
+    FILTER_INIT m4, m5, m6, m7, [P6], %4, 14, m1, m3                             ; [p6]
+    FILTER_UPDATE m4, m5, m6, m7, [P5], %4, 8, 9, 10, 5, 4, m1, rp5s             ; [p5] -p7 -p6 +p5 +q1
+    FILTER_UPDATE m4, m5, m6, m7, [P4], %4, 8, 10, 11, 6, 4, m1, rp4s            ; [p4] -p7 -p5 +p4 +q2
+    FILTER_UPDATE m4, m5, m6, m7, [P3], %4, 8, 11, 0, 7, 4, m1, rp3s             ; [p3] -p7 -p4 +p3 +q3
+    FILTER_UPDATE m4, m5, m6, m7, [P2], %4, 8, 0, 1, 12, 4, m1, "", rq4, [Q4], 1 ; [p2] -p7 -p3 +p2 +q4
+    FILTER_UPDATE m4, m5, m6, m7, [P1], %4, 8, 1, 2, 13, 4, m1, "", rq5, [Q5], 1 ; [p1] -p7 -p2 +p1 +q5
+    FILTER_UPDATE m4, m5, m6, m7, [P0], %4, 8, 2, 3, 14, 4, m1, "", rq6, [Q6], 1 ; [p0] -p7 -p1 +p0 +q6
+    FILTER_UPDATE m4, m5, m6, m7, [Q0], %4, 8, 3, 4, 15, 4, m1, "", rq7, [Q7], 1 ; [q0] -p7 -p0 +q0 +q7
+    FILTER_UPDATE m4, m5, m6, m7, [Q1], %4, 9, 4, 5, 15, 4, m1, ""               ; [q1] -p6 -q0 +q1 +q7
+    FILTER_UPDATE m4, m5, m6, m7, [Q2], %4, 10, 5, 6, 15, 4, m1, ""              ; [q2] -p5 -q1 +q2 +q7
+    FILTER_UPDATE m4, m5, m6, m7, [Q3], %4, 11, 6, 7, 15, 4, m1, ""              ; [q3] -p4 -q2 +q3 +q7
+    FILTER_UPDATE m4, m5, m6, m7, [Q4], %4, 0, 7, 12, 15, 4, m1, rq4s            ; [q4] -p3 -q3 +q4 +q7
+    FILTER_UPDATE m4, m5, m6, m7, [Q5], %4, 1, 12, 13, 15, 4, m1, rq5s           ; [q5] -p2 -q4 +q5 +q7
+    FILTER_UPDATE m4, m5, m6, m7, [Q6], %4, 2, 13, 14, 15, 4, m1, rq6s           ; [q6] -p1 -q5 +q6 +q7
 %endif
 
 %ifidn %1, h
     mova m0, [P7]
     mova m1, [P6]
     mova m2, [P5]
     mova m3, [P4]
     mova m4, [P3]
     mova m5, [P2]
+%if ARCH_X86_64
     mova m6, [P1]
+%endif
     mova m7, [P0]
+%if ARCH_X86_64
     mova m8, [Q0]
     mova m9, [Q1]
     mova m10, [Q2]
@@ -703,42 +966,86 @@ SECTION .text
     movu [Q5], m13
     movu [Q6], m14
     movu [Q7], m15
+%else
+    DEFINE_REAL_P7_TO_Q7
+    TRANSPOSE8x8B 0, 1, 2, 3, 4, 5, 6, 7, [rsp+32], a, [rsp+%3+%4], [Q0], [Q1]
+    movh [P7], m0
+    movh [P5], m1
+    movh [P3], m2
+    movh [P1], m3
+    movh [Q2], m5
+    movh [Q4], m6
+    movh [Q6], m7
+    movhps [P6], m0
+    movhps [P4], m1
+    movhps [P2], m2
+    movhps [P0], m3
+    movhps [Q3], m5
+    movhps [Q5], m6
+    movhps [Q7], m7
+    DEFINE_TRANSPOSED_P7_TO_Q7
+    mova m0, [Q0]
+    mova m1, [Q1]
+    mova m2, [Q2]
+    mova m3, [Q3]
+    mova m4, [Q4]
+    mova m5, [Q5]
+    mova m7, [Q7]
+    DEFINE_REAL_P7_TO_Q7 8
+    TRANSPOSE8x8B 0, 1, 2, 3, 4, 5, 6, 7, [rsp+224], a, [rsp+%3+%4], [Q0], [Q1]
+    movh [P7], m0
+    movh [P5], m1
+    movh [P3], m2
+    movh [P1], m3
+    movh [Q2], m5
+    movh [Q4], m6
+    movh [Q6], m7
+    movhps [P6], m0
+    movhps [P4], m1
+    movhps [P2], m2
+    movhps [P0], m3
+    movhps [Q3], m5
+    movhps [Q5], m6
+    movhps [Q7], m7
+%endif
 %elif %2 == 44
-    SWAP 0, 7   ; m0 = p1
+    SWAP 0, 1   ; m0 = p1
+    SWAP 1, 7   ; m1 = p0
+    SWAP 2, 5   ; m2 = q0
     SWAP 3, 4   ; m3 = q1
     DEFINE_REAL_P7_TO_Q7 2
-    SBUTTERFLY bw, 0, 1, 8
-    SBUTTERFLY bw, 2, 3, 8
-    SBUTTERFLY wd, 0, 2, 8
-    SBUTTERFLY wd, 1, 3, 8
-    SBUTTERFLY dq, 0, 4, 8
-    SBUTTERFLY dq, 1, 5, 8
-    SBUTTERFLY dq, 2, 6, 8
-    SBUTTERFLY dq, 3, 7, 8
+    SBUTTERFLY bw, 0, 1, 4
+    SBUTTERFLY bw, 2, 3, 4
+    SBUTTERFLY wd, 0, 2, 4
+    SBUTTERFLY wd, 1, 3, 4
     movd [P7], m0
-    punpckhqdq m0, m8
-    movd [P6], m0
-    movd [Q0], m1
-    punpckhqdq m1, m9
-    movd [Q1], m1
     movd [P3], m2
-    punpckhqdq m2, m10
-    movd [P2], m2
+    movd [Q0], m1
     movd [Q4], m3
-    punpckhqdq m3, m11
+    psrldq m0, 4
+    psrldq m1, 4
+    psrldq m2, 4
+    psrldq m3, 4
+    movd [P6], m0
+    movd [P2], m2
+    movd [Q1], m1
     movd [Q5], m3
-    movd [P5], m4
-    punpckhqdq m4, m12
-    movd [P4], m4
-    movd [Q2], m5
-    punpckhqdq m5, m13
-    movd [Q3], m5
-    movd [P1], m6
-    punpckhqdq m6, m14
-    movd [P0], m6
-    movd [Q6], m7
-    punpckhqdq m7, m8
-    movd [Q7], m7
+    psrldq m0, 4
+    psrldq m1, 4
+    psrldq m2, 4
+    psrldq m3, 4
+    movd [P5], m0
+    movd [P1], m2
+    movd [Q2], m1
+    movd [Q6], m3
+    psrldq m0, 4
+    psrldq m1, 4
+    psrldq m2, 4
+    psrldq m3, 4
+    movd [P4], m0
+    movd [P0], m2
+    movd [Q3], m1
+    movd [Q7], m3
 %else
 ; the following code do a transpose of 8 full lines to 16 half
 ; lines (high part). It is inlined to avoid the need of a staging area
     mova m2, [P1]
     mova m3, [P0]
     mova m4, [Q0]
     mova m5, [Q1]
+%if ARCH_X86_64
     mova m6, [Q2]
+%endif
     mova m7, [Q3]
     DEFINE_REAL_P7_TO_Q7
+%if ARCH_X86_64
     SBUTTERFLY bw, 0, 1, 8
     SBUTTERFLY bw, 2, 3, 8
     SBUTTERFLY bw, 4, 5, 8
     SBUTTERFLY bw, 6, 7, 8
     SBUTTERFLY wd, 0, 2, 8
     SBUTTERFLY wd, 1, 3, 8
     SBUTTERFLY wd, 4, 6, 8
     SBUTTERFLY wd, 5, 7, 8
     SBUTTERFLY dq, 0, 4, 8
     SBUTTERFLY dq, 1, 5, 8
     SBUTTERFLY dq, 2, 6, 8
     SBUTTERFLY dq, 3, 7, 8
-    movh [P7], m0
-    punpckhqdq m0, m8
-    movh [P6], m0
-    movh [Q0], m1
-    punpckhqdq m1, m9
-    movh [Q1], m1
-    movh [P3], m2
-    punpckhqdq m2, m10
-    movh [P2], m2
-    movh [Q4], m3
-    punpckhqdq m3, m11
-    movh [Q5], m3
-    movh [P5], m4
-    punpckhqdq m4, m12
-    movh [P4], m4
-    movh [Q2], m5
-    punpckhqdq m5, m13
-    movh [Q3], m5
-    movh [P1], m6
-    punpckhqdq m6, m14
-    movh [P0], m6
-    movh [Q6], m7
-    punpckhqdq m7, m8
-    movh [Q7], m7
+%else
+    SBUTTERFLY bw, 0, 1, 6
+    mova [rsp+64], m1
+    mova m6, [rsp+96]
+    SBUTTERFLY bw, 2, 3, 1
+    SBUTTERFLY bw, 4, 5, 1
+    SBUTTERFLY bw, 6, 7, 1
+    SBUTTERFLY wd, 0, 2, 1
+    mova [rsp+96], m2
+    mova m1, [rsp+64]
+    SBUTTERFLY wd, 1, 3, 2
+    SBUTTERFLY wd, 4, 6, 2
+    SBUTTERFLY wd, 5, 7, 2
+    SBUTTERFLY dq, 0, 4, 2
+    SBUTTERFLY dq, 1, 5, 2
+    movh [Q0], m1
+    movhps [Q1], m1
+    mova m2, [rsp+96]
+    SBUTTERFLY dq, 2, 6, 1
+    SBUTTERFLY dq, 3, 7, 1
+%endif
+    SWAP 3, 6
+    SWAP 1, 4
+    movh [P7], m0
+    movhps [P6], m0
+    movh [P5], m1
+    movhps [P4], m1
+    movh [P3], m2
+    movhps [P2], m2
+    movh [P1], m3
+    movhps [P0], m3
+%if ARCH_X86_64
+    movh [Q0], m4
+    movhps [Q1], m4
+%endif
+    movh [Q2], m5
+    movhps [Q3], m5
+    movh [Q4], m6
+    movhps [Q5], m6
+    movh [Q6], m7
+    movhps [Q7], m7
 %endif
 %endif
     RET
 %endmacro
 
-%macro LPF_16_VH 2
-INIT_XMM %2
-cglobal vp9_loop_filter_v_%1_16, 5,10,16, dst, stride, E, I, H, mstride, dst1, dst2, stride3, mstride3
-    LOOPFILTER v, %1
-cglobal vp9_loop_filter_h_%1_16, 5,10,16, 256, dst, stride, E, I, H, mstride, dst1, dst2, stride3, mstride3
-    LOOPFILTER h, %1
+%macro LPF_16_VH 5
+INIT_XMM %5
+LOOPFILTER v, %1, %2, 0, %4
+LOOPFILTER h, %1, %2, %3, %4
 %endmacro
 
-%macro LPF_16_VH_ALL_OPTS 1
-LPF_16_VH %1, sse2
-LPF_16_VH %1, ssse3
-LPF_16_VH %1, avx
+%macro LPF_16_VH_ALL_OPTS 4
+LPF_16_VH %1, %2, %3, %4, sse2
+LPF_16_VH %1, %2, %3, %4, ssse3
+LPF_16_VH %1, %2, %3, %4, avx
 %endmacro
 
-LPF_16_VH_ALL_OPTS 16
-LPF_16_VH_ALL_OPTS 44
-LPF_16_VH_ALL_OPTS 48
-LPF_16_VH_ALL_OPTS 84
-LPF_16_VH_ALL_OPTS 88
-
-%endif ; x86-64
+LPF_16_VH_ALL_OPTS 16, 512, 256, 32
+LPF_16_VH_ALL_OPTS 44,   0, 128,  0
+LPF_16_VH_ALL_OPTS 48, 256, 128, 16
+LPF_16_VH_ALL_OPTS 84, 256, 128, 16
+LPF_16_VH_ALL_OPTS 88, 256, 128, 16