;******************************************************************************
;* VP9 IDCT SIMD optimizations
;*
;* Copyright (C) 2013 Clément Bœsch <u pkh me>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

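; All multiplier constants are stored pre-doubled: pmulhrsw computes
; (x*c + (1 << 14)) >> 15, so multiplying by 2*c yields the 14-bit
; fixed-point product (x*c + (1 << 13)) >> 14 directly.
; 11585 is round(cos(pi/4) * (1 << 14)), i.e. the sqrt(2)/2 scale.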
pw_11585x2: times 8 dw 23170

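; Interleaved coefficient pairs for the pmaddwd-based rotation in
; VP9_UNPACK_MULSUB_2W_4X: with source words interleaved as (b, a) in each
; dword lane,
;   pw_m%1_%2 gives a*%2 - b*%1
;   pw_%2_%1  gives a*%1 + b*%2
; The values are round(cos(k*pi/16) * (1 << 14)) for the angles used by the
; VP9 inverse DCT.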
%macro VP9_IDCT_COEFFS 2
pw_m%1_%2: dw -%1, %2, -%1, %2, -%1, %2, -%1, %2
pw_%2_%1:  dw  %2, %1,  %2, %1,  %2, %1,  %2, %1
%endmacro

%macro VP9_IDCT_COEFFS_ALL 2
pw_%1x2: times 8 dw %1*2
pw_%2x2: times 8 dw %2*2
VP9_IDCT_COEFFS %1, %2
%endmacro

VP9_IDCT_COEFFS_ALL 15137,  6270
VP9_IDCT_COEFFS_ALL 16069,  3196
VP9_IDCT_COEFFS_ALL  9102, 13623

pd_8192: times 4 dd 8192
pw_2048: times 8 dw 2048
pw_1024: times 8 dw 1024

SECTION .text

; per dword lane: dst = (a*x + b*y + round) >> 14, where (a, b) is the word
; pair in src and (x, y) the word pair in the coefficient vector (pmaddwd)
%macro VP9_MULSUB_2W_2X 6 ; dst1, dst2, src (unchanged), round, coefs1, coefs2
    pmaddwd            m%1, m%3, %5
    pmaddwd            m%2, m%3, %6
    paddd              m%1,  %4
    paddd              m%2,  %4
    psrad              m%1,  14
    psrad              m%2,  14
%endmacro

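; Rotation butterfly: with a = m%1 and b = m%2 on input (and %5 = [pd_8192]
; at every call site),
;   m%1 = (a*%4 - b*%3 + 8192) >> 14
;   m%2 = (a*%3 + b*%4 + 8192) >> 14
; The words are widened to dwords around the multiply (hence the unpacks), so
; no intermediate precision is lost, then packed back with signed saturation.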
%macro VP9_UNPACK_MULSUB_2W_4X 7 ; dst1, dst2, coef1, coef2, rnd, tmp1, tmp2
    punpckhwd          m%6, m%2, m%1
    VP9_MULSUB_2W_2X    %7,  %6,  %6, %5, [pw_m%3_%4], [pw_%4_%3]
    punpcklwd          m%2, m%1
    VP9_MULSUB_2W_2X    %1,  %2,  %2, %5, [pw_m%3_%4], [pw_%4_%3]
    packssdw           m%1, m%7
    packssdw           m%2, m%6
%endmacro

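; Add a residual pair to two rows of dst: widen the pixels to words against
; the zero register, add, then pack back with unsigned saturation, which
; performs the required clamp to [0, 255].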
%macro VP9_STORE_2X 5 ; reg1, reg2, tmp1, tmp2, zero
    movh               m%3, [dstq]
    movh               m%4, [dstq+strideq]
    punpcklbw          m%3, m%5
    punpcklbw          m%4, m%5
    paddw              m%3, m%1
    paddw              m%4, m%2
    packuswb           m%3, m%5
    packuswb           m%4, m%5
    movh            [dstq], m%3
    movh    [dstq+strideq], m%4
%endmacro

;-------------------------------------------------------------------------------------------
; void vp9_idct_idct_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

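; Scalar equivalent of the 1-D 4-point IDCT below (every product is rounded
; and shifted: (x*c + 8192) >> 14):
;   t0 = (IN(0) + IN(2)) * 11585          t1 = (IN(0) - IN(2)) * 11585
;   t2 = IN(1)* 6270 - IN(3)*15137        t3 = IN(1)*15137 + IN(3)* 6270
;   OUT = { t0+t3, t1+t2, t1-t2, t0-t3 }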
%macro VP9_IDCT4_1D_FINALIZE 0
    SUMSUB_BA            w, 3, 2, 4                         ; m3=t3+t0, m2=-t3+t0
    SUMSUB_BA            w, 1, 0, 4                         ; m1=t2+t1, m0=-t2+t1
    SWAP                 0, 3, 2                            ; 3102 -> 0123
%endmacro

%macro VP9_IDCT4_1D 0
    SUMSUB_BA            w, 2, 0, 4                         ; m2=IN(0)+IN(2) m0=IN(0)-IN(2)
    pmulhrsw            m2, m6                              ; m2=t0
    pmulhrsw            m0, m6                              ; m0=t1
    VP9_UNPACK_MULSUB_2W_4X 1, 3, 15137, 6270, m7, 4, 5     ; m1=t2, m3=t3
    VP9_IDCT4_1D_FINALIZE
%endmacro

; 1-D transform for when only the top-left 2x2 coefficients are set:
; IN(2) == IN(3) == 0, so t0 == t1 == IN(0)*11585 >> 14 (the caller preloads
; m5/m6/m7 with the doubled constants)
%macro VP9_IDCT4_2x2_1D 0
    pmulhrsw            m0, m5                              ; m0=t1
    mova                m2, m0                              ; m2=t0
    mova                m3, m1
    pmulhrsw            m1, m6                              ; m1=t2
    pmulhrsw            m3, m7                              ; m3=t3
    VP9_IDCT4_1D_FINALIZE
%endmacro

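; Round the pass-2 output down to pixel range, (x + 8) >> 4 for the 4x4, and
; add it to dst two rows at a time.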
%macro VP9_IDCT4_WRITEOUT 0
    mova                m5, [pw_2048]
    pmulhrsw            m0, m5              ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
    pmulhrsw            m1, m5
    VP9_STORE_2X         0,  1,  6,  7,  4
    lea               dstq, [dstq+2*strideq]
    pmulhrsw            m2, m5
    pmulhrsw            m3, m5
    VP9_STORE_2X         2,  3,  6,  7,  4
%endmacro

INIT_MMX ssse3
cglobal vp9_idct_idct_4x4_add, 4,4,0, dst, stride, block, eob

    cmp eobd, 4 ; 2x2 or smaller
    jg .idctfull

    cmp eobd, 1 ; faster path for when only DC is set
    jne .idct2x2

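; out = ((dc*11585 >> 14) * 11585 >> 14 + 8) >> 4 splatted to all 16 pixels;
; the two pmulhrsw apply the row and column pass scaling in one go.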
    movd                m0, [blockq]
    mova                m5, [pw_11585x2]
    pmulhrsw            m0, m5
    pmulhrsw            m0, m5
    pshufw              m0, m0, 0
    pxor                m4, m4
    movh          [blockq], m4
    pmulhrsw            m0, [pw_2048]       ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
    VP9_STORE_2X         0,  0,  6,  7,  4
    lea               dstq, [dstq+2*strideq]
    VP9_STORE_2X         0,  0,  6,  7,  4
    RET

; faster path for when only the top-left 2x2 block is set
.idct2x2:
    movd                m0, [blockq+0]
    movd                m1, [blockq+8]
    mova                m5, [pw_11585x2]
    mova                m6, [pw_6270x2]
    mova                m7, [pw_15137x2]
    VP9_IDCT4_2x2_1D
    TRANSPOSE4x4W  0, 1, 2, 3, 4
    VP9_IDCT4_2x2_1D
    pxor                m4, m4  ; used for the block reset, and VP9_STORE_2X
    movh       [blockq+ 0], m4
    movh       [blockq+ 8], m4
    VP9_IDCT4_WRITEOUT
    RET

.idctfull: ; generic full 4x4 idct/idct
    mova                m0, [blockq+ 0]
    mova                m1, [blockq+ 8]
    mova                m2, [blockq+16]
    mova                m3, [blockq+24]
    mova                m6, [pw_11585x2]
    mova                m7, [pd_8192]       ; rounding
    VP9_IDCT4_1D
    TRANSPOSE4x4W  0, 1, 2, 3, 4
    VP9_IDCT4_1D
    pxor                m4, m4  ; used for the block reset, and VP9_STORE_2X
    mova       [blockq+ 0], m4
    mova       [blockq+ 8], m4
    mova       [blockq+16], m4
    mova       [blockq+24], m4
    VP9_IDCT4_WRITEOUT
    RET

;-------------------------------------------------------------------------------------------
; void vp9_idct_idct_8x8_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

%if ARCH_X86_64 ; TODO: 32-bit? (32-bit is limited to 8 xmm registers, and we use 13 here)
%macro VP9_IDCT8_1D_FINALIZE 0
    SUMSUB_BA            w,  3, 10, 4                       ;  m3=t0+t7, m10=t0-t7
    SUMSUB_BA            w,  1,  2, 4                       ;  m1=t1+t6,  m2=t1-t6
    SUMSUB_BA            w, 11,  0, 4                       ; m11=t2+t5,  m0=t2-t5
    SUMSUB_BA            w,  9,  8, 4                       ;  m9=t3+t4,  m8=t3-t4
    SWAP                11, 10, 2
    SWAP                 3,  9, 0
%endmacro

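; Scalar equivalent of the first-stage rotations below (every product is
; rounded and shifted: (x*c + 8192) >> 14):
;   t0a = (IN(0) + IN(4)) * 11585         t1a = (IN(0) - IN(4)) * 11585
;   t2a = IN(2)* 6270 - IN(6)*15137       t3a = IN(2)*15137 + IN(6)* 6270
;   t4a = IN(1)* 3196 - IN(7)*16069       t7a = IN(1)*16069 + IN(7)* 3196
;   t5a = IN(5)*13623 - IN(3)* 9102       t6a = IN(5)* 9102 + IN(3)*13623
; The even/odd recombination then happens in VP9_IDCT8_1D_FINALIZE.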
%macro VP9_IDCT8_1D 0
    SUMSUB_BA            w, 8, 0, 4                         ; m8=IN(0)+IN(4) m0=IN(0)-IN(4)
    pmulhrsw            m8, m12                             ; m8=t0a
    pmulhrsw            m0, m12                             ; m0=t1a
    VP9_UNPACK_MULSUB_2W_4X 2, 10, 15137,  6270, m7, 4, 5   ; m2=t2a, m10=t3a
    VP9_UNPACK_MULSUB_2W_4X 1, 11, 16069,  3196, m7, 4, 5   ; m1=t4a, m11=t7a
    VP9_UNPACK_MULSUB_2W_4X 9,  3,  9102, 13623, m7, 4, 5   ; m9=t5a,  m3=t6a
    SUMSUB_BA            w, 10,  8, 4                       ; m10=t0a+t3a (t0),  m8=t0a-t3a (t3)
    SUMSUB_BA            w,  2,  0, 4                       ;  m2=t1a+t2a (t1),  m0=t1a-t2a (t2)
    SUMSUB_BA            w,  9,  1, 4                       ;  m9=t4a+t5a (t4),  m1=t4a-t5a (t5a)
    SUMSUB_BA            w,  3, 11, 4                       ;  m3=t7a+t6a (t7), m11=t7a-t6a (t6a)
    SUMSUB_BA            w,  1, 11, 4                       ;  m1=t6a+t5a (t6), m11=t6a-t5a (t5)
    pmulhrsw            m1, m12                             ; m1=t6
    pmulhrsw           m11, m12                             ; m11=t5
    VP9_IDCT8_1D_FINALIZE
%endmacro

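; Same transform for when only the top-left 4x4 coefficients are set: IN(4..7)
; are zero, so every rotation collapses to a pair of pmulhrsw by the doubled
; constants.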
%macro VP9_IDCT8_4x4_1D 0
    pmulhrsw            m0, m12                             ; m0=t1a/t0a
    pmulhrsw           m10, m2, [pw_15137x2]                ; m10=t3a
    pmulhrsw            m2, [pw_6270x2]                     ; m2=t2a
    pmulhrsw           m11, m1, [pw_16069x2]                ; m11=t7a
    pmulhrsw            m1, [pw_3196x2]                     ; m1=t4a
    pmulhrsw            m9, m3, [pw_9102x2]                 ; m9=-t5a
    pmulhrsw            m3, [pw_13623x2]                    ; m3=t6a
    psubw               m8, m0, m10                         ; m8=t0a-t3a (t3)
    paddw              m10, m0                              ; m10=t0a+t3a (t0)
    SUMSUB_BA            w,  2,  0, 4                       ;  m2=t1a+t2a (t1),  m0=t1a-t2a (t2)
    SUMSUB_BA            w,  9,  1, 4                       ;  m1=t4a+t5a (t4),  m9=t4a-t5a (t5a)
    SWAP                 1,  9
    SUMSUB_BA            w,  3, 11, 4                       ;  m3=t7a+t6a (t7), m11=t7a-t6a (t6a)
    SUMSUB_BA            w,  1, 11, 4                       ;  m1=t6a+t5a (t6), m11=t6a-t5a (t5)
    pmulhrsw            m1, m12                             ; m1=t6
    pmulhrsw           m11, m12                             ; m11=t5
    VP9_IDCT8_1D_FINALIZE
%endmacro

; TODO: with AVX three-operand instructions, a lot of the t* copies below can
; probably be removed by merging them into the following SUMSUBs from
; VP9_IDCT8_1D_FINALIZE
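; Only IN(0) and IN(1) are set: t0..t3 all equal IN(0)*11585 >> 14, and the
; odd half reduces to scaling IN(1) by 3196 (t4a) and 16069 (t7a).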
%macro VP9_IDCT8_2x2_1D 0
    pmulhrsw            m0, m12                             ;  m0=t0
    mova                m3, m1
    pmulhrsw            m1, m6                              ;  m1=t4
    pmulhrsw            m3, m7                              ;  m3=t7
    mova                m2, m0                              ;  m2=t1
    mova               m10, m0                              ; m10=t2
    mova                m8, m0                              ;  m8=t3
    mova               m11, m3                              ; t5 = t7a ...
    mova                m9, m3                              ; t6 = t7a ...
    psubw              m11, m1                              ; t5 = t7a - t4a
    paddw               m9, m1                              ; t6 = t7a + t4a
    pmulhrsw           m11, m12                             ; m11=t5
    pmulhrsw            m9, m12                             ;  m9=t6
    SWAP                 0, 10
    SWAP                 9,  1
    VP9_IDCT8_1D_FINALIZE
%endmacro

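; Round the pass-2 output down to pixel range, (x + 16) >> 5 for the 8x8, and
; add it to dst two rows at a time.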
%macro VP9_IDCT8_WRITEOUT 0
    mova                m5, [pw_1024]
    pmulhrsw            m0, m5              ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
    pmulhrsw            m1, m5
    VP9_STORE_2X         0,  1,  6,  7,  4
    lea               dstq, [dstq+2*strideq]
    pmulhrsw            m2, m5
    pmulhrsw            m3, m5
    VP9_STORE_2X         2,  3,  6,  7,  4
    lea               dstq, [dstq+2*strideq]
    pmulhrsw            m8, m5
    pmulhrsw            m9, m5
    VP9_STORE_2X         8,  9,  6,  7,  4
    lea               dstq, [dstq+2*strideq]
    pmulhrsw           m10, m5
    pmulhrsw           m11, m5
    VP9_STORE_2X        10, 11,  6,  7,  4
%endmacro

INIT_XMM ssse3
cglobal vp9_idct_idct_8x8_add, 4,4,13, dst, stride, block, eob

    mova               m12, [pw_11585x2]    ; often used

    cmp eobd, 12 ; top left half or less
    jg .idctfull

    cmp eobd, 3  ; top left corner or less
    jg .idcthalf

    cmp eobd, 1 ; faster path for when only DC is set
    jne .idcttopleftcorner

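; out = ((dc*11585 >> 14) * 11585 >> 14 + 16) >> 5 splatted to all 64 pixels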
    movd                m0, [blockq]
    pmulhrsw            m0, m12
    pmulhrsw            m0, m12
    SPLATW              m0, m0, 0
    pxor                m4, m4
    movd          [blockq], m4
    mova                m5, [pw_1024]
    pmulhrsw            m0, m5              ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
    VP9_STORE_2X         0,  0,  6,  7,  4
    lea               dstq, [dstq+2*strideq]
    VP9_STORE_2X         0,  0,  6,  7,  4
    lea               dstq, [dstq+2*strideq]
    VP9_STORE_2X         0,  0,  6,  7,  4
    lea               dstq, [dstq+2*strideq]
    VP9_STORE_2X         0,  0,  6,  7,  4
    RET

; faster path for when only the top-left corner is set (up to 3 inputs: the
; DC, the coefficient to its right and the one below it). Note: this also
; works for a full 2x2 block
.idcttopleftcorner:
    movd                m0, [blockq+0]
    movd                m1, [blockq+16]
    mova                m6, [pw_3196x2]
    mova                m7, [pw_16069x2]
    VP9_IDCT8_2x2_1D
    TRANSPOSE8x8W  0, 1, 2, 3, 8, 9, 10, 11, 4
    VP9_IDCT8_2x2_1D
    pxor                m4, m4  ; used for the block reset, and VP9_STORE_2X
    movd       [blockq+ 0], m4
    movd       [blockq+16], m4
    VP9_IDCT8_WRITEOUT
    RET

.idcthalf:
    movh                m0, [blockq + 0]
    movh                m1, [blockq +16]
    movh                m2, [blockq +32]
    movh                m3, [blockq +48]
    VP9_IDCT8_4x4_1D
    TRANSPOSE8x8W  0, 1, 2, 3, 8, 9, 10, 11, 4
    VP9_IDCT8_4x4_1D
    pxor                m4, m4
    movh       [blockq+ 0], m4
    movh       [blockq+16], m4
    movh       [blockq+32], m4
    movh       [blockq+48], m4
    VP9_IDCT8_WRITEOUT
    RET

.idctfull: ; generic full 8x8 idct/idct
    mova                m0, [blockq+  0]    ; IN(0)
    mova                m1, [blockq+ 16]    ; IN(1)
    mova                m2, [blockq+ 32]    ; IN(2)
    mova                m3, [blockq+ 48]    ; IN(3)
    mova                m8, [blockq+ 64]    ; IN(4)
    mova                m9, [blockq+ 80]    ; IN(5)
    mova               m10, [blockq+ 96]    ; IN(6)
    mova               m11, [blockq+112]    ; IN(7)
    mova                m7, [pd_8192]       ; rounding
    VP9_IDCT8_1D
    TRANSPOSE8x8W  0, 1, 2, 3, 8, 9, 10, 11, 4
    VP9_IDCT8_1D
    pxor                m4, m4  ; used for the block reset, and VP9_STORE_2X
    mova      [blockq+  0], m4
    mova      [blockq+ 16], m4
    mova      [blockq+ 32], m4
    mova      [blockq+ 48], m4
    mova      [blockq+ 64], m4
    mova      [blockq+ 80], m4
    mova      [blockq+ 96], m4
    mova      [blockq+112], m4
    VP9_IDCT8_WRITEOUT
    RET
%endif