;******************************************************************************
;* 36 point SSE-optimized IMDCT transform
;* Copyright (c) 2011 Vitor Sessak
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86inc.asm"
%include "libavutil/x86/x86util.asm"

SECTION_RODATA

align 16
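; andps lane masks: ~0 keeps a lane, 0 clears it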
ps_mask:  dd 0, ~0, ~0, ~0
ps_mask2: dd 0, ~0,  0, ~0
ps_mask3: dd 0,  0,  0, ~0
ps_mask4: dd 0, ~0,  0,  0

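; twiddle factors for the 9-point DCT core; the magnitudes appear to be
; cos(k*pi/18) for k = 0..8 (0.8660254038 = sqrt(3)/2 = cos(pi/6), etc.)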
ps_val1:  dd          -0.5,          -0.5, -0.8660254038, -0.8660254038
ps_val2:  dd           1.0,           1.0,  0.8660254038,  0.8660254038
ps_val3:  dd  0.1736481777,  0.1736481777,  0.3420201433,  0.3420201433
ps_val4:  dd -0.7660444431, -0.7660444431,  0.8660254038,  0.8660254038
ps_val5:  dd -0.9396926208, -0.9396926208, -0.9848077530, -0.9848077530
ps_val6:  dd           0.5,           0.5, -0.6427876097, -0.6427876097
ps_val7:  dd           1.0,           1.0, -0.6427876097, -0.6427876097

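; sign masks: XORing a float with 0x80000000 flips its sign bit, so these
; negate lanes {2,3} and {1,3} respectively while leaving the other lanes alone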
ps_p1p1m1m1: dd 0,          0, 0x80000000, 0x80000000
ps_p1m1p1m1: dd 0, 0x80000000,          0, 0x80000000

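; post-scale factors used by BUTTERF, interleaved as {+/-1, c} pairs; the
; non-unit magnitudes appear to be 0.5/cos(n*pi/36) for odd n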
ps_cosh:       dd 1.0, 0.50190991877167369479,  1.0,  5.73685662283492756461
               dd 1.0, 0.51763809020504152469,  1.0,  1.93185165257813657349
               dd 1.0, 0.55168895948124587824, -1.0, -1.18310079157624925896
               dd 1.0, 0.61038729438072803416, -1.0, -0.87172339781054900991
               dd 1.0, 0.70710678118654752439,  0.0,  0.0

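; ps_cosh with selected lanes negated so that BUTTERF_SSE3 can use a single
; addsubps instead of the xorps+addps pair of BUTTERF_SSE12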
ps_cosh_sse3:  dd 1.0, -0.50190991877167369479,  1.0, -5.73685662283492756461
               dd 1.0, -0.51763809020504152469,  1.0, -1.93185165257813657349
               dd 1.0, -0.55168895948124587824, -1.0,  1.18310079157624925896
               dd 1.0, -0.61038729438072803416, -1.0,  0.87172339781054900991
               dd 1.0,  0.70710678118654752439,  0.0,  0.0

%define SBLIMIT 32
SECTION_TEXT

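; PSHUFD: shuffle the four dwords of %2 into %1 according to immediate %3;
; emulated with shufps on SSE1 (which lacks pshufd) and on AVX (presumably to
; stay in the float domain), native pshufd on SSE2+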
%macro PSHUFD_SSE_AVX 3
    shufps %1, %2, %2, %3
%endmacro
%macro PSHUFD_SSE2 3
    pshufd %1, %2, %3
%endmacro

; input  %1={x1,x2,x3,x4}, %2={y1,y2,y3,y4}
; output %3={x3,x4,y1,y2}
%macro BUILDINVHIGHLOW_SSE 3
    movlhps %3, %2
    movhlps %3, %1
%endmacro
%macro BUILDINVHIGHLOW_AVX 3
    shufps %3, %1, %2, 0x4e
%endmacro

; input  %1={x1,x2,x3,x4}, %2={y1,y2,y3,y4}
; output %3={x4,y1,y2,y3}
%macro ROTLEFT_SSE 3
    BUILDINVHIGHLOW %1, %2, %3
    shufps  %3, %3, %2, 0x99
%endmacro

%macro ROTLEFT_SSSE3 3
    palignr  %3, %2, %1, 12
%endmacro

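; INVERTHL: %1 = %2 with its 64-bit halves swapped: {x1,x2,x3,x4} -> {x3,x4,x1,x2}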
%macro INVERTHL_SSE1 2
    movhlps %1, %2
    movlhps %1, %2
%endmacro

%macro INVERTHL_SSE2 2
    PSHUFD  %1, %2, 0x4e
%endmacro

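; BUTTERF: butterfly on %1 = {x0,x1,x2,x3}, clobbering %2 as a temporary.
; Forms {x0+x2, x1+x3, x0-x2, x1-x3}, scales it by the ps_cosh row at byte
; offset %3, then builds sum/difference pairs {A+B, A-B, C+D, C-D} from
; adjacent lanes.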
%macro BUTTERF_SSE12 3
    INVERTHL %2, %1
    xorps    %1, [ps_p1p1m1m1]
    addps    %1, %2
    mulps    %1, [ps_cosh + %3]
    PSHUFD   %2, %1, 0xb1
    xorps    %1, [ps_p1m1p1m1]
    addps    %1, %2
%endmacro
%macro BUTTERF_SSE3 3
    INVERTHL %2, %1
    xorps    %1, %1, [ps_p1p1m1m1]
    addps    %1, %1, %2
    mulps    %1, %1, [ps_cosh_sse3 + %3]
    PSHUFD   %2, %1, 0xb1
    addsubps %1, %1, %2
%endmacro

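; STORE: scatter the four floats of %1 to %3[0], %3[SBLIMIT], %3[2*SBLIMIT]
; and %3[3*SBLIMIT] (offsets below are in bytes: 4*SBLIMIT bytes = SBLIMIT
; floats); %2 is clobbered as a temporary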
%macro STORE 3
    movhlps %2, %1
    movss   [%3             ], %1
    movss   [%3 +  8*SBLIMIT], %2
    shufps  %1, %1, 0xb1
    movss   [%3 +  4*SBLIMIT], %1
    movhlps %2, %1
    movss   [%3 + 12*SBLIMIT], %2
%endmacro

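; 16-byte load/store done as two 8-byte halves (movlps/movhps), so only
; 8-byte alignment is required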
%macro LOADA64 2
   movlps   %1, [%2]
   movhps   %1, [%2 + 8]
%endmacro

%macro STOREA64 2
   movlps   [%1    ], %2
   movhps   [%1 + 8], %2
%endmacro

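; imdct36_float_<opt>(float *out, float *buf, float *in, float *win)
; 36-point IMDCT of the 18 samples in in[]: half of the windowed result is
; added to the previous overlap in buf[] and scattered to out[] with a stride
; of SBLIMIT floats; the other half is windowed with the second part of win[]
; and stored back into buf[] as the overlap for the next call (mirroring the
; C imdct36() of the MPEG audio decoder).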
%macro DEFINE_IMDCT 1
cglobal imdct36_float_%1, 4,4,9, out, buf, in, win

    ; for(i=17;i>=1;i--) in[i] += in[i-1];
    LOADA64 m0, inq
    LOADA64 m1, inq + 16

    ROTLEFT m0, m1, m5

    PSHUFD  m6, m0, 0x93
    andps   m6, m6, [ps_mask]
    addps   m0, m0, m6

    LOADA64 m2, inq + 32

    ROTLEFT m1, m2, m7

    addps   m1, m1, m5
    LOADA64 m3, inq + 48

    ROTLEFT m2, m3, m5

    xorps   m4, m4, m4
    movlps  m4, [inq+64]
    BUILDINVHIGHLOW m3, m4, m6
    shufps  m6, m6, m4, 0xa9

    addps   m4, m4, m6
    addps   m2, m2, m7
    addps   m3, m3, m5

    ; for(i=17;i>=3;i-=2) in[i] += in[i-2];
    movlhps m5, m5, m0
    andps   m5, m5, [ps_mask3]

    BUILDINVHIGHLOW m0, m1, m7
    andps   m7, m7, [ps_mask2]

    addps   m0, m0, m5

    BUILDINVHIGHLOW m1, m2, m6
    andps   m6, m6, [ps_mask2]

    addps  m1, m1, m7

    BUILDINVHIGHLOW m2, m3, m7
    andps   m7, m7, [ps_mask2]

    addps   m2, m2, m6

    movhlps m6, m6, m3
    andps   m6, m6, [ps_mask4]

    addps  m3, m3, m7
    addps  m4, m4, m6

    ; Populate tmp[]
    movlhps m6, m1, m5    ; zero out high values
    subps   m6, m6, m4

    subps  m5, m0, m3

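    ; x86-64 has extra XMM registers, so m5 (= m0 - m3) can be parked in m8
    ; and reused below; the 32-bit path recomputes it in the %else branch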
%ifdef ARCH_X86_64
    SWAP   m5, m8
%endif

    mulps  m7, m2, [ps_val1]

%ifdef ARCH_X86_64
    mulps  m5, m8, [ps_val2]
%else
    mulps  m5, m5, [ps_val2]
%endif
    addps  m7, m7, m5

    mulps  m5, m6, [ps_val1]
    subps  m7, m7, m5

%ifdef ARCH_X86_64
    SWAP   m5, m8
%else
    subps  m5, m0, m3
%endif

    subps  m5, m5, m6
    addps  m5, m5, m2

    shufps m6, m4, m3, 0xe4
    subps  m6, m6, m2
    mulps  m6, m6, [ps_val3]

    addps  m4, m4, m1
    mulps  m4, m4, [ps_val4]

    shufps m1, m1, m0, 0xe4
    addps  m1, m1, m2
    mulps  m1, m1, [ps_val5]

    mulps  m3, m3, [ps_val6]
    mulps  m0, m0, [ps_val7]
    addps  m0, m0, m3

    xorps  m2, m1, [ps_p1p1m1m1]
    subps  m2, m2, m4
    addps  m2, m2, m0

    addps  m3, m4, m0
    subps  m3, m3, m6
    xorps  m3, m3, [ps_p1p1m1m1]

    shufps m0, m0, m4, 0xe4
    subps  m0, m0, m1
    addps  m0, m0, m6

    BUILDINVHIGHLOW m2, m3, m4
    shufps  m3, m3, m2, 0x4e

    ; we have tmp = {SWAPLH(m0), SWAPLH(m7), m3, m4, m5}  (SWAPLH = low/high halves swapped)

    BUTTERF  m0, m1, 0
    BUTTERF  m7, m2, 16
    BUTTERF  m3, m6, 32
    BUTTERF  m4, m1, 48

    mulps   m5, m5, [ps_cosh + 64]
    PSHUFD  m1, m5, 0xe1
    xorps   m5, m5, [ps_p1m1p1m1]
    addps   m5, m5, m1

    ; permutes:
    ; m0    0  1  2  3     =>     2  6 10 14   m1
    ; m7    4  5  6  7     =>     3  7 11 15   m2
    ; m3    8  9 10 11     =>    17 13  9  5   m3
    ; m4   12 13 14 15     =>    16 12  8  4   m5
    ; m5   16 17 xx xx     =>     0  1 xx xx   m0

    unpckhps m1, m0, m7
    unpckhps m6, m3, m4
    movhlps  m2, m6, m1
    movlhps  m1, m1, m6

    unpcklps m5, m5, m4
    unpcklps m3, m3, m7
    movhlps  m4, m3, m5
    movlhps  m5, m5, m3
    SWAP m4, m3
    ; permutation done

    PSHUFD  m6, m2, 0xb1
    movlps  m7, [bufq + 64]
    mulps   m6, m6, [winq + 16*4]
    addps   m6, m6, m7
    movss   [outq + 64*SBLIMIT], m6
    shufps  m6, m6, m6, 0xb1
    movss   [outq + 68*SBLIMIT], m6

    mulps   m6, m3, [winq + 4*4]
    LOADA64 m4, bufq + 16
    addps   m6, m6, m4
    STORE   m6, m7, outq + 16*SBLIMIT

    shufps  m4, m0, m3, 0xb5
    mulps   m4, m4, [winq + 8*4]
    LOADA64 m7, bufq + 32
    addps   m4, m4, m7
    STORE   m4, m6, outq + 32*SBLIMIT

    shufps  m3, m3, m2, 0xb1
    mulps   m3, m3, [winq + 12*4]
    LOADA64 m7, bufq + 48
    addps   m3, m3, m7
    STORE   m3, m7, outq + 48*SBLIMIT

    mulps   m2, m2, [winq]
    LOADA64 m6, bufq
    addps   m2, m2, m6
    STORE   m2, m7, outq

    mulps    m4, m1, [winq + 20*4]
    STOREA64 bufq, m4

    mulps    m3, m5, [winq + 24*4]
    STOREA64 bufq + 16, m3

    shufps   m0, m0, m5, 0xb0
    mulps    m0, m0, [winq + 28*4]
    STOREA64 bufq + 32, m0

    shufps   m5, m5, m1, 0xb1
    mulps    m5, m5, [winq + 32*4]
    STOREA64 bufq + 48, m5

    shufps   m1, m1, m1, 0xb1
    mulps    m1, m1, [winq + 36*4]
    movlps  [bufq + 64], m1
    RET
%endmacro

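; Instantiate the transform once per instruction set, redefining each helper
; macro to the best primitive available at that level.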
%define PSHUFD PSHUFD_SSE_AVX
%define INVERTHL INVERTHL_SSE1
%define BUTTERF  BUTTERF_SSE12
%define BUTTERF0 BUTTERF0_SSE12
%define BUILDINVHIGHLOW BUILDINVHIGHLOW_SSE
%define ROTLEFT ROTLEFT_SSE

INIT_XMM

DEFINE_IMDCT sse

%define PSHUFD PSHUFD_SSE2
%define INVERTHL INVERTHL_SSE2

DEFINE_IMDCT sse2

%define BUTTERF  BUTTERF_SSE3
%define BUTTERF0 BUTTERF0_SSE3

DEFINE_IMDCT sse3

%define ROTLEFT ROTLEFT_SSSE3

DEFINE_IMDCT ssse3

%define BUILDINVHIGHLOW BUILDINVHIGHLOW_AVX
%define PSHUFD PSHUFD_SSE_AVX

%ifdef HAVE_AVX
INIT_AVX
DEFINE_IMDCT avx
%endif