;*****************************************************************************
;* x86-optimized Float DSP functions
;*
;* Copyright 2006 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA 32
pd_reverse: dd 7, 6, 5, 4, 3, 2, 1, 0

SECTION .text

;-----------------------------------------------------------------------------
; void vector_fmul(float *dst, const float *src0, const float *src1, int len)
;-----------------------------------------------------------------------------
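; Scalar reference for the SIMD loop below (a sketch of the semantics only;
; the exact contract, including the multiple-of-16 len requirement and
; pointer alignment, is documented in libavutil/float_dsp.h):
;
;     for (int i = 0; i < len; i++)
;         dst[i] = src0[i] * src1[i];
;
; lenq walks down from the end of the buffers, handling 64 bytes per
; iteration, so it doubles as loop counter and load/store offset.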
%macro VECTOR_FMUL 0
cglobal vector_fmul, 4,4,2, dst, src0, src1, len
    lea       lenq, [lend*4 - 64]
ALIGN 16
.loop:
%assign a 0
%rep 32/mmsize
    mova      m0,   [src0q + lenq + (a+0)*mmsize]
    mova      m1,   [src0q + lenq + (a+1)*mmsize]
    mulps     m0, m0, [src1q + lenq + (a+0)*mmsize]
    mulps     m1, m1, [src1q + lenq + (a+1)*mmsize]
    mova      [dstq + lenq + (a+0)*mmsize], m0
    mova      [dstq + lenq + (a+1)*mmsize], m1
%assign a a+2
%endrep

    sub       lenq, 64
    jge       .loop
    RET
%endmacro

INIT_XMM sse
VECTOR_FMUL
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL
%endif

;-----------------------------------------------------------------------------
; void vector_dmul(double *dst, const double *src0, const double *src1, int len)
;-----------------------------------------------------------------------------
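; Scalar reference (sketch only; the double-precision variant of
; vector_fmul, with length and alignment assumptions per
; libavutil/float_dsp.h):
;
;     for (int i = 0; i < len; i++)
;         dst[i] = src0[i] * src1[i];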
%macro VECTOR_DMUL 0
cglobal vector_dmul, 4,4,4, dst, src0, src1, len
    lea       lend, [lenq*8 - mmsize*4]
ALIGN 16
.loop:
    movaps    m0,     [src0q + lenq + 0*mmsize]
    movaps    m1,     [src0q + lenq + 1*mmsize]
    movaps    m2,     [src0q + lenq + 2*mmsize]
    movaps    m3,     [src0q + lenq + 3*mmsize]
    mulpd     m0, m0, [src1q + lenq + 0*mmsize]
    mulpd     m1, m1, [src1q + lenq + 1*mmsize]
    mulpd     m2, m2, [src1q + lenq + 2*mmsize]
    mulpd     m3, m3, [src1q + lenq + 3*mmsize]
    movaps    [dstq + lenq + 0*mmsize], m0
    movaps    [dstq + lenq + 1*mmsize], m1
    movaps    [dstq + lenq + 2*mmsize], m2
    movaps    [dstq + lenq + 3*mmsize], m3

    sub       lenq, mmsize*4
    jge       .loop
    RET
%endmacro

INIT_XMM sse2
VECTOR_DMUL
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_DMUL
%endif

;------------------------------------------------------------------------------
; void ff_vector_fmac_scalar(float *dst, const float *src, float mul, int len)
;------------------------------------------------------------------------------
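; Scalar reference (sketch only):
;
;     for (int i = 0; i < len; i++)
;         dst[i] += src[i] * mul;
;
; mul is broadcast across a full register once up front; the FMA3 variant
; fuses the multiply and add, while the SSE/AVX variants use separate
; mulps/addps pairs.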

%macro VECTOR_FMAC_SCALAR 0
%if UNIX64
cglobal vector_fmac_scalar, 3,3,5, dst, src, len
%else
cglobal vector_fmac_scalar, 4,4,5, dst, src, mul, len
%endif
%if ARCH_X86_32
    VBROADCASTSS m0, mulm
%else
%if WIN64
    SWAP 0, 2
%endif
    shufps      xm0, xm0, 0
%if cpuflag(avx)
    vinsertf128  m0, m0, xm0, 1
%endif
%endif
    lea    lenq, [lend*4-64]
.loop:
%if cpuflag(fma3)
    mova     m1,     [dstq+lenq]
    mova     m2,     [dstq+lenq+1*mmsize]
    fmaddps  m1, m0, [srcq+lenq], m1
    fmaddps  m2, m0, [srcq+lenq+1*mmsize], m2
%else ; cpuflag
    mulps    m1, m0, [srcq+lenq]
    mulps    m2, m0, [srcq+lenq+1*mmsize]
%if mmsize < 32
    mulps    m3, m0, [srcq+lenq+2*mmsize]
    mulps    m4, m0, [srcq+lenq+3*mmsize]
%endif ; mmsize
    addps    m1, m1, [dstq+lenq]
    addps    m2, m2, [dstq+lenq+1*mmsize]
%if mmsize < 32
    addps    m3, m3, [dstq+lenq+2*mmsize]
    addps    m4, m4, [dstq+lenq+3*mmsize]
%endif ; mmsize
%endif ; cpuflag
    mova  [dstq+lenq], m1
    mova  [dstq+lenq+1*mmsize], m2
%if mmsize < 32
    mova  [dstq+lenq+2*mmsize], m3
    mova  [dstq+lenq+3*mmsize], m4
%endif ; mmsize
    sub    lenq, 64
    jge .loop
    RET
%endmacro

INIT_XMM sse
VECTOR_FMAC_SCALAR
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMAC_SCALAR
%endif
%if HAVE_FMA3_EXTERNAL
INIT_YMM fma3
VECTOR_FMAC_SCALAR
%endif

;------------------------------------------------------------------------------
; void ff_vector_fmul_scalar(float *dst, const float *src, float mul, int len)
;------------------------------------------------------------------------------
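; Scalar reference (sketch only):
;
;     for (int i = 0; i < len; i++)
;         dst[i] = src[i] * mul;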

%macro VECTOR_FMUL_SCALAR 0
%if UNIX64
cglobal vector_fmul_scalar, 3,3,2, dst, src, len
%else
cglobal vector_fmul_scalar, 4,4,3, dst, src, mul, len
%endif
%if ARCH_X86_32
    movss    m0, mulm
%elif WIN64
    SWAP 0, 2
%endif
    shufps   m0, m0, 0
    lea    lenq, [lend*4-mmsize]
.loop:
    mova     m1, [srcq+lenq]
    mulps    m1, m0
    mova  [dstq+lenq], m1
    sub    lenq, mmsize
    jge .loop
    RET
%endmacro

INIT_XMM sse
VECTOR_FMUL_SCALAR

;------------------------------------------------------------------------------
; void ff_vector_dmac_scalar(double *dst, const double *src, double mul,
;                            int len)
;------------------------------------------------------------------------------
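; Scalar reference (sketch only; the double-precision analogue of
; vector_fmac_scalar):
;
;     for (int i = 0; i < len; i++)
;         dst[i] += src[i] * mul;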

%macro VECTOR_DMAC_SCALAR 0
%if ARCH_X86_32
cglobal vector_dmac_scalar, 2,4,5, dst, src, mul, len, lenaddr
    mov          lenq, lenaddrm
    VBROADCASTSD m0, mulm
%else
%if UNIX64
cglobal vector_dmac_scalar, 3,3,5, dst, src, len
%else
cglobal vector_dmac_scalar, 4,4,5, dst, src, mul, len
    SWAP 0, 2
%endif
    movlhps     xm0, xm0
%if cpuflag(avx)
    vinsertf128  m0, m0, xm0, 1
%endif
%endif
    lea    lenq, [lend*8-mmsize*4]
.loop:
%if cpuflag(fma3)
    movaps   m1,     [dstq+lenq]
    movaps   m2,     [dstq+lenq+1*mmsize]
    movaps   m3,     [dstq+lenq+2*mmsize]
    movaps   m4,     [dstq+lenq+3*mmsize]
    fmaddpd  m1, m0, [srcq+lenq], m1
    fmaddpd  m2, m0, [srcq+lenq+1*mmsize], m2
    fmaddpd  m3, m0, [srcq+lenq+2*mmsize], m3
    fmaddpd  m4, m0, [srcq+lenq+3*mmsize], m4
%else ; cpuflag
    mulpd    m1, m0, [srcq+lenq]
    mulpd    m2, m0, [srcq+lenq+1*mmsize]
    mulpd    m3, m0, [srcq+lenq+2*mmsize]
    mulpd    m4, m0, [srcq+lenq+3*mmsize]
    addpd    m1, m1, [dstq+lenq]
    addpd    m2, m2, [dstq+lenq+1*mmsize]
    addpd    m3, m3, [dstq+lenq+2*mmsize]
    addpd    m4, m4, [dstq+lenq+3*mmsize]
%endif ; cpuflag
    movaps [dstq+lenq], m1
    movaps [dstq+lenq+1*mmsize], m2
    movaps [dstq+lenq+2*mmsize], m3
    movaps [dstq+lenq+3*mmsize], m4
    sub    lenq, mmsize*4
    jge .loop
    RET
%endmacro

INIT_XMM sse2
VECTOR_DMAC_SCALAR
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_DMAC_SCALAR
%endif
%if HAVE_FMA3_EXTERNAL
INIT_YMM fma3
VECTOR_DMAC_SCALAR
%endif

;------------------------------------------------------------------------------
; void ff_vector_dmul_scalar(double *dst, const double *src, double mul,
;                            int len)
;------------------------------------------------------------------------------
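; Scalar reference (sketch only):
;
;     for (int i = 0; i < len; i++)
;         dst[i] = src[i] * mul;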

%macro VECTOR_DMUL_SCALAR 0
%if ARCH_X86_32
cglobal vector_dmul_scalar, 3,4,3, dst, src, mul, len, lenaddr
    mov          lenq, lenaddrm
%elif UNIX64
cglobal vector_dmul_scalar, 3,3,3, dst, src, len
%else
cglobal vector_dmul_scalar, 4,4,3, dst, src, mul, len
%endif
%if ARCH_X86_32
    VBROADCASTSD   m0, mulm
%else
%if WIN64
    SWAP 0, 2
%endif
    movlhps       xm0, xm0
%if cpuflag(avx)
    vinsertf128   ym0, ym0, xm0, 1
%endif
%endif
    lea          lenq, [lend*8-2*mmsize]
.loop:
    mulpd          m1, m0, [srcq+lenq       ]
    mulpd          m2, m0, [srcq+lenq+mmsize]
    movaps [dstq+lenq       ], m1
    movaps [dstq+lenq+mmsize], m2
    sub          lenq, 2*mmsize
    jge .loop
    RET
%endmacro

INIT_XMM sse2
VECTOR_DMUL_SCALAR
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_DMUL_SCALAR
%endif

;-----------------------------------------------------------------------------
; void vector_fmul_window(float *dst, const float *src0, const float *src1,
;                         const float *win, int len)
;-----------------------------------------------------------------------------
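; Scalar reference (a sketch mirroring the C version in
; libavutil/float_dsp.c; len counts the floats in each half-window):
;
;     dst += len; win += len; src0 += len;
;     for (int i = -len, j = len - 1; i < 0; i++, j--) {
;         float s0 = src0[i], s1 = src1[j];
;         float wi = win[i],  wj = win[j];
;         dst[i] = s0 * wj - s1 * wi;
;         dst[j] = s0 * wi + s1 * wj;
;     }
;
; The asm keeps two cursors, lenq running up from -len*4 and len1q running
; down from the end, reversing the j-side vectors with shufps 0x1b.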
INIT_XMM sse
cglobal vector_fmul_window, 5, 6, 6, dst, src0, src1, win, len, len1
    shl     lend, 2
    lea    len1q, [lenq - mmsize]
    add    src0q, lenq
    add     dstq, lenq
    add     winq, lenq
    neg     lenq
.loop:
    mova      m0, [winq  + lenq]
    mova      m4, [src0q + lenq]
    mova      m1, [winq  + len1q]
    mova      m5, [src1q + len1q]
    shufps    m1, m1, 0x1b
    shufps    m5, m5, 0x1b
    mova      m2, m0
    mova      m3, m1
    mulps     m2, m4
    mulps     m3, m5
    mulps     m1, m4
    mulps     m0, m5
    addps     m2, m3
    subps     m1, m0
    shufps    m2, m2, 0x1b
    mova      [dstq + lenq], m1
    mova      [dstq + len1q], m2
    sub       len1q, mmsize
    add       lenq,  mmsize
    jl .loop
    RET

;-----------------------------------------------------------------------------
; void vector_fmul_add(float *dst, const float *src0, const float *src1,
;                      const float *src2, int len)
;-----------------------------------------------------------------------------
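; Scalar reference (sketch only):
;
;     for (int i = 0; i < len; i++)
;         dst[i] = src0[i] * src1[i] + src2[i];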
%macro VECTOR_FMUL_ADD 0
cglobal vector_fmul_add, 5,5,4, dst, src0, src1, src2, len
    lea       lenq, [lend*4 - 2*mmsize]
ALIGN 16
.loop:
    mova    m0,   [src0q + lenq]
    mova    m1,   [src0q + lenq + mmsize]
%if cpuflag(fma3)
    mova    m2,     [src2q + lenq]
    mova    m3,     [src2q + lenq + mmsize]
    fmaddps m0, m0, [src1q + lenq], m2
    fmaddps m1, m1, [src1q + lenq + mmsize], m3
%else
    mulps   m0, m0, [src1q + lenq]
    mulps   m1, m1, [src1q + lenq + mmsize]
    addps   m0, m0, [src2q + lenq]
    addps   m1, m1, [src2q + lenq + mmsize]
%endif
    mova    [dstq + lenq], m0
    mova    [dstq + lenq + mmsize], m1

    sub     lenq,   2*mmsize
    jge     .loop
    RET
%endmacro

INIT_XMM sse
VECTOR_FMUL_ADD
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL_ADD
%endif
%if HAVE_FMA3_EXTERNAL
INIT_YMM fma3
VECTOR_FMUL_ADD
%endif

;-----------------------------------------------------------------------------
; void vector_fmul_reverse(float *dst, const float *src0, const float *src1,
;                          int len)
;-----------------------------------------------------------------------------
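; Scalar reference (sketch only):
;
;     for (int i = 0; i < len; i++)
;         dst[i] = src0[i] * src1[len - 1 - i];
;
; src1 advances forwards while dst/src0 walk backwards, so each src1 block
; only needs an in-register reversal (vpermps or shufps) before the
; multiply.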
%macro VECTOR_FMUL_REVERSE 0
cglobal vector_fmul_reverse, 4,4,2, dst, src0, src1, len
%if cpuflag(avx2)
    movaps  m2, [pd_reverse]
%endif
    lea       lenq, [lend*4 - 2*mmsize]
ALIGN 16
.loop:
%if cpuflag(avx2)
    vpermps m0, m2, [src1q]
    vpermps m1, m2, [src1q+mmsize]
%elif cpuflag(avx)
    vmovaps     xmm0, [src1q + 16]
    vinsertf128 m0, m0, [src1q], 1
    vshufps     m0, m0, m0, q0123
    vmovaps     xmm1, [src1q + mmsize + 16]
    vinsertf128 m1, m1, [src1q + mmsize], 1
    vshufps     m1, m1, m1, q0123
%else
    mova    m0, [src1q]
    mova    m1, [src1q + mmsize]
    shufps  m0, m0, q0123
    shufps  m1, m1, q0123
%endif
    mulps   m0, m0, [src0q + lenq + mmsize]
    mulps   m1, m1, [src0q + lenq]
    movaps  [dstq + lenq + mmsize], m0
    movaps  [dstq + lenq], m1
    add     src1q, 2*mmsize
    sub     lenq,  2*mmsize
    jge     .loop
    RET
%endmacro

INIT_XMM sse
VECTOR_FMUL_REVERSE
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL_REVERSE
%endif
%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
VECTOR_FMUL_REVERSE
%endif

;-----------------------------------------------------------------------------
; float scalarproduct_float(const float *v1, const float *v2, int len)
;-----------------------------------------------------------------------------
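; Scalar reference for both implementations below (sketch only; len is the
; element count and is assumed to be a multiple of 4):
;
;     float p = 0.0f;
;     for (int i = 0; i < len; i++)
;         p += v1[i] * v2[i];
;     return p;
;
; The SSE version turns len into a running byte offset; the FMA3 version
; additionally peels 128-, 64- and 32-byte blocks before the final 16-byte
; loop, then reduces the accumulators horizontally.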
INIT_XMM sse
cglobal scalarproduct_float, 3,3,2, v1, v2, offset
    shl   offsetd, 2
    add       v1q, offsetq
    add       v2q, offsetq
    neg   offsetq
    xorps    xmm0, xmm0
.loop:
    movaps   xmm1, [v1q+offsetq]
    mulps    xmm1, [v2q+offsetq]
    addps    xmm0, xmm1
    add   offsetq, 16
    js .loop
    movhlps  xmm1, xmm0
    addps    xmm0, xmm1
    movss    xmm1, xmm0
    shufps   xmm0, xmm0, 1
    addss    xmm0, xmm1
%if ARCH_X86_64 == 0
    movss     r0m,  xmm0
    fld dword r0m
%endif
    RET

INIT_YMM fma3
cglobal scalarproduct_float, 3,5,8, v1, v2, size, len, offset
    xor   offsetq, offsetq
    xorps      m0, m0, m0
    shl     sized, 2
    mov      lenq, sizeq
    cmp      lenq, 32
    jl   .l16
    cmp      lenq, 64
    jl   .l32
    xorps    m1, m1, m1
    cmp      lenq, 128
    jl   .l64
    and    lenq, ~127
    xorps    m2, m2, m2
    xorps    m3, m3, m3
.loop128:
    movups   m4, [v1q+offsetq]
    movups   m5, [v1q+offsetq + 32]
    movups   m6, [v1q+offsetq + 64]
    movups   m7, [v1q+offsetq + 96]
    fmaddps  m0, m4, [v2q+offsetq     ], m0
    fmaddps  m1, m5, [v2q+offsetq + 32], m1
    fmaddps  m2, m6, [v2q+offsetq + 64], m2
    fmaddps  m3, m7, [v2q+offsetq + 96], m3
    add   offsetq, 128
    cmp   offsetq, lenq
    jl .loop128
    addps    m0, m0, m2
    addps    m1, m1, m3
    mov      lenq, sizeq
    and      lenq, 127
    cmp      lenq, 64
    jge .l64
    addps    m0, m0, m1
    cmp      lenq, 32
    jge .l32
    vextractf128 xmm2, m0, 1
    addps    xmm0, xmm2
    cmp      lenq, 16
    jge .l16
    movhlps  xmm1, xmm0
    addps    xmm0, xmm1
    movss    xmm1, xmm0
    shufps   xmm0, xmm0, 1
    addss    xmm0, xmm1
%if ARCH_X86_64 == 0
    movss r0m, xm0
    fld dword r0m
%endif
    RET
.l64:
    and    lenq, ~63
    add    lenq, offsetq
.loop64:
    movups   m4, [v1q+offsetq]
    movups   m5, [v1q+offsetq + 32]
    fmaddps  m0, m4, [v2q+offsetq], m0
    fmaddps  m1, m5, [v2q+offsetq + 32], m1
    add   offsetq, 64
    cmp   offsetq, lenq
    jl .loop64
    addps    m0, m0, m1
    mov      lenq, sizeq
    and      lenq, 63
    cmp      lenq, 32
    jge .l32
    vextractf128 xmm2, m0, 1
    addps    xmm0, xmm2
    cmp      lenq, 16
    jge .l16
    movhlps  xmm1, xmm0
    addps    xmm0, xmm1
    movss    xmm1, xmm0
    shufps   xmm0, xmm0, 1
    addss    xmm0, xmm1
%if ARCH_X86_64 == 0
    movss r0m, xm0
    fld dword r0m
%endif
    RET
.l32:
    and    lenq, ~31
    add    lenq, offsetq
.loop32:
    movups   m4, [v1q+offsetq]
    fmaddps  m0, m4, [v2q+offsetq], m0
    add   offsetq, 32
    cmp   offsetq, lenq
    jl .loop32
    vextractf128 xmm2, m0, 1
    addps    xmm0, xmm2
    mov      lenq, sizeq
    and      lenq, 31
    cmp      lenq, 16
    jge .l16
    movhlps  xmm1, xmm0
    addps    xmm0, xmm1
    movss    xmm1, xmm0
    shufps   xmm0, xmm0, 1
    addss    xmm0, xmm1
%if ARCH_X86_64 == 0
    movss r0m, xm0
    fld dword r0m
%endif
    RET
.l16:
    and    lenq, ~15
    add    lenq, offsetq
.loop16:
    movaps   xmm1, [v1q+offsetq]
    mulps    xmm1, [v2q+offsetq]
    addps    xmm0, xmm1
    add   offsetq, 16
    cmp   offsetq, lenq
    jl .loop16
    movhlps  xmm1, xmm0
    addps    xmm0, xmm1
    movss    xmm1, xmm0
    shufps   xmm0, xmm0, 1
    addss    xmm0, xmm1
%if ARCH_X86_64 == 0
    movss r0m, xm0
    fld dword r0m
%endif
    RET

;-----------------------------------------------------------------------------
; void ff_butterflies_float(float *src0, float *src1, int len);
;-----------------------------------------------------------------------------
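; Scalar reference (sketch only; an in-place add/subtract butterfly):
;
;     for (int i = 0; i < len; i++) {
;         float t  = src0[i] - src1[i];
;         src0[i] += src1[i];
;         src1[i]  = t;
;     }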
INIT_XMM sse
cglobal butterflies_float, 3,3,3, src0, src1, len
    shl       lend, 2
    add      src0q, lenq
    add      src1q, lenq
    neg       lenq
.loop:
    mova        m0, [src0q + lenq]
    mova        m1, [src1q + lenq]
    subps       m2, m0, m1
    addps       m0, m0, m1
    mova        [src1q + lenq], m2
    mova        [src0q + lenq], m0
    add       lenq, mmsize
    jl .loop
    RET