;******************************************************************************
;* x86 optimized channel mixing
;* Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
%include "util.asm"
SECTION_TEXT
;-----------------------------------------------------------------------------
; void ff_mix_2_to_1_fltp_flt(float **src, float **matrix, int len,
; int out_ch, int in_ch);
;-----------------------------------------------------------------------------
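;
; Rough scalar C equivalent of this kernel (illustrative sketch only, not the
; project's actual C fallback): mix channel 1 into channel 0 in place.
;
;     for (i = 0; i < len; i++)
;         src[0][i] = matrix[0][0] * src[0][i] + matrix[0][1] * src[1][i];
;
; The asm keeps src[1] as a byte offset from src[0] and processes two vector
; widths of samples per loop iteration.
;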
%macro MIX_2_TO_1_FLTP_FLT 0
cglobal mix_2_to_1_fltp_flt, 3,4,6, src, matrix, len, src1
mov src1q, [srcq+gprsize]
mov srcq, [srcq ]
sub src1q, srcq
mov matrixq, [matrixq ]
VBROADCASTSS m4, [matrixq ]
VBROADCASTSS m5, [matrixq+4]
ALIGN 16
.loop:
mulps m0, m4, [srcq ]
mulps m1, m5, [srcq+src1q ]
mulps m2, m4, [srcq+ mmsize]
mulps m3, m5, [srcq+src1q+mmsize]
addps m0, m0, m1
addps m2, m2, m3
mova [srcq ], m0
mova [srcq+mmsize], m2
add srcq, mmsize*2
sub lend, mmsize*2/4
jg .loop
REP_RET
%endmacro
INIT_XMM sse
MIX_2_TO_1_FLTP_FLT
INIT_YMM avx
MIX_2_TO_1_FLTP_FLT
;-----------------------------------------------------------------------------
; void ff_mix_2_to_1_s16p_flt(int16_t **src, float **matrix, int len,
; int out_ch, int in_ch);
;-----------------------------------------------------------------------------
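;
; Illustrative scalar C sketch (not the actual C fallback): s16 samples are
; converted to float, mixed with the float coefficients, then converted back
; with rounding and saturation.
;
;     for (i = 0; i < len; i++) {
;         float v = matrix[0][0] * src[0][i] + matrix[0][1] * src[1][i];
;         src[0][i] = av_clip_int16(lrintf(v));
;     }
;
; S16_TO_S32_SX sign-extends 8 packed s16 samples into two s32 vectors;
; cvtps2dq + packssdw perform the rounding and saturating conversion back.
;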
%macro MIX_2_TO_1_S16P_FLT 0
cglobal mix_2_to_1_s16p_flt, 3,4,6, src, matrix, len, src1
mov src1q, [srcq+gprsize]
mov srcq, [srcq]
sub src1q, srcq
mov matrixq, [matrixq ]
VBROADCASTSS m4, [matrixq ]
VBROADCASTSS m5, [matrixq+4]
ALIGN 16
.loop:
mova m0, [srcq ]
mova m2, [srcq+src1q]
S16_TO_S32_SX 0, 1
S16_TO_S32_SX 2, 3
cvtdq2ps m0, m0
cvtdq2ps m1, m1
cvtdq2ps m2, m2
cvtdq2ps m3, m3
mulps m0, m4
mulps m1, m4
mulps m2, m5
mulps m3, m5
addps m0, m2
addps m1, m3
cvtps2dq m0, m0
cvtps2dq m1, m1
packssdw m0, m1
mova [srcq], m0
add srcq, mmsize
sub lend, mmsize/2
jg .loop
REP_RET
%endmacro
INIT_XMM sse2
MIX_2_TO_1_S16P_FLT
INIT_XMM sse4
MIX_2_TO_1_S16P_FLT
;-----------------------------------------------------------------------------
; void ff_mix_2_to_1_s16p_q8(int16_t **src, int16_t **matrix, int len,
; int out_ch, int in_ch);
;-----------------------------------------------------------------------------
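;
; Here the matrix coefficients are signed Q8 fixed-point int16 values
; (8 fractional bits). Illustrative scalar C sketch (not the actual fallback):
;
;     for (i = 0; i < len; i++) {
;         int v = matrix[0][0] * src[0][i] + matrix[0][1] * src[1][i];
;         src[0][i] = av_clip_int16(v >> 8);
;     }
;
; Each coefficient is splatted and interleaved with zero words so that pmaddwd
; yields full 32-bit products per sample, which are then shifted and packed
; back to s16 with saturation.
;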
INIT_XMM sse2
cglobal mix_2_to_1_s16p_q8, 3,4,6, src, matrix, len, src1
mov src1q, [srcq+gprsize]
mov srcq, [srcq]
sub src1q, srcq
mov matrixq, [matrixq]
movd m4, [matrixq]
movd m5, [matrixq]
SPLATW m4, m4, 0
SPLATW m5, m5, 1
pxor m0, m0
punpcklwd m4, m0
punpcklwd m5, m0
ALIGN 16
.loop:
mova m0, [srcq ]
mova m2, [srcq+src1q]
punpckhwd m1, m0, m0
punpcklwd m0, m0
punpckhwd m3, m2, m2
punpcklwd m2, m2
pmaddwd m0, m4
pmaddwd m1, m4
pmaddwd m2, m5
pmaddwd m3, m5
paddd m0, m2
paddd m1, m3
psrad m0, 8
psrad m1, 8
packssdw m0, m1
mova [srcq], m0
add srcq, mmsize
sub lend, mmsize/2
jg .loop
REP_RET
;-----------------------------------------------------------------------------
; void ff_mix_1_to_2_fltp_flt(float **src, float **matrix, int len,
; int out_ch, int in_ch);
;-----------------------------------------------------------------------------
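;
; Illustrative scalar C sketch (not the actual C fallback): expand one input
; channel to two output channels in place, reusing the channel-0 buffer.
;
;     for (i = 0; i < len; i++) {
;         float v = src[0][i];
;         src[0][i] = matrix[0][0] * v;
;         src[1][i] = matrix[1][0] * v;
;     }
;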
%macro MIX_1_TO_2_FLTP_FLT 0
cglobal mix_1_to_2_fltp_flt, 3,5,4, src0, matrix0, len, src1, matrix1
mov src1q, [src0q+gprsize]
mov src0q, [src0q]
sub src1q, src0q
mov matrix1q, [matrix0q+gprsize]
mov matrix0q, [matrix0q]
VBROADCASTSS m2, [matrix0q]
VBROADCASTSS m3, [matrix1q]
ALIGN 16
.loop:
mova m0, [src0q]
mulps m1, m0, m3
mulps m0, m0, m2
mova [src0q ], m0
mova [src0q+src1q], m1
add src0q, mmsize
sub lend, mmsize/4
jg .loop
REP_RET
%endmacro
INIT_XMM sse
MIX_1_TO_2_FLTP_FLT
INIT_YMM avx
MIX_1_TO_2_FLTP_FLT
;-----------------------------------------------------------------------------
; void ff_mix_1_to_2_s16p_flt(int16_t **src, float **matrix, int len,
; int out_ch, int in_ch);
;-----------------------------------------------------------------------------
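;
; Same expansion as above, but with s16p input/output and float coefficients
; (illustrative sketch only):
;
;     for (i = 0; i < len; i++) {
;         float v = src[0][i];
;         src[0][i] = av_clip_int16(lrintf(matrix[0][0] * v));
;         src[1][i] = av_clip_int16(lrintf(matrix[1][0] * v));
;     }
;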
%macro MIX_1_TO_2_S16P_FLT 0
cglobal mix_1_to_2_s16p_flt, 3,5,6, src0, matrix0, len, src1, matrix1
mov src1q, [src0q+gprsize]
mov src0q, [src0q]
sub src1q, src0q
mov matrix1q, [matrix0q+gprsize]
mov matrix0q, [matrix0q]
VBROADCASTSS m4, [matrix0q]
VBROADCASTSS m5, [matrix1q]
ALIGN 16
.loop:
mova m0, [src0q]
S16_TO_S32_SX 0, 2
cvtdq2ps m0, m0
cvtdq2ps m2, m2
mulps m1, m0, m5
mulps m0, m0, m4
mulps m3, m2, m5
mulps m2, m2, m4
cvtps2dq m0, m0
cvtps2dq m1, m1
cvtps2dq m2, m2
cvtps2dq m3, m3
packssdw m0, m2
packssdw m1, m3
mova [src0q ], m0
mova [src0q+src1q], m1
add src0q, mmsize
sub lend, mmsize/2
jg .loop
REP_RET
%endmacro
INIT_XMM sse2
MIX_1_TO_2_S16P_FLT
INIT_XMM sse4
MIX_1_TO_2_S16P_FLT
INIT_XMM avx
MIX_1_TO_2_S16P_FLT
;-----------------------------------------------------------------------------
; void ff_mix_3_8_to_1_2_fltp/s16p_flt(float/int16_t **src, float **matrix,
; int len, int out_ch, int in_ch);
;-----------------------------------------------------------------------------
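;
; Generic mixer for 3-8 input channels and 1 or 2 output channels; the result
; overwrites the first one or two input channel buffers. As many splatted
; matrix coefficients as possible are kept in mm registers and the rest are
; spilled to the stack. fmaddps is a helper macro from the includes: it emits
; a real FMA4 instruction when available and falls back to mulps+addps, using
; its last operand as a temporary.
;
; Illustrative scalar C sketch for the stereo-output fltp case (sketch only):
;
;     for (i = 0; i < len; i++) {
;         float v0 = 0, v1 = 0;
;         for (c = 0; c < in_ch; c++) {
;             v0 += matrix[0][c] * src[c][i];
;             v1 += matrix[1][c] * src[c][i];
;         }
;         src[0][i] = v0;
;         src[1][i] = v1;
;     }
;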
%macro MIX_3_8_TO_1_2_FLT 3 ; %1 = in channels, %2 = out channels, %3 = s16p or fltp
; define some names to make the code clearer
%assign in_channels %1
%assign out_channels %2
%assign stereo out_channels - 1
%ifidn %3, s16p
%assign is_s16 1
%else
%assign is_s16 0
%endif
; determine how many matrix elements must go on the stack vs. mmregs
%assign matrix_elements in_channels * out_channels
%if is_s16
%if stereo
%assign needed_mmregs 7
%else
%assign needed_mmregs 5
%endif
%else
%if stereo
%assign needed_mmregs 4
%else
%assign needed_mmregs 3
%endif
%endif
%assign matrix_elements_mm num_mmregs - needed_mmregs
%if matrix_elements < matrix_elements_mm
%assign matrix_elements_mm matrix_elements
%endif
%if matrix_elements_mm < matrix_elements
%assign matrix_elements_stack matrix_elements - matrix_elements_mm
%else
%assign matrix_elements_stack 0
%endif
%assign matrix_stack_size matrix_elements_stack * mmsize
%assign needed_stack_size -1 * matrix_stack_size
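; extra stack space for spilled src pointers on x86-32 with 7 or 8 input channels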
%if ARCH_X86_32 && in_channels >= 7
%assign needed_stack_size needed_stack_size - 16
%endif
cglobal mix_%1_to_%2_%3_flt, 3,in_channels+2,needed_mmregs+matrix_elements_mm, needed_stack_size, src0, src1, len, src2, src3, src4, src5, src6, src7
; define names for the src pointers spilled to the stack (x86-32 with 7-8 channels)
%if matrix_elements_stack > 0 && ARCH_X86_32 && in_channels >= 7
%define src5m [rsp+matrix_stack_size+0]
%define src6m [rsp+matrix_stack_size+4]
%define src7m [rsp+matrix_stack_size+8]
%endif
; load matrix pointers
%define matrix0q r1q
%define matrix1q r3q
%if stereo
mov matrix1q, [matrix0q+gprsize]
%endif
mov matrix0q, [matrix0q]
; define matrix coeff names
%assign %%i 0
%assign %%j needed_mmregs
%rep in_channels
%if %%i >= matrix_elements_mm
CAT_XDEFINE mx_stack_0_, %%i, 1
CAT_XDEFINE mx_0_, %%i, [rsp+(%%i-matrix_elements_mm)*mmsize]
%else
CAT_XDEFINE mx_stack_0_, %%i, 0
CAT_XDEFINE mx_0_, %%i, m %+ %%j
%assign %%j %%j+1
%endif
%assign %%i %%i+1
%endrep
%if stereo
%assign %%i 0
%rep in_channels
%if in_channels + %%i >= matrix_elements_mm
CAT_XDEFINE mx_stack_1_, %%i, 1
CAT_XDEFINE mx_1_, %%i, [rsp+(in_channels+%%i-matrix_elements_mm)*mmsize]
%else
CAT_XDEFINE mx_stack_1_, %%i, 0
CAT_XDEFINE mx_1_, %%i, m %+ %%j
%assign %%j %%j+1
%endif
%assign %%i %%i+1
%endrep
%endif
; load/splat matrix coeffs
%assign %%i 0
%rep in_channels
%if mx_stack_0_ %+ %%i
VBROADCASTSS m0, [matrix0q+4*%%i]
mova mx_0_ %+ %%i, m0
%else
VBROADCASTSS mx_0_ %+ %%i, [matrix0q+4*%%i]
%endif
%if stereo
%if mx_stack_1_ %+ %%i
VBROADCASTSS m0, [matrix1q+4*%%i]
mova mx_1_ %+ %%i, m0
%else
VBROADCASTSS mx_1_ %+ %%i, [matrix1q+4*%%i]
%endif
%endif
%assign %%i %%i+1
%endrep
; load the channel pointers and advance each one to the end of its buffer
%if ARCH_X86_64
movsxd lenq, r2d
%endif
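; convert the sample count to a byte offset: 4 bytes per sample for fltp,
; 2 bytes per sample for s16p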
shl lenq, 2-is_s16
%assign %%i 1
%rep (in_channels - 1)
%if ARCH_X86_32 && in_channels >= 7 && %%i >= 5
mov src5q, [src0q+%%i*gprsize]
add src5q, lenq
mov src %+ %%i %+ m, src5q
%else
mov src %+ %%i %+ q, [src0q+%%i*gprsize]
add src %+ %%i %+ q, lenq
%endif
%assign %%i %%i+1
%endrep
mov src0q, [src0q]
add src0q, lenq
neg lenq
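; each pointer now points one past the end of its buffer; lenq runs from
; -len*sample_size up to 0, so [srcNq+lenq] walks the buffers forward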
.loop:
; for x86-32 with 7-8 channels we do not have enough gp registers for all src
; pointers, so we have to load some of them from the stack each time
%define copy_src_from_stack ARCH_X86_32 && in_channels >= 7 && %%i >= 5
%if is_s16
; mix with s16p input
mova m0, [src0q+lenq]
S16_TO_S32_SX 0, 1
cvtdq2ps m0, m0
cvtdq2ps m1, m1
%if stereo
mulps m2, m0, mx_1_0
mulps m3, m1, mx_1_0
%endif
mulps m0, m0, mx_0_0
mulps m1, m1, mx_0_0
%assign %%i 1
%rep (in_channels - 1)
%if copy_src_from_stack
%define src_ptr src5q
%else
%define src_ptr src %+ %%i %+ q
%endif
%if stereo
%if copy_src_from_stack
mov src_ptr, src %+ %%i %+ m
%endif
mova m4, [src_ptr+lenq]
S16_TO_S32_SX 4, 5
cvtdq2ps m4, m4
cvtdq2ps m5, m5
fmaddps m2, m4, mx_1_ %+ %%i, m2, m6
fmaddps m3, m5, mx_1_ %+ %%i, m3, m6
fmaddps m0, m4, mx_0_ %+ %%i, m0, m4
fmaddps m1, m5, mx_0_ %+ %%i, m1, m5
%else
%if copy_src_from_stack
mov src_ptr, src %+ %%i %+ m
%endif
mova m2, [src_ptr+lenq]
S16_TO_S32_SX 2, 3
cvtdq2ps m2, m2
cvtdq2ps m3, m3
fmaddps m0, m2, mx_0_ %+ %%i, m0, m4
fmaddps m1, m3, mx_0_ %+ %%i, m1, m4
%endif
%assign %%i %%i+1
%endrep
%if stereo
cvtps2dq m2, m2
cvtps2dq m3, m3
packssdw m2, m3
mova [src1q+lenq], m2
%endif
cvtps2dq m0, m0
cvtps2dq m1, m1
packssdw m0, m1
mova [src0q+lenq], m0
%else
; mix with fltp input
%if stereo || mx_stack_0_0
mova m0, [src0q+lenq]
%endif
%if stereo
mulps m1, m0, mx_1_0
%endif
%if stereo || mx_stack_0_0
mulps m0, m0, mx_0_0
%else
mulps m0, [src0q+lenq], mx_0_0
%endif
%assign %%i 1
%rep (in_channels - 1)
%if copy_src_from_stack
%define src_ptr src5q
mov src_ptr, src %+ %%i %+ m
%else
%define src_ptr src %+ %%i %+ q
%endif
; avoid extra load for mono if matrix is in a mm register
%if stereo || mx_stack_0_ %+ %%i
mova m2, [src_ptr+lenq]
%endif
%if stereo
fmaddps m1, m2, mx_1_ %+ %%i, m1, m3
%endif
%if stereo || mx_stack_0_ %+ %%i
fmaddps m0, m2, mx_0_ %+ %%i, m0, m2
%else
fmaddps m0, mx_0_ %+ %%i, [src_ptr+lenq], m0, m1
%endif
%assign %%i %%i+1
%endrep
mova [src0q+lenq], m0
%if stereo
mova [src1q+lenq], m1
%endif
%endif
add lenq, mmsize
jl .loop
; zero ymm high halves to avoid AVX-to-SSE transition penalties in later SSE code
%if mmsize == 32
vzeroupper
%endif
RET
%endmacro
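; Instantiate the generic mixer for 3 through 8 input channels, 1 and 2 output
; channels, and each relevant instruction set (SSE/AVX/FMA4 for fltp;
; SSE2/SSE4/AVX/FMA4 for s16p).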
%macro MIX_3_8_TO_1_2_FLT_FUNCS 0
%assign %%i 3
%rep 6
INIT_XMM sse
MIX_3_8_TO_1_2_FLT %%i, 1, fltp
MIX_3_8_TO_1_2_FLT %%i, 2, fltp
INIT_XMM sse2
MIX_3_8_TO_1_2_FLT %%i, 1, s16p
MIX_3_8_TO_1_2_FLT %%i, 2, s16p
INIT_XMM sse4
MIX_3_8_TO_1_2_FLT %%i, 1, s16p
MIX_3_8_TO_1_2_FLT %%i, 2, s16p
; do not use ymm AVX or FMA4 in x86-32 for 6 or more channels due to stack alignment issues
%if ARCH_X86_64 || %%i < 6
INIT_YMM avx
%else
INIT_XMM avx
%endif
MIX_3_8_TO_1_2_FLT %%i, 1, fltp
MIX_3_8_TO_1_2_FLT %%i, 2, fltp
INIT_XMM avx
MIX_3_8_TO_1_2_FLT %%i, 1, s16p
MIX_3_8_TO_1_2_FLT %%i, 2, s16p
%if HAVE_FMA4_EXTERNAL
%if ARCH_X86_64 || %%i < 6
INIT_YMM fma4
%else
INIT_XMM fma4
%endif
MIX_3_8_TO_1_2_FLT %%i, 1, fltp
MIX_3_8_TO_1_2_FLT %%i, 2, fltp
INIT_XMM fma4
MIX_3_8_TO_1_2_FLT %%i, 1, s16p
MIX_3_8_TO_1_2_FLT %%i, 2, s16p
%endif
%assign %%i %%i+1
%endrep
%endmacro
MIX_3_8_TO_1_2_FLT_FUNCS