;*****************************************************************************
;* x86-optimized functions for showcqt filter
;*
;* Copyright (C) 2016 Muhammad Faiz <mfcc64@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

%if ARCH_X86_64
%define pointer resq
%else
%define pointer resd
%endif

struc Coeffs
    .val:    pointer 1
    .start:  resd 1
    .len:    resd 1
    .sizeof:
endstruc
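
; The struc above mirrors the matching C-side coefficient descriptor
; (Coeffs in avf_showcqt.h): val points at the coefficient array, start
; is the first FFT bin it applies to, and len is the number of entries.
; "pointer" is resq/resd so that .sizeof matches the C struct layout on
; both 64-bit and 32-bit builds.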

%macro EMULATE_HADDPS 3 ; dst, src, tmp
%if cpuflag(sse3)
    haddps  %1, %2
%else
    movaps  %3, %1
    shufps  %1, %2, q2020
    shufps  %3, %2, q3131
    addps   %1, %3
%endif
%endmacro ; EMULATE_HADDPS
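
; The SSE fallback above reproduces haddps exactly: q2020 gathers the
; even lanes of dst:src, q3131 gathers the odd lanes, and adding the two
; yields the same pairwise horizontal sums haddps computes in one
; instruction.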

%macro EMULATE_FMADDPS 5 ; dst, src1, src2, src3, tmp
%if cpuflag(fma3) || cpuflag(fma4)
    fmaddps %1, %2, %3, %4
%else
    mulps   %5, %2, %3
    addps   %1, %4, %5
%endif
%endmacro ; EMULATE_FMADDPS
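
; Without FMA, dst = src1 * src2 + src3 is split into mulps + addps
; through the spare register. The three-operand forms are safe on plain
; SSE targets because x86inc's instruction macros emulate them with an
; extra move when AVX is not enabled.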

%macro CQT_CALC 9
; %1 = a_re, %2 = a_im, %3 = b_re, %4 = b_im
; %5 = m_re, %6 = m_im, %7 = tmp, %8 = coeffval, %9 = coeffsq_offset
    mov     id, xd
    add     id, [coeffsq + Coeffs.start + %9]
    movaps  m%5, [srcq + 8 * iq]
    movaps  m%7, [srcq + 8 * iq + mmsize]
    shufps  m%6, m%5, m%7, q3131
    shufps  m%5, m%5, m%7, q2020
    sub     id, fft_lend
    EMULATE_FMADDPS m%2, m%6, m%8, m%2, m%6
    neg     id
    EMULATE_FMADDPS m%1, m%5, m%8, m%1, m%5
    movups  m%5, [srcq + 8 * iq - mmsize + 8]
    movups  m%7, [srcq + 8 * iq - 2*mmsize + 8]
%if mmsize == 32
    vperm2f128 m%5, m%5, m%5, 1
    vperm2f128 m%7, m%7, m%7, 1
%endif
    shufps  m%6, m%5, m%7, q1313
    shufps  m%5, m%5, m%7, q0202
    EMULATE_FMADDPS m%4, m%6, m%8, m%4, m%6
    EMULATE_FMADDPS m%3, m%5, m%8, m%3, m%5
%endmacro ; CQT_CALC
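
; CQT_CALC above processes mmsize/4 bins per call. The aligned loads
; fetch src[i] upward from i = start + x, with the shufps pair splitting
; the interleaved re/im values; after i is flipped to fft_len - i, the
; unaligned loads (offset so they end at src[i]) fetch the mirrored bins,
; reversed by the q1313/q0202 shuffles (plus a vperm2f128 half-swap on
; ymm). Both halves are scaled by the same real coefficients in m%8 and
; added to the a_* and b_* accumulators.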

%macro CQT_SEPARATE 6 ; a_re, a_im, b_re, b_im, tmp, tmp2
    addps   m%5, m%4, m%2
    subps   m%6, m%3, m%1
    addps   m%1, m%3
    subps   m%2, m%4
    EMULATE_HADDPS m%5, m%6, m%3
    EMULATE_HADDPS m%1, m%2, m%3
    EMULATE_HADDPS m%1, m%5, m%2
%if mmsize == 32
    vextractf128 xmm%2, m%1, 1
    addps   xmm%1, xmm%2
%endif
%endmacro ; CQT_SEPARATE
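
; The FFT input was packed as left + i*right, so combining bin i with its
; mirror fft_len - i separates the channels (up to a factor of 2):
; l = (a.re + b.re, a.im - b.im) and r = (b.im + a.im, b.re - a.re).
; The haddps cascade then reduces each accumulator to a scalar, leaving
; xmm%1 = [sum l.re, sum l.im, sum r.re, sum r.im].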
%macro DECLARE_CQT_CALC 0
; ff_showcqt_cqt_calc_*(dst, src, coeffs, len, fft_len)
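;
; The scalar logic being vectorized is roughly (cf. cqt_calc() in
; libavfilter/avf_showcqt.c; variable names abbreviated):
;
;     for (k = 0; k < len; k++) {
;         FFTComplex l, r, a = {0,0}, b = {0,0};
;         for (x = 0; x < coeffs[k].len; x++) {
;             FFTSample u = coeffs[k].val[x];
;             int i = coeffs[k].start + x;
;             a.re += u * src[i].re;           a.im += u * src[i].im;
;             b.re += u * src[fft_len-i].re;   b.im += u * src[fft_len-i].im;
;         }
;         l.re = a.re + b.re;  l.im = a.im - b.im;  /* left        */
;         r.re = b.im + a.im;  r.im = b.re - a.re;  /* right       */
;         dst[k].re = l.re * l.re + l.im * l.im;    /* left power  */
;         dst[k].im = r.re * r.re + r.im * r.im;    /* right power */
;     }
;
; The 64-bit version below interleaves two k values per iteration
; (accumulators m0-m3 and m8-m11) to hide instruction latency.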
%if ARCH_X86_64
cglobal showcqt_cqt_calc, 5, 10, 12, dst, src, coeffs, len, fft_len, x, coeffs_val, coeffs_val2, i, coeffs_len
    align   16
    .loop_k:
        mov     xd, [coeffsq + Coeffs.len]
        xorps   m0, m0
        movaps  m1, m0
        movaps  m2, m0
        mov     coeffs_lend, [coeffsq + Coeffs.len + Coeffs.sizeof]
        movaps  m3, m0
        movaps  m8, m0
        cmp     coeffs_lend, xd
        movaps  m9, m0
        movaps  m10, m0
        movaps  m11, m0
        cmova   coeffs_lend, xd
        xor     xd, xd
        test    coeffs_lend, coeffs_lend
        jz      .check_loop_b
        mov     coeffs_valq, [coeffsq + Coeffs.val]
        mov     coeffs_val2q, [coeffsq + Coeffs.val + Coeffs.sizeof]
        align   16
        .loop_ab:
            movaps  m7, [coeffs_valq + 4 * xq]
            CQT_CALC 0, 1, 2, 3, 4, 5, 6, 7, 0
            movaps  m7, [coeffs_val2q + 4 * xq]
            CQT_CALC 8, 9, 10, 11, 4, 5, 6, 7, Coeffs.sizeof
            add     xd, mmsize/4
            cmp     xd, coeffs_lend
            jb      .loop_ab
        .check_loop_b:
        cmp     xd, [coeffsq + Coeffs.len + Coeffs.sizeof]
        jae     .check_loop_a
        align   16
        .loop_b:
            movaps  m7, [coeffs_val2q + 4 * xq]
            CQT_CALC 8, 9, 10, 11, 4, 5, 6, 7, Coeffs.sizeof
            add     xd, mmsize/4
            cmp     xd, [coeffsq + Coeffs.len + Coeffs.sizeof]
            jb      .loop_b
    .loop_end:
        CQT_SEPARATE 0, 1, 2, 3, 4, 5
        CQT_SEPARATE 8, 9, 10, 11, 4, 5
        mulps   xmm0, xmm0
        mulps   xmm8, xmm8
        EMULATE_HADDPS xmm0, xmm8, xmm1
        movaps  [dstq], xmm0
        sub     lend, 2
        lea     dstq, [dstq + 16]
        lea     coeffsq, [coeffsq + 2*Coeffs.sizeof]
        jnz     .loop_k
        REP_RET
        align   16
        .check_loop_a:
        cmp     xd, [coeffsq + Coeffs.len]
        jae     .loop_end
        align   16
        .loop_a:
            movaps  m7, [coeffs_valq + 4 * xq]
            CQT_CALC 0, 1, 2, 3, 4, 5, 6, 7, 0
            add     xd, mmsize/4
            cmp     xd, [coeffsq + Coeffs.len]
            jb      .loop_a
        jmp     .loop_end
%else
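; x86_32: too few registers to interleave two bins, so handle one k per
; iteration, with fft_len read back from the stack argument via r4m.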
cglobal showcqt_cqt_calc, 4, 7, 8, dst, src, coeffs, len, x, coeffs_val, i
%define fft_lend r4m
    align   16
    .loop_k:
        mov     xd, [coeffsq + Coeffs.len]
        xorps   m0, m0
        movaps  m1, m0
        movaps  m2, m0
        movaps  m3, m0
        test    xd, xd
        jz      .store
        mov     coeffs_valq, [coeffsq + Coeffs.val]
        xor     xd, xd
        align   16
        .loop_x:
            movaps  m7, [coeffs_valq + 4 * xq]
            CQT_CALC 0, 1, 2, 3, 4, 5, 6, 7, 0
            add     xd, mmsize/4
            cmp     xd, [coeffsq + Coeffs.len]
            jb      .loop_x
        CQT_SEPARATE 0, 1, 2, 3, 4, 5
        mulps   xmm0, xmm0
        EMULATE_HADDPS xmm0, xmm0, xmm1
    .store:
        movlps  [dstq], xmm0
        sub     lend, 1
        lea     dstq, [dstq + 8]
        lea     coeffsq, [coeffsq + Coeffs.sizeof]
        jnz     .loop_k
        REP_RET
%endif ; ARCH_X86_64
%endmacro ; DECLARE_CQT_CALC

INIT_XMM sse
DECLARE_CQT_CALC
INIT_XMM sse3
DECLARE_CQT_CALC
INIT_YMM avx
DECLARE_CQT_CALC
INIT_YMM fma3
DECLARE_CQT_CALC
INIT_XMM fma4
DECLARE_CQT_CALC