/*
 * Copyright © 2022 Rémi Denis-Courmont.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/riscv/asm.S"

func ff_ps_add_squares_rvv, zve32f
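        /*
         * Assumed C prototype (cf. PSDSPContext.add_squares in
         * libavcodec/aacpsdsp.h):
         *   void add_squares(float *dst, const float (*src)[2], int n);
         * a0 = dst, a1 = src (interleaved re/im pairs), a2 = n
         * Computes dst[i] += src[i][0] * src[i][0] + src[i][1] * src[i][1].
         */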
1:
        vsetvli     t0, a2, e32, m1, ta, ma
        vlseg2e32.v v24, (a1)
        sub         a2, a2, t0
        vle32.v     v16, (a0)
        sh3add      a1, t0, a1
        vfmacc.vv   v16, v24, v24
        vfmacc.vv   v16, v25, v25
        vse32.v     v16, (a0)
        sh2add      a0, t0, a0
        bnez        a2, 1b

        ret
endfunc

func ff_ps_mul_pair_single_rvv, zve32f
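        /*
         * Assumed C prototype (cf. PSDSPContext.mul_pair_single):
         *   void mul_pair_single(float (*dst)[2], float (*src0)[2],
         *                        float *src1, int n);
         * a0 = dst, a1 = src0 (re/im pairs), a2 = src1 (real factors), a3 = n
         * Computes dst[i][0] = src0[i][0] * src1[i] and
         *          dst[i][1] = src0[i][1] * src1[i].
         */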
1:
        vsetvli     t0, a3, e32, m1, ta, ma
        vlseg2e32.v v24, (a1)
        sub         a3, a3, t0
        vle32.v     v16, (a2)
        sh3add      a1, t0, a1
        vfmul.vv    v24, v24, v16
        sh2add      a2, t0, a2
        vfmul.vv    v25, v25, v16
        vsseg2e32.v v24, (a0)
        sh3add      a0, t0, a0
        bnez        a3, 1b

        ret
endfunc

func ff_ps_hybrid_analysis_rvv, zve32f
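        /*
         * Assumed C prototype (cf. PSDSPContext.hybrid_analysis):
         *   void hybrid_analysis(float (*out)[2], float (*in)[2],
         *                        const float (*filter)[8][2],
         *                        ptrdiff_t stride, int n);
         * a0 = out, a1 = in (13 complex input samples), a2 = filter,
         * a3 = stride (in complex elements), a4 = n
         */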
        /* We need 26 FP registers but only 20 scratch ones, so spill fs0-fs5. */
        addi    sp, sp, -48
        .irp n, 0, 1, 2, 3, 4, 5
HWD     fsd     fs\n, (8 * \n)(sp)
NOHWD   fsw     fs\n, (4 * \n)(sp)
        .endr

        .macro input, j, fd0, fd1, fd2, fd3
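        /*
         * Fold the symmetric input samples j and 12 - j, presumably as in the
         * scalar reference:
         *   fd0 = in[j][0] + in[12-j][0]    fd1 = in[j][1] - in[12-j][1]
         *   fd2 = in[j][1] + in[12-j][1]    fd3 = in[j][0] - in[12-j][0]
         */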
        flw     \fd0, (4 * ((\j * 2) + 0))(a1)
        flw     fs4, (4 * (((12 - \j) * 2) + 0))(a1)
        flw     \fd1, (4 * ((\j * 2) + 1))(a1)
        fsub.s  \fd3, \fd0, fs4
        flw     fs5, (4 * (((12 - \j) * 2) + 1))(a1)
        fadd.s  \fd2, \fd1, fs5
        fadd.s  \fd0, \fd0, fs4
        fsub.s  \fd1, \fd1, fs5
        .endm

        //         re0, re1, im0, im1
        input   0, ft0, ft1, ft2, ft3
        input   1, ft4, ft5, ft6, ft7
        input   2, ft8, ft9, ft10, ft11
        input   3, fa0, fa1, fa2, fa3
        input   4, fa4, fa5, fa6, fa7
        input   5, fs0, fs1, fs2, fs3
        flw     fs4, (4 * ((6 * 2) + 0))(a1)
        flw     fs5, (4 * ((6 * 2) + 1))(a1)

        add        a2, a2, 6 * 2 * 4 // point to filter[i][6][0]
        li         t4, 8 * 2 * 4 // filter byte stride
        slli       a3, a3, 3 // output byte stride
1:
        .macro filter, vs0, vs1, fo0, fo1, fo2, fo3
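        /*
         * Accumulate one tap into the running sums, presumably matching the
         * scalar reference:
         *   v8 (sum_re) += fo0 * filter_re - fo1 * filter_im
         *   v9 (sum_im) += fo2 * filter_re + fo3 * filter_im
         * with vs0/vs1 holding filter[i][j][0]/filter[i][j][1] across the
         * n outputs i.
         */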
        vfmacc.vf  v8, \fo0, \vs0
        vfmacc.vf  v9, \fo2, \vs0
        vfnmsac.vf v8, \fo1, \vs1
        vfmacc.vf  v9, \fo3, \vs1
        .endm

        vsetvli    t0, a4, e32, m1, ta, ma
        /*
         * The filter (a2) has 16 segments, of which 13 need to be extracted.
         * R-V V supports only up to 8 segments, so unrolling is unavoidable.
         */
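        /*
         * Each vlse32.v below gathers one of those 13 coefficients across the
         * n outputs, using the filter byte stride held in t4.
         */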
        addi       t1, a2, -48
        vlse32.v   v22, (a2), t4
        addi       t2, a2, -44
        vlse32.v   v16, (t1), t4
        addi       t1, a2, -40
        vfmul.vf   v8, v22, fs4
        vlse32.v   v24, (t2), t4
        addi       t2, a2, -36
        vfmul.vf   v9, v22, fs5
        vlse32.v   v17, (t1), t4
        addi       t1, a2, -32
        vlse32.v   v25, (t2), t4
        addi       t2, a2, -28
        filter     v16, v24, ft0, ft1, ft2, ft3
        vlse32.v   v18, (t1), t4
        addi       t1, a2, -24
        vlse32.v   v26, (t2), t4
        addi       t2, a2, -20
        filter     v17, v25, ft4, ft5, ft6, ft7
        vlse32.v   v19, (t1), t4
        addi       t1, a2, -16
        vlse32.v   v27, (t2), t4
        addi       t2, a2, -12
        filter     v18, v26, ft8, ft9, ft10, ft11
        vlse32.v   v20, (t1), t4
        addi       t1, a2, -8
        vlse32.v   v28, (t2), t4
        addi       t2, a2, -4
        filter     v19, v27, fa0, fa1, fa2, fa3
        vlse32.v   v21, (t1), t4
        sub        a4, a4, t0
        vlse32.v   v29, (t2), t4
        slli       t1, t0, 3 + 1 + 2 // ctz(8 * 2 * 4)
        add        a2, a2, t1
        filter     v20, v28, fa4, fa5, fa6, fa7
        filter     v21, v29, fs0, fs1, fs2, fs3

        add        t2, a0, 4
        vsse32.v   v8, (a0), a3
        mul        t0, t0, a3
        vsse32.v   v9, (t2), a3
        add        a0, a0, t0
        bnez       a4, 1b

        .irp n, 5, 4, 3, 2, 1, 0
HWD     fld     fs\n, (8 * \n)(sp)
NOHWD   flw     fs\n, (4 * \n)(sp)
        .endr
        addi    sp, sp, 48
        ret
        .purgem input
        .purgem filter
endfunc

func ff_ps_hybrid_analysis_ileave_rvv, zve32x /* no need for zve32f here */
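        /*
         * Assumed C prototype (cf. PSDSPContext.hybrid_analysis_ileave):
         *   void hybrid_analysis_ileave(float (*out)[32][2],
         *                               float L[2][38][64], int i, int len);
         * a0 = out, a1 = L, a2 = i, a3 = len
         * Interleaves out[i][j][0] = L[0][j][i] and out[i][j][1] = L[1][j][i]
         * for i up to 63 and j = 0..len-1.
         */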
        slli        t0, a2, 5 + 1 + 2 // ctz(32 * 2 * 4)
        sh2add      a1, a2, a1
        add         a0, a0, t0
        addi        a2, a2, -64
        li          t1, 38 * 64 * 4
        li          t6, 64 * 4 // (uint8_t *)L[x][j+1][i] - L[x][j][i]
        add         a4, a1, t1 // &L[1]
        beqz        a2, 3f
1:
        mv          t0, a0
        mv          t1, a1
        mv          t3, a3
        mv          t4, a4
        addi        a2, a2, 1
2:
        vsetvli     t5, t3, e32, m1, ta, ma
        vlse32.v    v16, (t1), t6
        sub         t3, t3, t5
        vlse32.v    v17, (t4), t6
        mul         t2, t5, t6
        vsseg2e32.v v16, (t0)
        sh3add      t0, t5, t0
        add         t1, t1, t2
        add         t4, t4, t2
        bnez        t3, 2b

        add         a0, a0, 32 * 2 * 4
        add         a1, a1, 4
        add         a4, a4, 4
        bnez        a2, 1b
3:
        ret
endfunc

func ff_ps_hybrid_synthesis_deint_rvv, zve32x
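        /*
         * Assumed C prototype (cf. PSDSPContext.hybrid_synthesis_deint):
         *   void hybrid_synthesis_deint(float out[2][38][64],
         *                               float (*in)[32][2], int i, int len);
         * a0 = out, a1 = in, a2 = i, a3 = len
         * De-interleaves out[0][n][i] = in[i][n][0] and
         * out[1][n][i] = in[i][n][1] for i up to 63 and n = 0..len-1.
         */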
        slli        t1, a2, 5 + 1 + 2
        sh2add      a0, a2, a0
        add         a1, a1, t1
        addi        a2, a2, -64
        li          t1, 38 * 64 * 4
        li          t6, 64 * 4
        add         a4, a0, t1
        beqz        a2, 3f
1:
        mv          t0, a0
        mv          t1, a1
        mv          t3, a3
        mv          t4, a4
        addi        a2, a2, 1
2:
        vsetvli     t5, t3, e32, m1, ta, ma
        vlseg2e32.v v16, (t1)
        sub         t3, t3, t5
        vsse32.v    v16, (t0), t6
        mul         t2, t5, t6
        vsse32.v    v17, (t4), t6
        sh3add      t1, t5, t1
        add         t0, t0, t2
        add         t4, t4, t2
        bnez        t3, 2b

        add         a0, a0, 4
        add         a1, a1, 32 * 2 * 4
        add         a4, a4, 4
        bnez        a2, 1b
3:
        ret
endfunc

func ff_ps_stereo_interpolate_rvv, zve32f
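        /*
         * Assumed C prototype (cf. PSDSPContext.stereo_interpolate[0]):
         *   void stereo_interpolate(float (*l)[2], float (*r)[2],
         *                           float h[2][4], float h_step[2][4],
         *                           int len);
         * a0 = l, a1 = r, a2 = h, a3 = h_step, a4 = len
         */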
        vsetvli      t0, zero, e32, m1, ta, ma
        vid.v        v24
        flw          ft0,   (a2)
        vadd.vi      v24, v24, 1   // v24[i] = i + 1
        flw          ft1,  4(a2)
        vfcvt.f.xu.v v24, v24
        flw          ft2,  8(a2)
        vfmv.v.f     v16, ft0
        flw          ft3, 12(a2)
        vfmv.v.f     v17, ft1
        flw          ft0,   (a3)
        vfmv.v.f     v18, ft2
        flw          ft1,  4(a3)
        vfmv.v.f     v19, ft3
        flw          ft2,  8(a3)
        vfmv.v.f     v20, ft0
        flw          ft3, 12(a3)
        vfmv.v.f     v21, ft1
        fcvt.s.wu    ft4, t0       // (float)(vlenb / sizeof (float))
        vfmv.v.f     v22, ft2
        fmul.s       ft0, ft0, ft4
        vfmv.v.f     v23, ft3
        fmul.s       ft1, ft1, ft4
        vfmacc.vv    v16, v24, v20 // h0 += (i + 1) * h0_step
        fmul.s       ft2, ft2, ft4
        vfmacc.vv    v17, v24, v21
        fmul.s       ft3, ft3, ft4
        vfmacc.vv    v18, v24, v22
        vfmacc.vv    v19, v24, v23
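        /*
         * The loop below presumably follows the scalar reference: per sample,
         *   l[n] = h0 * l[n] + h2 * r[n]
         *   r[n] = h1 * l[n] + h3 * r[n]
         * where h0..h3 advance by one h_step per sample; v16-v19 already hold
         * the per-lane h values, offset by (lane index + 1) steps.
         */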
1:
        vsetvli   t0, a4, e32, m1, ta, ma
        vlseg2e32.v v8, (a0)     // v8:l_re, v9:l_im
        sub       a4, a4, t0
        vlseg2e32.v v10, (a1)    // v10:r_re, v11:r_im
        vfmul.vv  v12, v8, v16
        vfmul.vv  v13, v9, v16
        vfmul.vv  v14, v8, v17
        vfmul.vv  v15, v9, v17
        vfmacc.vv v12, v10, v18
        vfmacc.vv v13, v11, v18
        vfmacc.vv v14, v10, v19
        vfmacc.vv v15, v11, v19
        vsseg2e32.v v12, (a0)
        sh3add    a0, t0, a0
        vsseg2e32.v v14, (a1)
        sh3add    a1, t0, a1
        vfadd.vf  v16, v16, ft0 // h0 += (vlenb / sizeof (float)) * h0_step
        vfadd.vf  v17, v17, ft1
        vfadd.vf  v18, v18, ft2
        vfadd.vf  v19, v19, ft3
        bnez      a4, 1b

        ret
endfunc