diff options
author | Rémi Denis-Courmont <remi@remlab.net> | 2024-07-22 20:23:50 +0300 |
---|---|---|
committer | Rémi Denis-Courmont <remi@remlab.net> | 2024-07-25 18:55:48 +0300 |
commit | 5f10173fa173f27405d4700522fea2b59fa82416 (patch) | |
tree | 59857528ec20467ae7fd7aab5d783c2b023db46b /libavutil | |
parent | e91a8cc4de2600dd6f18c139714fa1cad6db7ab9 (diff) | |
download | ffmpeg-5f10173fa173f27405d4700522fea2b59fa82416.tar.gz |
lavu/riscv: require B or zba explicitly
Diffstat (limited to 'libavutil')
-rw-r--r-- | libavutil/riscv/fixed_dsp_rvv.S | 14 | ||||
-rw-r--r-- | libavutil/riscv/float_dsp_rvv.S | 24 |
2 files changed, 19 insertions(+), 19 deletions(-)
diff --git a/libavutil/riscv/fixed_dsp_rvv.S b/libavutil/riscv/fixed_dsp_rvv.S index 6bac5813b8..0fa6aab3d4 100644 --- a/libavutil/riscv/fixed_dsp_rvv.S +++ b/libavutil/riscv/fixed_dsp_rvv.S @@ -20,7 +20,7 @@ #include "asm.S" -func ff_vector_fmul_window_scaled_rvv, zve64x +func ff_vector_fmul_window_scaled_rvv, zve64x, zba csrwi vxrm, 0 vsetvli t0, zero, e16, m1, ta, ma sh2add a2, a4, a2 @@ -68,7 +68,7 @@ func ff_vector_fmul_window_scaled_rvv, zve64x ret endfunc -func ff_vector_fmul_window_fixed_rvv, zve64x +func ff_vector_fmul_window_fixed_rvv, zve64x, zba csrwi vxrm, 0 vsetvli t0, zero, e16, m1, ta, ma sh2add a2, a4, a2 @@ -112,7 +112,7 @@ func ff_vector_fmul_window_fixed_rvv, zve64x ret endfunc -func ff_vector_fmul_fixed_rvv, zve32x +func ff_vector_fmul_fixed_rvv, zve32x, zba csrwi vxrm, 0 1: vsetvli t0, a3, e32, m4, ta, ma @@ -129,7 +129,7 @@ func ff_vector_fmul_fixed_rvv, zve32x ret endfunc -func ff_vector_fmul_reverse_fixed_rvv, zve32x +func ff_vector_fmul_reverse_fixed_rvv, zve32x, zba csrwi vxrm, 0 // e16/m4 and e32/m8 are possible but slow the gathers down. 
vsetvli t0, zero, e16, m1, ta, ma @@ -155,7 +155,7 @@ func ff_vector_fmul_reverse_fixed_rvv, zve32x ret endfunc -func ff_vector_fmul_add_fixed_rvv, zve32x +func ff_vector_fmul_add_fixed_rvv, zve32x, zba csrwi vxrm, 0 1: vsetvli t0, a4, e32, m8, ta, ma @@ -175,7 +175,7 @@ func ff_vector_fmul_add_fixed_rvv, zve32x ret endfunc -func ff_scalarproduct_fixed_rvv, zve64x +func ff_scalarproduct_fixed_rvv, zve64x, zba li t1, 1 << 30 vsetvli t0, zero, e64, m8, ta, ma vmv.v.x v8, zero @@ -198,7 +198,7 @@ func ff_scalarproduct_fixed_rvv, zve64x endfunc // (a0) = (a0) + (a1), (a1) = (a0) - (a1) [0..a2-1] -func ff_butterflies_fixed_rvv, zve32x +func ff_butterflies_fixed_rvv, zve32x, zba 1: vsetvli t0, a2, e32, m4, ta, ma vle32.v v16, (a0) diff --git a/libavutil/riscv/float_dsp_rvv.S b/libavutil/riscv/float_dsp_rvv.S index 2f0ade6db6..c7744cf0e8 100644 --- a/libavutil/riscv/float_dsp_rvv.S +++ b/libavutil/riscv/float_dsp_rvv.S @@ -21,7 +21,7 @@ #include "asm.S" // (a0) = (a1) * (a2) [0..a3-1] -func ff_vector_fmul_rvv, zve32f +func ff_vector_fmul_rvv, zve32f, zba 1: vsetvli t0, a3, e32, m8, ta, ma vle32.v v16, (a1) @@ -38,7 +38,7 @@ func ff_vector_fmul_rvv, zve32f endfunc // (a0) += (a1) * fa0 [0..a2-1] -func ff_vector_fmac_scalar_rvv, zve32f +func ff_vector_fmac_scalar_rvv, zve32f, zba NOHWF fmv.w.x fa0, a2 NOHWF mv a2, a3 1: @@ -57,7 +57,7 @@ NOHWF mv a2, a3 endfunc // (a0) = (a1) * fa0 [0..a2-1] -func ff_vector_fmul_scalar_rvv, zve32f +func ff_vector_fmul_scalar_rvv, zve32f, zba NOHWF fmv.w.x fa0, a2 NOHWF mv a2, a3 1: @@ -73,7 +73,7 @@ NOHWF mv a2, a3 ret endfunc -func ff_vector_fmul_window_rvv, zve32f +func ff_vector_fmul_window_rvv, zve32f, zba // a0: dst, a1: src0, a2: src1, a3: window, a4: length // e16/m2 and e32/m4 are possible but slower due to gather. 
vsetvli t0, zero, e16, m1, ta, ma @@ -113,7 +113,7 @@ func ff_vector_fmul_window_rvv, zve32f endfunc // (a0) = (a1) * (a2) + (a3) [0..a4-1] -func ff_vector_fmul_add_rvv, zve32f +func ff_vector_fmul_add_rvv, zve32f, zba 1: vsetvli t0, a4, e32, m8, ta, ma vle32.v v8, (a1) @@ -133,7 +133,7 @@ endfunc // TODO factor vrsub, separate last iteration? // (a0) = (a1) * reverse(a2) [0..a3-1] -func ff_vector_fmul_reverse_rvv, zve32f +func ff_vector_fmul_reverse_rvv, zve32f, zba // e16/m4 and e32/m8 are possible but slower due to gather. vsetvli t0, zero, e16, m1, ta, ma sh2add a2, a3, a2 @@ -159,7 +159,7 @@ func ff_vector_fmul_reverse_rvv, zve32f endfunc // (a0) = (a0) + (a1), (a1) = (a0) - (a1) [0..a2-1] -func ff_butterflies_float_rvv, zve32f +func ff_butterflies_float_rvv, zve32f, zba 1: vsetvli t0, a2, e32, m8, ta, ma vle32.v v16, (a0) @@ -177,7 +177,7 @@ func ff_butterflies_float_rvv, zve32f endfunc // a0 = (a0).(a1) [0..a2-1] -func ff_scalarproduct_float_rvv, zve32f +func ff_scalarproduct_float_rvv, zve32f, zba vsetvli t0, zero, e32, m8, ta, ma vmv.v.x v8, zero vmv.s.x v0, zero @@ -199,7 +199,7 @@ NOHWF fmv.x.w a0, fa0 endfunc // (a0) = (a1) * (a2) [0..a3-1] -func ff_vector_dmul_rvv, zve64d +func ff_vector_dmul_rvv, zve64d, zba 1: vsetvli t0, a3, e64, m8, ta, ma vle64.v v16, (a1) @@ -216,7 +216,7 @@ func ff_vector_dmul_rvv, zve64d endfunc // (a0) += (a1) * fa0 [0..a2-1] -func ff_vector_dmac_scalar_rvv, zve64d +func ff_vector_dmac_scalar_rvv, zve64d, zba NOHWD fmv.d.x fa0, a2 NOHWD mv a2, a3 1: @@ -234,7 +234,7 @@ NOHWD mv a2, a3 endfunc // (a0) = (a1) * fa0 [0..a2-1] -func ff_vector_dmul_scalar_rvv, zve64d +func ff_vector_dmul_scalar_rvv, zve64d, zba NOHWD fmv.d.x fa0, a2 NOHWD mv a2, a3 1: @@ -250,7 +250,7 @@ NOHWD mv a2, a3 ret endfunc -func ff_scalarproduct_double_rvv, zve64f +func ff_scalarproduct_double_rvv, zve64f, zba vsetvli t0, zero, e64, m8, ta, ma vmv.v.x v8, zero vmv.s.x v0, zero |