author    Rémi Denis-Courmont <remi@remlab.net>    2023-07-15 23:23:50 +0300
committer Rémi Denis-Courmont <remi@remlab.net>    2023-11-12 14:03:09 +0200
commit    eb508702a899f4a41d3653d90b077b487083d2ab (patch)
tree      b52ded1270009f9851301c4691e752ea7ce1d0be /libavcodec/riscv
parent    ab78d22553fe7052e0763f9bb319b0ba9c3bca47 (diff)
lavc/aacpsdsp: rework R-V V add_squares
Segmented loads may be slower than plain unit-strided loads. So this advantageously uses a unit-strided 64-bit load and narrowing shifts instead.

Before:
ps_add_squares_c:       60757.7
ps_add_squares_rvv_f32: 22242.5

After:
ps_add_squares_c:       60516.0
ps_add_squares_rvv_i64: 17067.7
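For context, add_squares accumulates the squared magnitude of each interleaved complex (re, im) sample into dst. A minimal C sketch of the reference behaviour, as in ps_add_squares_c from libavcodec/aacpsdsp_template.c:

/* Reference behaviour the vector version must reproduce:
 * dst[i] += re*re + im*im for every interleaved complex sample. */
static void ps_add_squares_c(float *dst, const float (*src)[2], int n)
{
    for (int i = 0; i < n; i++)
        dst[i] += src[i][0] * src[i][0] + src[i][1] * src[i][1];
}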
Diffstat (limited to 'libavcodec/riscv')
-rw-r--r--  libavcodec/riscv/aacpsdsp_init.c | 3
-rw-r--r--  libavcodec/riscv/aacpsdsp_rvv.S  | 9
2 files changed, 8 insertions(+), 4 deletions(-)
diff --git a/libavcodec/riscv/aacpsdsp_init.c b/libavcodec/riscv/aacpsdsp_init.c
index c5ec796232..f72d1bc330 100644
--- a/libavcodec/riscv/aacpsdsp_init.c
+++ b/libavcodec/riscv/aacpsdsp_init.c
@@ -46,7 +46,8 @@ av_cold void ff_psdsp_init_riscv(PSDSPContext *c)
         c->hybrid_analysis = ff_ps_hybrid_analysis_rvv;
 
         if (flags & AV_CPU_FLAG_RVB_ADDR) {
-            c->add_squares = ff_ps_add_squares_rvv;
+            if (flags & AV_CPU_FLAG_RVV_I64)
+                c->add_squares = ff_ps_add_squares_rvv;
             c->mul_pair_single = ff_ps_mul_pair_single_rvv;
             c->stereo_interpolate[0] = ff_ps_stereo_interpolate_rvv;
         }
diff --git a/libavcodec/riscv/aacpsdsp_rvv.S b/libavcodec/riscv/aacpsdsp_rvv.S
index fe250cd83b..cf872599c8 100644
--- a/libavcodec/riscv/aacpsdsp_rvv.S
+++ b/libavcodec/riscv/aacpsdsp_rvv.S
@@ -1,5 +1,5 @@
 /*
- * Copyright © 2022 Rémi Denis-Courmont.
+ * Copyright © 2022-2023 Rémi Denis-Courmont.
  *
  * This file is part of FFmpeg.
  *
@@ -20,13 +20,16 @@
 
 #include "libavutil/riscv/asm.S"
 
-func ff_ps_add_squares_rvv, zve32f
+func ff_ps_add_squares_rvv, zve64f
+        li      t1, 32
 1:
         vsetvli t0, a2, e32, m4, ta, ma
-        vlseg2e32.v v24, (a1)
+        vle64.v v8, (a1)
         sub     a2, a2, t0
+        vnsrl.wx v24, v8, zero
         vle32.v v16, (a0)
         sh3add  a1, t0, a1
+        vnsrl.wx v28, v8, t1
         vfmacc.vv v16, v24, v24
         vfmacc.vv v16, v28, v28
         vse32.v v16, (a0)
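For readers less used to RVV, here is a scalar C model (a sketch, not part of the patch) of what the rewritten loop computes per element; the comments map each statement to the instruction it stands in for, assuming the little-endian layout guaranteed on RISC-V:

#include <stdint.h>
#include <string.h>

/* Scalar model of the new scheme: each (re, im) float pair is fetched
 * as one 64-bit unit, then narrowing shifts split it into the low word
 * (re) and the high word (im) before the squares are accumulated. */
static void add_squares_model(float *dst, const float (*src)[2], int n)
{
    for (int i = 0; i < n; i++) {
        uint64_t pair;
        uint32_t lo, hi;
        float re, im;

        memcpy(&pair, src[i], sizeof pair);    /* vle64.v  v8, (a1)           */
        lo = (uint32_t)pair;                   /* vnsrl.wx v24, v8, zero      */
        hi = (uint32_t)(pair >> 32);           /* vnsrl.wx v28, v8, t1 (= 32) */
        memcpy(&re, &lo, sizeof re);           /* bit-cast words back to float */
        memcpy(&im, &hi, sizeof im);
        dst[i] += re * re + im * im;           /* vfmacc.vv, twice            */
    }
}

The 64-bit element accesses are also why the init code above now gates the function pointer on AV_CPU_FLAG_RVV_I64 and the function declares zve64f rather than zve32f: vle64.v and the 64-to-32-bit vnsrl.wx narrowing shifts require 64-bit vector elements.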