author     Rémi Denis-Courmont <remi@remlab.net>  2024-06-02 19:54:48 +0300
committer  Rémi Denis-Courmont <remi@remlab.net>  2024-06-07 17:53:05 +0300
commit     04397a29deccf72575a7b02ed56eb37bef2934cc (patch)
tree       d5a27f5b56e35bb720769628055aadea59b52714
parent     8d117024fe05dcf0f45693b47118e4c26e0940af (diff)
download   ffmpeg-04397a29deccf72575a7b02ed56eb37bef2934cc.tar.gz
lavc/vc1dsp: R-V V vc1_inv_trans_8x8
T-Head C908 (cycles):
vc1dsp.vc1_inv_trans_8x8_c:       871.7
vc1dsp.vc1_inv_trans_8x8_rvv_i32: 286.7
-rw-r--r--  libavcodec/riscv/vc1dsp_init.c |   2
-rw-r--r--  libavcodec/riscv/vc1dsp_rvv.S  | 110
2 files changed, 112 insertions, 0 deletions
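
The new 1-D helper implements the VC-1 8-point inverse transform butterfly. As a reading aid, here is a rough C model of one pass (the function name is illustrative; as in the assembly, the rounding shift is deliberately left to the caller):

/* One 1-D pass of the VC-1 8-point inverse transform; the RVV helper
 * computes this for eight lanes at once, with src[0..7]/dst[0..7]
 * living in v0..v7. Rounding is omitted here on purpose. */
static void inv_trans_8_model(const int16_t src[8], int dst[8])
{
    /* even half */
    int t1 = 12 * (src[0] + src[4]);
    int t2 = 12 * (src[0] - src[4]);
    int t3 = 16 * src[2] +  6 * src[6];
    int t4 =  6 * src[2] - 16 * src[6];
    int t5 = t1 + t3, t6 = t2 + t4, t7 = t2 - t4, t8 = t1 - t3;

    /* odd half */
    int u1 = 16 * src[1] + 15 * src[3] +  9 * src[5] +  4 * src[7];
    int u2 = 15 * src[1] -  4 * src[3] - 16 * src[5] -  9 * src[7];
    int u3 =  9 * src[1] - 16 * src[3] +  4 * src[5] + 15 * src[7];
    int u4 =  4 * src[1] -  9 * src[3] + 15 * src[5] - 16 * src[7];

    dst[0] = t5 + u1; dst[1] = t6 + u2; dst[2] = t7 + u3; dst[3] = t8 + u4;
    dst[4] = t8 - u4; dst[5] = t7 - u3; dst[6] = t6 - u2; dst[7] = t5 - u1;
}

In the assembly, the multiplications by 4 and 16 are strength-reduced to shifts (vsll.vi), and the scalar reference's (x + 4) >> 3 and (x + 64) >> 7 rounding falls out of vssra.vi once vxrm is set to round-to-nearest-up.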
diff --git a/libavcodec/riscv/vc1dsp_init.c b/libavcodec/riscv/vc1dsp_init.c
index e4838fb347..b8a1015ce5 100644
--- a/libavcodec/riscv/vc1dsp_init.c
+++ b/libavcodec/riscv/vc1dsp_init.c
@@ -26,6 +26,7 @@
#include "libavcodec/vc1.h"
void ff_vc1_inv_trans_8x8_dc_rvv(uint8_t *dest, ptrdiff_t stride, int16_t *block);
+void ff_vc1_inv_trans_8x8_rvv(int16_t block[64]);
void ff_vc1_inv_trans_4x8_dc_rvv(uint8_t *dest, ptrdiff_t stride, int16_t *block);
void ff_vc1_inv_trans_8x4_dc_rvv(uint8_t *dest, ptrdiff_t stride, int16_t *block);
void ff_vc1_inv_trans_4x4_dc_rvv(uint8_t *dest, ptrdiff_t stride, int16_t *block);
@@ -53,6 +54,7 @@ av_cold void ff_vc1dsp_init_riscv(VC1DSPContext *dsp)
# if HAVE_RVV
if (flags & AV_CPU_FLAG_RVV_I32) {
if (ff_rv_vlen_least(128)) {
+ dsp->vc1_inv_trans_8x8 = ff_vc1_inv_trans_8x8_rvv;
dsp->vc1_inv_trans_4x8_dc = ff_vc1_inv_trans_4x8_dc_rvv;
dsp->vc1_inv_trans_4x4_dc = ff_vc1_inv_trans_4x4_dc_rvv;
dsp->avg_vc1_mspel_pixels_tab[0][0] = ff_avg_pixels16x16_rvv;
diff --git a/libavcodec/riscv/vc1dsp_rvv.S b/libavcodec/riscv/vc1dsp_rvv.S
index 8b3a830a4a..e15783d113 100644
--- a/libavcodec/riscv/vc1dsp_rvv.S
+++ b/libavcodec/riscv/vc1dsp_rvv.S
@@ -113,6 +113,116 @@ func ff_vc1_inv_trans_4x4_dc_rvv, zve32x
ret
endfunc
+ .variant_cc ff_vc1_inv_trans_8_rvv # helper with a non-standard CC: data in v0..v7, return via t0
+func ff_vc1_inv_trans_8_rvv, zve32x
+ li t4, 12
+ vsll.vi v18, v6, 4 # 16*src[6]
+ li t2, 6
+ vmul.vx v8, v0, t4 # 12*src[0]
+ li t5, 15
+ vmul.vx v10, v4, t4 # 12*src[4]
+ li t3, 9
+ # t[2..5] = [6 9 12 15]
+ vsll.vi v12, v2, 4 # 16*src[2]
+ vmul.vx v14, v6, t2 # 6*src[6]
+ vmul.vx v16, v2, t2 # 6*src[2]
+ vadd.vv v26, v12, v14 # t3 = 16*src[2] + 6*src[6]
+ vadd.vv v24, v8, v10 # t1 = 12*(src[0] + src[4])
+ vsub.vv v25, v8, v10 # t2 = 12*(src[0] - src[4])
+ vsub.vv v27, v16, v18 # t4 = 6*src[2] - 16*src[6]
+ vadd.vv v28, v24, v26 # t5 = t1 + t3
+ vsub.vv v31, v24, v26 # t8 = t1 - t3
+ vadd.vv v29, v25, v27 # t6 = t2 + t4
+ vsub.vv v30, v25, v27 # t7 = t2 - t4
+ vsll.vi v20, v1, 4 # 16*src[1]
+ vmul.vx v21, v3, t5 # 15*src[3]
+ vmul.vx v22, v5, t3 # 9*src[5]
+ vsll.vi v23, v7, 2 # 4*src[7]
+ vadd.vv v20, v20, v21
+ vadd.vv v22, v22, v23
+ vsll.vi v21, v3, 2 # 4*src[3], for t2 below
+ vadd.vv v16, v20, v22 # t1 = 16*src[1] + 15*src[3] + 9*src[5] + 4*src[7]
+ vmul.vx v20, v1, t5 # 15*src[1]
+ vsll.vi v22, v5, 4 # 16*src[5]
+ vmul.vx v23, v7, t3 # 9*src[7]
+ vsub.vv v20, v20, v21
+ vadd.vv v22, v22, v23
+ vsll.vi v21, v3, 4 # 16*src[3], for t3 below
+ vsub.vv v17, v20, v22 # t2 = 15*src[1] - 4*src[3] - 16*src[5] - 9*src[7]
+ vmul.vx v20, v1, t3 # 9*src[1]
+ vsll.vi v22, v5, 2 # 4*src[5]
+ vmul.vx v23, v7, t5 # 15*src[7]
+ vsub.vv v20, v20, v21
+ vadd.vv v22, v22, v23
+ vmul.vx v21, v3, t3 # 9*src[3], for t4 below
+ vadd.vv v18, v20, v22 # t3 = 9*src[1] - 16*src[3] + 4*src[5] + 15*src[7]
+ vsll.vi v20, v1, 2 # 4*src[1]
+ vmul.vx v22, v5, t5 # 15*src[5]
+ vsll.vi v23, v7, 4 # 16*src[7]
+ vsub.vv v20, v20, v21
+ vsub.vv v22, v22, v23
+ vadd.vv v0, v28, v16 # dst[0] = t5 + t1
+ vadd.vv v19, v20, v22 # t4 = 4*src[1] - 9*src[3] + 15*src[5] - 16*src[7]
+ vadd.vv v1, v29, v17 # dst[1] = t6 + t2
+ vadd.vv v2, v30, v18 # dst[2] = t7 + t3
+ vadd.vv v3, v31, v19 # dst[3] = t8 + t4
+ vsub.vv v4, v31, v19 # dst[4] = t8 - t4
+ vsub.vv v5, v30, v18 # dst[5] = t7 - t3
+ vsub.vv v6, v29, v17 # dst[6] = t6 - t2
+ vsub.vv v7, v28, v16 # dst[7] = t5 - t1
+ jr t0 # return via t0 (see .variant_cc above)
+endfunc
+
+func ff_vc1_inv_trans_8x8_rvv, zve32x
+ csrwi vxrm, 0 # fixed-point rounding mode: round-to-nearest-up
+ vsetivli zero, 8, e16, m1, ta, ma # 8 x 16-bit elements (needs VLEN >= 128)
+ addi a1, a0, 1 * 8 * 2 # a1..a7 = &block[row * 8], rows 1..7
+ vle16.v v0, (a0) # load rows 0..7 into v0..v7
+ addi a2, a0, 2 * 8 * 2
+ vle16.v v1, (a1)
+ addi a3, a0, 3 * 8 * 2
+ vle16.v v2, (a2)
+ addi a4, a0, 4 * 8 * 2
+ vle16.v v3, (a3)
+ addi a5, a0, 5 * 8 * 2
+ vle16.v v4, (a4)
+ addi a6, a0, 6 * 8 * 2
+ vle16.v v5, (a5)
+ addi a7, a0, 7 * 8 * 2
+ vle16.v v6, (a6)
+ vle16.v v7, (a7)
+ jal t0, ff_vc1_inv_trans_8_rvv # first 1-D pass
+ .irp n,0,1,2,3,4,5,6,7
+ vssra.vi v\n, v\n, 3 # (x + 4) >> 3, bias from vxrm
+ .endr
+ vsseg8e16.v v0, (a0) # segment store: writes the block transposed
+ .irp n,0,1,2,3,4,5,6,7
+ vle16.v v\n, (a\n) # reload rows of the transposed block
+ .endr
+ jal t0, ff_vc1_inv_trans_8_rvv # second 1-D pass
+ vadd.vi v4, v4, 1 # rows 4..7 take an extra +1 bias
+ vadd.vi v5, v5, 1
+ vssra.vi v4, v4, 7 # (x + 64) >> 7, bias from vxrm
+ vssra.vi v5, v5, 7
+ vse16.v v4, (a4)
+ vadd.vi v6, v6, 1
+ vse16.v v5, (a5)
+ vadd.vi v7, v7, 1
+ vssra.vi v6, v6, 7
+ vssra.vi v7, v7, 7
+ vse16.v v6, (a6)
+ vssra.vi v0, v0, 7
+ vse16.v v7, (a7)
+ vssra.vi v1, v1, 7
+ vse16.v v0, (a0)
+ vssra.vi v2, v2, 7
+ vse16.v v1, (a1)
+ vssra.vi v3, v3, 7
+ vse16.v v2, (a2)
+ vse16.v v3, (a3)
+ ret
+endfunc
+
.macro mspel_op op pos n1 n2
add t1, \pos, a2
v\op\()e8.v v\n1, (\pos)