author    Logan Lyu <Logan.Lyu@myais.com.cn>  2023-10-26 09:17:36 +0800
committer Martin Storsjö <martin@martin.st>  2023-10-31 14:02:53 +0200
commit    265450b89e920c57fa35feff6d6d9953aec8f30e (patch)
tree      3761359e840ee2e0712a02367cc865fac22180bc /libavcodec/aarch64
parent    22c729150648f9f4b0d1e6f769d01bb4fb022016 (diff)
download  ffmpeg-265450b89e920c57fa35feff6d6d9953aec8f30e.tar.gz
lavc/aarch64: new optimization for 8-bit hevc_epel_hv
checkasm bench:
put_hevc_epel_hv4_8_c:      213.7
put_hevc_epel_hv4_8_i8mm:    59.4
put_hevc_epel_hv6_8_c:      350.9
put_hevc_epel_hv6_8_i8mm:   130.2
put_hevc_epel_hv8_8_c:      548.7
put_hevc_epel_hv8_8_i8mm:   136.9
put_hevc_epel_hv12_8_c:    1126.7
put_hevc_epel_hv12_8_i8mm:  302.2
put_hevc_epel_hv16_8_c:    1925.2
put_hevc_epel_hv16_8_i8mm:  459.9
put_hevc_epel_hv24_8_c:    4301.9
put_hevc_epel_hv24_8_i8mm: 1024.9
put_hevc_epel_hv32_8_c:    7509.2
put_hevc_epel_hv32_8_i8mm: 1680.4
put_hevc_epel_hv48_8_c:   16566.9
put_hevc_epel_hv48_8_i8mm: 3945.4
put_hevc_epel_hv64_8_c:   29134.2
put_hevc_epel_hv64_8_i8mm: 6567.7

Co-Authored-By: J. Dekker <jdek@itanimul.li>
Signed-off-by: Martin Storsjö <martin@martin.st>
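Relative to the C reference, the i8mm versions come out roughly 2.7x to 4.5x faster depending on block width. A small, self-contained C sketch (not part of the patch; it only replays the cycle counts quoted above) that derives those per-width factors:

/* Hypothetical helper, not part of the patch: prints the speedup of the
 * i8mm paths over the C reference from the checkasm cycle counts above. */
#include <stdio.h>

int main(void)
{
    static const struct { int width; double c_cycles, i8mm_cycles; } bench[] = {
        {  4,   213.7,   59.4 }, {  6,   350.9,  130.2 },
        {  8,   548.7,  136.9 }, { 12,  1126.7,  302.2 },
        { 16,  1925.2,  459.9 }, { 24,  4301.9, 1024.9 },
        { 32,  7509.2, 1680.4 }, { 48, 16566.9, 3945.4 },
        { 64, 29134.2, 6567.7 },
    };
    for (size_t i = 0; i < sizeof(bench) / sizeof(bench[0]); i++)
        printf("put_hevc_epel_hv%d_8: %.2fx faster\n",
               bench[i].width, bench[i].c_cycles / bench[i].i8mm_cycles);
    return 0;
}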
Diffstat (limited to 'libavcodec/aarch64')
-rw-r--r--  libavcodec/aarch64/hevcdsp_epel_neon.S     256
-rw-r--r--  libavcodec/aarch64/hevcdsp_init_aarch64.c    5
2 files changed, 261 insertions, 0 deletions
diff --git a/libavcodec/aarch64/hevcdsp_epel_neon.S b/libavcodec/aarch64/hevcdsp_epel_neon.S
index 96ec58cdbc..a2a051210f 100644
--- a/libavcodec/aarch64/hevcdsp_epel_neon.S
+++ b/libavcodec/aarch64/hevcdsp_epel_neon.S
@@ -1018,6 +1018,262 @@ function ff_hevc_put_hevc_epel_h64_8_neon_i8mm, export=1
ret
endfunc
+
+function ff_hevc_put_hevc_epel_hv4_8_neon_i8mm, export=1
+ add w10, w3, #3
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x5, x30, [sp, #-32]!
+ stp x0, x3, [sp, #16]
+ add x0, sp, #32
+ sub x1, x1, x2
+ add w3, w3, #3
+ bl X(ff_hevc_put_hevc_epel_h4_8_neon_i8mm)
+ ldp x0, x3, [sp, #16]
+ ldp x5, x30, [sp], #32
+ load_epel_filterh x5, x4
+ mov x10, #(MAX_PB_SIZE * 2)
+ ldr d16, [sp]
+ ldr d17, [sp, x10]
+ add sp, sp, x10, lsl #1
+ ld1 {v18.4h}, [sp], x10
+.macro calc src0, src1, src2, src3
+ ld1 {\src3\().4h}, [sp], x10
+ calc_epelh v4, \src0, \src1, \src2, \src3
+ subs w3, w3, #1
+ st1 {v4.4h}, [x0], x10
+.endm
+1: calc_all4
+.purgem calc
+2: ret
+endfunc
+
+function ff_hevc_put_hevc_epel_hv6_8_neon_i8mm, export=1
+ add w10, w3, #3
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x5, x30, [sp, #-32]!
+ stp x0, x3, [sp, #16]
+ add x0, sp, #32
+ sub x1, x1, x2
+ add w3, w3, #3
+ bl X(ff_hevc_put_hevc_epel_h6_8_neon_i8mm)
+ ldp x0, x3, [sp, #16]
+ ldp x5, x30, [sp], #32
+ load_epel_filterh x5, x4
+ mov x5, #120
+ mov x10, #(MAX_PB_SIZE * 2)
+ ldr q16, [sp]
+ ldr q17, [sp, x10]
+ add sp, sp, x10, lsl #1
+ ld1 {v18.8h}, [sp], x10
+.macro calc src0, src1, src2, src3
+ ld1 {\src3\().8h}, [sp], x10
+ calc_epelh v4, \src0, \src1, \src2, \src3
+ calc_epelh2 v4, v5, \src0, \src1, \src2, \src3
+ st1 {v4.d}[0], [x0], #8
+ subs w3, w3, #1
+ st1 {v4.s}[2], [x0], x5
+.endm
+1: calc_all4
+.purgem calc
+2: ret
+endfunc
+
+function ff_hevc_put_hevc_epel_hv8_8_neon_i8mm, export=1
+ add w10, w3, #3
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x5, x30, [sp, #-32]!
+ stp x0, x3, [sp, #16]
+ add x0, sp, #32
+ sub x1, x1, x2
+ add w3, w3, #3
+ bl X(ff_hevc_put_hevc_epel_h8_8_neon_i8mm)
+ ldp x0, x3, [sp, #16]
+ ldp x5, x30, [sp], #32
+ load_epel_filterh x5, x4
+ mov x10, #(MAX_PB_SIZE * 2)
+ ldr q16, [sp]
+ ldr q17, [sp, x10]
+ add sp, sp, x10, lsl #1
+ ld1 {v18.8h}, [sp], x10
+.macro calc src0, src1, src2, src3
+ ld1 {\src3\().8h}, [sp], x10
+ calc_epelh v4, \src0, \src1, \src2, \src3
+ calc_epelh2 v4, v5, \src0, \src1, \src2, \src3
+ subs w3, w3, #1
+ st1 {v4.8h}, [x0], x10
+.endm
+1: calc_all4
+.purgem calc
+2: ret
+endfunc
+
+function ff_hevc_put_hevc_epel_hv12_8_neon_i8mm, export=1
+ add w10, w3, #3
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x5, x30, [sp, #-32]!
+ stp x0, x3, [sp, #16]
+ add x0, sp, #32
+ sub x1, x1, x2
+ add w3, w3, #3
+ bl X(ff_hevc_put_hevc_epel_h12_8_neon_i8mm)
+ ldp x0, x3, [sp, #16]
+ ldp x5, x30, [sp], #32
+ load_epel_filterh x5, x4
+ mov x5, #112
+ mov x10, #(MAX_PB_SIZE * 2)
+ ld1 {v16.8h, v17.8h}, [sp], x10
+ ld1 {v18.8h, v19.8h}, [sp], x10
+ ld1 {v20.8h, v21.8h}, [sp], x10
+.macro calc src0, src1, src2, src3, src4, src5, src6, src7
+ ld1 {\src6\().8h, \src7\().8h}, [sp], x10
+ calc_epelh v4, \src0, \src2, \src4, \src6
+ calc_epelh2 v4, v5, \src0, \src2, \src4, \src6
+ calc_epelh v5, \src1, \src3, \src5, \src7
+ st1 {v4.8h}, [x0], #16
+ subs w3, w3, #1
+ st1 {v5.4h}, [x0], x5
+.endm
+1: calc_all8
+.purgem calc
+2: ret
+endfunc
+
+function ff_hevc_put_hevc_epel_hv16_8_neon_i8mm, export=1
+ add w10, w3, #3
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x5, x30, [sp, #-32]!
+ stp x0, x3, [sp, #16]
+ add x0, sp, #32
+ sub x1, x1, x2
+ add w3, w3, #3
+ bl X(ff_hevc_put_hevc_epel_h16_8_neon_i8mm)
+ ldp x0, x3, [sp, #16]
+ ldp x5, x30, [sp], #32
+ load_epel_filterh x5, x4
+ mov x10, #(MAX_PB_SIZE * 2)
+ ld1 {v16.8h, v17.8h}, [sp], x10
+ ld1 {v18.8h, v19.8h}, [sp], x10
+ ld1 {v20.8h, v21.8h}, [sp], x10
+.macro calc src0, src1, src2, src3, src4, src5, src6, src7
+ ld1 {\src6\().8h, \src7\().8h}, [sp], x10
+ calc_epelh v4, \src0, \src2, \src4, \src6
+ calc_epelh2 v4, v5, \src0, \src2, \src4, \src6
+ calc_epelh v5, \src1, \src3, \src5, \src7
+ calc_epelh2 v5, v6, \src1, \src3, \src5, \src7
+ subs w3, w3, #1
+ st1 {v4.8h, v5.8h}, [x0], x10
+.endm
+1: calc_all8
+.purgem calc
+2: ret
+endfunc
+
+function ff_hevc_put_hevc_epel_hv24_8_neon_i8mm, export=1
+ add w10, w3, #3
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x5, x30, [sp, #-32]!
+ stp x0, x3, [sp, #16]
+ add x0, sp, #32
+ sub x1, x1, x2
+ add w3, w3, #3
+ bl X(ff_hevc_put_hevc_epel_h24_8_neon_i8mm)
+ ldp x0, x3, [sp, #16]
+ ldp x5, x30, [sp], #32
+ load_epel_filterh x5, x4
+ mov x10, #(MAX_PB_SIZE * 2)
+ ld1 {v16.8h, v17.8h, v18.8h}, [sp], x10
+ ld1 {v19.8h, v20.8h, v21.8h}, [sp], x10
+ ld1 {v22.8h, v23.8h, v24.8h}, [sp], x10
+.macro calc src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10, src11
+ ld1 {\src9\().8h-\src11\().8h}, [sp], x10
+ calc_epelh v4, \src0, \src3, \src6, \src9
+ calc_epelh2 v4, v5, \src0, \src3, \src6, \src9
+ calc_epelh v5, \src1, \src4, \src7, \src10
+ calc_epelh2 v5, v6, \src1, \src4, \src7, \src10
+ calc_epelh v6, \src2, \src5, \src8, \src11
+ calc_epelh2 v6, v7, \src2, \src5, \src8, \src11
+ subs w3, w3, #1
+ st1 {v4.8h-v6.8h}, [x0], x10
+.endm
+1: calc_all12
+.purgem calc
+2: ret
+endfunc
+
+function ff_hevc_put_hevc_epel_hv32_8_neon_i8mm, export=1
+ stp x4, x5, [sp, #-64]!
+ stp x2, x3, [sp, #16]
+ stp x0, x1, [sp, #32]
+ str x30, [sp, #48]
+ mov x6, #16
+ bl X(ff_hevc_put_hevc_epel_hv16_8_neon_i8mm)
+ ldp x0, x1, [sp, #32]
+ ldp x2, x3, [sp, #16]
+ ldp x4, x5, [sp], #48
+ add x0, x0, #32
+ add x1, x1, #16
+ mov x6, #16
+ bl X(ff_hevc_put_hevc_epel_hv16_8_neon_i8mm)
+ ldr x30, [sp], #16
+ ret
+endfunc
+
+function ff_hevc_put_hevc_epel_hv48_8_neon_i8mm, export=1
+ stp x4, x5, [sp, #-64]!
+ stp x2, x3, [sp, #16]
+ stp x0, x1, [sp, #32]
+ str x30, [sp, #48]
+ mov x6, #24
+ bl X(ff_hevc_put_hevc_epel_hv24_8_neon_i8mm)
+ ldp x0, x1, [sp, #32]
+ ldp x2, x3, [sp, #16]
+ ldp x4, x5, [sp], #48
+ add x0, x0, #48
+ add x1, x1, #24
+ mov x6, #24
+ bl X(ff_hevc_put_hevc_epel_hv24_8_neon_i8mm)
+ ldr x30, [sp], #16
+ ret
+endfunc
+
+function ff_hevc_put_hevc_epel_hv64_8_neon_i8mm, export=1
+ stp x4, x5, [sp, #-64]!
+ stp x2, x3, [sp, #16]
+ stp x0, x1, [sp, #32]
+ str x30, [sp, #48]
+ mov x6, #16
+ bl X(ff_hevc_put_hevc_epel_hv16_8_neon_i8mm)
+ ldp x4, x5, [sp]
+ ldp x2, x3, [sp, #16]
+ ldp x0, x1, [sp, #32]
+ add x0, x0, #32
+ add x1, x1, #16
+ mov x6, #16
+ bl X(ff_hevc_put_hevc_epel_hv16_8_neon_i8mm)
+ ldp x4, x5, [sp]
+ ldp x2, x3, [sp, #16]
+ ldp x0, x1, [sp, #32]
+ add x0, x0, #64
+ add x1, x1, #32
+ mov x6, #16
+ bl X(ff_hevc_put_hevc_epel_hv16_8_neon_i8mm)
+ ldp x0, x1, [sp, #32]
+ ldp x2, x3, [sp, #16]
+ ldp x4, x5, [sp], #48
+ add x0, x0, #96
+ add x1, x1, #48
+ mov x6, #16
+ bl X(ff_hevc_put_hevc_epel_hv16_8_neon_i8mm)
+ ldr x30, [sp], #16
+ ret
+endfunc
+
function ff_hevc_put_hevc_epel_uni_hv4_8_neon_i8mm, export=1
add w10, w4, #3
lsl x10, x10, #7
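All of the new hv entry points in the hunk above share one structure: reserve a MAX_PB_SIZE-strided 16-bit scratch area on the stack, call the matching ff_hevc_put_hevc_epel_h*_8_neon_i8mm horizontal helper over height + 3 rows starting one row above the source, reload the vertical coefficients with load_epel_filterh, and then run the 4-tap vertical filter over that intermediate (the 32/48/64 variants instead split the block and call the 16/24-wide versions two or four times). A rough C equivalent of that two-pass flow, for orientation only: epel_h_i8mm and epel_filters are placeholders, and the >> 6 intermediate shift follows the generic 8-bit hv template rather than this assembly line by line.

/* Illustrative two-pass structure only; not the actual FFmpeg C code.
 * epel_h_i8mm() stands in for the ff_hevc_put_hevc_epel_h*_8_neon_i8mm
 * helpers the assembly calls, and epel_filters[] for the 4-tap EPEL
 * coefficient table loaded by load_epel_filterh. */
#include <stdint.h>
#include <stddef.h>

#define MAX_PB_SIZE 64

extern const int8_t epel_filters[7][4];   /* hypothetical coefficient table */
extern void epel_h_i8mm(int16_t *dst, const uint8_t *src, ptrdiff_t srcstride,
                        int height, intptr_t mx, intptr_t my, int width);

static void epel_hv_sketch(int16_t *dst, const uint8_t *src, ptrdiff_t srcstride,
                           int height, intptr_t mx, intptr_t my, int width)
{
    int16_t tmp_array[(MAX_PB_SIZE + 3) * MAX_PB_SIZE];
    int16_t *tmp = tmp_array;
    const int8_t *filter = epel_filters[my - 1];

    /* pass 1: horizontal filter into the scratch buffer, covering one
     * extra row above and two below the block (height + 3 rows total) */
    epel_h_i8mm(tmp, src - srcstride, srcstride, height + 3, mx, my, width);

    /* pass 2: 4-tap vertical filter over the 16-bit intermediate;
     * dst of the "put" variants is a MAX_PB_SIZE-strided int16_t plane */
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++)
            dst[x] = (filter[0] * tmp[x] +
                      filter[1] * tmp[x +     MAX_PB_SIZE] +
                      filter[2] * tmp[x + 2 * MAX_PB_SIZE] +
                      filter[3] * tmp[x + 3 * MAX_PB_SIZE]) >> 6;
        tmp += MAX_PB_SIZE;
        dst += MAX_PB_SIZE;
    }
}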
diff --git a/libavcodec/aarch64/hevcdsp_init_aarch64.c b/libavcodec/aarch64/hevcdsp_init_aarch64.c
index 42aa76ddde..e54d8d7b1e 100644
--- a/libavcodec/aarch64/hevcdsp_init_aarch64.c
+++ b/libavcodec/aarch64/hevcdsp_init_aarch64.c
@@ -191,6 +191,10 @@ NEON8_FNPROTO(epel_h, (int16_t *dst,
const uint8_t *_src, ptrdiff_t _srcstride,
int height, intptr_t mx, intptr_t my, int width), _i8mm);
+NEON8_FNPROTO(epel_hv, (int16_t *dst,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, intptr_t mx, intptr_t my, int width), _i8mm);
+
NEON8_FNPROTO(epel_uni_w_h, (uint8_t *_dst, ptrdiff_t _dststride,
const uint8_t *_src, ptrdiff_t _srcstride,
int height, int denom, int wx, int ox,
@@ -322,6 +326,7 @@ av_cold void ff_hevc_dsp_init_aarch64(HEVCDSPContext *c, const int bit_depth)
if (have_i8mm(cpu_flags)) {
NEON8_FNASSIGN(c->put_hevc_epel, 0, 1, epel_h, _i8mm);
+ NEON8_FNASSIGN(c->put_hevc_epel, 1, 1, epel_hv, _i8mm);
NEON8_FNASSIGN(c->put_hevc_epel_uni, 1, 1, epel_uni_hv, _i8mm);
NEON8_FNASSIGN(c->put_hevc_epel_uni_w, 0, 1, epel_uni_w_h ,_i8mm);
NEON8_FNASSIGN(c->put_hevc_qpel, 0, 1, qpel_h, _i8mm);
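The hunk above declares the nine width variants (4 through 64) with NEON8_FNPROTO and installs them in the put_hevc_epel table when i8mm is detected. Purely as orientation, and assuming the usual [size_idx][!!my][!!mx] layout of that table, a caller reaches the new hv code roughly like this; every name in the snippet is hypothetical, not FFmpeg API.

/* Illustrative only: how decoder-side code would end up in the new i8mm
 * hv path once the init function has filled in the pointer table. */
#include <stdint.h>
#include <stddef.h>

typedef void (*epel_fn)(int16_t *dst, const uint8_t *src, ptrdiff_t srcstride,
                        int height, intptr_t mx, intptr_t my, int width);

struct dsp_sketch {
    epel_fn put_hevc_epel[10][2][2];   /* [size_idx][vertical][horizontal] */
};

static epel_fn select_epel(const struct dsp_sketch *dsp,
                           int size_idx, intptr_t mx, intptr_t my)
{
    /* non-zero mx/my means a fractional offset in that direction, so
     * mx && my selects the combined hv kernels added by this patch */
    return dsp->put_hevc_epel[size_idx][my != 0][mx != 0];
}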