diff options
author    Martin Storsjö <martin@martin.st>    2017-01-04 12:57:56 +0200
committer Martin Storsjö <martin@martin.st>    2017-02-11 00:31:58 +0200
commit    3fcf788fbbccc4130868e7abe58a88990290f7c1 (patch)
tree      f960fae64e606885c673d300951b74a4289041eb /libavcodec/aarch64
parent    a76bf8cf1277ef6feb1580b578f5e6ca327e713c (diff)
download  ffmpeg-3fcf788fbbccc4130868e7abe58a88990290f7c1.tar.gz
aarch64: vp9itxfm: Optimize 16x16 and 32x32 idct dc by unrolling
This work is sponsored by, and copyright, Google.
Before: Cortex A53
vp9_inv_dct_dct_16x16_sub1_add_neon: 235.3
vp9_inv_dct_dct_32x32_sub1_add_neon: 555.1
After:
vp9_inv_dct_dct_16x16_sub1_add_neon: 180.2
vp9_inv_dct_dct_32x32_sub1_add_neon: 475.3
Signed-off-by: Martin Storsjö <martin@martin.st>
Diffstat (limited to 'libavcodec/aarch64')
 libavcodec/aarch64/vp9itxfm_neon.S | 54 ++++++++++++++++++++++++++++----------------
 1 file changed, 36 insertions(+), 18 deletions(-)
diff --git a/libavcodec/aarch64/vp9itxfm_neon.S b/libavcodec/aarch64/vp9itxfm_neon.S
index a199e9c603..d35f103a79 100644
--- a/libavcodec/aarch64/vp9itxfm_neon.S
+++ b/libavcodec/aarch64/vp9itxfm_neon.S
@@ -495,16 +495,23 @@ function idct16x16_dc_add_neon
         srshr           v2.8h,  v2.8h,  #6
 
+        mov             x3,  x0
         mov             x4,  #16
 1:
         // Loop to add the constant from v2 into all 16x16 outputs
-        ld1             {v3.16b},  [x0]
-        uaddw           v4.8h,  v2.8h,  v3.8b
-        uaddw2          v5.8h,  v2.8h,  v3.16b
-        sqxtun          v4.8b,  v4.8h
-        sqxtun2         v4.16b, v5.8h
-        st1             {v4.16b},  [x0], x1
-        subs            x4,  x4,  #1
+        subs            x4,  x4,  #2
+        ld1             {v3.16b},  [x0], x1
+        ld1             {v4.16b},  [x0], x1
+        uaddw           v16.8h, v2.8h,  v3.8b
+        uaddw2          v17.8h, v2.8h,  v3.16b
+        uaddw           v18.8h, v2.8h,  v4.8b
+        uaddw2          v19.8h, v2.8h,  v4.16b
+        sqxtun          v3.8b,  v16.8h
+        sqxtun2         v3.16b, v17.8h
+        sqxtun          v4.8b,  v18.8h
+        sqxtun2         v4.16b, v19.8h
+        st1             {v3.16b},  [x3], x1
+        st1             {v4.16b},  [x3], x1
         b.ne            1b
 
         ret
@@ -1054,20 +1061,31 @@ function idct32x32_dc_add_neon
         srshr           v0.8h,  v2.8h,  #6
 
+        mov             x3,  x0
         mov             x4,  #32
 1:
         // Loop to add the constant v0 into all 32x32 outputs
-        ld1             {v1.16b,v2.16b},  [x0]
-        uaddw           v3.8h,  v0.8h,  v1.8b
-        uaddw2          v4.8h,  v0.8h,  v1.16b
-        uaddw           v5.8h,  v0.8h,  v2.8b
-        uaddw2          v6.8h,  v0.8h,  v2.16b
-        sqxtun          v3.8b,  v3.8h
-        sqxtun2         v3.16b, v4.8h
-        sqxtun          v4.8b,  v5.8h
-        sqxtun2         v4.16b, v6.8h
-        st1             {v3.16b,v4.16b},  [x0], x1
-        subs            x4,  x4,  #1
+        subs            x4,  x4,  #2
+        ld1             {v1.16b,v2.16b},  [x0], x1
+        uaddw           v16.8h, v0.8h,  v1.8b
+        uaddw2          v17.8h, v0.8h,  v1.16b
+        ld1             {v3.16b,v4.16b},  [x0], x1
+        uaddw           v18.8h, v0.8h,  v2.8b
+        uaddw2          v19.8h, v0.8h,  v2.16b
+        uaddw           v20.8h, v0.8h,  v3.8b
+        uaddw2          v21.8h, v0.8h,  v3.16b
+        uaddw           v22.8h, v0.8h,  v4.8b
+        uaddw2          v23.8h, v0.8h,  v4.16b
+        sqxtun          v1.8b,  v16.8h
+        sqxtun2         v1.16b, v17.8h
+        sqxtun          v2.8b,  v18.8h
+        sqxtun2         v2.16b, v19.8h
+        sqxtun          v3.8b,  v20.8h
+        sqxtun2         v3.16b, v21.8h
+        st1             {v1.16b,v2.16b},  [x3], x1
+        sqxtun          v4.8b,  v22.8h
+        sqxtun2         v4.16b, v23.8h
+        st1             {v3.16b,v4.16b},  [x3], x1
         b.ne            1b
 
         ret