author     Sebastian Pop <spop@amazon.com>                2019-11-17 14:13:13 -0600
committer  Michael Niedermayer <michael@niedermayer.cc>   2019-12-17 23:41:47 +0100
commit     bd831912712e32b8e78d409bfa7ea7e668ce4b42 (patch)
tree       9c83daba41b45fcb56f0cd013d2600fbfab39785
parent     e43d66dc67186a2ca9fefec4e6c189116a3029ba (diff)
download   ffmpeg-bd831912712e32b8e78d409bfa7ea7e668ce4b42.tar.gz
swscale/aarch64: use multiply accumulate and increase vector factor to 4
This patch implements ff_hscale_8_to_15_neon with NEON fused multiply-accumulate
instructions and bumps the vectorization factor from 2 to 4.
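For orientation, the scalar loop this routine vectorizes can be sketched in C as
follows (a sketch modeled on libswscale's generic hScale8To15_c; the function name
and clipping details here are illustrative, not the committed code):

#include <stdint.h>

/* Reference model: each output pixel is the sum of filterSize products,
 * shifted down by 7 and clipped to 15 bits (cf. the sqshrn #7 in the
 * assembly below). */
static void hscale_8_to_15_ref(int16_t *dst, int dstW, const uint8_t *src,
                               const int16_t *filter,
                               const int32_t *filterPos, int filterSize)
{
    for (int i = 0; i < dstW; i++) {
        int val = 0;
        for (int j = 0; j < filterSize; j++)
            val += (int)src[filterPos[i] + j] * filter[filterSize * i + j];
        int v = val >> 7;
        dst[i] = v > 0x7FFF ? 0x7FFF : v;   /* clip to 15-bit max */
    }
}

The NEON version computes four dst[] values per outer iteration and eight filter
taps per inner iteration, which is the factor-of-4 vectorization the title refers to.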
The speedup is 25% on Graviton1 A1 instances based on Cortex-A72 CPUs:
$ ffmpeg -nostats -f lavfi -i testsrc2=4k:d=2 -vf bench=start,scale=1024x1024,bench=stop -f null -
before: t:0.040303 avg:0.040287 max:0.040371 min:0.039214
after: t:0.032168 avg:0.032215 max:0.033081 min:0.032146
The speedup is 39% on Graviton2 m6g instances based on Neoverse-N1 CPUs:
$ ffmpeg -nostats -f lavfi -i testsrc2=4k:d=2 -vf bench=start,scale=1024x1024,bench=stop -f null -
before: t:0.019446 avg:0.019423 max:0.019493 min:0.019181
after: t:0.014015 avg:0.014096 max:0.015018 min:0.013971
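(For reference, the quoted percentages match the ratios of the t values above:
0.040303 / 0.032168 ≈ 1.25 on Graviton1 and 0.019446 / 0.014015 ≈ 1.39 on Graviton2.)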
Tested with `make check` on aarch64-linux.
Signed-off-by: Sebastian Pop <spop@amazon.com>
Reviewed-by: Jean-Baptiste Kempf <jb@videolan.org>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
-rw-r--r-- | libswscale/aarch64/hscale.S | 85
1 file changed, 53 insertions(+), 32 deletions(-)
diff --git a/libswscale/aarch64/hscale.S b/libswscale/aarch64/hscale.S
index cc78c1901d..8743183b51 100644
--- a/libswscale/aarch64/hscale.S
+++ b/libswscale/aarch64/hscale.S
@@ -21,39 +21,60 @@
 #include "libavutil/aarch64/asm.S"
 
 function ff_hscale_8_to_15_neon, export=1
-        add             x10, x4, w6, UXTW #1    // filter2 = filter + filterSize*2 (x2 because int16)
-1:      ldr             w8, [x5], #4            // filterPos[0]
-        ldr             w9, [x5], #4            // filterPos[1]
-        movi            v4.4S, #0               // val sum part 1 (for dst[0])
-        movi            v5.4S, #0               // val sum part 2 (for dst[1])
-        mov             w7, w6                  // filterSize counter
-        mov             x13, x3                 // srcp = src
-2:      add             x11, x13, w8, UXTW      // srcp + filterPos[0]
-        add             x12, x13, w9, UXTW      // srcp + filterPos[1]
-        ld1             {v0.8B}, [x11]          // srcp[filterPos[0] + {0..7}]
-        ld1             {v1.8B}, [x12]          // srcp[filterPos[1] + {0..7}]
-        ld1             {v2.8H}, [x4], #16      // load 8x16-bit filter values, part 1
-        ld1             {v3.8H}, [x10], #16     // ditto at filter+filterSize for part 2
-        uxtl            v0.8H, v0.8B            // unpack part 1 to 16-bit
-        uxtl            v1.8H, v1.8B            // unpack part 2 to 16-bit
-        smull           v16.4S, v0.4H, v2.4H    // v16.i32{0..3} = part 1 of: srcp[filterPos[0] + {0..7}] * filter[{0..7}]
-        smull           v18.4S, v1.4H, v3.4H    // v18.i32{0..3} = part 1 of: srcp[filterPos[1] + {0..7}] * filter[{0..7}]
-        smull2          v17.4S, v0.8H, v2.8H    // v17.i32{0..3} = part 2 of: srcp[filterPos[0] + {0..7}] * filter[{0..7}]
-        smull2          v19.4S, v1.8H, v3.8H    // v19.i32{0..3} = part 2 of: srcp[filterPos[1] + {0..7}] * filter[{0..7}]
-        addp            v16.4S, v16.4S, v17.4S  // horizontal pair adding of the 8x32-bit multiplied values for part 1 into 4x32-bit
-        addp            v18.4S, v18.4S, v19.4S  // horizontal pair adding of the 8x32-bit multiplied values for part 2 into 4x32-bit
-        add             v4.4S, v4.4S, v16.4S    // update val accumulator for part 1
-        add             v5.4S, v5.4S, v18.4S    // update val accumulator for part 2
-        add             x13, x13, #8            // srcp += 8
-        subs            w7, w7, #8              // processed 8/filterSize
+        sbfiz           x7, x6, #1, #32         // filterSize*2 (*2 because int16)
+1:      ldr             w18, [x5], #4           // filterPos[idx]
+        ldr             w0, [x5], #4            // filterPos[idx + 1]
+        ldr             w11, [x5], #4           // filterPos[idx + 2]
+        ldr             w9, [x5], #4            // filterPos[idx + 3]
+        mov             x16, x4                 // filter0 = filter
+        add             x12, x16, x7            // filter1 = filter0 + filterSize*2
+        add             x13, x12, x7            // filter2 = filter1 + filterSize*2
+        add             x4, x13, x7             // filter3 = filter2 + filterSize*2
+        movi            v0.2D, #0               // val sum part 1 (for dst[0])
+        movi            v1.2D, #0               // val sum part 2 (for dst[1])
+        movi            v2.2D, #0               // val sum part 3 (for dst[2])
+        movi            v3.2D, #0               // val sum part 4 (for dst[3])
+        add             x17, x3, w18, UXTW      // srcp + filterPos[0]
+        add             x18, x3, w0, UXTW       // srcp + filterPos[1]
+        add             x0, x3, w11, UXTW       // srcp + filterPos[2]
+        add             x11, x3, w9, UXTW       // srcp + filterPos[3]
+        mov             w15, w6                 // filterSize counter
+2:      ld1             {v4.8B}, [x17], #8      // srcp[filterPos[0] + {0..7}]
+        ld1             {v5.8H}, [x16], #16     // load 8x16-bit filter values, part 1
+        ld1             {v6.8B}, [x18], #8      // srcp[filterPos[1] + {0..7}]
+        ld1             {v7.8H}, [x12], #16     // load 8x16-bit at filter+filterSize
+        uxtl            v4.8H, v4.8B            // unpack part 1 to 16-bit
+        smlal           v0.4S, v4.4H, v5.4H     // v0 accumulates srcp[filterPos[0] + {0..3}] * filter[{0..3}]
+        smlal2          v0.4S, v4.8H, v5.8H     // v0 accumulates srcp[filterPos[0] + {4..7}] * filter[{4..7}]
+        ld1             {v8.8B}, [x0], #8       // srcp[filterPos[2] + {0..7}]
+        ld1             {v9.8H}, [x13], #16     // load 8x16-bit at filter+2*filterSize
+        uxtl            v6.8H, v6.8B            // unpack part 2 to 16-bit
+        smlal           v1.4S, v6.4H, v7.4H     // v1 accumulates srcp[filterPos[1] + {0..3}] * filter[{0..3}]
+        uxtl            v8.8H, v8.8B            // unpack part 3 to 16-bit
+        smlal           v2.4S, v8.4H, v9.4H     // v2 accumulates srcp[filterPos[2] + {0..3}] * filter[{0..3}]
+        smlal2          v2.4S, v8.8H, v9.8H     // v2 accumulates srcp[filterPos[2] + {4..7}] * filter[{4..7}]
+        ld1             {v10.8B}, [x11], #8     // srcp[filterPos[3] + {0..7}]
+        smlal2          v1.4S, v6.8H, v7.8H     // v1 accumulates srcp[filterPos[1] + {4..7}] * filter[{4..7}]
+        ld1             {v11.8H}, [x4], #16     // load 8x16-bit at filter+3*filterSize
+        subs            w15, w15, #8            // j -= 8: processed 8/filterSize
+        uxtl            v10.8H, v10.8B          // unpack part 4 to 16-bit
+        smlal           v3.4S, v10.4H, v11.4H   // v3 accumulates srcp[filterPos[3] + {0..3}] * filter[{0..3}]
+        smlal2          v3.4S, v10.8H, v11.8H   // v3 accumulates srcp[filterPos[3] + {4..7}] * filter[{4..7}]
         b.gt            2b                      // inner loop if filterSize not consumed completely
-        mov             x4, x10                 // filter = filter2
-        add             x10, x10, w6, UXTW #1   // filter2 += filterSize*2
-        addp            v4.4S, v4.4S, v5.4S     // horizontal pair adding of the 8x32-bit sums into 4x32-bit
-        addp            v4.4S, v4.4S, v4.4S     // horizontal pair adding of the 4x32-bit sums into 2x32-bit
-        sqshrn          v4.4H, v4.4S, #7        // shift and clip the 2x16-bit final values
-        st1             {v4.S}[0], [x1], #4     // write to destination
-        subs            w2, w2, #2              // dstW -= 2
+        addp            v0.4S, v0.4S, v0.4S     // part0 horizontal pair adding
+        addp            v1.4S, v1.4S, v1.4S     // part1 horizontal pair adding
+        addp            v2.4S, v2.4S, v2.4S     // part2 horizontal pair adding
+        addp            v3.4S, v3.4S, v3.4S     // part3 horizontal pair adding
+        addp            v0.4S, v0.4S, v0.4S     // part0 horizontal pair adding
+        addp            v1.4S, v1.4S, v1.4S     // part1 horizontal pair adding
+        addp            v2.4S, v2.4S, v2.4S     // part2 horizontal pair adding
+        addp            v3.4S, v3.4S, v3.4S     // part3 horizontal pair adding
+        zip1            v0.4S, v0.4S, v1.4S     // part01 = zip values from part0 and part1
+        zip1            v2.4S, v2.4S, v3.4S     // part23 = zip values from part2 and part3
+        mov             v0.d[1], v2.d[0]        // part0123 = zip values from part01 and part23
+        subs            w2, w2, #4              // dstW -= 4
+        sqshrn          v0.4H, v0.4S, #7        // shift and clip the 4x16-bit final values
+        st1             {v0.4H}, [x1], #8       // write to destination part0123
         b.gt            1b                      // loop until end of line
         ret
 endfunc
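To see the headline change in isolation, the following AArch64 intrinsics sketch
contrasts the old and new inner-loop patterns (hypothetical helper names, assuming
<arm_neon.h>; the committed code is the assembly above):

#include <arm_neon.h>

/* Old pattern: widening multiplies (smull/smull2), a pairwise add (addp),
 * then a separate add into the accumulator. */
static inline int32x4_t mac_separate(int32x4_t acc, uint8x8_t px, int16x8_t coef)
{
    int16x8_t s  = vreinterpretq_s16_u16(vmovl_u8(px));            /* uxtl   */
    int32x4_t lo = vmull_s16(vget_low_s16(s), vget_low_s16(coef)); /* smull  */
    int32x4_t hi = vmull_high_s16(s, coef);                        /* smull2 */
    return vaddq_s32(acc, vpaddq_s32(lo, hi));                     /* addp + add */
}

/* New pattern: fused multiply-accumulate (smlal/smlal2) straight into the
 * accumulator; the horizontal reduction moves out of the inner loop. */
static inline int32x4_t mac_fused(int32x4_t acc, uint8x8_t px, int16x8_t coef)
{
    int16x8_t s = vreinterpretq_s16_u16(vmovl_u8(px));             /* uxtl   */
    acc = vmlal_s16(acc, vget_low_s16(s), vget_low_s16(coef));     /* smlal  */
    return vmlal_high_s16(acc, s, coef);                           /* smlal2 */
}

Dropping the per-iteration addp/add pair and interleaving four independent
accumulators gives the out-of-order cores more multiply-accumulates to keep in
flight, which is consistent with the 25-39% gains measured above.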