author:    Mans Rullgard <mans@mansr.com>  2012-05-10 17:40:30 +0100
committer: Mans Rullgard <mans@mansr.com>  2012-05-10 22:56:37 +0100
commit:    e54e6f25cfa8bb91af5ef2cff7204c631913a5b5 (patch)
tree:      48d51c37b3667febf236933ec63dcbaa503e5103 /libavcodec/arm/dsputil_neon.S
parent:    2eba6898c96fe3c920da0a0e30b829d05bf5cce1 (diff)
download:  ffmpeg-e54e6f25cfa8bb91af5ef2cff7204c631913a5b5.tar.gz
arm/neon: dsputil: use correct size specifiers on vld1/vst1
Change the size specifiers to match the actual element sizes
of the data. With strict alignment checking disabled (the default),
this makes no practical difference beyond better documenting the
code. With strict alignment checking enabled, it avoids alignment
traps on the unaligned loads.
Signed-off-by: Mans Rullgard <mans@mansr.com>
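For illustration only (not part of the patch), a minimal sketch of what the size specifier means here; the registers and addressing below are arbitrary and chosen for the example, not taken from dsputil_neon.S. Both forms move the same 16 bytes, but they declare different element sizes, and the element size is what strict alignment checking tests when no explicit alignment qualifier is given.

@ Hypothetical standalone example.
@ With SCTLR.A (strict alignment checking) set, a 64-bit element load from an
@ address that is not 8-byte aligned raises an alignment fault, while an 8-bit
@ element load accepts any address.
        vld1.64         {q0}, [r1], r2   @ 64-bit elements: needs 8-byte alignment under strict checking
        vld1.8          {q0}, [r1], r2   @ 8-bit elements: no alignment requirement
@ An explicit qualifier such as [r1,:128] still enforces 128-bit alignment
@ regardless of the element size, which is why the aligned accesses in the
@ patch keep their :64/:128 hints.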
Diffstat (limited to 'libavcodec/arm/dsputil_neon.S')
-rw-r--r-- | libavcodec/arm/dsputil_neon.S | 294
1 file changed, 147 insertions, 147 deletions
diff --git a/libavcodec/arm/dsputil_neon.S b/libavcodec/arm/dsputil_neon.S
index 21b1aba32b..b59c901ea6 100644
--- a/libavcodec/arm/dsputil_neon.S
+++ b/libavcodec/arm/dsputil_neon.S
@@ -44,22 +44,22 @@ endfunc
   .if \avg
         mov r12, r0
   .endif
-1:      vld1.64 {q0}, [r1], r2
-        vld1.64 {q1}, [r1], r2
-        vld1.64 {q2}, [r1], r2
+1:      vld1.8 {q0}, [r1], r2
+        vld1.8 {q1}, [r1], r2
+        vld1.8 {q2}, [r1], r2
         pld [r1, r2, lsl #2]
-        vld1.64 {q3}, [r1], r2
+        vld1.8 {q3}, [r1], r2
         pld [r1]
         pld [r1, r2]
         pld [r1, r2, lsl #1]
   .if \avg
-        vld1.64 {q8}, [r12,:128], r2
+        vld1.8 {q8}, [r12,:128], r2
         vrhadd.u8 q0, q0, q8
-        vld1.64 {q9}, [r12,:128], r2
+        vld1.8 {q9}, [r12,:128], r2
         vrhadd.u8 q1, q1, q9
-        vld1.64 {q10}, [r12,:128], r2
+        vld1.8 {q10}, [r12,:128], r2
         vrhadd.u8 q2, q2, q10
-        vld1.64 {q11}, [r12,:128], r2
+        vld1.8 {q11}, [r12,:128], r2
         vrhadd.u8 q3, q3, q11
   .endif
         subs r3, r3, #4
@@ -72,8 +72,8 @@ endfunc
 .endm
 .macro pixels16_x2 rnd=1, avg=0
-1:      vld1.64 {d0-d2}, [r1], r2
-        vld1.64 {d4-d6}, [r1], r2
+1:      vld1.8 {d0-d2}, [r1], r2
+        vld1.8 {d4-d6}, [r1], r2
         pld [r1]
         pld [r1, r2]
         subs r3, r3, #2
@@ -88,21 +88,21 @@ endfunc
         vrhadd.u8 q2, q2, q3
         sub r0, r0, r2
   .endif
-        vst1.64 {q0}, [r0,:128], r2
-        vst1.64 {q2}, [r0,:128], r2
+        vst1.8 {q0}, [r0,:128], r2
+        vst1.8 {q2}, [r0,:128], r2
         bne 1b
         bx lr
 .endm
 .macro pixels16_y2 rnd=1, avg=0
         sub r3, r3, #2
-        vld1.64 {q0}, [r1], r2
-        vld1.64 {q1}, [r1], r2
+        vld1.8 {q0}, [r1], r2
+        vld1.8 {q1}, [r1], r2
 1:      subs r3, r3, #2
         avg q2, q0, q1
-        vld1.64 {q0}, [r1], r2
+        vld1.8 {q0}, [r1], r2
         avg q3, q0, q1
-        vld1.64 {q1}, [r1], r2
+        vld1.8 {q1}, [r1], r2
         pld [r1]
         pld [r1, r2]
   .if \avg
@@ -112,12 +112,12 @@ endfunc
         vrhadd.u8 q3, q3, q9
         sub r0, r0, r2
   .endif
-        vst1.64 {q2}, [r0,:128], r2
-        vst1.64 {q3}, [r0,:128], r2
+        vst1.8 {q2}, [r0,:128], r2
+        vst1.8 {q3}, [r0,:128], r2
         bne 1b
         avg q2, q0, q1
-        vld1.64 {q0}, [r1], r2
+        vld1.8 {q0}, [r1], r2
         avg q3, q0, q1
   .if \avg
         vld1.8 {q8}, [r0,:128], r2
@@ -126,16 +126,16 @@ endfunc
         vrhadd.u8 q3, q3, q9
         sub r0, r0, r2
   .endif
-        vst1.64 {q2}, [r0,:128], r2
-        vst1.64 {q3}, [r0,:128], r2
+        vst1.8 {q2}, [r0,:128], r2
+        vst1.8 {q3}, [r0,:128], r2
         bx lr
 .endm
 .macro pixels16_xy2 rnd=1, avg=0
         sub r3, r3, #2
-        vld1.64 {d0-d2}, [r1], r2
-        vld1.64 {d4-d6}, [r1], r2
+        vld1.8 {d0-d2}, [r1], r2
+        vld1.8 {d4-d6}, [r1], r2
 NRND    vmov.i16 q13, #1
         pld [r1]
         pld [r1, r2]
@@ -146,7 +146,7 @@ NRND    vmov.i16 q13, #1
         vaddl.u8 q9, d4, d6
         vaddl.u8 q11, d5, d7
 1:      subs r3, r3, #2
-        vld1.64 {d0-d2}, [r1], r2
+        vld1.8 {d0-d2}, [r1], r2
         vadd.u16 q12, q8, q9
         pld [r1]
 NRND    vadd.u16 q12, q12, q13
@@ -160,9 +160,9 @@ NRND    vadd.u16 q1, q1, q13
         vrhadd.u8 q14, q14, q8
   .endif
         vaddl.u8 q8, d0, d30
-        vld1.64 {d2-d4}, [r1], r2
+        vld1.8 {d2-d4}, [r1], r2
         vaddl.u8 q10, d1, d31
-        vst1.64 {q14}, [r0,:128], r2
+        vst1.8 {q14}, [r0,:128], r2
         vadd.u16 q12, q8, q9
         pld [r1, r2]
 NRND    vadd.u16 q12, q12, q13
@@ -177,10 +177,10 @@ NRND    vadd.u16 q0, q0, q13
   .endif
         vaddl.u8 q9, d2, d4
         vaddl.u8 q11, d3, d5
-        vst1.64 {q15}, [r0,:128], r2
+        vst1.8 {q15}, [r0,:128], r2
         bgt 1b
-        vld1.64 {d0-d2}, [r1], r2
+        vld1.8 {d0-d2}, [r1], r2
         vadd.u16 q12, q8, q9
 NRND    vadd.u16 q12, q12, q13
         vext.8 q15, q0, q1, #1
@@ -194,7 +194,7 @@ NRND    vadd.u16 q1, q1, q13
   .endif
         vaddl.u8 q8, d0, d30
         vaddl.u8 q10, d1, d31
-        vst1.64 {q14}, [r0,:128], r2
+        vst1.8 {q14}, [r0,:128], r2
         vadd.u16 q12, q8, q9
 NRND    vadd.u16 q12, q12, q13
         vadd.u16 q0, q10, q11
@@ -205,44 +205,44 @@ NRND    vadd.u16 q0, q0, q13
         vld1.8 {q9}, [r0,:128]
         vrhadd.u8 q15, q15, q9
   .endif
-        vst1.64 {q15}, [r0,:128], r2
+        vst1.8 {q15}, [r0,:128], r2
         bx lr
 .endm
 .macro pixels8 rnd=1, avg=0
-1:      vld1.64 {d0}, [r1], r2
-        vld1.64 {d1}, [r1], r2
-        vld1.64 {d2}, [r1], r2
+1:      vld1.8 {d0}, [r1], r2
+        vld1.8 {d1}, [r1], r2
+        vld1.8 {d2}, [r1], r2
         pld [r1, r2, lsl #2]
-        vld1.64 {d3}, [r1], r2
+        vld1.8 {d3}, [r1], r2
         pld [r1]
         pld [r1, r2]
         pld [r1, r2, lsl #1]
   .if \avg
-        vld1.64 {d4}, [r0,:64], r2
+        vld1.8 {d4}, [r0,:64], r2
         vrhadd.u8 d0, d0, d4
-        vld1.64 {d5}, [r0,:64], r2
+        vld1.8 {d5}, [r0,:64], r2
         vrhadd.u8 d1, d1, d5
-        vld1.64 {d6}, [r0,:64], r2
+        vld1.8 {d6}, [r0,:64], r2
         vrhadd.u8 d2, d2, d6
-        vld1.64 {d7}, [r0,:64], r2
+        vld1.8 {d7}, [r0,:64], r2
         vrhadd.u8 d3, d3, d7
         sub r0, r0, r2, lsl #2
   .endif
         subs r3, r3, #4
-        vst1.64 {d0}, [r0,:64], r2
-        vst1.64 {d1}, [r0,:64], r2
-        vst1.64 {d2}, [r0,:64], r2
-        vst1.64 {d3}, [r0,:64], r2
+        vst1.8 {d0}, [r0,:64], r2
+        vst1.8 {d1}, [r0,:64], r2
+        vst1.8 {d2}, [r0,:64], r2
+        vst1.8 {d3}, [r0,:64], r2
         bne 1b
         bx lr
 .endm
 .macro pixels8_x2 rnd=1, avg=0
-1:      vld1.64 {q0}, [r1], r2
+1:      vld1.8 {q0}, [r1], r2
         vext.8 d1, d0, d1, #1
-        vld1.64 {q1}, [r1], r2
+        vld1.8 {q1}, [r1], r2
         vext.8 d3, d2, d3, #1
         pld [r1]
         pld [r1, r2]
@@ -255,21 +255,21 @@ NRND    vadd.u16 q0, q0, q13
         vrhadd.u8 q0, q0, q2
         sub r0, r0, r2
   .endif
-        vst1.64 {d0}, [r0,:64], r2
-        vst1.64 {d1}, [r0,:64], r2
+        vst1.8 {d0}, [r0,:64], r2
+        vst1.8 {d1}, [r0,:64], r2
         bne 1b
         bx lr
 .endm
 .macro pixels8_y2 rnd=1, avg=0
         sub r3, r3, #2
-        vld1.64 {d0}, [r1], r2
-        vld1.64 {d1}, [r1], r2
+        vld1.8 {d0}, [r1], r2
+        vld1.8 {d1}, [r1], r2
 1:      subs r3, r3, #2
         avg d4, d0, d1
-        vld1.64 {d0}, [r1], r2
+        vld1.8 {d0}, [r1], r2
         avg d5, d0, d1
-        vld1.64 {d1}, [r1], r2
+        vld1.8 {d1}, [r1], r2
         pld [r1]
         pld [r1, r2]
   .if \avg
@@ -278,12 +278,12 @@ NRND    vadd.u16 q0, q0, q13
         vrhadd.u8 q2, q2, q1
         sub r0, r0, r2
   .endif
-        vst1.64 {d4}, [r0,:64], r2
-        vst1.64 {d5}, [r0,:64], r2
+        vst1.8 {d4}, [r0,:64], r2
+        vst1.8 {d5}, [r0,:64], r2
         bne 1b
         avg d4, d0, d1
-        vld1.64 {d0}, [r1], r2
+        vld1.8 {d0}, [r1], r2
         avg d5, d0, d1
   .if \avg
         vld1.8 {d2}, [r0,:64], r2
@@ -291,16 +291,16 @@ NRND    vadd.u16 q0, q0, q13
         vrhadd.u8 q2, q2, q1
         sub r0, r0, r2
   .endif
-        vst1.64 {d4}, [r0,:64], r2
-        vst1.64 {d5}, [r0,:64], r2
+        vst1.8 {d4}, [r0,:64], r2
+        vst1.8 {d5}, [r0,:64], r2
         bx lr
 .endm
 .macro pixels8_xy2 rnd=1, avg=0
         sub r3, r3, #2
-        vld1.64 {q0}, [r1], r2
-        vld1.64 {q1}, [r1], r2
+        vld1.8 {q0}, [r1], r2
+        vld1.8 {q1}, [r1], r2
 NRND    vmov.i16 q11, #1
         pld [r1]
         pld [r1, r2]
@@ -309,14 +309,14 @@ NRND    vmov.i16 q11, #1
         vaddl.u8 q8, d0, d4
         vaddl.u8 q9, d2, d6
 1:      subs r3, r3, #2
-        vld1.64 {q0}, [r1], r2
+        vld1.8 {q0}, [r1], r2
         pld [r1]
         vadd.u16 q10, q8, q9
         vext.8 d4, d0, d1, #1
 NRND    vadd.u16 q10, q10, q11
         vaddl.u8 q8, d0, d4
         shrn d5, q10, #2
-        vld1.64 {q1}, [r1], r2
+        vld1.8 {q1}, [r1], r2
         vadd.u16 q10, q8, q9
         pld [r1, r2]
   .if \avg
@@ -324,7 +324,7 @@ NRND    vadd.u16 q10, q10, q11
         vrhadd.u8 d5, d5, d7
   .endif
 NRND    vadd.u16 q10, q10, q11
-        vst1.64 {d5}, [r0,:64], r2
+        vst1.8 {d5}, [r0,:64], r2
         shrn d7, q10, #2
   .if \avg
         vld1.8 {d5}, [r0,:64]
@@ -332,10 +332,10 @@ NRND    vadd.u16 q10, q10, q11
   .endif
         vext.8 d6, d2, d3, #1
         vaddl.u8 q9, d2, d6
-        vst1.64 {d7}, [r0,:64], r2
+        vst1.8 {d7}, [r0,:64], r2
         bgt 1b
-        vld1.64 {q0}, [r1], r2
+        vld1.8 {q0}, [r1], r2
         vadd.u16 q10, q8, q9
         vext.8 d4, d0, d1, #1
 NRND    vadd.u16 q10, q10, q11
@@ -347,13 +347,13 @@ NRND    vadd.u16 q10, q10, q11
         vrhadd.u8 d5, d5, d7
   .endif
 NRND    vadd.u16 q10, q10, q11
-        vst1.64 {d5}, [r0,:64], r2
+        vst1.8 {d5}, [r0,:64], r2
         shrn d7, q10, #2
   .if \avg
         vld1.8 {d5}, [r0,:64]
         vrhadd.u8 d7, d7, d5
   .endif
-        vst1.64 {d7}, [r0,:64], r2
+        vst1.8 {d7}, [r0,:64], r2
         bx lr
 .endm
@@ -429,147 +429,147 @@ endfunc
         pixfunc2 avg_, pixels8_xy2, avg=1
 function ff_put_pixels_clamped_neon, export=1
-        vld1.64 {d16-d19}, [r0,:128]!
+        vld1.16 {d16-d19}, [r0,:128]!
         vqmovun.s16 d0, q8
-        vld1.64 {d20-d23}, [r0,:128]!
+        vld1.16 {d20-d23}, [r0,:128]!
         vqmovun.s16 d1, q9
-        vld1.64 {d24-d27}, [r0,:128]!
+        vld1.16 {d24-d27}, [r0,:128]!
         vqmovun.s16 d2, q10
-        vld1.64 {d28-d31}, [r0,:128]!
+        vld1.16 {d28-d31}, [r0,:128]!
         vqmovun.s16 d3, q11
-        vst1.64 {d0}, [r1,:64], r2
+        vst1.8 {d0}, [r1,:64], r2
         vqmovun.s16 d4, q12
-        vst1.64 {d1}, [r1,:64], r2
+        vst1.8 {d1}, [r1,:64], r2
         vqmovun.s16 d5, q13
-        vst1.64 {d2}, [r1,:64], r2
+        vst1.8 {d2}, [r1,:64], r2
         vqmovun.s16 d6, q14
-        vst1.64 {d3}, [r1,:64], r2
+        vst1.8 {d3}, [r1,:64], r2
         vqmovun.s16 d7, q15
-        vst1.64 {d4}, [r1,:64], r2
-        vst1.64 {d5}, [r1,:64], r2
-        vst1.64 {d6}, [r1,:64], r2
-        vst1.64 {d7}, [r1,:64], r2
+        vst1.8 {d4}, [r1,:64], r2
+        vst1.8 {d5}, [r1,:64], r2
+        vst1.8 {d6}, [r1,:64], r2
+        vst1.8 {d7}, [r1,:64], r2
         bx lr
 endfunc
 function ff_put_signed_pixels_clamped_neon, export=1
         vmov.u8 d31, #128
-        vld1.64 {d16-d17}, [r0,:128]!
+        vld1.16 {d16-d17}, [r0,:128]!
         vqmovn.s16 d0, q8
-        vld1.64 {d18-d19}, [r0,:128]!
+        vld1.16 {d18-d19}, [r0,:128]!
         vqmovn.s16 d1, q9
-        vld1.64 {d16-d17}, [r0,:128]!
+        vld1.16 {d16-d17}, [r0,:128]!
         vqmovn.s16 d2, q8
-        vld1.64 {d18-d19}, [r0,:128]!
+        vld1.16 {d18-d19}, [r0,:128]!
         vadd.u8 d0, d0, d31
-        vld1.64 {d20-d21}, [r0,:128]!
+        vld1.16 {d20-d21}, [r0,:128]!
         vadd.u8 d1, d1, d31
-        vld1.64 {d22-d23}, [r0,:128]!
+        vld1.16 {d22-d23}, [r0,:128]!
         vadd.u8 d2, d2, d31
-        vst1.64 {d0}, [r1,:64], r2
+        vst1.8 {d0}, [r1,:64], r2
         vqmovn.s16 d3, q9
-        vst1.64 {d1}, [r1,:64], r2
+        vst1.8 {d1}, [r1,:64], r2
         vqmovn.s16 d4, q10
-        vst1.64 {d2}, [r1,:64], r2
+        vst1.8 {d2}, [r1,:64], r2
         vqmovn.s16 d5, q11
-        vld1.64 {d24-d25}, [r0,:128]!
+        vld1.16 {d24-d25}, [r0,:128]!
         vadd.u8 d3, d3, d31
-        vld1.64 {d26-d27}, [r0,:128]!
+        vld1.16 {d26-d27}, [r0,:128]!
         vadd.u8 d4, d4, d31
         vadd.u8 d5, d5, d31
-        vst1.64 {d3}, [r1,:64], r2
+        vst1.8 {d3}, [r1,:64], r2
         vqmovn.s16 d6, q12
-        vst1.64 {d4}, [r1,:64], r2
+        vst1.8 {d4}, [r1,:64], r2
         vqmovn.s16 d7, q13
-        vst1.64 {d5}, [r1,:64], r2
+        vst1.8 {d5}, [r1,:64], r2
         vadd.u8 d6, d6, d31
         vadd.u8 d7, d7, d31
-        vst1.64 {d6}, [r1,:64], r2
-        vst1.64 {d7}, [r1,:64], r2
+        vst1.8 {d6}, [r1,:64], r2
+        vst1.8 {d7}, [r1,:64], r2
         bx lr
 endfunc
 function ff_add_pixels_clamped_neon, export=1
         mov r3, r1
-        vld1.64 {d16}, [r1,:64], r2
-        vld1.64 {d0-d1}, [r0,:128]!
+        vld1.8 {d16}, [r1,:64], r2
+        vld1.16 {d0-d1}, [r0,:128]!
         vaddw.u8 q0, q0, d16
-        vld1.64 {d17}, [r1,:64], r2
-        vld1.64 {d2-d3}, [r0,:128]!
+        vld1.8 {d17}, [r1,:64], r2
+        vld1.16 {d2-d3}, [r0,:128]!
         vqmovun.s16 d0, q0
-        vld1.64 {d18}, [r1,:64], r2
+        vld1.8 {d18}, [r1,:64], r2
         vaddw.u8 q1, q1, d17
-        vld1.64 {d4-d5}, [r0,:128]!
+        vld1.16 {d4-d5}, [r0,:128]!
         vaddw.u8 q2, q2, d18
-        vst1.64 {d0}, [r3,:64], r2
+        vst1.8 {d0}, [r3,:64], r2
         vqmovun.s16 d2, q1
-        vld1.64 {d19}, [r1,:64], r2
-        vld1.64 {d6-d7}, [r0,:128]!
+        vld1.8 {d19}, [r1,:64], r2
+        vld1.16 {d6-d7}, [r0,:128]!
         vaddw.u8 q3, q3, d19
         vqmovun.s16 d4, q2
-        vst1.64 {d2}, [r3,:64], r2
-        vld1.64 {d16}, [r1,:64], r2
+        vst1.8 {d2}, [r3,:64], r2
+        vld1.8 {d16}, [r1,:64], r2
         vqmovun.s16 d6, q3
-        vld1.64 {d0-d1}, [r0,:128]!
+        vld1.16 {d0-d1}, [r0,:128]!
         vaddw.u8 q0, q0, d16
-        vst1.64 {d4}, [r3,:64], r2
-        vld1.64 {d17}, [r1,:64], r2
-        vld1.64 {d2-d3}, [r0,:128]!
+        vst1.8 {d4}, [r3,:64], r2
+        vld1.8 {d17}, [r1,:64], r2
+        vld1.16 {d2-d3}, [r0,:128]!
         vaddw.u8 q1, q1, d17
-        vst1.64 {d6}, [r3,:64], r2
+        vst1.8 {d6}, [r3,:64], r2
         vqmovun.s16 d0, q0
-        vld1.64 {d18}, [r1,:64], r2
-        vld1.64 {d4-d5}, [r0,:128]!
+        vld1.8 {d18}, [r1,:64], r2
+        vld1.16 {d4-d5}, [r0,:128]!
         vaddw.u8 q2, q2, d18
-        vst1.64 {d0}, [r3,:64], r2
+        vst1.8 {d0}, [r3,:64], r2
         vqmovun.s16 d2, q1
-        vld1.64 {d19}, [r1,:64], r2
+        vld1.8 {d19}, [r1,:64], r2
         vqmovun.s16 d4, q2
-        vld1.64 {d6-d7}, [r0,:128]!
+        vld1.16 {d6-d7}, [r0,:128]!
         vaddw.u8 q3, q3, d19
-        vst1.64 {d2}, [r3,:64], r2
+        vst1.8 {d2}, [r3,:64], r2
         vqmovun.s16 d6, q3
-        vst1.64 {d4}, [r3,:64], r2
-        vst1.64 {d6}, [r3,:64], r2
+        vst1.8 {d4}, [r3,:64], r2
+        vst1.8 {d6}, [r3,:64], r2
         bx lr
 endfunc
 function ff_vector_fmul_neon, export=1
         subs r3, r3, #8
-        vld1.64 {d0-d3}, [r1,:128]!
-        vld1.64 {d4-d7}, [r2,:128]!
+        vld1.32 {d0-d3}, [r1,:128]!
+        vld1.32 {d4-d7}, [r2,:128]!
         vmul.f32 q8, q0, q2
         vmul.f32 q9, q1, q3
         beq 3f
         bics ip, r3, #15
         beq 2f
 1:      subs ip, ip, #16
-        vld1.64 {d0-d1}, [r1,:128]!
-        vld1.64 {d4-d5}, [r2,:128]!
+        vld1.32 {d0-d1}, [r1,:128]!
+        vld1.32 {d4-d5}, [r2,:128]!
         vmul.f32 q10, q0, q2
-        vld1.64 {d2-d3}, [r1,:128]!
-        vld1.64 {d6-d7}, [r2,:128]!
+        vld1.32 {d2-d3}, [r1,:128]!
+        vld1.32 {d6-d7}, [r2,:128]!
         vmul.f32 q11, q1, q3
-        vst1.64 {d16-d19},[r0,:128]!
-        vld1.64 {d0-d1}, [r1,:128]!
-        vld1.64 {d4-d5}, [r2,:128]!
+        vst1.32 {d16-d19},[r0,:128]!
+        vld1.32 {d0-d1}, [r1,:128]!
+        vld1.32 {d4-d5}, [r2,:128]!
         vmul.f32 q8, q0, q2
-        vld1.64 {d2-d3}, [r1,:128]!
-        vld1.64 {d6-d7}, [r2,:128]!
+        vld1.32 {d2-d3}, [r1,:128]!
+        vld1.32 {d6-d7}, [r2,:128]!
         vmul.f32 q9, q1, q3
-        vst1.64 {d20-d23},[r0,:128]!
+        vst1.32 {d20-d23},[r0,:128]!
         bne 1b
         ands r3, r3, #15
         beq 3f
-2:      vld1.64 {d0-d1}, [r1,:128]!
-        vld1.64 {d4-d5}, [r2,:128]!
-        vst1.64 {d16-d17},[r0,:128]!
+2:      vld1.32 {d0-d1}, [r1,:128]!
+        vld1.32 {d4-d5}, [r2,:128]!
+        vst1.32 {d16-d17},[r0,:128]!
         vmul.f32 q8, q0, q2
-        vld1.64 {d2-d3}, [r1,:128]!
-        vld1.64 {d6-d7}, [r2,:128]!
-        vst1.64 {d18-d19},[r0,:128]!
+        vld1.32 {d2-d3}, [r1,:128]!
+        vld1.32 {d6-d7}, [r2,:128]!
+        vst1.32 {d18-d19},[r0,:128]!
         vmul.f32 q9, q1, q3
-3:      vst1.64 {d16-d19},[r0,:128]!
+3:      vst1.32 {d16-d19},[r0,:128]!
         bx lr
 endfunc
@@ -582,10 +582,10 @@ function ff_vector_fmul_window_neon, export=1
         add r4, r3, r5, lsl #3
         add ip, r0, r5, lsl #3
         mov r5, #-16
-        vld1.64 {d0,d1}, [r1,:128]!
-        vld1.64 {d2,d3}, [r2,:128], r5
-        vld1.64 {d4,d5}, [r3,:128]!
-        vld1.64 {d6,d7}, [r4,:128], r5
+        vld1.32 {d0,d1}, [r1,:128]!
+        vld1.32 {d2,d3}, [r2,:128], r5
+        vld1.32 {d4,d5}, [r3,:128]!
+        vld1.32 {d6,d7}, [r4,:128], r5
 1:      subs lr, lr, #4
         vmul.f32 d22, d0, d4
         vrev64.32 q3, q3
@@ -595,19 +595,19 @@ function ff_vector_fmul_window_neon, export=1
         vmul.f32 d21, d1, d6
         beq 2f
         vmla.f32 d22, d3, d7
-        vld1.64 {d0,d1}, [r1,:128]!
+        vld1.32 {d0,d1}, [r1,:128]!
         vmla.f32 d23, d2, d6
-        vld1.64 {d18,d19},[r2,:128], r5
+        vld1.32 {d18,d19},[r2,:128], r5
         vmls.f32 d20, d3, d4
-        vld1.64 {d24,d25},[r3,:128]!
+        vld1.32 {d24,d25},[r3,:128]!
         vmls.f32 d21, d2, d5
-        vld1.64 {d6,d7}, [r4,:128], r5
+        vld1.32 {d6,d7}, [r4,:128], r5
         vmov q1, q9
         vrev64.32 q11, q11
         vmov q2, q12
         vswp d22, d23
-        vst1.64 {d20,d21},[r0,:128]!
-        vst1.64 {d22,d23},[ip,:128], r5
+        vst1.32 {d20,d21},[r0,:128]!
+        vst1.32 {d22,d23},[ip,:128], r5
         b 1b
 2:      vmla.f32 d22, d3, d7
         vmla.f32 d23, d2, d6
@@ -615,8 +615,8 @@ function ff_vector_fmul_window_neon, export=1
         vmls.f32 d21, d2, d5
         vrev64.32 q11, q11
         vswp d22, d23
-        vst1.64 {d20,d21},[r0,:128]!
-        vst1.64 {d22,d23},[ip,:128], r5
+        vst1.32 {d20,d21},[r0,:128]!
+        vst1.32 {d22,d23},[ip,:128], r5
         pop {r4,r5,pc}
 endfunc