author     Michael Niedermayer <michaelni@gmx.at>    2013-07-22 11:55:20 +0200
committer  Michael Niedermayer <michaelni@gmx.at>    2013-07-22 11:55:40 +0200
commit     3cab2280058c582102346a50a8a503216055bc2d
tree       e05718f7eb214b3d4ce7c6c99a9af822945f9e30
parent     1573274b914ed67ed6a71a02eddd4e874f1c54bd
parent     d6e4f5fef0d811e180fd7541941e07dca9e11dc0
Merge commit 'd6e4f5fef0d811e180fd7541941e07dca9e11dc0'
* commit 'd6e4f5fef0d811e180fd7541941e07dca9e11dc0':
arm: Add VFP-accelerated version of int32_to_float_fmul_array8
Merged-by: Michael Niedermayer <michaelni@gmx.at>
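
For orientation, the operation being accelerated converts each block of 8 int32 samples to float and scales the block by its own factor from mul[]. A minimal C sketch of that behaviour, not taken from the patch (the name int32_to_float_fmul_array8_ref is invented here, and the real FmtConvertContext callback additionally takes the context pointer):

    #include <stdint.h>

    /* Illustrative reference, assuming len is a multiple of 8 as the VFP
     * routine requires: sample i is scaled by mul[i / 8]. */
    static void int32_to_float_fmul_array8_ref(float *dst, const int32_t *src,
                                               const float *mul, int len)
    {
        int i, j;
        for (i = 0; i < len; i += 8)
            for (j = 0; j < 8; j++)
                dst[i + j] = src[i + j] * mul[i >> 3];
    }
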
-rw-r--r--  libavcodec/arm/fmtconvert_init_arm.c |   6
-rw-r--r--  libavcodec/arm/fmtconvert_vfp.S      | 162
2 files changed, 167 insertions, 1 deletion
diff --git a/libavcodec/arm/fmtconvert_init_arm.c b/libavcodec/arm/fmtconvert_init_arm.c
index a8ca276f7b..185f1b99dd 100644
--- a/libavcodec/arm/fmtconvert_init_arm.c
+++ b/libavcodec/arm/fmtconvert_init_arm.c
@@ -30,6 +30,9 @@ void ff_int32_to_float_fmul_scalar_neon(float *dst, const int32_t *src,
 void ff_int32_to_float_fmul_scalar_vfp(float *dst, const int32_t *src,
                                        float mul, int len);
+void ff_int32_to_float_fmul_array8_vfp(FmtConvertContext *c, float *dst,
+                                       const int32_t *src, const float *mul,
+                                       int len);
 
 void ff_float_to_int16_neon(int16_t *dst, const float *src, long len);
 void ff_float_to_int16_interleave_neon(int16_t *, const float **, long, int);
@@ -42,10 +45,11 @@ av_cold void ff_fmt_convert_init_arm(FmtConvertContext *c, AVCodecContext *avctx
 
     if (have_vfp(cpu_flags) && have_armv6(cpu_flags)) {
         if (!have_vfpv3(cpu_flags)) {
-            // This function doesn't use anything armv6 specific in itself,
+            // These functions don't use anything armv6 specific in themselves,
             // but ff_float_to_int16_vfp which is in the same assembly source
             // file does, thus the whole file requires armv6 to be built.
             c->int32_to_float_fmul_scalar = ff_int32_to_float_fmul_scalar_vfp;
+            c->int32_to_float_fmul_array8 = ff_int32_to_float_fmul_array8_vfp;
         }
 
         c->float_to_int16 = ff_float_to_int16_vfp;
diff --git a/libavcodec/arm/fmtconvert_vfp.S b/libavcodec/arm/fmtconvert_vfp.S
index 3cc3e56d0c..a6d4ebdbbc 100644
--- a/libavcodec/arm/fmtconvert_vfp.S
+++ b/libavcodec/arm/fmtconvert_vfp.S
@@ -83,6 +83,168 @@ endfunc
  * Assume len is a multiple of 8, destination buffer is at least 4 bytes aligned
  * (16 bytes alignment is best for BCM2835), little-endian.
  */
+@ void ff_int32_to_float_fmul_array8_vfp(FmtConvertContext *c, float *dst, const int32_t *src, const float *mul, int len)
+function ff_int32_to_float_fmul_array8_vfp, export=1
+        push    {lr}
+        ldr     a1, [sp, #4]
+        subs    lr, a1, #3*8
+        bcc     50f                     @ too short to pipeline
+        @ Now need to find (len / 8) % 3. The approximation
+        @ x / 24 = (x * 0xAB) >> 12
+        @ is good for x < 4096, which is true for both AC3 and DCA.
+        mov     a1, #0xAB
+        ldr     ip, =0x03070000         @ RunFast mode, short vectors of length 8, stride 1
+        mul     a1, lr, a1
+        vpush   {s16-s31}
+        mov     a1, a1, lsr #12
+        add     a1, a1, a1, lsl #1
+        rsb     a1, a1, lr, lsr #3
+        cmp     a1, #1
+        fmrx    a1, FPSCR
+        fmxr    FPSCR, ip
+        beq     11f
+        blo     10f
+        @ Array is (2 + multiple of 3) x 8 floats long
+        @ drop through...
+        vldmia          a3!, {s16-s23}
+        vldmia          a4!, {s2,s3}
+        vldmia          a3!, {s24-s31}
+        vcvt.f32.s32    s16, s16
+        vcvt.f32.s32    s17, s17
+        vcvt.f32.s32    s18, s18
+        vcvt.f32.s32    s19, s19
+        vcvt.f32.s32    s20, s20
+        vcvt.f32.s32    s21, s21
+        vcvt.f32.s32    s22, s22
+        vcvt.f32.s32    s23, s23
+        vmul.f32        s16, s16, s2
+        @ drop through...
+3:
+        vldmia          a3!, {s8-s15}
+        vldmia          a4!, {s1}
+        vcvt.f32.s32    s24, s24
+        vcvt.f32.s32    s25, s25
+        vcvt.f32.s32    s26, s26
+        vcvt.f32.s32    s27, s27
+        vcvt.f32.s32    s28, s28
+        vcvt.f32.s32    s29, s29
+        vcvt.f32.s32    s30, s30
+        vcvt.f32.s32    s31, s31
+        vmul.f32        s24, s24, s3
+        vstmia          a2!, {s16-s19}
+        vstmia          a2!, {s20-s23}
+2:
+        vldmia          a3!, {s16-s23}
+        vldmia          a4!, {s2}
+        vcvt.f32.s32    s8, s8
+        vcvt.f32.s32    s9, s9
+        vcvt.f32.s32    s10, s10
+        vcvt.f32.s32    s11, s11
+        vcvt.f32.s32    s12, s12
+        vcvt.f32.s32    s13, s13
+        vcvt.f32.s32    s14, s14
+        vcvt.f32.s32    s15, s15
+        vmul.f32        s8, s8, s1
+        vstmia          a2!, {s24-s27}
+        vstmia          a2!, {s28-s31}
+1:
+        vldmia          a3!, {s24-s31}
+        vldmia          a4!, {s3}
+        vcvt.f32.s32    s16, s16
+        vcvt.f32.s32    s17, s17
+        vcvt.f32.s32    s18, s18
+        vcvt.f32.s32    s19, s19
+        vcvt.f32.s32    s20, s20
+        vcvt.f32.s32    s21, s21
+        vcvt.f32.s32    s22, s22
+        vcvt.f32.s32    s23, s23
+        vmul.f32        s16, s16, s2
+        vstmia          a2!, {s8-s11}
+        vstmia          a2!, {s12-s15}
+
+        subs            lr, lr, #8*3
+        bpl             3b
+
+        vcvt.f32.s32    s24, s24
+        vcvt.f32.s32    s25, s25
+        vcvt.f32.s32    s26, s26
+        vcvt.f32.s32    s27, s27
+        vcvt.f32.s32    s28, s28
+        vcvt.f32.s32    s29, s29
+        vcvt.f32.s32    s30, s30
+        vcvt.f32.s32    s31, s31
+        vmul.f32        s24, s24, s3
+        vstmia          a2!, {s16-s19}
+        vstmia          a2!, {s20-s23}
+        vstmia          a2!, {s24-s27}
+        vstmia          a2!, {s28-s31}
+
+        fmxr            FPSCR, a1
+        vpop            {s16-s31}
+        pop             {pc}
+
+10:     @ Array is (multiple of 3) x 8 floats long
+        vldmia          a3!, {s8-s15}
+        vldmia          a4!, {s1,s2}
+        vldmia          a3!, {s16-s23}
+        vcvt.f32.s32    s8, s8
+        vcvt.f32.s32    s9, s9
+        vcvt.f32.s32    s10, s10
+        vcvt.f32.s32    s11, s11
+        vcvt.f32.s32    s12, s12
+        vcvt.f32.s32    s13, s13
+        vcvt.f32.s32    s14, s14
+        vcvt.f32.s32    s15, s15
+        vmul.f32        s8, s8, s1
+        b               1b
+
+11:     @ Array is (1 + multiple of 3) x 8 floats long
+        vldmia          a3!, {s24-s31}
+        vldmia          a4!, {s3}
+        vldmia          a3!, {s8-s15}
+        vldmia          a4!, {s1}
+        vcvt.f32.s32    s24, s24
+        vcvt.f32.s32    s25, s25
+        vcvt.f32.s32    s26, s26
+        vcvt.f32.s32    s27, s27
+        vcvt.f32.s32    s28, s28
+        vcvt.f32.s32    s29, s29
+        vcvt.f32.s32    s30, s30
+        vcvt.f32.s32    s31, s31
+        vmul.f32        s24, s24, s3
+        b               2b
+
+50:
+        ldr             lr, =0x03070000 @ RunFast mode, short vectors of length 8, stride 1
+        fmrx            ip, FPSCR
+        fmxr            FPSCR, lr
+51:
+        vldmia          a3!, {s8-s15}
+        vldmia          a4!, {s0}
+        vcvt.f32.s32    s8, s8
+        vcvt.f32.s32    s9, s9
+        vcvt.f32.s32    s10, s10
+        vcvt.f32.s32    s11, s11
+        vcvt.f32.s32    s12, s12
+        vcvt.f32.s32    s13, s13
+        vcvt.f32.s32    s14, s14
+        vcvt.f32.s32    s15, s15
+        vmul.f32        s8, s8, s0
+        subs            a1, a1, #8
+        vstmia          a2!, {s8-s11}
+        vstmia          a2!, {s12-s15}
+        bne             51b
+
+        fmxr            FPSCR, ip
+        pop             {pc}
+endfunc
+
+/**
+ * ARM VFP optimised int32 to float conversion.
+ * Assume len is a multiple of 8, destination buffer is at least 4 bytes aligned
+ * (16 bytes alignment is best for BCM2835), little-endian.
+ * TODO: could be further optimised by unrolling and interleaving, as above
+ */
 @ void ff_int32_to_float_fmul_scalar_vfp(float *dst, const int32_t *src, float mul, int len)
 function ff_int32_to_float_fmul_scalar_vfp, export=1
 VFP     tmp     .req    a4
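
A note on the prologue of the new function: it computes (len / 8) % 3 with the fixed-point approximation stated in the comments, x / 24 = (x * 0xAB) >> 12. The identity holds for every multiple of 8 below 4096, which covers the values the code actually feeds it (x = len - 24, with len a multiple of 8). A standalone C check of that claim, not part of the patch:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned x;
        /* x = len - 24 with len a multiple of 8, so only multiples of 8 occur */
        for (x = 0; x < 4096; x += 8)
            assert(((x * 0xABu) >> 12) == x / 24);
        printf("(x * 0xAB) >> 12 == x / 24 for all multiples of 8 below 4096\n");
        return 0;
    }

The remainder then selects among the three software-pipelined entry points: fall-through for remainder 2, label 10 for remainder 0, label 11 for remainder 1.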
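
The FPSCR value 0x03070000 written with fmxr enables the VFP short-vector mode the routine depends on. A hedged breakdown of that constant (bit positions follow the ARM FPSCR layout; the macro names below are invented for illustration, not taken from FFmpeg):

    #include <stdint.h>
    #include <stdio.h>

    #define FPSCR_DN       (1u << 25)          /* default NaN (part of RunFast mode)   */
    #define FPSCR_FZ       (1u << 24)          /* flush-to-zero (part of RunFast mode) */
    #define FPSCR_LEN(n)   (((n) - 1u) << 16)  /* short-vector length, bits 18:16      */
    #define FPSCR_STRIDE_1 (0u << 20)          /* stride 1, bits 21:20                 */

    int main(void)
    {
        uint32_t mode = FPSCR_DN | FPSCR_FZ | FPSCR_LEN(8) | FPSCR_STRIDE_1;
        printf("0x%08X\n", (unsigned)mode);    /* prints 0x03070000 */
        return 0;
    }

With LEN set for vectors of 8, a single vmul.f32 whose destination lies outside bank s0-s7 operates on 8 consecutive registers at once, which is how one multiply per block scales all 8 converted samples by the factor loaded into s0-s3.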