author     Michael Niedermayer <michaelni@gmx.at>    2011-12-07 02:04:00 +0100
committer  Michael Niedermayer <michaelni@gmx.at>    2011-12-07 02:04:00 +0100
commit     ff4800c16bae15817b35eaa22d0113849849114b (patch)
tree       123864d2f3118a00ba05995aa956838dde208a04 /libavcodec/arm
parent     d9ced9fec63e7363a52e4b2e7c3f7d2197256f38 (diff)
parent     4d9ec050a21c953f5a6e31483cf5ebd344afa3fd (diff)
download   ffmpeg-ff4800c16bae15817b35eaa22d0113849849114b.tar.gz
Merge remote-tracking branch 'qatar/master'
* qatar/master:
Code cleanup - mpegvideo.c - 500-1000line
rv40: NEON optimised weighted prediction
rv40: NEON optimised chroma MC
ARM: move NEON H264 chroma mc to a separate file
rv34: NEON optimised inverse transform functions
Conflicts:
libavcodec/mpegvideo.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
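
Note: the merged ff_{put,avg}_{h264,rv40}_chroma_mc{8,4}_neon functions all compute the same 1/8-pel bilinear interpolation; the only per-codec difference is the final rounding, which is why h264cmc_neon.S below is templated on a `codec` macro argument. A minimal scalar sketch of the mc8 case (a hypothetical helper, not part of this commit): `bias` is the rounding constant 32 for H.264 (vrshrn #6), while for RV40 it is loaded from the rv40bias table as rv40bias[(y >> 1) * 4 + (x >> 1)] before a non-rounding shift.

    /* scalar model of the templated chroma_mc8 kernel: 1/8-pel bilinear filter */
    static void put_chroma_mc8_c(uint8_t *dst, const uint8_t *src, int stride,
                                 int h, int x, int y, int bias)
    {
        const int A = (8 - x) * (8 - y);   /* top-left weight     */
        const int B =      x  * (8 - y);   /* top-right weight    */
        const int C = (8 - x) *      y;    /* bottom-left weight  */
        const int D =      x  *      y;    /* bottom-right weight */

        for (int j = 0; j < h; j++) {
            for (int i = 0; i < 8; i++)
                dst[i] = (A * src[i]          + B * src[i + 1] +
                          C * src[i + stride] + D * src[i + stride + 1] +
                          bias) >> 6;
            dst += stride;
            src += stride;
        }
    }

The NEON code branches on x == 0, y == 0 and x == y == 0 (labels 2:/3:/4:/5:) so zero-weight taps are never multiplied, but the result is identical; the avg variants additionally average the result with dst using vrhadd.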
Diffstat (limited to 'libavcodec/arm')
-rw-r--r--  libavcodec/arm/Makefile              10
-rw-r--r--  libavcodec/arm/h264cmc_neon.S       430
-rw-r--r--  libavcodec/arm/h264dsp_neon.S       339
-rw-r--r--  libavcodec/arm/rv34dsp_init_neon.c   33
-rw-r--r--  libavcodec/arm/rv34dsp_neon.S       109
-rw-r--r--  libavcodec/arm/rv40dsp_init_neon.c   44
-rw-r--r--  libavcodec/arm/rv40dsp_neon.S        85
7 files changed, 711 insertions, 339 deletions
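
Note: the new ff_rv40_weight_func_{16,8}_neon implement RV40 B-frame weighted prediction. In scalar terms the rv40_weight macro computes, per pixel, the sketch below (a reference model, not part of the commit, assuming the argument order of the prototypes in rv40dsp_init_neon.c: dst, src1, src2, w1, w2, stride; note that w2 scales src1 and w1 scales src2, matching the d0[2]/d0[0] lanes in the macro).

    /* scalar model of the rv40_weight macro: each source is scaled and
     * pre-shifted by 9, then the sum is narrowed with rounding (>> 5) */
    static void rv40_weight_16_c(uint8_t *dst, const uint8_t *src1,
                                 const uint8_t *src2, int w1, int w2,
                                 int stride)
    {
        for (int j = 0; j < 16; j++) {
            for (int i = 0; i < 16; i++)
                dst[i] = (((w2 * src1[i]) >> 9) +
                          ((w1 * src2[i]) >> 9) + 0x10) >> 5;
            dst  += stride;
            src1 += stride;
            src2 += stride;
        }
    }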
diff --git a/libavcodec/arm/Makefile b/libavcodec/arm/Makefile
index cc5a2a7d39..52709b6ae9 100644
--- a/libavcodec/arm/Makefile
+++ b/libavcodec/arm/Makefile
@@ -55,6 +55,7 @@ NEON-OBJS-$(CONFIG_RDFT)               += arm/rdft_neon.o             \
 NEON-OBJS-$(CONFIG_H264DSP)            += arm/h264dsp_neon.o          \
                                           arm/h264idct_neon.o         \
+                                          arm/h264cmc_neon.o          \
 
 NEON-OBJS-$(CONFIG_H264PRED)           += arm/h264pred_neon.o         \
@@ -63,6 +64,15 @@ NEON-OBJS-$(CONFIG_AC3DSP)             += arm/ac3dsp_neon.o
 NEON-OBJS-$(CONFIG_DCA_DECODER)        += arm/dcadsp_neon.o           \
                                           arm/synth_filter_neon.o     \
 
+NEON-OBJS-$(CONFIG_RV30_DECODER)       += arm/rv34dsp_init_neon.o     \
+                                          arm/rv34dsp_neon.o          \
+
+NEON-OBJS-$(CONFIG_RV40_DECODER)       += arm/rv34dsp_init_neon.o     \
+                                          arm/rv34dsp_neon.o          \
+                                          arm/rv40dsp_init_neon.o     \
+                                          arm/rv40dsp_neon.o          \
+                                          arm/h264cmc_neon.o          \
+
 NEON-OBJS-$(CONFIG_VP3_DECODER)        += arm/vp3dsp_neon.o
 NEON-OBJS-$(CONFIG_VP5_DECODER)        += arm/vp56dsp_neon.o          \
diff --git a/libavcodec/arm/h264cmc_neon.S b/libavcodec/arm/h264cmc_neon.S
new file mode 100644
index 0000000000..a6feadd189
--- /dev/null
+++ b/libavcodec/arm/h264cmc_neon.S
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "asm.S"
+
+/* chroma_mc8(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
+.macro  h264_chroma_mc8 type, codec=h264
+function ff_\type\()_\codec\()_chroma_mc8_neon, export=1
+        push            {r4-r7, lr}
+        ldrd            r4,  [sp, #20]
+  .ifc \type,avg
+        mov             lr,  r0
+  .endif
+        pld             [r1]
+        pld             [r1, r2]
+
+  .ifc \codec,rv40
+        movrel          r6,  rv40bias
+        lsr             r7,  r5,  #1
+        add             r6,  r6,  r7,  lsl #3
+        lsr             r7,  r4,  #1
+        add             r6,  r6,  r7,  lsl #1
+        vld1.16         {d22[],d23[]}, [r6,:16]
+  .endif
+
+A       muls            r7,  r4,  r5
+T       mul             r7,  r4,  r5
+T       cmp             r7,  #0
+        rsb             r6,  r7,  r5,  lsl #3
+        rsb             r12, r7,  r4,  lsl #3
+        sub             r4,  r7,  r4,  lsl #3
+        sub             r4,  r4,  r5,  lsl #3
+        add             r4,  r4,  #64
+
+        beq             2f
+
+        add             r5,  r1,  r2
+
+        vdup.8          d0,  r4
+        lsl             r4,  r2,  #1
+        vdup.8          d1,  r12
+        vld1.8          {d4, d5}, [r1], r4
+        vdup.8          d2,  r6
+        vld1.8          {d6, d7}, [r5], r4
+        vdup.8          d3,  r7
+
+        vext.8          d5,  d4,  d5,  #1
+        vext.8          d7,  d6,  d7,  #1
+
+1:      pld             [r5]
+        vmull.u8        q8,  d4,  d0
+        vmlal.u8        q8,  d5,  d1
+        vld1.8          {d4, d5}, [r1], r4
+        vmlal.u8        q8,  d6,  d2
+        vext.8          d5,  d4,  d5,  #1
+        vmlal.u8        q8,  d7,  d3
+        vmull.u8        q9,  d6,  d0
+        subs            r3,  r3,  #2
+        vmlal.u8        q9,  d7,  d1
+        vmlal.u8        q9,  d4,  d2
+        vmlal.u8        q9,  d5,  d3
+        vld1.8          {d6, d7}, [r5], r4
+        pld             [r1]
+  .ifc \codec,h264
+        vrshrn.u16      d16, q8,  #6
+        vrshrn.u16      d17, q9,  #6
+  .else
+        vadd.u16        q8,  q8,  q11
+        vadd.u16        q9,  q9,  q11
+        vshrn.u16       d16, q8,  #6
+        vshrn.u16       d17, q9,  #6
+  .endif
+  .ifc \type,avg
+        vld1.8          {d20}, [lr,:64], r2
+        vld1.8          {d21}, [lr,:64], r2
+        vrhadd.u8       q8,  q8,  q10
+  .endif
+        vext.8          d7,  d6,  d7,  #1
+        vst1.8          {d16}, [r0,:64], r2
+        vst1.8          {d17}, [r0,:64], r2
+        bgt             1b
+
+        pop             {r4-r7, pc}
+
+2:      tst             r6,  r6
+        add             r12, r12, r6
+        vdup.8          d0,  r4
+        vdup.8          d1,  r12
+
+        beq             4f
+
+        add             r5,  r1,  r2
+        lsl             r4,  r2,  #1
+        vld1.8          {d4}, [r1], r4
+        vld1.8          {d6}, [r5], r4
+
+3:      pld             [r5]
+        vmull.u8        q8,  d4,  d0
+        vmlal.u8        q8,  d6,  d1
+        vld1.8          {d4}, [r1], r4
+        vmull.u8        q9,  d6,  d0
+        vmlal.u8        q9,  d4,  d1
+        vld1.8          {d6}, [r5], r4
+  .ifc \codec,h264
+        vrshrn.u16      d16, q8,  #6
+        vrshrn.u16      d17, q9,  #6
+  .else
+        vadd.u16        q8,  q8,  q11
+        vadd.u16        q9,  q9,  q11
+        vshrn.u16       d16, q8,  #6
+        vshrn.u16       d17, q9,  #6
+  .endif
+  .ifc \type,avg
+        vld1.8          {d20}, [lr,:64], r2
+        vld1.8          {d21}, [lr,:64], r2
+        vrhadd.u8       q8,  q8,  q10
+  .endif
+        subs            r3,  r3,  #2
+        pld             [r1]
+        vst1.8          {d16}, [r0,:64], r2
+        vst1.8          {d17}, [r0,:64], r2
+        bgt             3b
+
+        pop             {r4-r7, pc}
+
+4:      vld1.8          {d4, d5}, [r1], r2
+        vld1.8          {d6, d7}, [r1], r2
+        vext.8          d5,  d4,  d5,  #1
+        vext.8          d7,  d6,  d7,  #1
+
+5:      pld             [r1]
+        subs            r3,  r3,  #2
+        vmull.u8        q8,  d4,  d0
+        vmlal.u8        q8,  d5,  d1
+        vld1.8          {d4, d5}, [r1], r2
+        vmull.u8        q9,  d6,  d0
+        vmlal.u8        q9,  d7,  d1
+        pld             [r1]
+        vext.8          d5,  d4,  d5,  #1
+  .ifc \codec,h264
+        vrshrn.u16      d16, q8,  #6
+        vrshrn.u16      d17, q9,  #6
+  .else
+        vadd.u16        q8,  q8,  q11
+        vadd.u16        q9,  q9,  q11
+        vshrn.u16       d16, q8,  #6
+        vshrn.u16       d17, q9,  #6
+  .endif
+  .ifc \type,avg
+        vld1.8          {d20}, [lr,:64], r2
+        vld1.8          {d21}, [lr,:64], r2
+        vrhadd.u8       q8,  q8,  q10
+  .endif
+        vld1.8          {d6, d7}, [r1], r2
+        vext.8          d7,  d6,  d7,  #1
+        vst1.8          {d16}, [r0,:64], r2
+        vst1.8          {d17}, [r0,:64], r2
+        bgt             5b
+
+        pop             {r4-r7, pc}
+endfunc
+.endm
+
+/* chroma_mc4(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
+.macro  h264_chroma_mc4 type, codec=h264
+function ff_\type\()_\codec\()_chroma_mc4_neon, export=1
+        push            {r4-r7, lr}
+        ldrd            r4,  [sp, #20]
+  .ifc \type,avg
+        mov             lr,  r0
+  .endif
+        pld             [r1]
+        pld             [r1, r2]
+
+  .ifc \codec,rv40
+        movrel          r6,  rv40bias
+        lsr             r7,  r5,  #1
+        add             r6,  r6,  r7,  lsl #3
+        lsr             r7,  r4,  #1
+        add             r6,  r6,  r7,  lsl #1
+        vld1.16         {d22[],d23[]}, [r6,:16]
+  .endif
+
+A       muls            r7,  r4,  r5
+T       mul             r7,  r4,  r5
+T       cmp             r7,  #0
+        rsb             r6,  r7,  r5,  lsl #3
+        rsb             r12, r7,  r4,  lsl #3
+        sub             r4,  r7,  r4,  lsl #3
+        sub             r4,  r4,  r5,  lsl #3
+        add             r4,  r4,  #64
+
+        beq             2f
+
+        add             r5,  r1,  r2
+
+        vdup.8          d0,  r4
+        lsl             r4,  r2,  #1
+        vdup.8          d1,  r12
+        vld1.8          {d4}, [r1], r4
+        vdup.8          d2,  r6
+        vld1.8          {d6}, [r5], r4
+        vdup.8          d3,  r7
+
+        vext.8          d5,  d4,  d5,  #1
+        vext.8          d7,  d6,  d7,  #1
+        vtrn.32         d4,  d5
+        vtrn.32         d6,  d7
+
+        vtrn.32         d0,  d1
+        vtrn.32         d2,  d3
+
+1:      pld             [r5]
+        vmull.u8        q8,  d4,  d0
+        vmlal.u8        q8,  d6,  d2
+        vld1.8          {d4}, [r1], r4
+        vext.8          d5,  d4,  d5,  #1
+        vtrn.32         d4,  d5
+        vmull.u8        q9,  d6,  d0
+        vmlal.u8        q9,  d4,  d2
+        vld1.8          {d6}, [r5], r4
+        vadd.i16        d16, d16, d17
+        vadd.i16        d17, d18, d19
+  .ifc \codec,h264
+        vrshrn.u16      d16, q8,  #6
+  .else
+        vadd.u16        q8,  q8,  q11
+        vshrn.u16       d16, q8,  #6
+  .endif
+        subs            r3,  r3,  #2
+        pld             [r1]
+  .ifc \type,avg
+        vld1.32         {d20[0]}, [lr,:32], r2
+        vld1.32         {d20[1]}, [lr,:32], r2
+        vrhadd.u8       d16, d16, d20
+  .endif
+        vext.8          d7,  d6,  d7,  #1
+        vtrn.32         d6,  d7
+        vst1.32         {d16[0]}, [r0,:32], r2
+        vst1.32         {d16[1]}, [r0,:32], r2
+        bgt             1b
+
+        pop             {r4-r7, pc}
+
+2:      tst             r6,  r6
+        add             r12, r12, r6
+        vdup.8          d0,  r4
+        vdup.8          d1,  r12
+        vtrn.32         d0,  d1
+
+        beq             4f
+
+        vext.32         d1,  d0,  d1,  #1
+        add             r5,  r1,  r2
+        lsl             r4,  r2,  #1
+        vld1.32         {d4[0]}, [r1], r4
+        vld1.32         {d4[1]}, [r5], r4
+
+3:      pld             [r5]
+        vmull.u8        q8,  d4,  d0
+        vld1.32         {d4[0]}, [r1], r4
+        vmull.u8        q9,  d4,  d1
+        vld1.32         {d4[1]}, [r5], r4
+        vadd.i16        d16, d16, d17
+        vadd.i16        d17, d18, d19
+  .ifc \codec,h264
+        vrshrn.u16      d16, q8,  #6
+  .else
+        vadd.u16        q8,  q8,  q11
+        vshrn.u16       d16, q8,  #6
+  .endif
+  .ifc \type,avg
+        vld1.32         {d20[0]}, [lr,:32], r2
+        vld1.32         {d20[1]}, [lr,:32], r2
+        vrhadd.u8       d16, d16, d20
+  .endif
+        subs            r3,  r3,  #2
+        pld             [r1]
+        vst1.32         {d16[0]}, [r0,:32], r2
+        vst1.32         {d16[1]}, [r0,:32], r2
+        bgt             3b
+
+        pop             {r4-r7, pc}
+
+4:      vld1.8          {d4}, [r1], r2
+        vld1.8          {d6}, [r1], r2
+        vext.8          d5,  d4,  d5,  #1
+        vext.8          d7,  d6,  d7,  #1
+        vtrn.32         d4,  d5
+        vtrn.32         d6,  d7
+
+5:      vmull.u8        q8,  d4,  d0
+        vmull.u8        q9,  d6,  d0
+        subs            r3,  r3,  #2
+        vld1.8          {d4}, [r1], r2
+        vext.8          d5,  d4,  d5,  #1
+        vtrn.32         d4,  d5
+        vadd.i16        d16, d16, d17
+        vadd.i16        d17, d18, d19
+        pld             [r1]
+  .ifc \codec,h264
+        vrshrn.u16      d16, q8,  #6
+  .else
+        vadd.u16        q8,  q8,  q11
+        vshrn.u16       d16, q8,  #6
+  .endif
+  .ifc \type,avg
+        vld1.32         {d20[0]}, [lr,:32], r2
+        vld1.32         {d20[1]}, [lr,:32], r2
+        vrhadd.u8       d16, d16, d20
+  .endif
+        vld1.8          {d6}, [r1], r2
+        vext.8          d7,  d6,  d7,  #1
+        vtrn.32         d6,  d7
+        pld             [r1]
+        vst1.32         {d16[0]}, [r0,:32], r2
+        vst1.32         {d16[1]}, [r0,:32], r2
+        bgt             5b
+
+        pop             {r4-r7, pc}
+endfunc
+.endm
+
+.macro  h264_chroma_mc2 type
+function ff_\type\()_h264_chroma_mc2_neon, export=1
+        push            {r4-r6, lr}
+        ldr             r4,  [sp, #16]
+        ldr             lr,  [sp, #20]
+        pld             [r1]
+        pld             [r1, r2]
+        orrs            r5,  r4,  lr
+        beq             2f
+
+        mul             r5,  r4,  lr
+        rsb             r6,  r5,  lr,  lsl #3
+        rsb             r12, r5,  r4,  lsl #3
+        sub             r4,  r5,  r4,  lsl #3
+        sub             r4,  r4,  lr,  lsl #3
+        add             r4,  r4,  #64
+        vdup.8          d0,  r4
+        vdup.8          d2,  r12
+        vdup.8          d1,  r6
+        vdup.8          d3,  r5
+        vtrn.16         q0,  q1
+1:
+        vld1.32         {d4[0]}, [r1], r2
+        vld1.32         {d4[1]}, [r1], r2
+        vrev64.32       d5,  d4
+        vld1.32         {d5[1]}, [r1]
+        vext.8          q3,  q2,  q2,  #1
+        vtrn.16         q2,  q3
+        vmull.u8        q8,  d4,  d0
+        vmlal.u8        q8,  d5,  d1
+  .ifc \type,avg
+        vld1.16         {d18[0]}, [r0,:16], r2
+        vld1.16         {d18[1]}, [r0,:16]
+        sub             r0,  r0,  r2
+  .endif
+        vtrn.32         d16, d17
+        vadd.i16        d16, d16, d17
+        vrshrn.u16      d16, q8,  #6
+  .ifc \type,avg
+        vrhadd.u8       d16, d16, d18
+  .endif
+        vst1.16         {d16[0]}, [r0,:16], r2
+        vst1.16         {d16[1]}, [r0,:16], r2
+        subs            r3,  r3,  #2
+        bgt             1b
+        pop             {r4-r6, pc}
+2:
+  .ifc \type,put
+        ldrh_post       r5,  r1,  r2
+        strh_post       r5,  r0,  r2
+        ldrh_post       r6,  r1,  r2
+        strh_post       r6,  r0,  r2
+  .else
+        vld1.16         {d16[0]}, [r1], r2
+        vld1.16         {d16[1]}, [r1], r2
+        vld1.16         {d18[0]}, [r0,:16], r2
+        vld1.16         {d18[1]}, [r0,:16]
+        sub             r0,  r0,  r2
+        vrhadd.u8       d16, d16, d18
+        vst1.16         {d16[0]}, [r0,:16], r2
+        vst1.16         {d16[1]}, [r0,:16], r2
+  .endif
+        subs            r3,  r3,  #2
+        bgt             2b
+        pop             {r4-r6, pc}
+endfunc
+.endm
+
+#if CONFIG_H264_DECODER
+        h264_chroma_mc8 put
+        h264_chroma_mc8 avg
+        h264_chroma_mc4 put
+        h264_chroma_mc4 avg
+        h264_chroma_mc2 put
+        h264_chroma_mc2 avg
+#endif
+
+#if CONFIG_RV40_DECODER
+const   rv40bias
+        .short           0, 16, 32, 16
+        .short          32, 28, 32, 28
+        .short           0, 32, 16, 32
+        .short          32, 28, 32, 28
+endconst
+
+        h264_chroma_mc8 put, rv40
+        h264_chroma_mc8 avg, rv40
+        h264_chroma_mc4 put, rv40
+        h264_chroma_mc4 avg, rv40
+#endif
diff --git a/libavcodec/arm/h264dsp_neon.S b/libavcodec/arm/h264dsp_neon.S
index 9f4da2cb7b..fc6b3b35d5 100644
--- a/libavcodec/arm/h264dsp_neon.S
+++ b/libavcodec/arm/h264dsp_neon.S
@@ -21,345 +21,6 @@
 #include "asm.S"
 #include "neon.S"
 
-/* chroma_mc8(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
-.macro  h264_chroma_mc8 type
-function ff_\type\()_h264_chroma_mc8_neon, export=1
-        push            {r4-r7, lr}
-        ldrd            r4,  [sp, #20]
-  .ifc \type,avg
-        mov             lr,  r0
-  .endif
-        pld             [r1]
-        pld             [r1, r2]
-
-A       muls            r7,  r4,  r5
-T       mul             r7,  r4,  r5
-T       cmp             r7,  #0
-        rsb             r6,  r7,  r5,  lsl #3
-        rsb             r12, r7,  r4,  lsl #3
-        sub             r4,  r7,  r4,  lsl #3
-        sub             r4,  r4,  r5,  lsl #3
-        add             r4,  r4,  #64
-
-        beq             2f
-
-        add             r5,  r1,  r2
-
-        vdup.8          d0,  r4
-        lsl             r4,  r2,  #1
-        vdup.8          d1,  r12
-        vld1.8          {d4, d5}, [r1], r4
-        vdup.8          d2,  r6
-        vld1.8          {d6, d7}, [r5], r4
-        vdup.8          d3,  r7
-
-        vext.8          d5,  d4,  d5,  #1
-        vext.8          d7,  d6,  d7,  #1
-
-1:      pld             [r5]
-        vmull.u8        q8,  d4,  d0
-        vmlal.u8        q8,  d5,  d1
-        vld1.8          {d4, d5}, [r1], r4
-        vmlal.u8        q8,  d6,  d2
-        vext.8          d5,  d4,  d5,  #1
-        vmlal.u8        q8,  d7,  d3
-        vmull.u8        q9,  d6,  d0
-        subs            r3,  r3,  #2
-        vmlal.u8        q9,  d7,  d1
-        vmlal.u8        q9,  d4,  d2
-        vmlal.u8        q9,  d5,  d3
-        vrshrn.u16      d16, q8,  #6
-        vld1.8          {d6, d7}, [r5], r4
-        pld             [r1]
-        vrshrn.u16      d17, q9,  #6
-  .ifc \type,avg
-        vld1.8          {d20}, [lr,:64], r2
-        vld1.8          {d21}, [lr,:64], r2
-        vrhadd.u8       q8,  q8,  q10
-  .endif
-        vext.8          d7,  d6,  d7,  #1
-        vst1.8          {d16}, [r0,:64], r2
-        vst1.8          {d17}, [r0,:64], r2
-        bgt             1b
-
-        pop             {r4-r7, pc}
-
-2:      tst             r6,  r6
-        add             r12, r12, r6
-        vdup.8          d0,  r4
-        vdup.8          d1,  r12
-
-        beq             4f
-
-        add             r5,  r1,  r2
-        lsl             r4,  r2,  #1
-        vld1.8          {d4}, [r1], r4
-        vld1.8          {d6}, [r5], r4
-
-3:      pld             [r5]
-        vmull.u8        q8,  d4,  d0
-        vmlal.u8        q8,  d6,  d1
-        vld1.8          {d4}, [r1], r4
-        vmull.u8        q9,  d6,  d0
-        vmlal.u8        q9,  d4,  d1
-        vld1.8          {d6}, [r5], r4
-        vrshrn.u16      d16, q8,  #6
-        vrshrn.u16      d17, q9,  #6
-  .ifc \type,avg
-        vld1.8          {d20}, [lr,:64], r2
-        vld1.8          {d21}, [lr,:64], r2
-        vrhadd.u8       q8,  q8,  q10
-  .endif
-        subs            r3,  r3,  #2
-        pld             [r1]
-        vst1.8          {d16}, [r0,:64], r2
-        vst1.8          {d17}, [r0,:64], r2
-        bgt             3b
-
-        pop             {r4-r7, pc}
-
-4:      vld1.8          {d4, d5}, [r1], r2
-        vld1.8          {d6, d7}, [r1], r2
-        vext.8          d5,  d4,  d5,  #1
-        vext.8          d7,  d6,  d7,  #1
-
-5:      pld             [r1]
-        subs            r3,  r3,  #2
-        vmull.u8        q8,  d4,  d0
-        vmlal.u8        q8,  d5,  d1
-        vld1.8          {d4, d5}, [r1], r2
-        vmull.u8        q9,  d6,  d0
-        vmlal.u8        q9,  d7,  d1
-        pld             [r1]
-        vext.8          d5,  d4,  d5,  #1
-        vrshrn.u16      d16, q8,  #6
-        vrshrn.u16      d17, q9,  #6
-  .ifc \type,avg
-        vld1.8          {d20}, [lr,:64], r2
-        vld1.8          {d21}, [lr,:64], r2
-        vrhadd.u8       q8,  q8,  q10
-  .endif
-        vld1.8          {d6, d7}, [r1], r2
-        vext.8          d7,  d6,  d7,  #1
-        vst1.8          {d16}, [r0,:64], r2
-        vst1.8          {d17}, [r0,:64], r2
-        bgt             5b
-
-        pop             {r4-r7, pc}
-endfunc
-.endm
-
-/* chroma_mc4(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
-.macro  h264_chroma_mc4 type
-function ff_\type\()_h264_chroma_mc4_neon, export=1
-        push            {r4-r7, lr}
-        ldrd            r4,  [sp, #20]
-  .ifc \type,avg
-        mov             lr,  r0
-  .endif
-        pld             [r1]
-        pld             [r1, r2]
-
-A       muls            r7,  r4,  r5
-T       mul             r7,  r4,  r5
-T       cmp             r7,  #0
-        rsb             r6,  r7,  r5,  lsl #3
-        rsb             r12, r7,  r4,  lsl #3
-        sub             r4,  r7,  r4,  lsl #3
-        sub             r4,  r4,  r5,  lsl #3
-        add             r4,  r4,  #64
-
-        beq             2f
-
-        add             r5,  r1,  r2
-
-        vdup.8          d0,  r4
-        lsl             r4,  r2,  #1
-        vdup.8          d1,  r12
-        vld1.8          {d4}, [r1], r4
-        vdup.8          d2,  r6
-        vld1.8          {d6}, [r5], r4
-        vdup.8          d3,  r7
-
-        vext.8          d5,  d4,  d5,  #1
-        vext.8          d7,  d6,  d7,  #1
-        vtrn.32         d4,  d5
-        vtrn.32         d6,  d7
-
-        vtrn.32         d0,  d1
-        vtrn.32         d2,  d3
-
-1:      pld             [r5]
-        vmull.u8        q8,  d4,  d0
-        vmlal.u8        q8,  d6,  d2
-        vld1.8          {d4}, [r1], r4
-        vext.8          d5,  d4,  d5,  #1
-        vtrn.32         d4,  d5
-        vmull.u8        q9,  d6,  d0
-        vmlal.u8        q9,  d4,  d2
-        vld1.8          {d6}, [r5], r4
-        vadd.i16        d16, d16, d17
-        vadd.i16        d17, d18, d19
-        vrshrn.u16      d16, q8,  #6
-        subs            r3,  r3,  #2
-        pld             [r1]
-  .ifc \type,avg
-        vld1.32         {d20[0]}, [lr,:32], r2
-        vld1.32         {d20[1]}, [lr,:32], r2
-        vrhadd.u8       d16, d16, d20
-  .endif
-        vext.8          d7,  d6,  d7,  #1
-        vtrn.32         d6,  d7
-        vst1.32         {d16[0]}, [r0,:32], r2
-        vst1.32         {d16[1]}, [r0,:32], r2
-        bgt             1b
-
-        pop             {r4-r7, pc}
-
-2:      tst             r6,  r6
-        add             r12, r12, r6
-        vdup.8          d0,  r4
-        vdup.8          d1,  r12
-        vtrn.32         d0,  d1
-
-        beq             4f
-
-        vext.32         d1,  d0,  d1,  #1
-        add             r5,  r1,  r2
-        lsl             r4,  r2,  #1
-        vld1.32         {d4[0]}, [r1], r4
-        vld1.32         {d4[1]}, [r5], r4
-
-3:      pld             [r5]
-        vmull.u8        q8,  d4,  d0
-        vld1.32         {d4[0]}, [r1], r4
-        vmull.u8        q9,  d4,  d1
-        vld1.32         {d4[1]}, [r5], r4
-        vadd.i16        d16, d16, d17
-        vadd.i16        d17, d18, d19
-        vrshrn.u16      d16, q8,  #6
-  .ifc \type,avg
-        vld1.32         {d20[0]}, [lr,:32], r2
-        vld1.32         {d20[1]}, [lr,:32], r2
-        vrhadd.u8       d16, d16, d20
-  .endif
-        subs            r3,  r3,  #2
-        pld             [r1]
-        vst1.32         {d16[0]}, [r0,:32], r2
-        vst1.32         {d16[1]}, [r0,:32], r2
-        bgt             3b
-
-        pop             {r4-r7, pc}
-
-4:      vld1.8          {d4}, [r1], r2
-        vld1.8          {d6}, [r1], r2
-        vext.8          d5,  d4,  d5,  #1
-        vext.8          d7,  d6,  d7,  #1
-        vtrn.32         d4,  d5
-        vtrn.32         d6,  d7
-
-5:      vmull.u8        q8,  d4,  d0
-        vmull.u8        q9,  d6,  d0
-        subs            r3,  r3,  #2
-        vld1.8          {d4}, [r1], r2
-        vext.8          d5,  d4,  d5,  #1
-        vtrn.32         d4,  d5
-        vadd.i16        d16, d16, d17
-        vadd.i16        d17, d18, d19
-        pld             [r1]
-        vrshrn.u16      d16, q8,  #6
-  .ifc \type,avg
-        vld1.32         {d20[0]}, [lr,:32], r2
-        vld1.32         {d20[1]}, [lr,:32], r2
-        vrhadd.u8       d16, d16, d20
-  .endif
-        vld1.8          {d6}, [r1], r2
-        vext.8          d7,  d6,  d7,  #1
-        vtrn.32         d6,  d7
-        pld             [r1]
-        vst1.32         {d16[0]}, [r0,:32], r2
-        vst1.32         {d16[1]}, [r0,:32], r2
-        bgt             5b
-
-        pop             {r4-r7, pc}
-endfunc
-.endm
-
-.macro  h264_chroma_mc2 type
-function ff_\type\()_h264_chroma_mc2_neon, export=1
-        push            {r4-r6, lr}
-        ldr             r4,  [sp, #16]
-        ldr             lr,  [sp, #20]
-        pld             [r1]
-        pld             [r1, r2]
-        orrs            r5,  r4,  lr
-        beq             2f
-
-        mul             r5,  r4,  lr
-        rsb             r6,  r5,  lr,  lsl #3
-        rsb             r12, r5,  r4,  lsl #3
-        sub             r4,  r5,  r4,  lsl #3
-        sub             r4,  r4,  lr,  lsl #3
-        add             r4,  r4,  #64
-        vdup.8          d0,  r4
-        vdup.8          d2,  r12
-        vdup.8          d1,  r6
-        vdup.8          d3,  r5
-        vtrn.16         q0,  q1
-1:
-        vld1.32         {d4[0]}, [r1], r2
-        vld1.32         {d4[1]}, [r1], r2
-        vrev64.32       d5,  d4
-        vld1.32         {d5[1]}, [r1]
-        vext.8          q3,  q2,  q2,  #1
-        vtrn.16         q2,  q3
-        vmull.u8        q8,  d4,  d0
-        vmlal.u8        q8,  d5,  d1
-  .ifc \type,avg
-        vld1.16         {d18[0]}, [r0,:16], r2
-        vld1.16         {d18[1]}, [r0,:16]
-        sub             r0,  r0,  r2
-  .endif
-        vtrn.32         d16, d17
-        vadd.i16        d16, d16, d17
-        vrshrn.u16      d16, q8,  #6
-  .ifc \type,avg
-        vrhadd.u8       d16, d16, d18
-  .endif
-        vst1.16         {d16[0]}, [r0,:16], r2
-        vst1.16         {d16[1]}, [r0,:16], r2
-        subs            r3,  r3,  #2
-        bgt             1b
-        pop             {r4-r6, pc}
-2:
-  .ifc \type,put
-        ldrh_post       r5,  r1,  r2
-        strh_post       r5,  r0,  r2
-        ldrh_post       r6,  r1,  r2
-        strh_post       r6,  r0,  r2
-  .else
-        vld1.16         {d16[0]}, [r1], r2
-        vld1.16         {d16[1]}, [r1], r2
-        vld1.16         {d18[0]}, [r0,:16], r2
-        vld1.16         {d18[1]}, [r0,:16]
-        sub             r0,  r0,  r2
-        vrhadd.u8       d16, d16, d18
-        vst1.16         {d16[0]}, [r0,:16], r2
-        vst1.16         {d16[1]}, [r0,:16], r2
-  .endif
-        subs            r3,  r3,  #2
-        bgt             2b
-        pop             {r4-r6, pc}
-endfunc
-.endm
-
-        h264_chroma_mc8 put
-        h264_chroma_mc8 avg
-        h264_chroma_mc4 put
-        h264_chroma_mc4 avg
-        h264_chroma_mc2 put
-        h264_chroma_mc2 avg
-
 /* H.264 loop filter */
 
 .macro  h264_loop_filter_start
diff --git a/libavcodec/arm/rv34dsp_init_neon.c b/libavcodec/arm/rv34dsp_init_neon.c
new file mode 100644
index 0000000000..9a09fde7a9
--- /dev/null
+++ b/libavcodec/arm/rv34dsp_init_neon.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2011 Janne Grunau <janne-libav@jannau.net>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "libavcodec/avcodec.h"
+#include "libavcodec/rv34dsp.h"
+
+void ff_rv34_inv_transform_neon(DCTELEM *block);
+void ff_rv34_inv_transform_noround_neon(DCTELEM *block);
+
+void ff_rv34dsp_init_neon(RV34DSPContext *c, DSPContext* dsp)
+{
+    c->rv34_inv_transform_tab[0] = ff_rv34_inv_transform_neon;
+    c->rv34_inv_transform_tab[1] = ff_rv34_inv_transform_noround_neon;
+}
diff --git a/libavcodec/arm/rv34dsp_neon.S b/libavcodec/arm/rv34dsp_neon.S
new file mode 100644
index 0000000000..f700f5c321
--- /dev/null
+++ b/libavcodec/arm/rv34dsp_neon.S
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2011 Janne Grunau <janne-libav@jannau.net>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "asm.S"
+
+.macro  rv34_inv_transform
+        mov             r1,  #16
+        vld1.16         {d28}, [r0,:64], r1  @ block[i+8*0]
+        vld1.16         {d29}, [r0,:64], r1  @ block[i+8*1]
+        vld1.16         {d30}, [r0,:64], r1  @ block[i+8*2]
+        vld1.16         {d31}, [r0,:64], r1  @ block[i+8*3]
+        vmov.s16        d0,  #13
+        vshll.s16       q12, d29, #3
+        vshll.s16       q13, d29, #4
+        vshll.s16       q9,  d31, #3
+        vshll.s16       q1,  d31, #4
+        vmull.s16       q10, d28, d0
+        vmlal.s16       q10, d30, d0
+        vmull.s16       q11, d28, d0
+        vmlsl.s16       q11, d30, d0
+        vsubw.s16       q12, q12, d29        @ z2 = block[i+8*1]*7
+        vaddw.s16       q13, q13, d29        @ z3 = block[i+8*1]*17
+        vsubw.s16       q9,  q9,  d31
+        vaddw.s16       q1,  q1,  d31
+        vadd.s32        q13, q13, q9         @ z3 = 17*block[i+8*1] + 7*block[i+8*3]
+        vsub.s32        q12, q12, q1         @ z2 =  7*block[i+8*1] - 17*block[i+8*3]
+        vadd.s32        q1,  q10, q13        @ z0 + z3
+        vadd.s32        q2,  q11, q12        @ z1 + z2
+        vsub.s32        q8,  q10, q13        @ z0 - z3
+        vsub.s32        q3,  q11, q12        @ z1 - z2
+        vtrn.32         q1,  q2
+        vtrn.32         q3,  q8
+        vswp            d3,  d6
+        vswp            d5,  d16
+        vmov.s32        d0,  #13
+        vadd.s32        q10, q1,  q3
+        vsub.s32        q11, q1,  q3
+        vshl.s32        q12, q2,  #3
+        vshl.s32        q9,  q2,  #4
+        vmul.s32        q13, q11, d0[0]
+        vshl.s32        q11, q8,  #4
+        vadd.s32        q9,  q9,  q2
+        vshl.s32        q15, q8,  #3
+        vsub.s32        q12, q12, q2
+        vadd.s32        q11, q11, q8
+        vmul.s32        q14, q10, d0[0]
+        vsub.s32        q8,  q15, q8
+        vsub.s32        q12, q12, q11
+        vadd.s32        q9,  q9,  q8
+        vadd.s32        q2,  q13, q12        @ z1 + z2
+        vadd.s32        q1,  q14, q9         @ z0 + z3
+        vsub.s32        q3,  q13, q12        @ z1 - z2
+        vsub.s32        q15, q14, q9         @ z0 - z3
+.endm
+
+/* void ff_rv34_inv_transform_neon(DCTELEM *block); */
+function ff_rv34_inv_transform_neon, export=1
+        mov             r2,  r0
+        rv34_inv_transform
+        vrshrn.s32      d1,  q2,  #10        @ (z1 + z2) >> 10
+        vrshrn.s32      d0,  q1,  #10        @ (z0 + z3) >> 10
+        vrshrn.s32      d2,  q3,  #10        @ (z1 - z2) >> 10
+        vrshrn.s32      d3,  q15, #10        @ (z0 - z3) >> 10
+        vst4.16         {d0[0], d1[0], d2[0], d3[0]}, [r2,:64], r1
+        vst4.16         {d0[1], d1[1], d2[1], d3[1]}, [r2,:64], r1
+        vst4.16         {d0[2], d1[2], d2[2], d3[2]}, [r2,:64], r1
+        vst4.16         {d0[3], d1[3], d2[3], d3[3]}, [r2,:64], r1
+        bx              lr
+endfunc
+
+/* void rv34_inv_transform_noround_neon(DCTELEM *block); */
+function ff_rv34_inv_transform_noround_neon, export=1
+        mov             r2,  r0
+        rv34_inv_transform
+        vshl.s32        q11, q2,  #1
+        vshl.s32        q10, q1,  #1
+        vshl.s32        q12, q3,  #1
+        vshl.s32        q13, q15, #1
+        vadd.s32        q11, q11, q2
+        vadd.s32        q10, q10, q1
+        vadd.s32        q12, q12, q3
+        vadd.s32        q13, q13, q15
+        vshrn.s32       d0,  q10, #11        @ (z0 + z3)*3 >> 11
+        vshrn.s32       d1,  q11, #11        @ (z1 + z2)*3 >> 11
+        vshrn.s32       d2,  q12, #11        @ (z1 - z2)*3 >> 11
+        vshrn.s32       d3,  q13, #11        @ (z0 - z3)*3 >> 11
+        vst4.16         {d0[0], d1[0], d2[0], d3[0]}, [r2,:64], r1
+        vst4.16         {d0[1], d1[1], d2[1], d3[1]}, [r2,:64], r1
+        vst4.16         {d0[2], d1[2], d2[2], d3[2]}, [r2,:64], r1
+        vst4.16         {d0[3], d1[3], d2[3], d3[3]}, [r2,:64], r1
+        bx              lr
endfunc
diff --git a/libavcodec/arm/rv40dsp_init_neon.c b/libavcodec/arm/rv40dsp_init_neon.c
new file mode 100644
index 0000000000..3a863e1916
--- /dev/null
+++ b/libavcodec/arm/rv40dsp_init_neon.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2011 Janne Grunau <janne-libav@jannau.net>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "libavcodec/avcodec.h"
+#include "libavcodec/rv34dsp.h"
+
+void ff_put_rv40_chroma_mc8_neon(uint8_t *, uint8_t *, int, int, int, int);
+void ff_put_rv40_chroma_mc4_neon(uint8_t *, uint8_t *, int, int, int, int);
+
+void ff_avg_rv40_chroma_mc8_neon(uint8_t *, uint8_t *, int, int, int, int);
+void ff_avg_rv40_chroma_mc4_neon(uint8_t *, uint8_t *, int, int, int, int);
+
+void ff_rv40_weight_func_16_neon(uint8_t *, uint8_t *, uint8_t *, int, int, int);
+void ff_rv40_weight_func_8_neon(uint8_t *, uint8_t *, uint8_t *, int, int, int);
+
+void ff_rv40dsp_init_neon(RV34DSPContext *c, DSPContext* dsp)
+{
+    c->put_chroma_pixels_tab[0] = ff_put_rv40_chroma_mc8_neon;
+    c->put_chroma_pixels_tab[1] = ff_put_rv40_chroma_mc4_neon;
+    c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_neon;
+    c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_neon;
+
+    c->rv40_weight_pixels_tab[0] = ff_rv40_weight_func_16_neon;
+    c->rv40_weight_pixels_tab[1] = ff_rv40_weight_func_8_neon;
+}
diff --git a/libavcodec/arm/rv40dsp_neon.S b/libavcodec/arm/rv40dsp_neon.S
new file mode 100644
index 0000000000..cafd98add0
--- /dev/null
+++ b/libavcodec/arm/rv40dsp_neon.S
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2011 Janne Grunau <janne-libav@jannau.net>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "asm.S"
+
+.macro  rv40_weight
+        vmovl.u8        q8,  d2
+        vmovl.u8        q9,  d3
+        vmovl.u8        q10, d4
+        vmovl.u8        q11, d5
+        vmull.u16       q2,  d16, d0[2]
+        vmull.u16       q3,  d17, d0[2]
+        vmull.u16       q8,  d18, d0[2]
+        vmull.u16       q9,  d19, d0[2]
+        vmull.u16       q12, d20, d0[0]
+        vmull.u16       q13, d21, d0[0]
+        vmull.u16       q14, d22, d0[0]
+        vmull.u16       q15, d23, d0[0]
+        vshrn.i32       d4,  q2,  #9
+        vshrn.i32       d5,  q3,  #9
+        vshrn.i32       d6,  q8,  #9
+        vshrn.i32       d7,  q9,  #9
+        vshrn.i32       d16, q12, #9
+        vshrn.i32       d17, q13, #9
+        vshrn.i32       d18, q14, #9
+        vshrn.i32       d19, q15, #9
+        vadd.u16        q2,  q2,  q8
+        vadd.u16        q3,  q3,  q9
+        vrshrn.i16      d2,  q2,  #5
+        vrshrn.i16      d3,  q3,  #5
+.endm
+
+/* void ff_rv40_weight_func_16_neon(uint8_t *dst, uint8_t *src1, uint8_t *src2,
+                                    int w1, int w2, int stride) */
+function ff_rv40_weight_func_16_neon, export=1
+        ldr             r12, [sp]
+        vmov            d0,  r3,  r12
+        ldr             r12, [sp, #4]
+        mov             r3,  #16
+1:
+        vld1.8          {q1}, [r1,:128], r12
+        vld1.8          {q2}, [r2,:128], r12
+        rv40_weight
+        vst1.8          {q1}, [r0,:128], r12
+        subs            r3,  r3,  #1
+        bne             1b
+        bx              lr
+endfunc
+
+/* void ff_rv40_weight_func_8_neon(uint8_t *dst, uint8_t *src1, uint8_t *src2,
+                                   int w1, int w2, int stride) */
+function ff_rv40_weight_func_8_neon, export=1
+        ldr             r12, [sp]
+        vmov            d0,  r3,  r12
+        ldr             r12, [sp, #4]
+        mov             r3,  #8
+1:
+        vld1.8          {d2}, [r1,:64], r12
+        vld1.8          {d3}, [r1,:64], r12
+        vld1.8          {d4}, [r2,:64], r12
+        vld1.8          {d5}, [r2,:64], r12
+        rv40_weight
+        vst1.8          {d2}, [r0,:64], r12
+        vst1.8          {d3}, [r0,:64], r12
+        subs            r3,  r3,  #2
+        bne             1b
+        bx              lr
endfunc
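
Note: the 4x4 transform implemented by the rv34_inv_transform macro uses the coefficient set (13, 17, 7); one 4-point pass is, in scalar form, the sketch below (a reference model derived from the macro's @ comments, not part of the commit; the NEON version keeps rows and columns in registers and only narrows at the end).

    /* one 4-point pass of the RV34 inverse transform */
    static void rv34_inv_pass(const int in[4], int out[4])
    {
        const int z0 = 13 * (in[0] + in[2]);
        const int z1 = 13 * (in[0] - in[2]);
        const int z2 =  7 * in[1] - 17 * in[3];
        const int z3 = 17 * in[1] +  7 * in[3];

        out[0] = z0 + z3;
        out[1] = z1 + z2;
        out[2] = z1 - z2;
        out[3] = z0 - z3;
    }

After both passes, ff_rv34_inv_transform_neon rounds each result as (v + 512) >> 10 (vrshrn #10), while the noround variant computes (v * 3) >> 11 without rounding (vshl #1 plus vadd to form v * 3, then vshrn #11).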