author     Måns Rullgård <mans@mansr.com>   2009-10-03 18:22:49 +0000
committer  Måns Rullgård <mans@mansr.com>   2009-10-03 18:22:49 +0000
commit     abff992d360fb085b1129d033e9fcd34fa9fa0c1 (patch)
tree       3d9475da1cc07c75d0a961ebe85b1c5ac268a8c6 /libavcodec/arm
parent     f640478b56dfddf946c72cf012866e1b9d59e271 (diff)
ARM: whitespace cosmetics
Originally committed as revision 20157 to svn://svn.ffmpeg.org/ffmpeg/trunk
Diffstat (limited to 'libavcodec/arm')
-rw-r--r--  libavcodec/arm/dsputil_arm_s.S | 908
1 file changed, 454 insertions(+), 454 deletions(-)
diff --git a/libavcodec/arm/dsputil_arm_s.S b/libavcodec/arm/dsputil_arm_s.S
index f10d6162f0..f62f405059 100644
--- a/libavcodec/arm/dsputil_arm_s.S
+++ b/libavcodec/arm/dsputil_arm_s.S
@@ -31,73 +31,73 @@
#if HAVE_ARMV5TE
function ff_prefetch_arm, export=1
- subs r2, r2, #1
- pld [r0]
- add r0, r0, r1
- bne ff_prefetch_arm
- bx lr
+ subs r2, r2, #1
+ pld [r0]
+ add r0, r0, r1
+ bne ff_prefetch_arm
+ bx lr
.endfunc
#endif
.macro ADJ_ALIGN_QUADWORD_D shift, Rd0, Rd1, Rd2, Rd3, Rn0, Rn1, Rn2, Rn3, Rn4
- mov \Rd0, \Rn0, lsr #(\shift * 8)
- mov \Rd1, \Rn1, lsr #(\shift * 8)
- mov \Rd2, \Rn2, lsr #(\shift * 8)
- mov \Rd3, \Rn3, lsr #(\shift * 8)
- orr \Rd0, \Rd0, \Rn1, lsl #(32 - \shift * 8)
- orr \Rd1, \Rd1, \Rn2, lsl #(32 - \shift * 8)
- orr \Rd2, \Rd2, \Rn3, lsl #(32 - \shift * 8)
- orr \Rd3, \Rd3, \Rn4, lsl #(32 - \shift * 8)
+ mov \Rd0, \Rn0, lsr #(\shift * 8)
+ mov \Rd1, \Rn1, lsr #(\shift * 8)
+ mov \Rd2, \Rn2, lsr #(\shift * 8)
+ mov \Rd3, \Rn3, lsr #(\shift * 8)
+ orr \Rd0, \Rd0, \Rn1, lsl #(32 - \shift * 8)
+ orr \Rd1, \Rd1, \Rn2, lsl #(32 - \shift * 8)
+ orr \Rd2, \Rd2, \Rn3, lsl #(32 - \shift * 8)
+ orr \Rd3, \Rd3, \Rn4, lsl #(32 - \shift * 8)
.endm
.macro ADJ_ALIGN_DOUBLEWORD shift, R0, R1, R2
- mov \R0, \R0, lsr #(\shift * 8)
- orr \R0, \R0, \R1, lsl #(32 - \shift * 8)
- mov \R1, \R1, lsr #(\shift * 8)
- orr \R1, \R1, \R2, lsl #(32 - \shift * 8)
+ mov \R0, \R0, lsr #(\shift * 8)
+ orr \R0, \R0, \R1, lsl #(32 - \shift * 8)
+ mov \R1, \R1, lsr #(\shift * 8)
+ orr \R1, \R1, \R2, lsl #(32 - \shift * 8)
.endm
.macro ADJ_ALIGN_DOUBLEWORD_D shift, Rdst0, Rdst1, Rsrc0, Rsrc1, Rsrc2
- mov \Rdst0, \Rsrc0, lsr #(\shift * 8)
- mov \Rdst1, \Rsrc1, lsr #(\shift * 8)
- orr \Rdst0, \Rdst0, \Rsrc1, lsl #(32 - (\shift * 8))
- orr \Rdst1, \Rdst1, \Rsrc2, lsl #(32 - (\shift * 8))
+ mov \Rdst0, \Rsrc0, lsr #(\shift * 8)
+ mov \Rdst1, \Rsrc1, lsr #(\shift * 8)
+ orr \Rdst0, \Rdst0, \Rsrc1, lsl #(32 - (\shift * 8))
+ orr \Rdst1, \Rdst1, \Rsrc2, lsl #(32 - (\shift * 8))
.endm
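
(For reference: the three ADJ_ALIGN_* macros above emulate an unaligned load by funnel-shifting pairs of aligned words. A minimal little-endian C model, with an illustrative helper name:)

    #include <stdint.h>

    /* Rebuild the word that starts `shift` bytes into w0 from two
       aligned words, as each lsr/orr pair above does.  shift is 1..3;
       shift 0 takes a separate aligned code path, which also avoids
       the undefined 32-bit shift this expression would hit in C. */
    static uint32_t adj_align(uint32_t w0, uint32_t w1, unsigned shift)
    {
        return (w0 >> (shift * 8)) | (w1 << (32 - shift * 8));
    }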
.macro RND_AVG32 Rd0, Rd1, Rn0, Rn1, Rm0, Rm1, Rmask
@ Rd = (Rn | Rm) - (((Rn ^ Rm) & ~0x01010101) >> 1)
@ Rmask = 0xFEFEFEFE
@ Rn = destroyed
- eor \Rd0, \Rn0, \Rm0
- eor \Rd1, \Rn1, \Rm1
- orr \Rn0, \Rn0, \Rm0
- orr \Rn1, \Rn1, \Rm1
- and \Rd0, \Rd0, \Rmask
- and \Rd1, \Rd1, \Rmask
- sub \Rd0, \Rn0, \Rd0, lsr #1
- sub \Rd1, \Rn1, \Rd1, lsr #1
+ eor \Rd0, \Rn0, \Rm0
+ eor \Rd1, \Rn1, \Rm1
+ orr \Rn0, \Rn0, \Rm0
+ orr \Rn1, \Rn1, \Rm1
+ and \Rd0, \Rd0, \Rmask
+ and \Rd1, \Rd1, \Rmask
+ sub \Rd0, \Rn0, \Rd0, lsr #1
+ sub \Rd1, \Rn1, \Rd1, lsr #1
.endm
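
(RND_AVG32 evaluates the identity in its comment: a rounded per-byte average, four bytes at a time, with 0xFEFEFEFE keeping the shifted bits inside their byte lanes. A C sketch, helper name ours:)

    #include <stdint.h>

    /* Per-byte (a + b + 1) >> 1 on packed bytes. */
    static uint32_t rnd_avg32(uint32_t a, uint32_t b)
    {
        return (a | b) - (((a ^ b) & 0xFEFEFEFEu) >> 1);
    }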
.macro NO_RND_AVG32 Rd0, Rd1, Rn0, Rn1, Rm0, Rm1, Rmask
@ Rd = (Rn & Rm) + (((Rn ^ Rm) & ~0x01010101) >> 1)
@ Rmask = 0xFEFEFEFE
@ Rn = destroyed
- eor \Rd0, \Rn0, \Rm0
- eor \Rd1, \Rn1, \Rm1
- and \Rn0, \Rn0, \Rm0
- and \Rn1, \Rn1, \Rm1
- and \Rd0, \Rd0, \Rmask
- and \Rd1, \Rd1, \Rmask
- add \Rd0, \Rn0, \Rd0, lsr #1
- add \Rd1, \Rn1, \Rd1, lsr #1
+ eor \Rd0, \Rn0, \Rm0
+ eor \Rd1, \Rn1, \Rm1
+ and \Rn0, \Rn0, \Rm0
+ and \Rn1, \Rn1, \Rm1
+ and \Rd0, \Rd0, \Rmask
+ and \Rd1, \Rd1, \Rmask
+ add \Rd0, \Rn0, \Rd0, lsr #1
+ add \Rd1, \Rn1, \Rd1, lsr #1
.endm
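
(NO_RND_AVG32 is the truncating counterpart, matching the corrected comment above: a + b == 2*(a & b) + (a ^ b), so the floor average is (a & b) plus half the XOR. Sketch:)

    #include <stdint.h>

    /* Per-byte (a + b) >> 1 on packed bytes. */
    static uint32_t no_rnd_avg32(uint32_t a, uint32_t b)
    {
        return (a & b) + (((a ^ b) & 0xFEFEFEFEu) >> 1);
    }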
.macro JMP_ALIGN tmp, reg
- ands \tmp, \reg, #3
- bic \reg, \reg, #3
- beq 1f
- subs \tmp, \tmp, #1
- beq 2f
- subs \tmp, \tmp, #1
- beq 3f
+ ands \tmp, \reg, #3
+ bic \reg, \reg, #3
+ beq 1f
+ subs \tmp, \tmp, #1
+ beq 2f
+ subs \tmp, \tmp, #1
+ beq 3f
b 4f
.endm
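
(JMP_ALIGN strips the low two bits off the source pointer and dispatches to one of the four local loops, 1: through 4:. Modeled in C with a hypothetical helper:)

    #include <stdint.h>

    /* Word-align the pointer and return the 0..3 byte misalignment. */
    static unsigned jmp_align(const uint8_t **pixels)
    {
        uintptr_t p = (uintptr_t)*pixels;
        unsigned align = p & 3;                         /* ands tmp, reg, #3 */
        *pixels = (const uint8_t *)(p & ~(uintptr_t)3); /* bic  reg, reg, #3 */
        return align;                  /* beq 1f / beq 2f / beq 3f / b 4f */
    }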
@@ -106,51 +106,51 @@ function ff_prefetch_arm, export=1
function put_pixels16_arm, export=1
@ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
@ block = word aligned, pixels = unaligned
- pld [r1]
- stmfd sp!, {r4-r11, lr} @ R14 is also called LR
- JMP_ALIGN r5, r1
+ pld [r1]
+ stmfd sp!, {r4-r11, lr} @ R14 is also called LR
+ JMP_ALIGN r5, r1
1:
- ldmia r1, {r4-r7}
- add r1, r1, r2
- stmia r0, {r4-r7}
- pld [r1]
- subs r3, r3, #1
- add r0, r0, r2
- bne 1b
- ldmfd sp!, {r4-r11, pc}
+ ldmia r1, {r4-r7}
+ add r1, r1, r2
+ stmia r0, {r4-r7}
+ pld [r1]
+ subs r3, r3, #1
+ add r0, r0, r2
+ bne 1b
+ ldmfd sp!, {r4-r11, pc}
.align 5
2:
- ldmia r1, {r4-r8}
- add r1, r1, r2
- ADJ_ALIGN_QUADWORD_D 1, r9, r10, r11, r12, r4, r5, r6, r7, r8
- pld [r1]
- subs r3, r3, #1
- stmia r0, {r9-r12}
- add r0, r0, r2
- bne 2b
- ldmfd sp!, {r4-r11, pc}
+ ldmia r1, {r4-r8}
+ add r1, r1, r2
+ ADJ_ALIGN_QUADWORD_D 1, r9, r10, r11, r12, r4, r5, r6, r7, r8
+ pld [r1]
+ subs r3, r3, #1
+ stmia r0, {r9-r12}
+ add r0, r0, r2
+ bne 2b
+ ldmfd sp!, {r4-r11, pc}
.align 5
3:
- ldmia r1, {r4-r8}
- add r1, r1, r2
- ADJ_ALIGN_QUADWORD_D 2, r9, r10, r11, r12, r4, r5, r6, r7, r8
- pld [r1]
- subs r3, r3, #1
- stmia r0, {r9-r12}
- add r0, r0, r2
- bne 3b
- ldmfd sp!, {r4-r11, pc}
+ ldmia r1, {r4-r8}
+ add r1, r1, r2
+ ADJ_ALIGN_QUADWORD_D 2, r9, r10, r11, r12, r4, r5, r6, r7, r8
+ pld [r1]
+ subs r3, r3, #1
+ stmia r0, {r9-r12}
+ add r0, r0, r2
+ bne 3b
+ ldmfd sp!, {r4-r11, pc}
.align 5
4:
- ldmia r1, {r4-r8}
- add r1, r1, r2
- ADJ_ALIGN_QUADWORD_D 3, r9, r10, r11, r12, r4, r5, r6, r7, r8
- pld [r1]
- subs r3, r3, #1
- stmia r0, {r9-r12}
- add r0, r0, r2
- bne 4b
- ldmfd sp!, {r4-r11,pc}
+ ldmia r1, {r4-r8}
+ add r1, r1, r2
+ ADJ_ALIGN_QUADWORD_D 3, r9, r10, r11, r12, r4, r5, r6, r7, r8
+ pld [r1]
+ subs r3, r3, #1
+ stmia r0, {r9-r12}
+ add r0, r0, r2
+ bne 4b
+ ldmfd sp!, {r4-r11,pc}
.endfunc
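
(All four paths implement the same operation: copy an h-row, 16-byte-wide block, prefetching one line ahead; the 2:/3:/4: paths load 20 bytes (r4-r8) and realign with ADJ_ALIGN_QUADWORD_D. A plain C reference, assuming the usual dsputil semantics:)

    #include <stdint.h>
    #include <string.h>

    static void put_pixels16_c(uint8_t *block, const uint8_t *pixels,
                               int line_size, int h)
    {
        for (int i = 0; i < h; i++) {
            memcpy(block, pixels, 16);   /* one ldmia/stmia per row */
            block  += line_size;
            pixels += line_size;
        }
    }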
@ ----------------------------------------------------------------
@@ -158,51 +158,51 @@ function put_pixels16_arm, export=1
function put_pixels8_arm, export=1
@ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
@ block = word aligned, pixels = unaligned
- pld [r1]
- stmfd sp!, {r4-r5,lr} @ R14 is also called LR
- JMP_ALIGN r5, r1
+ pld [r1]
+ stmfd sp!, {r4-r5,lr} @ R14 is also called LR
+ JMP_ALIGN r5, r1
1:
- ldmia r1, {r4-r5}
- add r1, r1, r2
- subs r3, r3, #1
- pld [r1]
- stmia r0, {r4-r5}
- add r0, r0, r2
- bne 1b
- ldmfd sp!, {r4-r5,pc}
+ ldmia r1, {r4-r5}
+ add r1, r1, r2
+ subs r3, r3, #1
+ pld [r1]
+ stmia r0, {r4-r5}
+ add r0, r0, r2
+ bne 1b
+ ldmfd sp!, {r4-r5,pc}
.align 5
2:
- ldmia r1, {r4-r5, r12}
- add r1, r1, r2
- ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r12
- pld [r1]
- subs r3, r3, #1
- stmia r0, {r4-r5}
- add r0, r0, r2
- bne 2b
- ldmfd sp!, {r4-r5,pc}
+ ldmia r1, {r4-r5, r12}
+ add r1, r1, r2
+ ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r12
+ pld [r1]
+ subs r3, r3, #1
+ stmia r0, {r4-r5}
+ add r0, r0, r2
+ bne 2b
+ ldmfd sp!, {r4-r5,pc}
.align 5
3:
- ldmia r1, {r4-r5, r12}
- add r1, r1, r2
- ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r12
- pld [r1]
- subs r3, r3, #1
- stmia r0, {r4-r5}
- add r0, r0, r2
- bne 3b
- ldmfd sp!, {r4-r5,pc}
+ ldmia r1, {r4-r5, r12}
+ add r1, r1, r2
+ ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r12
+ pld [r1]
+ subs r3, r3, #1
+ stmia r0, {r4-r5}
+ add r0, r0, r2
+ bne 3b
+ ldmfd sp!, {r4-r5,pc}
.align 5
4:
- ldmia r1, {r4-r5, r12}
- add r1, r1, r2
- ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r12
- pld [r1]
- subs r3, r3, #1
- stmia r0, {r4-r5}
- add r0, r0, r2
- bne 4b
- ldmfd sp!, {r4-r5,pc}
+ ldmia r1, {r4-r5, r12}
+ add r1, r1, r2
+ ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r12
+ pld [r1]
+ subs r3, r3, #1
+ stmia r0, {r4-r5}
+ add r0, r0, r2
+ bne 4b
+ ldmfd sp!, {r4-r5,pc}
.endfunc
@ ----------------------------------------------------------------
@@ -210,118 +210,118 @@ function put_pixels8_arm, export=1
function put_pixels8_x2_arm, export=1
@ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
@ block = word aligned, pixels = unaligned
- pld [r1]
- stmfd sp!, {r4-r10,lr} @ R14 is also called LR
- ldr r12, =0xfefefefe
- JMP_ALIGN r5, r1
+ pld [r1]
+ stmfd sp!, {r4-r10,lr} @ R14 is also called LR
+ ldr r12, =0xfefefefe
+ JMP_ALIGN r5, r1
1:
- ldmia r1, {r4-r5, r10}
- add r1, r1, r2
- ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
- pld [r1]
- RND_AVG32 r8, r9, r4, r5, r6, r7, r12
- subs r3, r3, #1
- stmia r0, {r8-r9}
- add r0, r0, r2
- bne 1b
- ldmfd sp!, {r4-r10,pc}
+ ldmia r1, {r4-r5, r10}
+ add r1, r1, r2
+ ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
+ pld [r1]
+ RND_AVG32 r8, r9, r4, r5, r6, r7, r12
+ subs r3, r3, #1
+ stmia r0, {r8-r9}
+ add r0, r0, r2
+ bne 1b
+ ldmfd sp!, {r4-r10,pc}
.align 5
2:
- ldmia r1, {r4-r5, r10}
- add r1, r1, r2
- ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
- ADJ_ALIGN_DOUBLEWORD_D 2, r8, r9, r4, r5, r10
- pld [r1]
- RND_AVG32 r4, r5, r6, r7, r8, r9, r12
- subs r3, r3, #1
- stmia r0, {r4-r5}
- add r0, r0, r2
- bne 2b
- ldmfd sp!, {r4-r10,pc}
+ ldmia r1, {r4-r5, r10}
+ add r1, r1, r2
+ ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
+ ADJ_ALIGN_DOUBLEWORD_D 2, r8, r9, r4, r5, r10
+ pld [r1]
+ RND_AVG32 r4, r5, r6, r7, r8, r9, r12
+ subs r3, r3, #1
+ stmia r0, {r4-r5}
+ add r0, r0, r2
+ bne 2b
+ ldmfd sp!, {r4-r10,pc}
.align 5
3:
- ldmia r1, {r4-r5, r10}
- add r1, r1, r2
- ADJ_ALIGN_DOUBLEWORD_D 2, r6, r7, r4, r5, r10
- ADJ_ALIGN_DOUBLEWORD_D 3, r8, r9, r4, r5, r10
- pld [r1]
- RND_AVG32 r4, r5, r6, r7, r8, r9, r12
- subs r3, r3, #1
- stmia r0, {r4-r5}
- add r0, r0, r2
- bne 3b
- ldmfd sp!, {r4-r10,pc}
+ ldmia r1, {r4-r5, r10}
+ add r1, r1, r2
+ ADJ_ALIGN_DOUBLEWORD_D 2, r6, r7, r4, r5, r10
+ ADJ_ALIGN_DOUBLEWORD_D 3, r8, r9, r4, r5, r10
+ pld [r1]
+ RND_AVG32 r4, r5, r6, r7, r8, r9, r12
+ subs r3, r3, #1
+ stmia r0, {r4-r5}
+ add r0, r0, r2
+ bne 3b
+ ldmfd sp!, {r4-r10,pc}
.align 5
4:
- ldmia r1, {r4-r5, r10}
- add r1, r1, r2
- ADJ_ALIGN_DOUBLEWORD_D 3, r6, r7, r4, r5, r10
- pld [r1]
- RND_AVG32 r8, r9, r6, r7, r5, r10, r12
- subs r3, r3, #1
- stmia r0, {r8-r9}
- add r0, r0, r2
- bne 4b
- ldmfd sp!, {r4-r10,pc} @@ update PC with LR content.
+ ldmia r1, {r4-r5, r10}
+ add r1, r1, r2
+ ADJ_ALIGN_DOUBLEWORD_D 3, r6, r7, r4, r5, r10
+ pld [r1]
+ RND_AVG32 r8, r9, r6, r7, r5, r10, r12
+ subs r3, r3, #1
+ stmia r0, {r8-r9}
+ add r0, r0, r2
+ bne 4b
+ ldmfd sp!, {r4-r10,pc} @@ update PC with LR content.
.endfunc
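
(put_pixels8_x2 computes the rounded average of each pixel with its right-hand neighbour, i.e. half-pel interpolation in x; the alignment cases differ only in how the two byte-shifted views of the row are built before RND_AVG32. A C reference under the usual dsputil semantics:)

    #include <stdint.h>

    static void put_pixels8_x2_c(uint8_t *block, const uint8_t *pixels,
                                 int line_size, int h)
    {
        for (int i = 0; i < h; i++) {
            for (int j = 0; j < 8; j++)
                block[j] = (pixels[j] + pixels[j + 1] + 1) >> 1;
            block  += line_size;
            pixels += line_size;
        }
    }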
.align 5
function put_no_rnd_pixels8_x2_arm, export=1
@ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
@ block = word aligned, pixels = unaligned
- pld [r1]
- stmfd sp!, {r4-r10,lr} @ R14 is also called LR
- ldr r12, =0xfefefefe
- JMP_ALIGN r5, r1
+ pld [r1]
+ stmfd sp!, {r4-r10,lr} @ R14 is also called LR
+ ldr r12, =0xfefefefe
+ JMP_ALIGN r5, r1
1:
- ldmia r1, {r4-r5, r10}
- add r1, r1, r2
- ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
- pld [r1]
- NO_RND_AVG32 r8, r9, r4, r5, r6, r7, r12
- subs r3, r3, #1
- stmia r0, {r8-r9}
- add r0, r0, r2
- bne 1b
- ldmfd sp!, {r4-r10,pc}
+ ldmia r1, {r4-r5, r10}
+ add r1, r1, r2
+ ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
+ pld [r1]
+ NO_RND_AVG32 r8, r9, r4, r5, r6, r7, r12
+ subs r3, r3, #1
+ stmia r0, {r8-r9}
+ add r0, r0, r2
+ bne 1b
+ ldmfd sp!, {r4-r10,pc}
.align 5
2:
- ldmia r1, {r4-r5, r10}
- add r1, r1, r2
- ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
- ADJ_ALIGN_DOUBLEWORD_D 2, r8, r9, r4, r5, r10
- pld [r1]
- NO_RND_AVG32 r4, r5, r6, r7, r8, r9, r12
- subs r3, r3, #1
- stmia r0, {r4-r5}
- add r0, r0, r2
- bne 2b
- ldmfd sp!, {r4-r10,pc}
+ ldmia r1, {r4-r5, r10}
+ add r1, r1, r2
+ ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
+ ADJ_ALIGN_DOUBLEWORD_D 2, r8, r9, r4, r5, r10
+ pld [r1]
+ NO_RND_AVG32 r4, r5, r6, r7, r8, r9, r12
+ subs r3, r3, #1
+ stmia r0, {r4-r5}
+ add r0, r0, r2
+ bne 2b
+ ldmfd sp!, {r4-r10,pc}
.align 5
3:
- ldmia r1, {r4-r5, r10}
- add r1, r1, r2
- ADJ_ALIGN_DOUBLEWORD_D 2, r6, r7, r4, r5, r10
- ADJ_ALIGN_DOUBLEWORD_D 3, r8, r9, r4, r5, r10
- pld [r1]
- NO_RND_AVG32 r4, r5, r6, r7, r8, r9, r12
- subs r3, r3, #1
- stmia r0, {r4-r5}
- add r0, r0, r2
- bne 3b
- ldmfd sp!, {r4-r10,pc}
+ ldmia r1, {r4-r5, r10}
+ add r1, r1, r2
+ ADJ_ALIGN_DOUBLEWORD_D 2, r6, r7, r4, r5, r10
+ ADJ_ALIGN_DOUBLEWORD_D 3, r8, r9, r4, r5, r10
+ pld [r1]
+ NO_RND_AVG32 r4, r5, r6, r7, r8, r9, r12
+ subs r3, r3, #1
+ stmia r0, {r4-r5}
+ add r0, r0, r2
+ bne 3b
+ ldmfd sp!, {r4-r10,pc}
.align 5
4:
- ldmia r1, {r4-r5, r10}
- add r1, r1, r2
- ADJ_ALIGN_DOUBLEWORD_D 3, r6, r7, r4, r5, r10
- pld [r1]
- NO_RND_AVG32 r8, r9, r6, r7, r5, r10, r12
- subs r3, r3, #1
- stmia r0, {r8-r9}
- add r0, r0, r2
- bne 4b
- ldmfd sp!, {r4-r10,pc} @@ update PC with LR content.
+ ldmia r1, {r4-r5, r10}
+ add r1, r1, r2
+ ADJ_ALIGN_DOUBLEWORD_D 3, r6, r7, r4, r5, r10
+ pld [r1]
+ NO_RND_AVG32 r8, r9, r6, r7, r5, r10, r12
+ subs r3, r3, #1
+ stmia r0, {r8-r9}
+ add r0, r0, r2
+ bne 4b
+ ldmfd sp!, {r4-r10,pc} @@ update PC with LR content.
.endfunc
@@ -330,196 +330,196 @@ function put_no_rnd_pixels8_x2_arm, export=1
function put_pixels8_y2_arm, export=1
@ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
@ block = word aligned, pixels = unaligned
- pld [r1]
- stmfd sp!, {r4-r11,lr} @ R14 is also called LR
- mov r3, r3, lsr #1
- ldr r12, =0xfefefefe
- JMP_ALIGN r5, r1
+ pld [r1]
+ stmfd sp!, {r4-r11,lr} @ R14 is also called LR
+ mov r3, r3, lsr #1
+ ldr r12, =0xfefefefe
+ JMP_ALIGN r5, r1
1:
- ldmia r1, {r4-r5}
- add r1, r1, r2
-6: ldmia r1, {r6-r7}
- add r1, r1, r2
- pld [r1]
- RND_AVG32 r8, r9, r4, r5, r6, r7, r12
- ldmia r1, {r4-r5}
- add r1, r1, r2
- stmia r0, {r8-r9}
- add r0, r0, r2
- pld [r1]
- RND_AVG32 r8, r9, r6, r7, r4, r5, r12
- subs r3, r3, #1
- stmia r0, {r8-r9}
- add r0, r0, r2
- bne 6b
- ldmfd sp!, {r4-r11,pc}
+ ldmia r1, {r4-r5}
+ add r1, r1, r2
+6: ldmia r1, {r6-r7}
+ add r1, r1, r2
+ pld [r1]
+ RND_AVG32 r8, r9, r4, r5, r6, r7, r12
+ ldmia r1, {r4-r5}
+ add r1, r1, r2
+ stmia r0, {r8-r9}
+ add r0, r0, r2
+ pld [r1]
+ RND_AVG32 r8, r9, r6, r7, r4, r5, r12
+ subs r3, r3, #1
+ stmia r0, {r8-r9}
+ add r0, r0, r2
+ bne 6b
+ ldmfd sp!, {r4-r11,pc}
.align 5
2:
- ldmia r1, {r4-r6}
- add r1, r1, r2
- pld [r1]
- ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r6
-6: ldmia r1, {r7-r9}
- add r1, r1, r2
- pld [r1]
- ADJ_ALIGN_DOUBLEWORD 1, r7, r8, r9
- RND_AVG32 r10, r11, r4, r5, r7, r8, r12
- stmia r0, {r10-r11}
- add r0, r0, r2
- ldmia r1, {r4-r6}
- add r1, r1, r2
- pld [r1]
- ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r6
- subs r3, r3, #1
- RND_AVG32 r10, r11, r7, r8, r4, r5, r12
- stmia r0, {r10-r11}
- add r0, r0, r2
- bne 6b
- ldmfd sp!, {r4-r11,pc}
+ ldmia r1, {r4-r6}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r6
+6: ldmia r1, {r7-r9}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 1, r7, r8, r9
+ RND_AVG32 r10, r11, r4, r5, r7, r8, r12
+ stmia r0, {r10-r11}
+ add r0, r0, r2
+ ldmia r1, {r4-r6}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r6
+ subs r3, r3, #1
+ RND_AVG32 r10, r11, r7, r8, r4, r5, r12
+ stmia r0, {r10-r11}
+ add r0, r0, r2
+ bne 6b
+ ldmfd sp!, {r4-r11,pc}
.align 5
3:
- ldmia r1, {r4-r6}
- add r1, r1, r2
- pld [r1]
- ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r6
-6: ldmia r1, {r7-r9}
- add r1, r1, r2
- pld [r1]
- ADJ_ALIGN_DOUBLEWORD 2, r7, r8, r9
- RND_AVG32 r10, r11, r4, r5, r7, r8, r12
- stmia r0, {r10-r11}
- add r0, r0, r2
- ldmia r1, {r4-r6}
- add r1, r1, r2
- pld [r1]
- ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r6
- subs r3, r3, #1
- RND_AVG32 r10, r11, r7, r8, r4, r5, r12
- stmia r0, {r10-r11}
- add r0, r0, r2
- bne 6b
- ldmfd sp!, {r4-r11,pc}
+ ldmia r1, {r4-r6}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r6
+6: ldmia r1, {r7-r9}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 2, r7, r8, r9
+ RND_AVG32 r10, r11, r4, r5, r7, r8, r12
+ stmia r0, {r10-r11}
+ add r0, r0, r2
+ ldmia r1, {r4-r6}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r6
+ subs r3, r3, #1
+ RND_AVG32 r10, r11, r7, r8, r4, r5, r12
+ stmia r0, {r10-r11}
+ add r0, r0, r2
+ bne 6b
+ ldmfd sp!, {r4-r11,pc}
.align 5
4:
- ldmia r1, {r4-r6}
- add r1, r1, r2
- pld [r1]
- ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r6
-6: ldmia r1, {r7-r9}
- add r1, r1, r2
- pld [r1]
- ADJ_ALIGN_DOUBLEWORD 3, r7, r8, r9
- RND_AVG32 r10, r11, r4, r5, r7, r8, r12
- stmia r0, {r10-r11}
- add r0, r0, r2
- ldmia r1, {r4-r6}
- add r1, r1, r2
- pld [r1]
- ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r6
- subs r3, r3, #1
- RND_AVG32 r10, r11, r7, r8, r4, r5, r12
- stmia r0, {r10-r11}
- add r0, r0, r2
- bne 6b
- ldmfd sp!, {r4-r11,pc}
+ ldmia r1, {r4-r6}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r6
+6: ldmia r1, {r7-r9}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 3, r7, r8, r9
+ RND_AVG32 r10, r11, r4, r5, r7, r8, r12
+ stmia r0, {r10-r11}
+ add r0, r0, r2
+ ldmia r1, {r4-r6}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r6
+ subs r3, r3, #1
+ RND_AVG32 r10, r11, r7, r8, r4, r5, r12
+ stmia r0, {r10-r11}
+ add r0, r0, r2
+ bne 6b
+ ldmfd sp!, {r4-r11,pc}
.endfunc
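
(The y2 variant averages vertically adjacent rows. h is halved up front — mov r3, r3, lsr #1 — because each loop iteration emits two output rows, reusing the row it just loaded as one operand of the next average. C reference:)

    #include <stdint.h>

    static void put_pixels8_y2_c(uint8_t *block, const uint8_t *pixels,
                                 int line_size, int h)
    {
        for (int i = 0; i < h; i++) {
            for (int j = 0; j < 8; j++)
                block[j] = (pixels[j] + pixels[j + line_size] + 1) >> 1;
            block  += line_size;
            pixels += line_size;
        }
    }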
.align 5
function put_no_rnd_pixels8_y2_arm, export=1
@ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
@ block = word aligned, pixels = unaligned
- pld [r1]
- stmfd sp!, {r4-r11,lr} @ R14 is also called LR
- mov r3, r3, lsr #1
- ldr r12, =0xfefefefe
- JMP_ALIGN r5, r1
+ pld [r1]
+ stmfd sp!, {r4-r11,lr} @ R14 is also called LR
+ mov r3, r3, lsr #1
+ ldr r12, =0xfefefefe
+ JMP_ALIGN r5, r1
1:
- ldmia r1, {r4-r5}
- add r1, r1, r2
-6: ldmia r1, {r6-r7}
- add r1, r1, r2
- pld [r1]
- NO_RND_AVG32 r8, r9, r4, r5, r6, r7, r12
- ldmia r1, {r4-r5}
- add r1, r1, r2
- stmia r0, {r8-r9}
- add r0, r0, r2
- pld [r1]
- NO_RND_AVG32 r8, r9, r6, r7, r4, r5, r12
- subs r3, r3, #1
- stmia r0, {r8-r9}
- add r0, r0, r2
- bne 6b
- ldmfd sp!, {r4-r11,pc}
+ ldmia r1, {r4-r5}
+ add r1, r1, r2
+6: ldmia r1, {r6-r7}
+ add r1, r1, r2
+ pld [r1]
+ NO_RND_AVG32 r8, r9, r4, r5, r6, r7, r12
+ ldmia r1, {r4-r5}
+ add r1, r1, r2
+ stmia r0, {r8-r9}
+ add r0, r0, r2
+ pld [r1]
+ NO_RND_AVG32 r8, r9, r6, r7, r4, r5, r12
+ subs r3, r3, #1
+ stmia r0, {r8-r9}
+ add r0, r0, r2
+ bne 6b
+ ldmfd sp!, {r4-r11,pc}
.align 5
2:
- ldmia r1, {r4-r6}
- add r1, r1, r2
- pld [r1]
- ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r6
-6: ldmia r1, {r7-r9}
- add r1, r1, r2
- pld [r1]
- ADJ_ALIGN_DOUBLEWORD 1, r7, r8, r9
- NO_RND_AVG32 r10, r11, r4, r5, r7, r8, r12
- stmia r0, {r10-r11}
- add r0, r0, r2
- ldmia r1, {r4-r6}
- add r1, r1, r2
- pld [r1]
- ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r6
- subs r3, r3, #1
- NO_RND_AVG32 r10, r11, r7, r8, r4, r5, r12
- stmia r0, {r10-r11}
- add r0, r0, r2
- bne 6b
- ldmfd sp!, {r4-r11,pc}
+ ldmia r1, {r4-r6}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r6
+6: ldmia r1, {r7-r9}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 1, r7, r8, r9
+ NO_RND_AVG32 r10, r11, r4, r5, r7, r8, r12
+ stmia r0, {r10-r11}
+ add r0, r0, r2
+ ldmia r1, {r4-r6}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r6
+ subs r3, r3, #1
+ NO_RND_AVG32 r10, r11, r7, r8, r4, r5, r12
+ stmia r0, {r10-r11}
+ add r0, r0, r2
+ bne 6b
+ ldmfd sp!, {r4-r11,pc}
.align 5
3:
- ldmia r1, {r4-r6}
- add r1, r1, r2
- pld [r1]
- ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r6
-6: ldmia r1, {r7-r9}
- add r1, r1, r2
- pld [r1]
- ADJ_ALIGN_DOUBLEWORD 2, r7, r8, r9
- NO_RND_AVG32 r10, r11, r4, r5, r7, r8, r12
- stmia r0, {r10-r11}
- add r0, r0, r2
- ldmia r1, {r4-r6}
- add r1, r1, r2
- pld [r1]
- ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r6
- subs r3, r3, #1
- NO_RND_AVG32 r10, r11, r7, r8, r4, r5, r12
- stmia r0, {r10-r11}
- add r0, r0, r2
- bne 6b
- ldmfd sp!, {r4-r11,pc}
+ ldmia r1, {r4-r6}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r6
+6: ldmia r1, {r7-r9}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 2, r7, r8, r9
+ NO_RND_AVG32 r10, r11, r4, r5, r7, r8, r12
+ stmia r0, {r10-r11}
+ add r0, r0, r2
+ ldmia r1, {r4-r6}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r6
+ subs r3, r3, #1
+ NO_RND_AVG32 r10, r11, r7, r8, r4, r5, r12
+ stmia r0, {r10-r11}
+ add r0, r0, r2
+ bne 6b
+ ldmfd sp!, {r4-r11,pc}
.align 5
4:
- ldmia r1, {r4-r6}
- add r1, r1, r2
- pld [r1]
- ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r6
-6: ldmia r1, {r7-r9}
- add r1, r1, r2
- pld [r1]
- ADJ_ALIGN_DOUBLEWORD 3, r7, r8, r9
- NO_RND_AVG32 r10, r11, r4, r5, r7, r8, r12
- stmia r0, {r10-r11}
- add r0, r0, r2
- ldmia r1, {r4-r6}
- add r1, r1, r2
- pld [r1]
- ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r6
- subs r3, r3, #1
- NO_RND_AVG32 r10, r11, r7, r8, r4, r5, r12
- stmia r0, {r10-r11}
- add r0, r0, r2
- bne 6b
- ldmfd sp!, {r4-r11,pc}
+ ldmia r1, {r4-r6}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r6
+6: ldmia r1, {r7-r9}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 3, r7, r8, r9
+ NO_RND_AVG32 r10, r11, r4, r5, r7, r8, r12
+ stmia r0, {r10-r11}
+ add r0, r0, r2
+ ldmia r1, {r4-r6}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r6
+ subs r3, r3, #1
+ NO_RND_AVG32 r10, r11, r7, r8, r4, r5, r12
+ stmia r0, {r10-r11}
+ add r0, r0, r2
+ bne 6b
+ ldmfd sp!, {r4-r11,pc}
.endfunc
.ltorg
@@ -529,110 +529,110 @@ function put_no_rnd_pixels8_y2_arm, export=1
@ l1 = (a & 0x03030303) + (b & 0x03030303) (+ 0x02020202 when rounding)
@ h1= ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2)
.if \align == 0
- ldmia r1, {r6-r8}
+ ldmia r1, {r6-r8}
.elseif \align == 3
- ldmia r1, {r5-r7}
+ ldmia r1, {r5-r7}
.else
- ldmia r1, {r8-r10}
+ ldmia r1, {r8-r10}
.endif
- add r1, r1, r2
- pld [r1]
+ add r1, r1, r2
+ pld [r1]
.if \align == 0
- ADJ_ALIGN_DOUBLEWORD_D 1, r4, r5, r6, r7, r8
+ ADJ_ALIGN_DOUBLEWORD_D 1, r4, r5, r6, r7, r8
.elseif \align == 1
- ADJ_ALIGN_DOUBLEWORD_D 1, r4, r5, r8, r9, r10
- ADJ_ALIGN_DOUBLEWORD_D 2, r6, r7, r8, r9, r10
+ ADJ_ALIGN_DOUBLEWORD_D 1, r4, r5, r8, r9, r10
+ ADJ_ALIGN_DOUBLEWORD_D 2, r6, r7, r8, r9, r10
.elseif \align == 2
- ADJ_ALIGN_DOUBLEWORD_D 2, r4, r5, r8, r9, r10
- ADJ_ALIGN_DOUBLEWORD_D 3, r6, r7, r8, r9, r10
+ ADJ_ALIGN_DOUBLEWORD_D 2, r4, r5, r8, r9, r10
+ ADJ_ALIGN_DOUBLEWORD_D 3, r6, r7, r8, r9, r10
.elseif \align == 3
- ADJ_ALIGN_DOUBLEWORD_D 3, r4, r5, r5, r6, r7
+ ADJ_ALIGN_DOUBLEWORD_D 3, r4, r5, r5, r6, r7
.endif
- ldr r14, =0x03030303
- tst r3, #1
- and r8, r4, r14
- and r9, r5, r14
- and r10, r6, r14
- and r11, r7, r14
- andeq r14, r14, r14, \rnd #1
- add r8, r8, r10
- add r9, r9, r11
- ldr r12, =0xfcfcfcfc >> 2
- addeq r8, r8, r14
- addeq r9, r9, r14
- and r4, r12, r4, lsr #2
- and r5, r12, r5, lsr #2
- and r6, r12, r6, lsr #2
- and r7, r12, r7, lsr #2
- add r10, r4, r6
- add r11, r5, r7
- subs r3, r3, #1
+ ldr r14, =0x03030303
+ tst r3, #1
+ and r8, r4, r14
+ and r9, r5, r14
+ and r10, r6, r14
+ and r11, r7, r14
+ andeq r14, r14, r14, \rnd #1
+ add r8, r8, r10
+ add r9, r9, r11
+ ldr r12, =0xfcfcfcfc >> 2
+ addeq r8, r8, r14
+ addeq r9, r9, r14
+ and r4, r12, r4, lsr #2
+ and r5, r12, r5, lsr #2
+ and r6, r12, r6, lsr #2
+ and r7, r12, r7, lsr #2
+ add r10, r4, r6
+ add r11, r5, r7
+ subs r3, r3, #1
.endm
.macro RND_XY2_EXPAND align, rnd
- RND_XY2_IT \align, \rnd
-6: stmfd sp!, {r8-r11}
- RND_XY2_IT \align, \rnd
- ldmfd sp!, {r4-r7}
- add r4, r4, r8
- add r5, r5, r9
- ldr r14, =0x0f0f0f0f
- add r6, r6, r10
- add r7, r7, r11
- and r4, r14, r4, lsr #2
- and r5, r14, r5, lsr #2
- add r4, r4, r6
- add r5, r5, r7
- stmia r0, {r4-r5}
- add r0, r0, r2
- bge 6b
- ldmfd sp!, {r4-r11,pc}
+ RND_XY2_IT \align, \rnd
+6: stmfd sp!, {r8-r11}
+ RND_XY2_IT \align, \rnd
+ ldmfd sp!, {r4-r7}
+ add r4, r4, r8
+ add r5, r5, r9
+ ldr r14, =0x0f0f0f0f
+ add r6, r6, r10
+ add r7, r7, r11
+ and r4, r14, r4, lsr #2
+ and r5, r14, r5, lsr #2
+ add r4, r4, r6
+ add r5, r5, r7
+ stmia r0, {r4-r5}
+ add r0, r0, r2
+ bge 6b
+ ldmfd sp!, {r4-r11,pc}
.endm
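
(The xy2 case averages four neighbouring pixels per output byte. To sum four values per 8-bit lane without overflow, RND_XY2_IT splits each packed word into its low 2 bits (l1, mask 0x03030303) and high 6 bits (h1, shifted down 2), and RND_XY2_EXPAND recombines them. The \rnd argument is lsl or lsr, so "andeq r14, r14, r14, \rnd #1" turns 0x03030303 into 0x02020202 (rounding) or 0x01010101 (no rounding). A condensed C model of the arithmetic, not a line-for-line transcription:)

    #include <stdint.h>

    /* Lane-safe (a + b + c + d + rnd) >> 2 on packed bytes;
       rnd is 0x02020202 (rounded) or 0x01010101 (no_rnd). */
    static uint32_t avg4_xy2(uint32_t a, uint32_t b, uint32_t c, uint32_t d,
                             uint32_t rnd)
    {
        uint32_t lo = (a & 0x03030303u) + (b & 0x03030303u)
                    + (c & 0x03030303u) + (d & 0x03030303u) + rnd;
        uint32_t hi = ((a & 0xFCFCFCFCu) >> 2) + ((b & 0xFCFCFCFCu) >> 2)
                    + ((c & 0xFCFCFCFCu) >> 2) + ((d & 0xFCFCFCFCu) >> 2);
        return hi + ((lo >> 2) & 0x0F0F0F0Fu);   /* ldr r14, =0x0f0f0f0f */
    }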
.align 5
function put_pixels8_xy2_arm, export=1
@ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
@ block = word aligned, pixels = unaligned
- pld [r1]
- stmfd sp!, {r4-r11,lr} @ R14 is also called LR
- JMP_ALIGN r5, r1
+ pld [r1]
+ stmfd sp!, {r4-r11,lr} @ R14 is also called LR
+ JMP_ALIGN r5, r1
1:
- RND_XY2_EXPAND 0, lsl
+ RND_XY2_EXPAND 0, lsl
.align 5
2:
- RND_XY2_EXPAND 1, lsl
+ RND_XY2_EXPAND 1, lsl
.align 5
3:
- RND_XY2_EXPAND 2, lsl
+ RND_XY2_EXPAND 2, lsl
.align 5
4:
- RND_XY2_EXPAND 3, lsl
+ RND_XY2_EXPAND 3, lsl
.endfunc
.align 5
function put_no_rnd_pixels8_xy2_arm, export=1
@ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
@ block = word aligned, pixels = unaligned
- pld [r1]
- stmfd sp!, {r4-r11,lr} @ R14 is also called LR
- JMP_ALIGN r5, r1
+ pld [r1]
+ stmfd sp!, {r4-r11,lr} @ R14 is also called LR
+ JMP_ALIGN r5, r1
1:
- RND_XY2_EXPAND 0, lsr
+ RND_XY2_EXPAND 0, lsr
.align 5
2:
- RND_XY2_EXPAND 1, lsr
+ RND_XY2_EXPAND 1, lsr
.align 5
3:
- RND_XY2_EXPAND 2, lsr
+ RND_XY2_EXPAND 2, lsr
.align 5
4:
- RND_XY2_EXPAND 3, lsr
+ RND_XY2_EXPAND 3, lsr
.endfunc
.align 5
@@ -657,23 +657,23 @@ function ff_add_pixels_clamped_ARM, export=1
movne r8, r7, lsr #24
mov r9, r6
ldrsh r5, [r0, #4] /* moved from [A] */
- orr r9, r9, r8, lsl #8
+ orr r9, r9, r8, lsl #8
/* block[2] and block[3] */
/* [A] */
ldrsh r7, [r0, #6]
and r6, r4, #0xFF0000
and r8, r4, #0xFF000000
- add r6, r5, r6, lsr #16
- add r8, r7, r8, lsr #24
+ add r6, r5, r6, lsr #16
+ add r8, r7, r8, lsr #24
mvn r5, r5
mvn r7, r7
tst r6, #0x100
movne r6, r5, lsr #24
tst r8, #0x100
movne r8, r7, lsr #24
- orr r9, r9, r6, lsl #16
+ orr r9, r9, r6, lsl #16
ldr r4, [r1, #4] /* moved from [B] */
- orr r9, r9, r8, lsl #24
+ orr r9, r9, r8, lsl #24
/* store dest */
ldrsh r5, [r0, #8] /* moved from [C] */
str r9, [r1]
@@ -686,7 +686,7 @@ function ff_add_pixels_clamped_ARM, export=1
and r6, r4, #0xFF
and r8, r4, #0xFF00
add r6, r5, r6
- add r8, r7, r8, lsr #8
+ add r8, r7, r8, lsr #8
mvn r5, r5
mvn r7, r7
tst r6, #0x100
@@ -695,23 +695,23 @@ function ff_add_pixels_clamped_ARM, export=1
movne r8, r7, lsr #24
mov r9, r6
ldrsh r5, [r0, #12] /* moved from [D] */
- orr r9, r9, r8, lsl #8
+ orr r9, r9, r8, lsl #8
/* block[6] and block[7] */
/* [D] */
ldrsh r7, [r0, #14]
and r6, r4, #0xFF0000
and r8, r4, #0xFF000000
- add r6, r5, r6, lsr #16
- add r8, r7, r8, lsr #24
+ add r6, r5, r6, lsr #16
+ add r8, r7, r8, lsr #24
mvn r5, r5
mvn r7, r7
tst r6, #0x100
movne r6, r5, lsr #24
tst r8, #0x100
movne r8, r7, lsr #24
- orr r9, r9, r6, lsl #16
+ orr r9, r9, r6, lsl #16
add r0, r0, #16 /* moved from [E] */
- orr r9, r9, r8, lsl #24
+ orr r9, r9, r8, lsl #24
subs r10, r10, #1 /* moved from [F] */
/* store dest */
str r9, [r1, #4]
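
(The hunks above show ff_add_pixels_clamped's branch-free saturation: add a signed coefficient to a pixel byte, test bit 8 of the sum, and on overflow substitute (~coefficient) >> 24, which yields 0x00 for underflow and 0xFF for overflow. A C sketch of the idiom, helper name ours; it assumes the sum stays within [-256, 511], the range where the 9-bit test is conclusive:)

    #include <stdint.h>

    static uint8_t add_clamp(int16_t block, uint8_t pixel)
    {
        uint32_t sum = (uint32_t)(int32_t)block + pixel;  /* add r6, r5, r6      */
        if (sum & 0x100)                                  /* tst r6, #0x100      */
            sum = (uint32_t)~(int32_t)block >> 24;        /* movne r6, r5, lsr #24 */
        return (uint8_t)sum;
    }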