author    Diego Biurrun <diego@biurrun.de>    2014-01-15 18:27:47 +0100
committer Diego Biurrun <diego@biurrun.de>    2014-03-20 05:03:22 -0700
commit    022184a646896c6b27c6cca387039b030685266e (patch)
tree      3ba909e9839ab91ad62fbea5fe8e966f319ccd55 /libavcodec/ppc/idct_altivec.c
parent    30f3f959879eee7890973e8cc9ce076450ced111 (diff)
ppc: dsputil: more K&R formatting cosmetics
Diffstat (limited to 'libavcodec/ppc/idct_altivec.c')
-rw-r--r--  libavcodec/ppc/idct_altivec.c | 295
1 file changed, 154 insertions(+), 141 deletions(-)
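
The change is purely cosmetic. For reference, a minimal before/after sketch (illustrative, not taken verbatim from the patch) of the K&R spacing rules this series enforces: no blank between a function name and its argument list, a space separating a cast from its operand, and padded initializer braces:

    /* Before: space between the function name and its argument list,
     * tight cast, unpadded initializer braces. */
    int scale (int a, int b);
    static const int tab[4] = {1, 2, 3, 4};
    int *p = (int*)q;

    /* After: K&R spacing as applied by this patch. */
    int scale(int a, int b);
    static const int tab[4] = { 1, 2, 3, 4 };
    int *p = (int *) q;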
diff --git a/libavcodec/ppc/idct_altivec.c b/libavcodec/ppc/idct_altivec.c
index f6027897b7..9787e31c85 100644
--- a/libavcodec/ppc/idct_altivec.c
+++ b/libavcodec/ppc/idct_altivec.c
@@ -36,147 +36,153 @@
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif
+
#include "libavutil/ppc/types_altivec.h"
#include "dsputil_altivec.h"
#define IDCT_HALF \
/* 1st stage */ \
- t1 = vec_mradds (a1, vx7, vx1 ); \
- t8 = vec_mradds (a1, vx1, vec_subs (zero, vx7)); \
- t7 = vec_mradds (a2, vx5, vx3); \
- t3 = vec_mradds (ma2, vx3, vx5); \
+ t1 = vec_mradds(a1, vx7, vx1); \
+ t8 = vec_mradds(a1, vx1, vec_subs(zero, vx7)); \
+ t7 = vec_mradds(a2, vx5, vx3); \
+ t3 = vec_mradds(ma2, vx3, vx5); \
\
/* 2nd stage */ \
- t5 = vec_adds (vx0, vx4); \
- t0 = vec_subs (vx0, vx4); \
- t2 = vec_mradds (a0, vx6, vx2); \
- t4 = vec_mradds (a0, vx2, vec_subs (zero, vx6)); \
- t6 = vec_adds (t8, t3); \
- t3 = vec_subs (t8, t3); \
- t8 = vec_subs (t1, t7); \
- t1 = vec_adds (t1, t7); \
+ t5 = vec_adds(vx0, vx4); \
+ t0 = vec_subs(vx0, vx4); \
+ t2 = vec_mradds(a0, vx6, vx2); \
+ t4 = vec_mradds(a0, vx2, vec_subs(zero, vx6)); \
+ t6 = vec_adds(t8, t3); \
+ t3 = vec_subs(t8, t3); \
+ t8 = vec_subs(t1, t7); \
+ t1 = vec_adds(t1, t7); \
\
/* 3rd stage */ \
- t7 = vec_adds (t5, t2); \
- t2 = vec_subs (t5, t2); \
- t5 = vec_adds (t0, t4); \
- t0 = vec_subs (t0, t4); \
- t4 = vec_subs (t8, t3); \
- t3 = vec_adds (t8, t3); \
+ t7 = vec_adds(t5, t2); \
+ t2 = vec_subs(t5, t2); \
+ t5 = vec_adds(t0, t4); \
+ t0 = vec_subs(t0, t4); \
+ t4 = vec_subs(t8, t3); \
+ t3 = vec_adds(t8, t3); \
\
/* 4th stage */ \
- vy0 = vec_adds (t7, t1); \
- vy7 = vec_subs (t7, t1); \
- vy1 = vec_mradds (c4, t3, t5); \
- vy6 = vec_mradds (mc4, t3, t5); \
- vy2 = vec_mradds (c4, t4, t0); \
- vy5 = vec_mradds (mc4, t4, t0); \
- vy3 = vec_adds (t2, t6); \
- vy4 = vec_subs (t2, t6);
-
-
-#define IDCT \
- vec_s16 vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7; \
- vec_s16 vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7; \
- vec_s16 a0, a1, a2, ma2, c4, mc4, zero, bias; \
- vec_s16 t0, t1, t2, t3, t4, t5, t6, t7, t8; \
- vec_u16 shift; \
- \
- c4 = vec_splat (constants[0], 0); \
- a0 = vec_splat (constants[0], 1); \
- a1 = vec_splat (constants[0], 2); \
- a2 = vec_splat (constants[0], 3); \
- mc4 = vec_splat (constants[0], 4); \
- ma2 = vec_splat (constants[0], 5); \
- bias = (vec_s16)vec_splat ((vec_s32)constants[0], 3); \
- \
- zero = vec_splat_s16 (0); \
- shift = vec_splat_u16 (4); \
- \
- vx0 = vec_mradds (vec_sl (block[0], shift), constants[1], zero); \
- vx1 = vec_mradds (vec_sl (block[1], shift), constants[2], zero); \
- vx2 = vec_mradds (vec_sl (block[2], shift), constants[3], zero); \
- vx3 = vec_mradds (vec_sl (block[3], shift), constants[4], zero); \
- vx4 = vec_mradds (vec_sl (block[4], shift), constants[1], zero); \
- vx5 = vec_mradds (vec_sl (block[5], shift), constants[4], zero); \
- vx6 = vec_mradds (vec_sl (block[6], shift), constants[3], zero); \
- vx7 = vec_mradds (vec_sl (block[7], shift), constants[2], zero); \
- \
- IDCT_HALF \
- \
- vx0 = vec_mergeh (vy0, vy4); \
- vx1 = vec_mergel (vy0, vy4); \
- vx2 = vec_mergeh (vy1, vy5); \
- vx3 = vec_mergel (vy1, vy5); \
- vx4 = vec_mergeh (vy2, vy6); \
- vx5 = vec_mergel (vy2, vy6); \
- vx6 = vec_mergeh (vy3, vy7); \
- vx7 = vec_mergel (vy3, vy7); \
- \
- vy0 = vec_mergeh (vx0, vx4); \
- vy1 = vec_mergel (vx0, vx4); \
- vy2 = vec_mergeh (vx1, vx5); \
- vy3 = vec_mergel (vx1, vx5); \
- vy4 = vec_mergeh (vx2, vx6); \
- vy5 = vec_mergel (vx2, vx6); \
- vy6 = vec_mergeh (vx3, vx7); \
- vy7 = vec_mergel (vx3, vx7); \
- \
- vx0 = vec_adds (vec_mergeh (vy0, vy4), bias); \
- vx1 = vec_mergel (vy0, vy4); \
- vx2 = vec_mergeh (vy1, vy5); \
- vx3 = vec_mergel (vy1, vy5); \
- vx4 = vec_mergeh (vy2, vy6); \
- vx5 = vec_mergel (vy2, vy6); \
- vx6 = vec_mergeh (vy3, vy7); \
- vx7 = vec_mergel (vy3, vy7); \
- \
- IDCT_HALF \
- \
- shift = vec_splat_u16 (6); \
- vx0 = vec_sra (vy0, shift); \
- vx1 = vec_sra (vy1, shift); \
- vx2 = vec_sra (vy2, shift); \
- vx3 = vec_sra (vy3, shift); \
- vx4 = vec_sra (vy4, shift); \
- vx5 = vec_sra (vy5, shift); \
- vx6 = vec_sra (vy6, shift); \
- vx7 = vec_sra (vy7, shift);
-
+ vy0 = vec_adds(t7, t1); \
+ vy7 = vec_subs(t7, t1); \
+ vy1 = vec_mradds(c4, t3, t5); \
+ vy6 = vec_mradds(mc4, t3, t5); \
+ vy2 = vec_mradds(c4, t4, t0); \
+ vy5 = vec_mradds(mc4, t4, t0); \
+ vy3 = vec_adds(t2, t6); \
+ vy4 = vec_subs(t2, t6);
+
+#define IDCT \
+ vec_s16 vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7; \
+ vec_s16 vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7; \
+ vec_s16 a0, a1, a2, ma2, c4, mc4, zero, bias; \
+ vec_s16 t0, t1, t2, t3, t4, t5, t6, t7, t8; \
+ vec_u16 shift; \
+ \
+ c4 = vec_splat(constants[0], 0); \
+ a0 = vec_splat(constants[0], 1); \
+ a1 = vec_splat(constants[0], 2); \
+ a2 = vec_splat(constants[0], 3); \
+ mc4 = vec_splat(constants[0], 4); \
+ ma2 = vec_splat(constants[0], 5); \
+ bias = (vec_s16) vec_splat((vec_s32) constants[0], 3); \
+ \
+ zero = vec_splat_s16(0); \
+ shift = vec_splat_u16(4); \
+ \
+ vx0 = vec_mradds(vec_sl(block[0], shift), constants[1], zero); \
+ vx1 = vec_mradds(vec_sl(block[1], shift), constants[2], zero); \
+ vx2 = vec_mradds(vec_sl(block[2], shift), constants[3], zero); \
+ vx3 = vec_mradds(vec_sl(block[3], shift), constants[4], zero); \
+ vx4 = vec_mradds(vec_sl(block[4], shift), constants[1], zero); \
+ vx5 = vec_mradds(vec_sl(block[5], shift), constants[4], zero); \
+ vx6 = vec_mradds(vec_sl(block[6], shift), constants[3], zero); \
+ vx7 = vec_mradds(vec_sl(block[7], shift), constants[2], zero); \
+ \
+ IDCT_HALF \
+ \
+ vx0 = vec_mergeh(vy0, vy4); \
+ vx1 = vec_mergel(vy0, vy4); \
+ vx2 = vec_mergeh(vy1, vy5); \
+ vx3 = vec_mergel(vy1, vy5); \
+ vx4 = vec_mergeh(vy2, vy6); \
+ vx5 = vec_mergel(vy2, vy6); \
+ vx6 = vec_mergeh(vy3, vy7); \
+ vx7 = vec_mergel(vy3, vy7); \
+ \
+ vy0 = vec_mergeh(vx0, vx4); \
+ vy1 = vec_mergel(vx0, vx4); \
+ vy2 = vec_mergeh(vx1, vx5); \
+ vy3 = vec_mergel(vx1, vx5); \
+ vy4 = vec_mergeh(vx2, vx6); \
+ vy5 = vec_mergel(vx2, vx6); \
+ vy6 = vec_mergeh(vx3, vx7); \
+ vy7 = vec_mergel(vx3, vx7); \
+ \
+ vx0 = vec_adds(vec_mergeh(vy0, vy4), bias); \
+ vx1 = vec_mergel(vy0, vy4); \
+ vx2 = vec_mergeh(vy1, vy5); \
+ vx3 = vec_mergel(vy1, vy5); \
+ vx4 = vec_mergeh(vy2, vy6); \
+ vx5 = vec_mergel(vy2, vy6); \
+ vx6 = vec_mergeh(vy3, vy7); \
+ vx7 = vec_mergel(vy3, vy7); \
+ \
+ IDCT_HALF \
+ \
+ shift = vec_splat_u16(6); \
+ vx0 = vec_sra(vy0, shift); \
+ vx1 = vec_sra(vy1, shift); \
+ vx2 = vec_sra(vy2, shift); \
+ vx3 = vec_sra(vy3, shift); \
+ vx4 = vec_sra(vy4, shift); \
+ vx5 = vec_sra(vy5, shift); \
+ vx6 = vec_sra(vy6, shift); \
+ vx7 = vec_sra(vy7, shift);
static const vec_s16 constants[5] = {
- {23170, 13573, 6518, 21895, -23170, -21895, 32, 31},
- {16384, 22725, 21407, 19266, 16384, 19266, 21407, 22725},
- {22725, 31521, 29692, 26722, 22725, 26722, 29692, 31521},
- {21407, 29692, 27969, 25172, 21407, 25172, 27969, 29692},
- {19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722}
+ { 23170, 13573, 6518, 21895, -23170, -21895, 32, 31 },
+ { 16384, 22725, 21407, 19266, 16384, 19266, 21407, 22725 },
+ { 22725, 31521, 29692, 26722, 22725, 26722, 29692, 31521 },
+ { 21407, 29692, 27969, 25172, 21407, 25172, 27969, 29692 },
+ { 19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722 }
};
-void ff_idct_put_altivec(uint8_t* dest, int stride, int16_t *blk)
+void ff_idct_put_altivec(uint8_t *dest, int stride, int16_t *blk)
{
- vec_s16 *block = (vec_s16*)blk;
+ vec_s16 *block = (vec_s16 *) blk;
vec_u8 tmp;
IDCT
-#define COPY(dest,src) \
- tmp = vec_packsu (src, src); \
- vec_ste ((vec_u32)tmp, 0, (unsigned int *)dest); \
- vec_ste ((vec_u32)tmp, 4, (unsigned int *)dest);
-
- COPY (dest, vx0) dest += stride;
- COPY (dest, vx1) dest += stride;
- COPY (dest, vx2) dest += stride;
- COPY (dest, vx3) dest += stride;
- COPY (dest, vx4) dest += stride;
- COPY (dest, vx5) dest += stride;
- COPY (dest, vx6) dest += stride;
- COPY (dest, vx7)
+#define COPY(dest, src) \
+ tmp = vec_packsu(src, src); \
+ vec_ste((vec_u32) tmp, 0, (unsigned int *) dest); \
+ vec_ste((vec_u32) tmp, 4, (unsigned int *) dest);
+
+ COPY(dest, vx0)
+ dest += stride;
+ COPY(dest, vx1)
+ dest += stride;
+ COPY(dest, vx2)
+ dest += stride;
+ COPY(dest, vx3)
+ dest += stride;
+ COPY(dest, vx4)
+ dest += stride;
+ COPY(dest, vx5)
+ dest += stride;
+ COPY(dest, vx6)
+ dest += stride;
+ COPY(dest, vx7)
}
-void ff_idct_add_altivec(uint8_t* dest, int stride, int16_t *blk)
+void ff_idct_add_altivec(uint8_t *dest, int stride, int16_t *blk)
{
- vec_s16 *block = (vec_s16*)blk;
+ vec_s16 *block = (vec_s16 *) blk;
vec_u8 tmp;
vec_s16 tmp2, tmp3;
vec_u8 perm0;
@@ -185,27 +191,34 @@ void ff_idct_add_altivec(uint8_t* dest, int stride, int16_t *blk)
IDCT
- p0 = vec_lvsl (0, dest);
- p1 = vec_lvsl (stride, dest);
- p = vec_splat_u8 (-1);
- perm0 = vec_mergeh (p, p0);
- perm1 = vec_mergeh (p, p1);
-
-#define ADD(dest,src,perm) \
- /* *(uint64_t *)&tmp = *(uint64_t *)dest; */ \
- tmp = vec_ld (0, dest); \
- tmp2 = (vec_s16)vec_perm (tmp, (vec_u8)zero, perm); \
- tmp3 = vec_adds (tmp2, src); \
- tmp = vec_packsu (tmp3, tmp3); \
- vec_ste ((vec_u32)tmp, 0, (unsigned int *)dest); \
- vec_ste ((vec_u32)tmp, 4, (unsigned int *)dest);
-
- ADD (dest, vx0, perm0) dest += stride;
- ADD (dest, vx1, perm1) dest += stride;
- ADD (dest, vx2, perm0) dest += stride;
- ADD (dest, vx3, perm1) dest += stride;
- ADD (dest, vx4, perm0) dest += stride;
- ADD (dest, vx5, perm1) dest += stride;
- ADD (dest, vx6, perm0) dest += stride;
- ADD (dest, vx7, perm1)
+ p0 = vec_lvsl(0, dest);
+ p1 = vec_lvsl(stride, dest);
+ p = vec_splat_u8(-1);
+ perm0 = vec_mergeh(p, p0);
+ perm1 = vec_mergeh(p, p1);
+
+#define ADD(dest, src, perm) \
+ /* *(uint64_t *) &tmp = *(uint64_t *) dest; */ \
+ tmp = vec_ld(0, dest); \
+ tmp2 = (vec_s16) vec_perm(tmp, (vec_u8) zero, perm); \
+ tmp3 = vec_adds(tmp2, src); \
+ tmp = vec_packsu(tmp3, tmp3); \
+ vec_ste((vec_u32) tmp, 0, (unsigned int *) dest); \
+ vec_ste((vec_u32) tmp, 4, (unsigned int *) dest);
+
+ ADD(dest, vx0, perm0)
+ dest += stride;
+ ADD(dest, vx1, perm1)
+ dest += stride;
+ ADD(dest, vx2, perm0)
+ dest += stride;
+ ADD(dest, vx3, perm1)
+ dest += stride;
+ ADD(dest, vx4, perm0)
+ dest += stride;
+ ADD(dest, vx5, perm1)
+ dest += stride;
+ ADD(dest, vx6, perm0)
+ dest += stride;
+ ADD(dest, vx7, perm1)
}
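
Much of the code above leans on vec_mradds (the vmhraddshs instruction): a fused multiply-high with rounding followed by a saturating add, which is what lets the IDCT stay in 16-bit lanes throughout. A scalar model of one lane, as a hedged sketch (the helper name mradds_scalar is ours, not from the file):

    #include <stdint.h>

    /* One lane of vec_mradds(a, b, c): compute (a * b + 0x4000) >> 15,
     * then add c, saturating the result to the int16_t range. */
    static int16_t mradds_scalar(int16_t a, int16_t b, int16_t c)
    {
        int32_t prod = ((int32_t) a * b + 0x4000) >> 15; /* round, keep high bits */
        int32_t sum  = prod + c;
        if (sum >  32767) sum =  32767;                  /* saturate high */
        if (sum < -32768) sum = -32768;                  /* saturate low */
        return (int16_t) sum;
    }

Likewise, the three rounds of vec_mergeh/vec_mergel between the two IDCT_HALF invocations implement an 8x8 transpose of the 16-bit block, so the same one-dimensional IDCT kernel serves first the columns and then the rows.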