author    | Diego Biurrun <diego@biurrun.de> | 2014-01-15 18:27:47 +0100
committer | Diego Biurrun <diego@biurrun.de> | 2014-03-20 05:03:22 -0700
commit    | 022184a646896c6b27c6cca387039b030685266e (patch)
tree      | 3ba909e9839ab91ad62fbea5fe8e966f319ccd55 /libavcodec/ppc/gmc_altivec.c
parent    | 30f3f959879eee7890973e8cc9ce076450ced111 (diff)
download  | ffmpeg-022184a646896c6b27c6cca387039b030685266e.tar.gz
ppc: dsputil: more K&R formatting cosmetics
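The change is purely cosmetic: it brings the file in line with the K&R formatting used elsewhere in libavcodec, i.e. spaces around binary operators and after casts, single-statement branches unbraced, and long declarations and initializers wrapped and re-indented. A minimal before/after illustration of those conventions follows; the helper names are hypothetical and written for this note, not taken from the patch.

/* Old style (hypothetical helper, not FFmpeg code): tight operator
 * spacing, cast glued to its operand, braces around a
 * single-statement branch. */
static unsigned long odd_weight_old(const unsigned char *p, int x16, int y16)
{
    unsigned long odd = (unsigned long)p & 0x0000000F;
    if (odd) {
        return (16-x16)*(16-y16);
    }
    return 0;
}

/* K&R style as applied by the patch: spaces around operators and
 * after the cast, the single-statement branch left unbraced. */
static unsigned long odd_weight_new(const unsigned char *p, int x16, int y16)
{
    unsigned long odd = (unsigned long) p & 0x0000000F;
    if (odd)
        return (16 - x16) * (16 - y16);
    return 0;
}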
Diffstat (limited to 'libavcodec/ppc/gmc_altivec.c')
-rw-r--r-- | libavcodec/ppc/gmc_altivec.c | 83
1 file changed, 42 insertions, 41 deletions
diff --git a/libavcodec/ppc/gmc_altivec.c b/libavcodec/ppc/gmc_altivec.c
index e156e818bb..a8ce8b302a 100644
--- a/libavcodec/ppc/gmc_altivec.c
+++ b/libavcodec/ppc/gmc_altivec.c
@@ -27,32 +27,36 @@
 /* AltiVec-enhanced gmc1. ATM this code assumes stride is a multiple of 8
  * to preserve proper dst alignment. */
-void ff_gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */, int stride, int h, int x16, int y16, int rounder)
+void ff_gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */,
+                     int stride, int h, int x16, int y16, int rounder)
 {
     const DECLARE_ALIGNED(16, unsigned short, rounder_a) = rounder;
-    const DECLARE_ALIGNED(16, unsigned short, ABCD)[8] =
-        {
-            (16-x16)*(16-y16), /* A */
-            (   x16)*(16-y16), /* B */
-            (16-x16)*(   y16), /* C */
-            (   x16)*(   y16), /* D */
-            0, 0, 0, 0         /* padding */
-        };
-    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
-    register const vector unsigned short vcsr8 = (const vector unsigned short)vec_splat_u16(8);
-    register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD;
-    register vector unsigned short Av, Bv, Cv, Dv, rounderV, tempA, tempB, tempC, tempD;
+    const DECLARE_ALIGNED(16, unsigned short, ABCD)[8] = {
+        (16 - x16) * (16 - y16), /* A */
+        (x16) * (16 - y16),      /* B */
+        (16 - x16) * (y16),      /* C */
+        (x16) * (y16),           /* D */
+        0, 0, 0, 0               /* padding */
+    };
+    register const vector unsigned char vczero =
+        (const vector unsigned char) vec_splat_u8(0);
+    register const vector unsigned short vcsr8 =
+        (const vector unsigned short) vec_splat_u16(8);
+    register vector unsigned char dstv, dstv2, src_0, src_1,
+                                  srcvA, srcvB, srcvC, srcvD;
+    register vector unsigned short Av, Bv, Cv, Dv, rounderV,
+                                   tempA, tempB, tempC, tempD;
     int i;
-    unsigned long dst_odd = (unsigned long)dst & 0x0000000F;
-    unsigned long src_really_odd = (unsigned long)src & 0x0000000F;
+    unsigned long dst_odd        = (unsigned long) dst & 0x0000000F;
+    unsigned long src_really_odd = (unsigned long) src & 0x0000000F;

-    tempA = vec_ld(0, (const unsigned short*)ABCD);
-    Av = vec_splat(tempA, 0);
-    Bv = vec_splat(tempA, 1);
-    Cv = vec_splat(tempA, 2);
-    Dv = vec_splat(tempA, 3);
+    tempA = vec_ld(0, (const unsigned short *) ABCD);
+    Av    = vec_splat(tempA, 0);
+    Bv    = vec_splat(tempA, 1);
+    Cv    = vec_splat(tempA, 2);
+    Dv    = vec_splat(tempA, 3);

-    rounderV = vec_splat((vec_u16)vec_lde(0, &rounder_a), 0);
+    rounderV = vec_splat((vec_u16) vec_lde(0, &rounder_a), 0);

     /* we'll be able to pick-up our 9 char elements at src from those
      * 32 bytes we load the first batch here, as inside the loop we can
@@ -61,36 +65,34 @@ void ff_gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */, int
     src_1 = vec_ld(16, src);
     srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src));

-    if (src_really_odd != 0x0000000F) {
+    if (src_really_odd != 0x0000000F)
         /* If src & 0xF == 0xF, then (src + 1) is properly aligned
          * on the second vector. */
         srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
-    } else {
+    else
         srcvB = src_1;
-    }
     srcvA = vec_mergeh(vczero, srcvA);
     srcvB = vec_mergeh(vczero, srcvB);

-    for(i=0; i<h; i++) {
-        dst_odd = (unsigned long)dst & 0x0000000F;
-        src_really_odd = (((unsigned long)src) + stride) & 0x0000000F;
+    for (i = 0; i < h; i++) {
+        dst_odd        = (unsigned long) dst & 0x0000000F;
+        src_really_odd = (((unsigned long) src) + stride) & 0x0000000F;
         dstv = vec_ld(0, dst);

         /* We'll be able to pick-up our 9 char elements at src + stride from
          * those 32 bytes then reuse the resulting 2 vectors srvcC and srcvD
          * as the next srcvA and srcvB. */
-        src_0 = vec_ld(stride + 0, src);
+        src_0 = vec_ld(stride +  0, src);
         src_1 = vec_ld(stride + 16, src);
         srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));

-        if (src_really_odd != 0x0000000F) {
+        if (src_really_odd != 0x0000000F)
             /* If src & 0xF == 0xF, then (src + 1) is properly aligned
              * on the second vector. */
             srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
-        } else {
+        else
             srcvD = src_1;
-        }

         srcvC = vec_mergeh(vczero, srcvC);
         srcvD = vec_mergeh(vczero, srcvD);
@@ -98,23 +100,22 @@ void ff_gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */, int
         /* OK, now we (finally) do the math :-)
          * Those four instructions replace 32 int muls & 32 int adds.
          * Isn't AltiVec nice? */
-        tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV);
-        tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA);
-        tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB);
-        tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC);
+        tempA = vec_mladd((vector unsigned short) srcvA, Av, rounderV);
+        tempB = vec_mladd((vector unsigned short) srcvB, Bv, tempA);
+        tempC = vec_mladd((vector unsigned short) srcvC, Cv, tempB);
+        tempD = vec_mladd((vector unsigned short) srcvD, Dv, tempC);

         srcvA = srcvC;
         srcvB = srcvD;

         tempD = vec_sr(tempD, vcsr8);

-        dstv2 = vec_pack(tempD, (vector unsigned short)vczero);
+        dstv2 = vec_pack(tempD, (vector unsigned short) vczero);

-        if (dst_odd) {
-            dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1));
-        } else {
-            dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3));
-        }
+        if (dst_odd)
+            dstv2 = vec_perm(dstv, dstv2, vcprm(0, 1, s0, s1));
+        else
+            dstv2 = vec_perm(dstv, dstv2, vcprm(s0, s1, 2, 3));

         vec_st(dstv2, 0, dst);
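For reference, the arithmetic behind the four vec_mladd calls is ordinary bilinear interpolation with 1/16-pel weights: each output pixel blends a 2x2 source neighbourhood with weights A, B, C and D (which sum to 256), adds the rounder and shifts right by 8. A minimal scalar sketch of that computation is below; the function name gmc1_scalar_ref and the fixed 8-pixel row width are illustrative assumptions (matching the 8-byte destination blocks the AltiVec routine stores), not code taken from FFmpeg.

#include <stdint.h>

/* Hypothetical scalar reference for the per-pixel math that
 * ff_gmc1_altivec vectorizes; the eight multiply-accumulate chains
 * per row are what each group of four vec_mladd calls replaces
 * (32 integer muls and 32 integer adds). */
static void gmc1_scalar_ref(uint8_t *dst, const uint8_t *src, int stride,
                            int h, int x16, int y16, int rounder)
{
    const int A = (16 - x16) * (16 - y16);
    const int B = (x16)      * (16 - y16);
    const int C = (16 - x16) * (y16);
    const int D = (x16)      * (y16);
    int i, x;

    for (i = 0; i < h; i++) {
        for (x = 0; x < 8; x++)
            dst[x] = (A * src[x]          + B * src[x + 1]          +
                      C * src[x + stride] + D * src[x + stride + 1] +
                      rounder) >> 8;
        dst += stride;
        src += stride;
    }
}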