author     Diego Biurrun <diego@biurrun.de>    2005-12-22 01:10:11 +0000
committer  Diego Biurrun <diego@biurrun.de>    2005-12-22 01:10:11 +0000
commit     bb270c0896b39e1ae9277355e3c120ed3feb64a3 (patch)
tree       fc2fc2b1216d19acb3879abb6ea5a3b400f43fe4 /libavcodec/ppc
parent     50827fcf44f34521df4708cdb633809b56fb9df3 (diff)
download   ffmpeg-bb270c0896b39e1ae9277355e3c120ed3feb64a3.tar.gz
COSMETICS: tabs --> spaces, some prettyprinting
Originally committed as revision 4764 to svn://svn.ffmpeg.org/ffmpeg/trunk
Diffstat (limited to 'libavcodec/ppc')
-rw-r--r--  libavcodec/ppc/dsputil_altivec.c       128
-rw-r--r--  libavcodec/ppc/dsputil_h264_altivec.c   48
-rw-r--r--  libavcodec/ppc/dsputil_ppc.c            56
-rw-r--r--  libavcodec/ppc/dsputil_ppc.h            12
-rw-r--r--  libavcodec/ppc/fft_altivec.c             8
-rw-r--r--  libavcodec/ppc/gcc_fixes.h              18
-rw-r--r--  libavcodec/ppc/idct_altivec.c          246
-rw-r--r--  libavcodec/ppc/mpegvideo_altivec.c      12
8 files changed, 264 insertions, 264 deletions
diff --git a/libavcodec/ppc/dsputil_altivec.c b/libavcodec/ppc/dsputil_altivec.c
index 20ee382f29..da9b3dd15a 100644
--- a/libavcodec/ppc/dsputil_altivec.c
+++ b/libavcodec/ppc/dsputil_altivec.c
@@ -67,7 +67,7 @@ int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h
         /* Read unaligned pixels into our vectors. The vectors are as follows:
            pix1v: pix1[0]-pix1[15]
-           pix2v: pix2[0]-pix2[15]      pix2iv: pix2[1]-pix2[16]
+           pix2v: pix2[0]-pix2[15]      pix2iv: pix2[1]-pix2[16]
         */
         tv = (vector unsigned char *) pix1;
         pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));
@@ -184,7 +184,7 @@ int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int
        fact to avoid a potentially expensive unaligned read, as well
        as some splitting, and vector addition each time around the loop.
        Read unaligned pixels into our vectors. The vectors are as follows:
-       pix2v: pix2[0]-pix2[15]      pix2iv: pix2[1]-pix2[16]
+       pix2v: pix2[0]-pix2[15]      pix2iv: pix2[1]-pix2[16]
        Split the pixel vectors into shorts
     */
     tv = (vector unsigned char *) &pix2[0];
@@ -204,7 +204,7 @@ int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int
         /* Read unaligned pixels into our vectors. The vectors are as follows:
            pix1v: pix1[0]-pix1[15]
-           pix3v: pix3[0]-pix3[15]      pix3iv: pix3[1]-pix3[16]
+           pix3v: pix3[0]-pix3[15]      pix3iv: pix3[1]-pix3[16]
         */
         tv = (vector unsigned char *) pix1;
         pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));
@@ -273,7 +273,7 @@ int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
 
     for(i=0;i<h;i++) {
-        /* Read potentially unaligned pixels into t1 and t2 */
+        /* Read potentially unaligned pixels into t1 and t2 */
         perm1 = vec_lvsl(0, pix1);
         pix1v = (vector unsigned char *) pix1;
         perm2 = vec_lvsl(0, pix2);
@@ -281,12 +281,12 @@ int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
         t1 = vec_perm(pix1v[0], pix1v[1], perm1);
         t2 = vec_perm(pix2v[0], pix2v[1], perm2);
 
-        /* Calculate a sum of abs differences vector */
+        /* Calculate a sum of abs differences vector */
         t3 = vec_max(t1, t2);
         t4 = vec_min(t1, t2);
         t5 = vec_sub(t3, t4);
 
-        /* Add each 4 pixel group together and put 4 results into sad */
+        /* Add each 4 pixel group together and put 4 results into sad */
         sad = vec_sum4s(t5, sad);
 
         pix1 += line_size;
@@ -316,9 +316,9 @@ int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
     permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);
 
     for(i=0;i<h;i++) {
-        /* Read potentially unaligned pixels into t1 and t2
-           Since we're reading 16 pixels, and actually only want 8,
-           mask out the last 8 pixels. The 0s don't change the sum. */
+        /* Read potentially unaligned pixels into t1 and t2
+           Since we're reading 16 pixels, and actually only want 8,
+           mask out the last 8 pixels. The 0s don't change the sum. */
         perm1 = vec_lvsl(0, pix1);
         pix1v = (vector unsigned char *) pix1;
         perm2 = vec_lvsl(0, pix2);
@@ -326,12 +326,12 @@ int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
         t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
         t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);
 
-        /* Calculate a sum of abs differences vector */
+        /* Calculate a sum of abs differences vector */
         t3 = vec_max(t1, t2);
         t4 = vec_min(t1, t2);
         t5 = vec_sub(t3, t4);
 
-        /* Add each 4 pixel group together and put 4 results into sad */
+        /* Add each 4 pixel group together and put 4 results into sad */
         sad = vec_sum4s(t5, sad);
 
         pix1 += line_size;
@@ -398,9 +398,9 @@ int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
 
     for(i=0;i<h;i++) {
-        /* Read potentially unaligned pixels into t1 and t2
-           Since we're reading 16 pixels, and actually only want 8,
-           mask out the last 8 pixels. The 0s don't change the sum. */
+        /* Read potentially unaligned pixels into t1 and t2
+           Since we're reading 16 pixels, and actually only want 8,
+           mask out the last 8 pixels. The 0s don't change the sum. */
         perm1 = vec_lvsl(0, pix1);
         pix1v = (vector unsigned char *) pix1;
         perm2 = vec_lvsl(0, pix2);
@@ -413,7 +413,7 @@ int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
           of the fact that abs(a-b)^2 = (a-b)^2.
         */
 
-        /* Calculate abs differences vector */
+        /* Calculate abs differences vector */
         t3 = vec_max(t1, t2);
         t4 = vec_min(t1, t2);
         t5 = vec_sub(t3, t4);
@@ -451,7 +451,7 @@ int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
     sum = (vector unsigned int)vec_splat_u32(0);
 
     for(i=0;i<h;i++) {
-        /* Read potentially unaligned pixels into t1 and t2 */
+        /* Read potentially unaligned pixels into t1 and t2 */
         perm1 = vec_lvsl(0, pix1);
         pix1v = (vector unsigned char *) pix1;
         perm2 = vec_lvsl(0, pix2);
@@ -464,7 +464,7 @@ int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
           of the fact that abs(a-b)^2 = (a-b)^2.
         */
 
-        /* Calculate abs differences vector */
+        /* Calculate abs differences vector */
         t3 = vec_max(t1, t2);
         t4 = vec_min(t1, t2);
         t5 = vec_sub(t3, t4);
@@ -498,12 +498,12 @@ int pix_sum_altivec(uint8_t * pix, int line_size)
     sad = (vector unsigned int)vec_splat_u32(0);
 
     for (i = 0; i < 16; i++) {
-        /* Read the potentially unaligned 16 pixels into t1 */
+        /* Read the potentially unaligned 16 pixels into t1 */
        perm = vec_lvsl(0, pix);
        pixv = (vector unsigned char *) pix;
        t1 = vec_perm(pixv[0], pixv[1], perm);
 
-        /* Add each 4 pixel group together and put 4 results into sad */
+        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t1, sad);
 
        pix += line_size;
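Two AltiVec idioms recur in the hunks above: an unaligned load is assembled from two aligned loads with vec_perm and the vec_lvsl permute mask, and the absolute difference of unsigned pixels is computed as vec_max minus vec_min, which needs no sign handling. A scalar model of the SAD kernel may help orient readers; this sketch is ours for illustration (the function name and parameters are not from the tree):

    #include <stdint.h>

    /* Scalar model of the vectorized SAD above: |a-b| as max-min,
       accumulated the way vec_sum4s accumulates 4-pixel groups. */
    static int sad_scalar(const uint8_t *pix1, const uint8_t *pix2,
                          int line_size, int w, int h)
    {
        int sad = 0;
        for (int y = 0; y < h; y++) {
            for (int x = 0; x < w; x++) {
                uint8_t hi = pix1[x] > pix2[x] ? pix1[x] : pix2[x]; /* vec_max */
                uint8_t lo = pix1[x] > pix2[x] ? pix2[x] : pix1[x]; /* vec_min */
                sad += hi - lo;                          /* vec_sub + vec_sum4s */
            }
            pix1 += line_size;
            pix2 += line_size;
        }
        return sad;
    }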
@@ -1335,32 +1335,32 @@ POWERPC_PERF_START_COUNT(altivec_hadamard8_diff8x8_num, 1);
        0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
 
-#define ONEITERBUTTERFLY(i, res) \
-    { \
-    register vector unsigned char src1, src2, srcO; \
-    register vector unsigned char dst1, dst2, dstO; \
-    src1 = vec_ld(stride * i, src); \
-    if ((((stride * i) + (unsigned long)src) & 0x0000000F) > 8) \
-      src2 = vec_ld((stride * i) + 16, src); \
-    srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
-    dst1 = vec_ld(stride * i, dst); \
-    if ((((stride * i) + (unsigned long)dst) & 0x0000000F) > 8) \
-      dst2 = vec_ld((stride * i) + 16, dst); \
-    dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
-    /* promote the unsigned chars to signed shorts */ \
-    /* we're in the 8x8 function, we only care for the first 8 */ \
-    register vector signed short srcV = \
-      (vector signed short)vec_mergeh((vector signed char)vzero, (vector signed char)srcO); \
-    register vector signed short dstV = \
-      (vector signed short)vec_mergeh((vector signed char)vzero, (vector signed char)dstO); \
-    /* substractions inside the first butterfly */ \
-    register vector signed short but0 = vec_sub(srcV, dstV); \
-    register vector signed short op1 = vec_perm(but0, but0, perm1); \
-    register vector signed short but1 = vec_mladd(but0, vprod1, op1); \
-    register vector signed short op2 = vec_perm(but1, but1, perm2); \
-    register vector signed short but2 = vec_mladd(but1, vprod2, op2); \
-    register vector signed short op3 = vec_perm(but2, but2, perm3); \
-    res = vec_mladd(but2, vprod3, op3); \
+#define ONEITERBUTTERFLY(i, res) \
+    { \
+    register vector unsigned char src1, src2, srcO; \
+    register vector unsigned char dst1, dst2, dstO; \
+    src1 = vec_ld(stride * i, src); \
+    if ((((stride * i) + (unsigned long)src) & 0x0000000F) > 8) \
+      src2 = vec_ld((stride * i) + 16, src); \
+    srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
+    dst1 = vec_ld(stride * i, dst); \
+    if ((((stride * i) + (unsigned long)dst) & 0x0000000F) > 8) \
+      dst2 = vec_ld((stride * i) + 16, dst); \
+    dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
+    /* promote the unsigned chars to signed shorts */ \
+    /* we're in the 8x8 function, we only care for the first 8 */ \
+    register vector signed short srcV = \
+      (vector signed short)vec_mergeh((vector signed char)vzero, (vector signed char)srcO); \
+    register vector signed short dstV = \
+      (vector signed short)vec_mergeh((vector signed char)vzero, (vector signed char)dstO); \
+    /* substractions inside the first butterfly */ \
+    register vector signed short but0 = vec_sub(srcV, dstV); \
+    register vector signed short op1 = vec_perm(but0, but0, perm1); \
+    register vector signed short but1 = vec_mladd(but0, vprod1, op1); \
+    register vector signed short op2 = vec_perm(but1, but1, perm2); \
+    register vector signed short but2 = vec_mladd(but1, vprod2, op2); \
+    register vector signed short op3 = vec_perm(but2, but2, perm3); \
+    res = vec_mladd(but2, vprod3, op3); \
     }
     ONEITERBUTTERFLY(0, temp0);
     ONEITERBUTTERFLY(1, temp1);
@@ -1480,26 +1480,26 @@ static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst,
        0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
 
-#define ONEITERBUTTERFLY(i, res1, res2) \
-    { \
+#define ONEITERBUTTERFLY(i, res1, res2) \
+    { \
     register vector unsigned char src1 asm ("v22"), src2 asm ("v23"); \
     register vector unsigned char dst1 asm ("v24"), dst2 asm ("v25"); \
-    src1 = vec_ld(stride * i, src); \
-    src2 = vec_ld((stride * i) + 16, src); \
+    src1 = vec_ld(stride * i, src); \
+    src2 = vec_ld((stride * i) + 16, src); \
     register vector unsigned char srcO asm ("v22") = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
-    dst1 = vec_ld(stride * i, dst); \
-    dst2 = vec_ld((stride * i) + 16, dst); \
+    dst1 = vec_ld(stride * i, dst); \
+    dst2 = vec_ld((stride * i) + 16, dst); \
     register vector unsigned char dstO asm ("v23") = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
-    /* promote the unsigned chars to signed shorts */ \
+    /* promote the unsigned chars to signed shorts */ \
     register vector signed short srcV asm ("v24") = \
-      (vector signed short)vec_mergeh((vector signed char)vzero, (vector signed char)srcO); \
+      (vector signed short)vec_mergeh((vector signed char)vzero, (vector signed char)srcO); \
     register vector signed short dstV asm ("v25") = \
-      (vector signed short)vec_mergeh((vector signed char)vzero, (vector signed char)dstO); \
+      (vector signed short)vec_mergeh((vector signed char)vzero, (vector signed char)dstO); \
     register vector signed short srcW asm ("v26") = \
-      (vector signed short)vec_mergel((vector signed char)vzero, (vector signed char)srcO); \
+      (vector signed short)vec_mergel((vector signed char)vzero, (vector signed char)srcO); \
     register vector signed short dstW asm ("v27") = \
-      (vector signed short)vec_mergel((vector signed char)vzero, (vector signed char)dstO); \
-    /* substractions inside the first butterfly */ \
+      (vector signed short)vec_mergel((vector signed char)vzero, (vector signed char)dstO); \
+    /* substractions inside the first butterfly */ \
     register vector signed short but0 asm ("v28") = vec_sub(srcV, dstV); \
     register vector signed short but0S asm ("v29") = vec_sub(srcW, dstW); \
     register vector signed short op1 asm ("v30") = vec_perm(but0, but0, perm1); \
@@ -1511,9 +1511,9 @@ static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst,
     register vector signed short op2S asm ("v27") = vec_perm(but1S, but1S, perm2); \
     register vector signed short but2S asm ("v28") = vec_mladd(but1S, vprod2, op2S); \
     register vector signed short op3 asm ("v29") = vec_perm(but2, but2, perm3); \
-    res1 = vec_mladd(but2, vprod3, op3); \
+    res1 = vec_mladd(but2, vprod3, op3); \
     register vector signed short op3S asm ("v30") = vec_perm(but2S, but2S, perm3); \
-    res2 = vec_mladd(but2S, vprod3, op3S); \
+    res2 = vec_mladd(but2S, vprod3, op3S); \
     }
     ONEITERBUTTERFLY(0, temp0, temp0S);
     ONEITERBUTTERFLY(1, temp1, temp1S);
@@ -1623,12 +1623,12 @@ POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff16_num, 1);
 int has_altivec(void)
 {
 #ifdef __AMIGAOS4__
-    ULONG result = 0;
-    extern struct ExecIFace *IExec;
+    ULONG result = 0;
+    extern struct ExecIFace *IExec;
 
-    IExec->GetCPUInfoTags(GCIT_VectorUnit, &result, TAG_DONE);
-    if (result == VECTORTYPE_ALTIVEC) return 1;
-    return 0;
+    IExec->GetCPUInfoTags(GCIT_VectorUnit, &result, TAG_DONE);
+    if (result == VECTORTYPE_ALTIVEC) return 1;
+    return 0;
 #else /* __AMIGAOS4__ */
 #ifdef CONFIG_DARWIN
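The ONEITERBUTTERFLY macros reindented above compute one row of a Hadamard-style transform of the src/dst difference: each vec_perm pairs an element with its butterfly partner and each vec_mladd multiplies by a coefficient vector of +1/-1 values and adds the permuted partner, so a sum and a difference fall out of one multiply-add. As a plain scalar butterfly stage, for orientation (our naming, not from the tree):

    #include <stdint.h>

    /* One in-place butterfly stage over 8 shorts: each pair (a, b)
       taken `step` apart becomes (a+b, a-b), which is what vec_mladd
       with a +/-1 product vector and a swapped operand achieves. */
    static void butterfly_stage(int16_t v[8], int step)
    {
        for (int i = 0; i < 8; i += 2 * step)
            for (int j = i; j < i + step; j++) {
                int16_t a = v[j], b = v[j + step];
                v[j]        = a + b;
                v[j + step] = a - b;
            }
    }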
diff --git a/libavcodec/ppc/dsputil_h264_altivec.c b/libavcodec/ppc/dsputil_h264_altivec.c
index 7dd9dcf0a1..0efb169d8b 100644
--- a/libavcodec/ppc/dsputil_h264_altivec.c
+++ b/libavcodec/ppc/dsputil_h264_altivec.c
@@ -191,33 +191,33 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint
 
 /* from dsputil.c */
 static inline void put_pixels8_l2(uint8_t * dst, const uint8_t * src1, const uint8_t * src2, int dst_stride, int src_stride1, int src_stride2, int h) {
-    int i;
-    for (i = 0; i < h; i++) {
-        uint32_t a, b;
-        a = (((const struct unaligned_32 *) (&src1[i * src_stride1]))->l);
-        b = (((const struct unaligned_32 *) (&src2[i * src_stride2]))->l);
-        *((uint32_t *) & dst[i * dst_stride]) = rnd_avg32(a, b);
-        a = (((const struct unaligned_32 *) (&src1[i * src_stride1 + 4]))->l);
-        b = (((const struct unaligned_32 *) (&src2[i * src_stride2 + 4]))->l);
-        *((uint32_t *) & dst[i * dst_stride + 4]) = rnd_avg32(a, b);
-    }
+    int i;
+    for (i = 0; i < h; i++) {
+        uint32_t a, b;
+        a = (((const struct unaligned_32 *) (&src1[i * src_stride1]))->l);
+        b = (((const struct unaligned_32 *) (&src2[i * src_stride2]))->l);
+        *((uint32_t *) & dst[i * dst_stride]) = rnd_avg32(a, b);
+        a = (((const struct unaligned_32 *) (&src1[i * src_stride1 + 4]))->l);
+        b = (((const struct unaligned_32 *) (&src2[i * src_stride2 + 4]))->l);
+        *((uint32_t *) & dst[i * dst_stride + 4]) = rnd_avg32(a, b);
+    }
 }
 
 static inline void avg_pixels8_l2(uint8_t * dst, const uint8_t * src1, const uint8_t * src2, int dst_stride, int src_stride1, int src_stride2, int h) {
-    int i;
-    for (i = 0; i < h; i++) {
-        uint32_t a, b;
-        a = (((const struct unaligned_32 *) (&src1[i * src_stride1]))->l);
-        b = (((const struct unaligned_32 *) (&src2[i * src_stride2]))->l);
-        *((uint32_t *) & dst[i * dst_stride]) = rnd_avg32(*((uint32_t *) & dst[i * dst_stride]), rnd_avg32(a, b));
-        a = (((const struct unaligned_32 *) (&src1[i * src_stride1 + 4]))->l);
-        b = (((const struct unaligned_32 *) (&src2[i * src_stride2 + 4]))->l);
-        *((uint32_t *) & dst[i * dst_stride + 4]) = rnd_avg32(*((uint32_t *) & dst[i * dst_stride + 4]), rnd_avg32(a, b));
-    }
+    int i;
+    for (i = 0; i < h; i++) {
+        uint32_t a, b;
+        a = (((const struct unaligned_32 *) (&src1[i * src_stride1]))->l);
+        b = (((const struct unaligned_32 *) (&src2[i * src_stride2]))->l);
+        *((uint32_t *) & dst[i * dst_stride]) = rnd_avg32(*((uint32_t *) & dst[i * dst_stride]), rnd_avg32(a, b));
+        a = (((const struct unaligned_32 *) (&src1[i * src_stride1 + 4]))->l);
+        b = (((const struct unaligned_32 *) (&src2[i * src_stride2 + 4]))->l);
+        *((uint32_t *) & dst[i * dst_stride + 4]) = rnd_avg32(*((uint32_t *) & dst[i * dst_stride + 4]), rnd_avg32(a, b));
+    }
 }
 
 static inline void put_pixels16_l2(uint8_t * dst, const uint8_t * src1, const uint8_t * src2, int dst_stride, int src_stride1, int src_stride2, int h) {
-    put_pixels8_l2(dst, src1, src2, dst_stride, src_stride1, src_stride2, h);
-    put_pixels8_l2(dst + 8, src1 + 8, src2 + 8, dst_stride, src_stride1, src_stride2, h);
+    put_pixels8_l2(dst, src1, src2, dst_stride, src_stride1, src_stride2, h);
+    put_pixels8_l2(dst + 8, src1 + 8, src2 + 8, dst_stride, src_stride1, src_stride2, h);
 }
 
 static inline void avg_pixels16_l2(uint8_t * dst, const uint8_t * src1, const uint8_t * src2, int dst_stride, int src_stride1, int src_stride2, int h) {
-    avg_pixels8_l2(dst, src1, src2, dst_stride, src_stride1, src_stride2, h);
-    avg_pixels8_l2(dst + 8, src1 + 8, src2 + 8, dst_stride, src_stride1, src_stride2, h);
+    avg_pixels8_l2(dst, src1, src2, dst_stride, src_stride1, src_stride2, h);
+    avg_pixels8_l2(dst + 8, src1 + 8, src2 + 8, dst_stride, src_stride1, src_stride2, h);
 }
 
 /* UNIMPLEMENTED YET !! */
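The put/avg_pixels*_l2 helpers above lean on rnd_avg32() from dsputil.h, which is not part of this diff. It averages four packed bytes at once with upward rounding; the bit trick looks essentially like the sketch below (our reconstruction, not quoted from the tree):

    #include <stdint.h>

    /* Per-byte (a+b+1)>>1 on four packed bytes: OR keeps the rounding
       bit, XOR isolates the differing bits, and masking the low bit of
       each byte before the shift keeps borrows from crossing byte lanes. */
    static uint32_t rnd_avg32_sketch(uint32_t a, uint32_t b)
    {
        return (a | b) - (((a ^ b) & ~0x01010101u) >> 1);
    }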
diff --git a/libavcodec/ppc/dsputil_ppc.c b/libavcodec/ppc/dsputil_ppc.c
index f8c7f94d3a..b7bec99b19 100644
--- a/libavcodec/ppc/dsputil_ppc.c
+++ b/libavcodec/ppc/dsputil_ppc.c
@@ -87,16 +87,16 @@ void powerpc_display_perf_report(void)
     {
         for (j = 0; j < POWERPC_NUM_PMC_ENABLED ; j++)
         {
-            if (perfdata[j][i][powerpc_data_num] != (unsigned long long)0)
-                av_log(NULL, AV_LOG_INFO,
-                       " Function \"%s\" (pmc%d):\n\tmin: %llu\n\tmax: %llu\n\tavg: %1.2lf (%llu)\n",
-                       perfname[i],
-                       j+1,
-                       perfdata[j][i][powerpc_data_min],
-                       perfdata[j][i][powerpc_data_max],
-                       (double)perfdata[j][i][powerpc_data_sum] /
-                       (double)perfdata[j][i][powerpc_data_num],
-                       perfdata[j][i][powerpc_data_num]);
+            if (perfdata[j][i][powerpc_data_num] != (unsigned long long)0)
+                av_log(NULL, AV_LOG_INFO,
+                       " Function \"%s\" (pmc%d):\n\tmin: %llu\n\tmax: %llu\n\tavg: %1.2lf (%llu)\n",
+                       perfname[i],
+                       j+1,
+                       perfdata[j][i][powerpc_data_min],
+                       perfdata[j][i][powerpc_data_max],
+                       (double)perfdata[j][i][powerpc_data_sum] /
+                       (double)perfdata[j][i][powerpc_data_num],
+                       perfdata[j][i][powerpc_data_num]);
         }
     }
 }
@@ -179,7 +179,7 @@ POWERPC_PERF_START_COUNT(powerpc_clear_blocks_dcbz128, 1);
     }
     else
         for ( ; i < sizeof(DCTELEM)*6*64 ; i += 128) {
-            asm volatile("dcbzl %0,%1" : : "b" (blocks), "r" (i) : "memory");
+            asm volatile("dcbzl %0,%1" : : "b" (blocks), "r" (i) : "memory");
         }
 #else
     memset(blocks, 0, sizeof(DCTELEM)*6*64);
@@ -284,25 +284,25 @@ void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx)
         c->put_no_rnd_pixels_tab[0][0] = put_pixels16_altivec;
         c->avg_pixels_tab[0][0] = avg_pixels16_altivec;
         c->avg_pixels_tab[1][0] = avg_pixels8_altivec;
-        c->avg_pixels_tab[1][3] = avg_pixels8_xy2_altivec;
+        c->avg_pixels_tab[1][3] = avg_pixels8_xy2_altivec;
         c->put_pixels_tab[1][3] = put_pixels8_xy2_altivec;
         c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec;
         c->put_pixels_tab[0][3] = put_pixels16_xy2_altivec;
         c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec;
-        c->gmc1 = gmc1_altivec;
+        c->gmc1 = gmc1_altivec;
 
 #ifdef CONFIG_DARWIN // ATM gcc-3.3 and gcc-3.4 fail to compile these in linux...
-        c->hadamard8_diff[0] = hadamard8_diff16_altivec;
-        c->hadamard8_diff[1] = hadamard8_diff8x8_altivec;
+        c->hadamard8_diff[0] = hadamard8_diff16_altivec;
+        c->hadamard8_diff[1] = hadamard8_diff8x8_altivec;
 #endif
 
 #ifdef CONFIG_ENCODERS
-        if (avctx->dct_algo == FF_DCT_AUTO ||
-            avctx->dct_algo == FF_DCT_ALTIVEC)
-        {
-            c->fdct = fdct_altivec;
-        }
+        if (avctx->dct_algo == FF_DCT_AUTO ||
+            avctx->dct_algo == FF_DCT_ALTIVEC)
+        {
+            c->fdct = fdct_altivec;
+        }
 #endif //CONFIG_ENCODERS
 
         if (avctx->lowres==0)
@@ -325,14 +325,14 @@ void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx)
             int i, j;
             for (i = 0 ; i < powerpc_perf_total ; i++)
             {
-                for (j = 0; j < POWERPC_NUM_PMC_ENABLED ; j++)
-                {
-                    perfdata[j][i][powerpc_data_min] = 0xFFFFFFFFFFFFFFFFULL;
-                    perfdata[j][i][powerpc_data_max] = 0x0000000000000000ULL;
-                    perfdata[j][i][powerpc_data_sum] = 0x0000000000000000ULL;
-                    perfdata[j][i][powerpc_data_num] = 0x0000000000000000ULL;
-                }
-            }
+                for (j = 0; j < POWERPC_NUM_PMC_ENABLED ; j++)
+                {
+                    perfdata[j][i][powerpc_data_min] = 0xFFFFFFFFFFFFFFFFULL;
+                    perfdata[j][i][powerpc_data_max] = 0x0000000000000000ULL;
+                    perfdata[j][i][powerpc_data_sum] = 0x0000000000000000ULL;
+                    perfdata[j][i][powerpc_data_num] = 0x0000000000000000ULL;
+                }
+            }
         }
 #endif /* POWERPC_PERFORMANCE_REPORT */
     } else
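The clear_blocks hunk above keeps the dcbzl path intact: on CPUs with 128-byte cache lines (the PPC970), a single dcbzl zeroes an entire line, so the six 64-coefficient DCT blocks are cleared in a short loop instead of a memset. Roughly, under the assumption of a 128-byte-aligned pointer (the real function also handles the misaligned head, hence the `i` it starts from, and falls back to memset elsewhere):

    #include <stdint.h>
    #include <string.h>

    static void clear_blocks_dcbz128_sketch(int16_t *blocks) /* DCTELEM */
    {
    #ifdef __powerpc__
        /* zero one 128-byte cache line per iteration */
        for (long i = 0; i < (long)(sizeof(int16_t) * 6 * 64); i += 128)
            __asm__ volatile("dcbzl %0,%1" : : "b"(blocks), "r"(i) : "memory");
    #else
        memset(blocks, 0, sizeof(int16_t) * 6 * 64);
    #endif
    }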
diff --git a/libavcodec/ppc/dsputil_ppc.h b/libavcodec/ppc/dsputil_ppc.h
index 7e01677f19..17b4c7d38f 100644
--- a/libavcodec/ppc/dsputil_ppc.h
+++ b/libavcodec/ppc/dsputil_ppc.h
@@ -114,10 +114,10 @@ extern unsigned long long perfdata[POWERPC_NUM_PMC_ENABLED][powerpc_perf_total][
 #define POWERPC_GET_PMC6(a) do {} while (0)
 #endif
 #endif /* POWERPC_MODE_64BITS */
-#define POWERPC_PERF_DECLARE(a, cond) \
-    POWERP_PMC_DATATYPE \
-        pmc_start[POWERPC_NUM_PMC_ENABLED], \
-        pmc_stop[POWERPC_NUM_PMC_ENABLED], \
+#define POWERPC_PERF_DECLARE(a, cond) \
+    POWERP_PMC_DATATYPE \
+        pmc_start[POWERPC_NUM_PMC_ENABLED], \
+        pmc_stop[POWERPC_NUM_PMC_ENABLED], \
         pmc_loop_index;
 #define POWERPC_PERF_START_COUNT(a, cond) do { \
     POWERPC_GET_PMC6(pmc_start[5]); \
@@ -141,8 +141,8 @@ extern unsigned long long perfdata[POWERPC_NUM_PMC_ENABLED][powerpc_perf_total][
          pmc_loop_index++) \
     { \
         if (pmc_stop[pmc_loop_index] >= pmc_start[pmc_loop_index]) \
-        { \
-            POWERP_PMC_DATATYPE diff = \
+        { \
+            POWERP_PMC_DATATYPE diff = \
                 pmc_stop[pmc_loop_index] - pmc_start[pmc_loop_index]; \
             if (diff < perfdata[pmc_loop_index][a][powerpc_data_min]) \
                 perfdata[pmc_loop_index][a][powerpc_data_min] = diff; \
diff --git a/libavcodec/ppc/fft_altivec.c b/libavcodec/ppc/fft_altivec.c
index 52aecc163f..1b27579ebd 100644
--- a/libavcodec/ppc/fft_altivec.c
+++ b/libavcodec/ppc/fft_altivec.c
@@ -65,8 +65,8 @@ void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z)
 POWERPC_PERF_DECLARE(altivec_fft_num, s->nbits >= 6);
 #ifdef ALTIVEC_USE_REFERENCE_C_CODE
     int ln = s->nbits;
-    int j, np, np2;
-    int nblocks, nloops;
+    int j, np, np2;
+    int nblocks, nloops;
     register FFTComplex *p, *q;
     FFTComplex *exptab = s->exptab;
     int l;
@@ -147,8 +147,8 @@ POWERPC_PERF_STOP_COUNT(altivec_fft_num, s->nbits >= 6);
 #endif
 
     int ln = s->nbits;
-    int j, np, np2;
-    int nblocks, nloops;
+    int j, np, np2;
+    int nblocks, nloops;
     register FFTComplex *p, *q;
     FFTComplex *cptr, *cptr1;
     int k;
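The POWERPC_PERF_* machinery reindented in dsputil_ppc.h feeds the report loop in dsputil_ppc.c: per function and per performance-monitor counter it keeps a minimum, a maximum, a running sum and a sample count, from which the report prints min/max/avg. Stripped of the macros, the per-sample update amounts to this (illustrative names, not the tree's):

    typedef struct {
        unsigned long long min, max, sum, num;
    } perf_stat;

    /* Model of the statistics POWERPC_PERF_STOP_COUNT records: min is
       initialized to all-ones and max/sum/num to zero, as in the init
       loop above, so the first sample sets both extremes. */
    static void perf_update(perf_stat *st, unsigned long long diff)
    {
        if (diff < st->min) st->min = diff;
        if (diff > st->max) st->max = diff;
        st->sum += diff;
        st->num++;
    }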
diff --git a/libavcodec/ppc/gcc_fixes.h b/libavcodec/ppc/gcc_fixes.h
index 194a3f8be4..288fdf834f 100644
--- a/libavcodec/ppc/gcc_fixes.h
+++ b/libavcodec/ppc/gcc_fixes.h
@@ -30,31 +30,31 @@
  */
 static inline vector signed char ff_vmrglb (vector signed char const A,
-                                          vector signed char const B)
+                                            vector signed char const B)
 {
     static const vector unsigned char lowbyte = {
-        0x08, 0x18, 0x09, 0x19, 0x0a, 0x1a, 0x0b, 0x1b,
-        0x0c, 0x1c, 0x0d, 0x1d, 0x0e, 0x1e, 0x0f, 0x1f
+        0x08, 0x18, 0x09, 0x19, 0x0a, 0x1a, 0x0b, 0x1b,
+        0x0c, 0x1c, 0x0d, 0x1d, 0x0e, 0x1e, 0x0f, 0x1f
     };
     return vec_perm (A, B, lowbyte);
 }
 
 static inline vector signed short ff_vmrglh (vector signed short const A,
-                                          vector signed short const B)
+                                             vector signed short const B)
 {
     static const vector unsigned char lowhalf = {
-        0x08, 0x09, 0x18, 0x19, 0x0a, 0x0b, 0x1a, 0x1b,
-        0x0c, 0x0d, 0x1c, 0x1d, 0x0e, 0x0f, 0x1e, 0x1f
+        0x08, 0x09, 0x18, 0x19, 0x0a, 0x0b, 0x1a, 0x1b,
+        0x0c, 0x0d, 0x1c, 0x1d, 0x0e, 0x0f, 0x1e, 0x1f
     };
     return vec_perm (A, B, lowhalf);
 }
 
 static inline vector signed int ff_vmrglw (vector signed int const A,
-                                          vector signed int const B)
+                                           vector signed int const B)
 {
     static const vector unsigned char lowword = {
-        0x08, 0x09, 0x0a, 0x0b, 0x18, 0x19, 0x1a, 0x1b,
-        0x0c, 0x0d, 0x0e, 0x0f, 0x1c, 0x1d, 0x1e, 0x1f
+        0x08, 0x09, 0x0a, 0x0b, 0x18, 0x19, 0x1a, 0x1b,
+        0x0c, 0x0d, 0x0e, 0x0f, 0x1c, 0x1d, 0x1e, 0x1f
     };
     return vec_perm (A, B, lowword);
 }
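The ff_vmrgl* helpers above rebuild the merge-low operations from vec_perm with explicit selector constants, sidestepping gcc versions whose builtin merges were broken (hence the file name gcc_fixes.h). vec_perm indexes A and B as one 32-byte table: selector bytes 0x00-0x0f pick from A, 0x10-0x1f from B, so the lowbyte constant interleaves the two high halves. In scalar terms (our model, illustrative only):

    /* Scalar model of ff_vmrglb's selector {0x08,0x18,0x09,0x19,...}. */
    static void vmrglb_model(const signed char A[16], const signed char B[16],
                             signed char out[16])
    {
        for (int i = 0; i < 8; i++) {
            out[2 * i]     = A[8 + i]; /* selector byte 0x08 + i */
            out[2 * i + 1] = B[8 + i]; /* selector byte 0x18 + i */
        }
    }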
diff --git a/libavcodec/ppc/idct_altivec.c b/libavcodec/ppc/idct_altivec.c
index ac91199ea5..85bc57baef 100644
--- a/libavcodec/ppc/idct_altivec.c
+++ b/libavcodec/ppc/idct_altivec.c
@@ -51,108 +51,108 @@
 #define vector_s32_t vector signed int
 #define vector_u32_t vector unsigned int
 
-#define IDCT_HALF \
-    /* 1st stage */ \
-    t1 = vec_mradds (a1, vx7, vx1 ); \
-    t8 = vec_mradds (a1, vx1, vec_subs (zero, vx7)); \
-    t7 = vec_mradds (a2, vx5, vx3); \
-    t3 = vec_mradds (ma2, vx3, vx5); \
-    \
-    /* 2nd stage */ \
-    t5 = vec_adds (vx0, vx4); \
-    t0 = vec_subs (vx0, vx4); \
-    t2 = vec_mradds (a0, vx6, vx2); \
-    t4 = vec_mradds (a0, vx2, vec_subs (zero, vx6)); \
-    t6 = vec_adds (t8, t3); \
-    t3 = vec_subs (t8, t3); \
-    t8 = vec_subs (t1, t7); \
-    t1 = vec_adds (t1, t7); \
-    \
-    /* 3rd stage */ \
-    t7 = vec_adds (t5, t2); \
-    t2 = vec_subs (t5, t2); \
-    t5 = vec_adds (t0, t4); \
-    t0 = vec_subs (t0, t4); \
-    t4 = vec_subs (t8, t3); \
-    t3 = vec_adds (t8, t3); \
-    \
-    /* 4th stage */ \
-    vy0 = vec_adds (t7, t1); \
-    vy7 = vec_subs (t7, t1); \
-    vy1 = vec_mradds (c4, t3, t5); \
-    vy6 = vec_mradds (mc4, t3, t5); \
-    vy2 = vec_mradds (c4, t4, t0); \
-    vy5 = vec_mradds (mc4, t4, t0); \
-    vy3 = vec_adds (t2, t6); \
+#define IDCT_HALF \
+    /* 1st stage */ \
+    t1 = vec_mradds (a1, vx7, vx1 ); \
+    t8 = vec_mradds (a1, vx1, vec_subs (zero, vx7)); \
+    t7 = vec_mradds (a2, vx5, vx3); \
+    t3 = vec_mradds (ma2, vx3, vx5); \
+    \
+    /* 2nd stage */ \
+    t5 = vec_adds (vx0, vx4); \
+    t0 = vec_subs (vx0, vx4); \
+    t2 = vec_mradds (a0, vx6, vx2); \
+    t4 = vec_mradds (a0, vx2, vec_subs (zero, vx6)); \
+    t6 = vec_adds (t8, t3); \
+    t3 = vec_subs (t8, t3); \
+    t8 = vec_subs (t1, t7); \
+    t1 = vec_adds (t1, t7); \
+    \
+    /* 3rd stage */ \
+    t7 = vec_adds (t5, t2); \
+    t2 = vec_subs (t5, t2); \
+    t5 = vec_adds (t0, t4); \
+    t0 = vec_subs (t0, t4); \
+    t4 = vec_subs (t8, t3); \
+    t3 = vec_adds (t8, t3); \
+    \
+    /* 4th stage */ \
+    vy0 = vec_adds (t7, t1); \
+    vy7 = vec_subs (t7, t1); \
+    vy1 = vec_mradds (c4, t3, t5); \
+    vy6 = vec_mradds (mc4, t3, t5); \
+    vy2 = vec_mradds (c4, t4, t0); \
+    vy5 = vec_mradds (mc4, t4, t0); \
+    vy3 = vec_adds (t2, t6); \
     vy4 = vec_subs (t2, t6);
 
-#define IDCT \
-    vector_s16_t vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7; \
-    vector_s16_t vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7; \
-    vector_s16_t a0, a1, a2, ma2, c4, mc4, zero, bias; \
-    vector_s16_t t0, t1, t2, t3, t4, t5, t6, t7, t8; \
-    vector_u16_t shift; \
-    \
-    c4 = vec_splat (constants[0], 0); \
-    a0 = vec_splat (constants[0], 1); \
-    a1 = vec_splat (constants[0], 2); \
-    a2 = vec_splat (constants[0], 3); \
-    mc4 = vec_splat (constants[0], 4); \
-    ma2 = vec_splat (constants[0], 5); \
-    bias = (vector_s16_t)vec_splat ((vector_s32_t)constants[0], 3); \
-    \
-    zero = vec_splat_s16 (0); \
-    shift = vec_splat_u16 (4); \
-    \
-    vx0 = vec_mradds (vec_sl (block[0], shift), constants[1], zero); \
-    vx1 = vec_mradds (vec_sl (block[1], shift), constants[2], zero); \
-    vx2 = vec_mradds (vec_sl (block[2], shift), constants[3], zero); \
-    vx3 = vec_mradds (vec_sl (block[3], shift), constants[4], zero); \
-    vx4 = vec_mradds (vec_sl (block[4], shift), constants[1], zero); \
-    vx5 = vec_mradds (vec_sl (block[5], shift), constants[4], zero); \
-    vx6 = vec_mradds (vec_sl (block[6], shift), constants[3], zero); \
-    vx7 = vec_mradds (vec_sl (block[7], shift), constants[2], zero); \
-    \
-    IDCT_HALF \
-    \
-    vx0 = vec_mergeh (vy0, vy4); \
-    vx1 = vec_mergel (vy0, vy4); \
-    vx2 = vec_mergeh (vy1, vy5); \
-    vx3 = vec_mergel (vy1, vy5); \
-    vx4 = vec_mergeh (vy2, vy6); \
-    vx5 = vec_mergel (vy2, vy6); \
-    vx6 = vec_mergeh (vy3, vy7); \
-    vx7 = vec_mergel (vy3, vy7); \
-    \
-    vy0 = vec_mergeh (vx0, vx4); \
-    vy1 = vec_mergel (vx0, vx4); \
-    vy2 = vec_mergeh (vx1, vx5); \
-    vy3 = vec_mergel (vx1, vx5); \
-    vy4 = vec_mergeh (vx2, vx6); \
-    vy5 = vec_mergel (vx2, vx6); \
-    vy6 = vec_mergeh (vx3, vx7); \
-    vy7 = vec_mergel (vx3, vx7); \
-    \
-    vx0 = vec_adds (vec_mergeh (vy0, vy4), bias); \
-    vx1 = vec_mergel (vy0, vy4); \
-    vx2 = vec_mergeh (vy1, vy5); \
-    vx3 = vec_mergel (vy1, vy5); \
-    vx4 = vec_mergeh (vy2, vy6); \
-    vx5 = vec_mergel (vy2, vy6); \
-    vx6 = vec_mergeh (vy3, vy7); \
-    vx7 = vec_mergel (vy3, vy7); \
-    \
-    IDCT_HALF \
-    \
-    shift = vec_splat_u16 (6); \
-    vx0 = vec_sra (vy0, shift); \
-    vx1 = vec_sra (vy1, shift); \
-    vx2 = vec_sra (vy2, shift); \
-    vx3 = vec_sra (vy3, shift); \
-    vx4 = vec_sra (vy4, shift); \
-    vx5 = vec_sra (vy5, shift); \
-    vx6 = vec_sra (vy6, shift); \
+#define IDCT \
+    vector_s16_t vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7; \
+    vector_s16_t vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7; \
+    vector_s16_t a0, a1, a2, ma2, c4, mc4, zero, bias; \
+    vector_s16_t t0, t1, t2, t3, t4, t5, t6, t7, t8; \
+    vector_u16_t shift; \
+    \
+    c4 = vec_splat (constants[0], 0); \
+    a0 = vec_splat (constants[0], 1); \
+    a1 = vec_splat (constants[0], 2); \
+    a2 = vec_splat (constants[0], 3); \
+    mc4 = vec_splat (constants[0], 4); \
+    ma2 = vec_splat (constants[0], 5); \
+    bias = (vector_s16_t)vec_splat ((vector_s32_t)constants[0], 3); \
+    \
+    zero = vec_splat_s16 (0); \
+    shift = vec_splat_u16 (4); \
+    \
+    vx0 = vec_mradds (vec_sl (block[0], shift), constants[1], zero); \
+    vx1 = vec_mradds (vec_sl (block[1], shift), constants[2], zero); \
+    vx2 = vec_mradds (vec_sl (block[2], shift), constants[3], zero); \
+    vx3 = vec_mradds (vec_sl (block[3], shift), constants[4], zero); \
+    vx4 = vec_mradds (vec_sl (block[4], shift), constants[1], zero); \
+    vx5 = vec_mradds (vec_sl (block[5], shift), constants[4], zero); \
+    vx6 = vec_mradds (vec_sl (block[6], shift), constants[3], zero); \
+    vx7 = vec_mradds (vec_sl (block[7], shift), constants[2], zero); \
+    \
+    IDCT_HALF \
+    \
+    vx0 = vec_mergeh (vy0, vy4); \
+    vx1 = vec_mergel (vy0, vy4); \
+    vx2 = vec_mergeh (vy1, vy5); \
+    vx3 = vec_mergel (vy1, vy5); \
+    vx4 = vec_mergeh (vy2, vy6); \
+    vx5 = vec_mergel (vy2, vy6); \
+    vx6 = vec_mergeh (vy3, vy7); \
+    vx7 = vec_mergel (vy3, vy7); \
+    \
+    vy0 = vec_mergeh (vx0, vx4); \
+    vy1 = vec_mergel (vx0, vx4); \
+    vy2 = vec_mergeh (vx1, vx5); \
+    vy3 = vec_mergel (vx1, vx5); \
+    vy4 = vec_mergeh (vx2, vx6); \
+    vy5 = vec_mergel (vx2, vx6); \
+    vy6 = vec_mergeh (vx3, vx7); \
+    vy7 = vec_mergel (vx3, vx7); \
+    \
+    vx0 = vec_adds (vec_mergeh (vy0, vy4), bias); \
+    vx1 = vec_mergel (vy0, vy4); \
+    vx2 = vec_mergeh (vy1, vy5); \
+    vx3 = vec_mergel (vy1, vy5); \
+    vx4 = vec_mergeh (vy2, vy6); \
+    vx5 = vec_mergel (vy2, vy6); \
+    vx6 = vec_mergeh (vy3, vy7); \
+    vx7 = vec_mergel (vy3, vy7); \
+    \
+    IDCT_HALF \
+    \
+    shift = vec_splat_u16 (6); \
+    vx0 = vec_sra (vy0, shift); \
+    vx1 = vec_sra (vy1, shift); \
+    vx2 = vec_sra (vy2, shift); \
+    vx3 = vec_sra (vy3, shift); \
+    vx4 = vec_sra (vy4, shift); \
+    vx5 = vec_sra (vy5, shift); \
+    vx6 = vec_sra (vy6, shift); \
     vx7 = vec_sra (vy7, shift);
@@ -180,18 +180,18 @@ POWERPC_PERF_START_COUNT(altivec_idct_put_num, 1);
 #endif
     IDCT
 
-#define COPY(dest,src) \
-    tmp = vec_packsu (src, src); \
-    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest); \
+#define COPY(dest,src) \
+    tmp = vec_packsu (src, src); \
+    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest); \
     vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
 
-    COPY (dest, vx0) dest += stride;
-    COPY (dest, vx1) dest += stride;
-    COPY (dest, vx2) dest += stride;
-    COPY (dest, vx3) dest += stride;
-    COPY (dest, vx4) dest += stride;
-    COPY (dest, vx5) dest += stride;
-    COPY (dest, vx6) dest += stride;
+    COPY (dest, vx0) dest += stride;
+    COPY (dest, vx1) dest += stride;
+    COPY (dest, vx2) dest += stride;
+    COPY (dest, vx3) dest += stride;
+    COPY (dest, vx4) dest += stride;
+    COPY (dest, vx5) dest += stride;
+    COPY (dest, vx6) dest += stride;
     COPY (dest, vx7)
 
 POWERPC_PERF_STOP_COUNT(altivec_idct_put_num, 1);
@@ -225,22 +225,22 @@ POWERPC_PERF_START_COUNT(altivec_idct_add_num, 1);
     perm0 = vec_mergeh (p, p0);
     perm1 = vec_mergeh (p, p1);
 
-#define ADD(dest,src,perm) \
-    /* *(uint64_t *)&tmp = *(uint64_t *)dest; */ \
-    tmp = vec_ld (0, dest); \
-    tmp2 = (vector_s16_t)vec_perm (tmp, (vector_u8_t)zero, perm); \
-    tmp3 = vec_adds (tmp2, src); \
-    tmp = vec_packsu (tmp3, tmp3); \
-    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest); \
+#define ADD(dest,src,perm) \
+    /* *(uint64_t *)&tmp = *(uint64_t *)dest; */ \
+    tmp = vec_ld (0, dest); \
+    tmp2 = (vector_s16_t)vec_perm (tmp, (vector_u8_t)zero, perm); \
+    tmp3 = vec_adds (tmp2, src); \
+    tmp = vec_packsu (tmp3, tmp3); \
+    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest); \
     vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
 
-    ADD (dest, vx0, perm0) dest += stride;
-    ADD (dest, vx1, perm1) dest += stride;
-    ADD (dest, vx2, perm0) dest += stride;
-    ADD (dest, vx3, perm1) dest += stride;
-    ADD (dest, vx4, perm0) dest += stride;
-    ADD (dest, vx5, perm1) dest += stride;
-    ADD (dest, vx6, perm0) dest += stride;
+    ADD (dest, vx0, perm0) dest += stride;
+    ADD (dest, vx1, perm1) dest += stride;
+    ADD (dest, vx2, perm0) dest += stride;
+    ADD (dest, vx3, perm1) dest += stride;
+    ADD (dest, vx4, perm0) dest += stride;
+    ADD (dest, vx5, perm1) dest += stride;
+    ADD (dest, vx6, perm0) dest += stride;
     ADD (dest, vx7, perm1)
 
 POWERPC_PERF_STOP_COUNT(altivec_idct_add_num, 1);
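Nearly every arithmetic step in the IDCT macros above is vec_mradds, a fused fixed-point primitive: multiply two s0.15 operands, round, shift back by 15, then add a third operand with saturation. A scalar model (our naming, illustrative only):

    #include <stdint.h>

    /* Scalar model of vec_mradds(a, b, c): ((a*b + 0x4000) >> 15) + c,
       with the final add saturated to the int16_t range. */
    static int16_t mradds_model(int16_t a, int16_t b, int16_t c)
    {
        int32_t r = (((int32_t)a * b + 0x4000) >> 15) + c;
        if (r >  32767) r =  32767;
        if (r < -32768) r = -32768;
        return (int16_t)r;
    }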
However, if we put it inline, the DCT data can remain - // in the vector local variables, as floats, which we'll use during the - // quantize step... + // function. However, if we put it inline, the DCT data can remain + // in the vector local variables, as floats, which we'll use during the + // quantize step... { const vector float vec_0_298631336 = (vector float)FOUROF(0.298631336f); const vector float vec_0_390180644 = (vector float)FOUROF(-0.390180644f); @@ -206,11 +206,11 @@ int dct_quantize_altivec(MpegEncContext* s, z1 = vec_madd(vec_add(tmp12, tmp13), vec_0_541196100, (vector float)zero); // dataptr[2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865), - // CONST_BITS-PASS1_BITS); + // CONST_BITS-PASS1_BITS); row2 = vec_madd(tmp13, vec_0_765366865, z1); // dataptr[6] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065), - // CONST_BITS-PASS1_BITS); + // CONST_BITS-PASS1_BITS); row6 = vec_madd(tmp12, vec_1_847759065, z1); z1 = vec_add(tmp4, tmp7); // z1 = tmp4 + tmp7; @@ -315,7 +315,7 @@ int dct_quantize_altivec(MpegEncContext* s, } // Load the bias vector (We add 0.5 to the bias so that we're - // rounding when we convert to int, instead of flooring.) + // rounding when we convert to int, instead of flooring.) { vector signed int biasInt; const vector float negOneFloat = (vector float)FOUROF(-1.0f); |