author     Diego Biurrun <diego@biurrun.de>    2005-12-17 18:14:38 +0000
committer  Diego Biurrun <diego@biurrun.de>    2005-12-17 18:14:38 +0000
commit     115329f16062074e11ccf3b89ead6176606c9696 (patch)
tree       e98aa993905a702688bf821737ab9a443969fc28 /libavcodec/ppc
parent     d76319b1ab716320f6e6a4d690b85fe4504ebd5b (diff)
download   ffmpeg-115329f16062074e11ccf3b89ead6176606c9696.tar.gz
COSMETICS: Remove all trailing whitespace.
Originally committed as revision 4749 to svn://svn.ffmpeg.org/ffmpeg/trunk
Diffstat (limited to 'libavcodec/ppc')
-rw-r--r--  libavcodec/ppc/dsputil_altivec.c                 186
-rw-r--r--  libavcodec/ppc/dsputil_h264_altivec.c             10
-rw-r--r--  libavcodec/ppc/dsputil_h264_template_altivec.c    68
-rw-r--r--  libavcodec/ppc/dsputil_ppc.c                      10
-rw-r--r--  libavcodec/ppc/fft_altivec.c                      42
-rw-r--r--  libavcodec/ppc/gcc_fixes.h                        10
-rw-r--r--  libavcodec/ppc/gmc_altivec.c                      30
-rw-r--r--  libavcodec/ppc/idct_altivec.c                      2
-rw-r--r--  libavcodec/ppc/mpegvideo_altivec.c                28
-rw-r--r--  libavcodec/ppc/mpegvideo_ppc.c                     4
10 files changed, 195 insertions, 195 deletions
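
The change itself is mechanical. For reference, a standalone C filter that performs the same cleanup on one file streamed through stdin could look like the sketch below; it is an illustration, not the tool actually used for this commit.

    #include <stdio.h>
    #include <string.h>

    /* Strip trailing spaces and tabs from every line read on stdin. */
    int main(void)
    {
        char line[4096];

        while (fgets(line, sizeof(line), stdin)) {
            size_t len = strlen(line);
            int had_nl = len > 0 && line[len - 1] == '\n';

            if (had_nl)
                line[--len] = '\0';
            /* Only strip at a real line end (or at EOF); a chunk that
             * was split mid-line keeps its spaces, they are interior. */
            if (had_nl || feof(stdin))
                while (len > 0 &&
                       (line[len - 1] == ' ' || line[len - 1] == '\t'))
                    line[--len] = '\0';
            fputs(line, stdout);
            if (had_nl)
                fputc('\n', stdout);
        }
        return 0;
    }
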
diff --git a/libavcodec/ppc/dsputil_altivec.c b/libavcodec/ppc/dsputil_altivec.c
index 57b687dfde..20ee382f29 100644
--- a/libavcodec/ppc/dsputil_altivec.c
+++ b/libavcodec/ppc/dsputil_altivec.c
@@ -17,7 +17,7 @@
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-
+
#include "../dsputil.h"
#include "gcc_fixes.h"
@@ -44,7 +44,7 @@ static void sigill_handler (int sig)
signal (sig, SIG_DFL);
raise (sig);
}
-
+
canjump = 0;
siglongjmp (jmpbuf, 1);
}
@@ -71,7 +71,7 @@ int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h
*/
tv = (vector unsigned char *) pix1;
pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));
-
+
tv = (vector unsigned char *) &pix2[0];
pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));
@@ -86,7 +86,7 @@ int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h
/* Add each 4 pixel group together and put 4 results into sad */
sad = vec_sum4s(t5, sad);
-
+
pix1 += line_size;
pix2 += line_size;
}
@@ -123,7 +123,7 @@ int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h
*/
tv = (vector unsigned char *) &pix2[0];
pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));
-
+
for(i=0;i<h;i++) {
/*
Read unaligned pixels into our vectors. The vectors are as follows:
@@ -144,18 +144,18 @@ int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h
/* Add each 4 pixel group together and put 4 results into sad */
sad = vec_sum4s(t5, sad);
-
+
pix1 += line_size;
pix2v = pix3v;
pix3 += line_size;
-
+
}
-
+
/* Sum up the four partial sums, and put the result into s */
sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
sumdiffs = vec_splat(sumdiffs, 3);
vec_ste(sumdiffs, 0, &s);
- return s;
+ return s;
}
int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
@@ -175,7 +175,7 @@ int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int
vector signed int sumdiffs;
sad = (vector unsigned int)vec_splat_u32(0);
-
+
s = 0;
/*
@@ -199,7 +199,7 @@ int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int
pix2ilv = (vector unsigned short) vec_mergel(zero, pix2iv);
t1 = vec_add(pix2hv, pix2ihv);
t2 = vec_add(pix2lv, pix2ilv);
-
+
for(i=0;i<h;i++) {
/*
Read unaligned pixels into our vectors. The vectors are as follows:
@@ -268,7 +268,7 @@ int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
vector unsigned char t1, t2, t3,t4, t5;
vector unsigned int sad;
vector signed int sumdiffs;
-
+
sad = (vector unsigned int)vec_splat_u32(0);
@@ -280,12 +280,12 @@ int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
pix2v = (vector unsigned char *) pix2;
t1 = vec_perm(pix1v[0], pix1v[1], perm1);
t2 = vec_perm(pix2v[0], pix2v[1], perm2);
-
- /* Calculate a sum of abs differences vector */
+
+ /* Calculate a sum of abs differences vector */
t3 = vec_max(t1, t2);
t4 = vec_min(t1, t2);
t5 = vec_sub(t3, t4);
-
+
/* Add each 4 pixel group together and put 4 results into sad */
sad = vec_sum4s(t5, sad);
@@ -297,7 +297,7 @@ int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
sumdiffs = vec_splat(sumdiffs, 3);
vec_ste(sumdiffs, 0, &s);
-
+
return s;
}
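
Every SAD hunk above shares one idiom: with unsigned pixels, |a - b| equals max(a, b) - min(a, b), which is what the vec_max/vec_min/vec_sub triple computes before vec_sum4s folds the byte differences into four partial sums. A scalar C sketch of what sad16 computes; the function name and signature are illustrative, not FFmpeg API:

    #include <stdint.h>

    /* Scalar reference for sad16: sum of |pix1 - pix2| over a 16xh
     * block, using the max/min trick instead of abs(). */
    static int sad16_scalar(const uint8_t *pix1, const uint8_t *pix2,
                            int line_size, int h)
    {
        int sum = 0;

        for (int i = 0; i < h; i++) {
            for (int j = 0; j < 16; j++) {
                uint8_t a  = pix1[j], b = pix2[j];
                uint8_t hi = a > b ? a : b;     /* vec_max */
                uint8_t lo = a < b ? a : b;     /* vec_min */
                sum += hi - lo;                 /* vec_sub + vec_sum4s */
            }
            pix1 += line_size;
            pix2 += line_size;
        }
        return sum;
    }

The x2/y2/xy2 variants differ only in averaging pix2 with a neighbor (horizontally, vertically, or both) before the same difference step.
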
@@ -326,7 +326,7 @@ int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);
- /* Calculate a sum of abs differences vector */
+ /* Calculate a sum of abs differences vector */
t3 = vec_max(t1, t2);
t4 = vec_min(t1, t2);
t5 = vec_sub(t3, t4);
@@ -355,9 +355,9 @@ int pix_norm1_altivec(uint8_t *pix, int line_size)
vector unsigned char pixv;
vector unsigned int sv;
vector signed int sum;
-
+
sv = (vector unsigned int)vec_splat_u32(0);
-
+
s = 0;
for (i = 0; i < 16; i++) {
/* Read in the potentially unaligned pixels */
@@ -391,12 +391,12 @@ int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
vector unsigned char t1, t2, t3,t4, t5;
vector unsigned int sum;
vector signed int sumsqr;
-
+
sum = (vector unsigned int)vec_splat_u32(0);
permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);
-
+
for(i=0;i<h;i++) {
/* Read potentially unaligned pixels into t1 and t2
Since we're reading 16 pixels, and actually only want 8,
@@ -412,24 +412,24 @@ int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
Since we want to use unsigned chars, we can take advantage
of the fact that abs(a-b)^2 = (a-b)^2.
*/
-
- /* Calculate abs differences vector */
+
+ /* Calculate abs differences vector */
t3 = vec_max(t1, t2);
t4 = vec_min(t1, t2);
t5 = vec_sub(t3, t4);
-
+
/* Square the values and add them to our sum */
sum = vec_msum(t5, t5, sum);
-
+
pix1 += line_size;
pix2 += line_size;
}
-
+
/* Sum up the four partial sums, and put the result into s */
sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
sumsqr = vec_splat(sumsqr, 3);
vec_ste(sumsqr, 0, &s);
-
+
return s;
}
@@ -447,9 +447,9 @@ int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
vector unsigned char t1, t2, t3,t4, t5;
vector unsigned int sum;
vector signed int sumsqr;
-
+
sum = (vector unsigned int)vec_splat_u32(0);
-
+
for(i=0;i<h;i++) {
/* Read potentially unaligned pixels into t1 and t2 */
perm1 = vec_lvsl(0, pix1);
@@ -463,24 +463,24 @@ int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
Since we want to use unsigned chars, we can take advantage
of the fact that abs(a-b)^2 = (a-b)^2.
*/
-
- /* Calculate abs differences vector */
+
+ /* Calculate abs differences vector */
t3 = vec_max(t1, t2);
t4 = vec_min(t1, t2);
t5 = vec_sub(t3, t4);
-
+
/* Square the values and add them to our sum */
sum = vec_msum(t5, t5, sum);
-
+
pix1 += line_size;
pix2 += line_size;
}
-
+
/* Sum up the four partial sums, and put the result into s */
sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
sumsqr = vec_splat(sumsqr, 3);
vec_ste(sumsqr, 0, &s);
-
+
return s;
}
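
sse8 and sse16 build on the same trick: since abs(a-b)^2 == (a-b)^2, the unsigned difference obtained from max/min can be squared as-is, and vec_msum(t5, t5, sum) performs the squaring and accumulation in one step. A scalar equivalent, again with an illustrative name:

    #include <stdint.h>

    /* Scalar reference for sse16: sum of squared differences, 16xh. */
    static int sse16_scalar(const uint8_t *pix1, const uint8_t *pix2,
                            int line_size, int h)
    {
        int sum = 0;

        for (int i = 0; i < h; i++) {
            for (int j = 0; j < 16; j++) {
                /* |a - b| as an unsigned value; squaring removes the
                 * sign, so the max/min result is safe to use. */
                int d = pix1[j] > pix2[j] ? pix1[j] - pix2[j]
                                          : pix2[j] - pix1[j];
                sum += d * d;               /* vec_msum(t5, t5, sum) */
            }
            pix1 += line_size;
            pix2 += line_size;
        }
        return sum;
    }
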
@@ -494,9 +494,9 @@ int pix_sum_altivec(uint8_t * pix, int line_size)
int i;
int s __attribute__((aligned(16)));
-
+
sad = (vector unsigned int)vec_splat_u32(0);
-
+
for (i = 0; i < 16; i++) {
/* Read the potentially unaligned 16 pixels into t1 */
perm = vec_lvsl(0, pix);
@@ -505,15 +505,15 @@ int pix_sum_altivec(uint8_t * pix, int line_size)
/* Add each 4 pixel group together and put 4 results into sad */
sad = vec_sum4s(t1, sad);
-
+
pix += line_size;
}
-
+
/* Sum up the four partial sums, and put the result into s */
sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
sumdiffs = vec_splat(sumdiffs, 3);
vec_ste(sumdiffs, 0, &s);
-
+
return s;
}
@@ -633,7 +633,7 @@ void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) {
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
register int i;
register vector unsigned char vdst, vsrc;
-
+
/* dst and src are 16 bytes-aligned (guaranteed) */
for(i = 0 ; (i + 15) < w ; i++)
{
@@ -799,19 +799,19 @@ POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_num, 1);
int i;
POWERPC_PERF_START_COUNT(altivec_avg_pixels8_num, 1);
-
+
for (i = 0; i < h; i++) {
/*
block is 8 bytes-aligned, so we're either in the
left block (16 bytes-aligned) or in the right block (not)
*/
int rightside = ((unsigned long)block & 0x0000000F);
-
+
blockv = vec_ld(0, block);
pixelsv1 = vec_ld(0, (unsigned char*)pixels);
pixelsv2 = vec_ld(16, (unsigned char*)pixels);
pixelsv = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels));
-
+
if (rightside)
{
pixelsv = vec_perm(blockv, pixelsv, vcprm(0,1,s0,s1));
@@ -820,17 +820,17 @@ POWERPC_PERF_START_COUNT(altivec_avg_pixels8_num, 1);
{
pixelsv = vec_perm(blockv, pixelsv, vcprm(s0,s1,2,3));
}
-
+
blockv = vec_avg(blockv, pixelsv);
vec_st(blockv, 0, block);
-
+
pixels += line_size;
block += line_size;
}
-
+
POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_num, 1);
-
+
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
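
avg_pixels8 leans on vec_avg, the rounded per-byte average (a + b + 1) >> 1; the rightside test only selects which half of the 16-byte store the 8-byte-aligned block occupies. A scalar sketch of the core operation, with hypothetical names:

    #include <stdint.h>

    /* Rounded byte average, the scalar counterpart of vec_avg. */
    static inline uint8_t avg_round(uint8_t a, uint8_t b)
    {
        return (uint8_t)((a + b + 1) >> 1);
    }

    static void avg_pixels8_scalar(uint8_t *block, const uint8_t *pixels,
                                   int line_size, int h)
    {
        for (int i = 0; i < h; i++) {
            for (int j = 0; j < 8; j++)
                block[j] = avg_round(block[j], pixels[j]);
            pixels += line_size;
            block  += line_size;
        }
    }
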
@@ -886,7 +886,7 @@ POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1);
pixelssum1, pixelssum2, temp3;
register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);
-
+
temp1 = vec_ld(0, pixels);
temp2 = vec_ld(16, pixels);
pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
@@ -903,8 +903,8 @@ POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1);
pixelssum1 = vec_add((vector unsigned short)pixelsv1,
(vector unsigned short)pixelsv2);
pixelssum1 = vec_add(pixelssum1, vctwo);
-
-POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1);
+
+POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1);
for (i = 0; i < h ; i++) {
int rightside = ((unsigned long)block & 0x0000000F);
blockv = vec_ld(0, block);
@@ -929,7 +929,7 @@ POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1);
temp3 = vec_sra(temp3, vctwo);
pixelssum1 = vec_add(pixelssum2, vctwo);
pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);
-
+
if (rightside)
{
blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
@@ -938,13 +938,13 @@ POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1);
{
blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
}
-
+
vec_st(blockv, 0, block);
-
+
block += line_size;
pixels += line_size;
}
-
+
POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
@@ -987,7 +987,7 @@ POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
} pixels += 4 - line_size * (h + 1);
block += 4 - line_size * h;
}
-
+
POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
@@ -1002,7 +1002,7 @@ POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
register const_vector unsigned short vcone = (const_vector unsigned short)vec_splat_u16(1);
register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);
-
+
temp1 = vec_ld(0, pixels);
temp2 = vec_ld(16, pixels);
pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
@@ -1019,8 +1019,8 @@ POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
pixelssum1 = vec_add((vector unsigned short)pixelsv1,
(vector unsigned short)pixelsv2);
pixelssum1 = vec_add(pixelssum1, vcone);
-
-POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
+
+POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
for (i = 0; i < h ; i++) {
int rightside = ((unsigned long)block & 0x0000000F);
blockv = vec_ld(0, block);
@@ -1045,7 +1045,7 @@ POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
temp3 = vec_sra(temp3, vctwo);
pixelssum1 = vec_add(pixelssum2, vcone);
pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);
-
+
if (rightside)
{
blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
@@ -1054,13 +1054,13 @@ POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
{
blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
}
-
+
vec_st(blockv, 0, block);
-
+
block += line_size;
pixels += line_size;
}
-
+
POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
@@ -1119,7 +1119,7 @@ POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_xy2_num, 1);
register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);
POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1);
-
+
temp1 = vec_ld(0, pixels);
temp2 = vec_ld(16, pixels);
pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
@@ -1141,7 +1141,7 @@ POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1);
pixelssum1 = vec_add((vector unsigned short)pixelsv1,
(vector unsigned short)pixelsv2);
pixelssum1 = vec_add(pixelssum1, vctwo);
-
+
for (i = 0; i < h ; i++) {
blockv = vec_ld(0, block);
@@ -1161,7 +1161,7 @@ POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1);
pixelsv4 = vec_mergel(vczero, pixelsv2);
pixelsv1 = vec_mergeh(vczero, pixelsv1);
pixelsv2 = vec_mergeh(vczero, pixelsv2);
-
+
pixelssum4 = vec_add((vector unsigned short)pixelsv3,
(vector unsigned short)pixelsv4);
pixelssum2 = vec_add((vector unsigned short)pixelsv1,
@@ -1175,13 +1175,13 @@ POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1);
pixelssum1 = vec_add(pixelssum2, vctwo);
blockv = vec_packsu(temp3, temp4);
-
+
vec_st(blockv, 0, block);
-
+
block += line_size;
pixels += line_size;
}
-
+
POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_xy2_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
@@ -1241,7 +1241,7 @@ POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);
POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
-
+
temp1 = vec_ld(0, pixels);
temp2 = vec_ld(16, pixels);
pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
@@ -1263,7 +1263,7 @@ POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
pixelssum1 = vec_add((vector unsigned short)pixelsv1,
(vector unsigned short)pixelsv2);
pixelssum1 = vec_add(pixelssum1, vcone);
-
+
for (i = 0; i < h ; i++) {
blockv = vec_ld(0, block);
@@ -1283,7 +1283,7 @@ POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
pixelsv4 = vec_mergel(vczero, pixelsv2);
pixelsv1 = vec_mergeh(vczero, pixelsv1);
pixelsv2 = vec_mergeh(vczero, pixelsv2);
-
+
pixelssum4 = vec_add((vector unsigned short)pixelsv3,
(vector unsigned short)pixelsv4);
pixelssum2 = vec_add((vector unsigned short)pixelsv1,
@@ -1297,13 +1297,13 @@ POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
pixelssum1 = vec_add(pixelssum2, vcone);
blockv = vec_packsu(temp3, temp4);
-
+
vec_st(blockv, 0, block);
-
+
block += line_size;
pixels += line_size;
}
-
+
POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
@@ -1382,7 +1382,7 @@ POWERPC_PERF_START_COUNT(altivec_hadamard8_diff8x8_num, 1);
register vector signed short line5 = vec_sub(temp4, temp5);
register vector signed short line6 = vec_add(temp6, temp7);
register vector signed short line7 = vec_sub(temp6, temp7);
-
+
register vector signed short line0B = vec_add(line0, line2);
register vector signed short line2B = vec_sub(line0, line2);
register vector signed short line1B = vec_add(line1, line3);
@@ -1391,7 +1391,7 @@ POWERPC_PERF_START_COUNT(altivec_hadamard8_diff8x8_num, 1);
register vector signed short line6B = vec_sub(line4, line6);
register vector signed short line5B = vec_add(line5, line7);
register vector signed short line7B = vec_sub(line5, line7);
-
+
register vector signed short line0C = vec_add(line0B, line4B);
register vector signed short line4C = vec_sub(line0B, line4B);
register vector signed short line1C = vec_add(line1B, line5B);
@@ -1400,7 +1400,7 @@ POWERPC_PERF_START_COUNT(altivec_hadamard8_diff8x8_num, 1);
register vector signed short line6C = vec_sub(line2B, line6B);
register vector signed short line3C = vec_add(line3B, line7B);
register vector signed short line7C = vec_sub(line3B, line7B);
-
+
vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
vsum = vec_sum4s(vec_abs(line1C), vsum);
vsum = vec_sum4s(vec_abs(line2C), vsum);
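
The kernel above is a fast 8-point Hadamard transform: three butterfly stages with strides 1, 2 and 4 (the line, lineB and lineC sets), followed by a sum of absolute values via the vec_sum4s(vec_abs(...)) chain. A scalar sketch of one such 1-D pass; names are illustrative:

    #include <stdlib.h>

    /* 8-point Hadamard transform: butterflies at strides 1, 2, 4,
     * mirroring the line / lineB / lineC stages above. */
    static void hadamard8_1d(int v[8])
    {
        for (int stride = 1; stride < 8; stride *= 2) {
            int t[8];
            for (int i = 0; i < 8; i += 2 * stride)
                for (int j = 0; j < stride; j++) {
                    t[i + j]          = v[i + j] + v[i + j + stride];
                    t[i + j + stride] = v[i + j] - v[i + j + stride];
                }
            for (int i = 0; i < 8; i++)
                v[i] = t[i];
        }
    }

    /* The SATD metric then sums abs() over all transformed values. */
    static int sum_abs8(const int v[8])
    {
        int s = 0;
        for (int i = 0; i < 8; i++)
            s += abs(v[i]);
        return s;
    }
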
@@ -1421,7 +1421,7 @@ POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff8x8_num, 1);
16x8 works with 16 elements ; it allows to avoid replicating
loads, and give the compiler more rooms for scheduling.
It's only used from inside hadamard8_diff16_altivec.
-
+
Unfortunately, it seems gcc-3.3 is a bit dumb, and
the compiled code has a LOT of spill code, it seems
gcc (unlike xlc) cannot keep everything in registers
@@ -1429,11 +1429,11 @@ POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff8x8_num, 1);
registers allocation. It's not clean, but on
a 7450 the resulting code is much faster (best case
fall from 700+ cycles to 550).
-
+
xlc doesn't add spill code, but it doesn't know how to
schedule for the 7450, and its code isn't much faster than
gcc-3.3 on the 7450 (but uses 25% less instructions...)
-
+
On the 970, the hand-made RA is still a win (arount 690
vs. around 780), but xlc goes to around 660 on the
regular C code...
@@ -1535,7 +1535,7 @@ static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst,
register vector signed short line5 = vec_sub(temp4, temp5);
register vector signed short line6 = vec_add(temp6, temp7);
register vector signed short line7 = vec_sub(temp6, temp7);
-
+
register vector signed short line0B = vec_add(line0, line2);
register vector signed short line2B = vec_sub(line0, line2);
register vector signed short line1B = vec_add(line1, line3);
@@ -1544,7 +1544,7 @@ static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst,
register vector signed short line6B = vec_sub(line4, line6);
register vector signed short line5B = vec_add(line5, line7);
register vector signed short line7B = vec_sub(line5, line7);
-
+
register vector signed short line0C = vec_add(line0B, line4B);
register vector signed short line4C = vec_sub(line0B, line4B);
register vector signed short line1C = vec_add(line1B, line5B);
@@ -1553,7 +1553,7 @@ static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst,
register vector signed short line6C = vec_sub(line2B, line6B);
register vector signed short line3C = vec_add(line3B, line7B);
register vector signed short line7C = vec_sub(line3B, line7B);
-
+
vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
vsum = vec_sum4s(vec_abs(line1C), vsum);
vsum = vec_sum4s(vec_abs(line2C), vsum);
@@ -1649,12 +1649,12 @@ int has_altivec(void)
signal (SIGILL, SIG_DFL);
} else {
canjump = 1;
-
+
asm volatile ("mtspr 256, %0\n\t"
"vand %%v0, %%v0, %%v0"
:
: "r" (-1));
-
+
signal (SIGILL, SIG_DFL);
return 1;
}
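
has_altivec() probes for the vector unit at runtime: it installs the SIGILL handler shown at the top of this file, enables the unit with mtspr 256 and executes a single vand; if that instruction traps, the handler siglongjmps back and the probe reports failure. A condensed, PowerPC-only sketch of the pattern (function name illustrative):

    #include <signal.h>
    #include <setjmp.h>

    static sigjmp_buf jmpbuf;
    static volatile sig_atomic_t canjump = 0;

    static void sigill_handler(int sig)
    {
        if (!canjump) {              /* a SIGILL we did not provoke */
            signal(sig, SIG_DFL);
            raise(sig);
        }
        canjump = 0;
        siglongjmp(jmpbuf, 1);
    }

    static int probe_altivec(void)
    {
        signal(SIGILL, sigill_handler);
        if (sigsetjmp(jmpbuf, 1)) {  /* vand trapped: no AltiVec */
            signal(SIGILL, SIG_DFL);
            return 0;
        }
        canjump = 1;
        /* Enable the vector unit (VRSAVE) and run one AltiVec insn. */
        __asm__ volatile ("mtspr 256, %0\n\t"
                          "vand %%v0, %%v0, %%v0" : : "r" (-1));
        signal(SIGILL, SIG_DFL);
        return 1;
    }
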
@@ -1710,7 +1710,7 @@ POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_xy2_num, 1);
pixelssum1, pixelssum2, temp3;
register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);
-
+
temp1 = vec_ld(0, pixels);
temp2 = vec_ld(16, pixels);
pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
@@ -1727,8 +1727,8 @@ POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_xy2_num, 1);
pixelssum1 = vec_add((vector unsigned short)pixelsv1,
(vector unsigned short)pixelsv2);
pixelssum1 = vec_add(pixelssum1, vctwo);
-
-POWERPC_PERF_START_COUNT(altivec_avg_pixels8_xy2_num, 1);
+
+POWERPC_PERF_START_COUNT(altivec_avg_pixels8_xy2_num, 1);
for (i = 0; i < h ; i++) {
int rightside = ((unsigned long)block & 0x0000000F);
blockv = vec_ld(0, block);
@@ -1753,7 +1753,7 @@ POWERPC_PERF_START_COUNT(altivec_avg_pixels8_xy2_num, 1);
temp3 = vec_sra(temp3, vctwo);
pixelssum1 = vec_add(pixelssum2, vctwo);
pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);
-
+
if (rightside)
{
blocktemp = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
@@ -1762,14 +1762,14 @@ POWERPC_PERF_START_COUNT(altivec_avg_pixels8_xy2_num, 1);
{
blocktemp = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
}
-
+
blockv = vec_avg(blocktemp, blockv);
vec_st(blockv, 0, block);
-
+
block += line_size;
pixels += line_size;
}
-
+
POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_xy2_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
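
All the *_xy2 functions in this file are 2x2 half-pel averaging: (a + b + c + d + 2) >> 2 in the rounding variants and (a + b + c + d + 1) >> 2 in the no_rnd variants, which is why vctwo or vcone is folded into pixelssum before the vec_sra by two. A scalar sketch with an illustrative signature; each output row reads 9 source pixels, matching the unaligned two-vector loads above:

    #include <stdint.h>

    /* Scalar reference for put_pixels8_xy2: 2x2 bilinear half-pel
     * mean. bias is 2 for the rounding variant, 1 for no_rnd. */
    static void put_pixels8_xy2_scalar(uint8_t *block,
                                       const uint8_t *pixels,
                                       int line_size, int h, int bias)
    {
        for (int i = 0; i < h; i++) {
            for (int j = 0; j < 8; j++)
                block[j] = (pixels[j] + pixels[j + 1] +
                            pixels[line_size + j] +
                            pixels[line_size + j + 1] + bias) >> 2;
            pixels += line_size;
            block  += line_size;
        }
    }
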
diff --git a/libavcodec/ppc/dsputil_h264_altivec.c b/libavcodec/ppc/dsputil_h264_altivec.c
index 1891e194ae..7dd9dcf0a1 100644
--- a/libavcodec/ppc/dsputil_h264_altivec.c
+++ b/libavcodec/ppc/dsputil_h264_altivec.c
@@ -15,7 +15,7 @@
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-
+
#include "../dsputil.h"
#include "gcc_fixes.h"
@@ -228,7 +228,7 @@ H264_MC(put_, 16, altivec)
H264_MC(avg_, 16, altivec)
void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) {
-
+
#ifdef HAVE_ALTIVEC
if (has_altivec()) {
c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
@@ -251,16 +251,16 @@ void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) {
c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \
c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \
c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec
-
+
dspfunc(put_h264_qpel, 0, 16);
dspfunc(avg_h264_qpel, 0, 16);
#undef dspfunc
-
+
} else
#endif /* HAVE_ALTIVEC */
{
// Non-AltiVec PPC optimisations
-
+
// ... pending ...
}
}
diff --git a/libavcodec/ppc/dsputil_h264_template_altivec.c b/libavcodec/ppc/dsputil_h264_template_altivec.c
index cb0fa954d7..e5f44501eb 100644
--- a/libavcodec/ppc/dsputil_h264_template_altivec.c
+++ b/libavcodec/ppc/dsputil_h264_template_altivec.c
@@ -47,7 +47,7 @@ void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, in
register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
-
+
vector unsigned char vsrcAuc;
vector unsigned char vsrcBuc;
vector unsigned char vsrcperm0;
@@ -57,7 +57,7 @@ void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, in
vsrcBuc = vec_ld(16, src);
vsrcperm0 = vec_lvsl(0, src);
vsrcperm1 = vec_lvsl(1, src);
-
+
vector unsigned char vsrc0uc;
vector unsigned char vsrc1uc;
vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
@@ -65,7 +65,7 @@ void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, in
vsrc1uc = vsrcBuc;
else
vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
-
+
vector signed short vsrc0ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc0uc);
vector signed short vsrc1ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc1uc);
@@ -73,37 +73,37 @@ void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, in
for (i = 0 ; i < h ; i++) {
vector unsigned char vsrcCuc;
vsrcCuc = vec_ld(stride + 0, src);
-
+
vector unsigned char vsrc2uc;
vector unsigned char vsrc3uc;
vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
-
+
vector signed short vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc2uc);
vector signed short vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc3uc);
-
+
vector signed short psum;
-
+
psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
psum = vec_mladd(vB, vsrc1ssH, psum);
psum = vec_mladd(vC, vsrc2ssH, psum);
psum = vec_mladd(vD, vsrc3ssH, psum);
psum = vec_add(v32ss, psum);
psum = vec_sra(psum, v6us);
-
+
vector unsigned char vdst = vec_ld(0, dst);
vector unsigned char ppsum = (vector unsigned char)vec_packsu(psum, psum);
-
+
vector unsigned char vfdst = vec_perm(vdst, ppsum, fperm);
vector unsigned char fsum;
-
+
OP_U8_ALTIVEC(fsum, vfdst, vdst);
vec_st(fsum, 0, dst);
-
+
vsrc0ssH = vsrc2ssH;
vsrc1ssH = vsrc3ssH;
-
+
dst += stride;
src += stride;
}
@@ -113,7 +113,7 @@ void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, in
vector unsigned char vsrcDuc;
vsrcCuc = vec_ld(stride + 0, src);
vsrcDuc = vec_ld(stride + 16, src);
-
+
vector unsigned char vsrc2uc;
vector unsigned char vsrc3uc;
vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
@@ -121,32 +121,32 @@ void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, in
vsrc3uc = vsrcDuc;
else
vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
-
+
vector signed short vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc2uc);
vector signed short vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc3uc);
-
+
vector signed short psum;
-
+
psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
psum = vec_mladd(vB, vsrc1ssH, psum);
psum = vec_mladd(vC, vsrc2ssH, psum);
psum = vec_mladd(vD, vsrc3ssH, psum);
psum = vec_add(v32ss, psum);
psum = vec_sr(psum, v6us);
-
+
vector unsigned char vdst = vec_ld(0, dst);
- vector unsigned char ppsum = (vector unsigned char)vec_pack(psum, psum);
-
+ vector unsigned char ppsum = (vector unsigned char)vec_pack(psum, psum);
+
vector unsigned char vfdst = vec_perm(vdst, ppsum, fperm);
vector unsigned char fsum;
-
+
OP_U8_ALTIVEC(fsum, vfdst, vdst);
vec_st(fsum, 0, dst);
-
+
vsrc0ssH = vsrc2ssH;
vsrc1ssH = vsrc3ssH;
-
+
dst += stride;
src += stride;
}
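
Both chroma loops evaluate psum = A*src0 + B*src1 + C*src2 + D*src3 + 32, then shift right by 6. For H.264 eighth-pel chroma the weights derive from the fractional offsets x, y in 0..7: A = (8-x)(8-y), B = x(8-y), C = (8-x)y, D = xy, which sum to 64. A scalar sketch (names illustrative; the OP_U8_ALTIVEC put/avg step is left out):

    #include <stdint.h>

    /* Scalar reference for the chroma mc8 kernel: 2x2 bilinear filter
     * with eighth-pel weights, +32 rounding bias, shift by 6. */
    static void chroma_mc8_scalar(uint8_t *dst, const uint8_t *src,
                                  int stride, int h, int x, int y)
    {
        const int A = (8 - x) * (8 - y);
        const int B =      x  * (8 - y);
        const int C = (8 - x) *      y;
        const int D =      x  *      y;    /* A + B + C + D == 64 */

        for (int i = 0; i < h; i++) {
            for (int j = 0; j < 8; j++)
                dst[j] = (A * src[j]          + B * src[j + 1] +
                          C * src[stride + j] + D * src[stride + j + 1] +
                          32) >> 6;
            dst += stride;
            src += stride;
        }
    }
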
@@ -159,7 +159,7 @@ static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t * dst, uint8_t * src, i
POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_h_lowpass_num, 1);
POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1);
register int i;
-
+
const vector signed int vzero = vec_splat_s32(0);
const vector unsigned char permM2 = vec_lvsl(-2, src);
const vector unsigned char permM1 = vec_lvsl(-1, src);
@@ -258,13 +258,13 @@ static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t * dst, uint8_t * src, i
const vector signed short sum2B = vec_adds(srcM1B, srcP2B);
const vector signed short sum3A = vec_adds(srcM2A, srcP3A);
const vector signed short sum3B = vec_adds(srcM2B, srcP3B);
-
+
const vector signed short pp1A = vec_mladd(sum1A, v20ss, v16ss);
const vector signed short pp1B = vec_mladd(sum1B, v20ss, v16ss);
const vector signed short pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
const vector signed short pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
-
+
const vector signed short pp3A = vec_add(sum3A, pp1A);
const vector signed short pp3B = vec_add(sum3B, pp1B);
@@ -300,7 +300,7 @@ POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1);
static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_v_lowpass_num, 1);
POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1);
-
+
register int i;
const vector signed int vzero = vec_splat_s32(0);
@@ -312,7 +312,7 @@ static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t * dst, uint8_t * src, i
const vector unsigned char dstperm = vec_lvsr(0, dst);
const vector unsigned char neg1 = (const vector unsigned char)vec_splat_s8(-1);
const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm);
-
+
uint8_t *srcbis = src - (srcStride * 2);
const vector unsigned char srcM2a = vec_ld(0, srcbis);
@@ -372,13 +372,13 @@ static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t * dst, uint8_t * src, i
srcP1ssB = srcP2ssB;
srcP2ssA = srcP3ssA;
srcP2ssB = srcP3ssB;
-
+
const vector signed short pp1A = vec_mladd(sum1A, v20ss, v16ss);
const vector signed short pp1B = vec_mladd(sum1B, v20ss, v16ss);
const vector signed short pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
const vector signed short pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
-
+
const vector signed short pp3A = vec_add(sum3A, pp1A);
const vector signed short pp3B = vec_add(sum3B, pp1B);
@@ -513,7 +513,7 @@ static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp,
const vector signed short sum2B = vec_adds(srcM1B, srcP2B);
const vector signed short sum3A = vec_adds(srcM2A, srcP3A);
const vector signed short sum3B = vec_adds(srcM2B, srcP3B);
-
+
const vector signed short pp1A = vec_mladd(sum1A, v20ss, sum3A);
const vector signed short pp1B = vec_mladd(sum1B, v20ss, sum3B);
@@ -525,18 +525,18 @@ static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp,
vec_st(psumA, 0, tmp);
vec_st(psumB, 16, tmp);
-
+
src += srcStride;
tmp += tmpStride; /* int16_t*, and stride is 16, so it's OK here */
}
-
+
const vector unsigned char dstperm = vec_lvsr(0, dst);
const vector unsigned char neg1 = (const vector unsigned char)vec_splat_s8(-1);
const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm);
const vector unsigned char mperm = (const vector unsigned char)
AVV(0x00, 0x08, 0x01, 0x09, 0x02, 0x0A, 0x03, 0x0B,
0x04, 0x0C, 0x05, 0x0D, 0x06, 0x0E, 0x07, 0x0F);
-
+
int16_t *tmpbis = tmp - (tmpStride * 21);
vector signed short tmpM2ssA = vec_ld(0, tmpbis);
@@ -607,7 +607,7 @@ static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp,
const vector signed int sumAo = vec_add(pp1cAo, pp32Ao);
const vector signed int sumBe = vec_add(pp1cBe, pp32Be);
const vector signed int sumBo = vec_add(pp1cBo, pp32Bo);
-
+
const vector signed int ssumAe = vec_sra(sumAe, v10ui);
const vector signed int ssumAo = vec_sra(sumAo, v10ui);
const vector signed int ssumBe = vec_sra(sumBe, v10ui);
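
The qpel lowpass passes implement the H.264 six-tap filter (1, -5, 20, 20, -5, 1): sum1 = P0+P1 carries weight 20, sum2 = M1+P2 weight -5, sum3 = M2+P3 weight 1, with a +16 bias and a final shift by 5; the hv pass keeps wider intermediates and shifts by 10 (the v10ui above). A scalar sketch of the horizontal case, assuming the caller provides 2 pixels of left margin and 3 of right margin (name and signature illustrative):

    #include <stdint.h>

    static inline uint8_t clip_uint8(int v)
    {
        return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
    }

    /* Scalar reference for the h_lowpass kernel: six-tap filter
     * (1, -5, 20, 20, -5, 1), +16 rounding, shift by 5. */
    static void qpel_h_lowpass_scalar(uint8_t *dst, const uint8_t *src,
                                      int dstStride, int srcStride,
                                      int w, int h)
    {
        for (int i = 0; i < h; i++) {
            for (int j = 0; j < w; j++) {
                int sum1 = src[j]     + src[j + 1];   /* weight  20 */
                int sum2 = src[j - 1] + src[j + 2];   /* weight  -5 */
                int sum3 = src[j - 2] + src[j + 3];   /* weight   1 */
                dst[j] = clip_uint8((20 * sum1 - 5 * sum2 + sum3 + 16) >> 5);
            }
            dst += dstStride;
            src += srcStride;
        }
    }
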
diff --git a/libavcodec/ppc/dsputil_ppc.c b/libavcodec/ppc/dsputil_ppc.c
index 776f4235c7..f8c7f94d3a 100644
--- a/libavcodec/ppc/dsputil_ppc.c
+++ b/libavcodec/ppc/dsputil_ppc.c
@@ -227,7 +227,7 @@ long check_dcbzl_effect(void)
}
av_free(fakedata);
-
+
return count;
}
#else
@@ -257,10 +257,10 @@ void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx)
#ifdef HAVE_ALTIVEC
dsputil_h264_init_ppc(c, avctx);
-
+
if (has_altivec()) {
mm_flags |= MM_ALTIVEC;
-
+
// Altivec specific optimisations
c->pix_abs[0][1] = sad16_x2_altivec;
c->pix_abs[0][2] = sad16_y2_altivec;
@@ -289,7 +289,7 @@ void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx)
c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec;
c->put_pixels_tab[0][3] = put_pixels16_xy2_altivec;
c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec;
-
+
c->gmc1 = gmc1_altivec;
#ifdef CONFIG_DARWIN // ATM gcc-3.3 and gcc-3.4 fail to compile these in linux...
@@ -319,7 +319,7 @@ void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx)
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
}
-
+
#ifdef POWERPC_PERFORMANCE_REPORT
{
int i, j;
diff --git a/libavcodec/ppc/fft_altivec.c b/libavcodec/ppc/fft_altivec.c
index 29d85e87dd..52aecc163f 100644
--- a/libavcodec/ppc/fft_altivec.c
+++ b/libavcodec/ppc/fft_altivec.c
@@ -71,9 +71,9 @@ POWERPC_PERF_DECLARE(altivec_fft_num, s->nbits >= 6);
FFTComplex *exptab = s->exptab;
int l;
FFTSample tmp_re, tmp_im;
-
+
POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6);
-
+
np = 1 << ln;
/* pass 0 */
@@ -81,29 +81,29 @@ POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6);
p=&z[0];
j=(np >> 1);
do {
- BF(p[0].re, p[0].im, p[1].re, p[1].im,
+ BF(p[0].re, p[0].im, p[1].re, p[1].im,
p[0].re, p[0].im, p[1].re, p[1].im);
p+=2;
} while (--j != 0);
/* pass 1 */
-
+
p=&z[0];
j=np >> 2;
if (s->inverse) {
do {
- BF(p[0].re, p[0].im, p[2].re, p[2].im,
+ BF(p[0].re, p[0].im, p[2].re, p[2].im,
p[0].re, p[0].im, p[2].re, p[2].im);
- BF(p[1].re, p[1].im, p[3].re, p[3].im,
+ BF(p[1].re, p[1].im, p[3].re, p[3].im,
p[1].re, p[1].im, -p[3].im, p[3].re);
p+=4;
} while (--j != 0);
} else {
do {
- BF(p[0].re, p[0].im, p[2].re, p[2].im,
+ BF(p[0].re, p[0].im, p[2].re, p[2].im,
p[0].re, p[0].im, p[2].re, p[2].im);
- BF(p[1].re, p[1].im, p[3].re, p[3].im,
+ BF(p[1].re, p[1].im, p[3].re, p[3].im,
p[1].re, p[1].im, p[3].im, -p[3].re);
p+=4;
} while (--j != 0);
@@ -119,7 +119,7 @@ POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6);
for (j = 0; j < nblocks; ++j) {
BF(p->re, p->im, q->re, q->im,
p->re, p->im, q->re, q->im);
-
+
p++;
q++;
for(l = nblocks; l < np2; l += nblocks) {
@@ -145,7 +145,7 @@ POWERPC_PERF_STOP_COUNT(altivec_fft_num, s->nbits >= 6);
#else
register const vector float vczero = (const vector float){0.,0.,0.,0.};
#endif
-
+
int ln = s->nbits;
int j, np, np2;
int nblocks, nloops;
@@ -163,7 +163,7 @@ POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6);
r = (vector float *)&z[0];
c1 = vcii(p,p,n,n);
-
+
if (s->inverse)
{
c2 = vcii(p,p,n,p);
@@ -172,27 +172,27 @@ POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6);
{
c2 = vcii(p,p,p,n);
}
-
+
j = (np >> 2);
do {
a = vec_ld(0, r);
a1 = vec_ld(sizeof(vector float), r);
-
+
b = vec_perm(a,a,vcprmle(1,0,3,2));
a = vec_madd(a,c1,b);
/* do the pass 0 butterfly */
-
+
b = vec_perm(a1,a1,vcprmle(1,0,3,2));
b = vec_madd(a1,c1,b);
/* do the pass 0 butterfly */
-
+
/* multiply third by -i */
b = vec_perm(b,b,vcprmle(2,3,1,0));
-
+
/* do the pass 1 butterfly */
vec_st(vec_madd(b,c2,a), 0, r);
vec_st(vec_nmsub(b,c2,a), sizeof(vector float), r);
-
+
r += 2;
} while (--j != 0);
}
@@ -215,7 +215,7 @@ POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6);
a = vec_ld(0, (float*)p);
b = vec_ld(0, (float*)q);
-
+
/* complex mul */
c = vec_ld(0, (float*)cptr);
/* cre*re cim*re */
@@ -223,16 +223,16 @@ POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6);
c = vec_ld(sizeof(vector float), (float*)cptr);
/* -cim*im cre*im */
b = vec_madd(c, vec_perm(b,b,vcprmle(3,3,1,1)),t1);
-
+
/* butterfly */
vec_st(vec_add(a,b), 0, (float*)p);
vec_st(vec_sub(a,b), 0, (float*)q);
-
+
p += 2;
q += 2;
cptr += 4;
} while (--k);
-
+
p += nloops;
q += nloops;
} while (--j);
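
The scalar path of this FFT is a classic radix-2 decimation-in-time transform: BF(...) is the butterfly p' = p + q, q' = p - q, and the later passes multiply q by a twiddle factor from exptab first, which is what the vectorized complex multiply with cre/cim does four lanes at a time. A minimal sketch of the two primitives, assuming an FFTComplex with float re/im fields as in the surrounding code:

    typedef struct {
        float re, im;
    } FFTComplex;

    /* Radix-2 butterfly, the scalar BF(): p' = p + q, q' = p - q. */
    static inline void bf(FFTComplex *p, FFTComplex *q)
    {
        float tre = q->re, tim = q->im;
        q->re = p->re - tre;
        q->im = p->im - tim;
        p->re += tre;
        p->im += tim;
    }

    /* Twiddle: q *= w, the complex multiply split into cre*re/cim*re
     * and -cim*im/cre*im terms in the vector loop above. */
    static inline void twiddle(FFTComplex *q, FFTComplex w)
    {
        float re = q->re * w.re - q->im * w.im;
        float im = q->re * w.im + q->im * w.re;
        q->re = re;
        q->im = im;
    }
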
diff --git a/libavcodec/ppc/gcc_fixes.h b/libavcodec/ppc/gcc_fixes.h
index 13d4ff12e7..194a3f8be4 100644
--- a/libavcodec/ppc/gcc_fixes.h
+++ b/libavcodec/ppc/gcc_fixes.h
@@ -1,6 +1,6 @@
/*
* gcc fixes for altivec.
- * Used to workaround broken gcc (FSF gcc-3 pre gcc-3.3)
+ * Used to workaround broken gcc (FSF gcc-3 pre gcc-3.3)
* and to stay somewhat compatible with Darwin.
*/
@@ -19,7 +19,7 @@
# endif
#else
#define AVV(x...) {x}
-#if (__GNUC__ * 100 + __GNUC_MINOR__ < 303)
+#if (__GNUC__ * 100 + __GNUC_MINOR__ < 303)
/* This code was provided to me by Bartosch Pixa
* as a separate header file (broken_mergel.h).
@@ -58,9 +58,9 @@ static inline vector signed int ff_vmrglw (vector signed int const A,
};
return vec_perm (A, B, lowword);
}
-/*#define ff_vmrglb ff_vmrglb
-#define ff_vmrglh ff_vmrglh
-#define ff_vmrglw ff_vmrglw
+/*#define ff_vmrglb ff_vmrglb
+#define ff_vmrglh ff_vmrglh
+#define ff_vmrglw ff_vmrglw
*/
#undef vec_mergel
diff --git a/libavcodec/ppc/gmc_altivec.c b/libavcodec/ppc/gmc_altivec.c
index 3448216859..0c82c39718 100644
--- a/libavcodec/ppc/gmc_altivec.c
+++ b/libavcodec/ppc/gmc_altivec.c
@@ -40,7 +40,7 @@ POWERPC_PERF_DECLARE(altivec_gmc1_num, GMC1_PERF_COND);
int i;
POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
-
+
for(i=0; i<h; i++)
{
dst[0]= (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + rounder)>>8;
@@ -87,7 +87,7 @@ POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
Dv = vec_splat(tempA, 3);
rounderV = vec_ld(0, (unsigned short*)rounder_a);
-
+
// we'll be able to pick-up our 9 char elements
// at src from those 32 bytes
// we load the first batch here, as inside the loop
@@ -96,7 +96,7 @@ POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
src_0 = vec_ld(0, src);
src_1 = vec_ld(16, src);
srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src));
-
+
if (src_really_odd != 0x0000000F)
{ // if src & 0xF == 0xF, then (src+1) is properly aligned on the second vector.
srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
@@ -107,14 +107,14 @@ POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
}
srcvA = vec_mergeh(vczero, srcvA);
srcvB = vec_mergeh(vczero, srcvB);
-
+
for(i=0; i<h; i++)
{
dst_odd = (unsigned long)dst & 0x0000000F;
src_really_odd = (((unsigned long)src) + stride) & 0x0000000F;
-
+
dstv = vec_ld(0, dst);
-
+
// we we'll be able to pick-up our 9 char elements
// at src + stride from those 32 bytes
// then reuse the resulting 2 vectors srvcC and srcvD
@@ -122,7 +122,7 @@ POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
src_0 = vec_ld(stride + 0, src);
src_1 = vec_ld(stride + 16, src);
srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));
-
+
if (src_really_odd != 0x0000000F)
{ // if src & 0xF == 0xF, then (src+1) is properly aligned on the second vector.
srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
@@ -131,10 +131,10 @@ POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
{
srcvD = src_1;
}
-
+
srcvC = vec_mergeh(vczero, srcvC);
srcvD = vec_mergeh(vczero, srcvD);
-
+
// OK, now we (finally) do the math :-)
// those four instructions replaces 32 int muls & 32 int adds.
@@ -143,14 +143,14 @@ POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA);
tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB);
tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC);
-
+
srcvA = srcvC;
srcvB = srcvD;
-
+
tempD = vec_sr(tempD, vcsr8);
-
+
dstv2 = vec_pack(tempD, (vector unsigned short)vczero);
-
+
if (dst_odd)
{
dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1));
@@ -159,9 +159,9 @@ POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
{
dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3));
}
-
+
vec_st(dstv2, 0, dst);
-
+
dst += stride;
src += stride;
}
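
gmc1 is the same 2x2 bilinear pattern with 4-bit fractional positions: the scalar reference in the first hunk computes (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + rounder) >> 8. Under the usual MPEG-4 GMC1 convention the weights come from offsets x, y in 0..15 and sum to 256, matching the shift by 8; a small sketch of that derivation (helper name hypothetical):

    /* GMC1 weights from 4-bit fractional offsets x, y in 0..15;
     * A + B + C + D == 256, hence the final >> 8 in the kernel. */
    static void gmc1_weights(int x, int y, int *A, int *B, int *C, int *D)
    {
        *A = (16 - x) * (16 - y);
        *B =       x  * (16 - y);
        *C = (16 - x) *       y;
        *D =       x  *       y;
    }
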
diff --git a/libavcodec/ppc/idct_altivec.c b/libavcodec/ppc/idct_altivec.c
index 3445adaddf..ac91199ea5 100644
--- a/libavcodec/ppc/idct_altivec.c
+++ b/libavcodec/ppc/idct_altivec.c
@@ -86,7 +86,7 @@
vy3 = vec_adds (t2, t6); \
vy4 = vec_subs (t2, t6);
-
+
#define IDCT \
vector_s16_t vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7; \
vector_s16_t vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7; \
diff --git a/libavcodec/ppc/mpegvideo_altivec.c b/libavcodec/ppc/mpegvideo_altivec.c
index 91e744af97..4477d3ffa8 100644
--- a/libavcodec/ppc/mpegvideo_altivec.c
+++ b/libavcodec/ppc/mpegvideo_altivec.c
@@ -25,7 +25,7 @@
#include "../mpegvideo.h"
#include "gcc_fixes.h"
-
+
#include "dsputil_altivec.h"
// Swaps two variables (used for altivec registers)
@@ -103,7 +103,7 @@ do { \
// slower, for dumb non-apple GCC
#define FOUROF(a) {a,a,a,a}
#endif
-int dct_quantize_altivec(MpegEncContext* s,
+int dct_quantize_altivec(MpegEncContext* s,
DCTELEM* data, int n,
int qscale, int* overflow)
{
@@ -273,7 +273,7 @@ int dct_quantize_altivec(MpegEncContext* s,
if (whichPass == 1)
{
// transpose the data for the second pass
-
+
// First, block transpose the upper right with lower left.
SWAP(row4, alt0);
SWAP(row5, alt1);
@@ -380,7 +380,7 @@ int dct_quantize_altivec(MpegEncContext* s,
vec_cmpgt(alt7, zero));
}
-
+
}
// Store the data back into the original block
@@ -469,7 +469,7 @@ int dct_quantize_altivec(MpegEncContext* s,
vec_ste(scanIndices_01, 0, &lastNonZeroChar);
lastNonZero = lastNonZeroChar;
-
+
// While the data is still in vectors we check for the transpose IDCT permute
// and handle it using the vector unit if we can. This is the permute used
// by the altivec idct, so it is common when using the altivec dct.
@@ -523,30 +523,30 @@ int dct_quantize_altivec(MpegEncContext* s,
AltiVec version of dct_unquantize_h263
this code assumes `block' is 16 bytes-aligned
*/
-void dct_unquantize_h263_altivec(MpegEncContext *s,
+void dct_unquantize_h263_altivec(MpegEncContext *s,
DCTELEM *block, int n, int qscale)
{
POWERPC_PERF_DECLARE(altivec_dct_unquantize_h263_num, 1);
int i, level, qmul, qadd;
int nCoeffs;
-
+
assert(s->block_last_index[n]>=0);
POWERPC_PERF_START_COUNT(altivec_dct_unquantize_h263_num, 1);
-
+
qadd = (qscale - 1) | 1;
qmul = qscale << 1;
-
+
if (s->mb_intra) {
if (!s->h263_aic) {
- if (n < 4)
+ if (n < 4)
block[0] = block[0] * s->y_dc_scale;
else
block[0] = block[0] * s->c_dc_scale;
}else
qadd = 0;
i = 1;
- nCoeffs= 63; //does not allways use zigzag table
+ nCoeffs= 63; //does not allways use zigzag table
} else {
i = 0;
nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];
@@ -586,7 +586,7 @@ POWERPC_PERF_START_COUNT(altivec_dct_unquantize_h263_num, 1);
register vector bool short blockv_null, blockv_neg;
register short backup_0 = block[0];
register int j = 0;
-
+
qmulv = vec_ld(0, qmul8);
qaddv = vec_ld(0, qadd8);
nqaddv = vec_ld(0, nqadd8);
@@ -605,7 +605,7 @@ POWERPC_PERF_START_COUNT(altivec_dct_unquantize_h263_num, 1);
}
}
#endif
-
+
// vectorize all the 16 bytes-aligned blocks
// of 8 elements
for(; (j + 7) <= nCoeffs ; j+=8)
@@ -637,7 +637,7 @@ POWERPC_PERF_START_COUNT(altivec_dct_unquantize_h263_num, 1);
block[j] = level;
}
}
-
+
if (i == 1)
{ // cheat. this avoid special-casing the first iteration
block[0] = backup_0;
diff --git a/libavcodec/ppc/mpegvideo_ppc.c b/libavcodec/ppc/mpegvideo_ppc.c
index 832baced0c..1e24dea487 100644
--- a/libavcodec/ppc/mpegvideo_ppc.c
+++ b/libavcodec/ppc/mpegvideo_ppc.c
@@ -15,7 +15,7 @@
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-
+
#include "../dsputil.h"
#include "../mpegvideo.h"
#include <time.h>
@@ -24,7 +24,7 @@
#include "dsputil_altivec.h"
#endif
-extern int dct_quantize_altivec(MpegEncContext *s,
+extern int dct_quantize_altivec(MpegEncContext *s,
DCTELEM *block, int n,
int qscale, int *overflow);
extern void dct_unquantize_h263_altivec(MpegEncContext *s,