diff options
author | Michael Niedermayer <michaelni@gmx.at> | 2014-11-24 11:39:26 +0100 |
---|---|---|
committer | Michael Niedermayer <michaelni@gmx.at> | 2014-11-24 12:13:00 +0100 |
commit | ea41e6d6373063c3a2a9bf48ce8e1c2e6340b694 (patch) | |
tree | dc454e62fb5233745a40d3d8110d17aed4c72317 /libavcodec/ppc | |
parent | b4d8724ab28d63d1297f3e48f491d827e48b6a81 (diff) | |
parent | 9c12c6ff9539e926df0b2a2299e915ae71872600 (diff) | |
download | ffmpeg-ea41e6d6373063c3a2a9bf48ce8e1c2e6340b694.tar.gz |
Merge commit '9c12c6ff9539e926df0b2a2299e915ae71872600'
* commit '9c12c6ff9539e926df0b2a2299e915ae71872600':
motion_est: convert stride to ptrdiff_t
Conflicts:
libavcodec/me_cmp.c
libavcodec/ppc/me_cmp.c
libavcodec/x86/me_cmp_init.c
See: 9c669672c7fd45ef1cad782ab551be438ceac6cd
Merged-by: Michael Niedermayer <michaelni@gmx.at>
Diffstat (limited to 'libavcodec/ppc')
-rw-r--r-- | libavcodec/ppc/me_cmp.c | 56 |
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/libavcodec/ppc/me_cmp.c b/libavcodec/ppc/me_cmp.c index e280ea458a..38a7ba1476 100644 --- a/libavcodec/ppc/me_cmp.c +++ b/libavcodec/ppc/me_cmp.c @@ -55,7 +55,7 @@ } #endif static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, - int line_size, int h) + ptrdiff_t stride, int h) { int i; int __attribute__((aligned(16))) s = 0; @@ -83,8 +83,8 @@ static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, /* Add each 4 pixel group together and put 4 results into sad. */ sad = vec_sum4s(t5, sad); - pix1 += line_size; - pix2 += line_size; + pix1 += stride; + pix2 += stride; } /* Sum up the four partial sums, and put the result into s. */ sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero); @@ -95,7 +95,7 @@ static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, } static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, - int line_size, int h) + ptrdiff_t stride, int h) { int i; int __attribute__((aligned(16))) s = 0; @@ -105,9 +105,9 @@ static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, vector unsigned int sad = (vector unsigned int) vec_splat_u32(0); vector signed int sumdiffs; - uint8_t *pix3 = pix2 + line_size; + uint8_t *pix3 = pix2 + stride; - /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one + /* Due to the fact that pix3 = pix2 + stride, the pix3 of one * iteration becomes pix2 in the next iteration. We can use this * fact to avoid a potentially expensive unaligned read, each * time around the loop. @@ -132,9 +132,9 @@ static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, /* Add each 4 pixel group together and put 4 results into sad. */ sad = vec_sum4s(t5, sad); - pix1 += line_size; + pix1 += stride; pix2v = pix3v; - pix3 += line_size; + pix3 += stride; } /* Sum up the four partial sums, and put the result into s. 
*/ @@ -145,11 +145,11 @@ static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, } static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, - int line_size, int h) + ptrdiff_t stride, int h) { int i; int __attribute__((aligned(16))) s = 0; - uint8_t *pix3 = pix2 + line_size; + uint8_t *pix3 = pix2 + stride; const vector unsigned char zero = (const vector unsigned char) vec_splat_u8(0); const vector unsigned short two = @@ -163,7 +163,7 @@ static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, vector unsigned char perm1, perm2, pix2v, pix2iv; GET_PERM(perm1, perm2, pix2); - /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one + /* Due to the fact that pix3 = pix2 + stride, the pix3 of one * iteration becomes pix2 in the next iteration. We can use this * fact to avoid a potentially expensive unaligned read, as well * as some splitting, and vector addition each time around the loop. @@ -219,8 +219,8 @@ static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, /* Add each 4 pixel group together and put 4 results into sad. */ sad = vec_sum4s(t5, sad); - pix1 += line_size; - pix3 += line_size; + pix1 += stride; + pix3 += stride; /* Transfer the calculated values for pix3 into pix2. */ t1 = t3; t2 = t4; @@ -234,7 +234,7 @@ static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, } static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, - int line_size, int h) + ptrdiff_t stride, int h) { int i; int __attribute__((aligned(16))) s; @@ -256,8 +256,8 @@ static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, /* Add each 4 pixel group together and put 4 results into sad. */ sad = vec_sum4s(t5, sad); - pix1 += line_size; - pix2 += line_size; + pix1 += stride; + pix2 += stride; } /* Sum up the four partial sums, and put the result into s. 
*/ @@ -269,7 +269,7 @@ static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, } static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, - int line_size, int h) + ptrdiff_t stride, int h) { int i; int __attribute__((aligned(16))) s; @@ -298,8 +298,8 @@ static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, /* Add each 4 pixel group together and put 4 results into sad. */ sad = vec_sum4s(t5, sad); - pix1 += line_size; - pix2 += line_size; + pix1 += stride; + pix2 += stride; } /* Sum up the four partial sums, and put the result into s. */ @@ -313,7 +313,7 @@ static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, /* Sum of Squared Errors for an 8x8 block, AltiVec-enhanced. * It's the sad8_altivec code above w/ squaring added. */ static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, - int line_size, int h) + ptrdiff_t stride, int h) { int i; int __attribute__((aligned(16))) s; @@ -343,8 +343,8 @@ static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, /* Square the values and add them to our sum. */ sum = vec_msum(t5, t5, sum); - pix1 += line_size; - pix2 += line_size; + pix1 += stride; + pix2 += stride; } /* Sum up the four partial sums, and put the result into s. */ @@ -358,7 +358,7 @@ static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, /* Sum of Squared Errors for a 16x16 block, AltiVec-enhanced. * It's the sad16_altivec code above w/ squaring added. */ static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, - int line_size, int h) + ptrdiff_t stride, int h) { int i; int __attribute__((aligned(16))) s; @@ -383,8 +383,8 @@ static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, /* Square the values and add them to our sum. */ sum = vec_msum(t5, t5, sum); - pix1 += line_size; - pix2 += line_size; + pix1 += stride; + pix2 += stride; } /* Sum up the four partial sums, and put the result into s. 
*/ @@ -396,7 +396,7 @@ static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, } static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst, - uint8_t *src, int stride, int h) + uint8_t *src, ptrdiff_t stride, int h) { int __attribute__((aligned(16))) sum; register const vector unsigned char vzero = @@ -522,7 +522,7 @@ static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst, * but xlc goes to around 660 on the regular C code... */ static int hadamard8_diff16x8_altivec(MpegEncContext *s, uint8_t *dst, - uint8_t *src, int stride, int h) + uint8_t *src, ptrdiff_t stride, int h) { int __attribute__((aligned(16))) sum; register vector signed short @@ -713,7 +713,7 @@ static int hadamard8_diff16x8_altivec(MpegEncContext *s, uint8_t *dst, } static int hadamard8_diff16_altivec(MpegEncContext *s, uint8_t *dst, - uint8_t *src, int stride, int h) + uint8_t *src, ptrdiff_t stride, int h) { int score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8); |