about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author	Michael Niedermayer <michaelni@gmx.at>	2015-05-31 21:38:58 +0200
committer	Michael Niedermayer <michaelni@gmx.at>	2015-05-31 21:38:58 +0200
commit	e8676c758a627efc1744e2084c01e832c03ed2d0 (patch)
tree	d2c287116454898f2cf892974e45ced6fe4ef8a8
parent	876a663c668165e13feb7bad829d6ac034a6d416 (diff)
parent	72cebae0d981dde144340cf51f3c323f01e215e5 (diff)
download	ffmpeg-e8676c758a627efc1744e2084c01e832c03ed2d0.tar.gz
Merge commit '72cebae0d981dde144340cf51f3c323f01e215e5'
* commit '72cebae0d981dde144340cf51f3c323f01e215e5':
  ppc: avutil: Use the abriged vector types

Conflicts:
	libavutil/ppc/float_dsp_altivec.c
	libavutil/ppc/util_altivec.h

Merged-by: Michael Niedermayer <michaelni@gmx.at>
-rw-r--r--	libavutil/ppc/float_dsp_altivec.c	14
-rw-r--r--	libavutil/ppc/util_altivec.h	16
2 files changed, 15 insertions, 15 deletions
diff --git a/libavutil/ppc/float_dsp_altivec.c b/libavutil/ppc/float_dsp_altivec.c
index 49af9f9abf..6aa3e51cef 100644
--- a/libavutil/ppc/float_dsp_altivec.c
+++ b/libavutil/ppc/float_dsp_altivec.c
@@ -25,7 +25,7 @@ void ff_vector_fmul_altivec(float *dst, const float *src0, const float *src1,
int len)
{
int i;
- vector float d0, d1, s, zero = (vector float)vec_splat_u32(0);
+ vec_f d0, d1, s, zero = (vec_f)vec_splat_u32(0);
for (i = 0; i < len - 7; i += 8) {
d0 = vec_ld( 0, src0 + i);
s = vec_ld( 0, src1 + i);
@@ -40,15 +40,15 @@ void ff_vector_fmul_altivec(float *dst, const float *src0, const float *src1,
void ff_vector_fmul_window_altivec(float *dst, const float *src0,
const float *src1, const float *win, int len)
{
- vector float zero, t0, t1, s0, s1, wi, wj;
- const vector unsigned char reverse = vcprm(3, 2, 1, 0);
+ vec_f zero, t0, t1, s0, s1, wi, wj;
+ const vec_u8 reverse = vcprm(3, 2, 1, 0);
int i, j;
dst += len;
win += len;
src0 += len;
- zero = (vector float)vec_splat_u32(0);
+ zero = (vec_f)vec_splat_u32(0);
for (i = -len * 4, j = len * 4 - 16; i < 0; i += 16, j -= 16) {
s0 = vec_ld(i, src0);
@@ -75,7 +75,7 @@ void ff_vector_fmul_add_altivec(float *dst, const float *src0,
int len)
{
int i;
- vector float d, ss0, ss1, ss2, t0, t1, edges;
+ vec_f d, ss0, ss1, ss2, t0, t1, edges;
for (i = 0; i < len - 3; i += 4) {
t0 = vec_ld(0, dst + i);
@@ -96,8 +96,8 @@ void ff_vector_fmul_reverse_altivec(float *dst, const float *src0,
const float *src1, int len)
{
int i;
- vector float d, s0, s1, h0, l0,
- s2, s3, zero = (vector float) vec_splat_u32(0);
+ vec_f d, s0, s1, h0, l0, s2, s3;
+ vec_f zero = (vec_f)vec_splat_u32(0);
src1 += len-4;
for(i = 0; i < len - 7; i += 8) {
diff --git a/libavutil/ppc/util_altivec.h b/libavutil/ppc/util_altivec.h
index 51a4e8ca58..0db58730fe 100644
--- a/libavutil/ppc/util_altivec.h
+++ b/libavutil/ppc/util_altivec.h
@@ -46,7 +46,7 @@
#define WORD_s1 0x14,0x15,0x16,0x17
#define WORD_s2 0x18,0x19,0x1a,0x1b
#define WORD_s3 0x1c,0x1d,0x1e,0x1f
-#define vcprm(a,b,c,d) (const vector unsigned char){WORD_ ## a, WORD_ ## b, WORD_ ## c, WORD_ ## d}
+#define vcprm(a,b,c,d) (const vec_u8){WORD_ ## a, WORD_ ## b, WORD_ ## c, WORD_ ## d}
#define SWP_W2S0 0x02,0x03,0x00,0x01
#define SWP_W2S1 0x06,0x07,0x04,0x05
@@ -65,8 +65,8 @@
// Transpose 8x8 matrix of 16-bit elements (in-place)
#define TRANSPOSE8(a,b,c,d,e,f,g,h) \
do { \
- vector signed short A1, B1, C1, D1, E1, F1, G1, H1; \
- vector signed short A2, B2, C2, D2, E2, F2, G2, H2; \
+ vec_s16 A1, B1, C1, D1, E1, F1, G1, H1; \
+ vec_s16 A2, B2, C2, D2, E2, F2, G2, H2; \
\
A1 = vec_mergeh (a, e); \
B1 = vec_mergel (a, e); \
@@ -108,17 +108,17 @@ do { \
/** @brief loads unaligned vector @a *src with offset @a offset
and returns it */
#if HAVE_BIGENDIAN
-static inline vector unsigned char unaligned_load(int offset, const uint8_t *src)
+static inline vec_u8 unaligned_load(int offset, const uint8_t *src)
{
- register vector unsigned char first = vec_ld(offset, src);
- register vector unsigned char second = vec_ld(offset+15, src);
- register vector unsigned char mask = vec_lvsl(offset, src);
+ register vec_u8 first = vec_ld(offset, src);
+ register vec_u8 second = vec_ld(offset + 15, src);
+ register vec_u8 mask = vec_lvsl(offset, src);
return vec_perm(first, second, mask);
}
static inline vec_u8 load_with_perm_vec(int offset, const uint8_t *src, vec_u8 perm_vec)
{
vec_u8 a = vec_ld(offset, src);
- vec_u8 b = vec_ld(offset+15, src);
+ vec_u8 b = vec_ld(offset + 15, src);
return vec_perm(a, b, perm_vec);
}
#else