diff options
author | Loren Merritt <lorenm@u.washington.edu> | 2009-12-05 15:09:10 +0000 |
---|---|---|
committer | Loren Merritt <lorenm@u.washington.edu> | 2009-12-05 15:09:10 +0000 |
commit | b1159ad92818cd8f0885d252b0800f5960fe7241 (patch) | |
tree | a9d4177c61a9a89b4ac78a4a5b8a95f962a858a0 /libavcodec/ppc/int_altivec.c | |
parent | e470691aa8798004bf5589871865a765cb791014 (diff) | |
download | ffmpeg-b1159ad92818cd8f0885d252b0800f5960fe7241.tar.gz |
refactor and optimize scalarproduct
29-105% faster apply_filter, 6-90% faster ape decoding on core2
(Any x86 other than core2 probably gets much less, since this is mostly due to ssse3 cachesplit avoidance and I haven't written the full gamut of other cachesplit modes.)
9-123% faster ape decoding on G4.
Originally committed as revision 20739 to svn://svn.ffmpeg.org/ffmpeg/trunk
Diffstat (limited to 'libavcodec/ppc/int_altivec.c')
-rw-r--r-- | libavcodec/ppc/int_altivec.c | 66 |
1 file changed, 36 insertions, 30 deletions
diff --git a/libavcodec/ppc/int_altivec.c b/libavcodec/ppc/int_altivec.c
index d76a220048..4f7529fe4b 100644
--- a/libavcodec/ppc/int_altivec.c
+++ b/libavcodec/ppc/int_altivec.c
@@ -79,34 +79,6 @@ static int ssd_int8_vs_int16_altivec(const int8_t *pix1, const int16_t *pix2,
     return u.score[3];
 }
 
-static void add_int16_altivec(int16_t * v1, int16_t * v2, int order)
-{
-    int i;
-    register vec_s16 vec, *pv;
-
-    for(i = 0; i < order; i += 8){
-        pv = (vec_s16*)v2;
-        vec = vec_perm(pv[0], pv[1], vec_lvsl(0, v2));
-        vec_st(vec_add(vec_ld(0, v1), vec), 0, v1);
-        v1 += 8;
-        v2 += 8;
-    }
-}
-
-static void sub_int16_altivec(int16_t * v1, int16_t * v2, int order)
-{
-    int i;
-    register vec_s16 vec, *pv;
-
-    for(i = 0; i < order; i += 8){
-        pv = (vec_s16*)v2;
-        vec = vec_perm(pv[0], pv[1], vec_lvsl(0, v2));
-        vec_st(vec_sub(vec_ld(0, v1), vec), 0, v1);
-        v1 += 8;
-        v2 += 8;
-    }
-}
-
 static int32_t scalarproduct_int16_altivec(int16_t * v1, int16_t * v2, int order, const int shift)
 {
     int i;
@@ -137,10 +109,44 @@ static int32_t scalarproduct_int16_altivec(int16_t * v1, int16_t * v2, int order
     return ires;
 }
 
+static int32_t scalarproduct_and_madd_int16_altivec(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul)
+{
+    LOAD_ZERO;
+    vec_s16 *pv1 = (vec_s16*)v1;
+    vec_s16 *pv2 = (vec_s16*)v2;
+    vec_s16 *pv3 = (vec_s16*)v3;
+    register vec_s16 muls = {mul,mul,mul,mul,mul,mul,mul,mul};
+    register vec_s16 t0, t1, i0, i1;
+    register vec_s16 i2 = pv2[0], i3 = pv3[0];
+    register vec_s32 res = zero_s32v;
+    register vec_u8 align = vec_lvsl(0, v2);
+    int32_t ires;
+    order >>= 4;
+    do {
+        t0 = vec_perm(i2, pv2[1], align);
+        i2 = pv2[2];
+        t1 = vec_perm(pv2[1], i2, align);
+        i0 = pv1[0];
+        i1 = pv1[1];
+        res = vec_msum(t0, i0, res);
+        res = vec_msum(t1, i1, res);
+        t0 = vec_perm(i3, pv3[1], align);
+        i3 = pv3[2];
+        t1 = vec_perm(pv3[1], i3, align);
+        pv1[0] = vec_mladd(t0, muls, i0);
+        pv1[1] = vec_mladd(t1, muls, i1);
+        pv1 += 2;
+        pv2 += 2;
+        pv3 += 2;
+    } while(--order);
+    res = vec_splat(vec_sums(res, zero_s32v), 3);
+    vec_ste(res, 0, &ires);
+    return ires;
+}
+
 void int_init_altivec(DSPContext* c, AVCodecContext *avctx)
 {
     c->ssd_int8_vs_int16 = ssd_int8_vs_int16_altivec;
-    c->add_int16 = add_int16_altivec;
-    c->sub_int16 = sub_int16_altivec;
     c->scalarproduct_int16 = scalarproduct_int16_altivec;
+    c->scalarproduct_and_madd_int16 = scalarproduct_and_madd_int16_altivec;
 }