author     Måns Rullgård <mans@mansr.com>   2009-09-27 16:52:00 +0000
committer  Måns Rullgård <mans@mansr.com>   2009-09-27 16:52:00 +0000
commit     35de5d2412fefa31b117276e3b2c8d079b2b9d39
tree       1821a38b764854f7d4f3240b02ce3a6993fa8779
parent     952e87219815b0d8a698e0c098e4fb7984f8b19d
download   ffmpeg-35de5d2412fefa31b117276e3b2c8d079b2b9d39.tar.gz
cosmetics: fix indentation after previous commit
Originally committed as revision 20062 to svn://svn.ffmpeg.org/ffmpeg/trunk
-rw-r--r--  libavcodec/ppc/float_altivec.c  26
-rw-r--r--  libavcodec/x86/dsputil_mmx.c    64
2 files changed, 45 insertions(+), 45 deletions(-)
diff --git a/libavcodec/ppc/float_altivec.c b/libavcodec/ppc/float_altivec.c
index 48d093cd15..1c7326392b 100644
--- a/libavcodec/ppc/float_altivec.c
+++ b/libavcodec/ppc/float_altivec.c
@@ -75,19 +75,19 @@ static void vector_fmul_add_altivec(float *dst, const float *src0,
     vector unsigned char align = vec_lvsr(0,dst),
                          mask = vec_lvsl(0, dst);
 
-        for (i=0; i<len-3; i+=4) {
-            t0 = vec_ld(0, dst+i);
-            t1 = vec_ld(15, dst+i);
-            s0 = vec_ld(0, src0+i);
-            s1 = vec_ld(0, src1+i);
-            s2 = vec_ld(0, src2+i);
-            edges = vec_perm(t1 ,t0, mask);
-            d = vec_madd(s0,s1,s2);
-            t1 = vec_perm(d, edges, align);
-            t0 = vec_perm(edges, d, align);
-            vec_st(t1, 15, dst+i);
-            vec_st(t0, 0, dst+i);
-        }
+    for (i=0; i<len-3; i+=4) {
+        t0 = vec_ld(0, dst+i);
+        t1 = vec_ld(15, dst+i);
+        s0 = vec_ld(0, src0+i);
+        s1 = vec_ld(0, src1+i);
+        s2 = vec_ld(0, src2+i);
+        edges = vec_perm(t1 ,t0, mask);
+        d = vec_madd(s0,s1,s2);
+        t1 = vec_perm(d, edges, align);
+        t0 = vec_perm(edges, d, align);
+        vec_st(t1, 15, dst+i);
+        vec_st(t0, 0, dst+i);
+    }
 }
 
 static void vector_fmul_window_altivec(float *dst, const float *src0, const float *src1, const float *win, float add_bias, int len)
diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c
index 79ceb15554..2e00aa2a24 100644
--- a/libavcodec/x86/dsputil_mmx.c
+++ b/libavcodec/x86/dsputil_mmx.c
@@ -2128,43 +2128,43 @@ static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *
 static void vector_fmul_add_3dnow(float *dst, const float *src0, const float *src1,
                                   const float *src2, int len){
     x86_reg i = (len-4)*4;
-        __asm__ volatile(
-            "1:                \n\t"
-            "movq    (%2,%0), %%mm0 \n\t"
-            "movq   8(%2,%0), %%mm1 \n\t"
-            "pfmul   (%3,%0), %%mm0 \n\t"
-            "pfmul  8(%3,%0), %%mm1 \n\t"
-            "pfadd   (%4,%0), %%mm0 \n\t"
-            "pfadd  8(%4,%0), %%mm1 \n\t"
-            "movq  %%mm0,  (%1,%0)  \n\t"
-            "movq  %%mm1, 8(%1,%0)  \n\t"
-            "sub  $16, %0           \n\t"
-            "jge  1b                \n\t"
-            :"+r"(i)
-            :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
-            :"memory"
-        );
+    __asm__ volatile(
+        "1:                \n\t"
+        "movq    (%2,%0), %%mm0 \n\t"
+        "movq   8(%2,%0), %%mm1 \n\t"
+        "pfmul   (%3,%0), %%mm0 \n\t"
+        "pfmul  8(%3,%0), %%mm1 \n\t"
+        "pfadd   (%4,%0), %%mm0 \n\t"
+        "pfadd  8(%4,%0), %%mm1 \n\t"
+        "movq  %%mm0,  (%1,%0)  \n\t"
+        "movq  %%mm1, 8(%1,%0)  \n\t"
+        "sub  $16, %0           \n\t"
+        "jge  1b                \n\t"
+        :"+r"(i)
+        :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
+        :"memory"
+    );
     __asm__ volatile("femms");
 }
 static void vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
                                 const float *src2, int len){
     x86_reg i = (len-8)*4;
-        __asm__ volatile(
-            "1:                  \n\t"
-            "movaps    (%2,%0), %%xmm0 \n\t"
-            "movaps  16(%2,%0), %%xmm1 \n\t"
-            "mulps     (%3,%0), %%xmm0 \n\t"
-            "mulps   16(%3,%0), %%xmm1 \n\t"
-            "addps     (%4,%0), %%xmm0 \n\t"
-            "addps   16(%4,%0), %%xmm1 \n\t"
-            "movaps  %%xmm0,   (%1,%0) \n\t"
-            "movaps  %%xmm1, 16(%1,%0) \n\t"
-            "sub  $32, %0              \n\t"
-            "jge  1b                   \n\t"
-            :"+r"(i)
-            :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
-            :"memory"
-        );
+    __asm__ volatile(
+        "1:                  \n\t"
+        "movaps    (%2,%0), %%xmm0 \n\t"
+        "movaps  16(%2,%0), %%xmm1 \n\t"
+        "mulps     (%3,%0), %%xmm0 \n\t"
+        "mulps   16(%3,%0), %%xmm1 \n\t"
+        "addps     (%4,%0), %%xmm0 \n\t"
+        "addps   16(%4,%0), %%xmm1 \n\t"
+        "movaps  %%xmm0,   (%1,%0) \n\t"
+        "movaps  %%xmm1, 16(%1,%0) \n\t"
+        "sub  $32, %0              \n\t"
+        "jge  1b                   \n\t"
+        :"+r"(i)
+        :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
+        :"memory"
+    );
 }
 
 static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1,
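For context, all three routines touched here (AltiVec vec_madd, 3DNow! pfmul/pfadd, SSE mulps/addps) implement the same per-element multiply-add over float arrays. A minimal scalar sketch of that operation, assuming FFmpeg's usual *_c reference-naming convention (the function below is illustrative and not taken from this commit):

/* Scalar reference for vector_fmul_add: dst[i] = src0[i]*src1[i] + src2[i].
 * Hypothetical sketch; the name mimics FFmpeg's *_c convention but the
 * body is written from the semantics visible in the diff above. */
static void vector_fmul_add_c(float *dst, const float *src0, const float *src1,
                              const float *src2, int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = src0[i] * src1[i] + src2[i];
}

The SIMD versions trade generality for speed: the SSE loop counts i downward in bytes from (len-8)*4, processing two XMM registers (eight floats) per iteration, so it evidently assumes movaps-compatible 16-byte-aligned pointers and a len handled in multiples of eight; the AltiVec version instead tolerates an unaligned dst by blending each stored vector with the existing edge bytes via vec_lvsr/vec_lvsl and vec_perm.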