From 5fe64d88f67637af6037fa864b1c66e41148597c Mon Sep 17 00:00:00 2001
From: Mans Rullgard <mans@mansr.com>
Date: Sun, 2 Sep 2012 16:05:56 +0100
Subject: x86: allow using add_hfyu_median_prediction_cmov on any cpu with cmov

For some reason add_hfyu_median_prediction_cmov is only selected on
3Dnow-capable CPUs, even though it uses no 3Dnow instructions. This
patch allows it to be selected on any cpu with cmov with the
possibility of being overridden by the mmxext version.

Signed-off-by: Mans Rullgard <mans@mansr.com>
---
 libavcodec/x86/dsputil_mmx.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c
index 9a3cb4931e..0876ceac9b 100644
--- a/libavcodec/x86/dsputil_mmx.c
+++ b/libavcodec/x86/dsputil_mmx.c
@@ -2717,7 +2717,9 @@ static void dsputil_init_mmx2(DSPContext *c, AVCodecContext *avctx,
         c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_10_mmx2;
     }
 
-    c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
+    /* slower than cmov version on AMD */
+    if (!(mm_flags & AV_CPU_FLAG_3DNOW))
+        c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
 
     c->scalarproduct_int16          = ff_scalarproduct_int16_mmx2;
     c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmx2;
@@ -2794,11 +2796,6 @@ static void dsputil_init_3dnow(DSPContext *c, AVCodecContext *avctx,
     }
 
     c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
-
-#if HAVE_7REGS
-    if (mm_flags & AV_CPU_FLAG_CMOV)
-        c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
-#endif
 #endif /* HAVE_INLINE_ASM */
 
 #if HAVE_YASM
@@ -3009,6 +3006,11 @@ void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
 {
     int mm_flags = av_get_cpu_flags();
 
+#if HAVE_7REGS && HAVE_INLINE_ASM
+    if (mm_flags & AV_CPU_FLAG_CMOV)
+        c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
+#endif
+
     if (mm_flags & AV_CPU_FLAG_MMX) {
 #if HAVE_INLINE_ASM
         const int idct_algo = avctx->idct_algo;
--
cgit v1.2.3
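
Note: for context, the routine selected above implements HuffYUV's median
prediction. Below is a plain-C reference sketch of what both the cmov and
mmxext versions compute; the helper name mid_pred3 and the function name
add_hfyu_median_prediction_ref are illustrative and not part of the patch.

#include <stdint.h>

/* Median of three -- the branchy select that CMOV makes branchless. */
static int mid_pred3(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }   /* ensure a <= b       */
    if (b > c) b = c;                         /* clamp b from above  */
    return a > b ? a : b;                     /* median of the three */
}

/* Reconstruct one row of pixels from stored differences using the
 * left/top/top-left median predictor (sketch of the scalar fallback). */
static void add_hfyu_median_prediction_ref(uint8_t *dst, const uint8_t *top,
                                           const uint8_t *diff, int w,
                                           int *left, int *left_top)
{
    int l = *left, lt = *left_top;
    for (int i = 0; i < w; i++) {
        /* predict from left, top and the gradient left + top - topleft,
         * all in modulo-256 byte arithmetic */
        l      = (mid_pred3(l, top[i], (l + top[i] - lt) & 0xFF) + diff[i]) & 0xFF;
        lt     = top[i];
        dst[i] = l;
    }
    *left     = l;
    *left_top = lt;
}
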
From 6efb698883507b13e90eb9ca03813f928066e5dd Mon Sep 17 00:00:00 2001
From: Mans Rullgard <mans@mansr.com>
Date: Tue, 4 Sep 2012 13:52:01 +0100
Subject: cavsdsp: set idct permutation independently of dsputil

CAVS uses its own idct so using dsputil to set the permutation is
fragile.

Signed-off-by: Mans Rullgard <mans@mansr.com>
---
 libavcodec/cavsdec.c         | 3 ++-
 libavcodec/cavsdsp.c         | 1 +
 libavcodec/cavsdsp.h         | 1 +
 libavcodec/x86/cavsdsp.c     | 2 ++
 libavcodec/x86/dsputil_mmx.c | 2 --
 5 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/libavcodec/cavsdec.c b/libavcodec/cavsdec.c
index e70dad038a..33e639b9ae 100644
--- a/libavcodec/cavsdec.c
+++ b/libavcodec/cavsdec.c
@@ -915,9 +915,10 @@ static int decode_pic(AVSContext *h) {
     enum cavs_mb mb_type;
 
     if (!s->context_initialized) {
-        s->avctx->idct_algo = FF_IDCT_CAVS;
         if (ff_MPV_common_init(s) < 0)
             return -1;
+        ff_init_scantable_permutation(s->dsp.idct_permutation,
+                                      h->cdsp.idct_perm);
         ff_init_scantable(s->dsp.idct_permutation,&h->scantable,ff_zigzag_direct);
     }
     skip_bits(&s->gb,16);//bbv_dwlay
diff --git a/libavcodec/cavsdsp.c b/libavcodec/cavsdsp.c
index 983d9d7f22..bdb8d6d064 100644
--- a/libavcodec/cavsdsp.c
+++ b/libavcodec/cavsdsp.c
@@ -544,6 +544,7 @@ av_cold void ff_cavsdsp_init(CAVSDSPContext* c, AVCodecContext *avctx) {
     c->cavs_filter_cv = cavs_filter_cv_c;
     c->cavs_filter_ch = cavs_filter_ch_c;
     c->cavs_idct8_add = cavs_idct8_add_c;
+    c->idct_perm = FF_NO_IDCT_PERM;
 
     if (HAVE_MMX) ff_cavsdsp_init_mmx(c, avctx);
 }
diff --git a/libavcodec/cavsdsp.h b/libavcodec/cavsdsp.h
index b1133b7264..99b0ea3774 100644
--- a/libavcodec/cavsdsp.h
+++ b/libavcodec/cavsdsp.h
@@ -33,6 +33,7 @@ typedef struct CAVSDSPContext {
     void (*cavs_filter_cv)(uint8_t *pix, int stride, int alpha, int beta, int tc, int bs1, int bs2);
     void (*cavs_filter_ch)(uint8_t *pix, int stride, int alpha, int beta, int tc, int bs1, int bs2);
     void (*cavs_idct8_add)(uint8_t *dst, DCTELEM *block, int stride);
+    int idct_perm;
 } CAVSDSPContext;
 
 void ff_cavsdsp_init(CAVSDSPContext* c, AVCodecContext *avctx);
diff --git a/libavcodec/x86/cavsdsp.c b/libavcodec/x86/cavsdsp.c
index e94003956f..aef74c4d36 100644
--- a/libavcodec/x86/cavsdsp.c
+++ b/libavcodec/x86/cavsdsp.c
@@ -461,6 +461,7 @@ static void ff_cavsdsp_init_mmx2(CAVSDSPContext* c, AVCodecContext *avctx) {
     dspfunc(avg_cavs_qpel, 1, 8);
 #undef dspfunc
     c->cavs_idct8_add = cavs_idct8_add_mmx;
+    c->idct_perm = FF_TRANSPOSE_IDCT_PERM;
 }
 
 static void ff_cavsdsp_init_3dnow(CAVSDSPContext* c, AVCodecContext *avctx) {
@@ -477,6 +478,7 @@ static void ff_cavsdsp_init_3dnow(CAVSDSPContext* c, AVCodecContext *avctx) {
     dspfunc(avg_cavs_qpel, 1, 8);
 #undef dspfunc
     c->cavs_idct8_add = cavs_idct8_add_mmx;
+    c->idct_perm = FF_TRANSPOSE_IDCT_PERM;
 }
 
 #endif /* HAVE_INLINE_ASM */
diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c
index 0876ceac9b..af21765e65 100644
--- a/libavcodec/x86/dsputil_mmx.c
+++ b/libavcodec/x86/dsputil_mmx.c
@@ -3021,8 +3021,6 @@ void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
             c->idct_add              = ff_simple_idct_add_mmx;
             c->idct                  = ff_simple_idct_mmx;
             c->idct_permutation_type = FF_SIMPLE_IDCT_PERM;
-        } else if (idct_algo == FF_IDCT_CAVS) {
-            c->idct_permutation_type = FF_TRANSPOSE_IDCT_PERM;
         } else if (idct_algo == FF_IDCT_XVIDMMX) {
             if (mm_flags & AV_CPU_FLAG_SSE2) {
                 c->idct_put              = ff_idct_xvid_sse2_put;
--
cgit v1.2.3
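
Note: the patch above moves the choice of IDCT coefficient permutation from
avctx->idct_algo into CAVSDSPContext.idct_perm. As a rough self-contained
sketch (the constant values and function name here are illustrative; the
real work is done by ff_init_scantable_permutation()), the two permutation
types used in this commit expand as follows:

#include <stdint.h>

#define NO_IDCT_PERM        1   /* natural order, used by the C idct       */
#define TRANSPOSE_IDCT_PERM 4   /* the MMX idct works on transposed blocks */

static void init_idct_permutation_sketch(uint8_t perm[64], int type)
{
    for (int i = 0; i < 64; i++) {
        if (type == TRANSPOSE_IDCT_PERM)
            perm[i] = ((i & 7) << 3) | (i >> 3);  /* swap row and column */
        else                                      /* NO_IDCT_PERM        */
            perm[i] = i;
    }
}

The decoder then builds its scantable through this permutation via
ff_init_scantable() in the hunk above, so the x86 cavsdsp can keep its
transposing idct without touching the global idct_algo selection.
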
From 8cb7ed5562c438388b1dd7dc7d10c26f54c740b5 Mon Sep 17 00:00:00 2001
From: Diego Biurrun <diego@biurrun.de>
Date: Sat, 11 Aug 2012 22:35:27 +0200
Subject: x86: avcodec: Drop silly "_mmx" suffix from dsputil template names

---
 libavcodec/x86/dsputil_avg_template.c     | 977 ++++++++++++++++++++++++++++++
 libavcodec/x86/dsputil_mmx.c              |   8 +-
 libavcodec/x86/dsputil_mmx_avg_template.c | 977 ------------------------------
 libavcodec/x86/dsputil_mmx_qns_template.c | 101 ---
 libavcodec/x86/dsputil_mmx_rnd_template.c | 590 ------
 libavcodec/x86/dsputil_qns_template.c     | 101 +++
 libavcodec/x86/dsputil_rnd_template.c     | 590 ++++++
 libavcodec/x86/dsputilenc_mmx.c           |   6 +-
 8 files changed, 1675 insertions(+), 1675 deletions(-)
 create mode 100644 libavcodec/x86/dsputil_avg_template.c
 delete mode 100644 libavcodec/x86/dsputil_mmx_avg_template.c
 delete mode 100644 libavcodec/x86/dsputil_mmx_qns_template.c
 delete mode 100644 libavcodec/x86/dsputil_mmx_rnd_template.c
 create mode 100644 libavcodec/x86/dsputil_qns_template.c
 create mode 100644 libavcodec/x86/dsputil_rnd_template.c

diff --git a/libavcodec/x86/dsputil_avg_template.c b/libavcodec/x86/dsputil_avg_template.c
new file mode 100644
index 0000000000..8b116b74e2
--- /dev/null
+++ b/libavcodec/x86/dsputil_avg_template.c
@@ -0,0 +1,977 @@
+/*
+ * DSP utils : average functions are compiled twice for 3dnow/mmx2
+ * Copyright (c) 2000, 2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer
+ *
+ * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
+ * mostly rewritten by Michael Niedermayer <michaelni@gmx.at>
+ * and improved by Zdenek Kabelac <kabi@users.sf.net>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/* XXX: we use explicit registers to avoid a gcc 2.95.2 register asm + clobber bug - now it will work with 2.95.2 and also with -fPIC + */ +static void DEF(put_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%1, %3), %%mm1 \n\t" + PAVGB" 1(%1), %%mm0 \n\t" + PAVGB" 1(%1, %3), %%mm1 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%1, %3), %%mm1 \n\t" + PAVGB" 1(%1), %%mm0 \n\t" + PAVGB" 1(%1, %3), %%mm1 \n\t" + "add %%"REG_a", %1 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D"(block) + :"r" ((x86_reg)line_size) + :"%"REG_a, "memory"); +} + +static void DEF(put_pixels4_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +{ + __asm__ volatile( + "testl $1, %0 \n\t" + " jz 1f \n\t" + "movd (%1), %%mm0 \n\t" + "movd (%2), %%mm1 \n\t" + "add %4, %1 \n\t" + "add $4, %2 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + "movd %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + "decl %0 \n\t" + "1: \n\t" + "movd (%1), %%mm0 \n\t" + "add %4, %1 \n\t" + "movd (%1), %%mm1 \n\t" + "movd (%2), %%mm2 \n\t" + "movd 4(%2), %%mm3 \n\t" + "add %4, %1 \n\t" + PAVGB" %%mm2, %%mm0 \n\t" + PAVGB" %%mm3, %%mm1 \n\t" + "movd %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + "movd %%mm1, (%3) \n\t" + "add %5, %3 \n\t" + "movd (%1), %%mm0 \n\t" + "add %4, %1 \n\t" + "movd (%1), %%mm1 \n\t" + "movd 8(%2), %%mm2 \n\t" + "movd 12(%2), %%mm3 \n\t" + "add %4, %1 \n\t" + PAVGB" %%mm2, %%mm0 \n\t" + PAVGB" %%mm3, %%mm1 \n\t" + "movd %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + "movd %%mm1, (%3) \n\t" + "add %5, %3 \n\t" + "add $16, %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" +#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used + :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#else + :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#endif + :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) + :"memory"); +} + + +static void DEF(put_pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +{ + __asm__ volatile( + "testl $1, %0 \n\t" + " jz 1f \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%2), %%mm1 \n\t" + "add %4, %1 \n\t" + "add $8, %2 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + "movq %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + "decl %0 \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "add %4, %1 \n\t" + "movq (%1), %%mm1 \n\t" + "add %4, %1 \n\t" + PAVGB" (%2), %%mm0 \n\t" + PAVGB" 8(%2), %%mm1 \n\t" + "movq %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + "movq %%mm1, (%3) \n\t" + "add %5, %3 \n\t" + "movq (%1), %%mm0 \n\t" + "add %4, %1 \n\t" + "movq (%1), %%mm1 \n\t" + "add %4, %1 \n\t" + PAVGB" 16(%2), %%mm0 \n\t" + PAVGB" 24(%2), %%mm1 \n\t" + "movq %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + "movq %%mm1, (%3) \n\t" + "add %5, %3 \n\t" + "add $32, %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" +#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used + :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#else + :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#endif + 
:"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) + :"memory"); +//the following should be used, though better not with gcc ... +/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) + :"r"(src1Stride), "r"(dstStride) + :"memory");*/ +} + +static void DEF(put_no_rnd_pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +{ + __asm__ volatile( + "pcmpeqb %%mm6, %%mm6 \n\t" + "testl $1, %0 \n\t" + " jz 1f \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%2), %%mm1 \n\t" + "add %4, %1 \n\t" + "add $8, %2 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "movq %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + "decl %0 \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "add %4, %1 \n\t" + "movq (%1), %%mm1 \n\t" + "add %4, %1 \n\t" + "movq (%2), %%mm2 \n\t" + "movq 8(%2), %%mm3 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "pxor %%mm6, %%mm2 \n\t" + "pxor %%mm6, %%mm3 \n\t" + PAVGB" %%mm2, %%mm0 \n\t" + PAVGB" %%mm3, %%mm1 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "movq %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + "movq %%mm1, (%3) \n\t" + "add %5, %3 \n\t" + "movq (%1), %%mm0 \n\t" + "add %4, %1 \n\t" + "movq (%1), %%mm1 \n\t" + "add %4, %1 \n\t" + "movq 16(%2), %%mm2 \n\t" + "movq 24(%2), %%mm3 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "pxor %%mm6, %%mm2 \n\t" + "pxor %%mm6, %%mm3 \n\t" + PAVGB" %%mm2, %%mm0 \n\t" + PAVGB" %%mm3, %%mm1 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "movq %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + "movq %%mm1, (%3) \n\t" + "add %5, %3 \n\t" + "add $32, %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" +#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used + :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#else + :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#endif + :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) + :"memory"); +//the following should be used, though better not with gcc ... 
+/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) + :"r"(src1Stride), "r"(dstStride) + :"memory");*/ +} + +static void DEF(avg_pixels4_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +{ + __asm__ volatile( + "testl $1, %0 \n\t" + " jz 1f \n\t" + "movd (%1), %%mm0 \n\t" + "movd (%2), %%mm1 \n\t" + "add %4, %1 \n\t" + "add $4, %2 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + PAVGB" (%3), %%mm0 \n\t" + "movd %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + "decl %0 \n\t" + "1: \n\t" + "movd (%1), %%mm0 \n\t" + "add %4, %1 \n\t" + "movd (%1), %%mm1 \n\t" + "add %4, %1 \n\t" + PAVGB" (%2), %%mm0 \n\t" + PAVGB" 4(%2), %%mm1 \n\t" + PAVGB" (%3), %%mm0 \n\t" + "movd %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + PAVGB" (%3), %%mm1 \n\t" + "movd %%mm1, (%3) \n\t" + "add %5, %3 \n\t" + "movd (%1), %%mm0 \n\t" + "add %4, %1 \n\t" + "movd (%1), %%mm1 \n\t" + "add %4, %1 \n\t" + PAVGB" 8(%2), %%mm0 \n\t" + PAVGB" 12(%2), %%mm1 \n\t" + PAVGB" (%3), %%mm0 \n\t" + "movd %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + PAVGB" (%3), %%mm1 \n\t" + "movd %%mm1, (%3) \n\t" + "add %5, %3 \n\t" + "add $16, %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" +#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used + :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#else + :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#endif + :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) + :"memory"); +} + + +static void DEF(avg_pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +{ + __asm__ volatile( + "testl $1, %0 \n\t" + " jz 1f \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%2), %%mm1 \n\t" + "add %4, %1 \n\t" + "add $8, %2 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + PAVGB" (%3), %%mm0 \n\t" + "movq %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + "decl %0 \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "add %4, %1 \n\t" + "movq (%1), %%mm1 \n\t" + "add %4, %1 \n\t" + PAVGB" (%2), %%mm0 \n\t" + PAVGB" 8(%2), %%mm1 \n\t" + PAVGB" (%3), %%mm0 \n\t" + "movq %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + PAVGB" (%3), %%mm1 \n\t" + "movq %%mm1, (%3) \n\t" + "add %5, %3 \n\t" + "movq (%1), %%mm0 \n\t" + "add %4, %1 \n\t" + "movq (%1), %%mm1 \n\t" + "add %4, %1 \n\t" + PAVGB" 16(%2), %%mm0 \n\t" + PAVGB" 24(%2), %%mm1 \n\t" + PAVGB" (%3), %%mm0 \n\t" + "movq %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + PAVGB" (%3), %%mm1 \n\t" + "movq %%mm1, (%3) \n\t" + "add %5, %3 \n\t" + "add $32, %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" +#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used + :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#else + :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#endif + :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) + :"memory"); +//the following should be used, though better not with gcc ... 
+/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) + :"r"(src1Stride), "r"(dstStride) + :"memory");*/ +} + +static void DEF(put_pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq 8(%1), %%mm2 \n\t" + "movq 8(%1, %3), %%mm3 \n\t" + PAVGB" 1(%1), %%mm0 \n\t" + PAVGB" 1(%1, %3), %%mm1 \n\t" + PAVGB" 9(%1), %%mm2 \n\t" + PAVGB" 9(%1, %3), %%mm3 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "movq %%mm2, 8(%2) \n\t" + "movq %%mm3, 8(%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq 8(%1), %%mm2 \n\t" + "movq 8(%1, %3), %%mm3 \n\t" + PAVGB" 1(%1), %%mm0 \n\t" + PAVGB" 1(%1, %3), %%mm1 \n\t" + PAVGB" 9(%1), %%mm2 \n\t" + PAVGB" 9(%1, %3), %%mm3 \n\t" + "add %%"REG_a", %1 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "movq %%mm2, 8(%2) \n\t" + "movq %%mm3, 8(%2, %3) \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D"(block) + :"r" ((x86_reg)line_size) + :"%"REG_a, "memory"); +} + +static void DEF(put_pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +{ + __asm__ volatile( + "testl $1, %0 \n\t" + " jz 1f \n\t" + "movq (%1), %%mm0 \n\t" + "movq 8(%1), %%mm1 \n\t" + PAVGB" (%2), %%mm0 \n\t" + PAVGB" 8(%2), %%mm1 \n\t" + "add %4, %1 \n\t" + "add $16, %2 \n\t" + "movq %%mm0, (%3) \n\t" + "movq %%mm1, 8(%3) \n\t" + "add %5, %3 \n\t" + "decl %0 \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "movq 8(%1), %%mm1 \n\t" + "add %4, %1 \n\t" + PAVGB" (%2), %%mm0 \n\t" + PAVGB" 8(%2), %%mm1 \n\t" + "movq %%mm0, (%3) \n\t" + "movq %%mm1, 8(%3) \n\t" + "add %5, %3 \n\t" + "movq (%1), %%mm0 \n\t" + "movq 8(%1), %%mm1 \n\t" + "add %4, %1 \n\t" + PAVGB" 16(%2), %%mm0 \n\t" + PAVGB" 24(%2), %%mm1 \n\t" + "movq %%mm0, (%3) \n\t" + "movq %%mm1, 8(%3) \n\t" + "add %5, %3 \n\t" + "add $32, %2 \n\t" + "subl $2, %0 \n\t" + "jnz 1b \n\t" +#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used + :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#else + :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#endif + :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) + :"memory"); +//the following should be used, though better not with gcc ... 
+/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) + :"r"(src1Stride), "r"(dstStride) + :"memory");*/ +} + +static void DEF(avg_pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +{ + __asm__ volatile( + "testl $1, %0 \n\t" + " jz 1f \n\t" + "movq (%1), %%mm0 \n\t" + "movq 8(%1), %%mm1 \n\t" + PAVGB" (%2), %%mm0 \n\t" + PAVGB" 8(%2), %%mm1 \n\t" + "add %4, %1 \n\t" + "add $16, %2 \n\t" + PAVGB" (%3), %%mm0 \n\t" + PAVGB" 8(%3), %%mm1 \n\t" + "movq %%mm0, (%3) \n\t" + "movq %%mm1, 8(%3) \n\t" + "add %5, %3 \n\t" + "decl %0 \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "movq 8(%1), %%mm1 \n\t" + "add %4, %1 \n\t" + PAVGB" (%2), %%mm0 \n\t" + PAVGB" 8(%2), %%mm1 \n\t" + PAVGB" (%3), %%mm0 \n\t" + PAVGB" 8(%3), %%mm1 \n\t" + "movq %%mm0, (%3) \n\t" + "movq %%mm1, 8(%3) \n\t" + "add %5, %3 \n\t" + "movq (%1), %%mm0 \n\t" + "movq 8(%1), %%mm1 \n\t" + "add %4, %1 \n\t" + PAVGB" 16(%2), %%mm0 \n\t" + PAVGB" 24(%2), %%mm1 \n\t" + PAVGB" (%3), %%mm0 \n\t" + PAVGB" 8(%3), %%mm1 \n\t" + "movq %%mm0, (%3) \n\t" + "movq %%mm1, 8(%3) \n\t" + "add %5, %3 \n\t" + "add $32, %2 \n\t" + "subl $2, %0 \n\t" + "jnz 1b \n\t" +#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used + :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#else + :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#endif + :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) + :"memory"); +//the following should be used, though better not with gcc ... +/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) + :"r"(src1Stride), "r"(dstStride) + :"memory");*/ +} + +static void DEF(put_no_rnd_pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +{ + __asm__ volatile( + "pcmpeqb %%mm6, %%mm6 \n\t" + "testl $1, %0 \n\t" + " jz 1f \n\t" + "movq (%1), %%mm0 \n\t" + "movq 8(%1), %%mm1 \n\t" + "movq (%2), %%mm2 \n\t" + "movq 8(%2), %%mm3 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "pxor %%mm6, %%mm2 \n\t" + "pxor %%mm6, %%mm3 \n\t" + PAVGB" %%mm2, %%mm0 \n\t" + PAVGB" %%mm3, %%mm1 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "add %4, %1 \n\t" + "add $16, %2 \n\t" + "movq %%mm0, (%3) \n\t" + "movq %%mm1, 8(%3) \n\t" + "add %5, %3 \n\t" + "decl %0 \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "movq 8(%1), %%mm1 \n\t" + "add %4, %1 \n\t" + "movq (%2), %%mm2 \n\t" + "movq 8(%2), %%mm3 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "pxor %%mm6, %%mm2 \n\t" + "pxor %%mm6, %%mm3 \n\t" + PAVGB" %%mm2, %%mm0 \n\t" + PAVGB" %%mm3, %%mm1 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "movq %%mm0, (%3) \n\t" + "movq %%mm1, 8(%3) \n\t" + "add %5, %3 \n\t" + "movq (%1), %%mm0 \n\t" + "movq 8(%1), %%mm1 \n\t" + "add %4, %1 \n\t" + "movq 16(%2), %%mm2 \n\t" + "movq 24(%2), %%mm3 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "pxor %%mm6, %%mm2 \n\t" + "pxor %%mm6, %%mm3 \n\t" + PAVGB" %%mm2, %%mm0 \n\t" + PAVGB" %%mm3, %%mm1 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "movq %%mm0, (%3) \n\t" + "movq %%mm1, 8(%3) \n\t" + "add %5, %3 \n\t" + "add $32, %2 \n\t" + "subl $2, %0 \n\t" + "jnz 1b \n\t" +#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used + :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#else + :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#endif + :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) + :"memory"); +//the following should be used, though better not with gcc ... 
+/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) + :"r"(src1Stride), "r"(dstStride) + :"memory");*/ +} + +/* GL: this function does incorrect rounding if overflow */ +static void DEF(put_no_rnd_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_BONE(mm6); + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%1, %3), %%mm2 \n\t" + "movq 1(%1), %%mm1 \n\t" + "movq 1(%1, %3), %%mm3 \n\t" + "add %%"REG_a", %1 \n\t" + "psubusb %%mm6, %%mm0 \n\t" + "psubusb %%mm6, %%mm2 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + PAVGB" %%mm3, %%mm2 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm2, (%2, %3) \n\t" + "movq (%1), %%mm0 \n\t" + "movq 1(%1), %%mm1 \n\t" + "movq (%1, %3), %%mm2 \n\t" + "movq 1(%1, %3), %%mm3 \n\t" + "add %%"REG_a", %2 \n\t" + "add %%"REG_a", %1 \n\t" + "psubusb %%mm6, %%mm0 \n\t" + "psubusb %%mm6, %%mm2 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + PAVGB" %%mm3, %%mm2 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm2, (%2, %3) \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D"(block) + :"r" ((x86_reg)line_size) + :"%"REG_a, "memory"); +} + +static void DEF(put_no_rnd_pixels8_x2_exact)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + __asm__ volatile ( + "pcmpeqb %%mm6, %%mm6 \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%1, %3), %%mm2 \n\t" + "movq 1(%1), %%mm1 \n\t" + "movq 1(%1, %3), %%mm3 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm2 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "pxor %%mm6, %%mm3 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + PAVGB" %%mm3, %%mm2 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm2 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm2, (%2, %3) \n\t" + "movq (%1, %3,2), %%mm0 \n\t" + "movq 1(%1, %3,2), %%mm1 \n\t" + "movq (%1, %4), %%mm2 \n\t" + "movq 1(%1, %4), %%mm3 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "pxor %%mm6, %%mm2 \n\t" + "pxor %%mm6, %%mm3 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + PAVGB" %%mm3, %%mm2 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm2 \n\t" + "movq %%mm0, (%2, %3,2) \n\t" + "movq %%mm2, (%2, %4) \n\t" + "lea (%1, %3,4), %1 \n\t" + "lea (%2, %3,4), %2 \n\t" + "subl $4, %0 \n\t" + "jg 1b \n\t" + : "+g"(h), "+r"(pixels), "+r"(block) + : "r" ((x86_reg)line_size), "r"((x86_reg)3*line_size) + : "memory" + ); +} + +static void DEF(put_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + "movq (%1), %%mm0 \n\t" + "sub %3, %2 \n\t" + "1: \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq (%1, %%"REG_a"), %%mm2 \n\t" + "add %%"REG_a", %1 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + PAVGB" %%mm2, %%mm1 \n\t" + "movq %%mm0, (%2, %3) \n\t" + "movq %%mm1, (%2, %%"REG_a") \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq (%1, %%"REG_a"), %%mm0 \n\t" + "add %%"REG_a", %2 \n\t" + "add %%"REG_a", %1 \n\t" + PAVGB" %%mm1, %%mm2 \n\t" + PAVGB" %%mm0, %%mm1 \n\t" + "movq %%mm2, (%2, %3) \n\t" + "movq %%mm1, (%2, %%"REG_a") \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D" (block) + :"r" ((x86_reg)line_size) + :"%"REG_a, "memory"); +} + +/* GL: this function does incorrect rounding if overflow */ +static void DEF(put_no_rnd_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_BONE(mm6); + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + "movq (%1), %%mm0 \n\t" + "sub %3, %2 \n\t" + "1: \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq (%1, %%"REG_a"), %%mm2 \n\t" + "add %%"REG_a", %1 
\n\t" + "psubusb %%mm6, %%mm1 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + PAVGB" %%mm2, %%mm1 \n\t" + "movq %%mm0, (%2, %3) \n\t" + "movq %%mm1, (%2, %%"REG_a") \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq (%1, %%"REG_a"), %%mm0 \n\t" + "add %%"REG_a", %2 \n\t" + "add %%"REG_a", %1 \n\t" + "psubusb %%mm6, %%mm1 \n\t" + PAVGB" %%mm1, %%mm2 \n\t" + PAVGB" %%mm0, %%mm1 \n\t" + "movq %%mm2, (%2, %3) \n\t" + "movq %%mm1, (%2, %%"REG_a") \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D" (block) + :"r" ((x86_reg)line_size) + :"%"REG_a, "memory"); +} + +static void DEF(put_no_rnd_pixels8_y2_exact)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + __asm__ volatile ( + "movq (%1), %%mm0 \n\t" + "pcmpeqb %%mm6, %%mm6 \n\t" + "add %3, %1 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "1: \n\t" + "movq (%1), %%mm1 \n\t" + "movq (%1, %3), %%mm2 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "pxor %%mm6, %%mm2 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + PAVGB" %%mm2, %%mm1 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "movq (%1, %3,2), %%mm1 \n\t" + "movq (%1, %4), %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "pxor %%mm6, %%mm0 \n\t" + PAVGB" %%mm1, %%mm2 \n\t" + PAVGB" %%mm0, %%mm1 \n\t" + "pxor %%mm6, %%mm2 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "movq %%mm2, (%2, %3,2) \n\t" + "movq %%mm1, (%2, %4) \n\t" + "lea (%1, %3,4), %1 \n\t" + "lea (%2, %3,4), %2 \n\t" + "subl $4, %0 \n\t" + "jg 1b \n\t" + :"+g"(h), "+r"(pixels), "+r" (block) + :"r" ((x86_reg)line_size), "r"((x86_reg)3*line_size) + :"memory" + ); +} + +static void DEF(avg_pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + "1: \n\t" + "movq (%2), %%mm0 \n\t" + "movq (%2, %3), %%mm1 \n\t" + PAVGB" (%1), %%mm0 \n\t" + PAVGB" (%1, %3), %%mm1 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + "movq (%2), %%mm0 \n\t" + "movq (%2, %3), %%mm1 \n\t" + PAVGB" (%1), %%mm0 \n\t" + PAVGB" (%1, %3), %%mm1 \n\t" + "add %%"REG_a", %1 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D"(block) + :"r" ((x86_reg)line_size) + :"%"REG_a, "memory"); +} + +static void DEF(avg_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%1, %3), %%mm2 \n\t" + PAVGB" 1(%1), %%mm0 \n\t" + PAVGB" 1(%1, %3), %%mm2 \n\t" + PAVGB" (%2), %%mm0 \n\t" + PAVGB" (%2, %3), %%mm2 \n\t" + "add %%"REG_a", %1 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm2, (%2, %3) \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%1, %3), %%mm2 \n\t" + PAVGB" 1(%1), %%mm0 \n\t" + PAVGB" 1(%1, %3), %%mm2 \n\t" + "add %%"REG_a", %2 \n\t" + "add %%"REG_a", %1 \n\t" + PAVGB" (%2), %%mm0 \n\t" + PAVGB" (%2, %3), %%mm2 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm2, (%2, %3) \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D"(block) + :"r" ((x86_reg)line_size) + :"%"REG_a, "memory"); +} + +static void DEF(avg_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + "movq (%1), %%mm0 \n\t" + "sub %3, %2 \n\t" + "1: \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq (%1, %%"REG_a"), %%mm2 \n\t" + "add %%"REG_a", %1 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + PAVGB" 
%%mm2, %%mm1 \n\t" + "movq (%2, %3), %%mm3 \n\t" + "movq (%2, %%"REG_a"), %%mm4 \n\t" + PAVGB" %%mm3, %%mm0 \n\t" + PAVGB" %%mm4, %%mm1 \n\t" + "movq %%mm0, (%2, %3) \n\t" + "movq %%mm1, (%2, %%"REG_a") \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq (%1, %%"REG_a"), %%mm0 \n\t" + PAVGB" %%mm1, %%mm2 \n\t" + PAVGB" %%mm0, %%mm1 \n\t" + "add %%"REG_a", %2 \n\t" + "add %%"REG_a", %1 \n\t" + "movq (%2, %3), %%mm3 \n\t" + "movq (%2, %%"REG_a"), %%mm4 \n\t" + PAVGB" %%mm3, %%mm2 \n\t" + PAVGB" %%mm4, %%mm1 \n\t" + "movq %%mm2, (%2, %3) \n\t" + "movq %%mm1, (%2, %%"REG_a") \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D"(block) + :"r" ((x86_reg)line_size) + :"%"REG_a, "memory"); +} + +/* Note this is not correctly rounded, but this function is only + * used for B-frames so it does not matter. */ +static void DEF(avg_pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_BONE(mm6); + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + "movq (%1), %%mm0 \n\t" + PAVGB" 1(%1), %%mm0 \n\t" + ".p2align 3 \n\t" + "1: \n\t" + "movq (%1, %%"REG_a"), %%mm2 \n\t" + "movq (%1, %3), %%mm1 \n\t" + "psubusb %%mm6, %%mm2 \n\t" + PAVGB" 1(%1, %3), %%mm1 \n\t" + PAVGB" 1(%1, %%"REG_a"), %%mm2 \n\t" + "add %%"REG_a", %1 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + PAVGB" %%mm2, %%mm1 \n\t" + PAVGB" (%2), %%mm0 \n\t" + PAVGB" (%2, %3), %%mm1 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq (%1, %%"REG_a"), %%mm0 \n\t" + PAVGB" 1(%1, %3), %%mm1 \n\t" + PAVGB" 1(%1, %%"REG_a"), %%mm0 \n\t" + "add %%"REG_a", %2 \n\t" + "add %%"REG_a", %1 \n\t" + PAVGB" %%mm1, %%mm2 \n\t" + PAVGB" %%mm0, %%mm1 \n\t" + PAVGB" (%2), %%mm2 \n\t" + PAVGB" (%2, %3), %%mm1 \n\t" + "movq %%mm2, (%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D"(block) + :"r" ((x86_reg)line_size) + :"%"REG_a, "memory"); +} + +static void DEF(avg_pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + do { + __asm__ volatile( + "movd (%1), %%mm0 \n\t" + "movd (%1, %2), %%mm1 \n\t" + "movd (%1, %2, 2), %%mm2 \n\t" + "movd (%1, %3), %%mm3 \n\t" + PAVGB" (%0), %%mm0 \n\t" + PAVGB" (%0, %2), %%mm1 \n\t" + PAVGB" (%0, %2, 2), %%mm2 \n\t" + PAVGB" (%0, %3), %%mm3 \n\t" + "movd %%mm0, (%1) \n\t" + "movd %%mm1, (%1, %2) \n\t" + "movd %%mm2, (%1, %2, 2) \n\t" + "movd %%mm3, (%1, %3) \n\t" + ::"S"(pixels), "D"(block), + "r" ((x86_reg)line_size), "r"((x86_reg)3L*line_size) + :"memory"); + block += 4*line_size; + pixels += 4*line_size; + h -= 4; + } while(h > 0); +} + +//FIXME the following could be optimized too ... 
+static void DEF(put_no_rnd_pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ + DEF(put_no_rnd_pixels8_x2)(block , pixels , line_size, h); + DEF(put_no_rnd_pixels8_x2)(block+8, pixels+8, line_size, h); +} +static void DEF(put_pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ + DEF(put_pixels8_y2)(block , pixels , line_size, h); + DEF(put_pixels8_y2)(block+8, pixels+8, line_size, h); +} +static void DEF(put_no_rnd_pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ + DEF(put_no_rnd_pixels8_y2)(block , pixels , line_size, h); + DEF(put_no_rnd_pixels8_y2)(block+8, pixels+8, line_size, h); +} +static void DEF(avg_pixels16)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ + DEF(avg_pixels8)(block , pixels , line_size, h); + DEF(avg_pixels8)(block+8, pixels+8, line_size, h); +} +static void DEF(avg_pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ + DEF(avg_pixels8_x2)(block , pixels , line_size, h); + DEF(avg_pixels8_x2)(block+8, pixels+8, line_size, h); +} +static void DEF(avg_pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ + DEF(avg_pixels8_y2)(block , pixels , line_size, h); + DEF(avg_pixels8_y2)(block+8, pixels+8, line_size, h); +} +static void DEF(avg_pixels16_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ + DEF(avg_pixels8_xy2)(block , pixels , line_size, h); + DEF(avg_pixels8_xy2)(block+8, pixels+8, line_size, h); +} + +#define QPEL_2TAP_L3(OPNAME) \ +static void DEF(OPNAME ## 2tap_qpel16_l3)(uint8_t *dst, uint8_t *src, int stride, int h, int off1, int off2){\ + __asm__ volatile(\ + "1: \n\t"\ + "movq (%1,%2), %%mm0 \n\t"\ + "movq 8(%1,%2), %%mm1 \n\t"\ + PAVGB" (%1,%3), %%mm0 \n\t"\ + PAVGB" 8(%1,%3), %%mm1 \n\t"\ + PAVGB" (%1), %%mm0 \n\t"\ + PAVGB" 8(%1), %%mm1 \n\t"\ + STORE_OP( (%1,%4),%%mm0)\ + STORE_OP(8(%1,%4),%%mm1)\ + "movq %%mm0, (%1,%4) \n\t"\ + "movq %%mm1, 8(%1,%4) \n\t"\ + "add %5, %1 \n\t"\ + "decl %0 \n\t"\ + "jnz 1b \n\t"\ + :"+g"(h), "+r"(src)\ + :"r"((x86_reg)off1), "r"((x86_reg)off2),\ + "r"((x86_reg)(dst-src)), "r"((x86_reg)stride)\ + :"memory"\ + );\ +}\ +static void DEF(OPNAME ## 2tap_qpel8_l3)(uint8_t *dst, uint8_t *src, int stride, int h, int off1, int off2){\ + __asm__ volatile(\ + "1: \n\t"\ + "movq (%1,%2), %%mm0 \n\t"\ + PAVGB" (%1,%3), %%mm0 \n\t"\ + PAVGB" (%1), %%mm0 \n\t"\ + STORE_OP((%1,%4),%%mm0)\ + "movq %%mm0, (%1,%4) \n\t"\ + "add %5, %1 \n\t"\ + "decl %0 \n\t"\ + "jnz 1b \n\t"\ + :"+g"(h), "+r"(src)\ + :"r"((x86_reg)off1), "r"((x86_reg)off2),\ + "r"((x86_reg)(dst-src)), "r"((x86_reg)stride)\ + :"memory"\ + );\ +} + +#define STORE_OP(a,b) PAVGB" "#a","#b" \n\t" +QPEL_2TAP_L3(avg_) +#undef STORE_OP +#define STORE_OP(a,b) +QPEL_2TAP_L3(put_) +#undef STORE_OP +#undef QPEL_2TAP_L3 diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c index af21765e65..f3ee342d72 100644 --- a/libavcodec/x86/dsputil_mmx.c +++ b/libavcodec/x86/dsputil_mmx.c @@ -170,7 +170,7 @@ DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 }; #define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e) #define OP_AVG(a, b, c, e) PAVGB_MMX(a, b, c, e) -#include "dsputil_mmx_rnd_template.c" +#include "dsputil_rnd_template.c" #undef DEF #undef SET_RND @@ -184,7 +184,7 @@ DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 }; #define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f) #define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e) -#include "dsputil_mmx_rnd_template.c" +#include "dsputil_rnd_template.c" 
#undef DEF #undef SET_RND @@ -199,7 +199,7 @@ DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 }; #define PAVGB "pavgusb" #define OP_AVG PAVGB -#include "dsputil_mmx_avg_template.c" +#include "dsputil_avg_template.c" #undef DEF #undef PAVGB @@ -214,7 +214,7 @@ DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 }; #define PAVGB "pavgb" #define OP_AVG PAVGB -#include "dsputil_mmx_avg_template.c" +#include "dsputil_avg_template.c" #undef DEF #undef PAVGB diff --git a/libavcodec/x86/dsputil_mmx_avg_template.c b/libavcodec/x86/dsputil_mmx_avg_template.c deleted file mode 100644 index 8b116b74e2..0000000000 --- a/libavcodec/x86/dsputil_mmx_avg_template.c +++ /dev/null @@ -1,977 +0,0 @@ -/* - * DSP utils : average functions are compiled twice for 3dnow/mmx2 - * Copyright (c) 2000, 2001 Fabrice Bellard - * Copyright (c) 2002-2004 Michael Niedermayer - * - * MMX optimization by Nick Kurshev <nickols_k@mail.ru> - * mostly rewritten by Michael Niedermayer <michaelni@gmx.at> - * and improved by Zdenek Kabelac <kabi@users.sf.net> - * - * This file is part of Libav. - * - * Libav is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * Libav is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/* XXX: we use explicit registers to avoid a gcc 2.95.2 register asm - clobber bug - now it will work with 2.95.2 and also with -fPIC - */ -static void DEF(put_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm1 \n\t" - PAVGB" 1(%1), %%mm0 \n\t" - PAVGB" 1(%1, %3), %%mm1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm1 \n\t" - PAVGB" 1(%1), %%mm0 \n\t" - PAVGB" 1(%1, %3), %%mm1 \n\t" - "add %%"REG_a", %1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r" ((x86_reg)line_size) - :"%"REG_a, "memory"); -} - -static void DEF(put_pixels4_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - __asm__ volatile( - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movd (%1), %%mm0 \n\t" - "movd (%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "add $4, %2 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - "movd %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - "1: \n\t" - "movd (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movd (%1), %%mm1 \n\t" - "movd (%2), %%mm2 \n\t" - "movd 4(%2), %%mm3 \n\t" - "add %4, %1 \n\t" - PAVGB" %%mm2, %%mm0 \n\t" - PAVGB" %%mm3, %%mm1 \n\t" - "movd %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "movd %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "movd (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movd (%1), %%mm1 \n\t" - "movd 8(%2), %%mm2 \n\t" - "movd 12(%2), %%mm3 \n\t" - "add %4, %1 \n\t" - PAVGB" %%mm2, %%mm0 \n\t" - 
PAVGB" %%mm3, %%mm1 \n\t" - "movd %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "movd %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "add $16, %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" -#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) - :"memory"); -} - - -static void DEF(put_pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - __asm__ volatile( - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "add $8, %2 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movq (%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" 8(%2), %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "movq %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "movq (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movq (%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" 16(%2), %%mm0 \n\t" - PAVGB" 24(%2), %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "movq %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "add $32, %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" -#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) - :"memory"); -//the following should be used, though better not with gcc ... -/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) - :"r"(src1Stride), "r"(dstStride) - :"memory");*/ -} - -static void DEF(put_no_rnd_pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - __asm__ volatile( - "pcmpeqb %%mm6, %%mm6 \n\t" - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "add $8, %2 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movq (%1), %%mm1 \n\t" - "add %4, %1 \n\t" - "movq (%2), %%mm2 \n\t" - "movq 8(%2), %%mm3 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "pxor %%mm6, %%mm2 \n\t" - "pxor %%mm6, %%mm3 \n\t" - PAVGB" %%mm2, %%mm0 \n\t" - PAVGB" %%mm3, %%mm1 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "movq %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "movq (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movq (%1), %%mm1 \n\t" - "add %4, %1 \n\t" - "movq 16(%2), %%mm2 \n\t" - "movq 24(%2), %%mm3 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "pxor %%mm6, %%mm2 \n\t" - "pxor %%mm6, %%mm3 \n\t" - PAVGB" %%mm2, %%mm0 \n\t" - PAVGB" %%mm3, %%mm1 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "movq %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "add $32, %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" -#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) - 
:"memory"); -//the following should be used, though better not with gcc ... -/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) - :"r"(src1Stride), "r"(dstStride) - :"memory");*/ -} - -static void DEF(avg_pixels4_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - __asm__ volatile( - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movd (%1), %%mm0 \n\t" - "movd (%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "add $4, %2 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" (%3), %%mm0 \n\t" - "movd %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - "1: \n\t" - "movd (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movd (%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" 4(%2), %%mm1 \n\t" - PAVGB" (%3), %%mm0 \n\t" - "movd %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - PAVGB" (%3), %%mm1 \n\t" - "movd %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "movd (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movd (%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" 8(%2), %%mm0 \n\t" - PAVGB" 12(%2), %%mm1 \n\t" - PAVGB" (%3), %%mm0 \n\t" - "movd %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - PAVGB" (%3), %%mm1 \n\t" - "movd %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "add $16, %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" -#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) - :"memory"); -} - - -static void DEF(avg_pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - __asm__ volatile( - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "add $8, %2 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" (%3), %%mm0 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movq (%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" 8(%2), %%mm1 \n\t" - PAVGB" (%3), %%mm0 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - PAVGB" (%3), %%mm1 \n\t" - "movq %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "movq (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movq (%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" 16(%2), %%mm0 \n\t" - PAVGB" 24(%2), %%mm1 \n\t" - PAVGB" (%3), %%mm0 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - PAVGB" (%3), %%mm1 \n\t" - "movq %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "add $32, %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" -#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) - :"memory"); -//the following should be used, though better not with gcc ... 
-/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) - :"r"(src1Stride), "r"(dstStride) - :"memory");*/ -} - -static void DEF(put_pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq 8(%1), %%mm2 \n\t" - "movq 8(%1, %3), %%mm3 \n\t" - PAVGB" 1(%1), %%mm0 \n\t" - PAVGB" 1(%1, %3), %%mm1 \n\t" - PAVGB" 9(%1), %%mm2 \n\t" - PAVGB" 9(%1, %3), %%mm3 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "movq %%mm2, 8(%2) \n\t" - "movq %%mm3, 8(%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq 8(%1), %%mm2 \n\t" - "movq 8(%1, %3), %%mm3 \n\t" - PAVGB" 1(%1), %%mm0 \n\t" - PAVGB" 1(%1, %3), %%mm1 \n\t" - PAVGB" 9(%1), %%mm2 \n\t" - PAVGB" 9(%1, %3), %%mm3 \n\t" - "add %%"REG_a", %1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "movq %%mm2, 8(%2) \n\t" - "movq %%mm3, 8(%2, %3) \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r" ((x86_reg)line_size) - :"%"REG_a, "memory"); -} - -static void DEF(put_pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - __asm__ volatile( - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" 8(%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "add $16, %2 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" 8(%2), %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" 16(%2), %%mm0 \n\t" - PAVGB" 24(%2), %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "add $32, %2 \n\t" - "subl $2, %0 \n\t" - "jnz 1b \n\t" -#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) - :"memory"); -//the following should be used, though better not with gcc ... 
-/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) - :"r"(src1Stride), "r"(dstStride) - :"memory");*/ -} - -static void DEF(avg_pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - __asm__ volatile( - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" 8(%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "add $16, %2 \n\t" - PAVGB" (%3), %%mm0 \n\t" - PAVGB" 8(%3), %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" 8(%2), %%mm1 \n\t" - PAVGB" (%3), %%mm0 \n\t" - PAVGB" 8(%3), %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" 16(%2), %%mm0 \n\t" - PAVGB" 24(%2), %%mm1 \n\t" - PAVGB" (%3), %%mm0 \n\t" - PAVGB" 8(%3), %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "add $32, %2 \n\t" - "subl $2, %0 \n\t" - "jnz 1b \n\t" -#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) - :"memory"); -//the following should be used, though better not with gcc ... -/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) - :"r"(src1Stride), "r"(dstStride) - :"memory");*/ -} - -static void DEF(put_no_rnd_pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - __asm__ volatile( - "pcmpeqb %%mm6, %%mm6 \n\t" - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - "movq (%2), %%mm2 \n\t" - "movq 8(%2), %%mm3 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "pxor %%mm6, %%mm2 \n\t" - "pxor %%mm6, %%mm3 \n\t" - PAVGB" %%mm2, %%mm0 \n\t" - PAVGB" %%mm3, %%mm1 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "add %4, %1 \n\t" - "add $16, %2 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - "add %4, %1 \n\t" - "movq (%2), %%mm2 \n\t" - "movq 8(%2), %%mm3 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "pxor %%mm6, %%mm2 \n\t" - "pxor %%mm6, %%mm3 \n\t" - PAVGB" %%mm2, %%mm0 \n\t" - PAVGB" %%mm3, %%mm1 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - "add %4, %1 \n\t" - "movq 16(%2), %%mm2 \n\t" - "movq 24(%2), %%mm3 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "pxor %%mm6, %%mm2 \n\t" - "pxor %%mm6, %%mm3 \n\t" - PAVGB" %%mm2, %%mm0 \n\t" - PAVGB" %%mm3, %%mm1 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "add $32, %2 \n\t" - "subl $2, %0 \n\t" - "jnz 1b \n\t" -#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) - :"memory"); -//the following should be used, though better not with gcc ... 
-/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) - :"r"(src1Stride), "r"(dstStride) - :"memory");*/ -} - -/* GL: this function does incorrect rounding if overflow */ -static void DEF(put_no_rnd_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BONE(mm6); - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm2 \n\t" - "movq 1(%1), %%mm1 \n\t" - "movq 1(%1, %3), %%mm3 \n\t" - "add %%"REG_a", %1 \n\t" - "psubusb %%mm6, %%mm0 \n\t" - "psubusb %%mm6, %%mm2 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" %%mm3, %%mm2 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm2, (%2, %3) \n\t" - "movq (%1), %%mm0 \n\t" - "movq 1(%1), %%mm1 \n\t" - "movq (%1, %3), %%mm2 \n\t" - "movq 1(%1, %3), %%mm3 \n\t" - "add %%"REG_a", %2 \n\t" - "add %%"REG_a", %1 \n\t" - "psubusb %%mm6, %%mm0 \n\t" - "psubusb %%mm6, %%mm2 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" %%mm3, %%mm2 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm2, (%2, %3) \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r" ((x86_reg)line_size) - :"%"REG_a, "memory"); -} - -static void DEF(put_no_rnd_pixels8_x2_exact)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - __asm__ volatile ( - "pcmpeqb %%mm6, %%mm6 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm2 \n\t" - "movq 1(%1), %%mm1 \n\t" - "movq 1(%1, %3), %%mm3 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm2 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "pxor %%mm6, %%mm3 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" %%mm3, %%mm2 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm2 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm2, (%2, %3) \n\t" - "movq (%1, %3,2), %%mm0 \n\t" - "movq 1(%1, %3,2), %%mm1 \n\t" - "movq (%1, %4), %%mm2 \n\t" - "movq 1(%1, %4), %%mm3 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "pxor %%mm6, %%mm2 \n\t" - "pxor %%mm6, %%mm3 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" %%mm3, %%mm2 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm2 \n\t" - "movq %%mm0, (%2, %3,2) \n\t" - "movq %%mm2, (%2, %4) \n\t" - "lea (%1, %3,4), %1 \n\t" - "lea (%2, %3,4), %2 \n\t" - "subl $4, %0 \n\t" - "jg 1b \n\t" - : "+g"(h), "+r"(pixels), "+r"(block) - : "r" ((x86_reg)line_size), "r"((x86_reg)3*line_size) - : "memory" - ); -} - -static void DEF(put_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "movq (%1), %%mm0 \n\t" - "sub %3, %2 \n\t" - "1: \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm2 \n\t" - "add %%"REG_a", %1 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" %%mm2, %%mm1 \n\t" - "movq %%mm0, (%2, %3) \n\t" - "movq %%mm1, (%2, %%"REG_a") \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - "add %%"REG_a", %2 \n\t" - "add %%"REG_a", %1 \n\t" - PAVGB" %%mm1, %%mm2 \n\t" - PAVGB" %%mm0, %%mm1 \n\t" - "movq %%mm2, (%2, %3) \n\t" - "movq %%mm1, (%2, %%"REG_a") \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D" (block) - :"r" ((x86_reg)line_size) - :"%"REG_a, "memory"); -} - -/* GL: this function does incorrect rounding if overflow */ -static void DEF(put_no_rnd_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BONE(mm6); - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "movq (%1), %%mm0 \n\t" - "sub %3, %2 \n\t" - "1: \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm2 \n\t" - "add %%"REG_a", %1 
\n\t" - "psubusb %%mm6, %%mm1 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" %%mm2, %%mm1 \n\t" - "movq %%mm0, (%2, %3) \n\t" - "movq %%mm1, (%2, %%"REG_a") \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - "add %%"REG_a", %2 \n\t" - "add %%"REG_a", %1 \n\t" - "psubusb %%mm6, %%mm1 \n\t" - PAVGB" %%mm1, %%mm2 \n\t" - PAVGB" %%mm0, %%mm1 \n\t" - "movq %%mm2, (%2, %3) \n\t" - "movq %%mm1, (%2, %%"REG_a") \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D" (block) - :"r" ((x86_reg)line_size) - :"%"REG_a, "memory"); -} - -static void DEF(put_no_rnd_pixels8_y2_exact)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - __asm__ volatile ( - "movq (%1), %%mm0 \n\t" - "pcmpeqb %%mm6, %%mm6 \n\t" - "add %3, %1 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "1: \n\t" - "movq (%1), %%mm1 \n\t" - "movq (%1, %3), %%mm2 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "pxor %%mm6, %%mm2 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" %%mm2, %%mm1 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "movq (%1, %3,2), %%mm1 \n\t" - "movq (%1, %4), %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "pxor %%mm6, %%mm0 \n\t" - PAVGB" %%mm1, %%mm2 \n\t" - PAVGB" %%mm0, %%mm1 \n\t" - "pxor %%mm6, %%mm2 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "movq %%mm2, (%2, %3,2) \n\t" - "movq %%mm1, (%2, %4) \n\t" - "lea (%1, %3,4), %1 \n\t" - "lea (%2, %3,4), %2 \n\t" - "subl $4, %0 \n\t" - "jg 1b \n\t" - :"+g"(h), "+r"(pixels), "+r" (block) - :"r" ((x86_reg)line_size), "r"((x86_reg)3*line_size) - :"memory" - ); -} - -static void DEF(avg_pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "1: \n\t" - "movq (%2), %%mm0 \n\t" - "movq (%2, %3), %%mm1 \n\t" - PAVGB" (%1), %%mm0 \n\t" - PAVGB" (%1, %3), %%mm1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "movq (%2), %%mm0 \n\t" - "movq (%2, %3), %%mm1 \n\t" - PAVGB" (%1), %%mm0 \n\t" - PAVGB" (%1, %3), %%mm1 \n\t" - "add %%"REG_a", %1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r" ((x86_reg)line_size) - :"%"REG_a, "memory"); -} - -static void DEF(avg_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm2 \n\t" - PAVGB" 1(%1), %%mm0 \n\t" - PAVGB" 1(%1, %3), %%mm2 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" (%2, %3), %%mm2 \n\t" - "add %%"REG_a", %1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm2, (%2, %3) \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm2 \n\t" - PAVGB" 1(%1), %%mm0 \n\t" - PAVGB" 1(%1, %3), %%mm2 \n\t" - "add %%"REG_a", %2 \n\t" - "add %%"REG_a", %1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" (%2, %3), %%mm2 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm2, (%2, %3) \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r" ((x86_reg)line_size) - :"%"REG_a, "memory"); -} - -static void DEF(avg_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "movq (%1), %%mm0 \n\t" - "sub %3, %2 \n\t" - "1: \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm2 \n\t" - "add %%"REG_a", %1 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" 
%%mm2, %%mm1 \n\t" - "movq (%2, %3), %%mm3 \n\t" - "movq (%2, %%"REG_a"), %%mm4 \n\t" - PAVGB" %%mm3, %%mm0 \n\t" - PAVGB" %%mm4, %%mm1 \n\t" - "movq %%mm0, (%2, %3) \n\t" - "movq %%mm1, (%2, %%"REG_a") \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - PAVGB" %%mm1, %%mm2 \n\t" - PAVGB" %%mm0, %%mm1 \n\t" - "add %%"REG_a", %2 \n\t" - "add %%"REG_a", %1 \n\t" - "movq (%2, %3), %%mm3 \n\t" - "movq (%2, %%"REG_a"), %%mm4 \n\t" - PAVGB" %%mm3, %%mm2 \n\t" - PAVGB" %%mm4, %%mm1 \n\t" - "movq %%mm2, (%2, %3) \n\t" - "movq %%mm1, (%2, %%"REG_a") \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r" ((x86_reg)line_size) - :"%"REG_a, "memory"); -} - -/* Note this is not correctly rounded, but this function is only - * used for B-frames so it does not matter. */ -static void DEF(avg_pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BONE(mm6); - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "movq (%1), %%mm0 \n\t" - PAVGB" 1(%1), %%mm0 \n\t" - ".p2align 3 \n\t" - "1: \n\t" - "movq (%1, %%"REG_a"), %%mm2 \n\t" - "movq (%1, %3), %%mm1 \n\t" - "psubusb %%mm6, %%mm2 \n\t" - PAVGB" 1(%1, %3), %%mm1 \n\t" - PAVGB" 1(%1, %%"REG_a"), %%mm2 \n\t" - "add %%"REG_a", %1 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" %%mm2, %%mm1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" (%2, %3), %%mm1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - PAVGB" 1(%1, %3), %%mm1 \n\t" - PAVGB" 1(%1, %%"REG_a"), %%mm0 \n\t" - "add %%"REG_a", %2 \n\t" - "add %%"REG_a", %1 \n\t" - PAVGB" %%mm1, %%mm2 \n\t" - PAVGB" %%mm0, %%mm1 \n\t" - PAVGB" (%2), %%mm2 \n\t" - PAVGB" (%2, %3), %%mm1 \n\t" - "movq %%mm2, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r" ((x86_reg)line_size) - :"%"REG_a, "memory"); -} - -static void DEF(avg_pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - do { - __asm__ volatile( - "movd (%1), %%mm0 \n\t" - "movd (%1, %2), %%mm1 \n\t" - "movd (%1, %2, 2), %%mm2 \n\t" - "movd (%1, %3), %%mm3 \n\t" - PAVGB" (%0), %%mm0 \n\t" - PAVGB" (%0, %2), %%mm1 \n\t" - PAVGB" (%0, %2, 2), %%mm2 \n\t" - PAVGB" (%0, %3), %%mm3 \n\t" - "movd %%mm0, (%1) \n\t" - "movd %%mm1, (%1, %2) \n\t" - "movd %%mm2, (%1, %2, 2) \n\t" - "movd %%mm3, (%1, %3) \n\t" - ::"S"(pixels), "D"(block), - "r" ((x86_reg)line_size), "r"((x86_reg)3L*line_size) - :"memory"); - block += 4*line_size; - pixels += 4*line_size; - h -= 4; - } while(h > 0); -} - -//FIXME the following could be optimized too ... 
-static void DEF(put_no_rnd_pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(put_no_rnd_pixels8_x2)(block , pixels , line_size, h); - DEF(put_no_rnd_pixels8_x2)(block+8, pixels+8, line_size, h); -} -static void DEF(put_pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(put_pixels8_y2)(block , pixels , line_size, h); - DEF(put_pixels8_y2)(block+8, pixels+8, line_size, h); -} -static void DEF(put_no_rnd_pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(put_no_rnd_pixels8_y2)(block , pixels , line_size, h); - DEF(put_no_rnd_pixels8_y2)(block+8, pixels+8, line_size, h); -} -static void DEF(avg_pixels16)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(avg_pixels8)(block , pixels , line_size, h); - DEF(avg_pixels8)(block+8, pixels+8, line_size, h); -} -static void DEF(avg_pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(avg_pixels8_x2)(block , pixels , line_size, h); - DEF(avg_pixels8_x2)(block+8, pixels+8, line_size, h); -} -static void DEF(avg_pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(avg_pixels8_y2)(block , pixels , line_size, h); - DEF(avg_pixels8_y2)(block+8, pixels+8, line_size, h); -} -static void DEF(avg_pixels16_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(avg_pixels8_xy2)(block , pixels , line_size, h); - DEF(avg_pixels8_xy2)(block+8, pixels+8, line_size, h); -} - -#define QPEL_2TAP_L3(OPNAME) \ -static void DEF(OPNAME ## 2tap_qpel16_l3)(uint8_t *dst, uint8_t *src, int stride, int h, int off1, int off2){\ - __asm__ volatile(\ - "1: \n\t"\ - "movq (%1,%2), %%mm0 \n\t"\ - "movq 8(%1,%2), %%mm1 \n\t"\ - PAVGB" (%1,%3), %%mm0 \n\t"\ - PAVGB" 8(%1,%3), %%mm1 \n\t"\ - PAVGB" (%1), %%mm0 \n\t"\ - PAVGB" 8(%1), %%mm1 \n\t"\ - STORE_OP( (%1,%4),%%mm0)\ - STORE_OP(8(%1,%4),%%mm1)\ - "movq %%mm0, (%1,%4) \n\t"\ - "movq %%mm1, 8(%1,%4) \n\t"\ - "add %5, %1 \n\t"\ - "decl %0 \n\t"\ - "jnz 1b \n\t"\ - :"+g"(h), "+r"(src)\ - :"r"((x86_reg)off1), "r"((x86_reg)off2),\ - "r"((x86_reg)(dst-src)), "r"((x86_reg)stride)\ - :"memory"\ - );\ -}\ -static void DEF(OPNAME ## 2tap_qpel8_l3)(uint8_t *dst, uint8_t *src, int stride, int h, int off1, int off2){\ - __asm__ volatile(\ - "1: \n\t"\ - "movq (%1,%2), %%mm0 \n\t"\ - PAVGB" (%1,%3), %%mm0 \n\t"\ - PAVGB" (%1), %%mm0 \n\t"\ - STORE_OP((%1,%4),%%mm0)\ - "movq %%mm0, (%1,%4) \n\t"\ - "add %5, %1 \n\t"\ - "decl %0 \n\t"\ - "jnz 1b \n\t"\ - :"+g"(h), "+r"(src)\ - :"r"((x86_reg)off1), "r"((x86_reg)off2),\ - "r"((x86_reg)(dst-src)), "r"((x86_reg)stride)\ - :"memory"\ - );\ -} - -#define STORE_OP(a,b) PAVGB" "#a","#b" \n\t" -QPEL_2TAP_L3(avg_) -#undef STORE_OP -#define STORE_OP(a,b) -QPEL_2TAP_L3(put_) -#undef STORE_OP -#undef QPEL_2TAP_L3 diff --git a/libavcodec/x86/dsputil_mmx_qns_template.c b/libavcodec/x86/dsputil_mmx_qns_template.c deleted file mode 100644 index 20a40a175e..0000000000 --- a/libavcodec/x86/dsputil_mmx_qns_template.c +++ /dev/null @@ -1,101 +0,0 @@ -/* - * DSP utils : QNS functions are compiled 3 times for mmx/3dnow/ssse3 - * Copyright (c) 2004 Michael Niedermayer - * - * MMX optimization by Michael Niedermayer <michaelni@gmx.at> - * 3DNow! and SSSE3 optimization by Zuxy Meng <zuxy.meng@gmail.com> - * - * This file is part of Libav. 
- * - * Libav is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * Libav is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#define MAX_ABS (512 >> (SCALE_OFFSET>0 ? SCALE_OFFSET : 0)) - -static int DEF(try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale) -{ - x86_reg i=0; - - assert(FFABS(scale) < MAX_ABS); - scale<<= 16 + SCALE_OFFSET - BASIS_SHIFT + RECON_SHIFT; - - SET_RND(mm6); - __asm__ volatile( - "pxor %%mm7, %%mm7 \n\t" - "movd %4, %%mm5 \n\t" - "punpcklwd %%mm5, %%mm5 \n\t" - "punpcklwd %%mm5, %%mm5 \n\t" - ".p2align 4 \n\t" - "1: \n\t" - "movq (%1, %0), %%mm0 \n\t" - "movq 8(%1, %0), %%mm1 \n\t" - PMULHRW(%%mm0, %%mm1, %%mm5, %%mm6) - "paddw (%2, %0), %%mm0 \n\t" - "paddw 8(%2, %0), %%mm1 \n\t" - "psraw $6, %%mm0 \n\t" - "psraw $6, %%mm1 \n\t" - "pmullw (%3, %0), %%mm0 \n\t" - "pmullw 8(%3, %0), %%mm1 \n\t" - "pmaddwd %%mm0, %%mm0 \n\t" - "pmaddwd %%mm1, %%mm1 \n\t" - "paddd %%mm1, %%mm0 \n\t" - "psrld $4, %%mm0 \n\t" - "paddd %%mm0, %%mm7 \n\t" - "add $16, %0 \n\t" - "cmp $128, %0 \n\t" //FIXME optimize & bench - " jb 1b \n\t" - PHADDD(%%mm7, %%mm6) - "psrld $2, %%mm7 \n\t" - "movd %%mm7, %0 \n\t" - - : "+r" (i) - : "r"(basis), "r"(rem), "r"(weight), "g"(scale) - ); - return i; -} - -static void DEF(add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale) -{ - x86_reg i=0; - - if(FFABS(scale) < MAX_ABS){ - scale<<= 16 + SCALE_OFFSET - BASIS_SHIFT + RECON_SHIFT; - SET_RND(mm6); - __asm__ volatile( - "movd %3, %%mm5 \n\t" - "punpcklwd %%mm5, %%mm5 \n\t" - "punpcklwd %%mm5, %%mm5 \n\t" - ".p2align 4 \n\t" - "1: \n\t" - "movq (%1, %0), %%mm0 \n\t" - "movq 8(%1, %0), %%mm1 \n\t" - PMULHRW(%%mm0, %%mm1, %%mm5, %%mm6) - "paddw (%2, %0), %%mm0 \n\t" - "paddw 8(%2, %0), %%mm1 \n\t" - "movq %%mm0, (%2, %0) \n\t" - "movq %%mm1, 8(%2, %0) \n\t" - "add $16, %0 \n\t" - "cmp $128, %0 \n\t" // FIXME optimize & bench - " jb 1b \n\t" - - : "+r" (i) - : "r"(basis), "r"(rem), "g"(scale) - ); - }else{ - for(i=0; i<8*8; i++){ - rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT); - } - } -} diff --git a/libavcodec/x86/dsputil_mmx_rnd_template.c b/libavcodec/x86/dsputil_mmx_rnd_template.c deleted file mode 100644 index 34a2c0bca8..0000000000 --- a/libavcodec/x86/dsputil_mmx_rnd_template.c +++ /dev/null @@ -1,590 +0,0 @@ -/* - * DSP utils mmx functions are compiled twice for rnd/no_rnd - * Copyright (c) 2000, 2001 Fabrice Bellard - * Copyright (c) 2003-2004 Michael Niedermayer <michaelni@gmx.at> - * - * MMX optimization by Nick Kurshev <nickols_k@mail.ru> - * mostly rewritten by Michael Niedermayer <michaelni@gmx.at> - * and improved by Zdenek Kabelac <kabi@users.sf.net> - * - * This file is part of Libav. 
- * - * Libav is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * Libav is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -// put_pixels -static void DEF(put, pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - ".p2align 3 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq 1(%1), %%mm1 \n\t" - "movq (%1, %3), %%mm2 \n\t" - "movq 1(%1, %3), %%mm3 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%2) \n\t" - "movq %%mm5, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "movq (%1), %%mm0 \n\t" - "movq 1(%1), %%mm1 \n\t" - "movq (%1, %3), %%mm2 \n\t" - "movq 1(%1, %3), %%mm3 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%2) \n\t" - "movq %%mm5, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r"((x86_reg)line_size) - :REG_a, "memory"); -} - -static void av_unused DEF(put, pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - MOVQ_BFE(mm6); - __asm__ volatile( - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "add $8, %2 \n\t" - PAVGB(%%mm0, %%mm1, %%mm4, %%mm6) - "movq %%mm4, (%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - ".p2align 3 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "movq (%1), %%mm2 \n\t" - "movq 8(%2), %%mm3 \n\t" - "add %4, %1 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%3) \n\t" - "add %5, %3 \n\t" - "movq %%mm5, (%3) \n\t" - "add %5, %3 \n\t" - "movq (%1), %%mm0 \n\t" - "movq 16(%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "movq (%1), %%mm2 \n\t" - "movq 24(%2), %%mm3 \n\t" - "add %4, %1 \n\t" - "add $32, %2 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%3) \n\t" - "add %5, %3 \n\t" - "movq %%mm5, (%3) \n\t" - "add %5, %3 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" -#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) - :"memory"); -} - -static void DEF(put, pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - ".p2align 3 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq 1(%1), %%mm1 \n\t" - "movq (%1, %3), %%mm2 \n\t" - "movq 1(%1, %3), %%mm3 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%2) \n\t" - "movq %%mm5, (%2, %3) \n\t" - "movq 8(%1), %%mm0 \n\t" - "movq 9(%1), %%mm1 \n\t" - "movq 8(%1, %3), %%mm2 \n\t" - "movq 9(%1, %3), %%mm3 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, 
%%mm2, %%mm3, %%mm5) - "movq %%mm4, 8(%2) \n\t" - "movq %%mm5, 8(%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "movq (%1), %%mm0 \n\t" - "movq 1(%1), %%mm1 \n\t" - "movq (%1, %3), %%mm2 \n\t" - "movq 1(%1, %3), %%mm3 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%2) \n\t" - "movq %%mm5, (%2, %3) \n\t" - "movq 8(%1), %%mm0 \n\t" - "movq 9(%1), %%mm1 \n\t" - "movq 8(%1, %3), %%mm2 \n\t" - "movq 9(%1, %3), %%mm3 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, 8(%2) \n\t" - "movq %%mm5, 8(%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r"((x86_reg)line_size) - :REG_a, "memory"); -} - -static void av_unused DEF(put, pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - MOVQ_BFE(mm6); - __asm__ volatile( - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%2), %%mm1 \n\t" - "movq 8(%1), %%mm2 \n\t" - "movq 8(%2), %%mm3 \n\t" - "add %4, %1 \n\t" - "add $16, %2 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%3) \n\t" - "movq %%mm5, 8(%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - ".p2align 3 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%2), %%mm1 \n\t" - "movq 8(%1), %%mm2 \n\t" - "movq 8(%2), %%mm3 \n\t" - "add %4, %1 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%3) \n\t" - "movq %%mm5, 8(%3) \n\t" - "add %5, %3 \n\t" - "movq (%1), %%mm0 \n\t" - "movq 16(%2), %%mm1 \n\t" - "movq 8(%1), %%mm2 \n\t" - "movq 24(%2), %%mm3 \n\t" - "add %4, %1 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%3) \n\t" - "movq %%mm5, 8(%3) \n\t" - "add %5, %3 \n\t" - "add $32, %2 \n\t" - "subl $2, %0 \n\t" - "jnz 1b \n\t" -#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) - :"memory"); -} - -static void DEF(put, pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "movq (%1), %%mm0 \n\t" - ".p2align 3 \n\t" - "1: \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"),%%mm2 \n\t" - PAVGBP(%%mm1, %%mm0, %%mm4, %%mm2, %%mm1, %%mm5) - "movq %%mm4, (%2) \n\t" - "movq %%mm5, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"),%%mm0 \n\t" - PAVGBP(%%mm1, %%mm2, %%mm4, %%mm0, %%mm1, %%mm5) - "movq %%mm4, (%2) \n\t" - "movq %%mm5, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r"((x86_reg)line_size) - :REG_a, "memory"); -} - -static void DEF(put, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_ZERO(mm7); - SET_RND(mm6); // =2 for rnd and =1 for no_rnd version - __asm__ volatile( - "movq (%1), %%mm0 \n\t" - "movq 1(%1), %%mm4 \n\t" - "movq %%mm0, %%mm1 \n\t" - "movq %%mm4, %%mm5 \n\t" - "punpcklbw %%mm7, %%mm0 \n\t" - "punpcklbw %%mm7, %%mm4 \n\t" - "punpckhbw %%mm7, %%mm1 \n\t" - "punpckhbw %%mm7, %%mm5 \n\t" - "paddusw %%mm0, %%mm4 \n\t" - "paddusw %%mm1, %%mm5 \n\t" - "xor %%"REG_a", %%"REG_a" \n\t" - "add %3, %1 \n\t" - ".p2align 3 \n\t" - "1: \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - 
"movq 1(%1, %%"REG_a"), %%mm2 \n\t" - "movq %%mm0, %%mm1 \n\t" - "movq %%mm2, %%mm3 \n\t" - "punpcklbw %%mm7, %%mm0 \n\t" - "punpcklbw %%mm7, %%mm2 \n\t" - "punpckhbw %%mm7, %%mm1 \n\t" - "punpckhbw %%mm7, %%mm3 \n\t" - "paddusw %%mm2, %%mm0 \n\t" - "paddusw %%mm3, %%mm1 \n\t" - "paddusw %%mm6, %%mm4 \n\t" - "paddusw %%mm6, %%mm5 \n\t" - "paddusw %%mm0, %%mm4 \n\t" - "paddusw %%mm1, %%mm5 \n\t" - "psrlw $2, %%mm4 \n\t" - "psrlw $2, %%mm5 \n\t" - "packuswb %%mm5, %%mm4 \n\t" - "movq %%mm4, (%2, %%"REG_a") \n\t" - "add %3, %%"REG_a" \n\t" - - "movq (%1, %%"REG_a"), %%mm2 \n\t" // 0 <-> 2 1 <-> 3 - "movq 1(%1, %%"REG_a"), %%mm4 \n\t" - "movq %%mm2, %%mm3 \n\t" - "movq %%mm4, %%mm5 \n\t" - "punpcklbw %%mm7, %%mm2 \n\t" - "punpcklbw %%mm7, %%mm4 \n\t" - "punpckhbw %%mm7, %%mm3 \n\t" - "punpckhbw %%mm7, %%mm5 \n\t" - "paddusw %%mm2, %%mm4 \n\t" - "paddusw %%mm3, %%mm5 \n\t" - "paddusw %%mm6, %%mm0 \n\t" - "paddusw %%mm6, %%mm1 \n\t" - "paddusw %%mm4, %%mm0 \n\t" - "paddusw %%mm5, %%mm1 \n\t" - "psrlw $2, %%mm0 \n\t" - "psrlw $2, %%mm1 \n\t" - "packuswb %%mm1, %%mm0 \n\t" - "movq %%mm0, (%2, %%"REG_a") \n\t" - "add %3, %%"REG_a" \n\t" - - "subl $2, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels) - :"D"(block), "r"((x86_reg)line_size) - :REG_a, "memory"); -} - -// avg_pixels -static void av_unused DEF(avg, pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - JUMPALIGN(); - do { - __asm__ volatile( - "movd %0, %%mm0 \n\t" - "movd %1, %%mm1 \n\t" - OP_AVG(%%mm0, %%mm1, %%mm2, %%mm6) - "movd %%mm2, %0 \n\t" - :"+m"(*block) - :"m"(*pixels) - :"memory"); - pixels += line_size; - block += line_size; - } - while (--h); -} - -// in case more speed is needed - unroling would certainly help -static void DEF(avg, pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - JUMPALIGN(); - do { - __asm__ volatile( - "movq %0, %%mm0 \n\t" - "movq %1, %%mm1 \n\t" - OP_AVG(%%mm0, %%mm1, %%mm2, %%mm6) - "movq %%mm2, %0 \n\t" - :"+m"(*block) - :"m"(*pixels) - :"memory"); - pixels += line_size; - block += line_size; - } - while (--h); -} - -static void DEF(avg, pixels16)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - JUMPALIGN(); - do { - __asm__ volatile( - "movq %0, %%mm0 \n\t" - "movq %1, %%mm1 \n\t" - OP_AVG(%%mm0, %%mm1, %%mm2, %%mm6) - "movq %%mm2, %0 \n\t" - "movq 8%0, %%mm0 \n\t" - "movq 8%1, %%mm1 \n\t" - OP_AVG(%%mm0, %%mm1, %%mm2, %%mm6) - "movq %%mm2, 8%0 \n\t" - :"+m"(*block) - :"m"(*pixels) - :"memory"); - pixels += line_size; - block += line_size; - } - while (--h); -} - -static void DEF(avg, pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - JUMPALIGN(); - do { - __asm__ volatile( - "movq %1, %%mm0 \n\t" - "movq 1%1, %%mm1 \n\t" - "movq %0, %%mm3 \n\t" - PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) - OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) - "movq %%mm0, %0 \n\t" - :"+m"(*block) - :"m"(*pixels) - :"memory"); - pixels += line_size; - block += line_size; - } while (--h); -} - -static av_unused void DEF(avg, pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - MOVQ_BFE(mm6); - JUMPALIGN(); - do { - __asm__ volatile( - "movq %1, %%mm0 \n\t" - "movq %2, %%mm1 \n\t" - "movq %0, %%mm3 \n\t" - PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) - OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) - "movq %%mm0, %0 \n\t" - :"+m"(*dst) - :"m"(*src1), "m"(*src2) - :"memory"); - dst += dstStride; - src1 += src1Stride; - src2 += 8; - } while (--h); -} - -static void 
DEF(avg, pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - JUMPALIGN(); - do { - __asm__ volatile( - "movq %1, %%mm0 \n\t" - "movq 1%1, %%mm1 \n\t" - "movq %0, %%mm3 \n\t" - PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) - OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) - "movq %%mm0, %0 \n\t" - "movq 8%1, %%mm0 \n\t" - "movq 9%1, %%mm1 \n\t" - "movq 8%0, %%mm3 \n\t" - PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) - OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) - "movq %%mm0, 8%0 \n\t" - :"+m"(*block) - :"m"(*pixels) - :"memory"); - pixels += line_size; - block += line_size; - } while (--h); -} - -static av_unused void DEF(avg, pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - MOVQ_BFE(mm6); - JUMPALIGN(); - do { - __asm__ volatile( - "movq %1, %%mm0 \n\t" - "movq %2, %%mm1 \n\t" - "movq %0, %%mm3 \n\t" - PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) - OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) - "movq %%mm0, %0 \n\t" - "movq 8%1, %%mm0 \n\t" - "movq 8%2, %%mm1 \n\t" - "movq 8%0, %%mm3 \n\t" - PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) - OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) - "movq %%mm0, 8%0 \n\t" - :"+m"(*dst) - :"m"(*src1), "m"(*src2) - :"memory"); - dst += dstStride; - src1 += src1Stride; - src2 += 16; - } while (--h); -} - -static void DEF(avg, pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "movq (%1), %%mm0 \n\t" - ".p2align 3 \n\t" - "1: \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm2 \n\t" - PAVGBP(%%mm1, %%mm0, %%mm4, %%mm2, %%mm1, %%mm5) - "movq (%2), %%mm3 \n\t" - OP_AVG(%%mm3, %%mm4, %%mm0, %%mm6) - "movq (%2, %3), %%mm3 \n\t" - OP_AVG(%%mm3, %%mm5, %%mm1, %%mm6) - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - PAVGBP(%%mm1, %%mm2, %%mm4, %%mm0, %%mm1, %%mm5) - "movq (%2), %%mm3 \n\t" - OP_AVG(%%mm3, %%mm4, %%mm2, %%mm6) - "movq (%2, %3), %%mm3 \n\t" - OP_AVG(%%mm3, %%mm5, %%mm1, %%mm6) - "movq %%mm2, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r"((x86_reg)line_size) - :REG_a, "memory"); -} - -// this routine is 'slightly' suboptimal but mostly unused -static void DEF(avg, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_ZERO(mm7); - SET_RND(mm6); // =2 for rnd and =1 for no_rnd version - __asm__ volatile( - "movq (%1), %%mm0 \n\t" - "movq 1(%1), %%mm4 \n\t" - "movq %%mm0, %%mm1 \n\t" - "movq %%mm4, %%mm5 \n\t" - "punpcklbw %%mm7, %%mm0 \n\t" - "punpcklbw %%mm7, %%mm4 \n\t" - "punpckhbw %%mm7, %%mm1 \n\t" - "punpckhbw %%mm7, %%mm5 \n\t" - "paddusw %%mm0, %%mm4 \n\t" - "paddusw %%mm1, %%mm5 \n\t" - "xor %%"REG_a", %%"REG_a" \n\t" - "add %3, %1 \n\t" - ".p2align 3 \n\t" - "1: \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - "movq 1(%1, %%"REG_a"), %%mm2 \n\t" - "movq %%mm0, %%mm1 \n\t" - "movq %%mm2, %%mm3 \n\t" - "punpcklbw %%mm7, %%mm0 \n\t" - "punpcklbw %%mm7, %%mm2 \n\t" - "punpckhbw %%mm7, %%mm1 \n\t" - "punpckhbw %%mm7, %%mm3 \n\t" - "paddusw %%mm2, %%mm0 \n\t" - "paddusw %%mm3, %%mm1 \n\t" - "paddusw %%mm6, %%mm4 \n\t" - "paddusw %%mm6, %%mm5 \n\t" - "paddusw %%mm0, %%mm4 \n\t" - "paddusw %%mm1, %%mm5 \n\t" - "psrlw $2, %%mm4 \n\t" - "psrlw $2, %%mm5 \n\t" - "movq (%2, %%"REG_a"), %%mm3 \n\t" - "packuswb %%mm5, %%mm4 \n\t" - "pcmpeqd %%mm2, %%mm2 
\n\t" - "paddb %%mm2, %%mm2 \n\t" - OP_AVG(%%mm3, %%mm4, %%mm5, %%mm2) - "movq %%mm5, (%2, %%"REG_a") \n\t" - "add %3, %%"REG_a" \n\t" - - "movq (%1, %%"REG_a"), %%mm2 \n\t" // 0 <-> 2 1 <-> 3 - "movq 1(%1, %%"REG_a"), %%mm4 \n\t" - "movq %%mm2, %%mm3 \n\t" - "movq %%mm4, %%mm5 \n\t" - "punpcklbw %%mm7, %%mm2 \n\t" - "punpcklbw %%mm7, %%mm4 \n\t" - "punpckhbw %%mm7, %%mm3 \n\t" - "punpckhbw %%mm7, %%mm5 \n\t" - "paddusw %%mm2, %%mm4 \n\t" - "paddusw %%mm3, %%mm5 \n\t" - "paddusw %%mm6, %%mm0 \n\t" - "paddusw %%mm6, %%mm1 \n\t" - "paddusw %%mm4, %%mm0 \n\t" - "paddusw %%mm5, %%mm1 \n\t" - "psrlw $2, %%mm0 \n\t" - "psrlw $2, %%mm1 \n\t" - "movq (%2, %%"REG_a"), %%mm3 \n\t" - "packuswb %%mm1, %%mm0 \n\t" - "pcmpeqd %%mm2, %%mm2 \n\t" - "paddb %%mm2, %%mm2 \n\t" - OP_AVG(%%mm3, %%mm0, %%mm1, %%mm2) - "movq %%mm1, (%2, %%"REG_a") \n\t" - "add %3, %%"REG_a" \n\t" - - "subl $2, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels) - :"D"(block), "r"((x86_reg)line_size) - :REG_a, "memory"); -} - -//FIXME optimize -static void DEF(put, pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(put, pixels8_y2)(block , pixels , line_size, h); - DEF(put, pixels8_y2)(block+8, pixels+8, line_size, h); -} - -static void DEF(put, pixels16_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(put, pixels8_xy2)(block , pixels , line_size, h); - DEF(put, pixels8_xy2)(block+8, pixels+8, line_size, h); -} - -static void DEF(avg, pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(avg, pixels8_y2)(block , pixels , line_size, h); - DEF(avg, pixels8_y2)(block+8, pixels+8, line_size, h); -} - -static void DEF(avg, pixels16_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(avg, pixels8_xy2)(block , pixels , line_size, h); - DEF(avg, pixels8_xy2)(block+8, pixels+8, line_size, h); -} diff --git a/libavcodec/x86/dsputil_qns_template.c b/libavcodec/x86/dsputil_qns_template.c new file mode 100644 index 0000000000..20a40a175e --- /dev/null +++ b/libavcodec/x86/dsputil_qns_template.c @@ -0,0 +1,101 @@ +/* + * DSP utils : QNS functions are compiled 3 times for mmx/3dnow/ssse3 + * Copyright (c) 2004 Michael Niedermayer + * + * MMX optimization by Michael Niedermayer <michaelni@gmx.at> + * 3DNow! and SSSE3 optimization by Zuxy Meng <zuxy.meng@gmail.com> + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#define MAX_ABS (512 >> (SCALE_OFFSET>0 ? 
SCALE_OFFSET : 0)) + +static int DEF(try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale) +{ + x86_reg i=0; + + assert(FFABS(scale) < MAX_ABS); + scale<<= 16 + SCALE_OFFSET - BASIS_SHIFT + RECON_SHIFT; + + SET_RND(mm6); + __asm__ volatile( + "pxor %%mm7, %%mm7 \n\t" + "movd %4, %%mm5 \n\t" + "punpcklwd %%mm5, %%mm5 \n\t" + "punpcklwd %%mm5, %%mm5 \n\t" + ".p2align 4 \n\t" + "1: \n\t" + "movq (%1, %0), %%mm0 \n\t" + "movq 8(%1, %0), %%mm1 \n\t" + PMULHRW(%%mm0, %%mm1, %%mm5, %%mm6) + "paddw (%2, %0), %%mm0 \n\t" + "paddw 8(%2, %0), %%mm1 \n\t" + "psraw $6, %%mm0 \n\t" + "psraw $6, %%mm1 \n\t" + "pmullw (%3, %0), %%mm0 \n\t" + "pmullw 8(%3, %0), %%mm1 \n\t" + "pmaddwd %%mm0, %%mm0 \n\t" + "pmaddwd %%mm1, %%mm1 \n\t" + "paddd %%mm1, %%mm0 \n\t" + "psrld $4, %%mm0 \n\t" + "paddd %%mm0, %%mm7 \n\t" + "add $16, %0 \n\t" + "cmp $128, %0 \n\t" //FIXME optimize & bench + " jb 1b \n\t" + PHADDD(%%mm7, %%mm6) + "psrld $2, %%mm7 \n\t" + "movd %%mm7, %0 \n\t" + + : "+r" (i) + : "r"(basis), "r"(rem), "r"(weight), "g"(scale) + ); + return i; +} + +static void DEF(add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale) +{ + x86_reg i=0; + + if(FFABS(scale) < MAX_ABS){ + scale<<= 16 + SCALE_OFFSET - BASIS_SHIFT + RECON_SHIFT; + SET_RND(mm6); + __asm__ volatile( + "movd %3, %%mm5 \n\t" + "punpcklwd %%mm5, %%mm5 \n\t" + "punpcklwd %%mm5, %%mm5 \n\t" + ".p2align 4 \n\t" + "1: \n\t" + "movq (%1, %0), %%mm0 \n\t" + "movq 8(%1, %0), %%mm1 \n\t" + PMULHRW(%%mm0, %%mm1, %%mm5, %%mm6) + "paddw (%2, %0), %%mm0 \n\t" + "paddw 8(%2, %0), %%mm1 \n\t" + "movq %%mm0, (%2, %0) \n\t" + "movq %%mm1, 8(%2, %0) \n\t" + "add $16, %0 \n\t" + "cmp $128, %0 \n\t" // FIXME optimize & bench + " jb 1b \n\t" + + : "+r" (i) + : "r"(basis), "r"(rem), "g"(scale) + ); + }else{ + for(i=0; i<8*8; i++){ + rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT); + } + } +} diff --git a/libavcodec/x86/dsputil_rnd_template.c b/libavcodec/x86/dsputil_rnd_template.c new file mode 100644 index 0000000000..34a2c0bca8 --- /dev/null +++ b/libavcodec/x86/dsputil_rnd_template.c @@ -0,0 +1,590 @@ +/* + * DSP utils mmx functions are compiled twice for rnd/no_rnd + * Copyright (c) 2000, 2001 Fabrice Bellard + * Copyright (c) 2003-2004 Michael Niedermayer <michaelni@gmx.at> + * + * MMX optimization by Nick Kurshev <nickols_k@mail.ru> + * mostly rewritten by Michael Niedermayer <michaelni@gmx.at> + * and improved by Zdenek Kabelac <kabi@users.sf.net> + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +// put_pixels +static void DEF(put, pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_BFE(mm6); + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + ".p2align 3 \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "movq 1(%1), %%mm1 \n\t" + "movq (%1, %3), %%mm2 \n\t" + "movq 1(%1, %3), %%mm3 \n\t" + PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) + "movq %%mm4, (%2) \n\t" + "movq %%mm5, (%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + "movq (%1), %%mm0 \n\t" + "movq 1(%1), %%mm1 \n\t" + "movq (%1, %3), %%mm2 \n\t" + "movq 1(%1, %3), %%mm3 \n\t" + PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) + "movq %%mm4, (%2) \n\t" + "movq %%mm5, (%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D"(block) + :"r"((x86_reg)line_size) + :REG_a, "memory"); +} + +static void av_unused DEF(put, pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +{ + MOVQ_BFE(mm6); + __asm__ volatile( + "testl $1, %0 \n\t" + " jz 1f \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%2), %%mm1 \n\t" + "add %4, %1 \n\t" + "add $8, %2 \n\t" + PAVGB(%%mm0, %%mm1, %%mm4, %%mm6) + "movq %%mm4, (%3) \n\t" + "add %5, %3 \n\t" + "decl %0 \n\t" + ".p2align 3 \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%2), %%mm1 \n\t" + "add %4, %1 \n\t" + "movq (%1), %%mm2 \n\t" + "movq 8(%2), %%mm3 \n\t" + "add %4, %1 \n\t" + PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) + "movq %%mm4, (%3) \n\t" + "add %5, %3 \n\t" + "movq %%mm5, (%3) \n\t" + "add %5, %3 \n\t" + "movq (%1), %%mm0 \n\t" + "movq 16(%2), %%mm1 \n\t" + "add %4, %1 \n\t" + "movq (%1), %%mm2 \n\t" + "movq 24(%2), %%mm3 \n\t" + "add %4, %1 \n\t" + "add $32, %2 \n\t" + PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) + "movq %%mm4, (%3) \n\t" + "add %5, %3 \n\t" + "movq %%mm5, (%3) \n\t" + "add %5, %3 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" +#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used + :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#else + :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#endif + :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) + :"memory"); +} + +static void DEF(put, pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_BFE(mm6); + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + ".p2align 3 \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "movq 1(%1), %%mm1 \n\t" + "movq (%1, %3), %%mm2 \n\t" + "movq 1(%1, %3), %%mm3 \n\t" + PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) + "movq %%mm4, (%2) \n\t" + "movq %%mm5, (%2, %3) \n\t" + "movq 8(%1), %%mm0 \n\t" + "movq 9(%1), %%mm1 \n\t" + "movq 8(%1, %3), %%mm2 \n\t" + "movq 9(%1, %3), %%mm3 \n\t" + PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) + "movq %%mm4, 8(%2) \n\t" + "movq %%mm5, 8(%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + "movq (%1), %%mm0 \n\t" + "movq 1(%1), %%mm1 \n\t" + "movq (%1, %3), %%mm2 \n\t" + "movq 1(%1, %3), %%mm3 \n\t" + PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) + "movq %%mm4, (%2) \n\t" + "movq %%mm5, (%2, %3) \n\t" + "movq 8(%1), %%mm0 \n\t" + "movq 9(%1), %%mm1 \n\t" + "movq 8(%1, %3), %%mm2 \n\t" + "movq 9(%1, %3), %%mm3 \n\t" + PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, 
%%mm5) + "movq %%mm4, 8(%2) \n\t" + "movq %%mm5, 8(%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D"(block) + :"r"((x86_reg)line_size) + :REG_a, "memory"); +} + +static void av_unused DEF(put, pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +{ + MOVQ_BFE(mm6); + __asm__ volatile( + "testl $1, %0 \n\t" + " jz 1f \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%2), %%mm1 \n\t" + "movq 8(%1), %%mm2 \n\t" + "movq 8(%2), %%mm3 \n\t" + "add %4, %1 \n\t" + "add $16, %2 \n\t" + PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) + "movq %%mm4, (%3) \n\t" + "movq %%mm5, 8(%3) \n\t" + "add %5, %3 \n\t" + "decl %0 \n\t" + ".p2align 3 \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%2), %%mm1 \n\t" + "movq 8(%1), %%mm2 \n\t" + "movq 8(%2), %%mm3 \n\t" + "add %4, %1 \n\t" + PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) + "movq %%mm4, (%3) \n\t" + "movq %%mm5, 8(%3) \n\t" + "add %5, %3 \n\t" + "movq (%1), %%mm0 \n\t" + "movq 16(%2), %%mm1 \n\t" + "movq 8(%1), %%mm2 \n\t" + "movq 24(%2), %%mm3 \n\t" + "add %4, %1 \n\t" + PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) + "movq %%mm4, (%3) \n\t" + "movq %%mm5, 8(%3) \n\t" + "add %5, %3 \n\t" + "add $32, %2 \n\t" + "subl $2, %0 \n\t" + "jnz 1b \n\t" +#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used + :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#else + :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#endif + :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) + :"memory"); +} + +static void DEF(put, pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_BFE(mm6); + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + "movq (%1), %%mm0 \n\t" + ".p2align 3 \n\t" + "1: \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq (%1, %%"REG_a"),%%mm2 \n\t" + PAVGBP(%%mm1, %%mm0, %%mm4, %%mm2, %%mm1, %%mm5) + "movq %%mm4, (%2) \n\t" + "movq %%mm5, (%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq (%1, %%"REG_a"),%%mm0 \n\t" + PAVGBP(%%mm1, %%mm2, %%mm4, %%mm0, %%mm1, %%mm5) + "movq %%mm4, (%2) \n\t" + "movq %%mm5, (%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D"(block) + :"r"((x86_reg)line_size) + :REG_a, "memory"); +} + +static void DEF(put, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_ZERO(mm7); + SET_RND(mm6); // =2 for rnd and =1 for no_rnd version + __asm__ volatile( + "movq (%1), %%mm0 \n\t" + "movq 1(%1), %%mm4 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm4, %%mm5 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "punpcklbw %%mm7, %%mm4 \n\t" + "punpckhbw %%mm7, %%mm1 \n\t" + "punpckhbw %%mm7, %%mm5 \n\t" + "paddusw %%mm0, %%mm4 \n\t" + "paddusw %%mm1, %%mm5 \n\t" + "xor %%"REG_a", %%"REG_a" \n\t" + "add %3, %1 \n\t" + ".p2align 3 \n\t" + "1: \n\t" + "movq (%1, %%"REG_a"), %%mm0 \n\t" + "movq 1(%1, %%"REG_a"), %%mm2 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm2, %%mm3 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "punpcklbw %%mm7, %%mm2 \n\t" + "punpckhbw %%mm7, %%mm1 \n\t" + "punpckhbw %%mm7, %%mm3 \n\t" + "paddusw %%mm2, %%mm0 \n\t" + "paddusw %%mm3, %%mm1 \n\t" + "paddusw %%mm6, %%mm4 \n\t" + "paddusw %%mm6, %%mm5 \n\t" + "paddusw %%mm0, %%mm4 \n\t" + "paddusw %%mm1, %%mm5 \n\t" + "psrlw $2, %%mm4 \n\t" + "psrlw $2, %%mm5 \n\t" + "packuswb %%mm5, %%mm4 \n\t" + "movq %%mm4, (%2, %%"REG_a") 
\n\t" + "add %3, %%"REG_a" \n\t" + + "movq (%1, %%"REG_a"), %%mm2 \n\t" // 0 <-> 2 1 <-> 3 + "movq 1(%1, %%"REG_a"), %%mm4 \n\t" + "movq %%mm2, %%mm3 \n\t" + "movq %%mm4, %%mm5 \n\t" + "punpcklbw %%mm7, %%mm2 \n\t" + "punpcklbw %%mm7, %%mm4 \n\t" + "punpckhbw %%mm7, %%mm3 \n\t" + "punpckhbw %%mm7, %%mm5 \n\t" + "paddusw %%mm2, %%mm4 \n\t" + "paddusw %%mm3, %%mm5 \n\t" + "paddusw %%mm6, %%mm0 \n\t" + "paddusw %%mm6, %%mm1 \n\t" + "paddusw %%mm4, %%mm0 \n\t" + "paddusw %%mm5, %%mm1 \n\t" + "psrlw $2, %%mm0 \n\t" + "psrlw $2, %%mm1 \n\t" + "packuswb %%mm1, %%mm0 \n\t" + "movq %%mm0, (%2, %%"REG_a") \n\t" + "add %3, %%"REG_a" \n\t" + + "subl $2, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels) + :"D"(block), "r"((x86_reg)line_size) + :REG_a, "memory"); +} + +// avg_pixels +static void av_unused DEF(avg, pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_BFE(mm6); + JUMPALIGN(); + do { + __asm__ volatile( + "movd %0, %%mm0 \n\t" + "movd %1, %%mm1 \n\t" + OP_AVG(%%mm0, %%mm1, %%mm2, %%mm6) + "movd %%mm2, %0 \n\t" + :"+m"(*block) + :"m"(*pixels) + :"memory"); + pixels += line_size; + block += line_size; + } + while (--h); +} + +// in case more speed is needed - unroling would certainly help +static void DEF(avg, pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_BFE(mm6); + JUMPALIGN(); + do { + __asm__ volatile( + "movq %0, %%mm0 \n\t" + "movq %1, %%mm1 \n\t" + OP_AVG(%%mm0, %%mm1, %%mm2, %%mm6) + "movq %%mm2, %0 \n\t" + :"+m"(*block) + :"m"(*pixels) + :"memory"); + pixels += line_size; + block += line_size; + } + while (--h); +} + +static void DEF(avg, pixels16)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_BFE(mm6); + JUMPALIGN(); + do { + __asm__ volatile( + "movq %0, %%mm0 \n\t" + "movq %1, %%mm1 \n\t" + OP_AVG(%%mm0, %%mm1, %%mm2, %%mm6) + "movq %%mm2, %0 \n\t" + "movq 8%0, %%mm0 \n\t" + "movq 8%1, %%mm1 \n\t" + OP_AVG(%%mm0, %%mm1, %%mm2, %%mm6) + "movq %%mm2, 8%0 \n\t" + :"+m"(*block) + :"m"(*pixels) + :"memory"); + pixels += line_size; + block += line_size; + } + while (--h); +} + +static void DEF(avg, pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_BFE(mm6); + JUMPALIGN(); + do { + __asm__ volatile( + "movq %1, %%mm0 \n\t" + "movq 1%1, %%mm1 \n\t" + "movq %0, %%mm3 \n\t" + PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) + OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) + "movq %%mm0, %0 \n\t" + :"+m"(*block) + :"m"(*pixels) + :"memory"); + pixels += line_size; + block += line_size; + } while (--h); +} + +static av_unused void DEF(avg, pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +{ + MOVQ_BFE(mm6); + JUMPALIGN(); + do { + __asm__ volatile( + "movq %1, %%mm0 \n\t" + "movq %2, %%mm1 \n\t" + "movq %0, %%mm3 \n\t" + PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) + OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) + "movq %%mm0, %0 \n\t" + :"+m"(*dst) + :"m"(*src1), "m"(*src2) + :"memory"); + dst += dstStride; + src1 += src1Stride; + src2 += 8; + } while (--h); +} + +static void DEF(avg, pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_BFE(mm6); + JUMPALIGN(); + do { + __asm__ volatile( + "movq %1, %%mm0 \n\t" + "movq 1%1, %%mm1 \n\t" + "movq %0, %%mm3 \n\t" + PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) + OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) + "movq %%mm0, %0 \n\t" + "movq 8%1, %%mm0 \n\t" + "movq 9%1, %%mm1 \n\t" + "movq 8%0, %%mm3 \n\t" + PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) + OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) + "movq %%mm0, 8%0 \n\t" + :"+m"(*block) + 
:"m"(*pixels) + :"memory"); + pixels += line_size; + block += line_size; + } while (--h); +} + +static av_unused void DEF(avg, pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +{ + MOVQ_BFE(mm6); + JUMPALIGN(); + do { + __asm__ volatile( + "movq %1, %%mm0 \n\t" + "movq %2, %%mm1 \n\t" + "movq %0, %%mm3 \n\t" + PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) + OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) + "movq %%mm0, %0 \n\t" + "movq 8%1, %%mm0 \n\t" + "movq 8%2, %%mm1 \n\t" + "movq 8%0, %%mm3 \n\t" + PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) + OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) + "movq %%mm0, 8%0 \n\t" + :"+m"(*dst) + :"m"(*src1), "m"(*src2) + :"memory"); + dst += dstStride; + src1 += src1Stride; + src2 += 16; + } while (--h); +} + +static void DEF(avg, pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_BFE(mm6); + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + "movq (%1), %%mm0 \n\t" + ".p2align 3 \n\t" + "1: \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq (%1, %%"REG_a"), %%mm2 \n\t" + PAVGBP(%%mm1, %%mm0, %%mm4, %%mm2, %%mm1, %%mm5) + "movq (%2), %%mm3 \n\t" + OP_AVG(%%mm3, %%mm4, %%mm0, %%mm6) + "movq (%2, %3), %%mm3 \n\t" + OP_AVG(%%mm3, %%mm5, %%mm1, %%mm6) + "movq %%mm0, (%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + + "movq (%1, %3), %%mm1 \n\t" + "movq (%1, %%"REG_a"), %%mm0 \n\t" + PAVGBP(%%mm1, %%mm2, %%mm4, %%mm0, %%mm1, %%mm5) + "movq (%2), %%mm3 \n\t" + OP_AVG(%%mm3, %%mm4, %%mm2, %%mm6) + "movq (%2, %3), %%mm3 \n\t" + OP_AVG(%%mm3, %%mm5, %%mm1, %%mm6) + "movq %%mm2, (%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D"(block) + :"r"((x86_reg)line_size) + :REG_a, "memory"); +} + +// this routine is 'slightly' suboptimal but mostly unused +static void DEF(avg, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_ZERO(mm7); + SET_RND(mm6); // =2 for rnd and =1 for no_rnd version + __asm__ volatile( + "movq (%1), %%mm0 \n\t" + "movq 1(%1), %%mm4 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm4, %%mm5 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "punpcklbw %%mm7, %%mm4 \n\t" + "punpckhbw %%mm7, %%mm1 \n\t" + "punpckhbw %%mm7, %%mm5 \n\t" + "paddusw %%mm0, %%mm4 \n\t" + "paddusw %%mm1, %%mm5 \n\t" + "xor %%"REG_a", %%"REG_a" \n\t" + "add %3, %1 \n\t" + ".p2align 3 \n\t" + "1: \n\t" + "movq (%1, %%"REG_a"), %%mm0 \n\t" + "movq 1(%1, %%"REG_a"), %%mm2 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm2, %%mm3 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "punpcklbw %%mm7, %%mm2 \n\t" + "punpckhbw %%mm7, %%mm1 \n\t" + "punpckhbw %%mm7, %%mm3 \n\t" + "paddusw %%mm2, %%mm0 \n\t" + "paddusw %%mm3, %%mm1 \n\t" + "paddusw %%mm6, %%mm4 \n\t" + "paddusw %%mm6, %%mm5 \n\t" + "paddusw %%mm0, %%mm4 \n\t" + "paddusw %%mm1, %%mm5 \n\t" + "psrlw $2, %%mm4 \n\t" + "psrlw $2, %%mm5 \n\t" + "movq (%2, %%"REG_a"), %%mm3 \n\t" + "packuswb %%mm5, %%mm4 \n\t" + "pcmpeqd %%mm2, %%mm2 \n\t" + "paddb %%mm2, %%mm2 \n\t" + OP_AVG(%%mm3, %%mm4, %%mm5, %%mm2) + "movq %%mm5, (%2, %%"REG_a") \n\t" + "add %3, %%"REG_a" \n\t" + + "movq (%1, %%"REG_a"), %%mm2 \n\t" // 0 <-> 2 1 <-> 3 + "movq 1(%1, %%"REG_a"), %%mm4 \n\t" + "movq %%mm2, %%mm3 \n\t" + "movq %%mm4, %%mm5 \n\t" + "punpcklbw %%mm7, %%mm2 \n\t" + "punpcklbw %%mm7, %%mm4 \n\t" + "punpckhbw %%mm7, %%mm3 \n\t" + "punpckhbw %%mm7, %%mm5 \n\t" + "paddusw %%mm2, %%mm4 \n\t" + "paddusw %%mm3, %%mm5 \n\t" + "paddusw %%mm6, %%mm0 \n\t" + 
"paddusw %%mm6, %%mm1 \n\t" + "paddusw %%mm4, %%mm0 \n\t" + "paddusw %%mm5, %%mm1 \n\t" + "psrlw $2, %%mm0 \n\t" + "psrlw $2, %%mm1 \n\t" + "movq (%2, %%"REG_a"), %%mm3 \n\t" + "packuswb %%mm1, %%mm0 \n\t" + "pcmpeqd %%mm2, %%mm2 \n\t" + "paddb %%mm2, %%mm2 \n\t" + OP_AVG(%%mm3, %%mm0, %%mm1, %%mm2) + "movq %%mm1, (%2, %%"REG_a") \n\t" + "add %3, %%"REG_a" \n\t" + + "subl $2, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels) + :"D"(block), "r"((x86_reg)line_size) + :REG_a, "memory"); +} + +//FIXME optimize +static void DEF(put, pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ + DEF(put, pixels8_y2)(block , pixels , line_size, h); + DEF(put, pixels8_y2)(block+8, pixels+8, line_size, h); +} + +static void DEF(put, pixels16_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ + DEF(put, pixels8_xy2)(block , pixels , line_size, h); + DEF(put, pixels8_xy2)(block+8, pixels+8, line_size, h); +} + +static void DEF(avg, pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ + DEF(avg, pixels8_y2)(block , pixels , line_size, h); + DEF(avg, pixels8_y2)(block+8, pixels+8, line_size, h); +} + +static void DEF(avg, pixels16_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ + DEF(avg, pixels8_xy2)(block , pixels , line_size, h); + DEF(avg, pixels8_xy2)(block+8, pixels+8, line_size, h); +} diff --git a/libavcodec/x86/dsputilenc_mmx.c b/libavcodec/x86/dsputilenc_mmx.c index 10331327bf..c9797ef31f 100644 --- a/libavcodec/x86/dsputilenc_mmx.c +++ b/libavcodec/x86/dsputilenc_mmx.c @@ -1041,7 +1041,7 @@ static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int si #define SET_RND MOVQ_WONE #define SCALE_OFFSET 1 -#include "dsputil_mmx_qns_template.c" +#include "dsputil_qns_template.c" #undef DEF #undef SET_RND @@ -1055,7 +1055,7 @@ static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int si "pmulhrw " #s ", "#x " \n\t"\ "pmulhrw " #s ", "#y " \n\t" -#include "dsputil_mmx_qns_template.c" +#include "dsputil_qns_template.c" #undef DEF #undef SET_RND @@ -1074,7 +1074,7 @@ static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int si "pmulhrsw " #s ", "#x " \n\t"\ "pmulhrsw " #s ", "#y " \n\t" -#include "dsputil_mmx_qns_template.c" +#include "dsputil_qns_template.c" #undef DEF #undef SET_RND -- cgit v1.2.3 From 1169f0d0afc0454633cfcfad73643f0458521c67 Mon Sep 17 00:00:00 2001 From: Diego Biurrun <diego@biurrun.de> Date: Tue, 4 Sep 2012 08:30:16 +0200 Subject: x86: more specific checks for availability of required assembly capabilities --- libavcodec/x86/dnxhdenc.c | 8 ++++---- libavcodec/x86/lpc.c | 8 ++++---- libavcodec/x86/mpegaudiodec.c | 9 +++++---- 3 files changed, 13 insertions(+), 12 deletions(-) (limited to 'libavcodec') diff --git a/libavcodec/x86/dnxhdenc.c b/libavcodec/x86/dnxhdenc.c index c344afec55..43ee246221 100644 --- a/libavcodec/x86/dnxhdenc.c +++ b/libavcodec/x86/dnxhdenc.c @@ -24,7 +24,7 @@ #include "libavutil/x86/asm.h" #include "libavcodec/dnxhdenc.h" -#if HAVE_INLINE_ASM +#if HAVE_SSE2_INLINE static void get_pixels_8x4_sym_sse2(DCTELEM *block, const uint8_t *pixels, int line_size) { @@ -52,14 +52,14 @@ static void get_pixels_8x4_sym_sse2(DCTELEM *block, const uint8_t *pixels, int l ); } -#endif /* HAVE_INLINE_ASM */ +#endif /* HAVE_SSE2_INLINE */ void ff_dnxhdenc_init_x86(DNXHDEncContext *ctx) { -#if HAVE_INLINE_ASM +#if HAVE_SSE2_INLINE if (av_get_cpu_flags() & AV_CPU_FLAG_SSE2) { if (ctx->cid_table->bit_depth == 8) ctx->get_pixels_8x4_sym = 
get_pixels_8x4_sym_sse2; } -#endif /* HAVE_INLINE_ASM */ +#endif /* HAVE_SSE2_INLINE */ } diff --git a/libavcodec/x86/lpc.c b/libavcodec/x86/lpc.c index 82f77612f2..b8c77e28f4 100644 --- a/libavcodec/x86/lpc.c +++ b/libavcodec/x86/lpc.c @@ -24,7 +24,7 @@ #include "libavutil/internal.h" #include "libavcodec/lpc.h" -#if HAVE_INLINE_ASM +#if HAVE_SSE2_INLINE static void lpc_apply_welch_window_sse2(const int32_t *data, int len, double *w_data) @@ -139,16 +139,16 @@ static void lpc_compute_autocorr_sse2(const double *data, int len, int lag, } } -#endif /* HAVE_INLINE_ASM */ +#endif /* HAVE_SSE2_INLINE */ av_cold void ff_lpc_init_x86(LPCContext *c) { +#if HAVE_SSE2_INLINE int mm_flags = av_get_cpu_flags(); -#if HAVE_INLINE_ASM if (mm_flags & (AV_CPU_FLAG_SSE2|AV_CPU_FLAG_SSE2SLOW)) { c->lpc_apply_welch_window = lpc_apply_welch_window_sse2; c->lpc_compute_autocorr = lpc_compute_autocorr_sse2; } -#endif /* HAVE_INLINE_ASM */ +#endif /* HAVE_SSE2_INLINE */ } diff --git a/libavcodec/x86/mpegaudiodec.c b/libavcodec/x86/mpegaudiodec.c index d2573dd274..e7c7fbbf48 100644 --- a/libavcodec/x86/mpegaudiodec.c +++ b/libavcodec/x86/mpegaudiodec.c @@ -36,7 +36,7 @@ void ff_four_imdct36_float_avx(float *out, float *buf, float *in, float *win, DECLARE_ALIGNED(16, static float, mdct_win_sse)[2][4][4*40]; -#if HAVE_INLINE_ASM +#if HAVE_SSE2_INLINE #define MACS(rt, ra, rb) rt+=(ra)*(rb) #define MLSS(rt, ra, rb) rt-=(ra)*(rb) @@ -180,7 +180,7 @@ static void apply_window_mp3(float *in, float *win, int *unused, float *out, *out = sum; } -#endif /* HAVE_INLINE_ASM */ +#endif /* HAVE_SSE2_INLINE */ #if HAVE_YASM #define DECL_IMDCT_BLOCKS(CPU1, CPU2) \ @@ -240,11 +240,12 @@ void ff_mpadsp_init_mmx(MPADSPContext *s) } } -#if HAVE_INLINE_ASM +#if HAVE_SSE2_INLINE if (mm_flags & AV_CPU_FLAG_SSE2) { s->apply_window_float = apply_window_mp3; } -#endif /* HAVE_INLINE_ASM */ +#endif /* HAVE_SSE2_INLINE */ + #if HAVE_YASM if (mm_flags & AV_CPU_FLAG_AVX && HAVE_AVX) { s->imdct36_blocks_float = imdct36_blocks_avx; -- cgit v1.2.3
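The HAVE_SSE2_INLINE guard introduced in the last patch folds the two build-time conditions the old code spelled out separately (inline asm usable at all, toolchain able to build SSE2 code) into a single macro from the generated config.h, so each init function keeps only the runtime CPU-flag test. Below is a minimal, self-contained sketch of that two-level guard; apart from AV_CPU_FLAG_SSE2 (value as in libavutil/cpu.h), every name is an illustrative stand-in rather than the real configure output:

#include <stdio.h>

/* Stand-ins for what configure would normally write into config.h.
 * HAVE_SSE2_INLINE is assumed here to mean "inline asm AND SSE2 are
 * both usable at build time"; the real definition is generated. */
#define HAVE_INLINE_ASM  1
#define HAVE_SSE2        1
#define HAVE_SSE2_INLINE (HAVE_INLINE_ASM && HAVE_SSE2)

#define AV_CPU_FLAG_SSE2 0x0010   /* value as in libavutil/cpu.h */

/* Stand-in for av_get_cpu_flags(): pretend the host CPU has SSE2. */
static int get_cpu_flags(void) { return AV_CPU_FLAG_SSE2; }

int main(void)
{
#if HAVE_SSE2_INLINE                         /* build-time capability */
    if (get_cpu_flags() & AV_CPU_FLAG_SSE2)  /* run-time capability   */
        puts("using SSE2 inline-asm version");
    else
#endif
        puts("using generic C version");
    return 0;
}

If HAVE_SSE2_INLINE evaluates to 0, the preprocessor removes the whole SSE2 branch and only the generic path remains, which is exactly the effect the patch relies on in dnxhdenc.c, lpc.c and mpegaudiodec.c.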