author      Michael Niedermayer <michaelni@gmx.at>     2002-09-11 12:39:53 +0000
committer   Michael Niedermayer <michaelni@gmx.at>     2002-09-11 12:39:53 +0000
commit      b3184779924e40e82b1f92b4b315b2c4074a9669 (patch)
tree        6f76a3ff7ce70d6d424f60206de7496f3845873f /libavcodec/i386
parent      6b460aa387530feefc91302c150a3405997e61cf (diff)
download    ffmpeg-b3184779924e40e82b1f92b4b315b2c4074a9669.tar.gz
put/avg_pixels16
fixing 2 small qpel bugs
Originally committed as revision 915 to svn://svn.ffmpeg.org/ffmpeg/trunk
Diffstat (limited to 'libavcodec/i386')
-rw-r--r--   libavcodec/i386/dsputil_mmx.c      | 167
-rw-r--r--   libavcodec/i386/dsputil_mmx_avg.h  |  87
-rw-r--r--   libavcodec/i386/dsputil_mmx_rnd.h  | 132
3 files changed, 325 insertions, 61 deletions
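The heart of the dsputil_mmx.c change is that the motion-compensation function-pointer tables gain a size dimension: the first index now selects the block width (0 = 16 pixels wide, 1 = 8 pixels wide) and the second index still selects the half-pel case (0 = plain copy, 1 = horizontal x2, 2 = vertical y2, 3 = diagonal xy2), which is what the new [0][n] / [1][n] assignments in dsputil_init_mmx() below express. A minimal sketch of that layout and of a dispatch call follows; the typedef name and the mc_example() helper are illustrative assumptions, not code from this commit:

/* Sketch only: 2-D tables indexed by [block width][half-pel case].
 * The pointer signature matches the kernels in the diff below. */
typedef void (*op_pixels_func)(UINT8 *block, const UINT8 *pixels,
                               int line_size, int h);

op_pixels_func put_pixels_tab[2][4];   /* [0][*] = 16 wide, [1][*] = 8 wide */
op_pixels_func avg_pixels_tab[2][4];

/* Hypothetical caller: average-predict a 16x16 block whose motion vector
 * has half-pel components dx, dy (each 0 or 1). */
static void mc_example(UINT8 *dst, const UINT8 *src, int stride, int dx, int dy)
{
    int hp = (dx & 1) | ((dy & 1) << 1);   /* 0 = copy, 1 = x2, 2 = y2, 3 = xy2 */
    avg_pixels_tab[0][hp](dst, src, stride, 16);
}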
diff --git a/libavcodec/i386/dsputil_mmx.c b/libavcodec/i386/dsputil_mmx.c
index fed1818743..4336e4bde0 100644
--- a/libavcodec/i386/dsputil_mmx.c
+++ b/libavcodec/i386/dsputil_mmx.c
@@ -343,7 +343,7 @@ static void add_pixels_clamped_mmx(const DCTELEM *block, UINT8 *pixels, int line
     } while (--i);
 }
 
-static void put_pixels_mmx(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void put_pixels8_mmx(UINT8 *block, const UINT8 *pixels, int line_size, int h)
 {
     __asm __volatile(
         "lea (%3, %3), %%eax \n\t"
@@ -369,6 +369,40 @@ static void put_pixels_mmx(UINT8 *block, const UINT8 *pixels, int line_size, int
     );
 }
 
+static void put_pixels16_mmx(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+    __asm __volatile(
+        "lea (%3, %3), %%eax \n\t"
+        ".balign 8 \n\t"
+        "1: \n\t"
+        "movq (%1), %%mm0 \n\t"
+        "movq 8(%1), %%mm4 \n\t"
+        "movq (%1, %3), %%mm1 \n\t"
+        "movq 8(%1, %3), %%mm5 \n\t"
+        "movq %%mm0, (%2) \n\t"
+        "movq %%mm4, 8(%2) \n\t"
+        "movq %%mm1, (%2, %3) \n\t"
+        "movq %%mm5, 8(%2, %3) \n\t"
+        "addl %%eax, %1 \n\t"
+        "addl %%eax, %2 \n\t"
+        "movq (%1), %%mm0 \n\t"
+        "movq 8(%1), %%mm4 \n\t"
+        "movq (%1, %3), %%mm1 \n\t"
+        "movq 8(%1, %3), %%mm5 \n\t"
+        "movq %%mm0, (%2) \n\t"
+        "movq %%mm4, 8(%2) \n\t"
+        "movq %%mm1, (%2, %3) \n\t"
+        "movq %%mm5, 8(%2, %3) \n\t"
+        "addl %%eax, %1 \n\t"
+        "addl %%eax, %2 \n\t"
+        "subl $4, %0 \n\t"
+        "jnz 1b \n\t"
+        : "+g"(h), "+r" (pixels), "+r" (block)
+        : "r"(line_size)
+        : "%eax", "memory"
+        );
+}
+
 static void clear_blocks_mmx(DCTELEM *blocks)
 {
     __asm __volatile(
@@ -424,25 +458,45 @@ void dsputil_init_mmx(void)
         pix_abs8x8_y2 = pix_abs8x8_y2_mmx;
         pix_abs8x8_xy2= pix_abs8x8_xy2_mmx;
 
-        put_pixels_tab[0] = put_pixels_mmx;
-        put_pixels_tab[1] = put_pixels_x2_mmx;
-        put_pixels_tab[2] = put_pixels_y2_mmx;
-        put_pixels_tab[3] = put_pixels_xy2_mmx;
-
-        put_no_rnd_pixels_tab[0] = put_pixels_mmx;
-        put_no_rnd_pixels_tab[1] = put_no_rnd_pixels_x2_mmx;
-        put_no_rnd_pixels_tab[2] = put_no_rnd_pixels_y2_mmx;
-        put_no_rnd_pixels_tab[3] = put_no_rnd_pixels_xy2_mmx;
-
-        avg_pixels_tab[0] = avg_pixels_mmx;
-        avg_pixels_tab[1] = avg_pixels_x2_mmx;
-        avg_pixels_tab[2] = avg_pixels_y2_mmx;
-        avg_pixels_tab[3] = avg_pixels_xy2_mmx;
-
-        avg_no_rnd_pixels_tab[0] = avg_no_rnd_pixels_mmx;
-        avg_no_rnd_pixels_tab[1] = avg_no_rnd_pixels_x2_mmx;
-        avg_no_rnd_pixels_tab[2] = avg_no_rnd_pixels_y2_mmx;
-        avg_no_rnd_pixels_tab[3] = avg_no_rnd_pixels_xy2_mmx;
+        put_pixels_tab[0][0] = put_pixels16_mmx;
+        put_pixels_tab[0][1] = put_pixels16_x2_mmx;
+        put_pixels_tab[0][2] = put_pixels16_y2_mmx;
+        put_pixels_tab[0][3] = put_pixels16_xy2_mmx;
+
+        put_no_rnd_pixels_tab[0][0] = put_pixels16_mmx;
+        put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx;
+        put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx;
+        put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_mmx;
+
+        avg_pixels_tab[0][0] = avg_pixels16_mmx;
+        avg_pixels_tab[0][1] = avg_pixels16_x2_mmx;
+        avg_pixels_tab[0][2] = avg_pixels16_y2_mmx;
+        avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx;
+
+        avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_mmx;
+        avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x2_mmx;
+        avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y2_mmx;
+        avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy2_mmx;
+
+        put_pixels_tab[1][0] = put_pixels8_mmx;
+        put_pixels_tab[1][1] = put_pixels8_x2_mmx;
+        put_pixels_tab[1][2] = put_pixels8_y2_mmx;
+        put_pixels_tab[1][3] = put_pixels8_xy2_mmx;
+
+        put_no_rnd_pixels_tab[1][0] = put_pixels8_mmx;
+        put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx;
+        put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx;
+        put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_mmx;
+
+        avg_pixels_tab[1][0] = avg_pixels8_mmx;
+        avg_pixels_tab[1][1] = avg_pixels8_x2_mmx;
+        avg_pixels_tab[1][2] = avg_pixels8_y2_mmx;
+        avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx;
+
+        avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_mmx;
+        avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x2_mmx;
+        avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y2_mmx;
+        avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy2_mmx;
 
         if (mm_flags & MM_MMXEXT) {
             pix_abs16x16 = pix_abs16x16_mmx2;
@@ -455,25 +509,45 @@ void dsputil_init_mmx(void)
             pix_abs8x8_y2 = pix_abs8x8_y2_mmx2;
             pix_abs8x8_xy2= pix_abs8x8_xy2_mmx2;
 
-            put_pixels_tab[1] = put_pixels_x2_mmx2;
-            put_pixels_tab[2] = put_pixels_y2_mmx2;
-            put_no_rnd_pixels_tab[1] = put_no_rnd_pixels_x2_mmx2;
-            put_no_rnd_pixels_tab[2] = put_no_rnd_pixels_y2_mmx2;
-
-            avg_pixels_tab[0] = avg_pixels_mmx2;
-            avg_pixels_tab[1] = avg_pixels_x2_mmx2;
-            avg_pixels_tab[2] = avg_pixels_y2_mmx2;
-            avg_pixels_tab[3] = avg_pixels_xy2_mmx2;
+            put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
+            put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
+            put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
+            put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
+
+            avg_pixels_tab[0][0] = avg_pixels16_mmx2;
+            avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
+            avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;
+            avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
+
+            put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
+            put_pixels_tab[1][2] = put_pixels8_y2_mmx2;
+            put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
+            put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
+
+            avg_pixels_tab[1][0] = avg_pixels8_mmx2;
+            avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
+            avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
+            avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
         } else if (mm_flags & MM_3DNOW) {
-            put_pixels_tab[1] = put_pixels_x2_3dnow;
-            put_pixels_tab[2] = put_pixels_y2_3dnow;
-            put_no_rnd_pixels_tab[1] = put_no_rnd_pixels_x2_3dnow;
-            put_no_rnd_pixels_tab[2] = put_no_rnd_pixels_y2_3dnow;
-
-            avg_pixels_tab[0] = avg_pixels_3dnow;
-            avg_pixels_tab[1] = avg_pixels_x2_3dnow;
-            avg_pixels_tab[2] = avg_pixels_y2_3dnow;
-            avg_pixels_tab[3] = avg_pixels_xy2_3dnow;
+            put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
+            put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
+            put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
+            put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
+
+            avg_pixels_tab[0][0] = avg_pixels16_3dnow;
+            avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
+            avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
+            avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
+
+            put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
+            put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
+            put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
+            put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
+
+            avg_pixels_tab[1][0] = avg_pixels8_3dnow;
+            avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
+            avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
+            avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
         }
 
     /* idct */
@@ -552,21 +626,22 @@ void bit_exact_idct_put(UINT8 *dest, int line_size, INT16 *block){
 void dsputil_set_bit_exact_mmx(void)
 {
     if (mm_flags & MM_MMX) {
-        if (mm_flags & MM_MMXEXT) {
-            put_no_rnd_pixels_tab[1] = put_no_rnd_pixels_x2_mmx;
-            put_no_rnd_pixels_tab[2] = put_no_rnd_pixels_y2_mmx;
-            avg_pixels_tab[3] = avg_pixels_xy2_mmx;
+
+        /* MMX2 & 3DNOW */
+        put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx;
+        put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx;
+        avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx;
+        put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx;
+        put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx;
+        avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx;
 
+        if (mm_flags & MM_MMXEXT) {
             pix_abs16x16_x2 = pix_abs16x16_x2_mmx;
             pix_abs16x16_y2 = pix_abs16x16_y2_mmx;
             pix_abs16x16_xy2 = pix_abs16x16_xy2_mmx;
             pix_abs8x8_x2 = pix_abs8x8_x2_mmx;
             pix_abs8x8_y2 = pix_abs8x8_y2_mmx;
             pix_abs8x8_xy2= pix_abs8x8_xy2_mmx;
-        } else if (mm_flags & MM_3DNOW) {
-            put_no_rnd_pixels_tab[1] = put_no_rnd_pixels_x2_mmx;
-            put_no_rnd_pixels_tab[2] = put_no_rnd_pixels_y2_mmx;
-            avg_pixels_tab[3] = avg_pixels_xy2_mmx;
         }
 #ifdef SIMPLE_IDCT
         if(ff_idct_put==gen_idct_put && ff_idct == simple_idct_mmx)
diff --git a/libavcodec/i386/dsputil_mmx_avg.h b/libavcodec/i386/dsputil_mmx_avg.h
index a16ccc88b0..6873432ce8 100644
--- a/libavcodec/i386/dsputil_mmx_avg.h
+++ b/libavcodec/i386/dsputil_mmx_avg.h
@@ -25,7 +25,7 @@
 /* XXX: we use explicit registers to avoid a gcc 2.95.2 register asm
    clobber bug - now it will work with 2.95.2 and also with -fPIC
  */
-static void DEF(put_pixels_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(put_pixels8_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
 {
     __asm __volatile(
         "lea (%3, %3), %%eax \n\t"
@@ -52,9 +52,49 @@ static void DEF(put_pixels_x2)(UINT8 *block, const UINT8 *pixels, int line_size,
         :"r" (line_size)
         :"%eax", "memory");
 }
+
+static void DEF(put_pixels16_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+    __asm __volatile(
+        "lea (%3, %3), %%eax \n\t"
+        "1: \n\t"
+        "movq (%1), %%mm0 \n\t"
+        "movq (%1, %3), %%mm1 \n\t"
+        "movq 8(%1), %%mm2 \n\t"
+        "movq 8(%1, %3), %%mm3 \n\t"
+        PAVGB" 1(%1), %%mm0 \n\t"
+        PAVGB" 1(%1, %3), %%mm1 \n\t"
+        PAVGB" 9(%1), %%mm2 \n\t"
+        PAVGB" 9(%1, %3), %%mm3 \n\t"
+        "movq %%mm0, (%2) \n\t"
+        "movq %%mm1, (%2, %3) \n\t"
+        "movq %%mm2, 8(%2) \n\t"
+        "movq %%mm3, 8(%2, %3) \n\t"
+        "addl %%eax, %1 \n\t"
+        "addl %%eax, %2 \n\t"
+        "movq (%1), %%mm0 \n\t"
+        "movq (%1, %3), %%mm1 \n\t"
+        "movq 8(%1), %%mm2 \n\t"
+        "movq 8(%1, %3), %%mm3 \n\t"
+        PAVGB" 1(%1), %%mm0 \n\t"
+        PAVGB" 1(%1, %3), %%mm1 \n\t"
+        PAVGB" 9(%1), %%mm2 \n\t"
+        PAVGB" 9(%1, %3), %%mm3 \n\t"
+        "addl %%eax, %1 \n\t"
+        "movq %%mm0, (%2) \n\t"
+        "movq %%mm1, (%2, %3) \n\t"
+        "movq %%mm2, 8(%2) \n\t"
+        "movq %%mm3, 8(%2, %3) \n\t"
+        "addl %%eax, %2 \n\t"
+        "subl $4, %0 \n\t"
+        "jnz 1b \n\t"
+        :"+g"(h), "+S"(pixels), "+D"(block)
+        :"r" (line_size)
+        :"%eax", "memory");
+}
 
 /* GL: this function does incorrect rounding if overflow */
-static void DEF(put_no_rnd_pixels_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(put_no_rnd_pixels8_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
 {
     MOVQ_BONE(mm6);
     __asm __volatile(
@@ -91,7 +131,7 @@ static void DEF(put_no_rnd_pixels_x2)(UINT8 *block, const UINT8 *pixels, int lin
         :"%eax", "memory");
 }
 
-static void DEF(put_pixels_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(put_pixels8_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
 {
     __asm __volatile(
         "lea (%3, %3), %%eax \n\t"
@@ -122,7 +162,7 @@ static void DEF(put_pixels_y2)(UINT8 *block, const UINT8 *pixels, int line_size,
 }
 
 /* GL: this function does incorrect rounding if overflow */
-static void DEF(put_no_rnd_pixels_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(put_no_rnd_pixels8_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
 {
     MOVQ_BONE(mm6);
     __asm __volatile(
@@ -155,7 +195,7 @@ static void DEF(put_no_rnd_pixels_y2)(UINT8 *block, const UINT8 *pixels, int lin
         :"%eax", "memory");
 }
 
-static void DEF(avg_pixels)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(avg_pixels8)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
 {
     __asm __volatile(
         "lea (%3, %3), %%eax \n\t"
@@ -183,7 +223,7 @@ static void DEF(avg_pixels)(UINT8 *block, const UINT8 *pixels, int line_size, in
         :"%eax", "memory");
 }
 
-static void DEF(avg_pixels_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(avg_pixels8_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
 {
     __asm __volatile(
         "lea (%3, %3), %%eax \n\t"
@@ -215,7 +255,7 @@ static void DEF(avg_pixels_x2)(UINT8 *block, const UINT8 *pixels, int line_size,
         :"%eax", "memory");
 }
 
-static void DEF(avg_pixels_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(avg_pixels8_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
 {
     __asm __volatile(
         "lea (%3, %3), %%eax \n\t"
@@ -254,7 +294,7 @@ static void DEF(avg_pixels_y2)(UINT8 *block, const UINT8 *pixels, int line_size,
 }
 
 // Note this is not correctly rounded, but this function is only used for b frames so it doesnt matter
-static void DEF(avg_pixels_xy2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(avg_pixels8_xy2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
 {
     MOVQ_BONE(mm6);
     __asm __volatile(
@@ -294,3 +334,34 @@ static void DEF(avg_pixels_xy2)(UINT8 *block, const UINT8 *pixels, int line_size
         :"r" (line_size)
        :"%eax", "memory");
 }
+
+//FIXME the following could be optimized too ...
+static void DEF(put_no_rnd_pixels16_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h){
+    DEF(put_no_rnd_pixels8_x2)(block  , pixels  , line_size, h);
+    DEF(put_no_rnd_pixels8_x2)(block+8, pixels+8, line_size, h);
+}
+static void DEF(put_pixels16_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h){
+    DEF(put_pixels8_y2)(block  , pixels  , line_size, h);
+    DEF(put_pixels8_y2)(block+8, pixels+8, line_size, h);
+}
+static void DEF(put_no_rnd_pixels16_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h){
+    DEF(put_no_rnd_pixels8_y2)(block  , pixels  , line_size, h);
+    DEF(put_no_rnd_pixels8_y2)(block+8, pixels+8, line_size, h);
+}
+static void DEF(avg_pixels16)(UINT8 *block, const UINT8 *pixels, int line_size, int h){
+    DEF(avg_pixels8)(block  , pixels  , line_size, h);
+    DEF(avg_pixels8)(block+8, pixels+8, line_size, h);
+}
+static void DEF(avg_pixels16_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h){
+    DEF(avg_pixels8_x2)(block  , pixels  , line_size, h);
+    DEF(avg_pixels8_x2)(block+8, pixels+8, line_size, h);
+}
+static void DEF(avg_pixels16_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h){
+    DEF(avg_pixels8_y2)(block  , pixels  , line_size, h);
+    DEF(avg_pixels8_y2)(block+8, pixels+8, line_size, h);
+}
+static void DEF(avg_pixels16_xy2)(UINT8 *block, const UINT8 *pixels, int line_size, int h){
+    DEF(avg_pixels8_xy2)(block  , pixels  , line_size, h);
+    DEF(avg_pixels8_xy2)(block+8, pixels+8, line_size, h);
+}
+
diff --git a/libavcodec/i386/dsputil_mmx_rnd.h b/libavcodec/i386/dsputil_mmx_rnd.h
index 873f4b3e1e..3605e03f9c 100644
--- a/libavcodec/i386/dsputil_mmx_rnd.h
+++ b/libavcodec/i386/dsputil_mmx_rnd.h
@@ -22,7 +22,7 @@
  */
 
 // put_pixels
-static void DEF(put, pixels_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(put, pixels8_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
 {
     MOVQ_BFE(mm6);
     __asm __volatile(
@@ -54,7 +54,53 @@ static void DEF(put, pixels_x2)(UINT8 *block, const UINT8 *pixels, int line_size
         :"eax", "memory");
 }
 
-static void DEF(put, pixels_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(put, pixels16_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+    MOVQ_BFE(mm6);
+    __asm __volatile(
+        "lea (%3, %3), %%eax \n\t"
+        ".balign 8 \n\t"
+        "1: \n\t"
+        "movq (%1), %%mm0 \n\t"
+        "movq 1(%1), %%mm1 \n\t"
+        "movq (%1, %3), %%mm2 \n\t"
+        "movq 1(%1, %3), %%mm3 \n\t"
+        PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+        "movq %%mm4, (%2) \n\t"
+        "movq %%mm5, (%2, %3) \n\t"
+        "movq 8(%1), %%mm0 \n\t"
+        "movq 9(%1), %%mm1 \n\t"
+        "movq 8(%1, %3), %%mm2 \n\t"
+        "movq 9(%1, %3), %%mm3 \n\t"
+        PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+        "movq %%mm4, 8(%2) \n\t"
+        "movq %%mm5, 8(%2, %3) \n\t"
+        "addl %%eax, %1 \n\t"
+        "addl %%eax, %2 \n\t"
+        "movq (%1), %%mm0 \n\t"
+        "movq 1(%1), %%mm1 \n\t"
+        "movq (%1, %3), %%mm2 \n\t"
+        "movq 1(%1, %3), %%mm3 \n\t"
+        PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+        "movq %%mm4, (%2) \n\t"
+        "movq %%mm5, (%2, %3) \n\t"
+        "movq 8(%1), %%mm0 \n\t"
+        "movq 9(%1), %%mm1 \n\t"
+        "movq 8(%1, %3), %%mm2 \n\t"
+        "movq 9(%1, %3), %%mm3 \n\t"
+        PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+        "movq %%mm4, 8(%2) \n\t"
+        "movq %%mm5, 8(%2, %3) \n\t"
+        "addl %%eax, %1 \n\t"
+        "addl %%eax, %2 \n\t"
+        "subl $4, %0 \n\t"
+        "jnz 1b \n\t"
+        :"+g"(h), "+S"(pixels), "+D"(block)
+        :"r"(line_size)
+        :"eax", "memory");
+}
+
+static void DEF(put, pixels8_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
 {
     MOVQ_BFE(mm6);
     __asm __volatile(
@@ -83,7 +129,7 @@ static void DEF(put, pixels_y2)(UINT8 *block, const UINT8 *pixels, int line_size
         :"eax", "memory");
 }
 
-static void DEF(put, pixels_xy2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(put, pixels8_xy2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
 {
     MOVQ_ZERO(mm7);
     SET_RND(mm6); // =2 for rnd and =1 for no_rnd version
@@ -151,7 +197,7 @@ static void DEF(put, pixels_xy2)(UINT8 *block, const UINT8 *pixels, int line_siz
 
 // avg_pixels
 // in case more speed is needed - unroling would certainly help
-static void DEF(avg, pixels)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(avg, pixels8)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
 {
     MOVQ_BFE(mm6);
     JUMPALIGN();
@@ -170,7 +216,30 @@ static void DEF(avg, pixels)(UINT8 *block, const UINT8 *pixels, int line_size, i
     while (--h);
 }
 
-static void DEF(avg, pixels_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(avg, pixels16)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+    MOVQ_BFE(mm6);
+    JUMPALIGN();
+    do {
+        __asm __volatile(
+            "movq %0, %%mm0 \n\t"
+            "movq %1, %%mm1 \n\t"
+            PAVGB(%%mm0, %%mm1, %%mm2, %%mm6)
+            "movq %%mm2, %0 \n\t"
+            "movq 8%0, %%mm0 \n\t"
+            "movq 8%1, %%mm1 \n\t"
+            PAVGB(%%mm0, %%mm1, %%mm2, %%mm6)
+            "movq %%mm2, 8%0 \n\t"
+            :"+m"(*block)
+            :"m"(*pixels)
+            :"memory");
+        pixels += line_size;
+        block += line_size;
+    }
+    while (--h);
+}
+
+static void DEF(avg, pixels8_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
 {
     MOVQ_BFE(mm6);
     JUMPALIGN();
@@ -190,7 +259,33 @@ static void DEF(avg, pixels_x2)(UINT8 *block, const UINT8 *pixels, int line_size
     } while (--h);
 }
 
-static void DEF(avg, pixels_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(avg, pixels16_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+    MOVQ_BFE(mm6);
+    JUMPALIGN();
+    do {
+        __asm __volatile(
+            "movq %1, %%mm0 \n\t"
+            "movq 1%1, %%mm1 \n\t"
+            "movq %0, %%mm3 \n\t"
+            PAVGB(%%mm0, %%mm1, %%mm2, %%mm6)
+            PAVGB(%%mm3, %%mm2, %%mm0, %%mm6)
+            "movq %%mm0, %0 \n\t"
+            "movq 8%1, %%mm0 \n\t"
+            "movq 9%1, %%mm1 \n\t"
+            "movq 8%0, %%mm3 \n\t"
+            PAVGB(%%mm0, %%mm1, %%mm2, %%mm6)
+            PAVGB(%%mm3, %%mm2, %%mm0, %%mm6)
+            "movq %%mm0, 8%0 \n\t"
+            :"+m"(*block)
+            :"m"(*pixels)
+            :"memory");
+        pixels += line_size;
+        block += line_size;
+    } while (--h);
+}
+
+static void DEF(avg, pixels8_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
 {
     MOVQ_BFE(mm6);
     __asm __volatile(
@@ -230,7 +325,7 @@ static void DEF(avg, pixels_y2)(UINT8 *block, const UINT8 *pixels, int line_size
 }
 
 // this routine is 'slightly' suboptimal but mostly unused
-static void DEF(avg, pixels_xy2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(avg, pixels8_xy2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
 {
     MOVQ_ZERO(mm7);
     SET_RND(mm6); // =2 for rnd and =1 for no_rnd version
@@ -303,3 +398,26 @@ static void DEF(avg, pixels_xy2)(UINT8 *block, const UINT8 *pixels, int line_siz
         :"D"(block), "r"(line_size)
         :"eax", "memory");
 }
+
+//FIXME optimize
+static void DEF(put, pixels16_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h){
+    DEF(put, pixels8_y2)(block  , pixels  , line_size, h);
+    DEF(put, pixels8_y2)(block+8, pixels+8, line_size, h);
+}
+
+static void DEF(put, pixels16_xy2)(UINT8 *block, const UINT8 *pixels, int line_size, int h){
+    DEF(put, pixels8_xy2)(block  , pixels  , line_size, h);
+    DEF(put, pixels8_xy2)(block+8, pixels+8, line_size, h);
+}
+
+static void DEF(avg, pixels16_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h){
+    DEF(avg, pixels8_y2)(block  , pixels  , line_size, h);
+    DEF(avg, pixels8_y2)(block+8, pixels+8, line_size, h);
+}
+
+static void DEF(avg, pixels16_xy2)(UINT8 *block, const UINT8 *pixels, int line_size, int h){
+    DEF(avg, pixels8_xy2)(block  , pixels  , line_size, h);
+    DEF(avg, pixels8_xy2)(block+8, pixels+8, line_size, h);
+}
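For readers who do not want to decode the MMX, the arithmetic of the widened kernels is simple: put_pixels8/16 copy 8 or 16 bytes per row, the rounded *_x2 variants average each byte with its right-hand neighbour as (a + b + 1) >> 1 (which is what the PAVGB / PAVGBP macros compute), and the no_rnd variants truncate with (a + b) >> 1 instead. The C sketch below is illustrative only, not the project's reference implementation; the _c names and the local UINT8 typedef are assumptions:

/* Illustrative plain-C equivalents of two of the kernels above. */
typedef unsigned char UINT8;

static void put_pixels8_c(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
    int i, j;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 8; j++)
            block[j] = pixels[j];                      /* straight copy */
        block  += line_size;
        pixels += line_size;
    }
}

static void put_pixels16_x2_c(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
    int i, j;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 16; j++)                       /* rounded horizontal half-pel */
            block[j] = (pixels[j] + pixels[j + 1] + 1) >> 1;
        block  += line_size;
        pixels += line_size;
    }
}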