author      Ronald S. Bultje <rsbultje@gmail.com>    2010-09-29 13:34:20 +0000
committer   Ronald S. Bultje <rsbultje@gmail.com>    2010-09-29 13:34:20 +0000
commit      4b81511cab1d53f7f189bcb09aac4303b20a4ce8 (patch)
tree        20239048a6535354019f5dddd7b5b7afa41898ad /libavcodec/x86
parent      d29e863e1d8a5bbac36ee40d278ebfc5f01abd9a (diff)
download    ffmpeg-4b81511cab1d53f7f189bcb09aac4303b20a4ce8.tar.gz
Unroll the outer loop in h264_loop_filter_strength_mmx2(), which allows
inlining various constants within the loop code. This is 20 cycles faster
on the cathedral sample.
Originally committed as revision 25252 to svn://svn.ffmpeg.org/ffmpeg/trunk
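
The win comes from constant propagation: inside the old for( dir=1; dir>=0; dir-- )
loop, direction-dependent values such as d_idx (dir ? -8 : -1) and mask_dir had to
be selected at run time on every pass. Splitting the body into an av_always_inline
helper called twice with literal dir arguments lets the compiler fold those
selections at compile time; a hedged sketch of the pattern follows the diff below.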
Diffstat (limited to 'libavcodec/x86')
-rw-r--r--   libavcodec/x86/h264dsp_mmx.c   54
1 file changed, 29 insertions(+), 25 deletions(-)
diff --git a/libavcodec/x86/h264dsp_mmx.c b/libavcodec/x86/h264dsp_mmx.c
index 249675f391..4df3f12154 100644
--- a/libavcodec/x86/h264dsp_mmx.c
+++ b/libavcodec/x86/h264dsp_mmx.c
@@ -63,29 +63,12 @@ void ff_h264_idct_add8_sse2 (uint8_t **dest, const int *block_offset, DCTEL
 /***********************************/
 /* deblocking */
 
-static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
-                                            int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field ) {
-    int dir;
-    __asm__ volatile(
-        "movq %0, %%mm7 \n"
-        "movq %1, %%mm6 \n"
-        ::"m"(ff_pb_1), "m"(ff_pb_3)
-    );
-    if(field)
-        __asm__ volatile(
-            "movq %0, %%mm6 \n"
-            ::"m"(ff_pb_3_1)
-        );
-    __asm__ volatile(
-        "movq %%mm6, %%mm5 \n"
-        "paddb %%mm5, %%mm5 \n"
-    :);
-
-    // could do a special case for dir==0 && edges==1, but it only reduces the
-    // average filter time by 1.2%
-    for( dir=1; dir>=0; dir-- ) {
+static av_always_inline void h264_loop_filter_strength_iteration_mmx2(int16_t bS[2][4][4], uint8_t nnz[40],
+                                                                      int8_t ref[2][40], int16_t mv[2][40][2],
+                                                                      int bidir, int edges, int step,
+                                                                      int mask_mv, int dir)
+{
         const x86_reg d_idx = dir ? -8 : -1;
-        const int mask_mv = dir ? mask_mv1 : mask_mv0;
         DECLARE_ALIGNED(8, const uint64_t, mask_dir) = dir ? 0 : 0xffffffffffffffffULL;
         int b_idx, edge;
         for( b_idx=12, edge=0; edge<edges; edge+=step, b_idx+=8*step ) {
@@ -169,9 +152,30 @@ static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40]
                 ::"memory"
             );
         }
-        edges = 4;
-        step = 1;
-    }
+}
+
+static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
+                                            int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field ) {
+    __asm__ volatile(
+        "movq %0, %%mm7 \n"
+        "movq %1, %%mm6 \n"
+        ::"m"(ff_pb_1), "m"(ff_pb_3)
+    );
+    if(field)
+        __asm__ volatile(
+            "movq %0, %%mm6 \n"
+            ::"m"(ff_pb_3_1)
+        );
+    __asm__ volatile(
+        "movq %%mm6, %%mm5 \n"
+        "paddb %%mm5, %%mm5 \n"
+    :);
+
+    // could do a special case for dir==0 && edges==1, but it only reduces the
+    // average filter time by 1.2%
+    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir, edges, step, mask_mv1, 1);
+    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir, 4, 1, mask_mv0, 0);
+
     __asm__ volatile(
         "movq   (%0), %%mm0 \n\t"
         "movq  8(%0), %%mm1 \n\t"
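
To make the transform concrete, here is a minimal, self-contained C sketch of the
same pattern. It is not the actual FFmpeg code: process_edges() and
filter_strength() are hypothetical stand-ins for
h264_loop_filter_strength_iteration_mmx2() and its caller, the MMX asm is replaced
by a trivial loop body, and a local always-inline macro stands in for FFmpeg's
av_always_inline.

    /* Hypothetical sketch of the unrolling pattern; not FFmpeg code. */
    #include <stdint.h>
    #include <stdio.h>

    #define my_always_inline __attribute__((always_inline)) inline

    /* Stand-in for h264_loop_filter_strength_iteration_mmx2(): once this
     * is inlined with a literal dir, d_idx and mask_dir fold to
     * compile-time constants instead of being recomputed on each pass of
     * an outer loop. */
    static my_always_inline void process_edges(int16_t bS[2][4][4],
                                               int edges, int step,
                                               int mask_mv, int dir)
    {
        const int      d_idx    = dir ? -8 : -1;
        const uint64_t mask_dir = dir ? 0 : 0xffffffffffffffffULL;

        for (int edge = 0; edge < edges; edge += step)
            bS[dir][edge & 3][0] = (mask_mv && mask_dir) ? 0 : (int16_t)d_idx;
    }

    /* Stand-in for the rewritten h264_loop_filter_strength_mmx2(): the
     * old "for (dir = 1; dir >= 0; dir--)" loop, which reset edges = 4
     * and step = 1 after its first pass, becomes two explicit calls with
     * constant arguments. */
    static void filter_strength(int16_t bS[2][4][4],
                                int edges, int step, int mask_mv0, int mask_mv1)
    {
        process_edges(bS, edges, step, mask_mv1, 1);   /* dir == 1 */
        process_edges(bS, 4,     1,    mask_mv0, 0);   /* dir == 0 */
    }

    int main(void)
    {
        int16_t bS[2][4][4] = {{{0}}};
        filter_strength(bS, 4, 1, 0, 1);
        printf("bS[0][0][0] = %d, bS[1][0][0] = %d\n", bS[0][0][0], bS[1][0][0]);
        return 0;
    }

Forcing the helper inline, rather than relying on the optimizer to inline it, keeps
the generated code in one function while still specializing each direction, which
is what the commit message means by inlining various constants within the loop code.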