author    | Michael Niedermayer <michaelni@gmx.at> | 2010-02-24 20:37:58 +0000
committer | Michael Niedermayer <michaelni@gmx.at> | 2010-02-24 20:37:58 +0000
commit    | f3ea29bfa31a532365ee58d7dc03b474a72e2ee6 (patch)
tree      | e8e5f316b2ba0bda7c1aeb0cc0ee2d76e5cd72bf /libavcodec
parent    | 81b5e4ee9208e7ed367bf0a9aa4024affa700470 (diff)
download  | ffmpeg-f3ea29bfa31a532365ee58d7dc03b474a72e2ee6.tar.gz
Extend fill_rectangle() support for 16bit
Originally committed as revision 22036 to svn://svn.ffmpeg.org/ffmpeg/trunk
Diffstat (limited to 'libavcodec')
-rw-r--r-- | libavcodec/rectangle.h | 53
1 file changed, 27 insertions, 26 deletions
diff --git a/libavcodec/rectangle.h b/libavcodec/rectangle.h
index cfd0f0696d..b9b2103b45 100644
--- a/libavcodec/rectangle.h
+++ b/libavcodec/rectangle.h
@@ -58,7 +58,7 @@ static av_always_inline void fill_rectangle(void *vp, int w, int h, int stride,
         *(uint16_t*)(p + 2*stride)= v;
         *(uint16_t*)(p + 3*stride)= v;
     }else if(w==4){
-        const uint32_t v= size==4 ? val : val*0x01010101;
+        const uint32_t v= size==4 ? val : size==2 ? val*0x00010001 : val*0x01010101;
         *(uint32_t*)(p + 0*stride)= v;
         if(h==1) return;
         *(uint32_t*)(p + 1*stride)= v;
@@ -68,7 +68,7 @@ static av_always_inline void fill_rectangle(void *vp, int w, int h, int stride,
     }else if(w==8){
     //gcc can't optimize 64bit math on x86_32
 #if HAVE_FAST_64BIT
-        const uint64_t v= val*0x0100000001ULL;
+        const uint64_t v= size==2 ? val*0x0001000100010001ULL : val*0x0100000001ULL;
         *(uint64_t*)(p + 0*stride)= v;
         if(h==1) return;
         *(uint64_t*)(p + 1*stride)= v;
@@ -87,34 +87,35 @@ static av_always_inline void fill_rectangle(void *vp, int w, int h, int stride,
         *(uint64_t*)(p + 0+3*stride)= v;
         *(uint64_t*)(p + 8+3*stride)= v;
 #else
-        *(uint32_t*)(p + 0+0*stride)= val;
-        *(uint32_t*)(p + 4+0*stride)= val;
+        const uint32_t v= size==2 ? val*0x00010001 : val*0x01010101;
+        *(uint32_t*)(p + 0+0*stride)= v;
+        *(uint32_t*)(p + 4+0*stride)= v;
         if(h==1) return;
-        *(uint32_t*)(p + 0+1*stride)= val;
-        *(uint32_t*)(p + 4+1*stride)= val;
+        *(uint32_t*)(p + 0+1*stride)= v;
+        *(uint32_t*)(p + 4+1*stride)= v;
         if(h==2) return;
-        *(uint32_t*)(p + 0+2*stride)= val;
-        *(uint32_t*)(p + 4+2*stride)= val;
-        *(uint32_t*)(p + 0+3*stride)= val;
-        *(uint32_t*)(p + 4+3*stride)= val;
+        *(uint32_t*)(p + 0+2*stride)= v;
+        *(uint32_t*)(p + 4+2*stride)= v;
+        *(uint32_t*)(p + 0+3*stride)= v;
+        *(uint32_t*)(p + 4+3*stride)= v;
     }else if(w==16){
-        *(uint32_t*)(p + 0+0*stride)= val;
-        *(uint32_t*)(p + 4+0*stride)= val;
-        *(uint32_t*)(p + 8+0*stride)= val;
-        *(uint32_t*)(p +12+0*stride)= val;
-        *(uint32_t*)(p + 0+1*stride)= val;
-        *(uint32_t*)(p + 4+1*stride)= val;
-        *(uint32_t*)(p + 8+1*stride)= val;
-        *(uint32_t*)(p +12+1*stride)= val;
+        *(uint32_t*)(p + 0+0*stride)= v;
+        *(uint32_t*)(p + 4+0*stride)= v;
+        *(uint32_t*)(p + 8+0*stride)= v;
+        *(uint32_t*)(p +12+0*stride)= v;
+        *(uint32_t*)(p + 0+1*stride)= v;
+        *(uint32_t*)(p + 4+1*stride)= v;
+        *(uint32_t*)(p + 8+1*stride)= v;
+        *(uint32_t*)(p +12+1*stride)= v;
         if(h==2) return;
-        *(uint32_t*)(p + 0+2*stride)= val;
-        *(uint32_t*)(p + 4+2*stride)= val;
-        *(uint32_t*)(p + 8+2*stride)= val;
-        *(uint32_t*)(p +12+2*stride)= val;
-        *(uint32_t*)(p + 0+3*stride)= val;
-        *(uint32_t*)(p + 4+3*stride)= val;
-        *(uint32_t*)(p + 8+3*stride)= val;
-        *(uint32_t*)(p +12+3*stride)= val;
+        *(uint32_t*)(p + 0+2*stride)= v;
+        *(uint32_t*)(p + 4+2*stride)= v;
+        *(uint32_t*)(p + 8+2*stride)= v;
+        *(uint32_t*)(p +12+2*stride)= v;
+        *(uint32_t*)(p + 0+3*stride)= v;
+        *(uint32_t*)(p + 4+3*stride)= v;
+        *(uint32_t*)(p + 8+3*stride)= v;
+        *(uint32_t*)(p +12+3*stride)= v;
 #endif
     }else
         assert(0);
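
The patch hinges on a constant-multiplication trick the function already used for bytes: a fill value is replicated across a machine word by multiplying it with a constant that has a 1 in each element slot (0x01010101 for bytes, 0x00010001 for the new 16-bit case, and the 64-bit analogues), so each row can then be written with a few wide stores. Below is a minimal standalone sketch of that arithmetic only; it is not FFmpeg code and the variable names are invented for the example.

/*
 * Sketch of the value-replication arithmetic used by fill_rectangle():
 * multiplying an element by a constant with a 1 in every element slot
 * copies the element into each lane of a wider word.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t  b  = 0xAB;        /* size==1 element */
    uint16_t s  = 0xBEEF;      /* size==2 element (the case added here) */
    uint32_t px = 0xDEADBEEF;  /* size==4 element */

    /* size==1: replicate one byte into all four bytes of a 32-bit word. */
    uint32_t v8  = (uint32_t)b * 0x01010101u;               /* 0xABABABAB */

    /* size==2: replicate one 16-bit value into both 16-bit halves. */
    uint32_t v16 = (uint32_t)s * 0x00010001u;               /* 0xBEEFBEEF */

    /* On fast-64-bit targets the same multiply fills a uint64_t directly. */
    uint64_t v16_64 = (uint64_t)s  * 0x0001000100010001ULL; /* 0xBEEF x4 */
    uint64_t v32_64 = (uint64_t)px * 0x0100000001ULL;       /* 0xDEADBEEF x2 */

    assert(v8     == 0xABABABABu);
    assert(v16    == 0xBEEFBEEFu);
    assert(v16_64 == 0xBEEFBEEFBEEFBEEFULL);
    assert(v32_64 == 0xDEADBEEFDEADBEEFULL);

    printf("%08" PRIx32 " %08" PRIx32 " %016" PRIx64 " %016" PRIx64 "\n",
           v8, v16, v16_64, v32_64);
    return 0;
}

The size==2 products above correspond to the new ternary branches in the hunks at lines 58 and 68, and to the uint32_t fallback added in the #else path for 32-bit x86.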