author     Alexander Strange <astrange@ithinksw.com>  2007-08-14 22:28:09 +0000
committer  Guillaume Poirier <gpoirier@mplayerhq.hu>  2007-08-14 22:28:09 +0000
commit     6d324c813fba2b5b59fcba108105c52a3591d96c (patch)
tree       89d42cfd4094b1cbb52123fc99bc03ed45e3e572
parent     77cb22fa7b3f632c16aa3d4e7aa7d47f9cd99f2c (diff)
Statements like a = b = c = d = e; store from right to left, so if
you write them in the right order it comes out backwards. This removes
them from fill_rectangle().

patch by Alexander Strange %astrange A ithinksw P com%

Original thread:
Date: Aug 14, 2007 5:36 AM
Subject: [FFmpeg-devel] [PATCH] two small h264 optimizations

Originally committed as revision 10118 to svn://svn.ffmpeg.org/ffmpeg/trunk
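For illustration only, a minimal standalone sketch of the pattern being removed
(the fill2_* helpers are hypothetical, not FFmpeg code): a chained assignment
a = b = v parses as a = (b = v), so the rightmost store is conceptually
performed first, and a chain written over ascending addresses effectively
stores them in descending order.

    #include <stdint.h>

    /* Chained form (before this commit): the store to p + 1*stride is
     * conceptually performed first, so the writes land in descending
     * address order relative to the textual order. */
    static void fill2_chained(uint8_t *p, int stride, uint32_t v)
    {
        *(uint32_t*)(p + 0*stride)=
        *(uint32_t*)(p + 1*stride)= v;
    }

    /* Unchained form (after this commit): one store per statement,
     * in ascending address order. */
    static void fill2_unchained(uint8_t *p, int stride, uint32_t v)
    {
        *(uint32_t*)(p + 0*stride)= v;
        *(uint32_t*)(p + 1*stride)= v;
    }

    int main(void)
    {
        static uint32_t backing[16];       /* keeps the stores aligned */
        uint8_t *buf = (uint8_t *)backing;

        fill2_chained(buf, 16, 0x01010101u);
        fill2_unchained(buf + 32, 16, 0x01010101u);
        return 0;
    }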
-rw-r--r--  libavcodec/h264.c  |  56
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index 8f20bad8a3..676ce5160f 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -91,7 +91,7 @@ static av_always_inline void fill_rectangle(void *vp, int w, int h, int stride,
if(h==1) return;
*(uint16_t*)(p + 1*stride)= v;
if(h==2) return;
- *(uint16_t*)(p + 2*stride)=
+ *(uint16_t*)(p + 2*stride)= v;
*(uint16_t*)(p + 3*stride)= v;
}else if(w==4){
const uint32_t v= size==4 ? val : val*0x01010101;
@@ -99,7 +99,7 @@ static av_always_inline void fill_rectangle(void *vp, int w, int h, int stride,
if(h==1) return;
*(uint32_t*)(p + 1*stride)= v;
if(h==2) return;
- *(uint32_t*)(p + 2*stride)=
+ *(uint32_t*)(p + 2*stride)= v;
*(uint32_t*)(p + 3*stride)= v;
}else if(w==8){
//gcc can't optimize 64bit math on x86_32
@@ -109,47 +109,47 @@ static av_always_inline void fill_rectangle(void *vp, int w, int h, int stride,
if(h==1) return;
*(uint64_t*)(p + 1*stride)= v;
if(h==2) return;
- *(uint64_t*)(p + 2*stride)=
+ *(uint64_t*)(p + 2*stride)= v;
*(uint64_t*)(p + 3*stride)= v;
}else if(w==16){
const uint64_t v= val*0x0100000001ULL;
- *(uint64_t*)(p + 0+0*stride)=
- *(uint64_t*)(p + 8+0*stride)=
- *(uint64_t*)(p + 0+1*stride)=
+ *(uint64_t*)(p + 0+0*stride)= v;
+ *(uint64_t*)(p + 8+0*stride)= v;
+ *(uint64_t*)(p + 0+1*stride)= v;
*(uint64_t*)(p + 8+1*stride)= v;
if(h==2) return;
- *(uint64_t*)(p + 0+2*stride)=
- *(uint64_t*)(p + 8+2*stride)=
- *(uint64_t*)(p + 0+3*stride)=
+ *(uint64_t*)(p + 0+2*stride)= v;
+ *(uint64_t*)(p + 8+2*stride)= v;
+ *(uint64_t*)(p + 0+3*stride)= v;
*(uint64_t*)(p + 8+3*stride)= v;
#else
- *(uint32_t*)(p + 0+0*stride)=
+ *(uint32_t*)(p + 0+0*stride)= val;
*(uint32_t*)(p + 4+0*stride)= val;
if(h==1) return;
- *(uint32_t*)(p + 0+1*stride)=
+ *(uint32_t*)(p + 0+1*stride)= val;
*(uint32_t*)(p + 4+1*stride)= val;
if(h==2) return;
- *(uint32_t*)(p + 0+2*stride)=
- *(uint32_t*)(p + 4+2*stride)=
- *(uint32_t*)(p + 0+3*stride)=
+ *(uint32_t*)(p + 0+2*stride)= val;
+ *(uint32_t*)(p + 4+2*stride)= val;
+ *(uint32_t*)(p + 0+3*stride)= val;
*(uint32_t*)(p + 4+3*stride)= val;
}else if(w==16){
- *(uint32_t*)(p + 0+0*stride)=
- *(uint32_t*)(p + 4+0*stride)=
- *(uint32_t*)(p + 8+0*stride)=
- *(uint32_t*)(p +12+0*stride)=
- *(uint32_t*)(p + 0+1*stride)=
- *(uint32_t*)(p + 4+1*stride)=
- *(uint32_t*)(p + 8+1*stride)=
+ *(uint32_t*)(p + 0+0*stride)= val;
+ *(uint32_t*)(p + 4+0*stride)= val;
+ *(uint32_t*)(p + 8+0*stride)= val;
+ *(uint32_t*)(p +12+0*stride)= val;
+ *(uint32_t*)(p + 0+1*stride)= val;
+ *(uint32_t*)(p + 4+1*stride)= val;
+ *(uint32_t*)(p + 8+1*stride)= val;
*(uint32_t*)(p +12+1*stride)= val;
if(h==2) return;
- *(uint32_t*)(p + 0+2*stride)=
- *(uint32_t*)(p + 4+2*stride)=
- *(uint32_t*)(p + 8+2*stride)=
- *(uint32_t*)(p +12+2*stride)=
- *(uint32_t*)(p + 0+3*stride)=
- *(uint32_t*)(p + 4+3*stride)=
- *(uint32_t*)(p + 8+3*stride)=
+ *(uint32_t*)(p + 0+2*stride)= val;
+ *(uint32_t*)(p + 4+2*stride)= val;
+ *(uint32_t*)(p + 8+2*stride)= val;
+ *(uint32_t*)(p +12+2*stride)= val;
+ *(uint32_t*)(p + 0+3*stride)= val;
+ *(uint32_t*)(p + 4+3*stride)= val;
+ *(uint32_t*)(p + 8+3*stride)= val;
*(uint32_t*)(p +12+3*stride)= val;
#endif
}else
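A side note on the constants visible in the diff context above: val*0x01010101
replicates a byte value into all four bytes of a 32-bit word, and
val*0x0100000001ULL replicates a 32-bit value into both halves of a 64-bit
word. A minimal standalone check (not part of the commit) of that splat trick:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t byte_val = 0xAB;
        uint64_t word_val = 0xDEADBEEF;

        /* 0x01010101 = 2^24 + 2^16 + 2^8 + 1: copies the low byte
         * into every byte of the 32-bit result. */
        assert(byte_val * 0x01010101u == 0xABABABABu);

        /* 0x0100000001 = 2^32 + 1: copies the low 32 bits into the
         * high 32 bits as well. */
        assert(word_val * 0x0100000001ULL == 0xDEADBEEFDEADBEEFULL);

        return 0;
    }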