author    Måns Rullgård <mans@mansr.com>  2010-02-18 16:24:31 +0000
committer Måns Rullgård <mans@mansr.com>  2010-02-18 16:24:31 +0000
commit    19769ece3bd99b7570f39c9c605bee1bbada2b57 (patch)
tree      db19c1e9e60ff41d57dfd1a76a4eb08509f924f6 /libavcodec
parent    f4a7434f16afc4616a34a7cd1bd8e592ed04e89f (diff)
download  ffmpeg-19769ece3bd99b7570f39c9c605bee1bbada2b57.tar.gz
H264: use alias-safe macros
This eliminates all aliasing violation warnings in h264 code.
No measurable speed difference with gcc-4.4.3 on i7.

Originally committed as revision 21881 to svn://svn.ffmpeg.org/ffmpeg/trunk
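The casts this commit removes are type-punned pointer dereferences, which the C99 strict-aliasing rules (6.5p7) make undefined behaviour; that is what gcc's aliasing-violation warnings point at. A minimal sketch of the problem and one always-defined alternative (the function names here are illustrative, not part of the patch):

#include <stdint.h>
#include <string.h>

/* The removed pattern: reading a uint8_t buffer through a uint32_t
 * lvalue violates strict aliasing, so the compiler may reorder or
 * cache accesses incorrectly, and gcc warns about it. */
uint32_t read_u32_punned(const uint8_t *p)
{
    return *(const uint32_t *)p;   /* undefined behaviour */
}

/* One well-defined alternative: memcpy, which modern compilers fold
 * into a single load on targets where that is safe. */
uint32_t read_u32_safe(const uint8_t *p)
{
    uint32_t v;
    memcpy(&v, p, sizeof v);
    return v;
}

The AV_RN*A/AV_WN*A/AV_COPY*/AV_ZERO* macros pulled in from libavutil/intreadwrite.h take a different, equally legal route via may_alias unions; a sketch of that approach follows the diffstat.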
Diffstat (limited to 'libavcodec')
-rw-r--r--  libavcodec/h264.c              4
-rw-r--r--  libavcodec/h264.h            111
-rw-r--r--  libavcodec/h264_direct.c      14
-rw-r--r--  libavcodec/h264_loopfilter.c  33
-rw-r--r--  libavcodec/h264_mvpred.h       6
5 files changed, 86 insertions, 82 deletions
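For reference, a minimal sketch of how such alias-safe aligned-access macros can be built on GCC's may_alias attribute. The real definitions live in libavutil/intreadwrite.h, cover more widths, compilers and unaligned variants, and carry the AV_ prefix; the names below are stand-ins:

#include <stdint.h>

/* may_alias exempts this union from strict-aliasing analysis, so an
 * access through it may legally alias an object of any other type. */
typedef union {
    uint32_t u32;
    uint16_t u16[2];
    uint8_t  u8[4];
} __attribute__((may_alias)) alias32;

#define RN32A(p)     (((const alias32 *)(p))->u32)        /* aligned read  */
#define WN32A(p, v)  (((alias32 *)(p))->u32 = (v))        /* aligned write */
#define COPY32(d, s) (((alias32 *)(d))->u32 = RN32A(s))   /* 32-bit copy   */
#define ZERO32(d)    (((alias32 *)(d))->u32 = 0)          /* 32-bit clear  */

Because the alias-safe access compiles to the same single load or store as the old cast, the change costs nothing at runtime, which matches the "no measurable speed difference" note above.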
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index f0abd6bc00..97c97ba7a1 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -156,11 +156,11 @@ const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_l
# if HAVE_FAST_64BIT
# define RS 7
for(i=0; i+1<length; i+=9){
- if(!((~*(const uint64_t*)(src+i) & (*(const uint64_t*)(src+i) - 0x0100010001000101ULL)) & 0x8000800080008080ULL))
+ if(!((~AV_RN64A(src+i) & (AV_RN64A(src+i) - 0x0100010001000101ULL)) & 0x8000800080008080ULL))
# else
# define RS 3
for(i=0; i+1<length; i+=5){
- if(!((~*(const uint32_t*)(src+i) & (*(const uint32_t*)(src+i) - 0x01000101U)) & 0x80008080U))
+ if(!((~AV_RN32A(src+i) & (AV_RN32A(src+i) - 0x01000101U)) & 0x80008080U))
# endif
continue;
if(i>0 && !src[i]) i--;
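Aside from the macro swap, the hunk above shows what this scan is doing: !((~v & (v - C1)) & C2) is a branch-free test for zero bytes, used to skip quickly over input that cannot contain a 00 00 xx start-code escape. A sketch of the underlying trick with the dense constants (the loop above uses a sparser constant pair so that only the byte positions of interest are tested):

#include <stdint.h>
#include <stdio.h>

/* Classic zero-byte detector: nonzero iff some byte of v is 0x00.
 * Subtracting 0x01 from each byte borrows into bit 7 only when the
 * byte was zero; masking with ~v rejects bytes that were >= 0x80. */
static int has_zero_byte(uint32_t v)
{
    return ((v - 0x01010101u) & ~v & 0x80808080u) != 0;
}

int main(void)
{
    printf("%d\n", has_zero_byte(0x12003456u));  /* 1: zero byte present */
    printf("%d\n", has_zero_byte(0x12345678u));  /* 0: no zero byte      */
    return 0;
}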
diff --git a/libavcodec/h264.h b/libavcodec/h264.h
index 10658d36d5..2c677c88b4 100644
--- a/libavcodec/h264.h
+++ b/libavcodec/h264.h
@@ -28,6 +28,7 @@
#ifndef AVCODEC_H264_H
#define AVCODEC_H264_H
+#include "libavutil/intreadwrite.h"
#include "dsputil.h"
#include "cabac.h"
#include "mpegvideo.h"
@@ -921,7 +922,7 @@ static void fill_decode_caches(H264Context *h, int mb_type){
*/
//FIXME constraint_intra_pred & partitioning & nnz (let us hope this is just a typo in the spec)
if(top_type){
- *(uint32_t*)&h->non_zero_count_cache[4+8*0]= *(uint32_t*)&h->non_zero_count[top_xy][4+3*8];
+ AV_COPY32(&h->non_zero_count_cache[4+8*0], &h->non_zero_count[top_xy][4+3*8]);
h->non_zero_count_cache[1+8*0]= h->non_zero_count[top_xy][1+1*8];
h->non_zero_count_cache[2+8*0]= h->non_zero_count[top_xy][2+1*8];
@@ -933,7 +934,7 @@ static void fill_decode_caches(H264Context *h, int mb_type){
h->non_zero_count_cache[1+8*3]=
h->non_zero_count_cache[2+8*3]=
- *(uint32_t*)&h->non_zero_count_cache[4+8*0]= CABAC && !IS_INTRA(mb_type) ? 0 : 0x40404040;
+ AV_WN32A(&h->non_zero_count_cache[4+8*0], CABAC && !IS_INTRA(mb_type) ? 0 : 0x40404040);
}
for (i=0; i<2; i++) {
@@ -1002,7 +1003,7 @@ static void fill_decode_caches(H264Context *h, int mb_type){
h->ref_cache[list][scan8[0] + 3 - 1*8]= s->current_picture.ref_index[list][b8_xy + 1];
}else{
AV_ZERO128(h->mv_cache[list][scan8[0] + 0 - 1*8]);
- *(uint32_t*)&h->ref_cache[list][scan8[0] + 0 - 1*8]= ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE)&0xFF)*0x01010101;
+ AV_WN32A(&h->ref_cache[list][scan8[0] + 0 - 1*8], ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE)&0xFF)*0x01010101);
}
for(i=0; i<2; i++){
@@ -1010,13 +1011,13 @@ static void fill_decode_caches(H264Context *h, int mb_type){
if(USES_LIST(left_type[i], list)){
const int b_xy= h->mb2b_xy[left_xy[i]] + 3;
const int b8_xy= h->mb2b8_xy[left_xy[i]] + 1;
- *(uint32_t*)h->mv_cache[list][cache_idx ]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[0+i*2]];
- *(uint32_t*)h->mv_cache[list][cache_idx+8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[1+i*2]];
+ AV_COPY32(h->mv_cache[list][cache_idx ], s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[0+i*2]]);
+ AV_COPY32(h->mv_cache[list][cache_idx+8], s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[1+i*2]]);
h->ref_cache[list][cache_idx ]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[0+i*2]>>1)];
h->ref_cache[list][cache_idx+8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[1+i*2]>>1)];
}else{
- *(uint32_t*)h->mv_cache [list][cache_idx ]=
- *(uint32_t*)h->mv_cache [list][cache_idx+8]= 0;
+ AV_ZERO32(h->mv_cache [list][cache_idx ]);
+ AV_ZERO32(h->mv_cache [list][cache_idx+8]);
h->ref_cache[list][cache_idx ]=
h->ref_cache[list][cache_idx+8]= (left_type[i]) ? LIST_NOT_USED : PART_NOT_AVAILABLE;
}
@@ -1025,20 +1026,20 @@ static void fill_decode_caches(H264Context *h, int mb_type){
if(USES_LIST(topleft_type, list)){
const int b_xy = h->mb2b_xy [topleft_xy] + 3 + h->b_stride + (h->topleft_partition & 2*h->b_stride);
const int b8_xy= h->mb2b8_xy[topleft_xy] + 1 + (h->topleft_partition & h->b8_stride);
- *(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy];
+ AV_COPY32(h->mv_cache[list][scan8[0] - 1 - 1*8], s->current_picture.motion_val[list][b_xy]);
h->ref_cache[list][scan8[0] - 1 - 1*8]= s->current_picture.ref_index[list][b8_xy];
}else{
- *(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= 0;
+ AV_ZERO32(h->mv_cache[list][scan8[0] - 1 - 1*8]);
h->ref_cache[list][scan8[0] - 1 - 1*8]= topleft_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
}
if(USES_LIST(topright_type, list)){
const int b_xy= h->mb2b_xy[topright_xy] + 3*h->b_stride;
const int b8_xy= h->mb2b8_xy[topright_xy] + h->b8_stride;
- *(uint32_t*)h->mv_cache[list][scan8[0] + 4 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy];
+ AV_COPY32(h->mv_cache[list][scan8[0] + 4 - 1*8], s->current_picture.motion_val[list][b_xy]);
h->ref_cache[list][scan8[0] + 4 - 1*8]= s->current_picture.ref_index[list][b8_xy];
}else{
- *(uint32_t*)h->mv_cache [list][scan8[0] + 4 - 1*8]= 0;
+ AV_ZERO32(h->mv_cache [list][scan8[0] + 4 - 1*8]);
h->ref_cache[list][scan8[0] + 4 - 1*8]= topright_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
}
@@ -1051,11 +1052,11 @@ static void fill_decode_caches(H264Context *h, int mb_type){
h->ref_cache[list][scan8[13]+1] = //FIXME remove past 3 (init somewhere else)
h->ref_cache[list][scan8[4 ]] =
h->ref_cache[list][scan8[12]] = PART_NOT_AVAILABLE;
- *(uint32_t*)h->mv_cache [list][scan8[5 ]+1]=
- *(uint32_t*)h->mv_cache [list][scan8[7 ]+1]=
- *(uint32_t*)h->mv_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewhere else)
- *(uint32_t*)h->mv_cache [list][scan8[4 ]]=
- *(uint32_t*)h->mv_cache [list][scan8[12]]= 0;
+ AV_ZERO32(h->mv_cache [list][scan8[5 ]+1]);
+ AV_ZERO32(h->mv_cache [list][scan8[7 ]+1]);
+ AV_ZERO32(h->mv_cache [list][scan8[13]+1]); //FIXME remove past 3 (init somewhere else)
+ AV_ZERO32(h->mv_cache [list][scan8[4 ]]);
+ AV_ZERO32(h->mv_cache [list][scan8[12]]);
if( CABAC ) {
/* XXX beurk, Load mvd */
@@ -1067,37 +1068,37 @@ static void fill_decode_caches(H264Context *h, int mb_type){
}
if(USES_LIST(left_type[0], list)){
const int b_xy= h->mb2b_xy[left_xy[0]] + 3;
- *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 0*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[0]];
- *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[1]];
+ AV_COPY32(h->mvd_cache[list][scan8[0] - 1 + 0*8], h->mvd_table[list][b_xy + h->b_stride*left_block[0]]);
+ AV_COPY32(h->mvd_cache[list][scan8[0] - 1 + 1*8], h->mvd_table[list][b_xy + h->b_stride*left_block[1]]);
}else{
- *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 0*8]=
- *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 1*8]= 0;
+ AV_ZERO32(h->mvd_cache [list][scan8[0] - 1 + 0*8]);
+ AV_ZERO32(h->mvd_cache [list][scan8[0] - 1 + 1*8]);
}
if(USES_LIST(left_type[1], list)){
const int b_xy= h->mb2b_xy[left_xy[1]] + 3;
- *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 2*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[2]];
- *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 3*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[3]];
+ AV_COPY32(h->mvd_cache[list][scan8[0] - 1 + 2*8], h->mvd_table[list][b_xy + h->b_stride*left_block[2]]);
+ AV_COPY32(h->mvd_cache[list][scan8[0] - 1 + 3*8], h->mvd_table[list][b_xy + h->b_stride*left_block[3]]);
}else{
- *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 2*8]=
- *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 3*8]= 0;
+ AV_ZERO32(h->mvd_cache [list][scan8[0] - 1 + 2*8]);
+ AV_ZERO32(h->mvd_cache [list][scan8[0] - 1 + 3*8]);
}
- *(uint32_t*)h->mvd_cache [list][scan8[5 ]+1]=
- *(uint32_t*)h->mvd_cache [list][scan8[7 ]+1]=
- *(uint32_t*)h->mvd_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewhere else)
- *(uint32_t*)h->mvd_cache [list][scan8[4 ]]=
- *(uint32_t*)h->mvd_cache [list][scan8[12]]= 0;
+ AV_ZERO32(h->mvd_cache [list][scan8[5 ]+1]);
+ AV_ZERO32(h->mvd_cache [list][scan8[7 ]+1]);
+ AV_ZERO32(h->mvd_cache [list][scan8[13]+1]); //FIXME remove past 3 (init somewhere else)
+ AV_ZERO32(h->mvd_cache [list][scan8[4 ]]);
+ AV_ZERO32(h->mvd_cache [list][scan8[12]]);
if(h->slice_type_nos == FF_B_TYPE){
fill_rectangle(&h->direct_cache[scan8[0]], 4, 4, 8, MB_TYPE_16x16>>1, 1);
if(IS_DIRECT(top_type)){
- *(uint32_t*)&h->direct_cache[scan8[0] - 1*8]= 0x01010101*(MB_TYPE_DIRECT2>>1);
+ AV_WN32A(&h->direct_cache[scan8[0] - 1*8], 0x01010101*(MB_TYPE_DIRECT2>>1));
}else if(IS_8X8(top_type)){
int b8_xy = h->mb2b8_xy[top_xy] + h->b8_stride;
h->direct_cache[scan8[0] + 0 - 1*8]= h->direct_table[b8_xy];
h->direct_cache[scan8[0] + 2 - 1*8]= h->direct_table[b8_xy + 1];
}else{
- *(uint32_t*)&h->direct_cache[scan8[0] - 1*8]= 0x01010101*(MB_TYPE_16x16>>1);
+ AV_WN32A(&h->direct_cache[scan8[0] - 1*8], 0x01010101*(MB_TYPE_16x16>>1));
}
if(IS_DIRECT(left_type[0]))
@@ -1223,8 +1224,8 @@ static int fill_filter_caches(H264Context *h, int mb_type){
AV_COPY64(&h->non_zero_count_cache[0+8*1], &h->non_zero_count[mb_xy][ 0]);
AV_COPY64(&h->non_zero_count_cache[0+8*2], &h->non_zero_count[mb_xy][ 8]);
- *((uint32_t*)&h->non_zero_count_cache[0+8*5])= *((uint32_t*)&h->non_zero_count[mb_xy][16]);
- *((uint32_t*)&h->non_zero_count_cache[4+8*3])= *((uint32_t*)&h->non_zero_count[mb_xy][20]);
+ AV_COPY32(&h->non_zero_count_cache[0+8*5], &h->non_zero_count[mb_xy][16]);
+ AV_COPY32(&h->non_zero_count_cache[4+8*3], &h->non_zero_count[mb_xy][20]);
AV_COPY64(&h->non_zero_count_cache[0+8*4], &h->non_zero_count[mb_xy][24]);
h->cbp= h->cbp_table[mb_xy];
@@ -1239,21 +1240,21 @@ static int fill_filter_caches(H264Context *h, int mb_type){
if(!USES_LIST(mb_type, list)){
fill_rectangle( h->mv_cache[list][scan8[0]], 4, 4, 8, pack16to32(0,0), 4);
- *(uint32_t*)&h->ref_cache[list][scan8[ 0]] =
- *(uint32_t*)&h->ref_cache[list][scan8[ 2]] =
- *(uint32_t*)&h->ref_cache[list][scan8[ 8]] =
- *(uint32_t*)&h->ref_cache[list][scan8[10]] = ((LIST_NOT_USED)&0xFF)*0x01010101U;
+ AV_WN32A(&h->ref_cache[list][scan8[ 0]], ((LIST_NOT_USED)&0xFF)*0x01010101u);
+ AV_WN32A(&h->ref_cache[list][scan8[ 2]], ((LIST_NOT_USED)&0xFF)*0x01010101u);
+ AV_WN32A(&h->ref_cache[list][scan8[ 8]], ((LIST_NOT_USED)&0xFF)*0x01010101u);
+ AV_WN32A(&h->ref_cache[list][scan8[10]], ((LIST_NOT_USED)&0xFF)*0x01010101u);
continue;
}
ref = &s->current_picture.ref_index[list][h->mb2b8_xy[mb_xy]];
{
int (*ref2frm)[64] = h->ref2frm[ h->slice_num&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2);
- *(uint32_t*)&h->ref_cache[list][scan8[ 0]] =
- *(uint32_t*)&h->ref_cache[list][scan8[ 2]] = (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101;
+ AV_WN32A(&h->ref_cache[list][scan8[ 0]], (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101);
+ AV_WN32A(&h->ref_cache[list][scan8[ 2]], (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101);
ref += h->b8_stride;
- *(uint32_t*)&h->ref_cache[list][scan8[ 8]] =
- *(uint32_t*)&h->ref_cache[list][scan8[10]] = (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101;
+ AV_WN32A(&h->ref_cache[list][scan8[ 8]], (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101);
+ AV_WN32A(&h->ref_cache[list][scan8[10]], (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101);
}
b_stride = h->b_stride;
@@ -1277,7 +1278,7 @@ static int fill_filter_caches(H264Context *h, int mb_type){
*/
//FIXME constraint_intra_pred & partitioning & nnz (let us hope this is just a typo in the spec)
if(top_type){
- *(uint32_t*)&h->non_zero_count_cache[4+8*0]= *(uint32_t*)&h->non_zero_count[top_xy][4+3*8];
+ AV_COPY32(&h->non_zero_count_cache[4+8*0], &h->non_zero_count[top_xy][4+3*8]);
}
if(left_type[0]){
@@ -1333,7 +1334,7 @@ static int fill_filter_caches(H264Context *h, int mb_type){
h->ref_cache[list][scan8[0] + 3 - 1*8]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + 1]];
}else{
AV_ZERO128(h->mv_cache[list][scan8[0] + 0 - 1*8]);
- *(uint32_t*)&h->ref_cache[list][scan8[0] + 0 - 1*8]= ((LIST_NOT_USED)&0xFF)*0x01010101U;
+ AV_WN32A(&h->ref_cache[list][scan8[0] + 0 - 1*8], ((LIST_NOT_USED)&0xFF)*0x01010101u);
}
if(!IS_INTERLACED(mb_type^left_type[0])){
@@ -1341,19 +1342,19 @@ static int fill_filter_caches(H264Context *h, int mb_type){
const int b_xy= h->mb2b_xy[left_xy[0]] + 3;
const int b8_xy= h->mb2b8_xy[left_xy[0]] + 1;
int (*ref2frm)[64] = h->ref2frm[ h->slice_table[left_xy[0]]&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2);
- *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 0 ]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*0];
- *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 8 ]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*1];
- *(uint32_t*)h->mv_cache[list][scan8[0] - 1 +16 ]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*2];
- *(uint32_t*)h->mv_cache[list][scan8[0] - 1 +24 ]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*3];
+ AV_COPY32(h->mv_cache[list][scan8[0] - 1 + 0 ], s->current_picture.motion_val[list][b_xy + h->b_stride*0]);
+ AV_COPY32(h->mv_cache[list][scan8[0] - 1 + 8 ], s->current_picture.motion_val[list][b_xy + h->b_stride*1]);
+ AV_COPY32(h->mv_cache[list][scan8[0] - 1 +16 ], s->current_picture.motion_val[list][b_xy + h->b_stride*2]);
+ AV_COPY32(h->mv_cache[list][scan8[0] - 1 +24 ], s->current_picture.motion_val[list][b_xy + h->b_stride*3]);
h->ref_cache[list][scan8[0] - 1 + 0 ]=
h->ref_cache[list][scan8[0] - 1 + 8 ]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + h->b8_stride*0]];
h->ref_cache[list][scan8[0] - 1 +16 ]=
h->ref_cache[list][scan8[0] - 1 +24 ]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + h->b8_stride*1]];
}else{
- *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 0 ]=
- *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 8 ]=
- *(uint32_t*)h->mv_cache [list][scan8[0] - 1 +16 ]=
- *(uint32_t*)h->mv_cache [list][scan8[0] - 1 +24 ]= 0;
+ AV_ZERO32(h->mv_cache [list][scan8[0] - 1 + 0 ]);
+ AV_ZERO32(h->mv_cache [list][scan8[0] - 1 + 8 ]);
+ AV_ZERO32(h->mv_cache [list][scan8[0] - 1 +16 ]);
+ AV_ZERO32(h->mv_cache [list][scan8[0] - 1 +24 ]);
h->ref_cache[list][scan8[0] - 1 + 0 ]=
h->ref_cache[list][scan8[0] - 1 + 8 ]=
h->ref_cache[list][scan8[0] - 1 + 16 ]=
@@ -1386,8 +1387,8 @@ static inline void write_back_non_zero_count(H264Context *h){
AV_COPY64(&h->non_zero_count[mb_xy][ 0], &h->non_zero_count_cache[0+8*1]);
AV_COPY64(&h->non_zero_count[mb_xy][ 8], &h->non_zero_count_cache[0+8*2]);
- *((uint32_t*)&h->non_zero_count[mb_xy][16]) = *((uint32_t*)&h->non_zero_count_cache[0+8*5]);
- *((uint32_t*)&h->non_zero_count[mb_xy][20]) = *((uint32_t*)&h->non_zero_count_cache[4+8*3]);
+ AV_COPY32(&h->non_zero_count[mb_xy][16], &h->non_zero_count_cache[0+8*5]);
+ AV_COPY32(&h->non_zero_count[mb_xy][20], &h->non_zero_count_cache[4+8*3]);
AV_COPY64(&h->non_zero_count[mb_xy][24], &h->non_zero_count_cache[0+8*4]);
}
@@ -1446,9 +1447,9 @@ static inline void write_back_motion(H264Context *h, int mb_type){
static inline int get_dct8x8_allowed(H264Context *h){
if(h->sps.direct_8x8_inference_flag)
- return !(*(uint64_t*)h->sub_mb_type & ((MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_8x8 )*0x0001000100010001ULL));
+ return !(AV_RN64A(h->sub_mb_type) & ((MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_8x8 )*0x0001000100010001ULL));
else
- return !(*(uint64_t*)h->sub_mb_type & ((MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_8x8|MB_TYPE_DIRECT2)*0x0001000100010001ULL));
+ return !(AV_RN64A(h->sub_mb_type) & ((MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_8x8|MB_TYPE_DIRECT2)*0x0001000100010001ULL));
}
/**
diff --git a/libavcodec/h264_direct.c b/libavcodec/h264_direct.c
index e461fea06f..8977be114f 100644
--- a/libavcodec/h264_direct.c
+++ b/libavcodec/h264_direct.c
@@ -183,11 +183,11 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
}else {
assert(match_count==1);
if(left_ref==ref[list]){
- mv[list]= *(uint32_t*)A;
+ mv[list]= AV_RN32A(A);
}else if(top_ref==ref[list]){
- mv[list]= *(uint32_t*)B;
+ mv[list]= AV_RN32A(B);
}else{
- mv[list]= *(uint32_t*)C;
+ mv[list]= AV_RN32A(C);
}
}
}else{
@@ -362,9 +362,9 @@ single_col:
const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*b4_stride];
if(FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1){
if(ref[0] == 0)
- *(uint32_t*)h->mv_cache[0][scan8[i8*4+i4]] = 0;
+ AV_ZERO32(h->mv_cache[0][scan8[i8*4+i4]]);
if(ref[1] == 0)
- *(uint32_t*)h->mv_cache[1][scan8[i8*4+i4]] = 0;
+ AV_ZERO32(h->mv_cache[1][scan8[i8*4+i4]]);
m++;
}
}
@@ -571,8 +571,8 @@ single_col:
int16_t *mv_l0 = h->mv_cache[0][scan8[i8*4+i4]];
mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
- *(uint32_t*)h->mv_cache[1][scan8[i8*4+i4]] =
- pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]);
+ AV_WN32A(h->mv_cache[1][scan8[i8*4+i4]],
+ pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]));
}
}
}
diff --git a/libavcodec/h264_loopfilter.c b/libavcodec/h264_loopfilter.c
index 1e2b044056..7855ba5aaa 100644
--- a/libavcodec/h264_loopfilter.c
+++ b/libavcodec/h264_loopfilter.c
@@ -25,6 +25,7 @@
* @author Michael Niedermayer <michaelni@gmx.at>
*/
+#include "libavutil/intreadwrite.h"
#include "internal.h"
#include "dsputil.h"
#include "avcodec.h"
@@ -368,11 +369,13 @@ void ff_h264_filter_mb_fast( H264Context *h, int mb_x, int mb_y, uint8_t *img_y,
return;
} else {
LOCAL_ALIGNED_8(int16_t, bS, [2], [4][4]);
- uint64_t (*bSv)[4] = (uint64_t(*)[4])bS;
int edges;
if( IS_8x8DCT(mb_type) && (h->cbp&7) == 7 ) {
edges = 4;
- bSv[0][0] = bSv[0][2] = bSv[1][0] = bSv[1][2] = 0x0002000200020002ULL;
+ AV_WN64A(bS[0][0], 0x0002000200020002ULL);
+ AV_WN64A(bS[0][2], 0x0002000200020002ULL);
+ AV_WN64A(bS[1][0], 0x0002000200020002ULL);
+ AV_WN64A(bS[1][2], 0x0002000200020002ULL);
} else {
int mask_edge1 = (3*(((5*mb_type)>>5)&1)) | (mb_type>>4); //(mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : (mb_type & MB_TYPE_16x8) ? 1 : 0;
int mask_edge0 = 3*((mask_edge1>>1) & ((5*left_type)>>5)&1); // (mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) && (h->left_type[0] & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : 0;
@@ -382,12 +385,12 @@ void ff_h264_filter_mb_fast( H264Context *h, int mb_x, int mb_y, uint8_t *img_y,
h->list_count==2, edges, step, mask_edge0, mask_edge1, FIELD_PICTURE);
}
if( IS_INTRA(left_type) )
- bSv[0][0] = 0x0004000400040004ULL;
+ AV_WN64A(bS[0][0], 0x0004000400040004ULL);
if( IS_INTRA(h->top_type) )
- bSv[1][0] = FIELD_PICTURE ? 0x0003000300030003ULL : 0x0004000400040004ULL;
+ AV_WN64A(bS[1][0], FIELD_PICTURE ? 0x0003000300030003ULL : 0x0004000400040004ULL);
#define FILTER(hv,dir,edge)\
- if(bSv[dir][edge]) {\
+ if(AV_RN64A(bS[dir][edge])) { \
filter_mb_edge##hv( &img_y[4*edge*(dir?linesize:1)], linesize, bS[dir][edge], edge ? qp : qp##dir, h );\
if(!(edge&1)) {\
filter_mb_edgec##hv( &img_cb[2*edge*(dir?uvlinesize:1)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, h );\
@@ -477,7 +480,7 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
DECLARE_ALIGNED_8(int16_t, bS)[4];
int qp;
if( IS_INTRA(mb_type|s->current_picture.mb_type[mbn_xy]) ) {
- *(uint64_t*)bS= 0x0003000300030003ULL;
+ AV_WN64A(bS, 0x0003000300030003ULL);
} else {
if(!CABAC && IS_8x8DCT(s->current_picture.mb_type[mbn_xy])){
bS[0]= 1+((h->cbp_table[mbn_xy] & 4)||h->non_zero_count_cache[scan8[0]+0]);
@@ -508,17 +511,17 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
int qp;
if( IS_INTRA(mb_type|mbm_type)) {
- *(uint64_t*)bS= 0x0003000300030003ULL;
+ AV_WN64A(bS, 0x0003000300030003ULL);
if ( (!IS_INTERLACED(mb_type|mbm_type))
|| ((FRAME_MBAFF || (s->picture_structure != PICT_FRAME)) && (dir == 0))
)
- *(uint64_t*)bS= 0x0004000400040004ULL;
+ AV_WN64A(bS, 0x0004000400040004ULL);
} else {
int i;
int mv_done;
if( dir && FRAME_MBAFF && IS_INTERLACED(mb_type ^ mbm_type)) {
- *(uint64_t*)bS= 0x0001000100010001ULL;
+ AV_WN64A(bS, 0x0001000100010001ULL);
mv_done = 1;
}
else if( mask_par0 && ((mbm_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir)))) ) {
@@ -588,13 +591,13 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
continue;
if( IS_INTRA(mb_type)) {
- *(uint64_t*)bS= 0x0003000300030003ULL;
+ AV_WN64A(bS, 0x0003000300030003ULL);
} else {
int i;
int mv_done;
if( edge & mask_edge ) {
- *(uint64_t*)bS= 0;
+ AV_ZERO64(bS);
mv_done = 1;
}
else if( mask_par0 ) {
@@ -674,10 +677,10 @@ void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint
int i;
first_vertical_edge_done = 1;
- if( IS_INTRA(mb_type) )
- *(uint64_t*)&bS[0]=
- *(uint64_t*)&bS[4]= 0x0004000400040004ULL;
- else {
+ if( IS_INTRA(mb_type) ) {
+ AV_WN64A(&bS[0], 0x0004000400040004ULL);
+ AV_WN64A(&bS[4], 0x0004000400040004ULL);
+ } else {
static const uint8_t offset[2][2][8]={
{
{7+8*0, 7+8*0, 7+8*0, 7+8*0, 7+8*1, 7+8*1, 7+8*1, 7+8*1},
diff --git a/libavcodec/h264_mvpred.h b/libavcodec/h264_mvpred.h
index 6404ab0bfc..4a6780a75e 100644
--- a/libavcodec/h264_mvpred.h
+++ b/libavcodec/h264_mvpred.h
@@ -58,7 +58,7 @@ static inline int fetch_diagonal_mv(H264Context *h, const int16_t **C, int i, in
&& h->ref_cache[list][scan8[0]-1] != PART_NOT_AVAILABLE){
const uint32_t *mb_types = s->current_picture_ptr->mb_type;
const int16_t *mv;
- *(uint32_t*)h->mv_cache[list][scan8[0]-2] = 0;
+ AV_ZERO32(h->mv_cache[list][scan8[0]-2]);
*C = h->mv_cache[list][scan8[0]-2];
if(!MB_FIELD
@@ -220,8 +220,8 @@ static inline void pred_pskip_motion(H264Context * const h, int * const mx, int
tprintf(h->s.avctx, "pred_pskip: (%d) (%d) at %2d %2d\n", top_ref, left_ref, h->s.mb_x, h->s.mb_y);
if(top_ref == PART_NOT_AVAILABLE || left_ref == PART_NOT_AVAILABLE
- || !( top_ref | *(uint32_t*)h->mv_cache[0][ scan8[0] - 8 ])
- || !(left_ref | *(uint32_t*)h->mv_cache[0][ scan8[0] - 1 ])){
+ || !( top_ref | AV_RN32A(h->mv_cache[0][ scan8[0] - 8 ]))
+ || !(left_ref | AV_RN32A(h->mv_cache[0][ scan8[0] - 1 ]))){
*mx = *my = 0;
return;