author     Mans Rullgard <mans@mansr.com>    2011-01-15 22:09:35 +0000
committer  Mans Rullgard <mans@mansr.com>    2011-01-18 20:48:24 +0000
commit     ef4a65149db95acc7271a425a2cd0d2259740c84 (patch)
tree       1e6afea7b341fa246d491ed8d7dcf1641ee89aba /libavcodec/x86/motion_est_mmx.c
parent     ac3c9d016917494e9cecb12f6e5a42fbd2e7adc1 (diff)
download   ffmpeg-ef4a65149db95acc7271a425a2cd0d2259740c84.tar.gz
Replace ASMALIGN() with .p2align
This macro has unconditionally used .p2align for a long time and
serves no useful purpose.
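
For readers unfamiliar with the macro, a minimal sketch of why the substitution is behaviour-preserving: by this point ASMALIGN(n) simply expanded to a ".p2align n" string fragment inside the inline-asm block, so spelling the directive out changes nothing in the emitted object code. The macro definition and the sum_bytes() loop below are illustrative reconstructions (x86-64 GCC/Clang inline asm), not code taken from FFmpeg.

#include <stdint.h>
#include <stdio.h>

/* Assumed reconstruction of the pre-patch macro: the old FFmpeg headers are
 * believed to have defined ASMALIGN() as an unconditional .p2align wrapper
 * (check the config headers of the era for the exact definition). */
#define ASMALIGN(ZEROBITS) ".p2align " #ZEROBITS "\n\t"

/* Hypothetical loop in the post-patch style: the literal ".p2align 4"
 * aligns the local label "1:" to a 2^4 = 16-byte boundary, exactly what
 * ASMALIGN(4) used to emit. */
static long sum_bytes(const uint8_t *p, long n)
{
    long acc = 0;
    __asm__ volatile(
        ".p2align 4                 \n\t"   /* was: ASMALIGN(4) */
        "1:                         \n\t"
        "movzbq (%1), %%rax         \n\t"   /* load one byte, zero-extended */
        "add    %%rax, %0           \n\t"   /* accumulate into acc */
        "add    $1, %1              \n\t"   /* advance the pointer */
        "sub    $1, %2              \n\t"   /* count down */
        "jnz    1b                  \n\t"   /* loop until n hits zero */
        : "+r"(acc), "+r"(p), "+r"(n)
        :
        : "%rax", "memory");
    return acc;
}

int main(void)
{
    uint8_t buf[4] = { 1, 2, 3, 4 };
    printf("%ld\n", sum_bytes(buf, 4));     /* prints 10 */
    return 0;
}

Assembling either form produces the same alignment padding (typically multi-byte NOPs) before the loop entry, which is why the patch is purely a textual cleanup.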
Diffstat (limited to 'libavcodec/x86/motion_est_mmx.c')
-rw-r--r--  libavcodec/x86/motion_est_mmx.c  16
1 file changed, 8 insertions, 8 deletions
diff --git a/libavcodec/x86/motion_est_mmx.c b/libavcodec/x86/motion_est_mmx.c
index e1314aef4b..fefef41058 100644
--- a/libavcodec/x86/motion_est_mmx.c
+++ b/libavcodec/x86/motion_est_mmx.c
@@ -38,7 +38,7 @@ static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 {
     x86_reg len= -(stride*h);
     __asm__ volatile(
-        ASMALIGN(4)
+        ".p2align 4 \n\t"
         "1: \n\t"
         "movq (%1, %%"REG_a"), %%mm0 \n\t"
         "movq (%2, %%"REG_a"), %%mm2 \n\t"
@@ -73,7 +73,7 @@ static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 static inline void sad8_1_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 {
     __asm__ volatile(
-        ASMALIGN(4)
+        ".p2align 4 \n\t"
         "1: \n\t"
         "movq (%1), %%mm0 \n\t"
         "movq (%1, %3), %%mm1 \n\t"
@@ -95,7 +95,7 @@ static int sad16_sse2(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)
     int ret;
     __asm__ volatile(
         "pxor %%xmm2, %%xmm2 \n\t"
-        ASMALIGN(4)
+        ".p2align 4 \n\t"
         "1: \n\t"
         "movdqu (%1), %%xmm0 \n\t"
         "movdqu (%1, %4), %%xmm1 \n\t"
@@ -119,7 +119,7 @@ static int sad16_sse2(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)
 static inline void sad8_x2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 {
     __asm__ volatile(
-        ASMALIGN(4)
+        ".p2align 4 \n\t"
         "1: \n\t"
         "movq (%1), %%mm0 \n\t"
         "movq (%1, %3), %%mm1 \n\t"
@@ -143,7 +143,7 @@ static inline void sad8_y2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h
     __asm__ volatile(
         "movq (%1), %%mm0 \n\t"
         "add %3, %1 \n\t"
-        ASMALIGN(4)
+        ".p2align 4 \n\t"
         "1: \n\t"
         "movq (%1), %%mm1 \n\t"
         "movq (%1, %3), %%mm2 \n\t"
@@ -170,7 +170,7 @@ static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
         "movq (%1), %%mm0 \n\t"
         "pavgb 1(%1), %%mm0 \n\t"
         "add %3, %1 \n\t"
-        ASMALIGN(4)
+        ".p2align 4 \n\t"
         "1: \n\t"
         "movq (%1), %%mm1 \n\t"
         "movq (%1,%3), %%mm2 \n\t"
@@ -197,7 +197,7 @@ static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int
 {
     x86_reg len= -(stride*h);
     __asm__ volatile(
-        ASMALIGN(4)
+        ".p2align 4 \n\t"
         "1: \n\t"
         "movq (%1, %%"REG_a"), %%mm0 \n\t"
         "movq (%2, %%"REG_a"), %%mm1 \n\t"
@@ -245,7 +245,7 @@ static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
         "punpckhbw %%mm7, %%mm3 \n\t"
         "paddw %%mm2, %%mm0 \n\t"
         "paddw %%mm3, %%mm1 \n\t"
-        ASMALIGN(4)
+        ".p2align 4 \n\t"
         "1: \n\t"
         "movq (%2, %%"REG_a"), %%mm2 \n\t"
         "movq 1(%2, %%"REG_a"), %%mm4 \n\t"