author     Mans Rullgard <mans@mansr.com>            2011-01-15 22:09:35 +0000
committer  Mans Rullgard <mans@mansr.com>            2011-01-18 20:48:24 +0000
commit     ef4a65149db95acc7271a425a2cd0d2259740c84 (patch)
tree       1e6afea7b341fa246d491ed8d7dcf1641ee89aba /libavcodec
parent     ac3c9d016917494e9cecb12f6e5a42fbd2e7adc1 (diff)
download   ffmpeg-ef4a65149db95acc7271a425a2cd0d2259740c84.tar.gz
Replace ASMALIGN() with .p2align
This macro has unconditionally used .p2align for a long time and
serves no useful purpose.
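For context: .p2align n always aligns to a 2^n-byte boundary, whereas the plain .align directive is ambiguous (a byte count on some assemblers, a power of two on others) -- the portability gap ASMALIGN() was originally meant to paper over. Below is a minimal sketch of the kind of definition being retired; the exact wording of the old config.h macro is an assumption here, not part of this commit:

    /* Hypothetical reconstruction of the removed macro. configure once
     * chose a per-assembler spelling, but by this point it had long
     * emitted only the .p2align form: */
    #define ASMALIGN(ZEROBITS) ".p2align " #ZEROBITS "\n\t"

    /* Hence ASMALIGN(3) and ".p2align 3\n\t" are textually identical
     * inside an asm() block: both align the next label to 1<<3 = 8 bytes,
     * which is why the macro can be replaced by the directive outright. */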
Diffstat (limited to 'libavcodec')
-rw-r--r--  libavcodec/x86/dsputil_mmx.c                |  8
-rw-r--r--  libavcodec/x86/dsputil_mmx_avg_template.c   |  2
-rw-r--r--  libavcodec/x86/dsputil_mmx_qns_template.c   |  4
-rw-r--r--  libavcodec/x86/dsputil_mmx_rnd_template.c   | 16
-rw-r--r--  libavcodec/x86/dsputilenc_mmx.c             |  4
-rw-r--r--  libavcodec/x86/idct_sse2_xvid.c             |  2
-rw-r--r--  libavcodec/x86/motion_est_mmx.c             | 16
-rw-r--r--  libavcodec/x86/mpegvideo_mmx.c              | 12
-rw-r--r--  libavcodec/x86/mpegvideo_mmx_template.c     |  4
-rw-r--r--  libavcodec/x86/simple_idct_mmx.c            | 14
-rw-r--r--  libavcodec/x86/vc1dsp_mmx.c                 |  6
11 files changed, 44 insertions(+), 44 deletions(-)
diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c
index d8edb0f3f0..825149e4a5 100644
--- a/libavcodec/x86/dsputil_mmx.c
+++ b/libavcodec/x86/dsputil_mmx.c
@@ -81,7 +81,7 @@ DECLARE_ALIGNED(16, const xmm_reg, ff_pb_FE ) = {0xFEFEFEFEFEFEFEFEULL, 0xFEFEF
 DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
 DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };
 
-#define JUMPALIGN() __asm__ volatile (ASMALIGN(3)::)
+#define JUMPALIGN() __asm__ volatile (".p2align 3"::)
 #define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)
 
 #define MOVQ_BFE(regd) \
@@ -368,7 +368,7 @@ static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size
 {
     __asm__ volatile(
         "lea (%3, %3), %%"REG_a"        \n\t"
-        ASMALIGN(3)
+        ".p2align 3                     \n\t"
         "1:                             \n\t"
         "movd (%1), %%mm0               \n\t"
         "movd (%1, %3), %%mm1           \n\t"
@@ -394,7 +394,7 @@ static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size
 {
     __asm__ volatile(
         "lea (%3, %3), %%"REG_a"        \n\t"
-        ASMALIGN(3)
+        ".p2align 3                     \n\t"
         "1:                             \n\t"
         "movq (%1), %%mm0               \n\t"
         "movq (%1, %3), %%mm1           \n\t"
@@ -420,7 +420,7 @@ static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_siz
 {
     __asm__ volatile(
         "lea (%3, %3), %%"REG_a"        \n\t"
-        ASMALIGN(3)
+        ".p2align 3                     \n\t"
         "1:                             \n\t"
         "movq (%1), %%mm0               \n\t"
         "movq 8(%1), %%mm4              \n\t"
diff --git a/libavcodec/x86/dsputil_mmx_avg_template.c b/libavcodec/x86/dsputil_mmx_avg_template.c
index 69575e3ae7..6f768595c0 100644
--- a/libavcodec/x86/dsputil_mmx_avg_template.c
+++ b/libavcodec/x86/dsputil_mmx_avg_template.c
@@ -838,7 +838,7 @@ static void DEF(avg_pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line
         "lea (%3, %3), %%"REG_a"        \n\t"
         "movq (%1), %%mm0               \n\t"
         PAVGB" 1(%1), %%mm0             \n\t"
-        ASMALIGN(3)
+        ".p2align 3                     \n\t"
         "1:                             \n\t"
         "movq (%1, %%"REG_a"), %%mm2    \n\t"
         "movq (%1, %3), %%mm1           \n\t"
diff --git a/libavcodec/x86/dsputil_mmx_qns_template.c b/libavcodec/x86/dsputil_mmx_qns_template.c
index d2dbfc5a58..77a41b9dcb 100644
--- a/libavcodec/x86/dsputil_mmx_qns_template.c
+++ b/libavcodec/x86/dsputil_mmx_qns_template.c
@@ -37,7 +37,7 @@ static int DEF(try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[
         "movd %4, %%mm5                 \n\t"
         "punpcklwd %%mm5, %%mm5         \n\t"
         "punpcklwd %%mm5, %%mm5         \n\t"
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%1, %0), %%mm0           \n\t"
         "movq 8(%1, %0), %%mm1          \n\t"
@@ -77,7 +77,7 @@ static void DEF(add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale)
         "movd %3, %%mm5                 \n\t"
         "punpcklwd %%mm5, %%mm5         \n\t"
         "punpcklwd %%mm5, %%mm5         \n\t"
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%1, %0), %%mm0           \n\t"
         "movq 8(%1, %0), %%mm1          \n\t"
diff --git a/libavcodec/x86/dsputil_mmx_rnd_template.c b/libavcodec/x86/dsputil_mmx_rnd_template.c
index 2fc1756f60..e4c91381fa 100644
--- a/libavcodec/x86/dsputil_mmx_rnd_template.c
+++ b/libavcodec/x86/dsputil_mmx_rnd_template.c
@@ -30,7 +30,7 @@ static void DEF(put, pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line
     MOVQ_BFE(mm6);
     __asm__ volatile(
         "lea (%3, %3), %%"REG_a"        \n\t"
-        ASMALIGN(3)
+        ".p2align 3                     \n\t"
         "1:                             \n\t"
         "movq (%1), %%mm0               \n\t"
         "movq 1(%1), %%mm1              \n\t"
@@ -71,7 +71,7 @@ static void av_unused DEF(put, pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t
         "movq %%mm4, (%3)               \n\t"
         "add %5, %3                     \n\t"
         "decl %0                        \n\t"
-        ASMALIGN(3)
+        ".p2align 3                     \n\t"
         "1:                             \n\t"
         "movq (%1), %%mm0               \n\t"
         "movq (%2), %%mm1               \n\t"
@@ -112,7 +112,7 @@ static void DEF(put, pixels16_x2)(uint8_t *block, const uint8_t *pixels, int lin
     MOVQ_BFE(mm6);
     __asm__ volatile(
         "lea (%3, %3), %%"REG_a"        \n\t"
-        ASMALIGN(3)
+        ".p2align 3                     \n\t"
         "1:                             \n\t"
         "movq (%1), %%mm0               \n\t"
         "movq 1(%1), %%mm1              \n\t"
@@ -170,7 +170,7 @@ static void av_unused DEF(put, pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t
         "movq %%mm5, 8(%3)              \n\t"
         "add %5, %3                     \n\t"
         "decl %0                        \n\t"
-        ASMALIGN(3)
+        ".p2align 3                     \n\t"
         "1:                             \n\t"
         "movq (%1), %%mm0               \n\t"
         "movq (%2), %%mm1               \n\t"
@@ -208,7 +208,7 @@ static void DEF(put, pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line
     __asm__ volatile(
         "lea (%3, %3), %%"REG_a"        \n\t"
         "movq (%1), %%mm0               \n\t"
-        ASMALIGN(3)
+        ".p2align 3                     \n\t"
         "1:                             \n\t"
         "movq (%1, %3), %%mm1           \n\t"
         "movq (%1, %%"REG_a"),%%mm2     \n\t"
@@ -248,7 +248,7 @@ static void DEF(put, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int lin
         "paddusw %%mm1, %%mm5           \n\t"
         "xor %%"REG_a", %%"REG_a"       \n\t"
         "add %3, %1                     \n\t"
-        ASMALIGN(3)
+        ".p2align 3                     \n\t"
         "1:                             \n\t"
         "movq (%1, %%"REG_a"), %%mm0    \n\t"
         "movq 1(%1, %%"REG_a"), %%mm2   \n\t"
@@ -460,7 +460,7 @@ static void DEF(avg, pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line
     __asm__ volatile(
         "lea (%3, %3), %%"REG_a"        \n\t"
         "movq (%1), %%mm0               \n\t"
-        ASMALIGN(3)
+        ".p2align 3                     \n\t"
         "1:                             \n\t"
         "movq (%1, %3), %%mm1           \n\t"
         "movq (%1, %%"REG_a"), %%mm2    \n\t"
@@ -511,7 +511,7 @@ static void DEF(avg, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int lin
         "paddusw %%mm1, %%mm5           \n\t"
         "xor %%"REG_a", %%"REG_a"       \n\t"
         "add %3, %1                     \n\t"
-        ASMALIGN(3)
+        ".p2align 3                     \n\t"
         "1:                             \n\t"
         "movq (%1, %%"REG_a"), %%mm0    \n\t"
         "movq 1(%1, %%"REG_a"), %%mm2   \n\t"
diff --git a/libavcodec/x86/dsputilenc_mmx.c b/libavcodec/x86/dsputilenc_mmx.c
index f02d1cad75..cb8080aa0a 100644
--- a/libavcodec/x86/dsputilenc_mmx.c
+++ b/libavcodec/x86/dsputilenc_mmx.c
@@ -35,7 +35,7 @@ static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
     __asm__ volatile(
         "mov $-128, %%"REG_a"           \n\t"
         "pxor %%mm7, %%mm7              \n\t"
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%0), %%mm0               \n\t"
         "movq (%0, %2), %%mm2           \n\t"
@@ -97,7 +97,7 @@ static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint
     __asm__ volatile(
         "pxor %%mm7, %%mm7              \n\t"
         "mov $-128, %%"REG_a"           \n\t"
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%0), %%mm0               \n\t"
         "movq (%1), %%mm2               \n\t"
diff --git a/libavcodec/x86/idct_sse2_xvid.c b/libavcodec/x86/idct_sse2_xvid.c
index edf65813f0..5185d61e54 100644
--- a/libavcodec/x86/idct_sse2_xvid.c
+++ b/libavcodec/x86/idct_sse2_xvid.c
@@ -356,7 +356,7 @@ inline void ff_idct_xvid_sse2(short *block)
     TEST_TWO_ROWS("5*16(%0)", "6*16(%0)", "%%eax", "%%edx", CLEAR_ODD(ROW5), CLEAR_EVEN(ROW6))
     TEST_ONE_ROW("7*16(%0)", "%%esi", CLEAR_ODD(ROW7))
     iLLM_HEAD
-    ASMALIGN(4)
+    ".p2align 4 \n\t"
     JNZ("%%ecx", "2f")
     JNZ("%%eax", "3f")
     JNZ("%%edx", "4f")
diff --git a/libavcodec/x86/motion_est_mmx.c b/libavcodec/x86/motion_est_mmx.c
index e1314aef4b..fefef41058 100644
--- a/libavcodec/x86/motion_est_mmx.c
+++ b/libavcodec/x86/motion_est_mmx.c
@@ -38,7 +38,7 @@ static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 {
     x86_reg len= -(stride*h);
     __asm__ volatile(
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%1, %%"REG_a"), %%mm0    \n\t"
         "movq (%2, %%"REG_a"), %%mm2    \n\t"
@@ -73,7 +73,7 @@ static inline void sad8_1_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 {
     __asm__ volatile(
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%1), %%mm0               \n\t"
         "movq (%1, %3), %%mm1           \n\t"
@@ -95,7 +95,7 @@ static int sad16_sse2(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)
     int ret;
     __asm__ volatile(
         "pxor %%xmm2, %%xmm2            \n\t"
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movdqu (%1), %%xmm0            \n\t"
         "movdqu (%1, %4), %%xmm1        \n\t"
@@ -119,7 +119,7 @@ static int sad16_sse2(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)
 static inline void sad8_x2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 {
     __asm__ volatile(
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%1), %%mm0               \n\t"
         "movq (%1, %3), %%mm1           \n\t"
@@ -143,7 +143,7 @@ static inline void sad8_y2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h
     __asm__ volatile(
         "movq (%1), %%mm0               \n\t"
         "add %3, %1                     \n\t"
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%1), %%mm1               \n\t"
         "movq (%1, %3), %%mm2           \n\t"
@@ -170,7 +170,7 @@ static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
         "movq (%1), %%mm0               \n\t"
         "pavgb 1(%1), %%mm0             \n\t"
         "add %3, %1                     \n\t"
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%1), %%mm1               \n\t"
         "movq (%1,%3), %%mm2            \n\t"
@@ -197,7 +197,7 @@ static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int
 {
     x86_reg len= -(stride*h);
     __asm__ volatile(
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%1, %%"REG_a"), %%mm0    \n\t"
         "movq (%2, %%"REG_a"), %%mm1    \n\t"
@@ -245,7 +245,7 @@ static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
         "punpckhbw %%mm7, %%mm3         \n\t"
         "paddw %%mm2, %%mm0             \n\t"
         "paddw %%mm3, %%mm1             \n\t"
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%2, %%"REG_a"), %%mm2    \n\t"
         "movq 1(%2, %%"REG_a"), %%mm4   \n\t"
diff --git a/libavcodec/x86/mpegvideo_mmx.c b/libavcodec/x86/mpegvideo_mmx.c
index a32e35b6b7..3b8513d3f0 100644
--- a/libavcodec/x86/mpegvideo_mmx.c
+++ b/libavcodec/x86/mpegvideo_mmx.c
@@ -66,7 +66,7 @@ __asm__ volatile(
         "packssdw %%mm5, %%mm5          \n\t"
         "psubw %%mm5, %%mm7             \n\t"
         "pxor %%mm4, %%mm4              \n\t"
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%0, %3), %%mm0           \n\t"
         "movq 8(%0, %3), %%mm1          \n\t"
@@ -129,7 +129,7 @@ __asm__ volatile(
         "packssdw %%mm5, %%mm5          \n\t"
         "psubw %%mm5, %%mm7             \n\t"
         "pxor %%mm4, %%mm4              \n\t"
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%0, %3), %%mm0           \n\t"
         "movq 8(%0, %3), %%mm1          \n\t"
@@ -222,7 +222,7 @@ __asm__ volatile(
         "packssdw %%mm6, %%mm6          \n\t"
         "packssdw %%mm6, %%mm6          \n\t"
         "mov %3, %%"REG_a"              \n\t"
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%0, %%"REG_a"), %%mm0    \n\t"
         "movq 8(%0, %%"REG_a"), %%mm1   \n\t"
@@ -285,7 +285,7 @@ __asm__ volatile(
         "packssdw %%mm6, %%mm6          \n\t"
         "packssdw %%mm6, %%mm6          \n\t"
         "mov %3, %%"REG_a"              \n\t"
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%0, %%"REG_a"), %%mm0    \n\t"
         "movq 8(%0, %%"REG_a"), %%mm1   \n\t"
@@ -357,7 +357,7 @@ __asm__ volatile(
         "packssdw %%mm6, %%mm6          \n\t"
         "packssdw %%mm6, %%mm6          \n\t"
         "mov %3, %%"REG_a"              \n\t"
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%0, %%"REG_a"), %%mm0    \n\t"
         "movq 8(%0, %%"REG_a"), %%mm1   \n\t"
@@ -418,7 +418,7 @@ __asm__ volatile(
         "packssdw %%mm6, %%mm6          \n\t"
         "packssdw %%mm6, %%mm6          \n\t"
         "mov %3, %%"REG_a"              \n\t"
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%0, %%"REG_a"), %%mm0    \n\t"
         "movq 8(%0, %%"REG_a"), %%mm1   \n\t"
diff --git a/libavcodec/x86/mpegvideo_mmx_template.c b/libavcodec/x86/mpegvideo_mmx_template.c
index b292c9d170..0df4600a11 100644
--- a/libavcodec/x86/mpegvideo_mmx_template.c
+++ b/libavcodec/x86/mpegvideo_mmx_template.c
@@ -158,7 +158,7 @@ static int RENAME(dct_quantize)(MpegEncContext *s,
         "pxor "MM"6, "MM"6              \n\t"
         "psubw (%3), "MM"6              \n\t" // -bias[0]
         "mov $-128, %%"REG_a"           \n\t"
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         MOVQ" (%1, %%"REG_a"), "MM"0    \n\t" // block[i]
         SAVE_SIGN(MM"1", MM"0")               // ABS(block[i])
@@ -190,7 +190,7 @@ static int RENAME(dct_quantize)(MpegEncContext *s,
         "pxor "MM"7, "MM"7              \n\t" // 0
         "pxor "MM"4, "MM"4              \n\t" // 0
         "mov $-128, %%"REG_a"           \n\t"
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         MOVQ" (%1, %%"REG_a"), "MM"0    \n\t" // block[i]
         SAVE_SIGN(MM"1", MM"0")               // ABS(block[i])
diff --git a/libavcodec/x86/simple_idct_mmx.c b/libavcodec/x86/simple_idct_mmx.c
index fb6a5ff369..92cc18465c 100644
--- a/libavcodec/x86/simple_idct_mmx.c
+++ b/libavcodec/x86/simple_idct_mmx.c
@@ -789,7 +789,7 @@ IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20)
 IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
         "jmp 9f                         \n\t"
-        "#" ASMALIGN(4)                 \
+        "# .p2align 4                   \n\t"\
         "4:                             \n\t"
 Z_COND_IDCT( 64(%0), 72(%0), 80(%0), 88(%0), 64(%1),paddd (%2), 11, 6f)
 Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 5f)
@@ -864,7 +864,7 @@ IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20)
 IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
         "jmp 9f                         \n\t"
-        "#" ASMALIGN(4)                 \
+        "# .p2align 4                   \n\t"\
         "6:                             \n\t"
 Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 7f)
@@ -930,7 +930,7 @@ IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20)
 IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
         "jmp 9f                         \n\t"
-        "#" ASMALIGN(4)                 \
+        "# .p2align 4                   \n\t"\
         "2:                             \n\t"
 Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 3f)
@@ -1007,7 +1007,7 @@ IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20)
 IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
         "jmp 9f                         \n\t"
-        "#" ASMALIGN(4)                 \
+        "# .p2align 4                   \n\t"\
         "3:                             \n\t"
 #undef IDCT
 #define IDCT(src0, src4, src1, src5, dst, shift) \
@@ -1071,7 +1071,7 @@ IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20)
 IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
         "jmp 9f                         \n\t"
-        "#" ASMALIGN(4)                 \
+        "# .p2align 4                   \n\t"\
         "5:                             \n\t"
 #undef IDCT
 #define IDCT(src0, src4, src1, src5, dst, shift) \
@@ -1136,7 +1136,7 @@ IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20)
         "jmp 9f                         \n\t"
-        "#" ASMALIGN(4)                 \
+        "# .p2align 4                   \n\t"\
         "1:                             \n\t"
 #undef IDCT
 #define IDCT(src0, src4, src1, src5, dst, shift) \
@@ -1210,7 +1210,7 @@ IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
         "jmp 9f                         \n\t"
-        "#" ASMALIGN(4)
+        "# .p2align 4                   \n\t"
         "7:                             \n\t"
 #undef IDCT
 #define IDCT(src0, src4, src1, src5, dst, shift) \
diff --git a/libavcodec/x86/vc1dsp_mmx.c b/libavcodec/x86/vc1dsp_mmx.c
index 8889bb36e6..04b4abaad3 100644
--- a/libavcodec/x86/vc1dsp_mmx.c
+++ b/libavcodec/x86/vc1dsp_mmx.c
@@ -275,7 +275,7 @@ vc1_put_ver_16b_ ## NAME ## _mmx(int16_t *dst, const uint8_t *src, \
         LOAD_ROUNDER_MMX("%5")                                          \
         "movq "MANGLE(ff_pw_53)", %%mm5\n\t"                            \
         "movq "MANGLE(ff_pw_18)", %%mm6\n\t"                            \
-        ASMALIGN(3)                                                     \
+        ".p2align 3                \n\t"                                \
         "1:                        \n\t"                                \
         MSPEL_FILTER13_CORE(DO_UNPACK, "movd 1", A1, A2, A3, A4)        \
         NORMALIZE_MMX("%6")                                             \
@@ -331,7 +331,7 @@ OPNAME ## vc1_hor_16b_ ## NAME ## _mmx(uint8_t *dst, x86_reg stride, \
         LOAD_ROUNDER_MMX("%4")                                          \
         "movq "MANGLE(ff_pw_18)", %%mm6 \n\t"                           \
         "movq "MANGLE(ff_pw_53)", %%mm5 \n\t"                           \
-        ASMALIGN(3)                                                     \
+        ".p2align 3                \n\t"                                \
         "1:                        \n\t"                                \
         MSPEL_FILTER13_CORE(DONT_UNPACK, "movq 2", A1, A2, A3, A4)      \
         NORMALIZE_MMX("$7")                                             \
@@ -369,7 +369,7 @@ OPNAME ## vc1_## NAME ## _mmx(uint8_t *dst, const uint8_t *src, \
         LOAD_ROUNDER_MMX("%6")                                          \
         "movq "MANGLE(ff_pw_53)", %%mm5 \n\t"                           \
         "movq "MANGLE(ff_pw_18)", %%mm6 \n\t"                           \
-        ASMALIGN(3)                                                     \
+        ".p2align 3                \n\t"                                \
         "1:                        \n\t"                                \
         MSPEL_FILTER13_CORE(DO_UNPACK, "movd 1", A1, A2, A3, A4)        \
         NORMALIZE_MMX("$6")                                             \