author    Måns Rullgård <mans@mansr.com>  2010-03-06 14:24:59 +0000
committer Måns Rullgård <mans@mansr.com>  2010-03-06 14:24:59 +0000
commit    84dc2d8afa1fce31c6fe97149ef70ba966500f65 (patch)
tree      650f8234ea81d9b0ec75ebf125e4d71c066a1337 /libavcodec/x86
parent    da033b05ab2f3c9dd0274243df88bd8363de6c24 (diff)
download  ffmpeg-84dc2d8afa1fce31c6fe97149ef70ba966500f65.tar.gz
Remove DECLARE_ALIGNED_{8,16} macros
These macros are redundant; all uses have been replaced with the generic DECLARE_ALIGNED macro.

Originally committed as revision 22233 to svn://svn.ffmpeg.org/ffmpeg/trunk
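For context, the substitution below is purely mechanical: the alignment that used to be encoded in the macro name becomes the first argument of the generic macro. The following is a minimal sketch of how such a macro is commonly defined for GCC-style compilers; FFmpeg's actual definition lives in libavutil/mem.h and also covers other compilers, so treat this as illustrative only.

#include <stdint.h>

/* Illustrative sketch only -- FFmpeg's real macro is defined in
 * libavutil/mem.h and also handles non-GCC compilers. */
#define DECLARE_ALIGNED(n, t, v) t __attribute__ ((aligned (n))) v

/* Before: alignment baked into the macro name, e.g.
 *   DECLARE_ALIGNED_8 (uint64_t, tmp);
 *   DECLARE_ALIGNED_16(int16_t, temp_block)[64];
 * After: alignment passed as the first argument of the generic macro. */
static DECLARE_ALIGNED(8,  uint64_t, tmp) = 0;
static DECLARE_ALIGNED(16, int16_t,  temp_block)[64];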
Diffstat (limited to 'libavcodec/x86')
-rw-r--r--  libavcodec/x86/cavsdsp_mmx.c                |  4
-rw-r--r--  libavcodec/x86/dsputil_h264_template_mmx.c  |  4
-rw-r--r--  libavcodec/x86/dsputil_mmx.c                | 62
-rw-r--r--  libavcodec/x86/dsputilenc_mmx.c             |  4
-rw-r--r--  libavcodec/x86/fft_3dn2.c                   |  2
-rw-r--r--  libavcodec/x86/h264dsp_mmx.c                | 20
-rw-r--r--  libavcodec/x86/mpegvideo_mmx_template.c     |  2
-rw-r--r--  libavcodec/x86/rv40dsp_mmx.c                |  2
-rw-r--r--  libavcodec/x86/snowdsp_mmx.c                |  2
-rw-r--r--  libavcodec/x86/vc1dsp_mmx.c                 |  4
-rw-r--r--  libavcodec/x86/vp3dsp_sse2.c                |  2
11 files changed, 54 insertions(+), 54 deletions(-)
diff --git a/libavcodec/x86/cavsdsp_mmx.c b/libavcodec/x86/cavsdsp_mmx.c
index 3edfdb9822..ad6f067317 100644
--- a/libavcodec/x86/cavsdsp_mmx.c
+++ b/libavcodec/x86/cavsdsp_mmx.c
@@ -113,10 +113,10 @@ static inline void cavs_idct8_1d(int16_t *block, uint64_t bias)
static void cavs_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
int i;
- DECLARE_ALIGNED_8(int16_t, b2)[64];
+ DECLARE_ALIGNED(8, int16_t, b2)[64];
for(i=0; i<2; i++){
- DECLARE_ALIGNED_8(uint64_t, tmp);
+ DECLARE_ALIGNED(8, uint64_t, tmp);
cavs_idct8_1d(block+4*i, ff_pw_4);
diff --git a/libavcodec/x86/dsputil_h264_template_mmx.c b/libavcodec/x86/dsputil_h264_template_mmx.c
index 43f4393098..bc2379e1d0 100644
--- a/libavcodec/x86/dsputil_h264_template_mmx.c
+++ b/libavcodec/x86/dsputil_h264_template_mmx.c
@@ -27,8 +27,8 @@
*/
static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y, const uint64_t *rnd_reg)
{
- DECLARE_ALIGNED_8(uint64_t, AA);
- DECLARE_ALIGNED_8(uint64_t, DD);
+ DECLARE_ALIGNED(8, uint64_t, AA);
+ DECLARE_ALIGNED(8, uint64_t, DD);
int i;
if(y==0 && x==0) {
diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c
index be8f616190..4d6a8da0fb 100644
--- a/libavcodec/x86/dsputil_mmx.c
+++ b/libavcodec/x86/dsputil_mmx.c
@@ -39,38 +39,38 @@
int mm_flags; /* multimedia extension flags */
/* pixel operations */
-DECLARE_ALIGNED_8 (const uint64_t, ff_bone) = 0x0101010101010101ULL;
-DECLARE_ALIGNED_8 (const uint64_t, ff_wtwo) = 0x0002000200020002ULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_bone) = 0x0101010101010101ULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_wtwo) = 0x0002000200020002ULL;
-DECLARE_ALIGNED_16(const uint64_t, ff_pdw_80000000)[2] =
+DECLARE_ALIGNED(16, const uint64_t, ff_pdw_80000000)[2] =
{0x8000000080000000ULL, 0x8000000080000000ULL};
-DECLARE_ALIGNED_8 (const uint64_t, ff_pw_3 ) = 0x0003000300030003ULL;
-DECLARE_ALIGNED_8 (const uint64_t, ff_pw_4 ) = 0x0004000400040004ULL;
-DECLARE_ALIGNED_16(const xmm_reg, ff_pw_5 ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
-DECLARE_ALIGNED_16(const xmm_reg, ff_pw_8 ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
-DECLARE_ALIGNED_8 (const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
-DECLARE_ALIGNED_16(const xmm_reg, ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
-DECLARE_ALIGNED_8 (const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
-DECLARE_ALIGNED_16(const xmm_reg, ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
-DECLARE_ALIGNED_16(const xmm_reg, ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
-DECLARE_ALIGNED_8 (const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
-DECLARE_ALIGNED_16(const xmm_reg, ff_pw_64 ) = {0x0040004000400040ULL, 0x0040004000400040ULL};
-DECLARE_ALIGNED_8 (const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
-DECLARE_ALIGNED_8 (const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
-DECLARE_ALIGNED_8 (const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;
-
-DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1 ) = 0x0101010101010101ULL;
-DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3 ) = 0x0303030303030303ULL;
-DECLARE_ALIGNED_8 (const uint64_t, ff_pb_7 ) = 0x0707070707070707ULL;
-DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL;
-DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
-DECLARE_ALIGNED_8 (const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
-DECLARE_ALIGNED_8 (const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL;
-DECLARE_ALIGNED_8 (const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;
-
-DECLARE_ALIGNED_16(const double, ff_pd_1)[2] = { 1.0, 1.0 };
-DECLARE_ALIGNED_16(const double, ff_pd_2)[2] = { 2.0, 2.0 };
+DECLARE_ALIGNED(8, const uint64_t, ff_pw_3 ) = 0x0003000300030003ULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_pw_4 ) = 0x0004000400040004ULL;
+DECLARE_ALIGNED(16, const xmm_reg, ff_pw_5 ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
+DECLARE_ALIGNED(16, const xmm_reg, ff_pw_8 ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
+DECLARE_ALIGNED(8, const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
+DECLARE_ALIGNED(16, const xmm_reg, ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
+DECLARE_ALIGNED(8, const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
+DECLARE_ALIGNED(16, const xmm_reg, ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
+DECLARE_ALIGNED(16, const xmm_reg, ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
+DECLARE_ALIGNED(8, const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
+DECLARE_ALIGNED(16, const xmm_reg, ff_pw_64 ) = {0x0040004000400040ULL, 0x0040004000400040ULL};
+DECLARE_ALIGNED(8, const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;
+
+DECLARE_ALIGNED(8, const uint64_t, ff_pb_1 ) = 0x0101010101010101ULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_pb_3 ) = 0x0303030303030303ULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_pb_7 ) = 0x0707070707070707ULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;
+
+DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
+DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };
#define JUMPALIGN() __asm__ volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)
@@ -2026,7 +2026,7 @@ static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_c
} else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) {
MIX5(IF1,IF0);
} else {
- DECLARE_ALIGNED_16(float, matrix_simd)[in_ch][2][4];
+ DECLARE_ALIGNED(16, float, matrix_simd)[in_ch][2][4];
j = 2*in_ch*sizeof(float);
__asm__ volatile(
"1: \n"
@@ -2413,7 +2413,7 @@ static void ff_x264_deblock_v_luma_intra_mmxext(uint8_t *pix, int stride, int al
#define FLOAT_TO_INT16_INTERLEAVE(cpu, body) \
/* gcc pessimizes register allocation if this is in the same function as float_to_int16_interleave_sse2*/\
static av_noinline void float_to_int16_interleave_misc_##cpu(int16_t *dst, const float **src, long len, int channels){\
- DECLARE_ALIGNED_16(int16_t, tmp)[len];\
+ DECLARE_ALIGNED(16, int16_t, tmp)[len];\
int i,j,c;\
for(c=0; c<channels; c++){\
float_to_int16_##cpu(tmp, src[c], len);\
diff --git a/libavcodec/x86/dsputilenc_mmx.c b/libavcodec/x86/dsputilenc_mmx.c
index 010ba5d5ed..f59788b797 100644
--- a/libavcodec/x86/dsputilenc_mmx.c
+++ b/libavcodec/x86/dsputilenc_mmx.c
@@ -1063,7 +1063,7 @@ static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *src1, c
#define HADAMARD8_DIFF_MMX(cpu) \
static int hadamard8_diff_##cpu(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){\
- DECLARE_ALIGNED_8(uint64_t, temp)[13];\
+ DECLARE_ALIGNED(8, uint64_t, temp)[13];\
int sum;\
\
assert(h==8);\
@@ -1146,7 +1146,7 @@ WRAPPER8_16_SQ(hadamard8_diff_##cpu, hadamard8_diff16_##cpu)
#define HADAMARD8_DIFF_SSE2(cpu) \
static int hadamard8_diff_##cpu(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){\
- DECLARE_ALIGNED_16(uint64_t, temp)[4];\
+ DECLARE_ALIGNED(16, uint64_t, temp)[4];\
int sum;\
\
assert(h==8);\
diff --git a/libavcodec/x86/fft_3dn2.c b/libavcodec/x86/fft_3dn2.c
index dd84f5bad4..b3bec4675a 100644
--- a/libavcodec/x86/fft_3dn2.c
+++ b/libavcodec/x86/fft_3dn2.c
@@ -23,7 +23,7 @@
#include "libavcodec/dsputil.h"
#include "fft.h"
-DECLARE_ALIGNED_8(static const int, m1m1)[2] = { 1<<31, 1<<31 };
+DECLARE_ALIGNED(8, static const int, m1m1)[2] = { 1<<31, 1<<31 };
#ifdef EMULATE_3DNOWEXT
#define PSWAPD(s,d)\
diff --git a/libavcodec/x86/h264dsp_mmx.c b/libavcodec/x86/h264dsp_mmx.c
index de6dbe2131..7effb7add9 100644
--- a/libavcodec/x86/h264dsp_mmx.c
+++ b/libavcodec/x86/h264dsp_mmx.c
@@ -20,8 +20,8 @@
#include "dsputil_mmx.h"
-DECLARE_ALIGNED_8 (static const uint64_t, ff_pb_3_1 ) = 0x0103010301030103ULL;
-DECLARE_ALIGNED_8 (static const uint64_t, ff_pb_7_3 ) = 0x0307030703070307ULL;
+DECLARE_ALIGNED(8, static const uint64_t, ff_pb_3_1 ) = 0x0103010301030103ULL;
+DECLARE_ALIGNED(8, static const uint64_t, ff_pb_7_3 ) = 0x0307030703070307ULL;
/***********************************/
/* IDCT */
@@ -157,12 +157,12 @@ static inline void h264_idct8_1d(int16_t *block)
static void ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
int i;
- DECLARE_ALIGNED_8(int16_t, b2)[64];
+ DECLARE_ALIGNED(8, int16_t, b2)[64];
block[0] += 32;
for(i=0; i<2; i++){
- DECLARE_ALIGNED_8(uint64_t, tmp);
+ DECLARE_ALIGNED(8, uint64_t, tmp);
h264_idct8_1d(block+4*i);
@@ -628,7 +628,7 @@ static void ff_h264_idct_add8_sse2(uint8_t **dest, const int *block_offset, DCTE
static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
- DECLARE_ALIGNED_8(uint64_t, tmp0)[2];
+ DECLARE_ALIGNED(8, uint64_t, tmp0)[2];
__asm__ volatile(
"movq (%2,%4), %%mm0 \n\t" //p1
@@ -690,7 +690,7 @@ static void h264_h_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, in
{
//FIXME: could cut some load/stores by merging transpose with filter
// also, it only needs to transpose 6x8
- DECLARE_ALIGNED_8(uint8_t, trans)[8*8];
+ DECLARE_ALIGNED(8, uint8_t, trans)[8*8];
int i;
for(i=0; i<2; i++, pix+=8*stride, tc0+=2) {
if((tc0[0] & tc0[1]) < 0)
@@ -734,7 +734,7 @@ static void h264_v_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha,
static void h264_h_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
//FIXME: could cut some load/stores by merging transpose with filter
- DECLARE_ALIGNED_8(uint8_t, trans)[8*4];
+ DECLARE_ALIGNED(8, uint8_t, trans)[8*4];
transpose4x4(trans, pix-2, 8, stride);
transpose4x4(trans+4, pix-2+4*stride, 8, stride);
h264_loop_filter_chroma_mmx2(trans+2*8, 8, alpha-1, beta-1, tc0);
@@ -784,7 +784,7 @@ static void h264_v_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int a
static void h264_h_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
//FIXME: could cut some load/stores by merging transpose with filter
- DECLARE_ALIGNED_8(uint8_t, trans)[8*4];
+ DECLARE_ALIGNED(8, uint8_t, trans)[8*4];
transpose4x4(trans, pix-2, 8, stride);
transpose4x4(trans+4, pix-2+4*stride, 8, stride);
h264_loop_filter_chroma_intra_mmx2(trans+2*8, 8, alpha-1, beta-1);
@@ -815,7 +815,7 @@ static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40]
for( dir=1; dir>=0; dir-- ) {
const x86_reg d_idx = dir ? -8 : -1;
const int mask_mv = dir ? mask_mv1 : mask_mv0;
- DECLARE_ALIGNED_8(const uint64_t, mask_dir) = dir ? 0 : 0xffffffffffffffffULL;
+ DECLARE_ALIGNED(8, const uint64_t, mask_dir) = dir ? 0 : 0xffffffffffffffffULL;
int b_idx, edge;
for( b_idx=12, edge=0; edge<edges; edge+=step, b_idx+=8*step ) {
__asm__ volatile(
@@ -2106,7 +2106,7 @@ H264_MC_816(H264_MC_HV, ssse3)
#endif
/* rnd interleaved with rnd div 8, use p+1 to access rnd div 8 */
-DECLARE_ALIGNED_8(static const uint64_t, h264_rnd_reg)[4] = {
+DECLARE_ALIGNED(8, static const uint64_t, h264_rnd_reg)[4] = {
0x0020002000200020ULL, 0x0004000400040004ULL, 0x001C001C001C001CULL, 0x0003000300030003ULL
};
diff --git a/libavcodec/x86/mpegvideo_mmx_template.c b/libavcodec/x86/mpegvideo_mmx_template.c
index 6c782b936e..0d927921e2 100644
--- a/libavcodec/x86/mpegvideo_mmx_template.c
+++ b/libavcodec/x86/mpegvideo_mmx_template.c
@@ -98,7 +98,7 @@ static int RENAME(dct_quantize)(MpegEncContext *s,
x86_reg last_non_zero_p1;
int level=0, q; //=0 is because gcc says uninitialized ...
const uint16_t *qmat, *bias;
- DECLARE_ALIGNED_16(int16_t, temp_block)[64];
+ DECLARE_ALIGNED(16, int16_t, temp_block)[64];
assert((7&(int)(&temp_block[0])) == 0); //did gcc align it correctly?
diff --git a/libavcodec/x86/rv40dsp_mmx.c b/libavcodec/x86/rv40dsp_mmx.c
index 9a702084a4..2b154c0a0b 100644
--- a/libavcodec/x86/rv40dsp_mmx.c
+++ b/libavcodec/x86/rv40dsp_mmx.c
@@ -24,7 +24,7 @@
#include "dsputil_mmx.h"
/* bias interleaved with bias div 8, use p+1 to access bias div 8 */
-DECLARE_ALIGNED_8(static const uint64_t, rv40_bias_reg)[4][8] = {
+DECLARE_ALIGNED(8, static const uint64_t, rv40_bias_reg)[4][8] = {
{ 0x0000000000000000ULL, 0x0000000000000000ULL, 0x0010001000100010ULL, 0x0002000200020002ULL,
0x0020002000200020ULL, 0x0004000400040004ULL, 0x0010001000100010ULL, 0x0002000200020002ULL },
{ 0x0020002000200020ULL, 0x0004000400040004ULL, 0x001C001C001C001CULL, 0x0003000300030003ULL,
diff --git a/libavcodec/x86/snowdsp_mmx.c b/libavcodec/x86/snowdsp_mmx.c
index 95ca2f4fef..0e1259ff64 100644
--- a/libavcodec/x86/snowdsp_mmx.c
+++ b/libavcodec/x86/snowdsp_mmx.c
@@ -25,7 +25,7 @@
void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width){
const int w2= (width+1)>>1;
- DECLARE_ALIGNED_16(IDWTELEM, temp)[width>>1];
+ DECLARE_ALIGNED(16, IDWTELEM, temp)[width>>1];
const int w_l= (width>>1);
const int w_r= w2 - 1;
int i;
diff --git a/libavcodec/x86/vc1dsp_mmx.c b/libavcodec/x86/vc1dsp_mmx.c
index bf96f24702..ef1ce1a480 100644
--- a/libavcodec/x86/vc1dsp_mmx.c
+++ b/libavcodec/x86/vc1dsp_mmx.c
@@ -73,7 +73,7 @@
"movq %%mm"#R1", "#OFF"(%1) \n\t" \
"add %2, %0 \n\t"
-DECLARE_ALIGNED_16(const uint64_t, ff_pw_9) = 0x0009000900090009ULL;
+DECLARE_ALIGNED(16, const uint64_t, ff_pw_9) = 0x0009000900090009ULL;
/** Sacrifying mm6 allows to pipeline loads from src */
static void vc1_put_ver_16b_shift2_mmx(int16_t *dst,
@@ -442,7 +442,7 @@ static void OP ## vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride,\
static const int shift_value[] = { 0, 5, 1, 5 };\
int shift = (shift_value[hmode]+shift_value[vmode])>>1;\
int r;\
- DECLARE_ALIGNED_16(int16_t, tmp)[12*8];\
+ DECLARE_ALIGNED(16, int16_t, tmp)[12*8];\
\
r = (1<<(shift-1)) + rnd-1;\
vc1_put_shift_ver_16bits[vmode](tmp, src-1, stride, r, shift);\
diff --git a/libavcodec/x86/vp3dsp_sse2.c b/libavcodec/x86/vp3dsp_sse2.c
index dfe368b5ae..f3db6da56f 100644
--- a/libavcodec/x86/vp3dsp_sse2.c
+++ b/libavcodec/x86/vp3dsp_sse2.c
@@ -26,7 +26,7 @@
#include "libavcodec/dsputil.h"
#include "dsputil_mmx.h"
-DECLARE_ALIGNED_16(const uint16_t, ff_vp3_idct_data)[7 * 8] =
+DECLARE_ALIGNED(16, const uint16_t, ff_vp3_idct_data)[7 * 8] =
{
64277,64277,64277,64277,64277,64277,64277,64277,
60547,60547,60547,60547,60547,60547,60547,60547,