about | summary | refs | log | tree | commit | diff | stats
path: root/libavcodec/armv4l
diff options
context:
space:
mode:
authorDiego Pettenò <flameeyes@gmail.com>2008-10-16 13:34:09 +0000
committerDiego Pettenò <flameeyes@gmail.com>2008-10-16 13:34:09 +0000
commitbe449fca79a3b0394143f0a77c99784e65868d9f (patch)
tree5c5b2bbfe648467292b30cc501265e556acab101 /libavcodec/armv4l
parenta14b362fc650a5e036d413033d9709a526662d89 (diff)
downloadffmpeg-be449fca79a3b0394143f0a77c99784e65868d9f.tar.gz
Convert asm keyword into __asm__.
Neither the asm() nor the __asm__() keyword is part of the C99 standard, but while GCC accepts the former in C89 syntax, it is not accepted in C99 unless GNU extensions are turned on (with -fasm). The latter form is accepted in any syntax as an extension (without requiring further command-line options). Sun Studio C99 compiler also does not accept asm() while accepting __asm__(), albeit reporting warnings that it's not valid C99 syntax. Originally committed as revision 15627 to svn://svn.ffmpeg.org/ffmpeg/trunk
Diffstat (limited to 'libavcodec/armv4l')
-rw-r--r--libavcodec/armv4l/dsputil_arm.c4
-rw-r--r--libavcodec/armv4l/dsputil_iwmmxt.c10
-rw-r--r--libavcodec/armv4l/dsputil_iwmmxt_rnd.h30
-rw-r--r--libavcodec/armv4l/float_arm_vfp.c6
-rw-r--r--libavcodec/armv4l/mathops.h14
-rw-r--r--libavcodec/armv4l/mpegvideo_armv5te.c2
-rw-r--r--libavcodec/armv4l/mpegvideo_iwmmxt.c2
7 files changed, 34 insertions, 34 deletions
diff --git a/libavcodec/armv4l/dsputil_arm.c b/libavcodec/armv4l/dsputil_arm.c
index 100b89ef8a..7dfb790c8e 100644
--- a/libavcodec/armv4l/dsputil_arm.c
+++ b/libavcodec/armv4l/dsputil_arm.c
@@ -66,7 +66,7 @@ CALL_2X_PIXELS(put_no_rnd_pixels16_xy2_arm, put_no_rnd_pixels8_xy2_arm, 8)
static void add_pixels_clamped_ARM(short *block, unsigned char *dest, int line_size)
{
- asm volatile (
+ __asm__ volatile (
"mov r10, #8 \n\t"
"1: \n\t"
@@ -206,7 +206,7 @@ static void simple_idct_ipp_add(uint8_t *dest, int line_size, DCTELEM *block)
#ifdef HAVE_ARMV5TE
static void prefetch_arm(void *mem, int stride, int h)
{
- asm volatile(
+ __asm__ volatile(
"1: \n\t"
"subs %0, %0, #1 \n\t"
"pld [%1] \n\t"
diff --git a/libavcodec/armv4l/dsputil_iwmmxt.c b/libavcodec/armv4l/dsputil_iwmmxt.c
index 6d824e2a14..5699e38578 100644
--- a/libavcodec/armv4l/dsputil_iwmmxt.c
+++ b/libavcodec/armv4l/dsputil_iwmmxt.c
@@ -22,7 +22,7 @@
#include "libavcodec/dsputil.h"
#define DEF(x, y) x ## _no_rnd_ ## y ##_iwmmxt
-#define SET_RND(regd) asm volatile ("mov r12, #1 \n\t tbcsth " #regd ", r12":::"r12");
+#define SET_RND(regd) __asm__ volatile ("mov r12, #1 \n\t tbcsth " #regd ", r12":::"r12");
#define WAVG2B "wavg2b"
#include "dsputil_iwmmxt_rnd.h"
#undef DEF
@@ -30,7 +30,7 @@
#undef WAVG2B
#define DEF(x, y) x ## _ ## y ##_iwmmxt
-#define SET_RND(regd) asm volatile ("mov r12, #2 \n\t tbcsth " #regd ", r12":::"r12");
+#define SET_RND(regd) __asm__ volatile ("mov r12, #2 \n\t tbcsth " #regd ", r12":::"r12");
#define WAVG2B "wavg2br"
#include "dsputil_iwmmxt_rnd.h"
#undef DEF
@@ -39,7 +39,7 @@
// need scheduling
#define OP(AVG) \
- asm volatile ( \
+ __asm__ volatile ( \
/* alignment */ \
"and r12, %[pixels], #7 \n\t" \
"bic %[pixels], %[pixels], #7 \n\t" \
@@ -89,7 +89,7 @@ void add_pixels_clamped_iwmmxt(const DCTELEM *block, uint8_t *pixels, int line_s
{
uint8_t *pixels2 = pixels + line_size;
- asm volatile (
+ __asm__ volatile (
"mov r12, #4 \n\t"
"1: \n\t"
"pld [%[pixels], %[line_size2]] \n\t"
@@ -125,7 +125,7 @@ void add_pixels_clamped_iwmmxt(const DCTELEM *block, uint8_t *pixels, int line_s
static void clear_blocks_iwmmxt(DCTELEM *blocks)
{
- asm volatile(
+ __asm__ volatile(
"wzero wr0 \n\t"
"mov r1, #(128 * 6 / 32) \n\t"
"1: \n\t"
diff --git a/libavcodec/armv4l/dsputil_iwmmxt_rnd.h b/libavcodec/armv4l/dsputil_iwmmxt_rnd.h
index 349ece65c7..fddbdae12c 100644
--- a/libavcodec/armv4l/dsputil_iwmmxt_rnd.h
+++ b/libavcodec/armv4l/dsputil_iwmmxt_rnd.h
@@ -26,7 +26,7 @@
void DEF(put, pixels8)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
{
int stride = line_size;
- asm volatile (
+ __asm__ volatile (
"and r12, %[pixels], #7 \n\t"
"bic %[pixels], %[pixels], #7 \n\t"
"tmcr wcgr1, r12 \n\t"
@@ -60,7 +60,7 @@ void DEF(put, pixels8)(uint8_t *block, const uint8_t *pixels, const int line_siz
void DEF(avg, pixels8)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
{
int stride = line_size;
- asm volatile (
+ __asm__ volatile (
"and r12, %[pixels], #7 \n\t"
"bic %[pixels], %[pixels], #7 \n\t"
"tmcr wcgr1, r12 \n\t"
@@ -102,7 +102,7 @@ void DEF(avg, pixels8)(uint8_t *block, const uint8_t *pixels, const int line_siz
void DEF(put, pixels16)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
{
int stride = line_size;
- asm volatile (
+ __asm__ volatile (
"and r12, %[pixels], #7 \n\t"
"bic %[pixels], %[pixels], #7 \n\t"
"tmcr wcgr1, r12 \n\t"
@@ -142,7 +142,7 @@ void DEF(put, pixels16)(uint8_t *block, const uint8_t *pixels, const int line_si
void DEF(avg, pixels16)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
{
int stride = line_size;
- asm volatile (
+ __asm__ volatile (
"pld [%[pixels]] \n\t"
"pld [%[pixels], #32] \n\t"
"pld [%[block]] \n\t"
@@ -201,7 +201,7 @@ void DEF(put, pixels8_x2)(uint8_t *block, const uint8_t *pixels, const int line_
// [wr0 wr1 wr2 wr3] for previous line
// [wr4 wr5 wr6 wr7] for current line
SET_RND(wr15); // =2 for rnd and =1 for no_rnd version
- asm volatile(
+ __asm__ volatile(
"pld [%[pixels]] \n\t"
"pld [%[pixels], #32] \n\t"
"and r12, %[pixels], #7 \n\t"
@@ -250,7 +250,7 @@ void DEF(put, pixels16_x2)(uint8_t *block, const uint8_t *pixels, const int line
// [wr0 wr1 wr2 wr3] for previous line
// [wr4 wr5 wr6 wr7] for current line
SET_RND(wr15); // =2 for rnd and =1 for no_rnd version
- asm volatile(
+ __asm__ volatile(
"pld [%[pixels]] \n\t"
"pld [%[pixels], #32] \n\t"
"and r12, %[pixels], #7 \n\t"
@@ -311,7 +311,7 @@ void DEF(avg, pixels8_x2)(uint8_t *block, const uint8_t *pixels, const int line_
// [wr0 wr1 wr2 wr3] for previous line
// [wr4 wr5 wr6 wr7] for current line
SET_RND(wr15); // =2 for rnd and =1 for no_rnd version
- asm volatile(
+ __asm__ volatile(
"pld [%[pixels]] \n\t"
"pld [%[pixels], #32] \n\t"
"pld [%[block]] \n\t"
@@ -372,7 +372,7 @@ void DEF(avg, pixels16_x2)(uint8_t *block, const uint8_t *pixels, const int line
// [wr0 wr1 wr2 wr3] for previous line
// [wr4 wr5 wr6 wr7] for current line
SET_RND(wr15); // =2 for rnd and =1 for no_rnd version
- asm volatile(
+ __asm__ volatile(
"pld [%[pixels]] \n\t"
"pld [%[pixels], #32] \n\t"
"pld [%[block]] \n\t"
@@ -448,7 +448,7 @@ void DEF(avg, pixels8_y2)(uint8_t *block, const uint8_t *pixels, const int line_
int stride = line_size;
// [wr0 wr1 wr2 wr3] for previous line
// [wr4 wr5 wr6 wr7] for current line
- asm volatile(
+ __asm__ volatile(
"pld [%[pixels]] \n\t"
"pld [%[pixels], #32] \n\t"
"and r12, %[pixels], #7 \n\t"
@@ -502,7 +502,7 @@ void DEF(put, pixels16_y2)(uint8_t *block, const uint8_t *pixels, const int line
int stride = line_size;
// [wr0 wr1 wr2 wr3] for previous line
// [wr4 wr5 wr6 wr7] for current line
- asm volatile(
+ __asm__ volatile(
"pld [%[pixels]] \n\t"
"pld [%[pixels], #32] \n\t"
"and r12, %[pixels], #7 \n\t"
@@ -559,7 +559,7 @@ void DEF(avg, pixels16_y2)(uint8_t *block, const uint8_t *pixels, const int line
int stride = line_size;
// [wr0 wr1 wr2 wr3] for previous line
// [wr4 wr5 wr6 wr7] for current line
- asm volatile(
+ __asm__ volatile(
"pld [%[pixels]] \n\t"
"pld [%[pixels], #32] \n\t"
"and r12, %[pixels], #7 \n\t"
@@ -627,7 +627,7 @@ void DEF(put, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, const int line
// [wr0 wr1 wr2 wr3] for previous line
// [wr4 wr5 wr6 wr7] for current line
SET_RND(wr15); // =2 for rnd and =1 for no_rnd version
- asm volatile(
+ __asm__ volatile(
"pld [%[pixels]] \n\t"
"mov r12, #2 \n\t"
"pld [%[pixels], #32] \n\t"
@@ -721,7 +721,7 @@ void DEF(put, pixels16_xy2)(uint8_t *block, const uint8_t *pixels, const int lin
// [wr0 wr1 wr2 wr3] for previous line
// [wr4 wr5 wr6 wr7] for current line
SET_RND(wr15); // =2 for rnd and =1 for no_rnd version
- asm volatile(
+ __asm__ volatile(
"pld [%[pixels]] \n\t"
"mov r12, #2 \n\t"
"pld [%[pixels], #32] \n\t"
@@ -863,7 +863,7 @@ void DEF(avg, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, const int line
// [wr0 wr1 wr2 wr3] for previous line
// [wr4 wr5 wr6 wr7] for current line
SET_RND(wr15); // =2 for rnd and =1 for no_rnd version
- asm volatile(
+ __asm__ volatile(
"pld [%[block]] \n\t"
"pld [%[block], #32] \n\t"
"pld [%[pixels]] \n\t"
@@ -967,7 +967,7 @@ void DEF(avg, pixels16_xy2)(uint8_t *block, const uint8_t *pixels, const int lin
// [wr0 wr1 wr2 wr3] for previous line
// [wr4 wr5 wr6 wr7] for current line
SET_RND(wr15); // =2 for rnd and =1 for no_rnd version
- asm volatile(
+ __asm__ volatile(
"pld [%[block]] \n\t"
"pld [%[block], #32] \n\t"
"pld [%[pixels]] \n\t"
diff --git a/libavcodec/armv4l/float_arm_vfp.c b/libavcodec/armv4l/float_arm_vfp.c
index 3ba6f73fee..00b950df7e 100644
--- a/libavcodec/armv4l/float_arm_vfp.c
+++ b/libavcodec/armv4l/float_arm_vfp.c
@@ -42,7 +42,7 @@
static void vector_fmul_vfp(float *dst, const float *src, int len)
{
int tmp;
- asm volatile(
+ __asm__ volatile(
"fmrx %[tmp], fpscr\n\t"
"orr %[tmp], %[tmp], #(3 << 16)\n\t" /* set vector size to 4 */
"fmxr fpscr, %[tmp]\n\t"
@@ -90,7 +90,7 @@ static void vector_fmul_vfp(float *dst, const float *src, int len)
static void vector_fmul_reverse_vfp(float *dst, const float *src0, const float *src1, int len)
{
src1 += len;
- asm volatile(
+ __asm__ volatile(
"fldmdbs %[src1]!, {s0-s3}\n\t"
"fldmias %[src0]!, {s8-s11}\n\t"
"fldmdbs %[src1]!, {s4-s7}\n\t"
@@ -149,7 +149,7 @@ static void vector_fmul_reverse_vfp(float *dst, const float *src0, const float *
*/
void float_to_int16_vfp(int16_t *dst, const float *src, int len)
{
- asm volatile(
+ __asm__ volatile(
"fldmias %[src]!, {s16-s23}\n\t"
"ftosis s0, s16\n\t"
"ftosis s1, s17\n\t"
diff --git a/libavcodec/armv4l/mathops.h b/libavcodec/armv4l/mathops.h
index 9602cd4f9f..5364833b78 100644
--- a/libavcodec/armv4l/mathops.h
+++ b/libavcodec/armv4l/mathops.h
@@ -25,7 +25,7 @@
#ifdef FRAC_BITS
# define MULL(a, b) \
({ int lo, hi;\
- asm("smull %0, %1, %2, %3 \n\t"\
+ __asm__("smull %0, %1, %2, %3 \n\t"\
"mov %0, %0, lsr %4\n\t"\
"add %1, %0, %1, lsl %5\n\t"\
: "=&r"(lo), "=&r"(hi)\
@@ -37,21 +37,21 @@
static inline av_const int MULH(int a, int b)
{
int r;
- asm ("smmul %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
+ __asm__ ("smmul %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
return r;
}
#define MULH MULH
#else
#define MULH(a, b) \
({ int lo, hi;\
- asm ("smull %0, %1, %2, %3" : "=&r"(lo), "=&r"(hi) : "r"(b), "r"(a));\
+ __asm__ ("smull %0, %1, %2, %3" : "=&r"(lo), "=&r"(hi) : "r"(b), "r"(a));\
hi; })
#endif
static inline av_const int64_t MUL64(int a, int b)
{
union { uint64_t x; unsigned hl[2]; } x;
- asm ("smull %0, %1, %2, %3"
+ __asm__ ("smull %0, %1, %2, %3"
: "=r"(x.hl[0]), "=r"(x.hl[1]) : "r"(a), "r"(b));
return x.x;
}
@@ -60,7 +60,7 @@ static inline av_const int64_t MUL64(int a, int b)
static inline av_const int64_t MAC64(int64_t d, int a, int b)
{
union { uint64_t x; unsigned hl[2]; } x = { d };
- asm ("smlal %0, %1, %2, %3"
+ __asm__ ("smlal %0, %1, %2, %3"
: "+r"(x.hl[0]), "+r"(x.hl[1]) : "r"(a), "r"(b));
return x.x;
}
@@ -71,11 +71,11 @@ static inline av_const int64_t MAC64(int64_t d, int a, int b)
/* signed 16x16 -> 32 multiply add accumulate */
# define MAC16(rt, ra, rb) \
- asm ("smlabb %0, %2, %3, %0" : "=r" (rt) : "0" (rt), "r" (ra), "r" (rb));
+ __asm__ ("smlabb %0, %2, %3, %0" : "=r" (rt) : "0" (rt), "r" (ra), "r" (rb));
/* signed 16x16 -> 32 multiply */
# define MUL16(ra, rb) \
({ int __rt; \
- asm ("smulbb %0, %1, %2" : "=r" (__rt) : "r" (ra), "r" (rb)); \
+ __asm__ ("smulbb %0, %1, %2" : "=r" (__rt) : "r" (ra), "r" (rb)); \
__rt; })
#endif
diff --git a/libavcodec/armv4l/mpegvideo_armv5te.c b/libavcodec/armv4l/mpegvideo_armv5te.c
index 721dee5152..0ecadb4120 100644
--- a/libavcodec/armv4l/mpegvideo_armv5te.c
+++ b/libavcodec/armv4l/mpegvideo_armv5te.c
@@ -65,7 +65,7 @@ static inline void dct_unquantize_h263_helper_c(DCTELEM *block, int qmul, int qa
({ DCTELEM *xblock = xxblock; \
int xqmul = xxqmul, xqadd = xxqadd, xcount = xxcount, xtmp; \
int xdata1, xdata2; \
-asm volatile( \
+__asm__ volatile( \
"subs %[count], %[count], #2 \n\t" \
"ble 2f \n\t" \
"ldrd r4, [%[block], #0] \n\t" \
diff --git a/libavcodec/armv4l/mpegvideo_iwmmxt.c b/libavcodec/armv4l/mpegvideo_iwmmxt.c
index 108631f649..a6f8234d7b 100644
--- a/libavcodec/armv4l/mpegvideo_iwmmxt.c
+++ b/libavcodec/armv4l/mpegvideo_iwmmxt.c
@@ -48,7 +48,7 @@ static void dct_unquantize_h263_intra_iwmmxt(MpegEncContext *s,
else
nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
- asm volatile (
+ __asm__ volatile (
/* "movd %1, %%mm6 \n\t" //qmul */
/* "packssdw %%mm6, %%mm6 \n\t" */
/* "packssdw %%mm6, %%mm6 \n\t" */