about summary refs log tree commit diff stats
path: root/libavcodec/arm/mathops.h
diff options
context:
space:
mode:
authorMåns Rullgård <mans@mansr.com>2008-12-17 00:54:54 +0000
committerMåns Rullgård <mans@mansr.com>2008-12-17 00:54:54 +0000
commita2fc0f6a6ddf884ace3c96a0d4f09f0932e6db32 (patch)
tree44b807b924e29465a6aed924fcb53c719a83a956 /libavcodec/arm/mathops.h
parent2600f8c86dcaa411a0485b1518e5e1592374aaf6 (diff)
downloadffmpeg-a2fc0f6a6ddf884ace3c96a0d4f09f0932e6db32.tar.gz
ARM: replace "armv4l" with "arm"
Originally committed as revision 16179 to svn://svn.ffmpeg.org/ffmpeg/trunk
Diffstat (limited to 'libavcodec/arm/mathops.h')
-rw-r--r--libavcodec/arm/mathops.h93
1 files changed, 93 insertions, 0 deletions
diff --git a/libavcodec/arm/mathops.h b/libavcodec/arm/mathops.h
new file mode 100644
index 0000000000..e36316c76b
--- /dev/null
+++ b/libavcodec/arm/mathops.h
@@ -0,0 +1,93 @@
+/*
+ * simple math operations
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_ARM_MATHOPS_H
+#define AVCODEC_ARM_MATHOPS_H
+
+#include <stdint.h>
+#include "libavutil/common.h"
+
+# define MULL MULL
+/* Fixed-point multiply: returns the middle 32 bits of the signed 64-bit
+ * product, i.e. (int)(((int64_t)a * b) >> shift).
+ * smull puts the full 64-bit product in hi:lo; the lsr/lsl/add pair then
+ * recombines bits [shift .. shift+31] of that product into hi.
+ * NOTE(review): the "i" constraints require 'shift' to be a compile-time
+ * constant at every (inlined) call site, and 0 < shift < 32 so that both
+ * shift amounts are valid — confirm all callers satisfy this. */
+static inline av_const int MULL(int a, int b, unsigned shift)
+{
+ int lo, hi;
+ __asm__("smull %0, %1, %2, %3 \n\t"
+ "mov %0, %0, lsr %4 \n\t"
+ "add %1, %0, %1, lsl %5 \n\t"
+ : "=&r"(lo), "=&r"(hi)
+ : "r"(b), "r"(a), "i"(shift), "i"(32-shift));
+ return hi;
+}
+
+#define MULH MULH
+/* MULH(a, b): high 32 bits of the signed 64-bit product a * b. */
+#ifdef HAVE_ARMV6
+/* ARMv6 has SMMUL, which computes the high word directly in one
+ * instruction, so no scratch register for the low word is needed. */
+static inline av_const int MULH(int a, int b)
+{
+ int r;
+ __asm__ ("smmul %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
+ return r;
+}
+#else
+/* Pre-ARMv6 fallback: SMULL produces the full 64-bit product in hi:lo;
+ * the low word is computed but discarded. Early-clobber ("=&r") keeps
+ * the outputs out of the input registers, as SMULL requires. */
+static inline av_const int MULH(int a, int b)
+{
+ int lo, hi;
+ __asm__ ("smull %0, %1, %2, %3" : "=&r"(lo), "=&r"(hi) : "r"(b), "r"(a));
+ return hi;
+}
+#endif
+
+/* MUL64(a, b): full signed 32x32 -> 64-bit multiply.
+ * The union lets SMULL write its two 32-bit result registers straight
+ * into the halves of a 64-bit value without extra moves.
+ * NOTE(review): assumes little-endian word order, i.e. hl[0] is the low
+ * word that SMULL writes via %0 — confirm for big-endian ARM configs. */
+static inline av_const int64_t MUL64(int a, int b)
+{
+ union { uint64_t x; unsigned hl[2]; } x;
+ __asm__ ("smull %0, %1, %2, %3"
+ : "=r"(x.hl[0]), "=r"(x.hl[1]) : "r"(a), "r"(b));
+ return x.x;
+}
+#define MUL64 MUL64
+
+/* MAC64(d, a, b): returns d + (int64_t)a * b using SMLAL, which
+ * accumulates the 64-bit product into the hi:lo register pair in place
+ * (hence the "+r" read-write constraints).
+ * NOTE(review): like MUL64, assumes little-endian word order in hl[]. */
+static inline av_const int64_t MAC64(int64_t d, int a, int b)
+{
+ union { uint64_t x; unsigned hl[2]; } x = { d };
+ __asm__ ("smlal %0, %1, %2, %3"
+ : "+r"(x.hl[0]), "+r"(x.hl[1]) : "r"(a), "r"(b));
+ return x.x;
+}
+/* The macro shadows the function so callers' MAC64(d,a,b) updates d in
+ * place; MLS64 (multiply-subtract) is derived by negating a. */
+#define MAC64(d, a, b) ((d) = MAC64(d, a, b))
+#define MLS64(d, a, b) MAC64(d, -(a), b)
+
+#if defined(HAVE_ARMV5TE)
+
+/* signed 16x16 -> 32 multiply add accumulate: rt += ra * rb using the
+ * ARMv5TE SMLABB instruction (bottom halfwords of both operands). */
+# define MAC16(rt, ra, rb) \
+ __asm__ ("smlabb %0, %1, %2, %0" : "+r"(rt) : "r"(ra), "r"(rb));
+
+/* signed 16x16 -> 32 multiply via SMULBB (bottom halfwords).
+ * Fix: the return type was missing ("static inline av_const MUL16"),
+ * relying on implicit int, which is invalid since C99 — declare int. */
+# define MUL16 MUL16
+static inline av_const int MUL16(int ra, int rb)
+{
+ int rt;
+ __asm__ ("smulbb %0, %1, %2" : "=r"(rt) : "r"(ra), "r"(rb));
+ return rt;
+}
+
+#endif
+
+#endif /* AVCODEC_ARM_MATHOPS_H */