author     Michael Niedermayer <michaelni@gmx.at>    2011-11-26 01:12:08 +0100
committer  Michael Niedermayer <michaelni@gmx.at>    2011-11-26 01:12:08 +0100
commit     022f8d27dd0a61bfaae729d53d133b17418ea16b (patch)
tree       351bff9edab8404f7850ee05a238c50e6364e19b /libavcodec/arm/dca.h
parent     a11eeb921571b23d1c988f9a2ddda109792948a6 (diff)
parent     f32dfad9dc64acf0fd1bb867e127a9efe6380676 (diff)
Merge remote-tracking branch 'qatar/master'
* qatar/master:
  swscale: Readd #define _SVID_SOURCE
  Fix av_get_channel_layout_string() for positions >31
  configure: Store vda lib flags in extralibs instead of ldflags
  Make channel layout masks unsigned
  dca: ARMv6 optimised decode_blockcode()
  nullenc: drop AVFMT_RAWPICTURE from the flags
  frame-mt: return consumed packet size in ff_thread_decode_frame
  aacdec: add more fate tests covering SBR and PS
  MK(BE)TAG: avoid undefined shifts

Conflicts:
	configure
	libavcodec/arm/dca.h
	libavcodec/dca.c
	libavcodec/mlp_parser.c
	libavcodec/version.h
	libavfilter/asrc_anullsrc.c
	libavfilter/avfilter.c
	libavfilter/avfilter.h
	libavfilter/defaults.c
	libavutil/audioconvert.c
	libavutil/avutil.h
	libswscale/utils.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Diffstat (limited to 'libavcodec/arm/dca.h')
-rw-r--r--  libavcodec/arm/dca.h | 70
1 file changed, 43 insertions(+), 27 deletions(-)
diff --git a/libavcodec/arm/dca.h b/libavcodec/arm/dca.h
index e85b82c1bd..9ff7f7c75e 100644
--- a/libavcodec/arm/dca.h
+++ b/libavcodec/arm/dca.h
@@ -27,38 +27,54 @@
#if HAVE_ARMV6 && HAVE_INLINE_ASM
-#define decode_blockcode decode_blockcode
-static inline int decode_blockcode(int code, int levels, int *values)
+#define decode_blockcodes decode_blockcodes
+static inline int decode_blockcodes(int code1, int code2, int levels,
+ int *values)
{
- int v0, v1, v2, v3;
+ int v0, v1, v2, v3, v4, v5;
- __asm__ ("smmul %4, %8, %11 \n"
- "smlabb %8, %4, %10, %8 \n"
- "smmul %5, %4, %11 \n"
- "sub %8, %8, %9, lsr #1 \n"
- "smlabb %4, %5, %10, %4 \n"
- "smmul %6, %5, %11 \n"
- "str %8, %0 \n"
- "sub %4, %4, %9, lsr #1 \n"
- "smlabb %5, %6, %10, %5 \n"
- "smmul %7, %6, %11 \n"
- "str %4, %1 \n"
- "sub %5, %5, %9, lsr #1 \n"
- "smlabb %6, %7, %10, %6 \n"
- "cmp %7, #0 \n"
- "str %5, %2 \n"
- "sub %6, %6, %9, lsr #1 \n"
- "it eq \n"
- "mvneq %7, #0 \n"
- "str %6, %3 \n"
+ __asm__ ("smmul %8, %14, %18 \n"
+ "smmul %11, %15, %18 \n"
+ "smlabb %14, %8, %17, %14 \n"
+ "smlabb %15, %11, %17, %15 \n"
+ "smmul %9, %8, %18 \n"
+ "smmul %12, %11, %18 \n"
+ "sub %14, %14, %16, lsr #1 \n"
+ "sub %15, %15, %16, lsr #1 \n"
+ "smlabb %8, %9, %17, %8 \n"
+ "smlabb %11, %12, %17, %11 \n"
+ "smmul %10, %9, %18 \n"
+ "smmul %13, %12, %18 \n"
+ "str %14, %0 \n"
+ "str %15, %4 \n"
+ "sub %8, %8, %16, lsr #1 \n"
+ "sub %11, %11, %16, lsr #1 \n"
+ "smlabb %9, %10, %17, %9 \n"
+ "smlabb %12, %13, %17, %12 \n"
+ "smmul %14, %10, %18 \n"
+ "smmul %15, %13, %18 \n"
+ "str %8, %1 \n"
+ "str %11, %5 \n"
+ "sub %9, %9, %16, lsr #1 \n"
+ "sub %12, %12, %16, lsr #1 \n"
+ "smlabb %10, %14, %17, %10 \n"
+ "smlabb %13, %15, %17, %13 \n"
+ "str %9, %2 \n"
+ "str %12, %6 \n"
+ "sub %10, %10, %16, lsr #1 \n"
+ "sub %13, %13, %16, lsr #1 \n"
+ "str %10, %3 \n"
+ "str %13, %7 \n"
: "=m"(values[0]), "=m"(values[1]),
"=m"(values[2]), "=m"(values[3]),
- "=&r"(v0), "=&r"(v1), "=&r"(v2), "=&r"(v3),
- "+&r"(code)
- : "r"(levels - 1), "r"(-levels), "r"(ff_inverse[levels])
- : "cc");
+ "=m"(values[4]), "=m"(values[5]),
+ "=m"(values[6]), "=m"(values[7]),
+ "=&r"(v0), "=&r"(v1), "=&r"(v2),
+ "=&r"(v3), "=&r"(v4), "=&r"(v5),
+ "+&r"(code1), "+&r"(code2)
+ : "r"(levels - 1), "r"(-levels), "r"(ff_inverse[levels]));
- return v3;
+ return code1 | code2;
}
#endif
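
For reference, the merged ARMv6 routine is a two-codeword, unrolled form of the DCA block-code expansion: each code word is treated as a base-`levels` number, and each of its four digits, re-centred around zero by subtracting (levels - 1)/2, becomes one quantised sample. The smmul/smlabb pairs replace true divisions with a multiply-high by ff_inverse[levels] followed by a remainder step. A minimal plain-C sketch of the same computation (the helper name decode_blockcodes_c and the direct divisions are illustrative, not the generic fallback in dca.c verbatim) might look like:

static inline int decode_blockcodes_c(int code1, int code2, int levels,
                                      int *values)
{
    int offset = (levels - 1) >> 1;   /* centre of the symmetric quantiser */
    int i;

    for (i = 0; i < 4; i++) {
        /* The asm gets these quotients from smmul as
         * (code * ff_inverse[levels]) >> 32, and the remainders
         * from smlabb against -levels. */
        int div1 = code1 / levels;
        int div2 = code2 / levels;

        values[i]     = code1 - div1 * levels - offset;  /* next digit, centred */
        values[i + 4] = code2 - div2 * levels - offset;

        code1 = div1;
        code2 = div2;
    }

    /* Both codes must be fully consumed; leftover bits signal a bad block code. */
    return code1 | code2;
}

The interleaving in the asm (v0..v2 carrying the quotient chain for code1, v3..v5 for code2) only hides the latency of the multiply chain; the arithmetic matches the loop above.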