author     Michael Niedermayer <michaelni@gmx.at>    2011-05-29 02:55:19 +0200
committer  Michael Niedermayer <michaelni@gmx.at>    2011-05-29 03:34:35 +0200
commit     b8a43bc1b50f409414493a05f6c4b7895ca4ddf9 (patch)
tree       95dda1b7289aac9bdb1f457417baf9515aa4383a /libavcodec/arm
parent     39d607e5bbc25ad9629683702b510e865434ef21 (diff)
parent     90da52f01f8b6c22af22a002eb226989b1cf7ef8 (diff)
download   ffmpeg-b8a43bc1b50f409414493a05f6c4b7895ca4ddf9.tar.gz
Merge remote-tracking branch 'qatar/master' into master
* qatar/master: (27 commits)
  ac3enc: fix LOCAL_ALIGNED usage in count_mantissa_bits()
  ac3dsp: do not use the ff_* prefix when referencing ff_ac3_bap_bits.
  ac3dsp: fix loop condition in ac3_update_bap_counts_c()
  ARM: unbreak build
  ac3enc: modify mantissa bit counting to keep bap counts for all values of bap instead of just 0 to 4.
  ac3enc: split mantissa bit counting into a separate function.
  ac3enc: store per-block/channel bap pointers by reference block in a 2D array rather than in the AC3Block struct.
  get_bits: add av_unused tag to cache variable
  sws: replace all long with int.
  ARM: aacdec: fix constraints on inline asm
  ARM: remove unnecessary volatile from inline asm
  ARM: add "cc" clobbers to inline asm where needed
  ARM: improve FASTDIV asm
  ac3enc: use LOCAL_ALIGNED macro
  APIchanges: fill in git hash for av_get_pix_fmt_name (0420bd7).
  lavu: add av_get_pix_fmt_name() convenience function
  cmdutils: remove OPT_FUNC2
  swscale: fix crash in bilinear scaling.
  vpxenc: add VP8E_SET_STATIC_THRESHOLD mapping
  webm: support stereo videos in matroska/webm muxer
  ...

Conflicts:
    Changelog
    cmdutils.c
    cmdutils.h
    doc/APIchanges
    doc/muxers.texi
    ffmpeg.c
    ffplay.c
    libavcodec/ac3enc.c
    libavcodec/ac3enc_float.c
    libavcodec/avcodec.h
    libavcodec/get_bits.h
    libavcodec/libvpxenc.c
    libavcodec/version.h
    libavdevice/libdc1394.c
    libavformat/matroskaenc.c
    libavutil/avutil.h
    libswscale/rgb2rgb.c
    libswscale/swscale.c
    libswscale/swscale_template.c
    libswscale/x86/swscale_template.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
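Two of the merged commits touch the public API rather than the ARM code shown below: lavu gains av_get_pix_fmt_name(), and APIchanges records its hash. A minimal usage sketch, as an illustration rather than code from this merge; at this point in history the enum is still spelled PixelFormat and the function is declared in libavutil/pixdesc.h:

#include <stdio.h>
#include <libavutil/pixdesc.h>

int main(void)
{
    /* av_get_pix_fmt_name() returns a static string such as
     * "yuv420p", or NULL if the format is invalid. */
    const char *name = av_get_pix_fmt_name(PIX_FMT_YUV420P);
    printf("%s\n", name ? name : "unknown");
    return 0;
}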
Diffstat (limited to 'libavcodec/arm')
-rw-r--r--  libavcodec/arm/Makefile             1
-rw-r--r--  libavcodec/arm/aac.h              153
-rw-r--r--  libavcodec/arm/ac3dsp_arm.S        52
-rw-r--r--  libavcodec/arm/ac3dsp_init_arm.c    2
-rw-r--r--  libavcodec/arm/mathops.h            5
-rw-r--r--  libavcodec/arm/vp56_arith.h        64
6 files changed, 114 insertions, 163 deletions
diff --git a/libavcodec/arm/Makefile b/libavcodec/arm/Makefile
index a5abfdd128..a5a5dfab64 100644
--- a/libavcodec/arm/Makefile
+++ b/libavcodec/arm/Makefile
@@ -1,5 +1,4 @@
OBJS-$(CONFIG_AC3DSP) += arm/ac3dsp_init_arm.o \
- arm/ac3dsp_arm.o
OBJS-$(CONFIG_DCA_DECODER) += arm/dcadsp_init_arm.o \
diff --git a/libavcodec/arm/aac.h b/libavcodec/arm/aac.h
index 390cdbfcf9..3b14c094c6 100644
--- a/libavcodec/arm/aac.h
+++ b/libavcodec/arm/aac.h
@@ -30,17 +30,17 @@ static inline float *VMUL2(float *dst, const float *v, unsigned idx,
const float *scale)
{
unsigned v0, v1;
- __asm__ volatile ("ubfx %0, %4, #0, #4 \n\t"
- "ubfx %1, %4, #4, #4 \n\t"
- "ldr %0, [%3, %0, lsl #2] \n\t"
- "ldr %1, [%3, %1, lsl #2] \n\t"
- "vld1.32 {d1[]}, [%5,:32] \n\t"
- "vmov d0, %0, %1 \n\t"
- "vmul.f32 d0, d0, d1 \n\t"
- "vst1.32 {d0}, [%2,:64]! \n\t"
- : "=&r"(v0), "=&r"(v1), "+r"(dst)
- : "r"(v), "r"(idx), "r"(scale)
- : "d0", "d1");
+ __asm__ ("ubfx %0, %6, #0, #4 \n\t"
+ "ubfx %1, %6, #4, #4 \n\t"
+ "ldr %0, [%5, %0, lsl #2] \n\t"
+ "ldr %1, [%5, %1, lsl #2] \n\t"
+ "vld1.32 {d1[]}, [%7,:32] \n\t"
+ "vmov d0, %0, %1 \n\t"
+ "vmul.f32 d0, d0, d1 \n\t"
+ "vst1.32 {d0}, [%2,:64]! \n\t"
+ : "=&r"(v0), "=&r"(v1), "+r"(dst), "=m"(dst[0]), "=m"(dst[1])
+ : "r"(v), "r"(idx), "r"(scale)
+ : "d0", "d1");
return dst;
}
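The aac.h hunks in this merge all apply the same fix: the volatile qualifier is dropped, and the words the asm stores through dst are declared as "=m" outputs instead, so the compiler's ordinary dependency tracking keeps the statement alive and correctly ordered. A toy sketch of the idiom (illustrative only, assuming an ARM target and GCC-style inline asm); as in the hunks above, the "=m" operand is not referenced in the template and serves purely to declare the side effect:

/* The "=m"(p[0]) output tells GCC this asm writes p[0]; no
 * volatile is needed to keep the statement from being removed. */
static inline void store_one(int *p, int x)
{
    __asm__ ("str %2, [%1]"
             : "=m"(p[0])         /* memory the asm writes     */
             : "r"(p), "r"(x));   /* address and value, inputs */
}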
@@ -49,22 +49,23 @@ static inline float *VMUL4(float *dst, const float *v, unsigned idx,
const float *scale)
{
unsigned v0, v1, v2, v3;
- __asm__ volatile ("ubfx %0, %6, #0, #2 \n\t"
- "ubfx %1, %6, #2, #2 \n\t"
- "ldr %0, [%5, %0, lsl #2] \n\t"
- "ubfx %2, %6, #4, #2 \n\t"
- "ldr %1, [%5, %1, lsl #2] \n\t"
- "ubfx %3, %6, #6, #2 \n\t"
- "ldr %2, [%5, %2, lsl #2] \n\t"
- "vmov d0, %0, %1 \n\t"
- "ldr %3, [%5, %3, lsl #2] \n\t"
- "vld1.32 {d2[],d3[]},[%7,:32] \n\t"
- "vmov d1, %2, %3 \n\t"
- "vmul.f32 q0, q0, q1 \n\t"
- "vst1.32 {q0}, [%4,:128]! \n\t"
- : "=&r"(v0), "=&r"(v1), "=&r"(v2), "=&r"(v3), "+r"(dst)
- : "r"(v), "r"(idx), "r"(scale)
- : "d0", "d1", "d2", "d3");
+ __asm__ ("ubfx %0, %10, #0, #2 \n\t"
+ "ubfx %1, %10, #2, #2 \n\t"
+ "ldr %0, [%9, %0, lsl #2] \n\t"
+ "ubfx %2, %10, #4, #2 \n\t"
+ "ldr %1, [%9, %1, lsl #2] \n\t"
+ "ubfx %3, %10, #6, #2 \n\t"
+ "ldr %2, [%9, %2, lsl #2] \n\t"
+ "vmov d0, %0, %1 \n\t"
+ "ldr %3, [%9, %3, lsl #2] \n\t"
+ "vld1.32 {d2[],d3[]},[%11,:32] \n\t"
+ "vmov d1, %2, %3 \n\t"
+ "vmul.f32 q0, q0, q1 \n\t"
+ "vst1.32 {q0}, [%4,:128]! \n\t"
+ : "=&r"(v0), "=&r"(v1), "=&r"(v2), "=&r"(v3), "+r"(dst),
+ "=m"(dst[0]), "=m"(dst[1]), "=m"(dst[2]), "=m"(dst[3])
+ : "r"(v), "r"(idx), "r"(scale)
+ : "d0", "d1", "d2", "d3");
return dst;
}
@@ -73,22 +74,23 @@ static inline float *VMUL2S(float *dst, const float *v, unsigned idx,
unsigned sign, const float *scale)
{
unsigned v0, v1, v2, v3;
- __asm__ volatile ("ubfx %0, %6, #0, #4 \n\t"
- "ubfx %1, %6, #4, #4 \n\t"
- "ldr %0, [%5, %0, lsl #2] \n\t"
- "lsl %2, %8, #30 \n\t"
- "ldr %1, [%5, %1, lsl #2] \n\t"
- "lsl %3, %8, #31 \n\t"
- "vmov d0, %0, %1 \n\t"
- "bic %2, %2, #1<<30 \n\t"
- "vld1.32 {d1[]}, [%7,:32] \n\t"
- "vmov d2, %2, %3 \n\t"
- "veor d0, d0, d2 \n\t"
- "vmul.f32 d0, d0, d1 \n\t"
- "vst1.32 {d0}, [%4,:64]! \n\t"
- : "=&r"(v0), "=&r"(v1), "=&r"(v2), "=&r"(v3), "+r"(dst)
- : "r"(v), "r"(idx), "r"(scale), "r"(sign)
- : "d0", "d1", "d2");
+ __asm__ ("ubfx %0, %8, #0, #4 \n\t"
+ "ubfx %1, %8, #4, #4 \n\t"
+ "ldr %0, [%7, %0, lsl #2] \n\t"
+ "lsl %2, %10, #30 \n\t"
+ "ldr %1, [%7, %1, lsl #2] \n\t"
+ "lsl %3, %10, #31 \n\t"
+ "vmov d0, %0, %1 \n\t"
+ "bic %2, %2, #1<<30 \n\t"
+ "vld1.32 {d1[]}, [%9,:32] \n\t"
+ "vmov d2, %2, %3 \n\t"
+ "veor d0, d0, d2 \n\t"
+ "vmul.f32 d0, d0, d1 \n\t"
+ "vst1.32 {d0}, [%4,:64]! \n\t"
+ : "=&r"(v0), "=&r"(v1), "=&r"(v2), "=&r"(v3), "+r"(dst),
+ "=m"(dst[0]), "=m"(dst[1])
+ : "r"(v), "r"(idx), "r"(scale), "r"(sign)
+ : "d0", "d1", "d2");
return dst;
}
@@ -97,38 +99,39 @@ static inline float *VMUL4S(float *dst, const float *v, unsigned idx,
unsigned sign, const float *scale)
{
unsigned v0, v1, v2, v3, nz;
- __asm__ volatile ("vld1.32 {d2[],d3[]},[%9,:32] \n\t"
- "ubfx %0, %8, #0, #2 \n\t"
- "ubfx %1, %8, #2, #2 \n\t"
- "ldr %0, [%7, %0, lsl #2] \n\t"
- "ubfx %2, %8, #4, #2 \n\t"
- "ldr %1, [%7, %1, lsl #2] \n\t"
- "ubfx %3, %8, #6, #2 \n\t"
- "ldr %2, [%7, %2, lsl #2] \n\t"
- "vmov d0, %0, %1 \n\t"
- "ldr %3, [%7, %3, lsl #2] \n\t"
- "lsr %6, %8, #12 \n\t"
- "rbit %6, %6 \n\t"
- "vmov d1, %2, %3 \n\t"
- "lsls %6, %6, #1 \n\t"
- "and %0, %5, #1<<31 \n\t"
- "lslcs %5, %5, #1 \n\t"
- "lsls %6, %6, #1 \n\t"
- "and %1, %5, #1<<31 \n\t"
- "lslcs %5, %5, #1 \n\t"
- "lsls %6, %6, #1 \n\t"
- "and %2, %5, #1<<31 \n\t"
- "lslcs %5, %5, #1 \n\t"
- "vmov d4, %0, %1 \n\t"
- "and %3, %5, #1<<31 \n\t"
- "vmov d5, %2, %3 \n\t"
- "veor q0, q0, q2 \n\t"
- "vmul.f32 q0, q0, q1 \n\t"
- "vst1.32 {q0}, [%4,:128]! \n\t"
- : "=&r"(v0), "=&r"(v1), "=&r"(v2), "=&r"(v3), "+r"(dst),
- "+r"(sign), "=r"(nz)
- : "r"(v), "r"(idx), "r"(scale)
- : "d0", "d1", "d2", "d3", "d4", "d5");
+ __asm__ ("vld1.32 {d2[],d3[]},[%13,:32] \n\t"
+ "ubfx %0, %12, #0, #2 \n\t"
+ "ubfx %1, %12, #2, #2 \n\t"
+ "ldr %0, [%11,%0, lsl #2] \n\t"
+ "ubfx %2, %12, #4, #2 \n\t"
+ "ldr %1, [%11,%1, lsl #2] \n\t"
+ "ubfx %3, %12, #6, #2 \n\t"
+ "ldr %2, [%11,%2, lsl #2] \n\t"
+ "vmov d0, %0, %1 \n\t"
+ "ldr %3, [%11,%3, lsl #2] \n\t"
+ "lsr %6, %12, #12 \n\t"
+ "rbit %6, %6 \n\t"
+ "vmov d1, %2, %3 \n\t"
+ "lsls %6, %6, #1 \n\t"
+ "and %0, %5, #1<<31 \n\t"
+ "lslcs %5, %5, #1 \n\t"
+ "lsls %6, %6, #1 \n\t"
+ "and %1, %5, #1<<31 \n\t"
+ "lslcs %5, %5, #1 \n\t"
+ "lsls %6, %6, #1 \n\t"
+ "and %2, %5, #1<<31 \n\t"
+ "lslcs %5, %5, #1 \n\t"
+ "vmov d4, %0, %1 \n\t"
+ "and %3, %5, #1<<31 \n\t"
+ "vmov d5, %2, %3 \n\t"
+ "veor q0, q0, q2 \n\t"
+ "vmul.f32 q0, q0, q1 \n\t"
+ "vst1.32 {q0}, [%4,:128]! \n\t"
+ : "=&r"(v0), "=&r"(v1), "=&r"(v2), "=&r"(v3), "+r"(dst),
+ "+r"(sign), "=r"(nz),
+ "=m"(dst[0]), "=m"(dst[1]), "=m"(dst[2]), "=m"(dst[3])
+ : "r"(v), "r"(idx), "r"(scale)
+ : "cc", "d0", "d1", "d2", "d3", "d4", "d5");
return dst;
}
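Note also that all four functions keep their scratch outputs as "=&r" (early-clobber): each scratch register is written before the last input register has been read, so GCC must not alias it with an input. VMUL4S additionally gains a "cc" clobber because its lsls/lslcs sequence sets and tests the flags. A hedged illustration of the early-clobber rule (not FFmpeg code; ubfx requires ARMv6T2 or later):

static inline unsigned low_nibble_load(const unsigned *tab, unsigned idx)
{
    unsigned r;
    __asm__ ("ubfx %0, %2, #0, #4       \n\t" /* writes %0 early...        */
             "ldr  %0, [%1, %0, lsl #2] \n\t" /* ...but %1 is still needed */
             : "=&r"(r)                       /* so %0 must not alias %1   */
             : "r"(tab), "r"(idx));
    return r;
}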
diff --git a/libavcodec/arm/ac3dsp_arm.S b/libavcodec/arm/ac3dsp_arm.S
deleted file mode 100644
index d7d498e41f..0000000000
--- a/libavcodec/arm/ac3dsp_arm.S
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2011 Mans Rullgard <mans@mansr.com>
- *
- * This file is part of Libav.
- *
- * Libav is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * Libav is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "asm.S"
-
-function ff_ac3_compute_mantissa_size_arm, export=1
- push {r4-r8,lr}
- ldm r0, {r4-r8}
- mov r3, r0
- mov r0, #0
-1:
- ldrb lr, [r1], #1
- subs r2, r2, #1
- blt 2f
- cmp lr, #4
- bgt 3f
- subs lr, lr, #1
- addlt r4, r4, #1
- addeq r5, r5, #1
- ble 1b
- subs lr, lr, #2
- addlt r6, r6, #1
- addeq r7, r7, #1
- addgt r8, r8, #1
- b 1b
-3:
- cmp lr, #14
- sublt lr, lr, #1
- addgt r0, r0, #16
- addle r0, r0, lr
- b 1b
-2:
- stm r3, {r4-r8}
- pop {r4-r8,pc}
-endfunc
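This deletes the hand-written ARM compute_mantissa_size(): the "ac3enc: modify mantissa bit counting" commit merged here changes the interface (bap counts are now kept for all values of bap, not just 0 to 4), so the asm no longer matches, and removing it is what "ARM: unbreak build" refers to. For reference, a hedged C reading of what the deleted asm computed, reconstructed from the instructions above rather than taken from FFmpeg source:

#include <stdint.h>

/* cnt[0..4] tally the low bap values; the return value accumulates
 * mantissa bits for baps 5 and up (a reconstruction, not FFmpeg code). */
static int compute_mantissa_size_c(int cnt[5], const uint8_t *bap,
                                   int nb_coefs)
{
    int bits = 0;
    for (int i = 0; i < nb_coefs; i++) {
        int b = bap[i];
        if (b <= 4)
            cnt[b]++;          /* baps 0-4: counted per value        */
        else if (b < 14)
            bits += b - 1;     /* baps 5-13: bap-1 bits per mantissa */
        else if (b == 14)
            bits += 14;
        else
            bits += 16;        /* bap 15: 16-bit mantissas           */
    }
    return bits;
}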
diff --git a/libavcodec/arm/ac3dsp_init_arm.c b/libavcodec/arm/ac3dsp_init_arm.c
index 9f01507853..65790cdc51 100644
--- a/libavcodec/arm/ac3dsp_init_arm.c
+++ b/libavcodec/arm/ac3dsp_init_arm.c
@@ -39,8 +39,6 @@ int ff_ac3_compute_mantissa_size_arm(int cnt[5], uint8_t *bap, int nb_coefs);
av_cold void ff_ac3dsp_init_arm(AC3DSPContext *c, int bit_exact)
{
- c->compute_mantissa_size = ff_ac3_compute_mantissa_size_arm;
-
if (HAVE_ARMV6) {
c->bit_alloc_calc_bap = ff_ac3_bit_alloc_calc_bap_armv6;
}
diff --git a/libavcodec/arm/mathops.h b/libavcodec/arm/mathops.h
index 2244fa19ae..b4fb371739 100644
--- a/libavcodec/arm/mathops.h
+++ b/libavcodec/arm/mathops.h
@@ -97,7 +97,7 @@ static inline av_const int MUL16(int ra, int rb)
static inline av_const int mid_pred(int a, int b, int c)
{
int m;
- __asm__ volatile (
+ __asm__ (
"mov %0, %2 \n\t"
"cmp %1, %2 \n\t"
"movgt %0, %1 \n\t"
@@ -107,7 +107,8 @@ static inline av_const int mid_pred(int a, int b, int c)
"cmp %0, %1 \n\t"
"movgt %0, %1 \n\t"
: "=&r"(m), "+r"(a)
- : "r"(b), "r"(c));
+ : "r"(b), "r"(c)
+ : "cc");
return m;
}
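Here volatile was doing double duty, badly: it kept the asm from being deleted but said nothing about the condition flags that cmp overwrites. Dropping volatile lets GCC remove the statement when the result is unused, and the new "cc" clobber is what actually protects surrounding code that holds live flags. A minimal sketch of the rule (assumes an ARM target and GCC-style inline asm):

static inline int clamp_negative_to_zero(int x)
{
    int r;
    __asm__ ("cmp   %1, #0 \n\t"   /* overwrites N/Z/C/V           */
             "movlt %0, #0 \n\t"
             "movge %0, %1 \n\t"
             : "=&r"(r)            /* written before %1's last use */
             : "r"(x)
             : "cc");              /* declare the flag clobber     */
    return r;
}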
diff --git a/libavcodec/arm/vp56_arith.h b/libavcodec/arm/vp56_arith.h
index 50a164d51c..cd02579e5b 100644
--- a/libavcodec/arm/vp56_arith.h
+++ b/libavcodec/arm/vp56_arith.h
@@ -31,24 +31,25 @@ static inline int vp56_rac_get_prob_armv6(VP56RangeCoder *c, int pr)
unsigned high = c->high << shift;
unsigned bit;
- __asm__ volatile ("adds %3, %3, %0 \n"
- "cmpcs %7, %4 \n"
- "ldrcsh %2, [%4], #2 \n"
- "rsb %0, %6, #256 \n"
- "smlabb %0, %5, %6, %0 \n"
- "rev16cs %2, %2 \n"
- "orrcs %1, %1, %2, lsl %3 \n"
- "subcs %3, %3, #16 \n"
- "lsr %0, %0, #8 \n"
- "cmp %1, %0, lsl #16 \n"
- "subge %1, %1, %0, lsl #16 \n"
- "subge %0, %5, %0 \n"
- "movge %2, #1 \n"
- "movlt %2, #0 \n"
- : "=&r"(c->high), "=&r"(c->code_word), "=&r"(bit),
- "+&r"(c->bits), "+&r"(c->buffer)
- : "r"(high), "r"(pr), "r"(c->end - 1),
- "0"(shift), "1"(code_word));
+ __asm__ ("adds %3, %3, %0 \n"
+ "cmpcs %7, %4 \n"
+ "ldrcsh %2, [%4], #2 \n"
+ "rsb %0, %6, #256 \n"
+ "smlabb %0, %5, %6, %0 \n"
+ "rev16cs %2, %2 \n"
+ "orrcs %1, %1, %2, lsl %3 \n"
+ "subcs %3, %3, #16 \n"
+ "lsr %0, %0, #8 \n"
+ "cmp %1, %0, lsl #16 \n"
+ "subge %1, %1, %0, lsl #16 \n"
+ "subge %0, %5, %0 \n"
+ "movge %2, #1 \n"
+ "movlt %2, #0 \n"
+ : "=&r"(c->high), "=&r"(c->code_word), "=&r"(bit),
+ "+&r"(c->bits), "+&r"(c->buffer)
+ : "r"(high), "r"(pr), "r"(c->end - 1),
+ "0"(shift), "1"(code_word)
+ : "cc");
return bit;
}
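The "0"(shift) and "1"(code_word) inputs above are matching constraints: they ask GCC to place those inputs in the same registers as output operands 0 (c->high) and 1 (c->code_word), which is how a read-modify-write pair is expressed when the C lvalues differ on the two sides. A toy version of the same device (illustrative only):

static inline unsigned add_in_place(unsigned a, unsigned b)
{
    unsigned r;
    __asm__ ("add %0, %0, %2"
             : "=r"(r)
             : "0"(a), "r"(b));   /* a arrives in r's register */
    return r;
}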
@@ -62,19 +63,20 @@ static inline int vp56_rac_get_prob_branchy_armv6(VP56RangeCoder *c, int pr)
unsigned low;
unsigned tmp;
- __asm__ volatile ("adds %3, %3, %0 \n"
- "cmpcs %7, %4 \n"
- "ldrcsh %2, [%4], #2 \n"
- "rsb %0, %6, #256 \n"
- "smlabb %0, %5, %6, %0 \n"
- "rev16cs %2, %2 \n"
- "orrcs %1, %1, %2, lsl %3 \n"
- "subcs %3, %3, #16 \n"
- "lsr %0, %0, #8 \n"
- "lsl %2, %0, #16 \n"
- : "=&r"(low), "+&r"(code_word), "=&r"(tmp),
- "+&r"(c->bits), "+&r"(c->buffer)
- : "r"(high), "r"(pr), "r"(c->end - 1), "0"(shift));
+ __asm__ ("adds %3, %3, %0 \n"
+ "cmpcs %7, %4 \n"
+ "ldrcsh %2, [%4], #2 \n"
+ "rsb %0, %6, #256 \n"
+ "smlabb %0, %5, %6, %0 \n"
+ "rev16cs %2, %2 \n"
+ "orrcs %1, %1, %2, lsl %3 \n"
+ "subcs %3, %3, #16 \n"
+ "lsr %0, %0, #8 \n"
+ "lsl %2, %0, #16 \n"
+ : "=&r"(low), "+&r"(code_word), "=&r"(tmp),
+ "+&r"(c->bits), "+&r"(c->buffer)
+ : "r"(high), "r"(pr), "r"(c->end - 1), "0"(shift)
+ : "cc");
if (code_word >= tmp) {
c->high = high - low;