author    Diego Biurrun <diego@biurrun.de>        2013-01-19 03:34:47 +0100
committer Diego Biurrun <diego@biurrun.de>        2013-02-06 11:30:53 +0100
commit    79dad2a932534d1155079f937649e099f9e5cc27 (patch)
tree      9b3018db13e312b28cc7df17ba71bacf5f94d4bd /libavcodec/ppc
parent    293065bdb56e603589ad8a29326406c39323e153 (diff)
download  ffmpeg-79dad2a932534d1155079f937649e099f9e5cc27.tar.gz
dsputil: Separate h264chroma
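
This patch moves the H.264 chroma motion-compensation kernels out of dsputil into a standalone h264chroma component with its own init function and template file, mirroring the earlier h264qpel split. As in the old code, the template is compiled twice, once per output operation: OP_U8_ALTIVEC is defined to the put operation before the first #include and to the avg operation before the second, producing put_h264_chroma_mc8_altivec and avg_h264_chroma_mc8_altivec from a single body. A minimal sketch of that instantiation trick, with illustrative names only (DEF_MC, PUT_OP, AVG_OP are not from the tree):

#include <stdint.h>

/* One body, two instantiations: DEF_MC stands in for the template
 * file that the patch #includes twice with OP_U8_ALTIVEC redefined. */
#define DEF_MC(NAME, OP)                                  \
static void NAME(uint8_t *dst, const uint8_t *src, int n) \
{                                                         \
    for (int i = 0; i < n; i++)                           \
        OP(dst[i], src[i]);                               \
}

#define PUT_OP(d, s) ((d) = (s))                  /* cf. PUT_OP_U8_ALTIVEC */
#define AVG_OP(d, s) ((d) = ((d) + (s) + 1) >> 1) /* cf. AVG_OP_U8_ALTIVEC */

DEF_MC(put_pixels_sketch, PUT_OP)
DEF_MC(avg_pixels_sketch, AVG_OP)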
Diffstat (limited to 'libavcodec/ppc')
-rw-r--r--  libavcodec/ppc/Makefile               |   1 +
-rw-r--r--  libavcodec/ppc/dsputil_altivec.h      |   2 --
-rw-r--r--  libavcodec/ppc/dsputil_ppc.c          |   2 --
-rw-r--r--  libavcodec/ppc/h264_qpel.c            |  20 --
-rw-r--r--  libavcodec/ppc/h264_qpel_template.c   | 268 ----------
-rw-r--r--  libavcodec/ppc/h264chroma_init.c      |  64 +++
-rw-r--r--  libavcodec/ppc/h264chroma_template.c  | 289 ++++++++++
-rw-r--r--  libavcodec/ppc/vc1dsp_altivec.c       |   4 +-
8 files changed, 356 insertions(+), 294 deletions(-)
diff --git a/libavcodec/ppc/Makefile b/libavcodec/ppc/Makefile
index e152483e7c..a2ce9ebafb 100644
--- a/libavcodec/ppc/Makefile
+++ b/libavcodec/ppc/Makefile
@@ -1,6 +1,7 @@
OBJS += ppc/dsputil_ppc.o \
ppc/videodsp_ppc.o \
+OBJS-$(CONFIG_H264CHROMA) += ppc/h264chroma_init.o
OBJS-$(CONFIG_H264QPEL) += ppc/h264_qpel.o
OBJS-$(CONFIG_VORBIS_DECODER) += ppc/vorbisdsp_altivec.o
OBJS-$(CONFIG_VP3DSP) += ppc/vp3dsp_altivec.o
diff --git a/libavcodec/ppc/dsputil_altivec.h b/libavcodec/ppc/dsputil_altivec.h
index e97234400f..de5054baa7 100644
--- a/libavcodec/ppc/dsputil_altivec.h
+++ b/libavcodec/ppc/dsputil_altivec.h
@@ -36,8 +36,6 @@ void ff_gmc1_altivec(uint8_t *dst, uint8_t *src, int stride, int h,
void ff_idct_put_altivec(uint8_t *dest, int line_size, int16_t *block);
void ff_idct_add_altivec(uint8_t *dest, int line_size, int16_t *block);
-void ff_dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx);
-
void ff_dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx);
void ff_float_init_altivec(DSPContext* c, AVCodecContext *avctx);
void ff_int_init_altivec(DSPContext* c, AVCodecContext *avctx);
diff --git a/libavcodec/ppc/dsputil_ppc.c b/libavcodec/ppc/dsputil_ppc.c
index 1174a7c746..1eebafa92e 100644
--- a/libavcodec/ppc/dsputil_ppc.c
+++ b/libavcodec/ppc/dsputil_ppc.c
@@ -157,8 +157,6 @@ av_cold void ff_dsputil_init_ppc(DSPContext *c, AVCodecContext *avctx)
}
#if HAVE_ALTIVEC
- if(CONFIG_H264_DECODER) ff_dsputil_h264_init_ppc(c, avctx);
-
if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
ff_dsputil_init_altivec(c, avctx);
ff_int_init_altivec(c, avctx);
diff --git a/libavcodec/ppc/h264_qpel.c b/libavcodec/ppc/h264_qpel.c
index b70d4a9067..2e68a06e60 100644
--- a/libavcodec/ppc/h264_qpel.c
+++ b/libavcodec/ppc/h264_qpel.c
@@ -33,8 +33,6 @@
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)
#define OP_U8_ALTIVEC PUT_OP_U8_ALTIVEC
-#define PREFIX_h264_chroma_mc8_altivec put_h264_chroma_mc8_altivec
-#define PREFIX_h264_chroma_mc8_num altivec_put_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec put_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num altivec_put_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec put_h264_qpel16_v_lowpass_altivec
@@ -43,8 +41,6 @@
#define PREFIX_h264_qpel16_hv_lowpass_num altivec_put_h264_qpel16_hv_lowpass_num
#include "h264_qpel_template.c"
#undef OP_U8_ALTIVEC
-#undef PREFIX_h264_chroma_mc8_altivec
-#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
@@ -53,8 +49,6 @@
#undef PREFIX_h264_qpel16_hv_lowpass_num
#define OP_U8_ALTIVEC AVG_OP_U8_ALTIVEC
-#define PREFIX_h264_chroma_mc8_altivec avg_h264_chroma_mc8_altivec
-#define PREFIX_h264_chroma_mc8_num altivec_avg_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec avg_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num altivec_avg_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec avg_h264_qpel16_v_lowpass_altivec
@@ -63,8 +57,6 @@
#define PREFIX_h264_qpel16_hv_lowpass_num altivec_avg_h264_qpel16_hv_lowpass_num
#include "h264_qpel_template.c"
#undef OP_U8_ALTIVEC
-#undef PREFIX_h264_chroma_mc8_altivec
-#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
@@ -273,18 +265,6 @@ static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
H264_MC(put_, 16, altivec)
H264_MC(avg_, 16, altivec)
-
-void ff_dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx)
-{
- const int high_bit_depth = avctx->bits_per_raw_sample > 8;
-
- if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
- if (!high_bit_depth) {
- c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
- c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
- }
- }
-}
#endif /* HAVE_ALTIVEC */
av_cold void ff_h264qpel_init_ppc(H264QpelContext *c, int bit_depth)
diff --git a/libavcodec/ppc/h264_qpel_template.c b/libavcodec/ppc/h264_qpel_template.c
index b445f926a7..4df1d09768 100644
--- a/libavcodec/ppc/h264_qpel_template.c
+++ b/libavcodec/ppc/h264_qpel_template.c
@@ -26,274 +26,6 @@
#define ASSERT_ALIGNED(ptr) ;
#endif
-/* this code assume that stride % 16 == 0 */
-
-#define CHROMA_MC8_ALTIVEC_CORE(BIAS1, BIAS2) \
- vsrc2ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc2uc);\
- vsrc3ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc3uc);\
-\
- psum = vec_mladd(vA, vsrc0ssH, BIAS1);\
- psum = vec_mladd(vB, vsrc1ssH, psum);\
- psum = vec_mladd(vC, vsrc2ssH, psum);\
- psum = vec_mladd(vD, vsrc3ssH, psum);\
- psum = BIAS2(psum);\
- psum = vec_sr(psum, v6us);\
-\
- vdst = vec_ld(0, dst);\
- ppsum = (vec_u8)vec_pack(psum, psum);\
- vfdst = vec_perm(vdst, ppsum, fperm);\
-\
- OP_U8_ALTIVEC(fsum, vfdst, vdst);\
-\
- vec_st(fsum, 0, dst);\
-\
- vsrc0ssH = vsrc2ssH;\
- vsrc1ssH = vsrc3ssH;\
-\
- dst += stride;\
- src += stride;
-
-#define CHROMA_MC8_ALTIVEC_CORE_SIMPLE \
-\
- vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc0uc);\
- vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc1uc);\
-\
- psum = vec_mladd(vA, vsrc0ssH, v32ss);\
- psum = vec_mladd(vE, vsrc1ssH, psum);\
- psum = vec_sr(psum, v6us);\
-\
- vdst = vec_ld(0, dst);\
- ppsum = (vec_u8)vec_pack(psum, psum);\
- vfdst = vec_perm(vdst, ppsum, fperm);\
-\
- OP_U8_ALTIVEC(fsum, vfdst, vdst);\
-\
- vec_st(fsum, 0, dst);\
-\
- dst += stride;\
- src += stride;
-
-#define noop(a) a
-#define add28(a) vec_add(v28ss, a)
-
-#ifdef PREFIX_h264_chroma_mc8_altivec
-static void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
- int stride, int h, int x, int y) {
- DECLARE_ALIGNED(16, signed int, ABCD)[4] =
- {((8 - x) * (8 - y)),
- (( x) * (8 - y)),
- ((8 - x) * ( y)),
- (( x) * ( y))};
- register int i;
- vec_u8 fperm;
- const vec_s32 vABCD = vec_ld(0, ABCD);
- const vec_s16 vA = vec_splat((vec_s16)vABCD, 1);
- const vec_s16 vB = vec_splat((vec_s16)vABCD, 3);
- const vec_s16 vC = vec_splat((vec_s16)vABCD, 5);
- const vec_s16 vD = vec_splat((vec_s16)vABCD, 7);
- LOAD_ZERO;
- const vec_s16 v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5));
- const vec_u16 v6us = vec_splat_u16(6);
- register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
- register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
-
- vec_u8 vsrcAuc, av_uninit(vsrcBuc), vsrcperm0, vsrcperm1;
- vec_u8 vsrc0uc, vsrc1uc;
- vec_s16 vsrc0ssH, vsrc1ssH;
- vec_u8 vsrcCuc, vsrc2uc, vsrc3uc;
- vec_s16 vsrc2ssH, vsrc3ssH, psum;
- vec_u8 vdst, ppsum, vfdst, fsum;
-
- if (((unsigned long)dst) % 16 == 0) {
- fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
- 0x14, 0x15, 0x16, 0x17,
- 0x08, 0x09, 0x0A, 0x0B,
- 0x0C, 0x0D, 0x0E, 0x0F};
- } else {
- fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
- 0x04, 0x05, 0x06, 0x07,
- 0x18, 0x19, 0x1A, 0x1B,
- 0x1C, 0x1D, 0x1E, 0x1F};
- }
-
- vsrcAuc = vec_ld(0, src);
-
- if (loadSecond)
- vsrcBuc = vec_ld(16, src);
- vsrcperm0 = vec_lvsl(0, src);
- vsrcperm1 = vec_lvsl(1, src);
-
- vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
- if (reallyBadAlign)
- vsrc1uc = vsrcBuc;
- else
- vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
-
- vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc0uc);
- vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc1uc);
-
- if (ABCD[3]) {
- if (!loadSecond) {// -> !reallyBadAlign
- for (i = 0 ; i < h ; i++) {
- vsrcCuc = vec_ld(stride + 0, src);
- vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
- vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
-
- CHROMA_MC8_ALTIVEC_CORE(v32ss, noop)
- }
- } else {
- vec_u8 vsrcDuc;
- for (i = 0 ; i < h ; i++) {
- vsrcCuc = vec_ld(stride + 0, src);
- vsrcDuc = vec_ld(stride + 16, src);
- vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
- if (reallyBadAlign)
- vsrc3uc = vsrcDuc;
- else
- vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
-
- CHROMA_MC8_ALTIVEC_CORE(v32ss, noop)
- }
- }
- } else {
- const vec_s16 vE = vec_add(vB, vC);
- if (ABCD[2]) { // x == 0 B == 0
- if (!loadSecond) {// -> !reallyBadAlign
- for (i = 0 ; i < h ; i++) {
- vsrcCuc = vec_ld(stride + 0, src);
- vsrc1uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
- CHROMA_MC8_ALTIVEC_CORE_SIMPLE
-
- vsrc0uc = vsrc1uc;
- }
- } else {
- vec_u8 vsrcDuc;
- for (i = 0 ; i < h ; i++) {
- vsrcCuc = vec_ld(stride + 0, src);
- vsrcDuc = vec_ld(stride + 15, src);
- vsrc1uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
- CHROMA_MC8_ALTIVEC_CORE_SIMPLE
-
- vsrc0uc = vsrc1uc;
- }
- }
- } else { // y == 0 C == 0
- if (!loadSecond) {// -> !reallyBadAlign
- for (i = 0 ; i < h ; i++) {
- vsrcCuc = vec_ld(0, src);
- vsrc0uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
- vsrc1uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
-
- CHROMA_MC8_ALTIVEC_CORE_SIMPLE
- }
- } else {
- vec_u8 vsrcDuc;
- for (i = 0 ; i < h ; i++) {
- vsrcCuc = vec_ld(0, src);
- vsrcDuc = vec_ld(15, src);
- vsrc0uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
- if (reallyBadAlign)
- vsrc1uc = vsrcDuc;
- else
- vsrc1uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
-
- CHROMA_MC8_ALTIVEC_CORE_SIMPLE
- }
- }
- }
- }
-}
-#endif
-
-/* this code assume that stride % 16 == 0 */
-#ifdef PREFIX_no_rnd_vc1_chroma_mc8_altivec
-static void PREFIX_no_rnd_vc1_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
- DECLARE_ALIGNED(16, signed int, ABCD)[4] =
- {((8 - x) * (8 - y)),
- (( x) * (8 - y)),
- ((8 - x) * ( y)),
- (( x) * ( y))};
- register int i;
- vec_u8 fperm;
- const vec_s32 vABCD = vec_ld(0, ABCD);
- const vec_s16 vA = vec_splat((vec_s16)vABCD, 1);
- const vec_s16 vB = vec_splat((vec_s16)vABCD, 3);
- const vec_s16 vC = vec_splat((vec_s16)vABCD, 5);
- const vec_s16 vD = vec_splat((vec_s16)vABCD, 7);
- LOAD_ZERO;
- const vec_s16 v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
- const vec_u16 v6us = vec_splat_u16(6);
- register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
- register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
-
- vec_u8 vsrcAuc, av_uninit(vsrcBuc), vsrcperm0, vsrcperm1;
- vec_u8 vsrc0uc, vsrc1uc;
- vec_s16 vsrc0ssH, vsrc1ssH;
- vec_u8 vsrcCuc, vsrc2uc, vsrc3uc;
- vec_s16 vsrc2ssH, vsrc3ssH, psum;
- vec_u8 vdst, ppsum, vfdst, fsum;
-
- if (((unsigned long)dst) % 16 == 0) {
- fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
- 0x14, 0x15, 0x16, 0x17,
- 0x08, 0x09, 0x0A, 0x0B,
- 0x0C, 0x0D, 0x0E, 0x0F};
- } else {
- fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
- 0x04, 0x05, 0x06, 0x07,
- 0x18, 0x19, 0x1A, 0x1B,
- 0x1C, 0x1D, 0x1E, 0x1F};
- }
-
- vsrcAuc = vec_ld(0, src);
-
- if (loadSecond)
- vsrcBuc = vec_ld(16, src);
- vsrcperm0 = vec_lvsl(0, src);
- vsrcperm1 = vec_lvsl(1, src);
-
- vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
- if (reallyBadAlign)
- vsrc1uc = vsrcBuc;
- else
- vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
-
- vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc0uc);
- vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc1uc);
-
- if (!loadSecond) {// -> !reallyBadAlign
- for (i = 0 ; i < h ; i++) {
-
-
- vsrcCuc = vec_ld(stride + 0, src);
-
- vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
- vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
-
- CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28)
- }
- } else {
- vec_u8 vsrcDuc;
- for (i = 0 ; i < h ; i++) {
- vsrcCuc = vec_ld(stride + 0, src);
- vsrcDuc = vec_ld(stride + 16, src);
-
- vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
- if (reallyBadAlign)
- vsrc3uc = vsrcDuc;
- else
- vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
-
- CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28)
- }
- }
-}
-#endif
-
-#undef noop
-#undef add28
-#undef CHROMA_MC8_ALTIVEC_CORE
-
/* this code assume stride % 16 == 0 */
#ifdef PREFIX_h264_qpel16_h_lowpass_altivec
static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
diff --git a/libavcodec/ppc/h264chroma_init.c b/libavcodec/ppc/h264chroma_init.c
new file mode 100644
index 0000000000..06456ec6e6
--- /dev/null
+++ b/libavcodec/ppc/h264chroma_init.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "libavutil/attributes.h"
+#include "libavcodec/h264chroma.h"
+
+#if HAVE_ALTIVEC
+#include "libavutil/cpu.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/ppc/types_altivec.h"
+#include "libavutil/ppc/util_altivec.h"
+#include "dsputil_altivec.h"
+
+#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
+#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)
+
+#define OP_U8_ALTIVEC PUT_OP_U8_ALTIVEC
+#define PREFIX_h264_chroma_mc8_altivec put_h264_chroma_mc8_altivec
+#define PREFIX_h264_chroma_mc8_num altivec_put_h264_chroma_mc8_num
+#include "h264chroma_template.c"
+#undef OP_U8_ALTIVEC
+#undef PREFIX_h264_chroma_mc8_altivec
+#undef PREFIX_h264_chroma_mc8_num
+
+#define OP_U8_ALTIVEC AVG_OP_U8_ALTIVEC
+#define PREFIX_h264_chroma_mc8_altivec avg_h264_chroma_mc8_altivec
+#define PREFIX_h264_chroma_mc8_num altivec_avg_h264_chroma_mc8_num
+#include "h264chroma_template.c"
+#undef OP_U8_ALTIVEC
+#undef PREFIX_h264_chroma_mc8_altivec
+#undef PREFIX_h264_chroma_mc8_num
+#endif /* HAVE_ALTIVEC */
+
+av_cold void ff_h264chroma_init_ppc(H264ChromaContext *c, int bit_depth)
+{
+#if HAVE_ALTIVEC
+ const int high_bit_depth = bit_depth > 8;
+
+ if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
+ if (!high_bit_depth) {
+ c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
+ c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
+ }
+ }
+#endif /* HAVE_ALTIVEC */
+}
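
The new initializer follows the usual per-architecture dispatch pattern: the table entries are expected to hold generic C defaults before this hook runs, and the AltiVec kernels overwrite the 8-bit entries only when runtime CPU detection reports AV_CPU_FLAG_ALTIVEC and the stream is not high bit depth. A compressed sketch of that override pattern, using hypothetical stand-in names (ChromaCtxSketch, cpu_has_altivec, put_mc8_*):

#include <stdint.h>

typedef void (*chroma_mc_fn)(uint8_t *dst, uint8_t *src,
                             int stride, int h, int x, int y);

typedef struct {
    chroma_mc_fn put_h264_chroma_pixels_tab[3];   /* [0] = 8-wide blocks */
} ChromaCtxSketch;

/* Hypothetical stand-ins for runtime detection and the real kernels. */
static int  cpu_has_altivec(void) { return 0; }
static void put_mc8_c(uint8_t *dst, uint8_t *src,
                      int stride, int h, int x, int y) { /* portable C */ }
static void put_mc8_simd(uint8_t *dst, uint8_t *src,
                         int stride, int h, int x, int y) { /* AltiVec */ }

static void chroma_init_sketch(ChromaCtxSketch *c, int bit_depth)
{
    c->put_h264_chroma_pixels_tab[0] = put_mc8_c;  /* generic default  */
    if (bit_depth <= 8 && cpu_has_altivec())       /* runtime override */
        c->put_h264_chroma_pixels_tab[0] = put_mc8_simd;
}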
diff --git a/libavcodec/ppc/h264chroma_template.c b/libavcodec/ppc/h264chroma_template.c
new file mode 100644
index 0000000000..293fef5c90
--- /dev/null
+++ b/libavcodec/ppc/h264chroma_template.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/mem.h"
+
+/* this code assume that stride % 16 == 0 */
+
+#define CHROMA_MC8_ALTIVEC_CORE(BIAS1, BIAS2) \
+ vsrc2ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc2uc);\
+ vsrc3ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc3uc);\
+\
+ psum = vec_mladd(vA, vsrc0ssH, BIAS1);\
+ psum = vec_mladd(vB, vsrc1ssH, psum);\
+ psum = vec_mladd(vC, vsrc2ssH, psum);\
+ psum = vec_mladd(vD, vsrc3ssH, psum);\
+ psum = BIAS2(psum);\
+ psum = vec_sr(psum, v6us);\
+\
+ vdst = vec_ld(0, dst);\
+ ppsum = (vec_u8)vec_pack(psum, psum);\
+ vfdst = vec_perm(vdst, ppsum, fperm);\
+\
+ OP_U8_ALTIVEC(fsum, vfdst, vdst);\
+\
+ vec_st(fsum, 0, dst);\
+\
+ vsrc0ssH = vsrc2ssH;\
+ vsrc1ssH = vsrc3ssH;\
+\
+ dst += stride;\
+ src += stride;
+
+#define CHROMA_MC8_ALTIVEC_CORE_SIMPLE \
+\
+ vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc0uc);\
+ vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc1uc);\
+\
+ psum = vec_mladd(vA, vsrc0ssH, v32ss);\
+ psum = vec_mladd(vE, vsrc1ssH, psum);\
+ psum = vec_sr(psum, v6us);\
+\
+ vdst = vec_ld(0, dst);\
+ ppsum = (vec_u8)vec_pack(psum, psum);\
+ vfdst = vec_perm(vdst, ppsum, fperm);\
+\
+ OP_U8_ALTIVEC(fsum, vfdst, vdst);\
+\
+ vec_st(fsum, 0, dst);\
+\
+ dst += stride;\
+ src += stride;
+
+#define noop(a) a
+#define add28(a) vec_add(v28ss, a)
+
+#ifdef PREFIX_h264_chroma_mc8_altivec
+static void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
+ int stride, int h, int x, int y) {
+ DECLARE_ALIGNED(16, signed int, ABCD)[4] =
+ {((8 - x) * (8 - y)),
+ (( x) * (8 - y)),
+ ((8 - x) * ( y)),
+ (( x) * ( y))};
+ register int i;
+ vec_u8 fperm;
+ const vec_s32 vABCD = vec_ld(0, ABCD);
+ const vec_s16 vA = vec_splat((vec_s16)vABCD, 1);
+ const vec_s16 vB = vec_splat((vec_s16)vABCD, 3);
+ const vec_s16 vC = vec_splat((vec_s16)vABCD, 5);
+ const vec_s16 vD = vec_splat((vec_s16)vABCD, 7);
+ LOAD_ZERO;
+ const vec_s16 v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5));
+ const vec_u16 v6us = vec_splat_u16(6);
+ register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
+ register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
+
+ vec_u8 vsrcAuc, av_uninit(vsrcBuc), vsrcperm0, vsrcperm1;
+ vec_u8 vsrc0uc, vsrc1uc;
+ vec_s16 vsrc0ssH, vsrc1ssH;
+ vec_u8 vsrcCuc, vsrc2uc, vsrc3uc;
+ vec_s16 vsrc2ssH, vsrc3ssH, psum;
+ vec_u8 vdst, ppsum, vfdst, fsum;
+
+ if (((unsigned long)dst) % 16 == 0) {
+ fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17,
+ 0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F};
+ } else {
+ fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F};
+ }
+
+ vsrcAuc = vec_ld(0, src);
+
+ if (loadSecond)
+ vsrcBuc = vec_ld(16, src);
+ vsrcperm0 = vec_lvsl(0, src);
+ vsrcperm1 = vec_lvsl(1, src);
+
+ vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
+ if (reallyBadAlign)
+ vsrc1uc = vsrcBuc;
+ else
+ vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
+
+ vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc0uc);
+ vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc1uc);
+
+ if (ABCD[3]) {
+ if (!loadSecond) {// -> !reallyBadAlign
+ for (i = 0 ; i < h ; i++) {
+ vsrcCuc = vec_ld(stride + 0, src);
+ vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
+ vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
+
+ CHROMA_MC8_ALTIVEC_CORE(v32ss, noop)
+ }
+ } else {
+ vec_u8 vsrcDuc;
+ for (i = 0 ; i < h ; i++) {
+ vsrcCuc = vec_ld(stride + 0, src);
+ vsrcDuc = vec_ld(stride + 16, src);
+ vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
+ if (reallyBadAlign)
+ vsrc3uc = vsrcDuc;
+ else
+ vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
+
+ CHROMA_MC8_ALTIVEC_CORE(v32ss, noop)
+ }
+ }
+ } else {
+ const vec_s16 vE = vec_add(vB, vC);
+ if (ABCD[2]) { // x == 0 B == 0
+ if (!loadSecond) {// -> !reallyBadAlign
+ for (i = 0 ; i < h ; i++) {
+ vsrcCuc = vec_ld(stride + 0, src);
+ vsrc1uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
+ CHROMA_MC8_ALTIVEC_CORE_SIMPLE
+
+ vsrc0uc = vsrc1uc;
+ }
+ } else {
+ vec_u8 vsrcDuc;
+ for (i = 0 ; i < h ; i++) {
+ vsrcCuc = vec_ld(stride + 0, src);
+ vsrcDuc = vec_ld(stride + 15, src);
+ vsrc1uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
+ CHROMA_MC8_ALTIVEC_CORE_SIMPLE
+
+ vsrc0uc = vsrc1uc;
+ }
+ }
+ } else { // y == 0 C == 0
+ if (!loadSecond) {// -> !reallyBadAlign
+ for (i = 0 ; i < h ; i++) {
+ vsrcCuc = vec_ld(0, src);
+ vsrc0uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
+ vsrc1uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
+
+ CHROMA_MC8_ALTIVEC_CORE_SIMPLE
+ }
+ } else {
+ vec_u8 vsrcDuc;
+ for (i = 0 ; i < h ; i++) {
+ vsrcCuc = vec_ld(0, src);
+ vsrcDuc = vec_ld(15, src);
+ vsrc0uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
+ if (reallyBadAlign)
+ vsrc1uc = vsrcDuc;
+ else
+ vsrc1uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
+
+ CHROMA_MC8_ALTIVEC_CORE_SIMPLE
+ }
+ }
+ }
+ }
+}
+#endif
+
+/* this code assume that stride % 16 == 0 */
+#ifdef PREFIX_no_rnd_vc1_chroma_mc8_altivec
+static void PREFIX_no_rnd_vc1_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
+ DECLARE_ALIGNED(16, signed int, ABCD)[4] =
+ {((8 - x) * (8 - y)),
+ (( x) * (8 - y)),
+ ((8 - x) * ( y)),
+ (( x) * ( y))};
+ register int i;
+ vec_u8 fperm;
+ const vec_s32 vABCD = vec_ld(0, ABCD);
+ const vec_s16 vA = vec_splat((vec_s16)vABCD, 1);
+ const vec_s16 vB = vec_splat((vec_s16)vABCD, 3);
+ const vec_s16 vC = vec_splat((vec_s16)vABCD, 5);
+ const vec_s16 vD = vec_splat((vec_s16)vABCD, 7);
+ LOAD_ZERO;
+ const vec_s16 v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
+ const vec_u16 v6us = vec_splat_u16(6);
+ register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
+ register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
+
+ vec_u8 vsrcAuc, av_uninit(vsrcBuc), vsrcperm0, vsrcperm1;
+ vec_u8 vsrc0uc, vsrc1uc;
+ vec_s16 vsrc0ssH, vsrc1ssH;
+ vec_u8 vsrcCuc, vsrc2uc, vsrc3uc;
+ vec_s16 vsrc2ssH, vsrc3ssH, psum;
+ vec_u8 vdst, ppsum, vfdst, fsum;
+
+ if (((unsigned long)dst) % 16 == 0) {
+ fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17,
+ 0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F};
+ } else {
+ fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F};
+ }
+
+ vsrcAuc = vec_ld(0, src);
+
+ if (loadSecond)
+ vsrcBuc = vec_ld(16, src);
+ vsrcperm0 = vec_lvsl(0, src);
+ vsrcperm1 = vec_lvsl(1, src);
+
+ vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
+ if (reallyBadAlign)
+ vsrc1uc = vsrcBuc;
+ else
+ vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
+
+ vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc0uc);
+ vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc1uc);
+
+ if (!loadSecond) {// -> !reallyBadAlign
+ for (i = 0 ; i < h ; i++) {
+
+
+ vsrcCuc = vec_ld(stride + 0, src);
+
+ vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
+ vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
+
+ CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28)
+ }
+ } else {
+ vec_u8 vsrcDuc;
+ for (i = 0 ; i < h ; i++) {
+ vsrcCuc = vec_ld(stride + 0, src);
+ vsrcDuc = vec_ld(stride + 16, src);
+
+ vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
+ if (reallyBadAlign)
+ vsrc3uc = vsrcDuc;
+ else
+ vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
+
+ CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28)
+ }
+ }
+}
+#endif
+
+#undef noop
+#undef add28
+#undef CHROMA_MC8_ALTIVEC_CORE
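
Both kernels in this template vectorize the standard H.264 eighth-sample bilinear chroma filter: the ABCD[] table computed from (x, y) holds its four corner weights, v32ss is the rounding bias, and v6us the final shift. A scalar reference for the general x != 0, y != 0 path (a sketch, not code from the tree):

#include <stdint.h>

/* Scalar version of what the AltiVec core computes per pixel; assumes
 * x, y in 0..7 and readable source pixels one row/column past the block. */
static void chroma_mc8_sketch(uint8_t *dst, const uint8_t *src,
                              int stride, int h, int x, int y)
{
    const int A = (8 - x) * (8 - y);   /* the same ABCD[] weights */
    const int B =      x  * (8 - y);
    const int C = (8 - x) *      y;
    const int D =      x  *      y;

    for (int i = 0; i < h; i++) {
        for (int j = 0; j < 8; j++)
            dst[j] = (A * src[j]          + B * src[j + 1] +
                      C * src[j + stride] + D * src[j + stride + 1] +
                      32) >> 6;          /* v32ss bias, v6us shift */
        dst += stride;
        src += stride;
    }
}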
diff --git a/libavcodec/ppc/vc1dsp_altivec.c b/libavcodec/ppc/vc1dsp_altivec.c
index e6ebb9ccb5..18ec50f58d 100644
--- a/libavcodec/ppc/vc1dsp_altivec.c
+++ b/libavcodec/ppc/vc1dsp_altivec.c
@@ -326,13 +326,13 @@ static void vc1_inv_trans_8x4_altivec(uint8_t *dest, int stride, int16_t *block)
#define OP_U8_ALTIVEC PUT_OP_U8_ALTIVEC
#define PREFIX_no_rnd_vc1_chroma_mc8_altivec put_no_rnd_vc1_chroma_mc8_altivec
-#include "h264_qpel_template.c"
+#include "h264chroma_template.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec
#define OP_U8_ALTIVEC AVG_OP_U8_ALTIVEC
#define PREFIX_no_rnd_vc1_chroma_mc8_altivec avg_no_rnd_vc1_chroma_mc8_altivec
-#include "h264_qpel_template.c"
+#include "h264chroma_template.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec
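
vc1dsp_altivec.c now instantiates its no-round chroma functions from the new template rather than the qpel one; nothing else changes here. The VC-1 variant reuses the same CHROMA_MC8_ALTIVEC_CORE with different bias plumbing: BIAS1 is zero instead of v32ss and BIAS2 is add28 instead of noop, so each pixel is rounded with +28 rather than +32 before the 6-bit shift. In scalar terms (a sketch):

static inline int round_h264(int sum) { return (sum + 32) >> 6; }  /* noop path  */
static inline int round_vc1 (int sum) { return (sum + 28) >> 6; }  /* add28 path */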