author | Diego Biurrun <diego@biurrun.de> | 2009-04-13 10:00:56 +0000
committer | Diego Biurrun <diego@biurrun.de> | 2009-04-13 10:00:56 +0000
commit | beb93f987cef2f3a629caac29d143b2a1b901c6f (patch)
tree | 119c70b41a04ef28c072e17a520d6669061f7ab1
parent | 8e8813a0a1fb035e8f25ac9b8ae1d7ba5d1d2be4 (diff)
download | ffmpeg-beb93f987cef2f3a629caac29d143b2a1b901c6f.tar.gz
Add a copy of libswscale into the branch instead of using svn:external.
This will allow merging some changes from trunk.
Originally committed as revision 18488 to svn://svn.ffmpeg.org/ffmpeg/branches/0.5
-rw-r--r-- | libswscale/Makefile | 24
-rw-r--r-- | libswscale/cs_test.c | 175
-rw-r--r-- | libswscale/internal_bfin.S | 606
-rw-r--r-- | libswscale/rgb2rgb.c | 442
-rw-r--r-- | libswscale/rgb2rgb.h | 147
-rw-r--r-- | libswscale/rgb2rgb_template.c | 2738
-rw-r--r-- | libswscale/swscale-example.c | 210
-rw-r--r-- | libswscale/swscale.c | 3198
-rw-r--r-- | libswscale/swscale.h | 247
-rw-r--r-- | libswscale/swscale_altivec_template.c | 538
-rw-r--r-- | libswscale/swscale_avoption.c | 60
-rw-r--r-- | libswscale/swscale_bfin.c | 91
-rw-r--r-- | libswscale/swscale_internal.h | 324
-rw-r--r-- | libswscale/swscale_template.c | 3041
-rw-r--r-- | libswscale/yuv2rgb.c | 684
-rw-r--r-- | libswscale/yuv2rgb_altivec.c | 962
-rw-r--r-- | libswscale/yuv2rgb_bfin.c | 203
-rw-r--r-- | libswscale/yuv2rgb_mlib.c | 85
-rw-r--r-- | libswscale/yuv2rgb_template.c | 453
-rw-r--r-- | libswscale/yuv2rgb_vis.c | 209
20 files changed, 14437 insertions, 0 deletions
diff --git a/libswscale/Makefile b/libswscale/Makefile new file mode 100644 index 0000000000..6d500abc65 --- /dev/null +++ b/libswscale/Makefile @@ -0,0 +1,24 @@ +include $(SUBDIR)../config.mak + +NAME = swscale +FFLIBS = avutil + +HEADERS = swscale.h + +OBJS = rgb2rgb.o swscale.o swscale_avoption.o yuv2rgb.o + +OBJS-$(ARCH_BFIN) += internal_bfin.o swscale_bfin.o yuv2rgb_bfin.o +OBJS-$(CONFIG_MLIB) += yuv2rgb_mlib.o +OBJS-$(HAVE_ALTIVEC) += yuv2rgb_altivec.o +OBJS-$(HAVE_VIS) += yuv2rgb_vis.o + +TESTS = cs_test swscale-example + +CLEANFILES = cs_test swscale-example + +include $(SUBDIR)../subdir.mak + +$(SUBDIR)cs_test: $(SUBDIR)cs_test.o $(SUBDIR)$(LIBNAME) + +$(SUBDIR)swscale-example: $(SUBDIR)swscale-example.o $(SUBDIR)$(LIBNAME) +$(SUBDIR)swscale-example: EXTRALIBS += -lm diff --git a/libswscale/cs_test.c b/libswscale/cs_test.c new file mode 100644 index 0000000000..2223ee3a31 --- /dev/null +++ b/libswscale/cs_test.c @@ -0,0 +1,175 @@ +/* + * Copyright (C) 2002 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <stdio.h> +#include <string.h> /* for memset() */ +#include <unistd.h> +#include <stdlib.h> +#include <inttypes.h> + +#include "swscale.h" +#include "rgb2rgb.h" + +#define SIZE 1000 +#define srcByte 0x55 +#define dstByte 0xBB + +#define FUNC(s,d,n) {s,d,#n,n} + +static int cpu_caps; + +static char *args_parse(int argc, char *argv[]) +{ + int o; + + while ((o = getopt(argc, argv, "m23")) != -1) { + switch (o) { + case 'm': + cpu_caps |= SWS_CPU_CAPS_MMX; + break; + case '2': + cpu_caps |= SWS_CPU_CAPS_MMX2; + break; + case '3': + cpu_caps |= SWS_CPU_CAPS_3DNOW; + break; + default: + av_log(NULL, AV_LOG_ERROR, "Unknown option %c\n", o); + } + } + + return argv[optind]; +} + +int main(int argc, char **argv) +{ + int i, funcNum; + uint8_t *srcBuffer= (uint8_t*)av_malloc(SIZE); + uint8_t *dstBuffer= (uint8_t*)av_malloc(SIZE); + int failedNum=0; + int passedNum=0; + + av_log(NULL, AV_LOG_INFO, "memory corruption test ...\n"); + args_parse(argc, argv); + av_log(NULL, AV_LOG_INFO, "CPU capabilities forced to %x\n", cpu_caps); + sws_rgb2rgb_init(cpu_caps); + + for(funcNum=0; ; funcNum++){ + struct func_info_s { + int src_bpp; + int dst_bpp; + const char *name; + void (*func)(const uint8_t *src, uint8_t *dst, long src_size); + } func_info[] = { + FUNC(2, 2, rgb15to16), + FUNC(2, 3, rgb15to24), + FUNC(2, 4, rgb15to32), + FUNC(2, 3, rgb16to24), + FUNC(2, 4, rgb16to32), + FUNC(3, 2, rgb24to15), + FUNC(3, 2, rgb24to16), + FUNC(3, 4, rgb24to32), + FUNC(4, 2, rgb32to15), + FUNC(4, 2, rgb32to16), + FUNC(4, 3, rgb32to24), + FUNC(2, 2, rgb16to15), + FUNC(2, 2, rgb15tobgr15), + FUNC(2, 2, rgb15tobgr16), + FUNC(2, 3, rgb15tobgr24), + FUNC(2, 4, rgb15tobgr32), + FUNC(2, 2, rgb16tobgr15), + FUNC(2, 2, rgb16tobgr16), + FUNC(2, 3, 
rgb16tobgr24), + FUNC(2, 4, rgb16tobgr32), + FUNC(3, 2, rgb24tobgr15), + FUNC(3, 2, rgb24tobgr16), + FUNC(3, 3, rgb24tobgr24), + FUNC(3, 4, rgb24tobgr32), + FUNC(4, 2, rgb32tobgr15), + FUNC(4, 2, rgb32tobgr16), + FUNC(4, 3, rgb32tobgr24), + FUNC(4, 4, rgb32tobgr32), + FUNC(0, 0, NULL) + }; + int width; + int failed=0; + int srcBpp=0; + int dstBpp=0; + + if (!func_info[funcNum].func) break; + + av_log(NULL, AV_LOG_INFO,"."); + memset(srcBuffer, srcByte, SIZE); + + for(width=63; width>0; width--){ + int dstOffset; + for(dstOffset=128; dstOffset<196; dstOffset+=4){ + int srcOffset; + memset(dstBuffer, dstByte, SIZE); + + for(srcOffset=128; srcOffset<196; srcOffset+=4){ + uint8_t *src= srcBuffer+srcOffset; + uint8_t *dst= dstBuffer+dstOffset; + const char *name=NULL; + + if(failed) break; //don't fill the screen with shit ... + + srcBpp = func_info[funcNum].src_bpp; + dstBpp = func_info[funcNum].dst_bpp; + name = func_info[funcNum].name; + + func_info[funcNum].func(src, dst, width*srcBpp); + + if(!srcBpp) break; + + for(i=0; i<SIZE; i++){ + if(srcBuffer[i]!=srcByte){ + av_log(NULL, AV_LOG_INFO, "src damaged at %d w:%d src:%d dst:%d %s\n", + i, width, srcOffset, dstOffset, name); + failed=1; + break; + } + } + for(i=0; i<dstOffset; i++){ + if(dstBuffer[i]!=dstByte){ + av_log(NULL, AV_LOG_INFO, "dst damaged at %d w:%d src:%d dst:%d %s\n", + i, width, srcOffset, dstOffset, name); + failed=1; + break; + } + } + for(i=dstOffset + width*dstBpp; i<SIZE; i++){ + if(dstBuffer[i]!=dstByte){ + av_log(NULL, AV_LOG_INFO, "dst damaged at %d w:%d src:%d dst:%d %s\n", + i, width, srcOffset, dstOffset, name); + failed=1; + break; + } + } + } + } + } + if(failed) failedNum++; + else if(srcBpp) passedNum++; + } + + av_log(NULL, AV_LOG_INFO, "\n%d converters passed, %d converters randomly overwrote memory\n", passedNum, failedNum); + return failedNum; +} diff --git a/libswscale/internal_bfin.S b/libswscale/internal_bfin.S new file mode 100644 index 0000000000..fb7bda7e12 --- /dev/null +++ b/libswscale/internal_bfin.S @@ -0,0 +1,606 @@ +/* + * Copyright (C) 2007 Marc Hoffman <marc.hoffman@analog.com> + * April 20, 2007 + * + * Blackfin video color space converter operations + * convert I420 YV12 to RGB in various formats + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + + +/* +YUV420 to RGB565 conversion. This routine takes a YUV 420 planar macroblock +and converts it to RGB565. R:5 bits, G:6 bits, B:5 bits.. packed into shorts. + + +The following calculation is used for the conversion: + + r = clipz((y-oy)*cy + crv*(v-128)) + g = clipz((y-oy)*cy + cgv*(v-128) + cgu*(u-128)) + b = clipz((y-oy)*cy + cbu*(u-128)) + +y,u,v are prescaled by a factor of 4 i.e. left-shifted to gain precision. + + +New factorization to eliminate the truncation error which was +occurring due to the byteop3p. 
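For reference, the same per-pixel math as a plain scalar C sketch (this
ignores the quad-pixel packing, the 4x prescale, and the fixed-point
coefficient scaling used below; clipz clamps to [0,255]):

    static inline int clipz(int x) { return x < 0 ? 0 : x > 255 ? 255 : x; }

    r = clipz((y - oy) * cy + crv * (v - 128));
    g = clipz((y - oy) * cy + cgv * (v - 128) + cgu * (u - 128));
    b = clipz((y - oy) * cy + cbu * (u - 128));
    pel = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3); /* R:5 G:6 B:5 */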
+ +The new factorization proceeds as follows: + +1) Use byteop16m to subtract quad bytes; this operates on U8 data, + so the offsets then need to be renormalized to 8 bits. + +2) Scale operands up by a factor of 4, not 8, because Blackfin + multiplies include a shift. + +3) Compute into the accumulators cy*yx0, cy*yx1. + +4) Compute each of the linear equations: + r = clipz((y - oy) * cy + crv * (v - 128)) + + g = clipz((y - oy) * cy + cgv * (v - 128) + cgu * (u - 128)) + + b = clipz((y - oy) * cy + cbu * (u - 128)) + + Reuse of the accumulators requires that we actually multiply + twice: once with an addition and a second time with a subtraction. + + Because of this we need to compute the equations in the order R, B, + then G, saving the writes for B in the case of 24/32-bit color + formats. + + API: yuv2rgb_kind (uint8_t *Y, uint8_t *U, uint8_t *V, int *out, + int dW, uint32_t *coeffs); + + A B + --- --- + i2 = cb i3 = cr + i1 = coeff i0 = y + +where coeffs has the following layout in memory: + +uint32_t oy,oc,zero,cy,crv,rmask,cbu,bmask,cgu,cgv; + +coeffs is a pointer to oy. + +The {rgb} masks are only utilized by the 565 packing algorithm. Note that data +replication is used to simplify the internal algorithms for the dual-MAC +architecture of Blackfin. + +All routines are exported with _ff_bfin_ as a symbol prefix. + +Rough performance gain compared against -O3: + +2779809/1484290 187.28% + +which translates to ~33c/pel to ~57c/pel for the reference vs 17.5 +c/pel for the optimized implementations. It is unclear why the reference +code varies so much on Blackfin; presumably it has to do with the memory +system. +*/ + +#define mL3 .text +#ifdef __FDPIC__ +#define mL1 .l1.text +#else +#define mL1 mL3 +#endif +#define MEM mL1 + +#define DEFUN(fname,where,interface) \ + .section where; \ + .global _ff_bfin_ ## fname; \ + .type _ff_bfin_ ## fname, STT_FUNC; \ + .align 8; \ + _ff_bfin_ ## fname + +#define DEFUN_END(fname) \ + .size _ff_bfin_ ## fname, . 
- _ff_bfin_ ## fname + + +.text + +#define COEFF_LEN 11*4 +#define COEFF_REL_CY_OFF 4*4 + +#define ARG_OUT 20 +#define ARG_W 24 +#define ARG_COEFF 28 + +DEFUN(yuv2rgb565_line,MEM, + (uint8_t *Y, uint8_t *U, uint8_t *V, int *out, int dW, uint32_t *coeffs)): + link 0; + [--sp] = (r7:4); + p1 = [fp+ARG_OUT]; + r3 = [fp+ARG_W]; + + i0 = r0; + i2 = r1; + i3 = r2; + + r0 = [fp+ARG_COEFF]; + i1 = r0; + b1 = i1; + l1 = COEFF_LEN; + m0 = COEFF_REL_CY_OFF; + p0 = r3; + + r0 = [i0++]; // 2Y + r1.l = w[i2++]; // 2u + r1.h = w[i3++]; // 2v + p0 = p0>>2; + + lsetup (.L0565, .L1565) lc0 = p0; + + /* + uint32_t oy,oc,zero,cy,crv,rmask,cbu,bmask,cgu,cgv + r0 -- used to load 4ys + r1 -- used to load 2us,2vs + r4 -- y3,y2 + r5 -- y1,y0 + r6 -- u1,u0 + r7 -- v1,v0 + */ + r2=[i1++]; // oy +.L0565: + /* + rrrrrrrr gggggggg bbbbbbbb + 5432109876543210 + bbbbb >>3 + gggggggg <<3 + rrrrrrrr <<8 + rrrrrggggggbbbbb + */ + (r4,r5) = byteop16m (r1:0, r3:2) || r3=[i1++]; // oc + (r7,r6) = byteop16m (r1:0, r3:2) (r); + r5 = r5 << 2 (v); // y1,y0 + r4 = r4 << 2 (v); // y3,y2 + r6 = r6 << 2 (v) || r0=[i1++]; // u1,u0, r0=zero + r7 = r7 << 2 (v) || r1=[i1++]; // v1,v0 r1=cy + /* Y' = y*cy */ + a1 = r1.h*r5.h, a0 = r1.l*r5.l || r1=[i1++]; // crv + + /* R = Y+ crv*(Cr-128) */ + r2.h = (a1 += r1.h*r7.l), r2.l = (a0 += r1.l*r7.l); + a1 -= r1.h*r7.l, a0 -= r1.l*r7.l || r5=[i1++]; // rmask + r2 = byteop3p(r3:2, r1:0)(LO) || r1=[i1++]; // cbu + r2 = r2 >> 3 (v); + r3 = r2 & r5; + + /* B = Y+ cbu*(Cb-128) */ + r2.h = (a1 += r1.h*r6.l), r2.l = (a0 += r1.l*r6.l); + a1 -= r1.h*r6.l, a0 -= r1.l*r6.l || r5=[i1++]; // bmask + r2 = byteop3p(r3:2, r1:0)(LO) || r1=[i1++]; // cgu + r2 = r2 << 8 (v); + r2 = r2 & r5; + r3 = r3 | r2; + + /* G = Y+ cgu*(Cb-128)+cgv*(Cr-128) */ + a1 += r1.h*r6.l, a0 += r1.l*r6.l || r1=[i1++]; // cgv + r2.h = (a1 += r1.h*r7.l), r2.l = (a0 += r1.l*r7.l); + r2 = byteop3p(r3:2, r1:0)(LO) || r5=[i1++m0]; // gmask + r2 = r2 << 3 (v); + r2 = r2 & r5; + r3 = r3 | r2; + [p1++]=r3 || r1=[i1++]; // cy + + /* Y' = y*cy */ + + a1 = r1.h*r4.h, a0 = r1.l*r4.l || r1=[i1++]; // crv + + /* R = Y+ crv*(Cr-128) */ + r2.h = (a1 += r1.h*r7.h), r2.l = (a0 += r1.l*r7.h); + a1 -= r1.h*r7.h, a0 -= r1.l*r7.h || r5=[i1++]; // rmask + r2 = byteop3p(r3:2, r1:0)(LO) || r1=[i1++]; // cbu + r2 = r2 >> 3 (v); + r3 = r2 & r5; + + /* B = Y+ cbu*(Cb-128) */ + r2.h = (a1 += r1.h*r6.h), r2.l = (a0 += r1.l*r6.h); + a1 -= r1.h*r6.h, a0 -= r1.l*r6.h || r5=[i1++]; // bmask + r2 = byteop3p(r3:2, r1:0)(LO) || r1=[i1++]; // cgu + r2 = r2 << 8 (v); + r2 = r2 & r5; + r3 = r3 | r2; + + /* G = Y+ cgu*(Cb-128)+cgv*(Cr-128) */ + a1 += r1.h*r6.h, a0 += r1.l*r6.h || r1=[i1++]; // cgv + r2.h = (a1 += r1.h*r7.h), r2.l = (a0 += r1.l*r7.h) || r5=[i1++]; // gmask + r2 = byteop3p(r3:2, r1:0)(LO) || r0 = [i0++]; // 2Y + r2 = r2 << 3 (v) || r1.l = w[i2++]; // 2u + r2 = r2 & r5; + r3 = r3 | r2; + [p1++]=r3 || r1.h = w[i3++]; // 2v +.L1565: r2=[i1++]; // oy + + l1 = 0; + + (r7:4) = [sp++]; + unlink; + rts; +DEFUN_END(yuv2rgb565_line) + +DEFUN(yuv2rgb555_line,MEM, + (uint8_t *Y, uint8_t *U, uint8_t *V, int *out, int dW, uint32_t *coeffs)): + link 0; + [--sp] = (r7:4); + p1 = [fp+ARG_OUT]; + r3 = [fp+ARG_W]; + + i0 = r0; + i2 = r1; + i3 = r2; + + r0 = [fp+ARG_COEFF]; + i1 = r0; + b1 = i1; + l1 = COEFF_LEN; + m0 = COEFF_REL_CY_OFF; + p0 = r3; + + r0 = [i0++]; // 2Y + r1.l = w[i2++]; // 2u + r1.h = w[i3++]; // 2v + p0 = p0>>2; + + lsetup (.L0555, .L1555) lc0 = p0; + + /* + uint32_t oy,oc,zero,cy,crv,rmask,cbu,bmask,cgu,cgv + r0 -- used to load 4ys + r1 -- used to load 2us,2vs + 
r4 -- y3,y2 + r5 -- y1,y0 + r6 -- u1,u0 + r7 -- v1,v0 + */ + r2=[i1++]; // oy +.L0555: + /* + rrrrrrrr gggggggg bbbbbbbb + 5432109876543210 + bbbbb >>3 + gggggggg <<2 + rrrrrrrr <<7 + xrrrrrgggggbbbbb + */ + + (r4,r5) = byteop16m (r1:0, r3:2) || r3=[i1++]; // oc + (r7,r6) = byteop16m (r1:0, r3:2) (r); + r5 = r5 << 2 (v); // y1,y0 + r4 = r4 << 2 (v); // y3,y2 + r6 = r6 << 2 (v) || r0=[i1++]; // u1,u0, r0=zero + r7 = r7 << 2 (v) || r1=[i1++]; // v1,v0 r1=cy + /* Y' = y*cy */ + a1 = r1.h*r5.h, a0 = r1.l*r5.l || r1=[i1++]; // crv + + /* R = Y+ crv*(Cr-128) */ + r2.h = (a1 += r1.h*r7.l), r2.l = (a0 += r1.l*r7.l); + a1 -= r1.h*r7.l, a0 -= r1.l*r7.l || r5=[i1++]; // rmask + r2 = byteop3p(r3:2, r1:0)(LO) || r1=[i1++]; // cbu + r2 = r2 >> 3 (v); + r3 = r2 & r5; + + /* B = Y+ cbu*(Cb-128) */ + r2.h = (a1 += r1.h*r6.l), r2.l = (a0 += r1.l*r6.l); + a1 -= r1.h*r6.l, a0 -= r1.l*r6.l || r5=[i1++]; // bmask + r2 = byteop3p(r3:2, r1:0)(LO) || r1=[i1++]; // cgu + r2 = r2 << 7 (v); + r2 = r2 & r5; + r3 = r3 | r2; + + /* G = Y+ cgu*(Cb-128)+cgv*(Cr-128) */ + a1 += r1.h*r6.l, a0 += r1.l*r6.l || r1=[i1++]; // cgv + r2.h = (a1 += r1.h*r7.l), r2.l = (a0 += r1.l*r7.l); + r2 = byteop3p(r3:2, r1:0)(LO) || r5=[i1++m0]; // gmask + r2 = r2 << 2 (v); + r2 = r2 & r5; + r3 = r3 | r2; + [p1++]=r3 || r1=[i1++]; // cy + + /* Y' = y*cy */ + + a1 = r1.h*r4.h, a0 = r1.l*r4.l || r1=[i1++]; // crv + + /* R = Y+ crv*(Cr-128) */ + r2.h = (a1 += r1.h*r7.h), r2.l = (a0 += r1.l*r7.h); + a1 -= r1.h*r7.h, a0 -= r1.l*r7.h || r5=[i1++]; // rmask + r2 = byteop3p(r3:2, r1:0)(LO) || r1=[i1++]; // cbu + r2 = r2 >> 3 (v); + r3 = r2 & r5; + + /* B = Y+ cbu*(Cb-128) */ + r2.h = (a1 += r1.h*r6.h), r2.l = (a0 += r1.l*r6.h); + a1 -= r1.h*r6.h, a0 -= r1.l*r6.h || r5=[i1++]; // bmask + r2 = byteop3p(r3:2, r1:0)(LO) || r1=[i1++]; // cgu + r2 = r2 << 7 (v); + r2 = r2 & r5; + r3 = r3 | r2; + + /* G = Y+ cgu*(Cb-128)+cgv*(Cr-128) */ + a1 += r1.h*r6.h, a0 += r1.l*r6.h || r1=[i1++]; // cgv + r2.h = (a1 += r1.h*r7.h), r2.l = (a0 += r1.l*r7.h) || r5=[i1++]; // gmask + r2 = byteop3p(r3:2, r1:0)(LO) || r0=[i0++]; // 4Y + r2 = r2 << 2 (v) || r1.l=w[i2++]; // 2u + r2 = r2 & r5; + r3 = r3 | r2; + [p1++]=r3 || r1.h=w[i3++]; // 2v + +.L1555: r2=[i1++]; // oy + + l1 = 0; + + (r7:4) = [sp++]; + unlink; + rts; +DEFUN_END(yuv2rgb555_line) + +DEFUN(yuv2rgb24_line,MEM, + (uint8_t *Y, uint8_t *U, uint8_t *V, int *out, int dW, uint32_t *coeffs)): + link 0; + [--sp] = (r7:4); + p1 = [fp+ARG_OUT]; + r3 = [fp+ARG_W]; + p2 = p1; + p2 += 3; + + i0 = r0; + i2 = r1; + i3 = r2; + + r0 = [fp+ARG_COEFF]; // coeff buffer + i1 = r0; + b1 = i1; + l1 = COEFF_LEN; + m0 = COEFF_REL_CY_OFF; + p0 = r3; + + r0 = [i0++]; // 2Y + r1.l = w[i2++]; // 2u + r1.h = w[i3++]; // 2v + p0 = p0>>2; + + lsetup (.L0888, .L1888) lc0 = p0; + + /* + uint32_t oy,oc,zero,cy,crv,rmask,cbu,bmask,cgu,cgv + r0 -- used to load 4ys + r1 -- used to load 2us,2vs + r4 -- y3,y2 + r5 -- y1,y0 + r6 -- u1,u0 + r7 -- v1,v0 + */ + r2=[i1++]; // oy +.L0888: + (r4,r5) = byteop16m (r1:0, r3:2) || r3=[i1++]; // oc + (r7,r6) = byteop16m (r1:0, r3:2) (r); + r5 = r5 << 2 (v); // y1,y0 + r4 = r4 << 2 (v); // y3,y2 + r6 = r6 << 2 (v) || r0=[i1++]; // u1,u0, r0=zero + r7 = r7 << 2 (v) || r1=[i1++]; // v1,v0 r1=cy + + /* Y' = y*cy */ + a1 = r1.h*r5.h, a0 = r1.l*r5.l || r1=[i1++]; // crv + + /* R = Y+ crv*(Cr-128) */ + r2.h = (a1 += r1.h*r7.l), r2.l = (a0 += r1.l*r7.l); + a1 -= r1.h*r7.l, a0 -= r1.l*r7.l || r5=[i1++]; // rmask + r2 = byteop3p(r3:2, r1:0)(LO) || r1=[i1++]; // cbu + r2=r2>>16 || B[p1++]=r2; + B[p2++]=r2; + + /* B = Y+ 
cbu*(Cb-128) */ + r2.h = (a1 += r1.h*r6.l), r2.l = (a0 += r1.l*r6.l); + a1 -= r1.h*r6.l, a0 -= r1.l*r6.l || r5=[i1++]; // bmask + r3 = byteop3p(r3:2, r1:0)(LO) || r1=[i1++]; // cgu + + /* G = Y+ cgu*(Cb-128)+cgv*(Cr-128) */ + a1 += r1.h*r6.l, a0 += r1.l*r6.l || r1=[i1++]; // cgv + r2.h = (a1 += r1.h*r7.l), r2.l = (a0 += r1.l*r7.l); + r2 = byteop3p(r3:2, r1:0)(LO) || r5=[i1++m0]; // gmask, oy,cy,zero + + r2=r2>>16 || B[p1++]=r2; + B[p2++]=r2; + + r3=r3>>16 || B[p1++]=r3; + B[p2++]=r3 || r1=[i1++]; // cy + + p1+=3; + p2+=3; + /* Y' = y*cy */ + a1 = r1.h*r4.h, a0 = r1.l*r4.l || r1=[i1++]; // crv + + /* R = Y+ crv*(Cr-128) */ + r2.h = (a1 += r1.h*r7.h), r2.l = (a0 += r1.l*r7.h); + a1 -= r1.h*r7.h, a0 -= r1.l*r7.h || r5=[i1++]; // rmask + r2 = byteop3p(r3:2, r1:0)(LO) || r1=[i1++]; // cbu + r2=r2>>16 || B[p1++]=r2; + B[p2++]=r2; + + /* B = Y+ cbu*(Cb-128) */ + r2.h = (a1 += r1.h*r6.h), r2.l = (a0 += r1.l*r6.h); + a1 -= r1.h*r6.h, a0 -= r1.l*r6.h || r5=[i1++]; // bmask + r3 = byteop3p(r3:2, r1:0)(LO) || r1=[i1++]; // cgu + + /* G = Y+ cgu*(Cb-128)+cgv*(Cr-128) */ + a1 += r1.h*r6.h, a0 += r1.l*r6.h || r1=[i1++]; // cgv + r2.h = (a1 += r1.h*r7.h), r2.l = (a0 += r1.l*r7.h); + r2 = byteop3p(r3:2, r1:0)(LO) || r5=[i1++]; // gmask + r2=r2>>16 || B[p1++]=r2 || r0 = [i0++]; // 4y + B[p2++]=r2 || r1.l = w[i2++]; // 2u + r3=r3>>16 || B[p1++]=r3 || r1.h = w[i3++]; // 2v + B[p2++]=r3 || r2=[i1++]; // oy + + p1+=3; +.L1888: p2+=3; + + l1 = 0; + + (r7:4) = [sp++]; + unlink; + rts; +DEFUN_END(yuv2rgb24_line) + + + +#define ARG_vdst 20 +#define ARG_width 24 +#define ARG_height 28 +#define ARG_lumStride 32 +#define ARG_chromStride 36 +#define ARG_srcStride 40 + +DEFUN(uyvytoyv12, mL3, (const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, + long width, long height, + long lumStride, long chromStride, long srcStride)): + link 0; + [--sp] = (r7:4,p5:4); + + p0 = r1; // Y top even + + i2 = r2; // *u + r2 = [fp + ARG_vdst]; + i3 = r2; // *v + + r1 = [fp + ARG_srcStride]; + r2 = r0 + r1; + r1 += -8; // i0,i1 is pre read need to correct + m0 = r1; + + i0 = r0; // uyvy_T even + i1 = r2; // uyvy_B odd + + p2 = [fp + ARG_lumStride]; + p1 = p0 + p2; // Y bot odd + + p5 = [fp + ARG_width]; + p4 = [fp + ARG_height]; + r0 = p5; + p4 = p4 >> 1; + p5 = p5 >> 2; + + r2 = [fp + ARG_chromStride]; + r0 = r0 >> 1; + r2 = r2 - r0; + m1 = r2; + + /* I0,I1 - src input line pointers + * p0,p1 - luma output line pointers + * I2 - dstU + * I3 - dstV + */ + + lsetup (0f, 1f) lc1 = p4; // H/2 +0: r0 = [i0++] || r2 = [i1++]; + r1 = [i0++] || r3 = [i1++]; + r4 = byteop1p(r1:0, r3:2); + r5 = byteop1p(r1:0, r3:2) (r); + lsetup (2f, 3f) lc0 = p5; // W/4 +2: r0 = r0 >> 8(v); + r1 = r1 >> 8(v); + r2 = r2 >> 8(v); + r3 = r3 >> 8(v); + r0 = bytepack(r0, r1); + r2 = bytepack(r2, r3) || [p0++] = r0; // yyyy + r6 = pack(r5.l, r4.l) || [p1++] = r2; // yyyy + r7 = pack(r5.h, r4.h) || r0 = [i0++] || r2 = [i1++]; + r6 = bytepack(r6, r7) || r1 = [i0++] || r3 = [i1++]; + r4 = byteop1p(r1:0, r3:2) || w[i2++] = r6.l; // uu +3: r5 = byteop1p(r1:0, r3:2) (r) || w[i3++] = r6.h; // vv + + i0 += m0; + i1 += m0; + i2 += m1; + i3 += m1; + p0 = p0 + p2; +1: p1 = p1 + p2; + + (r7:4,p5:4) = [sp++]; + unlink; + rts; +DEFUN_END(uyvytoyv12) + +DEFUN(yuyvtoyv12, mL3, (const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, + long width, long height, + long lumStride, long chromStride, long srcStride)): + link 0; + [--sp] = (r7:4,p5:4); + + p0 = r1; // Y top even + + i2 = r2; // *u + r2 = [fp + ARG_vdst]; + i3 = r2; // *v + + r1 = [fp + ARG_srcStride]; + 
r2 = r0 + r1; + r1 += -8; // i0,i1 is pre read need to correct + m0 = r1; + + i0 = r0; // uyvy_T even + i1 = r2; // uyvy_B odd + + p2 = [fp + ARG_lumStride]; + p1 = p0 + p2; // Y bot odd + + p5 = [fp + ARG_width]; + p4 = [fp + ARG_height]; + r0 = p5; + p4 = p4 >> 1; + p5 = p5 >> 2; + + r2 = [fp + ARG_chromStride]; + r0 = r0 >> 1; + r2 = r2 - r0; + m1 = r2; + + /* I0,I1 - src input line pointers + * p0,p1 - luma output line pointers + * I2 - dstU + * I3 - dstV + */ + + lsetup (0f, 1f) lc1 = p4; // H/2 +0: r0 = [i0++] || r2 = [i1++]; + r1 = [i0++] || r3 = [i1++]; + r4 = bytepack(r0, r1); + r5 = bytepack(r2, r3); + lsetup (2f, 3f) lc0 = p5; // W/4 +2: r0 = r0 >> 8(v) || [p0++] = r4; // yyyy-even + r1 = r1 >> 8(v) || [p1++] = r5; // yyyy-odd + r2 = r2 >> 8(v); + r3 = r3 >> 8(v); + r4 = byteop1p(r1:0, r3:2); + r5 = byteop1p(r1:0, r3:2) (r); + r6 = pack(r5.l, r4.l); + r7 = pack(r5.h, r4.h) || r0 = [i0++] || r2 = [i1++]; + r6 = bytepack(r6, r7) || r1 = [i0++] || r3 = [i1++]; + r4 = bytepack(r0, r1) || w[i2++] = r6.l; // uu +3: r5 = bytepack(r2, r3) || w[i3++] = r6.h; // vv + + i0 += m0; + i1 += m0; + i2 += m1; + i3 += m1; + p0 = p0 + p2; +1: p1 = p1 + p2; + + (r7:4,p5:4) = [sp++]; + unlink; + rts; +DEFUN_END(yuyvtoyv12) diff --git a/libswscale/rgb2rgb.c b/libswscale/rgb2rgb.c new file mode 100644 index 0000000000..ad69265c37 --- /dev/null +++ b/libswscale/rgb2rgb.c @@ -0,0 +1,442 @@ +/* + * software RGB to RGB converter + * pluralize by software PAL8 to RGB converter + * software YUV to YUV converter + * software YUV to RGB converter + * Written by Nick Kurshev. + * palette & YUV & runtime CPU stuff by Michael (michaelni@gmx.at) + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + * The C code (not assembly, MMX, ...) of this file can be used + * under the LGPL license. 
+ */ +#include <inttypes.h> +#include "config.h" +#include "libavutil/x86_cpu.h" +#include "libavutil/bswap.h" +#include "rgb2rgb.h" +#include "swscale.h" +#include "swscale_internal.h" + +#define FAST_BGR2YV12 // use 7-bit instead of 15-bit coefficients + +void (*rgb24tobgr32)(const uint8_t *src, uint8_t *dst, long src_size); +void (*rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long src_size); +void (*rgb24tobgr15)(const uint8_t *src, uint8_t *dst, long src_size); +void (*rgb32tobgr24)(const uint8_t *src, uint8_t *dst, long src_size); +void (*rgb32to16)(const uint8_t *src, uint8_t *dst, long src_size); +void (*rgb32to15)(const uint8_t *src, uint8_t *dst, long src_size); +void (*rgb15to16)(const uint8_t *src, uint8_t *dst, long src_size); +void (*rgb15tobgr24)(const uint8_t *src, uint8_t *dst, long src_size); +void (*rgb15to32)(const uint8_t *src, uint8_t *dst, long src_size); +void (*rgb16to15)(const uint8_t *src, uint8_t *dst, long src_size); +void (*rgb16tobgr24)(const uint8_t *src, uint8_t *dst, long src_size); +void (*rgb16to32)(const uint8_t *src, uint8_t *dst, long src_size); +void (*rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size); +void (*rgb24to16)(const uint8_t *src, uint8_t *dst, long src_size); +void (*rgb24to15)(const uint8_t *src, uint8_t *dst, long src_size); +void (*rgb32tobgr32)(const uint8_t *src, uint8_t *dst, long src_size); +void (*rgb32tobgr16)(const uint8_t *src, uint8_t *dst, long src_size); +void (*rgb32tobgr15)(const uint8_t *src, uint8_t *dst, long src_size); + +void (*yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, + long width, long height, + long lumStride, long chromStride, long dstStride); +void (*yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, + long width, long height, + long lumStride, long chromStride, long dstStride); +void (*yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, + long width, long height, + long lumStride, long chromStride, long dstStride); +void (*yuv422ptouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, + long width, long height, + long lumStride, long chromStride, long dstStride); +void (*yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, + long width, long height, + long lumStride, long chromStride, long srcStride); +void (*rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, + long width, long height, + long lumStride, long chromStride, long srcStride); +void (*planar2x)(const uint8_t *src, uint8_t *dst, long width, long height, + long srcStride, long dstStride); +void (*interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dst, + long width, long height, long src1Stride, + long src2Stride, long dstStride); +void (*vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2, + uint8_t *dst1, uint8_t *dst2, + long width, long height, + long srcStride1, long srcStride2, + long dstStride1, long dstStride2); +void (*yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, + uint8_t *dst, + long width, long height, + long srcStride1, long srcStride2, + long srcStride3, long dstStride); + +#if ARCH_X86 && CONFIG_GPL +DECLARE_ASM_CONST(8, uint64_t, mmx_null) = 0x0000000000000000ULL; +DECLARE_ASM_CONST(8, uint64_t, mmx_one) = 0xFFFFFFFFFFFFFFFFULL; +DECLARE_ASM_CONST(8, uint64_t, mask32b) = 0x000000FF000000FFULL; +DECLARE_ASM_CONST(8, uint64_t, mask32g) = 0x0000FF000000FF00ULL; +DECLARE_ASM_CONST(8, 
uint64_t, mask32r) = 0x00FF000000FF0000ULL; +DECLARE_ASM_CONST(8, uint64_t, mask32a) = 0xFF000000FF000000ULL; +DECLARE_ASM_CONST(8, uint64_t, mask32) = 0x00FFFFFF00FFFFFFULL; +DECLARE_ASM_CONST(8, uint64_t, mask3216br) = 0x00F800F800F800F8ULL; +DECLARE_ASM_CONST(8, uint64_t, mask3216g) = 0x0000FC000000FC00ULL; +DECLARE_ASM_CONST(8, uint64_t, mask3215g) = 0x0000F8000000F800ULL; +DECLARE_ASM_CONST(8, uint64_t, mul3216) = 0x2000000420000004ULL; +DECLARE_ASM_CONST(8, uint64_t, mul3215) = 0x2000000820000008ULL; +DECLARE_ASM_CONST(8, uint64_t, mask24b) = 0x00FF0000FF0000FFULL; +DECLARE_ASM_CONST(8, uint64_t, mask24g) = 0xFF0000FF0000FF00ULL; +DECLARE_ASM_CONST(8, uint64_t, mask24r) = 0x0000FF0000FF0000ULL; +DECLARE_ASM_CONST(8, uint64_t, mask24l) = 0x0000000000FFFFFFULL; +DECLARE_ASM_CONST(8, uint64_t, mask24h) = 0x0000FFFFFF000000ULL; +DECLARE_ASM_CONST(8, uint64_t, mask24hh) = 0xffff000000000000ULL; +DECLARE_ASM_CONST(8, uint64_t, mask24hhh) = 0xffffffff00000000ULL; +DECLARE_ASM_CONST(8, uint64_t, mask24hhhh) = 0xffffffffffff0000ULL; +DECLARE_ASM_CONST(8, uint64_t, mask15b) = 0x001F001F001F001FULL; /* 00000000 00011111 xxB */ +DECLARE_ASM_CONST(8, uint64_t, mask15rg) = 0x7FE07FE07FE07FE0ULL; /* 01111111 11100000 RGx */ +DECLARE_ASM_CONST(8, uint64_t, mask15s) = 0xFFE0FFE0FFE0FFE0ULL; +DECLARE_ASM_CONST(8, uint64_t, mask15g) = 0x03E003E003E003E0ULL; +DECLARE_ASM_CONST(8, uint64_t, mask15r) = 0x7C007C007C007C00ULL; +#define mask16b mask15b +DECLARE_ASM_CONST(8, uint64_t, mask16g) = 0x07E007E007E007E0ULL; +DECLARE_ASM_CONST(8, uint64_t, mask16r) = 0xF800F800F800F800ULL; +DECLARE_ASM_CONST(8, uint64_t, red_16mask) = 0x0000f8000000f800ULL; +DECLARE_ASM_CONST(8, uint64_t, green_16mask) = 0x000007e0000007e0ULL; +DECLARE_ASM_CONST(8, uint64_t, blue_16mask) = 0x0000001f0000001fULL; +DECLARE_ASM_CONST(8, uint64_t, red_15mask) = 0x00007c0000007c00ULL; +DECLARE_ASM_CONST(8, uint64_t, green_15mask) = 0x000003e0000003e0ULL; +DECLARE_ASM_CONST(8, uint64_t, blue_15mask) = 0x0000001f0000001fULL; +#endif /* ARCH_X86 */ + +#define RGB2YUV_SHIFT 8 +#define BY ((int)( 0.098*(1<<RGB2YUV_SHIFT)+0.5)) +#define BV ((int)(-0.071*(1<<RGB2YUV_SHIFT)+0.5)) +#define BU ((int)( 0.439*(1<<RGB2YUV_SHIFT)+0.5)) +#define GY ((int)( 0.504*(1<<RGB2YUV_SHIFT)+0.5)) +#define GV ((int)(-0.368*(1<<RGB2YUV_SHIFT)+0.5)) +#define GU ((int)(-0.291*(1<<RGB2YUV_SHIFT)+0.5)) +#define RY ((int)( 0.257*(1<<RGB2YUV_SHIFT)+0.5)) +#define RV ((int)( 0.439*(1<<RGB2YUV_SHIFT)+0.5)) +#define RU ((int)(-0.148*(1<<RGB2YUV_SHIFT)+0.5)) + +//Note: We have C, MMX, MMX2, 3DNOW versions, there is no 3DNOW + MMX2 one. 
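/* A sketch of how the coefficients above are meant to be used (BT.601,
   studio-swing ranges; the names read <input channel><output component>,
   e.g. RY is the contribution of R to Y). With RGB2YUV_SHIFT == 8:

       y = ((RY*r + GY*g + BY*b) >> RGB2YUV_SHIFT) + 16;
       u = ((RU*r + GU*g + BU*b) >> RGB2YUV_SHIFT) + 128;
       v = ((RV*r + GV*g + BV*b) >> RGB2YUV_SHIFT) + 128;

   For white (r = g = b = 255): y = ((66 + 129 + 25)*255 >> 8) + 16 = 235,
   the nominal BT.601 white level. */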
+//plain C versions +#undef HAVE_MMX +#undef HAVE_MMX2 +#undef HAVE_AMD3DNOW +#undef HAVE_SSE2 +#define HAVE_MMX 0 +#define HAVE_MMX2 0 +#define HAVE_AMD3DNOW 0 +#define HAVE_SSE2 0 +#define RENAME(a) a ## _C +#include "rgb2rgb_template.c" + +#if ARCH_X86 && CONFIG_GPL + +//MMX versions +#undef RENAME +#undef HAVE_MMX +#define HAVE_MMX 1 +#define RENAME(a) a ## _MMX +#include "rgb2rgb_template.c" + +//MMX2 versions +#undef RENAME +#undef HAVE_MMX2 +#define HAVE_MMX2 1 +#define RENAME(a) a ## _MMX2 +#include "rgb2rgb_template.c" + +//3DNOW versions +#undef RENAME +#undef HAVE_MMX2 +#undef HAVE_AMD3DNOW +#define HAVE_MMX2 0 +#define HAVE_AMD3DNOW 1 +#define RENAME(a) a ## _3DNOW +#include "rgb2rgb_template.c" + +#endif //ARCH_X86 || ARCH_X86_64 + +/* + RGB15->RGB16 original by Strepto/Astral + ported to gcc & bugfixed : A'rpi + MMX2, 3DNOW optimization by Nick Kurshev + 32-bit C version, and and&add trick by Michael Niedermayer +*/ + +void sws_rgb2rgb_init(int flags){ +#if (HAVE_MMX2 || HAVE_AMD3DNOW || HAVE_MMX) && CONFIG_GPL + if (flags & SWS_CPU_CAPS_MMX2) + rgb2rgb_init_MMX2(); + else if (flags & SWS_CPU_CAPS_3DNOW) + rgb2rgb_init_3DNOW(); + else if (flags & SWS_CPU_CAPS_MMX) + rgb2rgb_init_MMX(); + else +#endif /* HAVE_MMX2 || HAVE_AMD3DNOW || HAVE_MMX */ + rgb2rgb_init_C(); +} + +/** + * Convert the palette to the same packet 32-bit format as the palette + */ +void palette8topacked32(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette) +{ + long i; + + for (i=0; i<num_pixels; i++) + ((uint32_t *) dst)[i] = ((const uint32_t *) palette)[src[i]]; +} + +/** + * Palette format: ABCD -> dst format: ABC + */ +void palette8topacked24(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette) +{ + long i; + + for (i=0; i<num_pixels; i++) + { + //FIXME slow? + dst[0]= palette[src[i]*4+0]; + dst[1]= palette[src[i]*4+1]; + dst[2]= palette[src[i]*4+2]; + dst+= 3; + } +} + +/** + * Palette is assumed to contain BGR16, see rgb32to16 to convert the palette. + */ +void palette8torgb16(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette) +{ + long i; + for (i=0; i<num_pixels; i++) + ((uint16_t *)dst)[i] = ((const uint16_t *)palette)[src[i]]; +} +void palette8tobgr16(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette) +{ + long i; + for (i=0; i<num_pixels; i++) + ((uint16_t *)dst)[i] = bswap_16(((const uint16_t *)palette)[src[i]]); +} + +/** + * Palette is assumed to contain BGR15, see rgb32to15 to convert the palette. 
+ */ +void palette8torgb15(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette) +{ + long i; + for (i=0; i<num_pixels; i++) + ((uint16_t *)dst)[i] = ((const uint16_t *)palette)[src[i]]; +} +void palette8tobgr15(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette) +{ + long i; + for (i=0; i<num_pixels; i++) + ((uint16_t *)dst)[i] = bswap_16(((const uint16_t *)palette)[src[i]]); +} + +void rgb32to24(const uint8_t *src, uint8_t *dst, long src_size) +{ + long i; + long num_pixels = src_size >> 2; + for (i=0; i<num_pixels; i++) + { + #ifdef WORDS_BIGENDIAN + /* RGB32 (= A,B,G,R) -> BGR24 (= B,G,R) */ + dst[3*i + 0] = src[4*i + 1]; + dst[3*i + 1] = src[4*i + 2]; + dst[3*i + 2] = src[4*i + 3]; + #else + dst[3*i + 0] = src[4*i + 2]; + dst[3*i + 1] = src[4*i + 1]; + dst[3*i + 2] = src[4*i + 0]; + #endif + } +} + +void rgb24to32(const uint8_t *src, uint8_t *dst, long src_size) +{ + long i; + for (i=0; 3*i<src_size; i++) + { + #ifdef WORDS_BIGENDIAN + /* RGB24 (= R,G,B) -> BGR32 (= A,R,G,B) */ + dst[4*i + 0] = 255; + dst[4*i + 1] = src[3*i + 0]; + dst[4*i + 2] = src[3*i + 1]; + dst[4*i + 3] = src[3*i + 2]; + #else + dst[4*i + 0] = src[3*i + 2]; + dst[4*i + 1] = src[3*i + 1]; + dst[4*i + 2] = src[3*i + 0]; + dst[4*i + 3] = 255; + #endif + } +} + +void rgb16tobgr32(const uint8_t *src, uint8_t *dst, long src_size) +{ + const uint16_t *end; + uint8_t *d = dst; + const uint16_t *s = (const uint16_t *)src; + end = s + src_size/2; + while (s < end) + { + register uint16_t bgr; + bgr = *s++; + #ifdef WORDS_BIGENDIAN + *d++ = 255; + *d++ = (bgr&0x1F)<<3; + *d++ = (bgr&0x7E0)>>3; + *d++ = (bgr&0xF800)>>8; + #else + *d++ = (bgr&0xF800)>>8; + *d++ = (bgr&0x7E0)>>3; + *d++ = (bgr&0x1F)<<3; + *d++ = 255; + #endif + } +} + +void rgb16to24(const uint8_t *src, uint8_t *dst, long src_size) +{ + const uint16_t *end; + uint8_t *d = dst; + const uint16_t *s = (const uint16_t *)src; + end = s + src_size/2; + while (s < end) + { + register uint16_t bgr; + bgr = *s++; + *d++ = (bgr&0xF800)>>8; + *d++ = (bgr&0x7E0)>>3; + *d++ = (bgr&0x1F)<<3; + } +} + +void rgb16tobgr16(const uint8_t *src, uint8_t *dst, long src_size) +{ + long i; + long num_pixels = src_size >> 1; + + for (i=0; i<num_pixels; i++) + { + unsigned rgb = ((const uint16_t*)src)[i]; + ((uint16_t*)dst)[i] = (rgb>>11) | (rgb&0x7E0) | (rgb<<11); + } +} + +void rgb16tobgr15(const uint8_t *src, uint8_t *dst, long src_size) +{ + long i; + long num_pixels = src_size >> 1; + + for (i=0; i<num_pixels; i++) + { + unsigned rgb = ((const uint16_t*)src)[i]; + ((uint16_t*)dst)[i] = (rgb>>11) | ((rgb&0x7C0)>>1) | ((rgb&0x1F)<<10); + } +} + +void rgb15tobgr32(const uint8_t *src, uint8_t *dst, long src_size) +{ + const uint16_t *end; + uint8_t *d = dst; + const uint16_t *s = (const uint16_t *)src; + end = s + src_size/2; + while (s < end) + { + register uint16_t bgr; + bgr = *s++; + #ifdef WORDS_BIGENDIAN + *d++ = 255; + *d++ = (bgr&0x1F)<<3; + *d++ = (bgr&0x3E0)>>2; + *d++ = (bgr&0x7C00)>>7; + #else + *d++ = (bgr&0x7C00)>>7; + *d++ = (bgr&0x3E0)>>2; + *d++ = (bgr&0x1F)<<3; + *d++ = 255; + #endif + } +} + +void rgb15to24(const uint8_t *src, uint8_t *dst, long src_size) +{ + const uint16_t *end; + uint8_t *d = dst; + const uint16_t *s = (const uint16_t *)src; + end = s + src_size/2; + while (s < end) + { + register uint16_t bgr; + bgr = *s++; + *d++ = (bgr&0x7C00)>>7; + *d++ = (bgr&0x3E0)>>2; + *d++ = (bgr&0x1F)<<3; + } +} + +void rgb15tobgr16(const uint8_t *src, uint8_t *dst, long src_size) +{ + long i; + long num_pixels = src_size >> 
1; + + for (i=0; i<num_pixels; i++) + { + unsigned rgb = ((const uint16_t*)src)[i]; + ((uint16_t*)dst)[i] = ((rgb&0x7C00)>>10) | ((rgb&0x3E0)<<1) | (rgb<<11); + } +} + +void rgb15tobgr15(const uint8_t *src, uint8_t *dst, long src_size) +{ + long i; + long num_pixels = src_size >> 1; + + for (i=0; i<num_pixels; i++) + { + unsigned br; + unsigned rgb = ((const uint16_t*)src)[i]; + br = rgb&0x7c1F; + ((uint16_t*)dst)[i] = (br>>10) | (rgb&0x3E0) | (br<<10); + } +} + +void bgr8torgb8(const uint8_t *src, uint8_t *dst, long src_size) +{ + long i; + long num_pixels = src_size; + for (i=0; i<num_pixels; i++) + { + unsigned b,g,r; + register uint8_t rgb; + rgb = src[i]; + r = (rgb&0x07); + g = (rgb&0x38)>>3; + b = (rgb&0xC0)>>6; + dst[i] = ((b<<1)&0x07) | ((g&0x07)<<3) | ((r&0x03)<<6); + } +} diff --git a/libswscale/rgb2rgb.h b/libswscale/rgb2rgb.h new file mode 100644 index 0000000000..df912c8533 --- /dev/null +++ b/libswscale/rgb2rgb.h @@ -0,0 +1,147 @@ +/* + * software RGB to RGB converter + * pluralize by Software PAL8 to RGB converter + * Software YUV to YUV converter + * Software YUV to RGB converter + * Written by Nick Kurshev. + * palette & YUV & runtime CPU stuff by Michael (michaelni@gmx.at) + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWSCALE_RGB2RGB_H +#define SWSCALE_RGB2RGB_H + +#include <inttypes.h> + +/* A full collection of RGB to RGB(BGR) converters */ +extern void (*rgb24tobgr32)(const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb24tobgr15)(const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb32tobgr24)(const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb32to16) (const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb32to15) (const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb15to16) (const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb15tobgr24)(const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb15to32) (const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb16to15) (const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb16tobgr24)(const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb16to32) (const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb24to16) (const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb24to15) (const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb32tobgr32)(const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb32tobgr16)(const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb32tobgr15)(const uint8_t *src, uint8_t *dst, long src_size); + +void rgb24to32 
(const uint8_t *src, uint8_t *dst, long src_size); +void rgb32to24 (const uint8_t *src, uint8_t *dst, long src_size); +void rgb16tobgr32(const uint8_t *src, uint8_t *dst, long src_size); +void rgb16to24 (const uint8_t *src, uint8_t *dst, long src_size); +void rgb16tobgr16(const uint8_t *src, uint8_t *dst, long src_size); +void rgb16tobgr15(const uint8_t *src, uint8_t *dst, long src_size); +void rgb15tobgr32(const uint8_t *src, uint8_t *dst, long src_size); +void rgb15to24 (const uint8_t *src, uint8_t *dst, long src_size); +void rgb15tobgr16(const uint8_t *src, uint8_t *dst, long src_size); +void rgb15tobgr15(const uint8_t *src, uint8_t *dst, long src_size); +void bgr8torgb8 (const uint8_t *src, uint8_t *dst, long src_size); + + +void palette8topacked32(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette); +void palette8topacked24(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette); +void palette8torgb16(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette); +void palette8tobgr16(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette); +void palette8torgb15(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette); +void palette8tobgr15(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette); + +/** + * Height should be a multiple of 2 and width should be a multiple of 16. + * (If this is a problem for anyone then tell me, and I will fix it.) + * Chrominance data is only taken from every second line, others are ignored. + * FIXME: Write high quality version. + */ +//void uyvytoyv12(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, + +/** + * Height should be a multiple of 2 and width should be a multiple of 16. + * (If this is a problem for anyone then tell me, and I will fix it.) + */ +extern void (*yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, + long width, long height, + long lumStride, long chromStride, long dstStride); + +/** + * Width should be a multiple of 16. + */ +extern void (*yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, + long width, long height, + long lumStride, long chromStride, long dstStride); + +/** + * Height should be a multiple of 2 and width should be a multiple of 16. + * (If this is a problem for anyone then tell me, and I will fix it.) + */ +extern void (*yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, + long width, long height, + long lumStride, long chromStride, long srcStride); + +/** + * Height should be a multiple of 2 and width should be a multiple of 16. + * (If this is a problem for anyone then tell me, and I will fix it.) + */ +extern void (*yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, + long width, long height, + long lumStride, long chromStride, long dstStride); + +/** + * Width should be a multiple of 16. + */ +extern void (*yuv422ptouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, + long width, long height, + long lumStride, long chromStride, long dstStride); + +/** + * Height should be a multiple of 2 and width should be a multiple of 2. + * (If this is a problem for anyone then tell me, and I will fix it.) + * Chrominance data is only taken from every second line, others are ignored. + * FIXME: Write high quality version. 
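 * A hypothetical call for a 64x32 RGB24 frame (a sketch only; buffer names
 * are illustrative): rgb24toyv12(rgb, y, u, v, 64, 32, 64, 32, 64*3);
 * i.e. lumStride 64, chromStride 32 (chroma planes are half width in
 * YV12) and srcStride 192 bytes.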
+ */ +extern void (*rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, + long width, long height, + long lumStride, long chromStride, long srcStride); +extern void (*planar2x)(const uint8_t *src, uint8_t *dst, long width, long height, + long srcStride, long dstStride); + +extern void (*interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dst, + long width, long height, long src1Stride, + long src2Stride, long dstStride); + +extern void (*vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2, + uint8_t *dst1, uint8_t *dst2, + long width, long height, + long srcStride1, long srcStride2, + long dstStride1, long dstStride2); + +extern void (*yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, + uint8_t *dst, + long width, long height, + long srcStride1, long srcStride2, + long srcStride3, long dstStride); + +void sws_rgb2rgb_init(int flags); + +#endif /* SWSCALE_RGB2RGB_H */ diff --git a/libswscale/rgb2rgb_template.c b/libswscale/rgb2rgb_template.c new file mode 100644 index 0000000000..e95b628049 --- /dev/null +++ b/libswscale/rgb2rgb_template.c @@ -0,0 +1,2738 @@ +/* + * software RGB to RGB converter + * pluralize by software PAL8 to RGB converter + * software YUV to YUV converter + * software YUV to RGB converter + * Written by Nick Kurshev. + * palette & YUV & runtime CPU stuff by Michael (michaelni@gmx.at) + * lot of big-endian byte order fixes by Alex Beregszaszi + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + * The C code (not assembly, MMX, ...) of this file can be used + * under the LGPL license. + */ + +#include <stddef.h> + +#undef PREFETCH +#undef MOVNTQ +#undef EMMS +#undef SFENCE +#undef MMREG_SIZE +#undef PREFETCHW +#undef PAVGB + +#if HAVE_SSE2 +#define MMREG_SIZE 16 +#else +#define MMREG_SIZE 8 +#endif + +#if HAVE_AMD3DNOW +#define PREFETCH "prefetch" +#define PREFETCHW "prefetchw" +#define PAVGB "pavgusb" +#elif HAVE_MMX2 +#define PREFETCH "prefetchnta" +#define PREFETCHW "prefetcht0" +#define PAVGB "pavgb" +#else +#define PREFETCH " # nop" +#define PREFETCHW " # nop" +#endif + +#if HAVE_AMD3DNOW +/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. 
*/ +#define EMMS "femms" +#else +#define EMMS "emms" +#endif + +#if HAVE_MMX2 +#define MOVNTQ "movntq" +#define SFENCE "sfence" +#else +#define MOVNTQ "movq" +#define SFENCE " # nop" +#endif + +static inline void RENAME(rgb24tobgr32)(const uint8_t *src, uint8_t *dst, long src_size) +{ + uint8_t *dest = dst; + const uint8_t *s = src; + const uint8_t *end; + #if HAVE_MMX + const uint8_t *mm_end; + #endif + end = s + src_size; + #if HAVE_MMX + __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory"); + mm_end = end - 23; + __asm__ volatile("movq %0, %%mm7"::"m"(mask32a):"memory"); + while (s < mm_end) + { + __asm__ volatile( + PREFETCH" 32%1 \n\t" + "movd %1, %%mm0 \n\t" + "punpckldq 3%1, %%mm0 \n\t" + "movd 6%1, %%mm1 \n\t" + "punpckldq 9%1, %%mm1 \n\t" + "movd 12%1, %%mm2 \n\t" + "punpckldq 15%1, %%mm2 \n\t" + "movd 18%1, %%mm3 \n\t" + "punpckldq 21%1, %%mm3 \n\t" + "por %%mm7, %%mm0 \n\t" + "por %%mm7, %%mm1 \n\t" + "por %%mm7, %%mm2 \n\t" + "por %%mm7, %%mm3 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + MOVNTQ" %%mm1, 8%0 \n\t" + MOVNTQ" %%mm2, 16%0 \n\t" + MOVNTQ" %%mm3, 24%0" + :"=m"(*dest) + :"m"(*s) + :"memory"); + dest += 32; + s += 24; + } + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); + #endif + while (s < end) + { + #ifdef WORDS_BIGENDIAN + /* RGB24 (= R,G,B) -> RGB32 (= A,B,G,R) */ + *dest++ = 255; + *dest++ = s[2]; + *dest++ = s[1]; + *dest++ = s[0]; + s+=3; + #else + *dest++ = *s++; + *dest++ = *s++; + *dest++ = *s++; + *dest++ = 255; + #endif + } +} + +static inline void RENAME(rgb32tobgr24)(const uint8_t *src, uint8_t *dst, long src_size) +{ + uint8_t *dest = dst; + const uint8_t *s = src; + const uint8_t *end; +#if HAVE_MMX + const uint8_t *mm_end; +#endif + end = s + src_size; +#if HAVE_MMX + __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory"); + mm_end = end - 31; + while (s < mm_end) + { + __asm__ volatile( + PREFETCH" 32%1 \n\t" + "movq %1, %%mm0 \n\t" + "movq 8%1, %%mm1 \n\t" + "movq 16%1, %%mm4 \n\t" + "movq 24%1, %%mm5 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm1, %%mm3 \n\t" + "movq %%mm4, %%mm6 \n\t" + "movq %%mm5, %%mm7 \n\t" + "psrlq $8, %%mm2 \n\t" + "psrlq $8, %%mm3 \n\t" + "psrlq $8, %%mm6 \n\t" + "psrlq $8, %%mm7 \n\t" + "pand %2, %%mm0 \n\t" + "pand %2, %%mm1 \n\t" + "pand %2, %%mm4 \n\t" + "pand %2, %%mm5 \n\t" + "pand %3, %%mm2 \n\t" + "pand %3, %%mm3 \n\t" + "pand %3, %%mm6 \n\t" + "pand %3, %%mm7 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm3, %%mm1 \n\t" + "por %%mm6, %%mm4 \n\t" + "por %%mm7, %%mm5 \n\t" + + "movq %%mm1, %%mm2 \n\t" + "movq %%mm4, %%mm3 \n\t" + "psllq $48, %%mm2 \n\t" + "psllq $32, %%mm3 \n\t" + "pand %4, %%mm2 \n\t" + "pand %5, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "psrlq $16, %%mm1 \n\t" + "psrlq $32, %%mm4 \n\t" + "psllq $16, %%mm5 \n\t" + "por %%mm3, %%mm1 \n\t" + "pand %6, %%mm5 \n\t" + "por %%mm5, %%mm4 \n\t" + + MOVNTQ" %%mm0, %0 \n\t" + MOVNTQ" %%mm1, 8%0 \n\t" + MOVNTQ" %%mm4, 16%0" + :"=m"(*dest) + :"m"(*s),"m"(mask24l), + "m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh) + :"memory"); + dest += 24; + s += 32; + } + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); +#endif + while (s < end) + { +#ifdef WORDS_BIGENDIAN + /* RGB32 (= A,B,G,R) -> RGB24 (= R,G,B) */ + s++; + dest[2] = *s++; + dest[1] = *s++; + dest[0] = *s++; + dest += 3; +#else + *dest++ = *s++; + *dest++ = *s++; + *dest++ = *s++; + s++; +#endif + } +} + +/* + original by Strepto/Astral + ported to gcc & bugfixed: A'rpi + MMX2, 3DNOW optimization by Nick Kurshev + 32-bit C version, and and&add trick by Michael Niedermayer 
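   (The and&add trick, as used by rgb15to16 below: for a 1555 pixel
   0rrrrrgggggbbbbb, x + (x & 0x7FE0) adds the R/G field to itself,
   shifting bits 5..14 up by one and yielding rrrrrggggg0bbbbb, a 565
   pixel whose green LSB is 0; the 0x7FFF7FFF mask in the 32-bit path
   clears bit 15 of each halfword so the add cannot carry into the
   neighboring pixel.)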
+*/ +static inline void RENAME(rgb15to16)(const uint8_t *src, uint8_t *dst, long src_size) +{ + register const uint8_t* s=src; + register uint8_t* d=dst; + register const uint8_t *end; + const uint8_t *mm_end; + end = s + src_size; +#if HAVE_MMX + __asm__ volatile(PREFETCH" %0"::"m"(*s)); + __asm__ volatile("movq %0, %%mm4"::"m"(mask15s)); + mm_end = end - 15; + while (s<mm_end) + { + __asm__ volatile( + PREFETCH" 32%1 \n\t" + "movq %1, %%mm0 \n\t" + "movq 8%1, %%mm2 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm2, %%mm3 \n\t" + "pand %%mm4, %%mm0 \n\t" + "pand %%mm4, %%mm2 \n\t" + "paddw %%mm1, %%mm0 \n\t" + "paddw %%mm3, %%mm2 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + MOVNTQ" %%mm2, 8%0" + :"=m"(*d) + :"m"(*s) + ); + d+=16; + s+=16; + } + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); +#endif + mm_end = end - 3; + while (s < mm_end) + { + register unsigned x= *((const uint32_t *)s); + *((uint32_t *)d) = (x&0x7FFF7FFF) + (x&0x7FE07FE0); + d+=4; + s+=4; + } + if (s < end) + { + register unsigned short x= *((const uint16_t *)s); + *((uint16_t *)d) = (x&0x7FFF) + (x&0x7FE0); + } +} + +static inline void RENAME(rgb16to15)(const uint8_t *src, uint8_t *dst, long src_size) +{ + register const uint8_t* s=src; + register uint8_t* d=dst; + register const uint8_t *end; + const uint8_t *mm_end; + end = s + src_size; +#if HAVE_MMX + __asm__ volatile(PREFETCH" %0"::"m"(*s)); + __asm__ volatile("movq %0, %%mm7"::"m"(mask15rg)); + __asm__ volatile("movq %0, %%mm6"::"m"(mask15b)); + mm_end = end - 15; + while (s<mm_end) + { + __asm__ volatile( + PREFETCH" 32%1 \n\t" + "movq %1, %%mm0 \n\t" + "movq 8%1, %%mm2 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm2, %%mm3 \n\t" + "psrlq $1, %%mm0 \n\t" + "psrlq $1, %%mm2 \n\t" + "pand %%mm7, %%mm0 \n\t" + "pand %%mm7, %%mm2 \n\t" + "pand %%mm6, %%mm1 \n\t" + "pand %%mm6, %%mm3 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm3, %%mm2 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + MOVNTQ" %%mm2, 8%0" + :"=m"(*d) + :"m"(*s) + ); + d+=16; + s+=16; + } + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); +#endif + mm_end = end - 3; + while (s < mm_end) + { + register uint32_t x= *((const uint32_t*)s); + *((uint32_t *)d) = ((x>>1)&0x7FE07FE0) | (x&0x001F001F); + s+=4; + d+=4; + } + if (s < end) + { + register uint16_t x= *((const uint16_t*)s); + *((uint16_t *)d) = ((x>>1)&0x7FE0) | (x&0x001F); + s+=2; + d+=2; + } +} + +static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, long src_size) +{ + const uint8_t *s = src; + const uint8_t *end; +#if HAVE_MMX + const uint8_t *mm_end; +#endif + uint16_t *d = (uint16_t *)dst; + end = s + src_size; +#if HAVE_MMX + mm_end = end - 15; +#if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster) + __asm__ volatile( + "movq %3, %%mm5 \n\t" + "movq %4, %%mm6 \n\t" + "movq %5, %%mm7 \n\t" + "jmp 2f \n\t" + ASMALIGN(4) + "1: \n\t" + PREFETCH" 32(%1) \n\t" + "movd (%1), %%mm0 \n\t" + "movd 4(%1), %%mm3 \n\t" + "punpckldq 8(%1), %%mm0 \n\t" + "punpckldq 12(%1), %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm3, %%mm4 \n\t" + "pand %%mm6, %%mm0 \n\t" + "pand %%mm6, %%mm3 \n\t" + "pmaddwd %%mm7, %%mm0 \n\t" + "pmaddwd %%mm7, %%mm3 \n\t" + "pand %%mm5, %%mm1 \n\t" + "pand %%mm5, %%mm4 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "psrld $5, %%mm0 \n\t" + "pslld $11, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, (%0) \n\t" + "add $16, %1 \n\t" + "add $8, %0 \n\t" + "2: \n\t" + "cmp %2, %1 \n\t" + " jb 1b 
\n\t" + : "+r" (d), "+r"(s) + : "r" (mm_end), "m" (mask3216g), "m" (mask3216br), "m" (mul3216) + ); +#else + __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); + __asm__ volatile( + "movq %0, %%mm7 \n\t" + "movq %1, %%mm6 \n\t" + ::"m"(red_16mask),"m"(green_16mask)); + while (s < mm_end) + { + __asm__ volatile( + PREFETCH" 32%1 \n\t" + "movd %1, %%mm0 \n\t" + "movd 4%1, %%mm3 \n\t" + "punpckldq 8%1, %%mm0 \n\t" + "punpckldq 12%1, %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm3, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "psrlq $3, %%mm0 \n\t" + "psrlq $3, %%mm3 \n\t" + "pand %2, %%mm0 \n\t" + "pand %2, %%mm3 \n\t" + "psrlq $5, %%mm1 \n\t" + "psrlq $5, %%mm4 \n\t" + "pand %%mm6, %%mm1 \n\t" + "pand %%mm6, %%mm4 \n\t" + "psrlq $8, %%mm2 \n\t" + "psrlq $8, %%mm5 \n\t" + "pand %%mm7, %%mm2 \n\t" + "pand %%mm7, %%mm5 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm5, %%mm3 \n\t" + "psllq $16, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory"); + d += 4; + s += 16; + } +#endif + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); +#endif + while (s < end) + { + register int rgb = *(const uint32_t*)s; s += 4; + *d++ = ((rgb&0xFF)>>3) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>8); + } +} + +static inline void RENAME(rgb32tobgr16)(const uint8_t *src, uint8_t *dst, long src_size) +{ + const uint8_t *s = src; + const uint8_t *end; +#if HAVE_MMX + const uint8_t *mm_end; +#endif + uint16_t *d = (uint16_t *)dst; + end = s + src_size; +#if HAVE_MMX + __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); + __asm__ volatile( + "movq %0, %%mm7 \n\t" + "movq %1, %%mm6 \n\t" + ::"m"(red_16mask),"m"(green_16mask)); + mm_end = end - 15; + while (s < mm_end) + { + __asm__ volatile( + PREFETCH" 32%1 \n\t" + "movd %1, %%mm0 \n\t" + "movd 4%1, %%mm3 \n\t" + "punpckldq 8%1, %%mm0 \n\t" + "punpckldq 12%1, %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm3, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "psllq $8, %%mm0 \n\t" + "psllq $8, %%mm3 \n\t" + "pand %%mm7, %%mm0 \n\t" + "pand %%mm7, %%mm3 \n\t" + "psrlq $5, %%mm1 \n\t" + "psrlq $5, %%mm4 \n\t" + "pand %%mm6, %%mm1 \n\t" + "pand %%mm6, %%mm4 \n\t" + "psrlq $19, %%mm2 \n\t" + "psrlq $19, %%mm5 \n\t" + "pand %2, %%mm2 \n\t" + "pand %2, %%mm5 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm5, %%mm3 \n\t" + "psllq $16, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory"); + d += 4; + s += 16; + } + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); +#endif + while (s < end) + { + register int rgb = *(const uint32_t*)s; s += 4; + *d++ = ((rgb&0xF8)<<8) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>19); + } +} + +static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, long src_size) +{ + const uint8_t *s = src; + const uint8_t *end; +#if HAVE_MMX + const uint8_t *mm_end; +#endif + uint16_t *d = (uint16_t *)dst; + end = s + src_size; +#if HAVE_MMX + mm_end = end - 15; +#if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster) + __asm__ volatile( + "movq %3, %%mm5 \n\t" + "movq %4, %%mm6 \n\t" + "movq %5, %%mm7 \n\t" + "jmp 2f \n\t" + ASMALIGN(4) + "1: \n\t" + PREFETCH" 32(%1) \n\t" + "movd (%1), %%mm0 \n\t" + "movd 4(%1), %%mm3 \n\t" + "punpckldq 8(%1), %%mm0 \n\t" + "punpckldq 
12(%1), %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm3, %%mm4 \n\t" + "pand %%mm6, %%mm0 \n\t" + "pand %%mm6, %%mm3 \n\t" + "pmaddwd %%mm7, %%mm0 \n\t" + "pmaddwd %%mm7, %%mm3 \n\t" + "pand %%mm5, %%mm1 \n\t" + "pand %%mm5, %%mm4 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "psrld $6, %%mm0 \n\t" + "pslld $10, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, (%0) \n\t" + "add $16, %1 \n\t" + "add $8, %0 \n\t" + "2: \n\t" + "cmp %2, %1 \n\t" + " jb 1b \n\t" + : "+r" (d), "+r"(s) + : "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215) + ); +#else + __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); + __asm__ volatile( + "movq %0, %%mm7 \n\t" + "movq %1, %%mm6 \n\t" + ::"m"(red_15mask),"m"(green_15mask)); + while (s < mm_end) + { + __asm__ volatile( + PREFETCH" 32%1 \n\t" + "movd %1, %%mm0 \n\t" + "movd 4%1, %%mm3 \n\t" + "punpckldq 8%1, %%mm0 \n\t" + "punpckldq 12%1, %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm3, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "psrlq $3, %%mm0 \n\t" + "psrlq $3, %%mm3 \n\t" + "pand %2, %%mm0 \n\t" + "pand %2, %%mm3 \n\t" + "psrlq $6, %%mm1 \n\t" + "psrlq $6, %%mm4 \n\t" + "pand %%mm6, %%mm1 \n\t" + "pand %%mm6, %%mm4 \n\t" + "psrlq $9, %%mm2 \n\t" + "psrlq $9, %%mm5 \n\t" + "pand %%mm7, %%mm2 \n\t" + "pand %%mm7, %%mm5 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm5, %%mm3 \n\t" + "psllq $16, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory"); + d += 4; + s += 16; + } +#endif + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); +#endif + while (s < end) + { + register int rgb = *(const uint32_t*)s; s += 4; + *d++ = ((rgb&0xFF)>>3) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>9); + } +} + +static inline void RENAME(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, long src_size) +{ + const uint8_t *s = src; + const uint8_t *end; +#if HAVE_MMX + const uint8_t *mm_end; +#endif + uint16_t *d = (uint16_t *)dst; + end = s + src_size; +#if HAVE_MMX + __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); + __asm__ volatile( + "movq %0, %%mm7 \n\t" + "movq %1, %%mm6 \n\t" + ::"m"(red_15mask),"m"(green_15mask)); + mm_end = end - 15; + while (s < mm_end) + { + __asm__ volatile( + PREFETCH" 32%1 \n\t" + "movd %1, %%mm0 \n\t" + "movd 4%1, %%mm3 \n\t" + "punpckldq 8%1, %%mm0 \n\t" + "punpckldq 12%1, %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm3, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "psllq $7, %%mm0 \n\t" + "psllq $7, %%mm3 \n\t" + "pand %%mm7, %%mm0 \n\t" + "pand %%mm7, %%mm3 \n\t" + "psrlq $6, %%mm1 \n\t" + "psrlq $6, %%mm4 \n\t" + "pand %%mm6, %%mm1 \n\t" + "pand %%mm6, %%mm4 \n\t" + "psrlq $19, %%mm2 \n\t" + "psrlq $19, %%mm5 \n\t" + "pand %2, %%mm2 \n\t" + "pand %2, %%mm5 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm5, %%mm3 \n\t" + "psllq $16, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory"); + d += 4; + s += 16; + } + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); +#endif + while (s < end) + { + register int rgb = *(const uint32_t*)s; s += 4; + *d++ = ((rgb&0xF8)<<7) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>19); + } +} + +static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long src_size) +{ + const uint8_t *s = src; + const uint8_t *end; +#if HAVE_MMX + const uint8_t *mm_end; +#endif + 
uint16_t *d = (uint16_t *)dst; + end = s + src_size; +#if HAVE_MMX + __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); + __asm__ volatile( + "movq %0, %%mm7 \n\t" + "movq %1, %%mm6 \n\t" + ::"m"(red_16mask),"m"(green_16mask)); + mm_end = end - 11; + while (s < mm_end) + { + __asm__ volatile( + PREFETCH" 32%1 \n\t" + "movd %1, %%mm0 \n\t" + "movd 3%1, %%mm3 \n\t" + "punpckldq 6%1, %%mm0 \n\t" + "punpckldq 9%1, %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm3, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "psrlq $3, %%mm0 \n\t" + "psrlq $3, %%mm3 \n\t" + "pand %2, %%mm0 \n\t" + "pand %2, %%mm3 \n\t" + "psrlq $5, %%mm1 \n\t" + "psrlq $5, %%mm4 \n\t" + "pand %%mm6, %%mm1 \n\t" + "pand %%mm6, %%mm4 \n\t" + "psrlq $8, %%mm2 \n\t" + "psrlq $8, %%mm5 \n\t" + "pand %%mm7, %%mm2 \n\t" + "pand %%mm7, %%mm5 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm5, %%mm3 \n\t" + "psllq $16, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory"); + d += 4; + s += 12; + } + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); +#endif + while (s < end) + { + const int b = *s++; + const int g = *s++; + const int r = *s++; + *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8); + } +} + +static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, long src_size) +{ + const uint8_t *s = src; + const uint8_t *end; +#if HAVE_MMX + const uint8_t *mm_end; +#endif + uint16_t *d = (uint16_t *)dst; + end = s + src_size; +#if HAVE_MMX + __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); + __asm__ volatile( + "movq %0, %%mm7 \n\t" + "movq %1, %%mm6 \n\t" + ::"m"(red_16mask),"m"(green_16mask)); + mm_end = end - 15; + while (s < mm_end) + { + __asm__ volatile( + PREFETCH" 32%1 \n\t" + "movd %1, %%mm0 \n\t" + "movd 3%1, %%mm3 \n\t" + "punpckldq 6%1, %%mm0 \n\t" + "punpckldq 9%1, %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm3, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "psllq $8, %%mm0 \n\t" + "psllq $8, %%mm3 \n\t" + "pand %%mm7, %%mm0 \n\t" + "pand %%mm7, %%mm3 \n\t" + "psrlq $5, %%mm1 \n\t" + "psrlq $5, %%mm4 \n\t" + "pand %%mm6, %%mm1 \n\t" + "pand %%mm6, %%mm4 \n\t" + "psrlq $19, %%mm2 \n\t" + "psrlq $19, %%mm5 \n\t" + "pand %2, %%mm2 \n\t" + "pand %2, %%mm5 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm5, %%mm3 \n\t" + "psllq $16, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory"); + d += 4; + s += 12; + } + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); +#endif + while (s < end) + { + const int r = *s++; + const int g = *s++; + const int b = *s++; + *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8); + } +} + +static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, long src_size) +{ + const uint8_t *s = src; + const uint8_t *end; +#if HAVE_MMX + const uint8_t *mm_end; +#endif + uint16_t *d = (uint16_t *)dst; + end = s + src_size; +#if HAVE_MMX + __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); + __asm__ volatile( + "movq %0, %%mm7 \n\t" + "movq %1, %%mm6 \n\t" + ::"m"(red_15mask),"m"(green_15mask)); + mm_end = end - 11; + while (s < mm_end) + { + __asm__ volatile( + PREFETCH" 32%1 \n\t" + "movd %1, %%mm0 \n\t" + "movd 3%1, %%mm3 \n\t" + "punpckldq 6%1, %%mm0 \n\t" + "punpckldq 9%1, %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm3, %%mm4 \n\t" + "movq 
%%mm3, %%mm5 \n\t" + "psrlq $3, %%mm0 \n\t" + "psrlq $3, %%mm3 \n\t" + "pand %2, %%mm0 \n\t" + "pand %2, %%mm3 \n\t" + "psrlq $6, %%mm1 \n\t" + "psrlq $6, %%mm4 \n\t" + "pand %%mm6, %%mm1 \n\t" + "pand %%mm6, %%mm4 \n\t" + "psrlq $9, %%mm2 \n\t" + "psrlq $9, %%mm5 \n\t" + "pand %%mm7, %%mm2 \n\t" + "pand %%mm7, %%mm5 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm5, %%mm3 \n\t" + "psllq $16, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory"); + d += 4; + s += 12; + } + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); +#endif + while (s < end) + { + const int b = *s++; + const int g = *s++; + const int r = *s++; + *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7); + } +} + +static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, long src_size) +{ + const uint8_t *s = src; + const uint8_t *end; +#if HAVE_MMX + const uint8_t *mm_end; +#endif + uint16_t *d = (uint16_t *)dst; + end = s + src_size; +#if HAVE_MMX + __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); + __asm__ volatile( + "movq %0, %%mm7 \n\t" + "movq %1, %%mm6 \n\t" + ::"m"(red_15mask),"m"(green_15mask)); + mm_end = end - 15; + while (s < mm_end) + { + __asm__ volatile( + PREFETCH" 32%1 \n\t" + "movd %1, %%mm0 \n\t" + "movd 3%1, %%mm3 \n\t" + "punpckldq 6%1, %%mm0 \n\t" + "punpckldq 9%1, %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm3, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "psllq $7, %%mm0 \n\t" + "psllq $7, %%mm3 \n\t" + "pand %%mm7, %%mm0 \n\t" + "pand %%mm7, %%mm3 \n\t" + "psrlq $6, %%mm1 \n\t" + "psrlq $6, %%mm4 \n\t" + "pand %%mm6, %%mm1 \n\t" + "pand %%mm6, %%mm4 \n\t" + "psrlq $19, %%mm2 \n\t" + "psrlq $19, %%mm5 \n\t" + "pand %2, %%mm2 \n\t" + "pand %2, %%mm5 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm5, %%mm3 \n\t" + "psllq $16, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory"); + d += 4; + s += 12; + } + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); +#endif + while (s < end) + { + const int r = *s++; + const int g = *s++; + const int b = *s++; + *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7); + } +} + +/* + I use less accurate approximation here by simply left-shifting the input + value and filling the low order bits with zeroes. This method improves PNG + compression but this scheme cannot reproduce white exactly, since it does + not generate an all-ones maximum value; the net effect is to darken the + image slightly. 
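+
+ For example, the maximum 5-bit value 0x1F maps to 0xF8 rather than 0xFF
+ under a plain shift; replicating the top bits, (x<<3) | (x>>2) in C for a
+ 5-bit component x, maps it back to 0xFF.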
+ + The better method should be "left bit replication": + + 4 3 2 1 0 + --------- + 1 1 0 1 1 + + 7 6 5 4 3 2 1 0 + ---------------- + 1 1 0 1 1 1 1 0 + |=======| |===| + | leftmost bits repeated to fill open bits + | + original bits +*/ +static inline void RENAME(rgb15tobgr24)(const uint8_t *src, uint8_t *dst, long src_size) +{ + const uint16_t *end; +#if HAVE_MMX + const uint16_t *mm_end; +#endif + uint8_t *d = dst; + const uint16_t *s = (const uint16_t*)src; + end = s + src_size/2; +#if HAVE_MMX + __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory"); + mm_end = end - 7; + while (s < mm_end) + { + __asm__ volatile( + PREFETCH" 32%1 \n\t" + "movq %1, %%mm0 \n\t" + "movq %1, %%mm1 \n\t" + "movq %1, %%mm2 \n\t" + "pand %2, %%mm0 \n\t" + "pand %3, %%mm1 \n\t" + "pand %4, %%mm2 \n\t" + "psllq $3, %%mm0 \n\t" + "psrlq $2, %%mm1 \n\t" + "psrlq $7, %%mm2 \n\t" + "movq %%mm0, %%mm3 \n\t" + "movq %%mm1, %%mm4 \n\t" + "movq %%mm2, %%mm5 \n\t" + "punpcklwd %5, %%mm0 \n\t" + "punpcklwd %5, %%mm1 \n\t" + "punpcklwd %5, %%mm2 \n\t" + "punpckhwd %5, %%mm3 \n\t" + "punpckhwd %5, %%mm4 \n\t" + "punpckhwd %5, %%mm5 \n\t" + "psllq $8, %%mm1 \n\t" + "psllq $16, %%mm2 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm2, %%mm0 \n\t" + "psllq $8, %%mm4 \n\t" + "psllq $16, %%mm5 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm5, %%mm3 \n\t" + + "movq %%mm0, %%mm6 \n\t" + "movq %%mm3, %%mm7 \n\t" + + "movq 8%1, %%mm0 \n\t" + "movq 8%1, %%mm1 \n\t" + "movq 8%1, %%mm2 \n\t" + "pand %2, %%mm0 \n\t" + "pand %3, %%mm1 \n\t" + "pand %4, %%mm2 \n\t" + "psllq $3, %%mm0 \n\t" + "psrlq $2, %%mm1 \n\t" + "psrlq $7, %%mm2 \n\t" + "movq %%mm0, %%mm3 \n\t" + "movq %%mm1, %%mm4 \n\t" + "movq %%mm2, %%mm5 \n\t" + "punpcklwd %5, %%mm0 \n\t" + "punpcklwd %5, %%mm1 \n\t" + "punpcklwd %5, %%mm2 \n\t" + "punpckhwd %5, %%mm3 \n\t" + "punpckhwd %5, %%mm4 \n\t" + "punpckhwd %5, %%mm5 \n\t" + "psllq $8, %%mm1 \n\t" + "psllq $16, %%mm2 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm2, %%mm0 \n\t" + "psllq $8, %%mm4 \n\t" + "psllq $16, %%mm5 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm5, %%mm3 \n\t" + + :"=m"(*d) + :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r), "m"(mmx_null) + :"memory"); + /* borrowed 32 to 24 */ + __asm__ volatile( + "movq %%mm0, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "movq %%mm6, %%mm0 \n\t" + "movq %%mm7, %%mm1 \n\t" + + "movq %%mm4, %%mm6 \n\t" + "movq %%mm5, %%mm7 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm1, %%mm3 \n\t" + + "psrlq $8, %%mm2 \n\t" + "psrlq $8, %%mm3 \n\t" + "psrlq $8, %%mm6 \n\t" + "psrlq $8, %%mm7 \n\t" + "pand %2, %%mm0 \n\t" + "pand %2, %%mm1 \n\t" + "pand %2, %%mm4 \n\t" + "pand %2, %%mm5 \n\t" + "pand %3, %%mm2 \n\t" + "pand %3, %%mm3 \n\t" + "pand %3, %%mm6 \n\t" + "pand %3, %%mm7 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm3, %%mm1 \n\t" + "por %%mm6, %%mm4 \n\t" + "por %%mm7, %%mm5 \n\t" + + "movq %%mm1, %%mm2 \n\t" + "movq %%mm4, %%mm3 \n\t" + "psllq $48, %%mm2 \n\t" + "psllq $32, %%mm3 \n\t" + "pand %4, %%mm2 \n\t" + "pand %5, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "psrlq $16, %%mm1 \n\t" + "psrlq $32, %%mm4 \n\t" + "psllq $16, %%mm5 \n\t" + "por %%mm3, %%mm1 \n\t" + "pand %6, %%mm5 \n\t" + "por %%mm5, %%mm4 \n\t" + + MOVNTQ" %%mm0, %0 \n\t" + MOVNTQ" %%mm1, 8%0 \n\t" + MOVNTQ" %%mm4, 16%0" + + :"=m"(*d) + :"m"(*s),"m"(mask24l),"m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh) + :"memory"); + d += 24; + s += 8; + } + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); +#endif + while (s < end) + { + register uint16_t bgr; + bgr = *s++; + *d++ = (bgr&0x1F)<<3; + *d++ = 
(bgr&0x3E0)>>2; + *d++ = (bgr&0x7C00)>>7; + } +} + +static inline void RENAME(rgb16tobgr24)(const uint8_t *src, uint8_t *dst, long src_size) +{ + const uint16_t *end; +#if HAVE_MMX + const uint16_t *mm_end; +#endif + uint8_t *d = (uint8_t *)dst; + const uint16_t *s = (const uint16_t *)src; + end = s + src_size/2; +#if HAVE_MMX + __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory"); + mm_end = end - 7; + while (s < mm_end) + { + __asm__ volatile( + PREFETCH" 32%1 \n\t" + "movq %1, %%mm0 \n\t" + "movq %1, %%mm1 \n\t" + "movq %1, %%mm2 \n\t" + "pand %2, %%mm0 \n\t" + "pand %3, %%mm1 \n\t" + "pand %4, %%mm2 \n\t" + "psllq $3, %%mm0 \n\t" + "psrlq $3, %%mm1 \n\t" + "psrlq $8, %%mm2 \n\t" + "movq %%mm0, %%mm3 \n\t" + "movq %%mm1, %%mm4 \n\t" + "movq %%mm2, %%mm5 \n\t" + "punpcklwd %5, %%mm0 \n\t" + "punpcklwd %5, %%mm1 \n\t" + "punpcklwd %5, %%mm2 \n\t" + "punpckhwd %5, %%mm3 \n\t" + "punpckhwd %5, %%mm4 \n\t" + "punpckhwd %5, %%mm5 \n\t" + "psllq $8, %%mm1 \n\t" + "psllq $16, %%mm2 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm2, %%mm0 \n\t" + "psllq $8, %%mm4 \n\t" + "psllq $16, %%mm5 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm5, %%mm3 \n\t" + + "movq %%mm0, %%mm6 \n\t" + "movq %%mm3, %%mm7 \n\t" + + "movq 8%1, %%mm0 \n\t" + "movq 8%1, %%mm1 \n\t" + "movq 8%1, %%mm2 \n\t" + "pand %2, %%mm0 \n\t" + "pand %3, %%mm1 \n\t" + "pand %4, %%mm2 \n\t" + "psllq $3, %%mm0 \n\t" + "psrlq $3, %%mm1 \n\t" + "psrlq $8, %%mm2 \n\t" + "movq %%mm0, %%mm3 \n\t" + "movq %%mm1, %%mm4 \n\t" + "movq %%mm2, %%mm5 \n\t" + "punpcklwd %5, %%mm0 \n\t" + "punpcklwd %5, %%mm1 \n\t" + "punpcklwd %5, %%mm2 \n\t" + "punpckhwd %5, %%mm3 \n\t" + "punpckhwd %5, %%mm4 \n\t" + "punpckhwd %5, %%mm5 \n\t" + "psllq $8, %%mm1 \n\t" + "psllq $16, %%mm2 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm2, %%mm0 \n\t" + "psllq $8, %%mm4 \n\t" + "psllq $16, %%mm5 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm5, %%mm3 \n\t" + :"=m"(*d) + :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r),"m"(mmx_null) + :"memory"); + /* borrowed 32 to 24 */ + __asm__ volatile( + "movq %%mm0, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "movq %%mm6, %%mm0 \n\t" + "movq %%mm7, %%mm1 \n\t" + + "movq %%mm4, %%mm6 \n\t" + "movq %%mm5, %%mm7 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm1, %%mm3 \n\t" + + "psrlq $8, %%mm2 \n\t" + "psrlq $8, %%mm3 \n\t" + "psrlq $8, %%mm6 \n\t" + "psrlq $8, %%mm7 \n\t" + "pand %2, %%mm0 \n\t" + "pand %2, %%mm1 \n\t" + "pand %2, %%mm4 \n\t" + "pand %2, %%mm5 \n\t" + "pand %3, %%mm2 \n\t" + "pand %3, %%mm3 \n\t" + "pand %3, %%mm6 \n\t" + "pand %3, %%mm7 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm3, %%mm1 \n\t" + "por %%mm6, %%mm4 \n\t" + "por %%mm7, %%mm5 \n\t" + + "movq %%mm1, %%mm2 \n\t" + "movq %%mm4, %%mm3 \n\t" + "psllq $48, %%mm2 \n\t" + "psllq $32, %%mm3 \n\t" + "pand %4, %%mm2 \n\t" + "pand %5, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "psrlq $16, %%mm1 \n\t" + "psrlq $32, %%mm4 \n\t" + "psllq $16, %%mm5 \n\t" + "por %%mm3, %%mm1 \n\t" + "pand %6, %%mm5 \n\t" + "por %%mm5, %%mm4 \n\t" + + MOVNTQ" %%mm0, %0 \n\t" + MOVNTQ" %%mm1, 8%0 \n\t" + MOVNTQ" %%mm4, 16%0" + + :"=m"(*d) + :"m"(*s),"m"(mask24l),"m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh) + :"memory"); + d += 24; + s += 8; + } + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); +#endif + while (s < end) + { + register uint16_t bgr; + bgr = *s++; + *d++ = (bgr&0x1F)<<3; + *d++ = (bgr&0x7E0)>>3; + *d++ = (bgr&0xF800)>>8; + } +} + +static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, long src_size) +{ + const uint16_t *end; +#if HAVE_MMX + 
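+ /* MMX path: expands 4 packed 1:5:5:5 pixels to 4 bytes each per iteration;
+ note it leaves the high (alpha) byte 0, whereas the C fallback below
+ writes 255 there. */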
const uint16_t *mm_end; +#endif + uint8_t *d = dst; + const uint16_t *s = (const uint16_t *)src; + end = s + src_size/2; +#if HAVE_MMX + __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory"); + __asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory"); + mm_end = end - 3; + while (s < mm_end) + { + __asm__ volatile( + PREFETCH" 32%1 \n\t" + "movq %1, %%mm0 \n\t" + "movq %1, %%mm1 \n\t" + "movq %1, %%mm2 \n\t" + "pand %2, %%mm0 \n\t" + "pand %3, %%mm1 \n\t" + "pand %4, %%mm2 \n\t" + "psllq $3, %%mm0 \n\t" + "psrlq $2, %%mm1 \n\t" + "psrlq $7, %%mm2 \n\t" + "movq %%mm0, %%mm3 \n\t" + "movq %%mm1, %%mm4 \n\t" + "movq %%mm2, %%mm5 \n\t" + "punpcklwd %%mm7, %%mm0 \n\t" + "punpcklwd %%mm7, %%mm1 \n\t" + "punpcklwd %%mm7, %%mm2 \n\t" + "punpckhwd %%mm7, %%mm3 \n\t" + "punpckhwd %%mm7, %%mm4 \n\t" + "punpckhwd %%mm7, %%mm5 \n\t" + "psllq $8, %%mm1 \n\t" + "psllq $16, %%mm2 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm2, %%mm0 \n\t" + "psllq $8, %%mm4 \n\t" + "psllq $16, %%mm5 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm5, %%mm3 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + MOVNTQ" %%mm3, 8%0 \n\t" + :"=m"(*d) + :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r) + :"memory"); + d += 16; + s += 4; + } + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); +#endif + while (s < end) + { +#if 0 //slightly slower on Athlon + int bgr= *s++; + *((uint32_t*)d)++ = ((bgr&0x1F)<<3) + ((bgr&0x3E0)<<6) + ((bgr&0x7C00)<<9); +#else + register uint16_t bgr; + bgr = *s++; +#ifdef WORDS_BIGENDIAN + *d++ = 255; + *d++ = (bgr&0x7C00)>>7; + *d++ = (bgr&0x3E0)>>2; + *d++ = (bgr&0x1F)<<3; +#else + *d++ = (bgr&0x1F)<<3; + *d++ = (bgr&0x3E0)>>2; + *d++ = (bgr&0x7C00)>>7; + *d++ = 255; +#endif + +#endif + } +} + +static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, long src_size) +{ + const uint16_t *end; +#if HAVE_MMX + const uint16_t *mm_end; +#endif + uint8_t *d = dst; + const uint16_t *s = (const uint16_t*)src; + end = s + src_size/2; +#if HAVE_MMX + __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory"); + __asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory"); + mm_end = end - 3; + while (s < mm_end) + { + __asm__ volatile( + PREFETCH" 32%1 \n\t" + "movq %1, %%mm0 \n\t" + "movq %1, %%mm1 \n\t" + "movq %1, %%mm2 \n\t" + "pand %2, %%mm0 \n\t" + "pand %3, %%mm1 \n\t" + "pand %4, %%mm2 \n\t" + "psllq $3, %%mm0 \n\t" + "psrlq $3, %%mm1 \n\t" + "psrlq $8, %%mm2 \n\t" + "movq %%mm0, %%mm3 \n\t" + "movq %%mm1, %%mm4 \n\t" + "movq %%mm2, %%mm5 \n\t" + "punpcklwd %%mm7, %%mm0 \n\t" + "punpcklwd %%mm7, %%mm1 \n\t" + "punpcklwd %%mm7, %%mm2 \n\t" + "punpckhwd %%mm7, %%mm3 \n\t" + "punpckhwd %%mm7, %%mm4 \n\t" + "punpckhwd %%mm7, %%mm5 \n\t" + "psllq $8, %%mm1 \n\t" + "psllq $16, %%mm2 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm2, %%mm0 \n\t" + "psllq $8, %%mm4 \n\t" + "psllq $16, %%mm5 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm5, %%mm3 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + MOVNTQ" %%mm3, 8%0 \n\t" + :"=m"(*d) + :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r) + :"memory"); + d += 16; + s += 4; + } + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); +#endif + while (s < end) + { + register uint16_t bgr; + bgr = *s++; +#ifdef WORDS_BIGENDIAN + *d++ = 255; + *d++ = (bgr&0xF800)>>8; + *d++ = (bgr&0x7E0)>>3; + *d++ = (bgr&0x1F)<<3; +#else + *d++ = (bgr&0x1F)<<3; + *d++ = (bgr&0x7E0)>>3; + *d++ = (bgr&0xF800)>>8; + *d++ = 255; +#endif + } +} + +static inline void RENAME(rgb32tobgr32)(const uint8_t *src, uint8_t *dst, long src_size) +{ + long idx = 15 - src_size; + const uint8_t *s = src-idx; + uint8_t *d = 
dst-idx; +#if HAVE_MMX + __asm__ volatile( + "test %0, %0 \n\t" + "jns 2f \n\t" + PREFETCH" (%1, %0) \n\t" + "movq %3, %%mm7 \n\t" + "pxor %4, %%mm7 \n\t" + "movq %%mm7, %%mm6 \n\t" + "pxor %5, %%mm7 \n\t" + ASMALIGN(4) + "1: \n\t" + PREFETCH" 32(%1, %0) \n\t" + "movq (%1, %0), %%mm0 \n\t" + "movq 8(%1, %0), %%mm1 \n\t" +# if HAVE_MMX2 + "pshufw $177, %%mm0, %%mm3 \n\t" + "pshufw $177, %%mm1, %%mm5 \n\t" + "pand %%mm7, %%mm0 \n\t" + "pand %%mm6, %%mm3 \n\t" + "pand %%mm7, %%mm1 \n\t" + "pand %%mm6, %%mm5 \n\t" + "por %%mm3, %%mm0 \n\t" + "por %%mm5, %%mm1 \n\t" +# else + "movq %%mm0, %%mm2 \n\t" + "movq %%mm1, %%mm4 \n\t" + "pand %%mm7, %%mm0 \n\t" + "pand %%mm6, %%mm2 \n\t" + "pand %%mm7, %%mm1 \n\t" + "pand %%mm6, %%mm4 \n\t" + "movq %%mm2, %%mm3 \n\t" + "movq %%mm4, %%mm5 \n\t" + "pslld $16, %%mm2 \n\t" + "psrld $16, %%mm3 \n\t" + "pslld $16, %%mm4 \n\t" + "psrld $16, %%mm5 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm4, %%mm1 \n\t" + "por %%mm3, %%mm0 \n\t" + "por %%mm5, %%mm1 \n\t" +# endif + MOVNTQ" %%mm0, (%2, %0) \n\t" + MOVNTQ" %%mm1, 8(%2, %0) \n\t" + "add $16, %0 \n\t" + "js 1b \n\t" + SFENCE" \n\t" + EMMS" \n\t" + "2: \n\t" + : "+&r"(idx) + : "r" (s), "r" (d), "m" (mask32b), "m" (mask32r), "m" (mmx_one) + : "memory"); +#endif + for (; idx<15; idx+=4) { + register int v = *(const uint32_t *)&s[idx], g = v & 0xff00ff00; + v &= 0xff00ff; + *(uint32_t *)&d[idx] = (v>>16) + g + (v<<16); + } +} + +static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size) +{ + unsigned i; +#if HAVE_MMX + long mmx_size= 23 - src_size; + __asm__ volatile ( + "test %%"REG_a", %%"REG_a" \n\t" + "jns 2f \n\t" + "movq "MANGLE(mask24r)", %%mm5 \n\t" + "movq "MANGLE(mask24g)", %%mm6 \n\t" + "movq "MANGLE(mask24b)", %%mm7 \n\t" + ASMALIGN(4) + "1: \n\t" + PREFETCH" 32(%1, %%"REG_a") \n\t" + "movq (%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG + "movq (%1, %%"REG_a"), %%mm1 \n\t" // BGR BGR BG + "movq 2(%1, %%"REG_a"), %%mm2 \n\t" // R BGR BGR B + "psllq $16, %%mm0 \n\t" // 00 BGR BGR + "pand %%mm5, %%mm0 \n\t" + "pand %%mm6, %%mm1 \n\t" + "pand %%mm7, %%mm2 \n\t" + "por %%mm0, %%mm1 \n\t" + "por %%mm2, %%mm1 \n\t" + "movq 6(%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG + MOVNTQ" %%mm1, (%2, %%"REG_a") \n\t" // RGB RGB RG + "movq 8(%1, %%"REG_a"), %%mm1 \n\t" // R BGR BGR B + "movq 10(%1, %%"REG_a"), %%mm2 \n\t" // GR BGR BGR + "pand %%mm7, %%mm0 \n\t" + "pand %%mm5, %%mm1 \n\t" + "pand %%mm6, %%mm2 \n\t" + "por %%mm0, %%mm1 \n\t" + "por %%mm2, %%mm1 \n\t" + "movq 14(%1, %%"REG_a"), %%mm0 \n\t" // R BGR BGR B + MOVNTQ" %%mm1, 8(%2, %%"REG_a") \n\t" // B RGB RGB R + "movq 16(%1, %%"REG_a"), %%mm1 \n\t" // GR BGR BGR + "movq 18(%1, %%"REG_a"), %%mm2 \n\t" // BGR BGR BG + "pand %%mm6, %%mm0 \n\t" + "pand %%mm7, %%mm1 \n\t" + "pand %%mm5, %%mm2 \n\t" + "por %%mm0, %%mm1 \n\t" + "por %%mm2, %%mm1 \n\t" + MOVNTQ" %%mm1, 16(%2, %%"REG_a") \n\t" + "add $24, %%"REG_a" \n\t" + " js 1b \n\t" + "2: \n\t" + : "+a" (mmx_size) + : "r" (src-mmx_size), "r"(dst-mmx_size) + ); + + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); + + if (mmx_size==23) return; //finished, was multiple of 8 + + src+= src_size; + dst+= src_size; + src_size= 23-mmx_size; + src-= src_size; + dst-= src_size; +#endif + for (i=0; i<src_size; i+=3) + { + register uint8_t x; + x = src[i + 2]; + dst[i + 1] = src[i + 1]; + dst[i + 2] = src[i + 0]; + dst[i + 0] = x; + } +} + +static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, + long width, long 
height, + long lumStride, long chromStride, long dstStride, long vertLumPerChroma) +{ + long y; + const long chromWidth= width>>1; + for (y=0; y<height; y++) + { +#if HAVE_MMX +//FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway) + __asm__ volatile( + "xor %%"REG_a", %%"REG_a" \n\t" + ASMALIGN(4) + "1: \n\t" + PREFETCH" 32(%1, %%"REG_a", 2) \n\t" + PREFETCH" 32(%2, %%"REG_a") \n\t" + PREFETCH" 32(%3, %%"REG_a") \n\t" + "movq (%2, %%"REG_a"), %%mm0 \n\t" // U(0) + "movq %%mm0, %%mm2 \n\t" // U(0) + "movq (%3, %%"REG_a"), %%mm1 \n\t" // V(0) + "punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0) + "punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8) + + "movq (%1, %%"REG_a",2), %%mm3 \n\t" // Y(0) + "movq 8(%1, %%"REG_a",2), %%mm5 \n\t" // Y(8) + "movq %%mm3, %%mm4 \n\t" // Y(0) + "movq %%mm5, %%mm6 \n\t" // Y(8) + "punpcklbw %%mm0, %%mm3 \n\t" // YUYV YUYV(0) + "punpckhbw %%mm0, %%mm4 \n\t" // YUYV YUYV(4) + "punpcklbw %%mm2, %%mm5 \n\t" // YUYV YUYV(8) + "punpckhbw %%mm2, %%mm6 \n\t" // YUYV YUYV(12) + + MOVNTQ" %%mm3, (%0, %%"REG_a", 4) \n\t" + MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4) \n\t" + MOVNTQ" %%mm5, 16(%0, %%"REG_a", 4) \n\t" + MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4) \n\t" + + "add $8, %%"REG_a" \n\t" + "cmp %4, %%"REG_a" \n\t" + " jb 1b \n\t" + ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth) + : "%"REG_a + ); +#else + +#if ARCH_ALPHA && HAVE_MVI +#define pl2yuy2(n) \ + y1 = yc[n]; \ + y2 = yc2[n]; \ + u = uc[n]; \ + v = vc[n]; \ + __asm__("unpkbw %1, %0" : "=r"(y1) : "r"(y1)); \ + __asm__("unpkbw %1, %0" : "=r"(y2) : "r"(y2)); \ + __asm__("unpkbl %1, %0" : "=r"(u) : "r"(u)); \ + __asm__("unpkbl %1, %0" : "=r"(v) : "r"(v)); \ + yuv1 = (u << 8) + (v << 24); \ + yuv2 = yuv1 + y2; \ + yuv1 += y1; \ + qdst[n] = yuv1; \ + qdst2[n] = yuv2; + + int i; + uint64_t *qdst = (uint64_t *) dst; + uint64_t *qdst2 = (uint64_t *) (dst + dstStride); + const uint32_t *yc = (uint32_t *) ysrc; + const uint32_t *yc2 = (uint32_t *) (ysrc + lumStride); + const uint16_t *uc = (uint16_t*) usrc, *vc = (uint16_t*) vsrc; + for (i = 0; i < chromWidth; i += 8){ + uint64_t y1, y2, yuv1, yuv2; + uint64_t u, v; + /* Prefetch */ + __asm__("ldq $31,64(%0)" :: "r"(yc)); + __asm__("ldq $31,64(%0)" :: "r"(yc2)); + __asm__("ldq $31,64(%0)" :: "r"(uc)); + __asm__("ldq $31,64(%0)" :: "r"(vc)); + + pl2yuy2(0); + pl2yuy2(1); + pl2yuy2(2); + pl2yuy2(3); + + yc += 4; + yc2 += 4; + uc += 4; + vc += 4; + qdst += 4; + qdst2 += 4; + } + y++; + ysrc += lumStride; + dst += dstStride; + +#elif HAVE_FAST_64BIT + int i; + uint64_t *ldst = (uint64_t *) dst; + const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc; + for (i = 0; i < chromWidth; i += 2){ + uint64_t k, l; + k = yc[0] + (uc[0] << 8) + + (yc[1] << 16) + (vc[0] << 24); + l = yc[2] + (uc[1] << 8) + + (yc[3] << 16) + (vc[1] << 24); + *ldst++ = k + (l << 32); + yc += 4; + uc += 2; + vc += 2; + } + +#else + int i, *idst = (int32_t *) dst; + const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc; + for (i = 0; i < chromWidth; i++){ +#ifdef WORDS_BIGENDIAN + *idst++ = (yc[0] << 24)+ (uc[0] << 16) + + (yc[1] << 8) + (vc[0] << 0); +#else + *idst++ = yc[0] + (uc[0] << 8) + + (yc[1] << 16) + (vc[0] << 24); +#endif + yc += 2; + uc++; + vc++; + } +#endif +#endif + if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) + { + usrc += chromStride; + vsrc += chromStride; + } + ysrc += lumStride; + dst += dstStride; + } +#if HAVE_MMX +__asm__( EMMS" \n\t" + SFENCE" \n\t" + :::"memory"); +#endif +} + +/** + * Height should be a multiple of 2 and width should be a 
multiple of 16. + * (If this is a problem for anyone then tell me, and I will fix it.) + */ +static inline void RENAME(yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, + long width, long height, + long lumStride, long chromStride, long dstStride) +{ + //FIXME interpolate chroma + RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2); +} + +static inline void RENAME(yuvPlanartouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, + long width, long height, + long lumStride, long chromStride, long dstStride, long vertLumPerChroma) +{ + long y; + const long chromWidth= width>>1; + for (y=0; y<height; y++) + { +#if HAVE_MMX +//FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway) + __asm__ volatile( + "xor %%"REG_a", %%"REG_a" \n\t" + ASMALIGN(4) + "1: \n\t" + PREFETCH" 32(%1, %%"REG_a", 2) \n\t" + PREFETCH" 32(%2, %%"REG_a") \n\t" + PREFETCH" 32(%3, %%"REG_a") \n\t" + "movq (%2, %%"REG_a"), %%mm0 \n\t" // U(0) + "movq %%mm0, %%mm2 \n\t" // U(0) + "movq (%3, %%"REG_a"), %%mm1 \n\t" // V(0) + "punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0) + "punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8) + + "movq (%1, %%"REG_a",2), %%mm3 \n\t" // Y(0) + "movq 8(%1, %%"REG_a",2), %%mm5 \n\t" // Y(8) + "movq %%mm0, %%mm4 \n\t" // Y(0) + "movq %%mm2, %%mm6 \n\t" // Y(8) + "punpcklbw %%mm3, %%mm0 \n\t" // YUYV YUYV(0) + "punpckhbw %%mm3, %%mm4 \n\t" // YUYV YUYV(4) + "punpcklbw %%mm5, %%mm2 \n\t" // YUYV YUYV(8) + "punpckhbw %%mm5, %%mm6 \n\t" // YUYV YUYV(12) + + MOVNTQ" %%mm0, (%0, %%"REG_a", 4) \n\t" + MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4) \n\t" + MOVNTQ" %%mm2, 16(%0, %%"REG_a", 4) \n\t" + MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4) \n\t" + + "add $8, %%"REG_a" \n\t" + "cmp %4, %%"REG_a" \n\t" + " jb 1b \n\t" + ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth) + : "%"REG_a + ); +#else +//FIXME adapt the Alpha ASM code from yv12->yuy2 + +#if HAVE_FAST_64BIT + int i; + uint64_t *ldst = (uint64_t *) dst; + const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc; + for (i = 0; i < chromWidth; i += 2){ + uint64_t k, l; + k = uc[0] + (yc[0] << 8) + + (vc[0] << 16) + (yc[1] << 24); + l = uc[1] + (yc[2] << 8) + + (vc[1] << 16) + (yc[3] << 24); + *ldst++ = k + (l << 32); + yc += 4; + uc += 2; + vc += 2; + } + +#else + int i, *idst = (int32_t *) dst; + const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc; + for (i = 0; i < chromWidth; i++){ +#ifdef WORDS_BIGENDIAN + *idst++ = (uc[0] << 24)+ (yc[0] << 16) + + (vc[0] << 8) + (yc[1] << 0); +#else + *idst++ = uc[0] + (yc[0] << 8) + + (vc[0] << 16) + (yc[1] << 24); +#endif + yc += 2; + uc++; + vc++; + } +#endif +#endif + if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) + { + usrc += chromStride; + vsrc += chromStride; + } + ysrc += lumStride; + dst += dstStride; + } +#if HAVE_MMX +__asm__( EMMS" \n\t" + SFENCE" \n\t" + :::"memory"); +#endif +} + +/** + * Height should be a multiple of 2 and width should be a multiple of 16 + * (If this is a problem for anyone then tell me, and I will fix it.) + */ +static inline void RENAME(yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, + long width, long height, + long lumStride, long chromStride, long dstStride) +{ + //FIXME interpolate chroma + RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2); +} + +/** + * Width should be a multiple of 16. 
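+ * As a usage sketch (assuming contiguous YUV422P planes of width w, a
+ * multiple of 16, and height h): yuv422ptouyvy(y, u, v, dst, w, h, w, w/2, 2*w).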
+ */ +static inline void RENAME(yuv422ptouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, + long width, long height, + long lumStride, long chromStride, long dstStride) +{ + RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1); +} + +/** + * Width should be a multiple of 16. + */ +static inline void RENAME(yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, + long width, long height, + long lumStride, long chromStride, long dstStride) +{ + RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1); +} + +/** + * Height should be a multiple of 2 and width should be a multiple of 16. + * (If this is a problem for anyone then tell me, and I will fix it.) + */ +static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, + long width, long height, + long lumStride, long chromStride, long srcStride) +{ + long y; + const long chromWidth= width>>1; + for (y=0; y<height; y+=2) + { +#if HAVE_MMX + __asm__ volatile( + "xor %%"REG_a", %%"REG_a" \n\t" + "pcmpeqw %%mm7, %%mm7 \n\t" + "psrlw $8, %%mm7 \n\t" // FF,00,FF,00... + ASMALIGN(4) + "1: \n\t" + PREFETCH" 64(%0, %%"REG_a", 4) \n\t" + "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0) + "movq 8(%0, %%"REG_a", 4), %%mm1 \n\t" // YUYV YUYV(4) + "movq %%mm0, %%mm2 \n\t" // YUYV YUYV(0) + "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(4) + "psrlw $8, %%mm0 \n\t" // U0V0 U0V0(0) + "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(4) + "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(0) + "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(4) + "packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0) + "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0) + + MOVNTQ" %%mm2, (%1, %%"REG_a", 2) \n\t" + + "movq 16(%0, %%"REG_a", 4), %%mm1 \n\t" // YUYV YUYV(8) + "movq 24(%0, %%"REG_a", 4), %%mm2 \n\t" // YUYV YUYV(12) + "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(8) + "movq %%mm2, %%mm4 \n\t" // YUYV YUYV(12) + "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(8) + "psrlw $8, %%mm2 \n\t" // U0V0 U0V0(12) + "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(8) + "pand %%mm7, %%mm4 \n\t" // Y0Y0 Y0Y0(12) + "packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8) + "packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8) + + MOVNTQ" %%mm3, 8(%1, %%"REG_a", 2) \n\t" + + "movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0) + "movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8) + "psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0) + "psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8) + "pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0) + "pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8) + "packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0) + "packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0) + + MOVNTQ" %%mm0, (%3, %%"REG_a") \n\t" + MOVNTQ" %%mm2, (%2, %%"REG_a") \n\t" + + "add $8, %%"REG_a" \n\t" + "cmp %4, %%"REG_a" \n\t" + " jb 1b \n\t" + ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth) + : "memory", "%"REG_a + ); + + ydst += lumStride; + src += srcStride; + + __asm__ volatile( + "xor %%"REG_a", %%"REG_a" \n\t" + ASMALIGN(4) + "1: \n\t" + PREFETCH" 64(%0, %%"REG_a", 4) \n\t" + "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0) + "movq 8(%0, %%"REG_a", 4), %%mm1 \n\t" // YUYV YUYV(4) + "movq 16(%0, %%"REG_a", 4), %%mm2 \n\t" // YUYV YUYV(8) + "movq 24(%0, %%"REG_a", 4), %%mm3 \n\t" // YUYV YUYV(12) + "pand %%mm7, %%mm0 \n\t" // Y0Y0 Y0Y0(0) + "pand %%mm7, %%mm1 \n\t" // Y0Y0 Y0Y0(4) + "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(8) + "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(12) + "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0) + "packuswb %%mm3, %%mm2 \n\t" 
// YYYY YYYY(8) + + MOVNTQ" %%mm0, (%1, %%"REG_a", 2) \n\t" + MOVNTQ" %%mm2, 8(%1, %%"REG_a", 2) \n\t" + + "add $8, %%"REG_a" \n\t" + "cmp %4, %%"REG_a" \n\t" + " jb 1b \n\t" + + ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth) + : "memory", "%"REG_a + ); +#else + long i; + for (i=0; i<chromWidth; i++) + { + ydst[2*i+0] = src[4*i+0]; + udst[i] = src[4*i+1]; + ydst[2*i+1] = src[4*i+2]; + vdst[i] = src[4*i+3]; + } + ydst += lumStride; + src += srcStride; + + for (i=0; i<chromWidth; i++) + { + ydst[2*i+0] = src[4*i+0]; + ydst[2*i+1] = src[4*i+2]; + } +#endif + udst += chromStride; + vdst += chromStride; + ydst += lumStride; + src += srcStride; + } +#if HAVE_MMX +__asm__ volatile( EMMS" \n\t" + SFENCE" \n\t" + :::"memory"); +#endif +} + +static inline void RENAME(yvu9toyv12)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, + uint8_t *ydst, uint8_t *udst, uint8_t *vdst, + long width, long height, long lumStride, long chromStride) +{ + /* Y Plane */ + memcpy(ydst, ysrc, width*height); + + /* XXX: implement upscaling for U,V */ +} + +static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, long srcWidth, long srcHeight, long srcStride, long dstStride) +{ + long x,y; + + dst[0]= src[0]; + + // first line + for (x=0; x<srcWidth-1; x++){ + dst[2*x+1]= (3*src[x] + src[x+1])>>2; + dst[2*x+2]= ( src[x] + 3*src[x+1])>>2; + } + dst[2*srcWidth-1]= src[srcWidth-1]; + + dst+= dstStride; + + for (y=1; y<srcHeight; y++){ +#if HAVE_MMX2 || HAVE_AMD3DNOW + const long mmxSize= srcWidth&~15; + __asm__ volatile( + "mov %4, %%"REG_a" \n\t" + "1: \n\t" + "movq (%0, %%"REG_a"), %%mm0 \n\t" + "movq (%1, %%"REG_a"), %%mm1 \n\t" + "movq 1(%0, %%"REG_a"), %%mm2 \n\t" + "movq 1(%1, %%"REG_a"), %%mm3 \n\t" + "movq -1(%0, %%"REG_a"), %%mm4 \n\t" + "movq -1(%1, %%"REG_a"), %%mm5 \n\t" + PAVGB" %%mm0, %%mm5 \n\t" + PAVGB" %%mm0, %%mm3 \n\t" + PAVGB" %%mm0, %%mm5 \n\t" + PAVGB" %%mm0, %%mm3 \n\t" + PAVGB" %%mm1, %%mm4 \n\t" + PAVGB" %%mm1, %%mm2 \n\t" + PAVGB" %%mm1, %%mm4 \n\t" + PAVGB" %%mm1, %%mm2 \n\t" + "movq %%mm5, %%mm7 \n\t" + "movq %%mm4, %%mm6 \n\t" + "punpcklbw %%mm3, %%mm5 \n\t" + "punpckhbw %%mm3, %%mm7 \n\t" + "punpcklbw %%mm2, %%mm4 \n\t" + "punpckhbw %%mm2, %%mm6 \n\t" +#if 1 + MOVNTQ" %%mm5, (%2, %%"REG_a", 2) \n\t" + MOVNTQ" %%mm7, 8(%2, %%"REG_a", 2) \n\t" + MOVNTQ" %%mm4, (%3, %%"REG_a", 2) \n\t" + MOVNTQ" %%mm6, 8(%3, %%"REG_a", 2) \n\t" +#else + "movq %%mm5, (%2, %%"REG_a", 2) \n\t" + "movq %%mm7, 8(%2, %%"REG_a", 2) \n\t" + "movq %%mm4, (%3, %%"REG_a", 2) \n\t" + "movq %%mm6, 8(%3, %%"REG_a", 2) \n\t" +#endif + "add $8, %%"REG_a" \n\t" + " js 1b \n\t" + :: "r" (src + mmxSize ), "r" (src + srcStride + mmxSize ), + "r" (dst + mmxSize*2), "r" (dst + dstStride + mmxSize*2), + "g" (-mmxSize) + : "%"REG_a + + ); +#else + const long mmxSize=1; +#endif + dst[0 ]= (3*src[0] + src[srcStride])>>2; + dst[dstStride]= ( src[0] + 3*src[srcStride])>>2; + + for (x=mmxSize-1; x<srcWidth-1; x++){ + dst[2*x +1]= (3*src[x+0] + src[x+srcStride+1])>>2; + dst[2*x+dstStride+2]= ( src[x+0] + 3*src[x+srcStride+1])>>2; + dst[2*x+dstStride+1]= ( src[x+1] + 3*src[x+srcStride ])>>2; + dst[2*x +2]= (3*src[x+1] + src[x+srcStride ])>>2; + } + dst[srcWidth*2 -1 ]= (3*src[srcWidth-1] + src[srcWidth-1 + srcStride])>>2; + dst[srcWidth*2 -1 + dstStride]= ( src[srcWidth-1] + 3*src[srcWidth-1 + srcStride])>>2; + + dst+=dstStride*2; + src+=srcStride; + } + + // last line +#if 1 + dst[0]= src[0]; + + for (x=0; x<srcWidth-1; x++){ + dst[2*x+1]= (3*src[x] + src[x+1])>>2; + dst[2*x+2]= ( src[x] + 
3*src[x+1])>>2; + } + dst[2*srcWidth-1]= src[srcWidth-1]; +#else + for (x=0; x<srcWidth; x++){ + dst[2*x+0]= + dst[2*x+1]= src[x]; + } +#endif + +#if HAVE_MMX +__asm__ volatile( EMMS" \n\t" + SFENCE" \n\t" + :::"memory"); +#endif +} + +/** + * Height should be a multiple of 2 and width should be a multiple of 16. + * (If this is a problem for anyone then tell me, and I will fix it.) + * Chrominance data is only taken from every second line, others are ignored. + * FIXME: Write HQ version. + */ +static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, + long width, long height, + long lumStride, long chromStride, long srcStride) +{ + long y; + const long chromWidth= width>>1; + for (y=0; y<height; y+=2) + { +#if HAVE_MMX + __asm__ volatile( + "xor %%"REG_a", %%"REG_a" \n\t" + "pcmpeqw %%mm7, %%mm7 \n\t" + "psrlw $8, %%mm7 \n\t" // FF,00,FF,00... + ASMALIGN(4) + "1: \n\t" + PREFETCH" 64(%0, %%"REG_a", 4) \n\t" + "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // UYVY UYVY(0) + "movq 8(%0, %%"REG_a", 4), %%mm1 \n\t" // UYVY UYVY(4) + "movq %%mm0, %%mm2 \n\t" // UYVY UYVY(0) + "movq %%mm1, %%mm3 \n\t" // UYVY UYVY(4) + "pand %%mm7, %%mm0 \n\t" // U0V0 U0V0(0) + "pand %%mm7, %%mm1 \n\t" // U0V0 U0V0(4) + "psrlw $8, %%mm2 \n\t" // Y0Y0 Y0Y0(0) + "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(4) + "packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0) + "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0) + + MOVNTQ" %%mm2, (%1, %%"REG_a", 2) \n\t" + + "movq 16(%0, %%"REG_a", 4), %%mm1 \n\t" // UYVY UYVY(8) + "movq 24(%0, %%"REG_a", 4), %%mm2 \n\t" // UYVY UYVY(12) + "movq %%mm1, %%mm3 \n\t" // UYVY UYVY(8) + "movq %%mm2, %%mm4 \n\t" // UYVY UYVY(12) + "pand %%mm7, %%mm1 \n\t" // U0V0 U0V0(8) + "pand %%mm7, %%mm2 \n\t" // U0V0 U0V0(12) + "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(8) + "psrlw $8, %%mm4 \n\t" // Y0Y0 Y0Y0(12) + "packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8) + "packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8) + + MOVNTQ" %%mm3, 8(%1, %%"REG_a", 2) \n\t" + + "movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0) + "movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8) + "psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0) + "psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8) + "pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0) + "pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8) + "packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0) + "packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0) + + MOVNTQ" %%mm0, (%3, %%"REG_a") \n\t" + MOVNTQ" %%mm2, (%2, %%"REG_a") \n\t" + + "add $8, %%"REG_a" \n\t" + "cmp %4, %%"REG_a" \n\t" + " jb 1b \n\t" + ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth) + : "memory", "%"REG_a + ); + + ydst += lumStride; + src += srcStride; + + __asm__ volatile( + "xor %%"REG_a", %%"REG_a" \n\t" + ASMALIGN(4) + "1: \n\t" + PREFETCH" 64(%0, %%"REG_a", 4) \n\t" + "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0) + "movq 8(%0, %%"REG_a", 4), %%mm1 \n\t" // YUYV YUYV(4) + "movq 16(%0, %%"REG_a", 4), %%mm2 \n\t" // YUYV YUYV(8) + "movq 24(%0, %%"REG_a", 4), %%mm3 \n\t" // YUYV YUYV(12) + "psrlw $8, %%mm0 \n\t" // Y0Y0 Y0Y0(0) + "psrlw $8, %%mm1 \n\t" // Y0Y0 Y0Y0(4) + "psrlw $8, %%mm2 \n\t" // Y0Y0 Y0Y0(8) + "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(12) + "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0) + "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8) + + MOVNTQ" %%mm0, (%1, %%"REG_a", 2) \n\t" + MOVNTQ" %%mm2, 8(%1, %%"REG_a", 2) \n\t" + + "add $8, %%"REG_a" \n\t" + "cmp %4, %%"REG_a" \n\t" + " jb 1b \n\t" + + ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth) + : "memory", "%"REG_a + ); +#else + long i; + for (i=0; i<chromWidth; i++) + { + udst[i] = 
src[4*i+0]; + ydst[2*i+0] = src[4*i+1]; + vdst[i] = src[4*i+2]; + ydst[2*i+1] = src[4*i+3]; + } + ydst += lumStride; + src += srcStride; + + for (i=0; i<chromWidth; i++) + { + ydst[2*i+0] = src[4*i+1]; + ydst[2*i+1] = src[4*i+3]; + } +#endif + udst += chromStride; + vdst += chromStride; + ydst += lumStride; + src += srcStride; + } +#if HAVE_MMX +__asm__ volatile( EMMS" \n\t" + SFENCE" \n\t" + :::"memory"); +#endif +} + +/** + * Height should be a multiple of 2 and width should be a multiple of 2. + * (If this is a problem for anyone then tell me, and I will fix it.) + * Chrominance data is only taken from every second line, + * others are ignored in the C version. + * FIXME: Write HQ version. + */ +static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, + long width, long height, + long lumStride, long chromStride, long srcStride) +{ + long y; + const long chromWidth= width>>1; +#if HAVE_MMX + for (y=0; y<height-2; y+=2) + { + long i; + for (i=0; i<2; i++) + { + __asm__ volatile( + "mov %2, %%"REG_a" \n\t" + "movq "MANGLE(ff_bgr2YCoeff)", %%mm6 \n\t" + "movq "MANGLE(ff_w1111)", %%mm5 \n\t" + "pxor %%mm7, %%mm7 \n\t" + "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d" \n\t" + ASMALIGN(4) + "1: \n\t" + PREFETCH" 64(%0, %%"REG_d") \n\t" + "movd (%0, %%"REG_d"), %%mm0 \n\t" + "movd 3(%0, %%"REG_d"), %%mm1 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "punpcklbw %%mm7, %%mm1 \n\t" + "movd 6(%0, %%"REG_d"), %%mm2 \n\t" + "movd 9(%0, %%"REG_d"), %%mm3 \n\t" + "punpcklbw %%mm7, %%mm2 \n\t" + "punpcklbw %%mm7, %%mm3 \n\t" + "pmaddwd %%mm6, %%mm0 \n\t" + "pmaddwd %%mm6, %%mm1 \n\t" + "pmaddwd %%mm6, %%mm2 \n\t" + "pmaddwd %%mm6, %%mm3 \n\t" +#ifndef FAST_BGR2YV12 + "psrad $8, %%mm0 \n\t" + "psrad $8, %%mm1 \n\t" + "psrad $8, %%mm2 \n\t" + "psrad $8, %%mm3 \n\t" +#endif + "packssdw %%mm1, %%mm0 \n\t" + "packssdw %%mm3, %%mm2 \n\t" + "pmaddwd %%mm5, %%mm0 \n\t" + "pmaddwd %%mm5, %%mm2 \n\t" + "packssdw %%mm2, %%mm0 \n\t" + "psraw $7, %%mm0 \n\t" + + "movd 12(%0, %%"REG_d"), %%mm4 \n\t" + "movd 15(%0, %%"REG_d"), %%mm1 \n\t" + "punpcklbw %%mm7, %%mm4 \n\t" + "punpcklbw %%mm7, %%mm1 \n\t" + "movd 18(%0, %%"REG_d"), %%mm2 \n\t" + "movd 21(%0, %%"REG_d"), %%mm3 \n\t" + "punpcklbw %%mm7, %%mm2 \n\t" + "punpcklbw %%mm7, %%mm3 \n\t" + "pmaddwd %%mm6, %%mm4 \n\t" + "pmaddwd %%mm6, %%mm1 \n\t" + "pmaddwd %%mm6, %%mm2 \n\t" + "pmaddwd %%mm6, %%mm3 \n\t" +#ifndef FAST_BGR2YV12 + "psrad $8, %%mm4 \n\t" + "psrad $8, %%mm1 \n\t" + "psrad $8, %%mm2 \n\t" + "psrad $8, %%mm3 \n\t" +#endif + "packssdw %%mm1, %%mm4 \n\t" + "packssdw %%mm3, %%mm2 \n\t" + "pmaddwd %%mm5, %%mm4 \n\t" + "pmaddwd %%mm5, %%mm2 \n\t" + "add $24, %%"REG_d" \n\t" + "packssdw %%mm2, %%mm4 \n\t" + "psraw $7, %%mm4 \n\t" + + "packuswb %%mm4, %%mm0 \n\t" + "paddusb "MANGLE(ff_bgr2YOffset)", %%mm0 \n\t" + + MOVNTQ" %%mm0, (%1, %%"REG_a") \n\t" + "add $8, %%"REG_a" \n\t" + " js 1b \n\t" + : : "r" (src+width*3), "r" (ydst+width), "g" (-width) + : "%"REG_a, "%"REG_d + ); + ydst += lumStride; + src += srcStride; + } + src -= srcStride*2; + __asm__ volatile( + "mov %4, %%"REG_a" \n\t" + "movq "MANGLE(ff_w1111)", %%mm5 \n\t" + "movq "MANGLE(ff_bgr2UCoeff)", %%mm6 \n\t" + "pxor %%mm7, %%mm7 \n\t" + "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d" \n\t" + "add %%"REG_d", %%"REG_d" \n\t" + ASMALIGN(4) + "1: \n\t" + PREFETCH" 64(%0, %%"REG_d") \n\t" + PREFETCH" 64(%1, %%"REG_d") \n\t" +#if HAVE_MMX2 || HAVE_AMD3DNOW + "movq (%0, %%"REG_d"), %%mm0 \n\t" + "movq (%1, %%"REG_d"), %%mm1 \n\t" + "movq 6(%0, %%"REG_d"), %%mm2 \n\t" + "movq 
6(%1, %%"REG_d"), %%mm3 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + PAVGB" %%mm3, %%mm2 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm2, %%mm3 \n\t" + "psrlq $24, %%mm0 \n\t" + "psrlq $24, %%mm2 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + PAVGB" %%mm3, %%mm2 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "punpcklbw %%mm7, %%mm2 \n\t" +#else + "movd (%0, %%"REG_d"), %%mm0 \n\t" + "movd (%1, %%"REG_d"), %%mm1 \n\t" + "movd 3(%0, %%"REG_d"), %%mm2 \n\t" + "movd 3(%1, %%"REG_d"), %%mm3 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "punpcklbw %%mm7, %%mm1 \n\t" + "punpcklbw %%mm7, %%mm2 \n\t" + "punpcklbw %%mm7, %%mm3 \n\t" + "paddw %%mm1, %%mm0 \n\t" + "paddw %%mm3, %%mm2 \n\t" + "paddw %%mm2, %%mm0 \n\t" + "movd 6(%0, %%"REG_d"), %%mm4 \n\t" + "movd 6(%1, %%"REG_d"), %%mm1 \n\t" + "movd 9(%0, %%"REG_d"), %%mm2 \n\t" + "movd 9(%1, %%"REG_d"), %%mm3 \n\t" + "punpcklbw %%mm7, %%mm4 \n\t" + "punpcklbw %%mm7, %%mm1 \n\t" + "punpcklbw %%mm7, %%mm2 \n\t" + "punpcklbw %%mm7, %%mm3 \n\t" + "paddw %%mm1, %%mm4 \n\t" + "paddw %%mm3, %%mm2 \n\t" + "paddw %%mm4, %%mm2 \n\t" + "psrlw $2, %%mm0 \n\t" + "psrlw $2, %%mm2 \n\t" +#endif + "movq "MANGLE(ff_bgr2VCoeff)", %%mm1 \n\t" + "movq "MANGLE(ff_bgr2VCoeff)", %%mm3 \n\t" + + "pmaddwd %%mm0, %%mm1 \n\t" + "pmaddwd %%mm2, %%mm3 \n\t" + "pmaddwd %%mm6, %%mm0 \n\t" + "pmaddwd %%mm6, %%mm2 \n\t" +#ifndef FAST_BGR2YV12 + "psrad $8, %%mm0 \n\t" + "psrad $8, %%mm1 \n\t" + "psrad $8, %%mm2 \n\t" + "psrad $8, %%mm3 \n\t" +#endif + "packssdw %%mm2, %%mm0 \n\t" + "packssdw %%mm3, %%mm1 \n\t" + "pmaddwd %%mm5, %%mm0 \n\t" + "pmaddwd %%mm5, %%mm1 \n\t" + "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0 + "psraw $7, %%mm0 \n\t" + +#if HAVE_MMX2 || HAVE_AMD3DNOW + "movq 12(%0, %%"REG_d"), %%mm4 \n\t" + "movq 12(%1, %%"REG_d"), %%mm1 \n\t" + "movq 18(%0, %%"REG_d"), %%mm2 \n\t" + "movq 18(%1, %%"REG_d"), %%mm3 \n\t" + PAVGB" %%mm1, %%mm4 \n\t" + PAVGB" %%mm3, %%mm2 \n\t" + "movq %%mm4, %%mm1 \n\t" + "movq %%mm2, %%mm3 \n\t" + "psrlq $24, %%mm4 \n\t" + "psrlq $24, %%mm2 \n\t" + PAVGB" %%mm1, %%mm4 \n\t" + PAVGB" %%mm3, %%mm2 \n\t" + "punpcklbw %%mm7, %%mm4 \n\t" + "punpcklbw %%mm7, %%mm2 \n\t" +#else + "movd 12(%0, %%"REG_d"), %%mm4 \n\t" + "movd 12(%1, %%"REG_d"), %%mm1 \n\t" + "movd 15(%0, %%"REG_d"), %%mm2 \n\t" + "movd 15(%1, %%"REG_d"), %%mm3 \n\t" + "punpcklbw %%mm7, %%mm4 \n\t" + "punpcklbw %%mm7, %%mm1 \n\t" + "punpcklbw %%mm7, %%mm2 \n\t" + "punpcklbw %%mm7, %%mm3 \n\t" + "paddw %%mm1, %%mm4 \n\t" + "paddw %%mm3, %%mm2 \n\t" + "paddw %%mm2, %%mm4 \n\t" + "movd 18(%0, %%"REG_d"), %%mm5 \n\t" + "movd 18(%1, %%"REG_d"), %%mm1 \n\t" + "movd 21(%0, %%"REG_d"), %%mm2 \n\t" + "movd 21(%1, %%"REG_d"), %%mm3 \n\t" + "punpcklbw %%mm7, %%mm5 \n\t" + "punpcklbw %%mm7, %%mm1 \n\t" + "punpcklbw %%mm7, %%mm2 \n\t" + "punpcklbw %%mm7, %%mm3 \n\t" + "paddw %%mm1, %%mm5 \n\t" + "paddw %%mm3, %%mm2 \n\t" + "paddw %%mm5, %%mm2 \n\t" + "movq "MANGLE(ff_w1111)", %%mm5 \n\t" + "psrlw $2, %%mm4 \n\t" + "psrlw $2, %%mm2 \n\t" +#endif + "movq "MANGLE(ff_bgr2VCoeff)", %%mm1 \n\t" + "movq "MANGLE(ff_bgr2VCoeff)", %%mm3 \n\t" + + "pmaddwd %%mm4, %%mm1 \n\t" + "pmaddwd %%mm2, %%mm3 \n\t" + "pmaddwd %%mm6, %%mm4 \n\t" + "pmaddwd %%mm6, %%mm2 \n\t" +#ifndef FAST_BGR2YV12 + "psrad $8, %%mm4 \n\t" + "psrad $8, %%mm1 \n\t" + "psrad $8, %%mm2 \n\t" + "psrad $8, %%mm3 \n\t" +#endif + "packssdw %%mm2, %%mm4 \n\t" + "packssdw %%mm3, %%mm1 \n\t" + "pmaddwd %%mm5, %%mm4 \n\t" + "pmaddwd %%mm5, %%mm1 \n\t" + "add $24, %%"REG_d" \n\t" + "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2 + "psraw $7, %%mm4 \n\t" + + "movq %%mm0, %%mm1 \n\t" + 
"punpckldq %%mm4, %%mm0 \n\t" + "punpckhdq %%mm4, %%mm1 \n\t" + "packsswb %%mm1, %%mm0 \n\t" + "paddb "MANGLE(ff_bgr2UVOffset)", %%mm0 \n\t" + "movd %%mm0, (%2, %%"REG_a") \n\t" + "punpckhdq %%mm0, %%mm0 \n\t" + "movd %%mm0, (%3, %%"REG_a") \n\t" + "add $4, %%"REG_a" \n\t" + " js 1b \n\t" + : : "r" (src+chromWidth*6), "r" (src+srcStride+chromWidth*6), "r" (udst+chromWidth), "r" (vdst+chromWidth), "g" (-chromWidth) + : "%"REG_a, "%"REG_d + ); + + udst += chromStride; + vdst += chromStride; + src += srcStride*2; + } + + __asm__ volatile( EMMS" \n\t" + SFENCE" \n\t" + :::"memory"); +#else + y=0; +#endif + for (; y<height; y+=2) + { + long i; + for (i=0; i<chromWidth; i++) + { + unsigned int b = src[6*i+0]; + unsigned int g = src[6*i+1]; + unsigned int r = src[6*i+2]; + + unsigned int Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16; + unsigned int V = ((RV*r + GV*g + BV*b)>>RGB2YUV_SHIFT) + 128; + unsigned int U = ((RU*r + GU*g + BU*b)>>RGB2YUV_SHIFT) + 128; + + udst[i] = U; + vdst[i] = V; + ydst[2*i] = Y; + + b = src[6*i+3]; + g = src[6*i+4]; + r = src[6*i+5]; + + Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16; + ydst[2*i+1] = Y; + } + ydst += lumStride; + src += srcStride; + + for (i=0; i<chromWidth; i++) + { + unsigned int b = src[6*i+0]; + unsigned int g = src[6*i+1]; + unsigned int r = src[6*i+2]; + + unsigned int Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16; + + ydst[2*i] = Y; + + b = src[6*i+3]; + g = src[6*i+4]; + r = src[6*i+5]; + + Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16; + ydst[2*i+1] = Y; + } + udst += chromStride; + vdst += chromStride; + ydst += lumStride; + src += srcStride; + } +} + +static void RENAME(interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dest, + long width, long height, long src1Stride, + long src2Stride, long dstStride){ + long h; + + for (h=0; h < height; h++) + { + long w; + +#if HAVE_MMX +#if HAVE_SSE2 + __asm__( + "xor %%"REG_a", %%"REG_a" \n\t" + "1: \n\t" + PREFETCH" 64(%1, %%"REG_a") \n\t" + PREFETCH" 64(%2, %%"REG_a") \n\t" + "movdqa (%1, %%"REG_a"), %%xmm0 \n\t" + "movdqa (%1, %%"REG_a"), %%xmm1 \n\t" + "movdqa (%2, %%"REG_a"), %%xmm2 \n\t" + "punpcklbw %%xmm2, %%xmm0 \n\t" + "punpckhbw %%xmm2, %%xmm1 \n\t" + "movntdq %%xmm0, (%0, %%"REG_a", 2) \n\t" + "movntdq %%xmm1, 16(%0, %%"REG_a", 2) \n\t" + "add $16, %%"REG_a" \n\t" + "cmp %3, %%"REG_a" \n\t" + " jb 1b \n\t" + ::"r"(dest), "r"(src1), "r"(src2), "r" (width-15) + : "memory", "%"REG_a"" + ); +#else + __asm__( + "xor %%"REG_a", %%"REG_a" \n\t" + "1: \n\t" + PREFETCH" 64(%1, %%"REG_a") \n\t" + PREFETCH" 64(%2, %%"REG_a") \n\t" + "movq (%1, %%"REG_a"), %%mm0 \n\t" + "movq 8(%1, %%"REG_a"), %%mm2 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm2, %%mm3 \n\t" + "movq (%2, %%"REG_a"), %%mm4 \n\t" + "movq 8(%2, %%"REG_a"), %%mm5 \n\t" + "punpcklbw %%mm4, %%mm0 \n\t" + "punpckhbw %%mm4, %%mm1 \n\t" + "punpcklbw %%mm5, %%mm2 \n\t" + "punpckhbw %%mm5, %%mm3 \n\t" + MOVNTQ" %%mm0, (%0, %%"REG_a", 2) \n\t" + MOVNTQ" %%mm1, 8(%0, %%"REG_a", 2) \n\t" + MOVNTQ" %%mm2, 16(%0, %%"REG_a", 2) \n\t" + MOVNTQ" %%mm3, 24(%0, %%"REG_a", 2) \n\t" + "add $16, %%"REG_a" \n\t" + "cmp %3, %%"REG_a" \n\t" + " jb 1b \n\t" + ::"r"(dest), "r"(src1), "r"(src2), "r" (width-15) + : "memory", "%"REG_a + ); +#endif + for (w= (width&(~15)); w < width; w++) + { + dest[2*w+0] = src1[w]; + dest[2*w+1] = src2[w]; + } +#else + for (w=0; w < width; w++) + { + dest[2*w+0] = src1[w]; + dest[2*w+1] = src2[w]; + } +#endif + dest += dstStride; + src1 += src1Stride; + src2 += src2Stride; + } +#if HAVE_MMX + __asm__( + EMMS" \n\t" + 
SFENCE" \n\t" + ::: "memory" + ); +#endif +} + +static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2, + uint8_t *dst1, uint8_t *dst2, + long width, long height, + long srcStride1, long srcStride2, + long dstStride1, long dstStride2) +{ + long y,x,w,h; + w=width/2; h=height/2; +#if HAVE_MMX + __asm__ volatile( + PREFETCH" %0 \n\t" + PREFETCH" %1 \n\t" + ::"m"(*(src1+srcStride1)),"m"(*(src2+srcStride2)):"memory"); +#endif + for (y=0;y<h;y++){ + const uint8_t* s1=src1+srcStride1*(y>>1); + uint8_t* d=dst1+dstStride1*y; + x=0; +#if HAVE_MMX + for (;x<w-31;x+=32) + { + __asm__ volatile( + PREFETCH" 32%1 \n\t" + "movq %1, %%mm0 \n\t" + "movq 8%1, %%mm2 \n\t" + "movq 16%1, %%mm4 \n\t" + "movq 24%1, %%mm6 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm2, %%mm3 \n\t" + "movq %%mm4, %%mm5 \n\t" + "movq %%mm6, %%mm7 \n\t" + "punpcklbw %%mm0, %%mm0 \n\t" + "punpckhbw %%mm1, %%mm1 \n\t" + "punpcklbw %%mm2, %%mm2 \n\t" + "punpckhbw %%mm3, %%mm3 \n\t" + "punpcklbw %%mm4, %%mm4 \n\t" + "punpckhbw %%mm5, %%mm5 \n\t" + "punpcklbw %%mm6, %%mm6 \n\t" + "punpckhbw %%mm7, %%mm7 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + MOVNTQ" %%mm1, 8%0 \n\t" + MOVNTQ" %%mm2, 16%0 \n\t" + MOVNTQ" %%mm3, 24%0 \n\t" + MOVNTQ" %%mm4, 32%0 \n\t" + MOVNTQ" %%mm5, 40%0 \n\t" + MOVNTQ" %%mm6, 48%0 \n\t" + MOVNTQ" %%mm7, 56%0" + :"=m"(d[2*x]) + :"m"(s1[x]) + :"memory"); + } +#endif + for (;x<w;x++) d[2*x]=d[2*x+1]=s1[x]; + } + for (y=0;y<h;y++){ + const uint8_t* s2=src2+srcStride2*(y>>1); + uint8_t* d=dst2+dstStride2*y; + x=0; +#if HAVE_MMX + for (;x<w-31;x+=32) + { + __asm__ volatile( + PREFETCH" 32%1 \n\t" + "movq %1, %%mm0 \n\t" + "movq 8%1, %%mm2 \n\t" + "movq 16%1, %%mm4 \n\t" + "movq 24%1, %%mm6 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm2, %%mm3 \n\t" + "movq %%mm4, %%mm5 \n\t" + "movq %%mm6, %%mm7 \n\t" + "punpcklbw %%mm0, %%mm0 \n\t" + "punpckhbw %%mm1, %%mm1 \n\t" + "punpcklbw %%mm2, %%mm2 \n\t" + "punpckhbw %%mm3, %%mm3 \n\t" + "punpcklbw %%mm4, %%mm4 \n\t" + "punpckhbw %%mm5, %%mm5 \n\t" + "punpcklbw %%mm6, %%mm6 \n\t" + "punpckhbw %%mm7, %%mm7 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + MOVNTQ" %%mm1, 8%0 \n\t" + MOVNTQ" %%mm2, 16%0 \n\t" + MOVNTQ" %%mm3, 24%0 \n\t" + MOVNTQ" %%mm4, 32%0 \n\t" + MOVNTQ" %%mm5, 40%0 \n\t" + MOVNTQ" %%mm6, 48%0 \n\t" + MOVNTQ" %%mm7, 56%0" + :"=m"(d[2*x]) + :"m"(s2[x]) + :"memory"); + } +#endif + for (;x<w;x++) d[2*x]=d[2*x+1]=s2[x]; + } +#if HAVE_MMX + __asm__( + EMMS" \n\t" + SFENCE" \n\t" + ::: "memory" + ); +#endif +} + +static inline void RENAME(yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, + uint8_t *dst, + long width, long height, + long srcStride1, long srcStride2, + long srcStride3, long dstStride) +{ + long y,x,w,h; + w=width/2; h=height; + for (y=0;y<h;y++){ + const uint8_t* yp=src1+srcStride1*y; + const uint8_t* up=src2+srcStride2*(y>>2); + const uint8_t* vp=src3+srcStride3*(y>>2); + uint8_t* d=dst+dstStride*y; + x=0; +#if HAVE_MMX + for (;x<w-7;x+=8) + { + __asm__ volatile( + PREFETCH" 32(%1, %0) \n\t" + PREFETCH" 32(%2, %0) \n\t" + PREFETCH" 32(%3, %0) \n\t" + "movq (%1, %0, 4), %%mm0 \n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */ + "movq (%2, %0), %%mm1 \n\t" /* U0U1U2U3U4U5U6U7 */ + "movq (%3, %0), %%mm2 \n\t" /* V0V1V2V3V4V5V6V7 */ + "movq %%mm0, %%mm3 \n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */ + "movq %%mm1, %%mm4 \n\t" /* U0U1U2U3U4U5U6U7 */ + "movq %%mm2, %%mm5 \n\t" /* V0V1V2V3V4V5V6V7 */ + "punpcklbw %%mm1, %%mm1 \n\t" /* U0U0 U1U1 U2U2 U3U3 */ + "punpcklbw %%mm2, %%mm2 \n\t" /* V0V0 V1V1 V2V2 V3V3 */ + "punpckhbw %%mm4, %%mm4 \n\t" /* U4U4 U5U5 U6U6 U7U7 */ + 
"punpckhbw %%mm5, %%mm5 \n\t" /* V4V4 V5V5 V6V6 V7V7 */ + + "movq %%mm1, %%mm6 \n\t" + "punpcklbw %%mm2, %%mm1 \n\t" /* U0V0 U0V0 U1V1 U1V1*/ + "punpcklbw %%mm1, %%mm0 \n\t" /* Y0U0 Y1V0 Y2U0 Y3V0*/ + "punpckhbw %%mm1, %%mm3 \n\t" /* Y4U1 Y5V1 Y6U1 Y7V1*/ + MOVNTQ" %%mm0, (%4, %0, 8) \n\t" + MOVNTQ" %%mm3, 8(%4, %0, 8) \n\t" + + "punpckhbw %%mm2, %%mm6 \n\t" /* U2V2 U2V2 U3V3 U3V3*/ + "movq 8(%1, %0, 4), %%mm0 \n\t" + "movq %%mm0, %%mm3 \n\t" + "punpcklbw %%mm6, %%mm0 \n\t" /* Y U2 Y V2 Y U2 Y V2*/ + "punpckhbw %%mm6, %%mm3 \n\t" /* Y U3 Y V3 Y U3 Y V3*/ + MOVNTQ" %%mm0, 16(%4, %0, 8) \n\t" + MOVNTQ" %%mm3, 24(%4, %0, 8) \n\t" + + "movq %%mm4, %%mm6 \n\t" + "movq 16(%1, %0, 4), %%mm0 \n\t" + "movq %%mm0, %%mm3 \n\t" + "punpcklbw %%mm5, %%mm4 \n\t" + "punpcklbw %%mm4, %%mm0 \n\t" /* Y U4 Y V4 Y U4 Y V4*/ + "punpckhbw %%mm4, %%mm3 \n\t" /* Y U5 Y V5 Y U5 Y V5*/ + MOVNTQ" %%mm0, 32(%4, %0, 8) \n\t" + MOVNTQ" %%mm3, 40(%4, %0, 8) \n\t" + + "punpckhbw %%mm5, %%mm6 \n\t" + "movq 24(%1, %0, 4), %%mm0 \n\t" + "movq %%mm0, %%mm3 \n\t" + "punpcklbw %%mm6, %%mm0 \n\t" /* Y U6 Y V6 Y U6 Y V6*/ + "punpckhbw %%mm6, %%mm3 \n\t" /* Y U7 Y V7 Y U7 Y V7*/ + MOVNTQ" %%mm0, 48(%4, %0, 8) \n\t" + MOVNTQ" %%mm3, 56(%4, %0, 8) \n\t" + + : "+r" (x) + : "r"(yp), "r" (up), "r"(vp), "r"(d) + :"memory"); + } +#endif + for (; x<w; x++) + { + const long x2 = x<<2; + d[8*x+0] = yp[x2]; + d[8*x+1] = up[x]; + d[8*x+2] = yp[x2+1]; + d[8*x+3] = vp[x]; + d[8*x+4] = yp[x2+2]; + d[8*x+5] = up[x]; + d[8*x+6] = yp[x2+3]; + d[8*x+7] = vp[x]; + } + } +#if HAVE_MMX + __asm__( + EMMS" \n\t" + SFENCE" \n\t" + ::: "memory" + ); +#endif +} + +static inline void RENAME(rgb2rgb_init)(void){ + rgb15to16 = RENAME(rgb15to16); + rgb15tobgr24 = RENAME(rgb15tobgr24); + rgb15to32 = RENAME(rgb15to32); + rgb16tobgr24 = RENAME(rgb16tobgr24); + rgb16to32 = RENAME(rgb16to32); + rgb16to15 = RENAME(rgb16to15); + rgb24tobgr16 = RENAME(rgb24tobgr16); + rgb24tobgr15 = RENAME(rgb24tobgr15); + rgb24tobgr32 = RENAME(rgb24tobgr32); + rgb32to16 = RENAME(rgb32to16); + rgb32to15 = RENAME(rgb32to15); + rgb32tobgr24 = RENAME(rgb32tobgr24); + rgb24to15 = RENAME(rgb24to15); + rgb24to16 = RENAME(rgb24to16); + rgb24tobgr24 = RENAME(rgb24tobgr24); + rgb32tobgr32 = RENAME(rgb32tobgr32); + rgb32tobgr16 = RENAME(rgb32tobgr16); + rgb32tobgr15 = RENAME(rgb32tobgr15); + yv12toyuy2 = RENAME(yv12toyuy2); + yv12touyvy = RENAME(yv12touyvy); + yuv422ptoyuy2 = RENAME(yuv422ptoyuy2); + yuv422ptouyvy = RENAME(yuv422ptouyvy); + yuy2toyv12 = RENAME(yuy2toyv12); +// uyvytoyv12 = RENAME(uyvytoyv12); +// yvu9toyv12 = RENAME(yvu9toyv12); + planar2x = RENAME(planar2x); + rgb24toyv12 = RENAME(rgb24toyv12); + interleaveBytes = RENAME(interleaveBytes); + vu9_to_vu12 = RENAME(vu9_to_vu12); + yvu9_to_yuy2 = RENAME(yvu9_to_yuy2); +} diff --git a/libswscale/swscale-example.c b/libswscale/swscale-example.c new file mode 100644 index 0000000000..87b9ba027d --- /dev/null +++ b/libswscale/swscale-example.c @@ -0,0 +1,210 @@ +/* + * Copyright (C) 2003 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <inttypes.h> +#include <stdarg.h> + +#undef HAVE_AV_CONFIG_H +#include "libavutil/avutil.h" +#include "swscale.h" +#include "swscale_internal.h" + +static uint64_t getSSD(uint8_t *src1, uint8_t *src2, int stride1, int stride2, int w, int h){ + int x,y; + uint64_t ssd=0; + +//printf("%d %d\n", w, h); + + for (y=0; y<h; y++){ + for (x=0; x<w; x++){ + int d= src1[x + y*stride1] - src2[x + y*stride2]; + ssd+= d*d; +//printf("%d", abs(src1[x + y*stride1] - src2[x + y*stride2])/26 ); + } +//printf("\n"); + } + return ssd; +} + +// test by ref -> src -> dst -> out & compare out against ref +// ref & out are YV12 +static int doTest(uint8_t *ref[3], int refStride[3], int w, int h, int srcFormat, int dstFormat, + int srcW, int srcH, int dstW, int dstH, int flags){ + uint8_t *src[3]; + uint8_t *dst[3]; + uint8_t *out[3]; + int srcStride[3], dstStride[3]; + int i; + uint64_t ssdY, ssdU, ssdV; + struct SwsContext *srcContext, *dstContext, *outContext; + int res; + + res = 0; + for (i=0; i<3; i++){ + // avoid stride % bpp != 0 + if (srcFormat==PIX_FMT_RGB24 || srcFormat==PIX_FMT_BGR24) + srcStride[i]= srcW*3; + else + srcStride[i]= srcW*4; + + if (dstFormat==PIX_FMT_RGB24 || dstFormat==PIX_FMT_BGR24) + dstStride[i]= dstW*3; + else + dstStride[i]= dstW*4; + + src[i]= (uint8_t*) malloc(srcStride[i]*srcH); + dst[i]= (uint8_t*) malloc(dstStride[i]*dstH); + out[i]= (uint8_t*) malloc(refStride[i]*h); + if (!src[i] || !dst[i] || !out[i]) { + perror("Malloc"); + res = -1; + + goto end; + } + } + + dstContext = outContext = NULL; + srcContext= sws_getContext(w, h, PIX_FMT_YUV420P, srcW, srcH, srcFormat, flags, NULL, NULL, NULL); + if (!srcContext) { + fprintf(stderr, "Failed to get %s ---> %s\n", + sws_format_name(PIX_FMT_YUV420P), + sws_format_name(srcFormat)); + res = -1; + + goto end; + } + dstContext= sws_getContext(srcW, srcH, srcFormat, dstW, dstH, dstFormat, flags, NULL, NULL, NULL); + if (!dstContext) { + fprintf(stderr, "Failed to get %s ---> %s\n", + sws_format_name(srcFormat), + sws_format_name(dstFormat)); + res = -1; + + goto end; + } + outContext= sws_getContext(dstW, dstH, dstFormat, w, h, PIX_FMT_YUV420P, flags, NULL, NULL, NULL); + if (!outContext) { + fprintf(stderr, "Failed to get %s ---> %s\n", + sws_format_name(dstFormat), + sws_format_name(PIX_FMT_YUV420P)); + res = -1; + + goto end; + } +// printf("test %X %X %X -> %X %X %X\n", (int)ref[0], (int)ref[1], (int)ref[2], +// (int)src[0], (int)src[1], (int)src[2]); + + sws_scale(srcContext, ref, refStride, 0, h , src, srcStride); + sws_scale(dstContext, src, srcStride, 0, srcH, dst, dstStride); + sws_scale(outContext, dst, dstStride, 0, dstH, out, refStride); + + ssdY= getSSD(ref[0], out[0], refStride[0], refStride[0], w, h); + ssdU= getSSD(ref[1], out[1], refStride[1], refStride[1], (w+1)>>1, (h+1)>>1); + ssdV= getSSD(ref[2], out[2], refStride[2], refStride[2], (w+1)>>1, (h+1)>>1); + + if (srcFormat == PIX_FMT_GRAY8 || dstFormat==PIX_FMT_GRAY8) ssdU=ssdV=0; //FIXME check that output is really gray + + ssdY/= w*h; + ssdU/= w*h/4; + ssdV/= w*h/4; + + printf(" %s %dx%d -> %s %4dx%4d flags=%2d SSD=%5lld,%5lld,%5lld\n", + sws_format_name(srcFormat), srcW, srcH, + sws_format_name(dstFormat), 
dstW, dstH, + flags, ssdY, ssdU, ssdV); + fflush(stdout); + + end: + + sws_freeContext(srcContext); + sws_freeContext(dstContext); + sws_freeContext(outContext); + + for (i=0; i<3; i++){ + free(src[i]); + free(dst[i]); + free(out[i]); + } + + return res; +} + +static void selfTest(uint8_t *src[3], int stride[3], int w, int h){ + enum PixelFormat srcFormat, dstFormat; + int srcW, srcH, dstW, dstH; + int flags; + + for (srcFormat = 0; srcFormat < PIX_FMT_NB; srcFormat++) { + for (dstFormat = 0; dstFormat < PIX_FMT_NB; dstFormat++) { + printf("%s -> %s\n", + sws_format_name(srcFormat), + sws_format_name(dstFormat)); + fflush(stdout); + + srcW= w; + srcH= h; + for (dstW=w - w/3; dstW<= 4*w/3; dstW+= w/3){ + for (dstH=h - h/3; dstH<= 4*h/3; dstH+= h/3){ + for (flags=1; flags<33; flags*=2) { + int res; + + res = doTest(src, stride, w, h, srcFormat, dstFormat, + srcW, srcH, dstW, dstH, flags); + if (res < 0) { + dstW = 4 * w / 3; + dstH = 4 * h / 3; + flags = 33; + } + } + } + } + } + } +} + +#define W 96 +#define H 96 + +int main(int argc, char **argv){ + uint8_t *rgb_data = malloc (W*H*4); + uint8_t *rgb_src[3]= {rgb_data, NULL, NULL}; + int rgb_stride[3]={4*W, 0, 0}; + uint8_t *data = malloc (3*W*H); + uint8_t *src[3]= {data, data+W*H, data+W*H*2}; + int stride[3]={W, W, W}; + int x, y; + struct SwsContext *sws; + + sws= sws_getContext(W/12, H/12, PIX_FMT_RGB32, W, H, PIX_FMT_YUV420P, 2, NULL, NULL, NULL); + + for (y=0; y<H; y++){ + for (x=0; x<W*4; x++){ + rgb_data[ x + y*4*W]= random(); + } + } + sws_scale(sws, rgb_src, rgb_stride, 0, H, src, stride); + + selfTest(src, stride, W, H); + + return 123; +} diff --git a/libswscale/swscale.c b/libswscale/swscale.c new file mode 100644 index 0000000000..7c335f1680 --- /dev/null +++ b/libswscale/swscale.c @@ -0,0 +1,3198 @@ +/* + * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + * the C code (not assembly, mmx, ...) of this file can be used + * under the LGPL license too + */ + +/* + supported Input formats: YV12, I420/IYUV, YUY2, UYVY, BGR32, BGR32_1, BGR24, BGR16, BGR15, RGB32, RGB32_1, RGB24, Y8/Y800, YVU9/IF09, PAL8 + supported output formats: YV12, I420/IYUV, YUY2, UYVY, {BGR,RGB}{1,4,8,15,16,24,32}, Y8/Y800, YVU9/IF09 + {BGR,RGB}{1,4,8,15,16} support dithering + + unscaled special converters (YV12=I420=IYUV, Y800=Y8) + YV12 -> {BGR,RGB}{1,4,8,15,16,24,32} + x -> x + YUV9 -> YV12 + YUV9/YV12 -> Y800 + Y800 -> YUV9/YV12 + BGR24 -> BGR32 & RGB24 -> RGB32 + BGR32 -> BGR24 & RGB32 -> RGB24 + BGR15 -> BGR16 +*/ + +/* +tested special converters (most are tested actually, but I did not write it down ...) 
+ YV12 -> BGR16 + YV12 -> YV12 + BGR15 -> BGR16 + BGR16 -> BGR16 + YVU9 -> YV12 + +untested special converters + YV12/I420 -> BGR15/BGR24/BGR32 (it is the yuv2rgb stuff, so it should be OK) + YV12/I420 -> YV12/I420 + YUY2/BGR15/BGR24/BGR32/RGB24/RGB32 -> same format + BGR24 -> BGR32 & RGB24 -> RGB32 + BGR32 -> BGR24 & RGB32 -> RGB24 + BGR24 -> YV12 +*/ + +#define _SVID_SOURCE //needed for MAP_ANONYMOUS +#include <inttypes.h> +#include <string.h> +#include <math.h> +#include <stdio.h> +#include <unistd.h> +#include "config.h" +#include <assert.h> +#if HAVE_SYS_MMAN_H +#include <sys/mman.h> +#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS) +#define MAP_ANONYMOUS MAP_ANON +#endif +#endif +#include "swscale.h" +#include "swscale_internal.h" +#include "rgb2rgb.h" +#include "libavutil/x86_cpu.h" +#include "libavutil/bswap.h" + +unsigned swscale_version(void) +{ + return LIBSWSCALE_VERSION_INT; +} + +#undef MOVNTQ +#undef PAVGB + +//#undef HAVE_MMX2 +//#define HAVE_AMD3DNOW +//#undef HAVE_MMX +//#undef ARCH_X86 +//#define WORDS_BIGENDIAN +#define DITHER1XBPP + +#define FAST_BGR2YV12 // use 7 bit coefficients instead of 15 bit + +#define RET 0xC3 //near return opcode for x86 + +#ifdef M_PI +#define PI M_PI +#else +#define PI 3.14159265358979323846 +#endif + +#define isSupportedIn(x) ( \ + (x)==PIX_FMT_YUV420P \ + || (x)==PIX_FMT_YUVA420P \ + || (x)==PIX_FMT_YUYV422 \ + || (x)==PIX_FMT_UYVY422 \ + || (x)==PIX_FMT_RGB32 \ + || (x)==PIX_FMT_RGB32_1 \ + || (x)==PIX_FMT_BGR24 \ + || (x)==PIX_FMT_BGR565 \ + || (x)==PIX_FMT_BGR555 \ + || (x)==PIX_FMT_BGR32 \ + || (x)==PIX_FMT_BGR32_1 \ + || (x)==PIX_FMT_RGB24 \ + || (x)==PIX_FMT_RGB565 \ + || (x)==PIX_FMT_RGB555 \ + || (x)==PIX_FMT_GRAY8 \ + || (x)==PIX_FMT_YUV410P \ + || (x)==PIX_FMT_YUV440P \ + || (x)==PIX_FMT_GRAY16BE \ + || (x)==PIX_FMT_GRAY16LE \ + || (x)==PIX_FMT_YUV444P \ + || (x)==PIX_FMT_YUV422P \ + || (x)==PIX_FMT_YUV411P \ + || (x)==PIX_FMT_PAL8 \ + || (x)==PIX_FMT_BGR8 \ + || (x)==PIX_FMT_RGB8 \ + || (x)==PIX_FMT_BGR4_BYTE \ + || (x)==PIX_FMT_RGB4_BYTE \ + || (x)==PIX_FMT_YUV440P \ + || (x)==PIX_FMT_MONOWHITE \ + || (x)==PIX_FMT_MONOBLACK \ + ) +#define isSupportedOut(x) ( \ + (x)==PIX_FMT_YUV420P \ + || (x)==PIX_FMT_YUYV422 \ + || (x)==PIX_FMT_UYVY422 \ + || (x)==PIX_FMT_YUV444P \ + || (x)==PIX_FMT_YUV422P \ + || (x)==PIX_FMT_YUV411P \ + || isRGB(x) \ + || isBGR(x) \ + || (x)==PIX_FMT_NV12 \ + || (x)==PIX_FMT_NV21 \ + || (x)==PIX_FMT_GRAY16BE \ + || (x)==PIX_FMT_GRAY16LE \ + || (x)==PIX_FMT_GRAY8 \ + || (x)==PIX_FMT_YUV410P \ + || (x)==PIX_FMT_YUV440P \ + ) +#define isPacked(x) ( \ + (x)==PIX_FMT_PAL8 \ + || (x)==PIX_FMT_YUYV422 \ + || (x)==PIX_FMT_UYVY422 \ + || isRGB(x) \ + || isBGR(x) \ + ) +#define usePal(x) ( \ + (x)==PIX_FMT_PAL8 \ + || (x)==PIX_FMT_BGR4_BYTE \ + || (x)==PIX_FMT_RGB4_BYTE \ + || (x)==PIX_FMT_BGR8 \ + || (x)==PIX_FMT_RGB8 \ + ) + +#define RGB2YUV_SHIFT 15 +#define BY ( (int)(0.114*219/255*(1<<RGB2YUV_SHIFT)+0.5)) +#define BV (-(int)(0.081*224/255*(1<<RGB2YUV_SHIFT)+0.5)) +#define BU ( (int)(0.500*224/255*(1<<RGB2YUV_SHIFT)+0.5)) +#define GY ( (int)(0.587*219/255*(1<<RGB2YUV_SHIFT)+0.5)) +#define GV (-(int)(0.419*224/255*(1<<RGB2YUV_SHIFT)+0.5)) +#define GU (-(int)(0.331*224/255*(1<<RGB2YUV_SHIFT)+0.5)) +#define RY ( (int)(0.299*219/255*(1<<RGB2YUV_SHIFT)+0.5)) +#define RV ( (int)(0.500*224/255*(1<<RGB2YUV_SHIFT)+0.5)) +#define RU (-(int)(0.169*224/255*(1<<RGB2YUV_SHIFT)+0.5)) + +extern const int32_t ff_yuv2rgb_coeffs[8][4]; + +static const double rgb2yuv_table[8][9]={ + {0.7152, 0.0722, 0.2126, -0.386, 0.5, -0.115, 
-0.454, -0.046, 0.5}, + {0.7152, 0.0722, 0.2126, -0.386, 0.5, -0.115, -0.454, -0.046, 0.5}, + {0.587 , 0.114 , 0.299 , -0.331, 0.5, -0.169, -0.419, -0.081, 0.5}, + {0.587 , 0.114 , 0.299 , -0.331, 0.5, -0.169, -0.419, -0.081, 0.5}, + {0.59 , 0.11 , 0.30 , -0.331, 0.5, -0.169, -0.421, -0.079, 0.5}, //FCC + {0.587 , 0.114 , 0.299 , -0.331, 0.5, -0.169, -0.419, -0.081, 0.5}, + {0.587 , 0.114 , 0.299 , -0.331, 0.5, -0.169, -0.419, -0.081, 0.5}, //SMPTE 170M + {0.701 , 0.087 , 0.212 , -0.384, 0.5, -0.116, -0.445, -0.055, 0.5}, //SMPTE 240M +}; + +/* +NOTES +Special versions: fast Y 1:1 scaling (no interpolation in y direction) + +TODO +more intelligent misalignment avoidance for the horizontal scaler +write special vertical cubic upscale version +optimize C code (YV12 / minmax) +add support for packed pixel YUV input & output +add support for Y8 output +optimize BGR24 & BGR32 +add BGR4 output support +write special BGR->BGR scaler +*/ + +#if ARCH_X86 && CONFIG_GPL +DECLARE_ASM_CONST(8, uint64_t, bF8)= 0xF8F8F8F8F8F8F8F8LL; +DECLARE_ASM_CONST(8, uint64_t, bFC)= 0xFCFCFCFCFCFCFCFCLL; +DECLARE_ASM_CONST(8, uint64_t, w10)= 0x0010001000100010LL; +DECLARE_ASM_CONST(8, uint64_t, w02)= 0x0002000200020002LL; +DECLARE_ASM_CONST(8, uint64_t, bm00001111)=0x00000000FFFFFFFFLL; +DECLARE_ASM_CONST(8, uint64_t, bm00000111)=0x0000000000FFFFFFLL; +DECLARE_ASM_CONST(8, uint64_t, bm11111000)=0xFFFFFFFFFF000000LL; +DECLARE_ASM_CONST(8, uint64_t, bm01010101)=0x00FF00FF00FF00FFLL; + +const DECLARE_ALIGNED(8, uint64_t, ff_dither4[2]) = { + 0x0103010301030103LL, + 0x0200020002000200LL,}; + +const DECLARE_ALIGNED(8, uint64_t, ff_dither8[2]) = { + 0x0602060206020602LL, + 0x0004000400040004LL,}; + +DECLARE_ASM_CONST(8, uint64_t, b16Mask)= 0x001F001F001F001FLL; +DECLARE_ASM_CONST(8, uint64_t, g16Mask)= 0x07E007E007E007E0LL; +DECLARE_ASM_CONST(8, uint64_t, r16Mask)= 0xF800F800F800F800LL; +DECLARE_ASM_CONST(8, uint64_t, b15Mask)= 0x001F001F001F001FLL; +DECLARE_ASM_CONST(8, uint64_t, g15Mask)= 0x03E003E003E003E0LL; +DECLARE_ASM_CONST(8, uint64_t, r15Mask)= 0x7C007C007C007C00LL; + +DECLARE_ALIGNED(8, const uint64_t, ff_M24A) = 0x00FF0000FF0000FFLL; +DECLARE_ALIGNED(8, const uint64_t, ff_M24B) = 0xFF0000FF0000FF00LL; +DECLARE_ALIGNED(8, const uint64_t, ff_M24C) = 0x0000FF0000FF0000LL; + +#ifdef FAST_BGR2YV12 +DECLARE_ALIGNED(8, const uint64_t, ff_bgr2YCoeff) = 0x000000210041000DULL; +DECLARE_ALIGNED(8, const uint64_t, ff_bgr2UCoeff) = 0x0000FFEEFFDC0038ULL; +DECLARE_ALIGNED(8, const uint64_t, ff_bgr2VCoeff) = 0x00000038FFD2FFF8ULL; +#else +DECLARE_ALIGNED(8, const uint64_t, ff_bgr2YCoeff) = 0x000020E540830C8BULL; +DECLARE_ALIGNED(8, const uint64_t, ff_bgr2UCoeff) = 0x0000ED0FDAC23831ULL; +DECLARE_ALIGNED(8, const uint64_t, ff_bgr2VCoeff) = 0x00003831D0E6F6EAULL; +#endif /* FAST_BGR2YV12 */ +DECLARE_ALIGNED(8, const uint64_t, ff_bgr2YOffset) = 0x1010101010101010ULL; +DECLARE_ALIGNED(8, const uint64_t, ff_bgr2UVOffset) = 0x8080808080808080ULL; +DECLARE_ALIGNED(8, const uint64_t, ff_w1111) = 0x0001000100010001ULL; + +DECLARE_ASM_CONST(8, uint64_t, ff_bgr24toY1Coeff) = 0x0C88000040870C88ULL; +DECLARE_ASM_CONST(8, uint64_t, ff_bgr24toY2Coeff) = 0x20DE4087000020DEULL; +DECLARE_ASM_CONST(8, uint64_t, ff_rgb24toY1Coeff) = 0x20DE0000408720DEULL; +DECLARE_ASM_CONST(8, uint64_t, ff_rgb24toY2Coeff) = 0x0C88408700000C88ULL; +DECLARE_ASM_CONST(8, uint64_t, ff_bgr24toYOffset) = 0x0008400000084000ULL; + +DECLARE_ASM_CONST(8, uint64_t, ff_bgr24toUV[2][4]) = { + {0x38380000DAC83838ULL, 0xECFFDAC80000ECFFULL, 0xF6E40000D0E3F6E4ULL, 
0x3838D0E300003838ULL}, + {0xECFF0000DAC8ECFFULL, 0x3838DAC800003838ULL, 0x38380000D0E33838ULL, 0xF6E4D0E30000F6E4ULL}, +}; + +DECLARE_ASM_CONST(8, uint64_t, ff_bgr24toUVOffset)= 0x0040400000404000ULL; + +#endif /* ARCH_X86 && CONFIG_GPL */ + +// clipping helper table for C implementations: +static unsigned char clip_table[768]; + +static SwsVector *sws_getConvVec(SwsVector *a, SwsVector *b); + +static const uint8_t __attribute__((aligned(8))) dither_2x2_4[2][8]={ +{ 1, 3, 1, 3, 1, 3, 1, 3, }, +{ 2, 0, 2, 0, 2, 0, 2, 0, }, +}; + +static const uint8_t __attribute__((aligned(8))) dither_2x2_8[2][8]={ +{ 6, 2, 6, 2, 6, 2, 6, 2, }, +{ 0, 4, 0, 4, 0, 4, 0, 4, }, +}; + +const uint8_t __attribute__((aligned(8))) dither_8x8_32[8][8]={ +{ 17, 9, 23, 15, 16, 8, 22, 14, }, +{ 5, 29, 3, 27, 4, 28, 2, 26, }, +{ 21, 13, 19, 11, 20, 12, 18, 10, }, +{ 0, 24, 6, 30, 1, 25, 7, 31, }, +{ 16, 8, 22, 14, 17, 9, 23, 15, }, +{ 4, 28, 2, 26, 5, 29, 3, 27, }, +{ 20, 12, 18, 10, 21, 13, 19, 11, }, +{ 1, 25, 7, 31, 0, 24, 6, 30, }, +}; + +#if 0 +const uint8_t __attribute__((aligned(8))) dither_8x8_64[8][8]={ +{ 0, 48, 12, 60, 3, 51, 15, 63, }, +{ 32, 16, 44, 28, 35, 19, 47, 31, }, +{ 8, 56, 4, 52, 11, 59, 7, 55, }, +{ 40, 24, 36, 20, 43, 27, 39, 23, }, +{ 2, 50, 14, 62, 1, 49, 13, 61, }, +{ 34, 18, 46, 30, 33, 17, 45, 29, }, +{ 10, 58, 6, 54, 9, 57, 5, 53, }, +{ 42, 26, 38, 22, 41, 25, 37, 21, }, +}; +#endif + +const uint8_t __attribute__((aligned(8))) dither_8x8_73[8][8]={ +{ 0, 55, 14, 68, 3, 58, 17, 72, }, +{ 37, 18, 50, 32, 40, 22, 54, 35, }, +{ 9, 64, 5, 59, 13, 67, 8, 63, }, +{ 46, 27, 41, 23, 49, 31, 44, 26, }, +{ 2, 57, 16, 71, 1, 56, 15, 70, }, +{ 39, 21, 52, 34, 38, 19, 51, 33, }, +{ 11, 66, 7, 62, 10, 65, 6, 60, }, +{ 48, 30, 43, 25, 47, 29, 42, 24, }, +}; + +#if 0 +const uint8_t __attribute__((aligned(8))) dither_8x8_128[8][8]={ +{ 68, 36, 92, 60, 66, 34, 90, 58, }, +{ 20, 116, 12, 108, 18, 114, 10, 106, }, +{ 84, 52, 76, 44, 82, 50, 74, 42, }, +{ 0, 96, 24, 120, 6, 102, 30, 126, }, +{ 64, 32, 88, 56, 70, 38, 94, 62, }, +{ 16, 112, 8, 104, 22, 118, 14, 110, }, +{ 80, 48, 72, 40, 86, 54, 78, 46, }, +{ 4, 100, 28, 124, 2, 98, 26, 122, }, +}; +#endif + +#if 1 +const uint8_t __attribute__((aligned(8))) dither_8x8_220[8][8]={ +{117, 62, 158, 103, 113, 58, 155, 100, }, +{ 34, 199, 21, 186, 31, 196, 17, 182, }, +{144, 89, 131, 76, 141, 86, 127, 72, }, +{ 0, 165, 41, 206, 10, 175, 52, 217, }, +{110, 55, 151, 96, 120, 65, 162, 107, }, +{ 28, 193, 14, 179, 38, 203, 24, 189, }, +{138, 83, 124, 69, 148, 93, 134, 79, }, +{ 7, 172, 48, 213, 3, 168, 45, 210, }, +}; +#elif 1 +// tries to correct a gamma of 1.5 +const uint8_t __attribute__((aligned(8))) dither_8x8_220[8][8]={ +{ 0, 143, 18, 200, 2, 156, 25, 215, }, +{ 78, 28, 125, 64, 89, 36, 138, 74, }, +{ 10, 180, 3, 161, 16, 195, 8, 175, }, +{109, 51, 93, 38, 121, 60, 105, 47, }, +{ 1, 152, 23, 210, 0, 147, 20, 205, }, +{ 85, 33, 134, 71, 81, 30, 130, 67, }, +{ 14, 190, 6, 171, 12, 185, 5, 166, }, +{117, 57, 101, 44, 113, 54, 97, 41, }, +}; +#elif 1 +// tries to correct a gamma of 2.0 +const uint8_t __attribute__((aligned(8))) dither_8x8_220[8][8]={ +{ 0, 124, 8, 193, 0, 140, 12, 213, }, +{ 55, 14, 104, 42, 66, 19, 119, 52, }, +{ 3, 168, 1, 145, 6, 187, 3, 162, }, +{ 86, 31, 70, 21, 99, 39, 82, 28, }, +{ 0, 134, 11, 206, 0, 129, 9, 200, }, +{ 62, 17, 114, 48, 58, 16, 109, 45, }, +{ 5, 181, 2, 157, 4, 175, 1, 151, }, +{ 95, 36, 78, 26, 90, 34, 74, 24, }, +}; +#else +// tries to correct a gamma of 2.5 +const uint8_t __attribute__((aligned(8))) dither_8x8_220[8][8]={ +{ 
0, 107, 3, 187, 0, 125, 6, 212, }, +{ 39, 7, 86, 28, 49, 11, 102, 36, }, +{ 1, 158, 0, 131, 3, 180, 1, 151, }, +{ 68, 19, 52, 12, 81, 25, 64, 17, }, +{ 0, 119, 5, 203, 0, 113, 4, 195, }, +{ 45, 9, 96, 33, 42, 8, 91, 30, }, +{ 2, 172, 1, 144, 2, 165, 0, 137, }, +{ 77, 23, 60, 15, 72, 21, 56, 14, }, +}; +#endif + +const char *sws_format_name(enum PixelFormat format) +{ + switch (format) { + case PIX_FMT_YUV420P: + return "yuv420p"; + case PIX_FMT_YUVA420P: + return "yuva420p"; + case PIX_FMT_YUYV422: + return "yuyv422"; + case PIX_FMT_RGB24: + return "rgb24"; + case PIX_FMT_BGR24: + return "bgr24"; + case PIX_FMT_YUV422P: + return "yuv422p"; + case PIX_FMT_YUV444P: + return "yuv444p"; + case PIX_FMT_RGB32: + return "rgb32"; + case PIX_FMT_YUV410P: + return "yuv410p"; + case PIX_FMT_YUV411P: + return "yuv411p"; + case PIX_FMT_RGB565: + return "rgb565"; + case PIX_FMT_RGB555: + return "rgb555"; + case PIX_FMT_GRAY16BE: + return "gray16be"; + case PIX_FMT_GRAY16LE: + return "gray16le"; + case PIX_FMT_GRAY8: + return "gray8"; + case PIX_FMT_MONOWHITE: + return "mono white"; + case PIX_FMT_MONOBLACK: + return "mono black"; + case PIX_FMT_PAL8: + return "Palette"; + case PIX_FMT_YUVJ420P: + return "yuvj420p"; + case PIX_FMT_YUVJ422P: + return "yuvj422p"; + case PIX_FMT_YUVJ444P: + return "yuvj444p"; + case PIX_FMT_XVMC_MPEG2_MC: + return "xvmc_mpeg2_mc"; + case PIX_FMT_XVMC_MPEG2_IDCT: + return "xvmc_mpeg2_idct"; + case PIX_FMT_UYVY422: + return "uyvy422"; + case PIX_FMT_UYYVYY411: + return "uyyvyy411"; + case PIX_FMT_RGB32_1: + return "rgb32x"; + case PIX_FMT_BGR32_1: + return "bgr32x"; + case PIX_FMT_BGR32: + return "bgr32"; + case PIX_FMT_BGR565: + return "bgr565"; + case PIX_FMT_BGR555: + return "bgr555"; + case PIX_FMT_BGR8: + return "bgr8"; + case PIX_FMT_BGR4: + return "bgr4"; + case PIX_FMT_BGR4_BYTE: + return "bgr4 byte"; + case PIX_FMT_RGB8: + return "rgb8"; + case PIX_FMT_RGB4: + return "rgb4"; + case PIX_FMT_RGB4_BYTE: + return "rgb4 byte"; + case PIX_FMT_NV12: + return "nv12"; + case PIX_FMT_NV21: + return "nv21"; + case PIX_FMT_YUV440P: + return "yuv440p"; + case PIX_FMT_VDPAU_H264: + return "vdpau_h264"; + case PIX_FMT_VDPAU_MPEG1: + return "vdpau_mpeg1"; + case PIX_FMT_VDPAU_MPEG2: + return "vdpau_mpeg2"; + case PIX_FMT_VDPAU_WMV3: + return "vdpau_wmv3"; + case PIX_FMT_VDPAU_VC1: + return "vdpau_vc1"; + default: + return "Unknown format"; + } +} + +static inline void yuv2yuvXinC(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize, + int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, + uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW) +{ + //FIXME Optimize (just quickly written not optimized..) + int i; + for (i=0; i<dstW; i++) + { + int val=1<<18; + int j; + for (j=0; j<lumFilterSize; j++) + val += lumSrc[j][i] * lumFilter[j]; + + dest[i]= av_clip_uint8(val>>19); + } + + if (uDest) + for (i=0; i<chrDstW; i++) + { + int u=1<<18; + int v=1<<18; + int j; + for (j=0; j<chrFilterSize; j++) + { + u += chrSrc[j][i] * chrFilter[j]; + v += chrSrc[j][i + VOFW] * chrFilter[j]; + } + + uDest[i]= av_clip_uint8(u>>19); + vDest[i]= av_clip_uint8(v>>19); + } +} + +static inline void yuv2nv12XinC(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize, + int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, + uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat) +{ + //FIXME Optimize (just quickly written not optimized..) 
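+ /* Editorial note, not from the original source: the fixed-point layout here mirrors yuv2yuvXinC() above. The intermediate lumSrc/chrSrc samples appear to be 8-bit values scaled by 2^7 (compare the >>7 reads in the YSCALE_YUV_2_PACKED1_C macro below), with the vertical filter coefficients normalized so the accumulated products carry the pixel value at <<19; the initial 1<<18 is then a +0.5 rounding bias for the final >>19 with clipping. + chrSrc holds U at [i] and V at [i + VOFW]; the PIX_FMT_NV12 branch below stores the chroma interleaved as U,V while the NV21 branch swaps the two. */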
+ int i; + for (i=0; i<dstW; i++) + { + int val=1<<18; + int j; + for (j=0; j<lumFilterSize; j++) + val += lumSrc[j][i] * lumFilter[j]; + + dest[i]= av_clip_uint8(val>>19); + } + + if (!uDest) + return; + + if (dstFormat == PIX_FMT_NV12) + for (i=0; i<chrDstW; i++) + { + int u=1<<18; + int v=1<<18; + int j; + for (j=0; j<chrFilterSize; j++) + { + u += chrSrc[j][i] * chrFilter[j]; + v += chrSrc[j][i + VOFW] * chrFilter[j]; + } + + uDest[2*i]= av_clip_uint8(u>>19); + uDest[2*i+1]= av_clip_uint8(v>>19); + } + else + for (i=0; i<chrDstW; i++) + { + int u=1<<18; + int v=1<<18; + int j; + for (j=0; j<chrFilterSize; j++) + { + u += chrSrc[j][i] * chrFilter[j]; + v += chrSrc[j][i + VOFW] * chrFilter[j]; + } + + uDest[2*i]= av_clip_uint8(v>>19); + uDest[2*i+1]= av_clip_uint8(u>>19); + } +} + +#define YSCALE_YUV_2_PACKEDX_NOCLIP_C(type) \ + for (i=0; i<(dstW>>1); i++){\ + int j;\ + int Y1 = 1<<18;\ + int Y2 = 1<<18;\ + int U = 1<<18;\ + int V = 1<<18;\ + type av_unused *r, *b, *g;\ + const int i2= 2*i;\ + \ + for (j=0; j<lumFilterSize; j++)\ + {\ + Y1 += lumSrc[j][i2] * lumFilter[j];\ + Y2 += lumSrc[j][i2+1] * lumFilter[j];\ + }\ + for (j=0; j<chrFilterSize; j++)\ + {\ + U += chrSrc[j][i] * chrFilter[j];\ + V += chrSrc[j][i+VOFW] * chrFilter[j];\ + }\ + Y1>>=19;\ + Y2>>=19;\ + U >>=19;\ + V >>=19;\ + +#define YSCALE_YUV_2_PACKEDX_C(type) \ + YSCALE_YUV_2_PACKEDX_NOCLIP_C(type)\ + if ((Y1|Y2|U|V)&256)\ + {\ + if (Y1>255) Y1=255; \ + else if (Y1<0)Y1=0; \ + if (Y2>255) Y2=255; \ + else if (Y2<0)Y2=0; \ + if (U>255) U=255; \ + else if (U<0) U=0; \ + if (V>255) V=255; \ + else if (V<0) V=0; \ + } + +#define YSCALE_YUV_2_PACKEDX_FULL_C \ + for (i=0; i<dstW; i++){\ + int j;\ + int Y = 0;\ + int U = -128<<19;\ + int V = -128<<19;\ + int R,G,B;\ + \ + for (j=0; j<lumFilterSize; j++){\ + Y += lumSrc[j][i ] * lumFilter[j];\ + }\ + for (j=0; j<chrFilterSize; j++){\ + U += chrSrc[j][i ] * chrFilter[j];\ + V += chrSrc[j][i+VOFW] * chrFilter[j];\ + }\ + Y >>=10;\ + U >>=10;\ + V >>=10;\ + +#define YSCALE_YUV_2_RGBX_FULL_C(rnd) \ + YSCALE_YUV_2_PACKEDX_FULL_C\ + Y-= c->yuv2rgb_y_offset;\ + Y*= c->yuv2rgb_y_coeff;\ + Y+= rnd;\ + R= Y + V*c->yuv2rgb_v2r_coeff;\ + G= Y + V*c->yuv2rgb_v2g_coeff + U*c->yuv2rgb_u2g_coeff;\ + B= Y + U*c->yuv2rgb_u2b_coeff;\ + if ((R|G|B)&(0xC0000000)){\ + if (R>=(256<<22)) R=(256<<22)-1; \ + else if (R<0)R=0; \ + if (G>=(256<<22)) G=(256<<22)-1; \ + else if (G<0)G=0; \ + if (B>=(256<<22)) B=(256<<22)-1; \ + else if (B<0)B=0; \ + }\ + + +#define YSCALE_YUV_2_GRAY16_C \ + for (i=0; i<(dstW>>1); i++){\ + int j;\ + int Y1 = 1<<18;\ + int Y2 = 1<<18;\ + int U = 1<<18;\ + int V = 1<<18;\ + \ + const int i2= 2*i;\ + \ + for (j=0; j<lumFilterSize; j++)\ + {\ + Y1 += lumSrc[j][i2] * lumFilter[j];\ + Y2 += lumSrc[j][i2+1] * lumFilter[j];\ + }\ + Y1>>=11;\ + Y2>>=11;\ + if ((Y1|Y2|U|V)&65536)\ + {\ + if (Y1>65535) Y1=65535; \ + else if (Y1<0)Y1=0; \ + if (Y2>65535) Y2=65535; \ + else if (Y2<0)Y2=0; \ + } + +#define YSCALE_YUV_2_RGBX_C(type) \ + YSCALE_YUV_2_PACKEDX_C(type) /* FIXME fix tables so that clipping is not needed and then use _NOCLIP*/\ + r = (type *)c->table_rV[V]; \ + g = (type *)(c->table_gU[U] + c->table_gV[V]); \ + b = (type *)c->table_bU[U]; \ + +#define YSCALE_YUV_2_PACKED2_C \ + for (i=0; i<(dstW>>1); i++){ \ + const int i2= 2*i; \ + int Y1= (buf0[i2 ]*yalpha1+buf1[i2 ]*yalpha)>>19; \ + int Y2= (buf0[i2+1]*yalpha1+buf1[i2+1]*yalpha)>>19; \ + int U= (uvbuf0[i ]*uvalpha1+uvbuf1[i ]*uvalpha)>>19; \ + int V= (uvbuf0[i+VOFW]*uvalpha1+uvbuf1[i+VOFW]*uvalpha)>>19; \ + +#define 
YSCALE_YUV_2_GRAY16_2_C \ + for (i=0; i<(dstW>>1); i++){ \ + const int i2= 2*i; \ + int Y1= (buf0[i2 ]*yalpha1+buf1[i2 ]*yalpha)>>11; \ + int Y2= (buf0[i2+1]*yalpha1+buf1[i2+1]*yalpha)>>11; \ + +#define YSCALE_YUV_2_RGB2_C(type) \ + YSCALE_YUV_2_PACKED2_C\ + type *r, *b, *g;\ + r = (type *)c->table_rV[V];\ + g = (type *)(c->table_gU[U] + c->table_gV[V]);\ + b = (type *)c->table_bU[U];\ + +#define YSCALE_YUV_2_PACKED1_C \ + for (i=0; i<(dstW>>1); i++){\ + const int i2= 2*i;\ + int Y1= buf0[i2 ]>>7;\ + int Y2= buf0[i2+1]>>7;\ + int U= (uvbuf1[i ])>>7;\ + int V= (uvbuf1[i+VOFW])>>7;\ + +#define YSCALE_YUV_2_GRAY16_1_C \ + for (i=0; i<(dstW>>1); i++){\ + const int i2= 2*i;\ + int Y1= buf0[i2 ]<<1;\ + int Y2= buf0[i2+1]<<1;\ + +#define YSCALE_YUV_2_RGB1_C(type) \ + YSCALE_YUV_2_PACKED1_C\ + type *r, *b, *g;\ + r = (type *)c->table_rV[V];\ + g = (type *)(c->table_gU[U] + c->table_gV[V]);\ + b = (type *)c->table_bU[U];\ + +#define YSCALE_YUV_2_PACKED1B_C \ + for (i=0; i<(dstW>>1); i++){\ + const int i2= 2*i;\ + int Y1= buf0[i2 ]>>7;\ + int Y2= buf0[i2+1]>>7;\ + int U= (uvbuf0[i ] + uvbuf1[i ])>>8;\ + int V= (uvbuf0[i+VOFW] + uvbuf1[i+VOFW])>>8;\ + +#define YSCALE_YUV_2_RGB1B_C(type) \ + YSCALE_YUV_2_PACKED1B_C\ + type *r, *b, *g;\ + r = (type *)c->table_rV[V];\ + g = (type *)(c->table_gU[U] + c->table_gV[V]);\ + b = (type *)c->table_bU[U];\ + +#define YSCALE_YUV_2_MONO2_C \ + const uint8_t * const d128=dither_8x8_220[y&7];\ + uint8_t *g= c->table_gU[128] + c->table_gV[128];\ + for (i=0; i<dstW-7; i+=8){\ + int acc;\ + acc = g[((buf0[i ]*yalpha1+buf1[i ]*yalpha)>>19) + d128[0]];\ + acc+= acc + g[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19) + d128[1]];\ + acc+= acc + g[((buf0[i+2]*yalpha1+buf1[i+2]*yalpha)>>19) + d128[2]];\ + acc+= acc + g[((buf0[i+3]*yalpha1+buf1[i+3]*yalpha)>>19) + d128[3]];\ + acc+= acc + g[((buf0[i+4]*yalpha1+buf1[i+4]*yalpha)>>19) + d128[4]];\ + acc+= acc + g[((buf0[i+5]*yalpha1+buf1[i+5]*yalpha)>>19) + d128[5]];\ + acc+= acc + g[((buf0[i+6]*yalpha1+buf1[i+6]*yalpha)>>19) + d128[6]];\ + acc+= acc + g[((buf0[i+7]*yalpha1+buf1[i+7]*yalpha)>>19) + d128[7]];\ + ((uint8_t*)dest)[0]= c->dstFormat == PIX_FMT_MONOBLACK ? acc : ~acc;\ + dest++;\ + }\ + + +#define YSCALE_YUV_2_MONOX_C \ + const uint8_t * const d128=dither_8x8_220[y&7];\ + uint8_t *g= c->table_gU[128] + c->table_gV[128];\ + int acc=0;\ + for (i=0; i<dstW-1; i+=2){\ + int j;\ + int Y1=1<<18;\ + int Y2=1<<18;\ +\ + for (j=0; j<lumFilterSize; j++)\ + {\ + Y1 += lumSrc[j][i] * lumFilter[j];\ + Y2 += lumSrc[j][i+1] * lumFilter[j];\ + }\ + Y1>>=19;\ + Y2>>=19;\ + if ((Y1|Y2)&256)\ + {\ + if (Y1>255) Y1=255;\ + else if (Y1<0)Y1=0;\ + if (Y2>255) Y2=255;\ + else if (Y2<0)Y2=0;\ + }\ + acc+= acc + g[Y1+d128[(i+0)&7]];\ + acc+= acc + g[Y2+d128[(i+1)&7]];\ + if ((i&7)==6){\ + ((uint8_t*)dest)[0]= c->dstFormat == PIX_FMT_MONOBLACK ? 
acc : ~acc;\ + dest++;\ + }\ + } + + +#define YSCALE_YUV_2_ANYRGB_C(func, func2, func_g16, func_monoblack)\ + switch(c->dstFormat)\ + {\ + case PIX_FMT_RGB32:\ + case PIX_FMT_BGR32:\ + case PIX_FMT_RGB32_1:\ + case PIX_FMT_BGR32_1:\ + func(uint32_t)\ + ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1];\ + ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2];\ + } \ + break;\ + case PIX_FMT_RGB24:\ + func(uint8_t)\ + ((uint8_t*)dest)[0]= r[Y1];\ + ((uint8_t*)dest)[1]= g[Y1];\ + ((uint8_t*)dest)[2]= b[Y1];\ + ((uint8_t*)dest)[3]= r[Y2];\ + ((uint8_t*)dest)[4]= g[Y2];\ + ((uint8_t*)dest)[5]= b[Y2];\ + dest+=6;\ + }\ + break;\ + case PIX_FMT_BGR24:\ + func(uint8_t)\ + ((uint8_t*)dest)[0]= b[Y1];\ + ((uint8_t*)dest)[1]= g[Y1];\ + ((uint8_t*)dest)[2]= r[Y1];\ + ((uint8_t*)dest)[3]= b[Y2];\ + ((uint8_t*)dest)[4]= g[Y2];\ + ((uint8_t*)dest)[5]= r[Y2];\ + dest+=6;\ + }\ + break;\ + case PIX_FMT_RGB565:\ + case PIX_FMT_BGR565:\ + {\ + const int dr1= dither_2x2_8[y&1 ][0];\ + const int dg1= dither_2x2_4[y&1 ][0];\ + const int db1= dither_2x2_8[(y&1)^1][0];\ + const int dr2= dither_2x2_8[y&1 ][1];\ + const int dg2= dither_2x2_4[y&1 ][1];\ + const int db2= dither_2x2_8[(y&1)^1][1];\ + func(uint16_t)\ + ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];\ + ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];\ + }\ + }\ + break;\ + case PIX_FMT_RGB555:\ + case PIX_FMT_BGR555:\ + {\ + const int dr1= dither_2x2_8[y&1 ][0];\ + const int dg1= dither_2x2_8[y&1 ][1];\ + const int db1= dither_2x2_8[(y&1)^1][0];\ + const int dr2= dither_2x2_8[y&1 ][1];\ + const int dg2= dither_2x2_8[y&1 ][0];\ + const int db2= dither_2x2_8[(y&1)^1][1];\ + func(uint16_t)\ + ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];\ + ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];\ + }\ + }\ + break;\ + case PIX_FMT_RGB8:\ + case PIX_FMT_BGR8:\ + {\ + const uint8_t * const d64= dither_8x8_73[y&7];\ + const uint8_t * const d32= dither_8x8_32[y&7];\ + func(uint8_t)\ + ((uint8_t*)dest)[i2+0]= r[Y1+d32[(i2+0)&7]] + g[Y1+d32[(i2+0)&7]] + b[Y1+d64[(i2+0)&7]];\ + ((uint8_t*)dest)[i2+1]= r[Y2+d32[(i2+1)&7]] + g[Y2+d32[(i2+1)&7]] + b[Y2+d64[(i2+1)&7]];\ + }\ + }\ + break;\ + case PIX_FMT_RGB4:\ + case PIX_FMT_BGR4:\ + {\ + const uint8_t * const d64= dither_8x8_73 [y&7];\ + const uint8_t * const d128=dither_8x8_220[y&7];\ + func(uint8_t)\ + ((uint8_t*)dest)[i]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]]\ + + ((r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]])<<4);\ + }\ + }\ + break;\ + case PIX_FMT_RGB4_BYTE:\ + case PIX_FMT_BGR4_BYTE:\ + {\ + const uint8_t * const d64= dither_8x8_73 [y&7];\ + const uint8_t * const d128=dither_8x8_220[y&7];\ + func(uint8_t)\ + ((uint8_t*)dest)[i2+0]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]];\ + ((uint8_t*)dest)[i2+1]= r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]];\ + }\ + }\ + break;\ + case PIX_FMT_MONOBLACK:\ + case PIX_FMT_MONOWHITE:\ + {\ + func_monoblack\ + }\ + break;\ + case PIX_FMT_YUYV422:\ + func2\ + ((uint8_t*)dest)[2*i2+0]= Y1;\ + ((uint8_t*)dest)[2*i2+1]= U;\ + ((uint8_t*)dest)[2*i2+2]= Y2;\ + ((uint8_t*)dest)[2*i2+3]= V;\ + } \ + break;\ + case PIX_FMT_UYVY422:\ + func2\ + ((uint8_t*)dest)[2*i2+0]= U;\ + ((uint8_t*)dest)[2*i2+1]= Y1;\ + ((uint8_t*)dest)[2*i2+2]= V;\ + ((uint8_t*)dest)[2*i2+3]= Y2;\ + } \ + break;\ + case PIX_FMT_GRAY16BE:\ + func_g16\ + ((uint8_t*)dest)[2*i2+0]= Y1>>8;\ + ((uint8_t*)dest)[2*i2+1]= Y1;\ + ((uint8_t*)dest)[2*i2+2]= Y2>>8;\ + ((uint8_t*)dest)[2*i2+3]= Y2;\ 
+ } \ + break;\ + case PIX_FMT_GRAY16LE:\ + func_g16\ + ((uint8_t*)dest)[2*i2+0]= Y1;\ + ((uint8_t*)dest)[2*i2+1]= Y1>>8;\ + ((uint8_t*)dest)[2*i2+2]= Y2;\ + ((uint8_t*)dest)[2*i2+3]= Y2>>8;\ + } \ + break;\ + }\ + + +static inline void yuv2packedXinC(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize, + int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, + uint8_t *dest, int dstW, int y) +{ + int i; + YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGBX_C, YSCALE_YUV_2_PACKEDX_C(void), YSCALE_YUV_2_GRAY16_C, YSCALE_YUV_2_MONOX_C) +} + +static inline void yuv2rgbXinC_full(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize, + int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, + uint8_t *dest, int dstW, int y) +{ + int i; + int step= fmt_depth(c->dstFormat)/8; + int aidx= 3; + + switch(c->dstFormat){ + case PIX_FMT_ARGB: + dest++; + aidx= -1; + case PIX_FMT_RGB24: + aidx--; + case PIX_FMT_RGBA: + YSCALE_YUV_2_RGBX_FULL_C(1<<21) + dest[aidx]= 255; + dest[0]= R>>22; + dest[1]= G>>22; + dest[2]= B>>22; + dest+= step; + } + break; + case PIX_FMT_ABGR: + dest++; + aidx= -1; + case PIX_FMT_BGR24: + aidx--; + case PIX_FMT_BGRA: + YSCALE_YUV_2_RGBX_FULL_C(1<<21) + dest[aidx]= 255; + dest[0]= B>>22; + dest[1]= G>>22; + dest[2]= R>>22; + dest+= step; + } + break; + default: + assert(0); + } +} + +//Note: we have C, X86, MMX, MMX2, 3DNOW versions, there is no 3DNOW+MMX2 one +//Plain C versions +#if !HAVE_MMX || defined (RUNTIME_CPUDETECT) || !CONFIG_GPL +#define COMPILE_C +#endif + +#if ARCH_PPC +#if (HAVE_ALTIVEC || defined (RUNTIME_CPUDETECT)) && CONFIG_GPL +#undef COMPILE_C +#define COMPILE_ALTIVEC +#endif +#endif //ARCH_PPC + +#if ARCH_X86 + +#if ((HAVE_MMX && !HAVE_AMD3DNOW && !HAVE_MMX2) || defined (RUNTIME_CPUDETECT)) && CONFIG_GPL +#define COMPILE_MMX +#endif + +#if (HAVE_MMX2 || defined (RUNTIME_CPUDETECT)) && CONFIG_GPL +#define COMPILE_MMX2 +#endif + +#if ((HAVE_AMD3DNOW && !HAVE_MMX2) || defined (RUNTIME_CPUDETECT)) && CONFIG_GPL +#define COMPILE_3DNOW +#endif +#endif //ARCH_X86 + +#undef HAVE_MMX +#undef HAVE_MMX2 +#undef HAVE_AMD3DNOW +#undef HAVE_ALTIVEC +#define HAVE_MMX 0 +#define HAVE_MMX2 0 +#define HAVE_AMD3DNOW 0 +#define HAVE_ALTIVEC 0 + +#ifdef COMPILE_C +#define RENAME(a) a ## _C +#include "swscale_template.c" +#endif + +#ifdef COMPILE_ALTIVEC +#undef RENAME +#undef HAVE_ALTIVEC +#define HAVE_ALTIVEC 1 +#define RENAME(a) a ## _altivec +#include "swscale_template.c" +#endif + +#if ARCH_X86 + +//x86 versions +/* +#undef RENAME +#undef HAVE_MMX +#undef HAVE_MMX2 +#undef HAVE_AMD3DNOW +#define ARCH_X86 +#define RENAME(a) a ## _X86 +#include "swscale_template.c" +*/ +//MMX versions +#ifdef COMPILE_MMX +#undef RENAME +#undef HAVE_MMX +#undef HAVE_MMX2 +#undef HAVE_AMD3DNOW +#define HAVE_MMX 1 +#define HAVE_MMX2 0 +#define HAVE_AMD3DNOW 0 +#define RENAME(a) a ## _MMX +#include "swscale_template.c" +#endif + +//MMX2 versions +#ifdef COMPILE_MMX2 +#undef RENAME +#undef HAVE_MMX +#undef HAVE_MMX2 +#undef HAVE_AMD3DNOW +#define HAVE_MMX 1 +#define HAVE_MMX2 1 +#define HAVE_AMD3DNOW 0 +#define RENAME(a) a ## _MMX2 +#include "swscale_template.c" +#endif + +//3DNOW versions +#ifdef COMPILE_3DNOW +#undef RENAME +#undef HAVE_MMX +#undef HAVE_MMX2 +#undef HAVE_AMD3DNOW +#define HAVE_MMX 1 +#define HAVE_MMX2 0 +#define HAVE_AMD3DNOW 1 +#define RENAME(a) a ## _3DNow +#include "swscale_template.c" +#endif + +#endif //ARCH_X86 + +// minor note: the HAVE_xyz are messed up after this line so don't use them + +static double getSplineCoeff(double a, double b, double 
c, double d, double dist) +{ +// printf("%f %f %f %f %f\n", a,b,c,d,dist); + if (dist<=1.0) return ((d*dist + c)*dist + b)*dist +a; + else return getSplineCoeff( 0.0, + b+ 2.0*c + 3.0*d, + c + 3.0*d, + -b- 3.0*c - 6.0*d, + dist-1.0); +} + +static inline int initFilter(int16_t **outFilter, int16_t **filterPos, int *outFilterSize, int xInc, + int srcW, int dstW, int filterAlign, int one, int flags, + SwsVector *srcFilter, SwsVector *dstFilter, double param[2]) +{ + int i; + int filterSize; + int filter2Size; + int minFilterSize; + int64_t *filter=NULL; + int64_t *filter2=NULL; + const int64_t fone= 1LL<<54; + int ret= -1; +#if ARCH_X86 + if (flags & SWS_CPU_CAPS_MMX) + __asm__ volatile("emms\n\t"::: "memory"); //FIXME this should not be required but it IS (even for non-MMX versions) +#endif + + // NOTE: the +1 is for the MMX scaler which reads over the end + *filterPos = av_malloc((dstW+1)*sizeof(int16_t)); + + if (FFABS(xInc - 0x10000) <10) // unscaled + { + int i; + filterSize= 1; + filter= av_mallocz(dstW*sizeof(*filter)*filterSize); + + for (i=0; i<dstW; i++) + { + filter[i*filterSize]= fone; + (*filterPos)[i]=i; + } + + } + else if (flags&SWS_POINT) // lame looking point sampling mode + { + int i; + int xDstInSrc; + filterSize= 1; + filter= av_malloc(dstW*sizeof(*filter)*filterSize); + + xDstInSrc= xInc/2 - 0x8000; + for (i=0; i<dstW; i++) + { + int xx= (xDstInSrc - ((filterSize-1)<<15) + (1<<15))>>16; + + (*filterPos)[i]= xx; + filter[i]= fone; + xDstInSrc+= xInc; + } + } + else if ((xInc <= (1<<16) && (flags&SWS_AREA)) || (flags&SWS_FAST_BILINEAR)) // bilinear upscale + { + int i; + int xDstInSrc; + if (flags&SWS_BICUBIC) filterSize= 4; + else if (flags&SWS_X ) filterSize= 4; + else filterSize= 2; // SWS_BILINEAR / SWS_AREA + filter= av_malloc(dstW*sizeof(*filter)*filterSize); + + xDstInSrc= xInc/2 - 0x8000; + for (i=0; i<dstW; i++) + { + int xx= (xDstInSrc - ((filterSize-1)<<15) + (1<<15))>>16; + int j; + + (*filterPos)[i]= xx; + //bilinear upscale / linear interpolate / area averaging + for (j=0; j<filterSize; j++) + { + int64_t coeff= fone - FFABS((xx<<16) - xDstInSrc)*(fone>>16); + if (coeff<0) coeff=0; + filter[i*filterSize + j]= coeff; + xx++; + } + xDstInSrc+= xInc; + } + } + else + { + int xDstInSrc; + int sizeFactor; + + if (flags&SWS_BICUBIC) sizeFactor= 4; + else if (flags&SWS_X) sizeFactor= 8; + else if (flags&SWS_AREA) sizeFactor= 1; //downscale only, for upscale it is bilinear + else if (flags&SWS_GAUSS) sizeFactor= 8; // infinite ;) + else if (flags&SWS_LANCZOS) sizeFactor= param[0] != SWS_PARAM_DEFAULT ? ceil(2*param[0]) : 6; + else if (flags&SWS_SINC) sizeFactor= 20; // infinite ;) + else if (flags&SWS_SPLINE) sizeFactor= 20; // infinite ;) + else if (flags&SWS_BILINEAR) sizeFactor= 2; + else { + sizeFactor= 0; //GCC warning killer + assert(0); + } + + if (xInc <= 1<<16) filterSize= 1 + sizeFactor; // upscale + else filterSize= 1 + (sizeFactor*srcW + dstW - 1)/ dstW; + + if (filterSize > srcW-2) filterSize=srcW-2; + + filter= av_malloc(dstW*sizeof(*filter)*filterSize); + + xDstInSrc= xInc - 0x10000; + for (i=0; i<dstW; i++) + { + int xx= (xDstInSrc - ((filterSize-2)<<16)) / (1<<17); + int j; + (*filterPos)[i]= xx; + for (j=0; j<filterSize; j++) + { + int64_t d= ((int64_t)FFABS((xx<<17) - xDstInSrc))<<13; + double floatd; + int64_t coeff; + + if (xInc > 1<<16) + d= d*dstW/srcW; + floatd= d * (1.0/(1<<30)); + + if (flags & SWS_BICUBIC) + { + int64_t B= (param[0] != SWS_PARAM_DEFAULT ? param[0] : 0) * (1<<24); + int64_t C= (param[1] != SWS_PARAM_DEFAULT ? 
param[1] : 0.6) * (1<<24); + int64_t dd = ( d*d)>>30; + int64_t ddd= (dd*d)>>30; + + if (d < 1LL<<30) + coeff = (12*(1<<24)-9*B-6*C)*ddd + (-18*(1<<24)+12*B+6*C)*dd + (6*(1<<24)-2*B)*(1<<30); + else if (d < 1LL<<31) + coeff = (-B-6*C)*ddd + (6*B+30*C)*dd + (-12*B-48*C)*d + (8*B+24*C)*(1<<30); + else + coeff=0.0; + coeff *= fone>>(30+24); + } +/* else if (flags & SWS_X) + { + double p= param ? param*0.01 : 0.3; + coeff = d ? sin(d*PI)/(d*PI) : 1.0; + coeff*= pow(2.0, - p*d*d); + }*/ + else if (flags & SWS_X) + { + double A= param[0] != SWS_PARAM_DEFAULT ? param[0] : 1.0; + double c; + + if (floatd<1.0) + c = cos(floatd*PI); + else + c=-1.0; + if (c<0.0) c= -pow(-c, A); + else c= pow( c, A); + coeff= (c*0.5 + 0.5)*fone; + } + else if (flags & SWS_AREA) + { + int64_t d2= d - (1<<29); + if (d2*xInc < -(1LL<<(29+16))) coeff= 1.0 * (1LL<<(30+16)); + else if (d2*xInc < (1LL<<(29+16))) coeff= -d2*xInc + (1LL<<(29+16)); + else coeff=0.0; + coeff *= fone>>(30+16); + } + else if (flags & SWS_GAUSS) + { + double p= param[0] != SWS_PARAM_DEFAULT ? param[0] : 3.0; + coeff = (pow(2.0, - p*floatd*floatd))*fone; + } + else if (flags & SWS_SINC) + { + coeff = (d ? sin(floatd*PI)/(floatd*PI) : 1.0)*fone; + } + else if (flags & SWS_LANCZOS) + { + double p= param[0] != SWS_PARAM_DEFAULT ? param[0] : 3.0; + coeff = (d ? sin(floatd*PI)*sin(floatd*PI/p)/(floatd*floatd*PI*PI/p) : 1.0)*fone; + if (floatd>p) coeff=0; + } + else if (flags & SWS_BILINEAR) + { + coeff= (1<<30) - d; + if (coeff<0) coeff=0; + coeff *= fone >> 30; + } + else if (flags & SWS_SPLINE) + { + double p=-2.196152422706632; + coeff = getSplineCoeff(1.0, 0.0, p, -p-1.0, floatd) * fone; + } + else { + coeff= 0.0; //GCC warning killer + assert(0); + } + + filter[i*filterSize + j]= coeff; + xx++; + } + xDstInSrc+= 2*xInc; + } + } + + /* apply src & dst Filter to filter -> filter2 + av_free(filter); + */ + assert(filterSize>0); + filter2Size= filterSize; + if (srcFilter) filter2Size+= srcFilter->length - 1; + if (dstFilter) filter2Size+= dstFilter->length - 1; + assert(filter2Size>0); + filter2= av_mallocz(filter2Size*dstW*sizeof(*filter2)); + + for (i=0; i<dstW; i++) + { + int j, k; + + if(srcFilter){ + for (k=0; k<srcFilter->length; k++){ + for (j=0; j<filterSize; j++) + filter2[i*filter2Size + k + j] += srcFilter->coeff[k]*filter[i*filterSize + j]; + } + }else{ + for (j=0; j<filterSize; j++) + filter2[i*filter2Size + j]= filter[i*filterSize + j]; + } + //FIXME dstFilter + + (*filterPos)[i]+= (filterSize-1)/2 - (filter2Size-1)/2; + } + av_freep(&filter); + + /* try to reduce the filter-size (step1 find size and shift left) */ + // Assume it is near normalized (*0.5 or *2.0 is OK but * 0.001 is not). 
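+ /* Editorial note, not from the original source: the pass below trims near-zero taps from both ends of each row of filter2. Leading taps are dropped by shifting the row left and bumping (*filterPos)[i] to compensate, for as long as the accumulated magnitude of the dropped taps stays below SWS_MAX_REDUCE_CUTOFF*fone (fone = 1LL<<54 acting as fixed-point 1.0); the loop also stops early rather than make filterPos non-monotonic, which the scaler core cannot handle. + Trailing taps are only counted, not moved, and minFilterSize ends up as the largest tap count any output pixel still needs before alignment. */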
+ minFilterSize= 0; + for (i=dstW-1; i>=0; i--) + { + int min= filter2Size; + int j; + int64_t cutOff=0.0; + + /* get rid of near-zero elements on the left by shifting left */ + for (j=0; j<filter2Size; j++) + { + int k; + cutOff += FFABS(filter2[i*filter2Size]); + + if (cutOff > SWS_MAX_REDUCE_CUTOFF*fone) break; + + /* preserve monotonicity because the core can't handle the filter otherwise */ + if (i<dstW-1 && (*filterPos)[i] >= (*filterPos)[i+1]) break; + + // move filter coefficients left + for (k=1; k<filter2Size; k++) + filter2[i*filter2Size + k - 1]= filter2[i*filter2Size + k]; + filter2[i*filter2Size + k - 1]= 0; + (*filterPos)[i]++; + } + + cutOff=0; + /* count near zeros on the right */ + for (j=filter2Size-1; j>0; j--) + { + cutOff += FFABS(filter2[i*filter2Size + j]); + + if (cutOff > SWS_MAX_REDUCE_CUTOFF*fone) break; + min--; + } + + if (min>minFilterSize) minFilterSize= min; + } + + if (flags & SWS_CPU_CAPS_ALTIVEC) { + // we can handle the special case 4, + // so we don't want to go to the full 8 + if (minFilterSize < 5) + filterAlign = 4; + + // We really don't want to waste our time + // doing useless computation, so fall back on + // the scalar C code for very small filters. + // Vectorizing is worth it only if you have a + // decent-sized vector. + if (minFilterSize < 3) + filterAlign = 1; + } + + if (flags & SWS_CPU_CAPS_MMX) { + // special case for unscaled vertical filtering + if (minFilterSize == 1 && filterAlign == 2) + filterAlign= 1; + } + + assert(minFilterSize > 0); + filterSize= (minFilterSize +(filterAlign-1)) & (~(filterAlign-1)); + assert(filterSize > 0); + filter= av_malloc(filterSize*dstW*sizeof(*filter)); + if (filterSize >= MAX_FILTER_SIZE*16/((flags&SWS_ACCURATE_RND) ? APCK_SIZE : 16) || !filter) + goto error; + *outFilterSize= filterSize; + + if (flags&SWS_PRINT_INFO) + av_log(NULL, AV_LOG_VERBOSE, "SwScaler: reducing / aligning filtersize %d -> %d\n", filter2Size, filterSize); + /* try to reduce the filter-size (step2 reduce it) */ + for (i=0; i<dstW; i++) + { + int j; + + for (j=0; j<filterSize; j++) + { + if (j>=filter2Size) filter[i*filterSize + j]= 0; + else filter[i*filterSize + j]= filter2[i*filter2Size + j]; + if((flags & SWS_BITEXACT) && j>=minFilterSize) + filter[i*filterSize + j]= 0; + } + } + + + //FIXME try to align filterPos if possible + + //fix borders + for (i=0; i<dstW; i++) + { + int j; + if ((*filterPos)[i] < 0) + { + // move filter coefficients left to compensate for filterPos + for (j=1; j<filterSize; j++) + { + int left= FFMAX(j + (*filterPos)[i], 0); + filter[i*filterSize + left] += filter[i*filterSize + j]; + filter[i*filterSize + j]=0; + } + (*filterPos)[i]= 0; + } + + if ((*filterPos)[i] + filterSize > srcW) + { + int shift= (*filterPos)[i] + filterSize - srcW; + // move filter coefficients right to compensate for filterPos + for (j=filterSize-2; j>=0; j--) + { + int right= FFMIN(j + shift, filterSize-1); + filter[i*filterSize +right] += filter[i*filterSize +j]; + filter[i*filterSize +j]=0; + } + (*filterPos)[i]= srcW - filterSize; + } + } + + // Note the +1 is for the MMX scaler which reads over the end + /* align at 16 for AltiVec (needed by hScale_altivec_real) */ + *outFilter= av_mallocz(*outFilterSize*(dstW+1)*sizeof(int16_t)); + + /* normalize & store in outFilter */ + for (i=0; i<dstW; i++) + { + int j; + int64_t error=0; + int64_t sum=0; + + for (j=0; j<filterSize; j++) + { + sum+= filter[i*filterSize + j]; + } + sum= (sum + one/2)/ one; + for (j=0; j<*outFilterSize; j++) + { + int64_t v= filter[i*filterSize + j] + 
error; + int intV= ROUNDED_DIV(v, sum); + (*outFilter)[i*(*outFilterSize) + j]= intV; + error= v - intV*sum; + } + } + + (*filterPos)[dstW]= (*filterPos)[dstW-1]; // the MMX scaler will read over the end + for (i=0; i<*outFilterSize; i++) + { + int j= dstW*(*outFilterSize); + (*outFilter)[j + i]= (*outFilter)[j + i - (*outFilterSize)]; + } + + ret=0; +error: + av_free(filter); + av_free(filter2); + return ret; +} + +#ifdef COMPILE_MMX2 +static void initMMX2HScaler(int dstW, int xInc, uint8_t *funnyCode, int16_t *filter, int32_t *filterPos, int numSplits) +{ + uint8_t *fragmentA; + long imm8OfPShufW1A; + long imm8OfPShufW2A; + long fragmentLengthA; + uint8_t *fragmentB; + long imm8OfPShufW1B; + long imm8OfPShufW2B; + long fragmentLengthB; + int fragmentPos; + + int xpos, i; + + // create an optimized horizontal scaling routine + + //code fragment + + __asm__ volatile( + "jmp 9f \n\t" + // Begin + "0: \n\t" + "movq (%%"REG_d", %%"REG_a"), %%mm3 \n\t" + "movd (%%"REG_c", %%"REG_S"), %%mm0 \n\t" + "movd 1(%%"REG_c", %%"REG_S"), %%mm1 \n\t" + "punpcklbw %%mm7, %%mm1 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "pshufw $0xFF, %%mm1, %%mm1 \n\t" + "1: \n\t" + "pshufw $0xFF, %%mm0, %%mm0 \n\t" + "2: \n\t" + "psubw %%mm1, %%mm0 \n\t" + "movl 8(%%"REG_b", %%"REG_a"), %%esi \n\t" + "pmullw %%mm3, %%mm0 \n\t" + "psllw $7, %%mm1 \n\t" + "paddw %%mm1, %%mm0 \n\t" + + "movq %%mm0, (%%"REG_D", %%"REG_a") \n\t" + + "add $8, %%"REG_a" \n\t" + // End + "9: \n\t" +// "int $3 \n\t" + "lea " LOCAL_MANGLE(0b) ", %0 \n\t" + "lea " LOCAL_MANGLE(1b) ", %1 \n\t" + "lea " LOCAL_MANGLE(2b) ", %2 \n\t" + "dec %1 \n\t" + "dec %2 \n\t" + "sub %0, %1 \n\t" + "sub %0, %2 \n\t" + "lea " LOCAL_MANGLE(9b) ", %3 \n\t" + "sub %0, %3 \n\t" + + + :"=r" (fragmentA), "=r" (imm8OfPShufW1A), "=r" (imm8OfPShufW2A), + "=r" (fragmentLengthA) + ); + + __asm__ volatile( + "jmp 9f \n\t" + // Begin + "0: \n\t" + "movq (%%"REG_d", %%"REG_a"), %%mm3 \n\t" + "movd (%%"REG_c", %%"REG_S"), %%mm0 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "pshufw $0xFF, %%mm0, %%mm1 \n\t" + "1: \n\t" + "pshufw $0xFF, %%mm0, %%mm0 \n\t" + "2: \n\t" + "psubw %%mm1, %%mm0 \n\t" + "movl 8(%%"REG_b", %%"REG_a"), %%esi \n\t" + "pmullw %%mm3, %%mm0 \n\t" + "psllw $7, %%mm1 \n\t" + "paddw %%mm1, %%mm0 \n\t" + + "movq %%mm0, (%%"REG_D", %%"REG_a") \n\t" + + "add $8, %%"REG_a" \n\t" + // End + "9: \n\t" +// "int $3 \n\t" + "lea " LOCAL_MANGLE(0b) ", %0 \n\t" + "lea " LOCAL_MANGLE(1b) ", %1 \n\t" + "lea " LOCAL_MANGLE(2b) ", %2 \n\t" + "dec %1 \n\t" + "dec %2 \n\t" + "sub %0, %1 \n\t" + "sub %0, %2 \n\t" + "lea " LOCAL_MANGLE(9b) ", %3 \n\t" + "sub %0, %3 \n\t" + + + :"=r" (fragmentB), "=r" (imm8OfPShufW1B), "=r" (imm8OfPShufW2B), + "=r" (fragmentLengthB) + ); + + xpos= 0; //lumXInc/2 - 0x8000; // difference between pixel centers + fragmentPos=0; + + for (i=0; i<dstW/numSplits; i++) + { + int xx=xpos>>16; + + if ((i&3) == 0) + { + int a=0; + int b=((xpos+xInc)>>16) - xx; + int c=((xpos+xInc*2)>>16) - xx; + int d=((xpos+xInc*3)>>16) - xx; + + filter[i ] = (( xpos & 0xFFFF) ^ 0xFFFF)>>9; + filter[i+1] = (((xpos+xInc ) & 0xFFFF) ^ 0xFFFF)>>9; + filter[i+2] = (((xpos+xInc*2) & 0xFFFF) ^ 0xFFFF)>>9; + filter[i+3] = (((xpos+xInc*3) & 0xFFFF) ^ 0xFFFF)>>9; + filterPos[i/2]= xx; + + if (d+1<4) + { + int maxShift= 3-(d+1); + int shift=0; + + memcpy(funnyCode + fragmentPos, fragmentB, fragmentLengthB); + + funnyCode[fragmentPos + imm8OfPShufW1B]= + (a+1) | ((b+1)<<2) | ((c+1)<<4) | ((d+1)<<6); + funnyCode[fragmentPos + imm8OfPShufW2B]= + a | (b<<2) | (c<<4) | (d<<6); + + if (i+3>=dstW) 
shift=maxShift; //avoid overread + else if ((filterPos[i/2]&3) <= maxShift) shift=filterPos[i/2]&3; //Align + + if (shift && i>=shift) + { + funnyCode[fragmentPos + imm8OfPShufW1B]+= 0x55*shift; + funnyCode[fragmentPos + imm8OfPShufW2B]+= 0x55*shift; + filterPos[i/2]-=shift; + } + + fragmentPos+= fragmentLengthB; + } + else + { + int maxShift= 3-d; + int shift=0; + + memcpy(funnyCode + fragmentPos, fragmentA, fragmentLengthA); + + funnyCode[fragmentPos + imm8OfPShufW1A]= + funnyCode[fragmentPos + imm8OfPShufW2A]= + a | (b<<2) | (c<<4) | (d<<6); + + if (i+4>=dstW) shift=maxShift; //avoid overread + else if ((filterPos[i/2]&3) <= maxShift) shift=filterPos[i/2]&3; //partial align + + if (shift && i>=shift) + { + funnyCode[fragmentPos + imm8OfPShufW1A]+= 0x55*shift; + funnyCode[fragmentPos + imm8OfPShufW2A]+= 0x55*shift; + filterPos[i/2]-=shift; + } + + fragmentPos+= fragmentLengthA; + } + + funnyCode[fragmentPos]= RET; + } + xpos+=xInc; + } + filterPos[i/2]= xpos>>16; // needed to jump to the next part +} +#endif /* COMPILE_MMX2 */ + +static void globalInit(void){ + // generating tables: + int i; + for (i=0; i<768; i++){ + int c= av_clip_uint8(i-256); + clip_table[i]=c; + } +} + +static SwsFunc getSwsFunc(int flags){ + +#if defined(RUNTIME_CPUDETECT) && CONFIG_GPL +#if ARCH_X86 + // ordered per speed fastest first + if (flags & SWS_CPU_CAPS_MMX2) + return swScale_MMX2; + else if (flags & SWS_CPU_CAPS_3DNOW) + return swScale_3DNow; + else if (flags & SWS_CPU_CAPS_MMX) + return swScale_MMX; + else + return swScale_C; + +#else +#if ARCH_PPC + if (flags & SWS_CPU_CAPS_ALTIVEC) + return swScale_altivec; + else + return swScale_C; +#endif + return swScale_C; +#endif /* ARCH_X86 */ +#else //RUNTIME_CPUDETECT +#if HAVE_MMX2 + return swScale_MMX2; +#elif HAVE_AMD3DNOW + return swScale_3DNow; +#elif HAVE_MMX + return swScale_MMX; +#elif HAVE_ALTIVEC + return swScale_altivec; +#else + return swScale_C; +#endif +#endif //!RUNTIME_CPUDETECT +} + +static int PlanarToNV12Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dstParam[], int dstStride[]){ + uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY; + /* Copy Y plane */ + if (dstStride[0]==srcStride[0] && srcStride[0] > 0) + memcpy(dst, src[0], srcSliceH*dstStride[0]); + else + { + int i; + uint8_t *srcPtr= src[0]; + uint8_t *dstPtr= dst; + for (i=0; i<srcSliceH; i++) + { + memcpy(dstPtr, srcPtr, c->srcW); + srcPtr+= srcStride[0]; + dstPtr+= dstStride[0]; + } + } + dst = dstParam[1] + dstStride[1]*srcSliceY/2; + if (c->dstFormat == PIX_FMT_NV12) + interleaveBytes(src[1], src[2], dst, c->srcW/2, srcSliceH/2, srcStride[1], srcStride[2], dstStride[0]); + else + interleaveBytes(src[2], src[1], dst, c->srcW/2, srcSliceH/2, srcStride[2], srcStride[1], dstStride[0]); + + return srcSliceH; +} + +static int PlanarToYuy2Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dstParam[], int dstStride[]){ + uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY; + + yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]); + + return srcSliceH; +} + +static int PlanarToUyvyWrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dstParam[], int dstStride[]){ + uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY; + + yv12touyvy(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]); + + return srcSliceH; +} + +static int YUV422PToYuy2Wrapper(SwsContext *c, uint8_t* 
src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dstParam[], int dstStride[]){ + uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY; + + yuv422ptoyuy2(src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0]); + + return srcSliceH; +} + +static int YUV422PToUyvyWrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dstParam[], int dstStride[]){ + uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY; + + yuv422ptouyvy(src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0]); + + return srcSliceH; +} + +static int pal2rgbWrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + const enum PixelFormat srcFormat= c->srcFormat; + const enum PixelFormat dstFormat= c->dstFormat; + void (*conv)(const uint8_t *src, uint8_t *dst, long num_pixels, + const uint8_t *palette)=NULL; + int i; + uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY; + uint8_t *srcPtr= src[0]; + + if (!usePal(srcFormat)) + av_log(c, AV_LOG_ERROR, "internal error %s -> %s converter\n", + sws_format_name(srcFormat), sws_format_name(dstFormat)); + + switch(dstFormat){ + case PIX_FMT_RGB32 : conv = palette8topacked32; break; + case PIX_FMT_BGR32 : conv = palette8topacked32; break; + case PIX_FMT_BGR32_1: conv = palette8topacked32; break; + case PIX_FMT_RGB32_1: conv = palette8topacked32; break; + case PIX_FMT_RGB24 : conv = palette8topacked24; break; + case PIX_FMT_BGR24 : conv = palette8topacked24; break; + default: av_log(c, AV_LOG_ERROR, "internal error %s -> %s converter\n", + sws_format_name(srcFormat), sws_format_name(dstFormat)); break; + } + + + for (i=0; i<srcSliceH; i++) { + conv(srcPtr, dstPtr, c->srcW, (uint8_t *) c->pal_rgb); + srcPtr+= srcStride[0]; + dstPtr+= dstStride[0]; + } + + return srcSliceH; +} + +/* {RGB,BGR}{15,16,24,32,32_1} -> {RGB,BGR}{15,16,24,32} */ +static int rgb2rgbWrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + const enum PixelFormat srcFormat= c->srcFormat; + const enum PixelFormat dstFormat= c->dstFormat; + const int srcBpp= (fmt_depth(srcFormat) + 7) >> 3; + const int dstBpp= (fmt_depth(dstFormat) + 7) >> 3; + const int srcId= fmt_depth(srcFormat) >> 2; /* 1:0, 4:1, 8:2, 15:3, 16:4, 24:6, 32:8 */ + const int dstId= fmt_depth(dstFormat) >> 2; + void (*conv)(const uint8_t *src, uint8_t *dst, long src_size)=NULL; + + /* BGR -> BGR */ + if ( (isBGR(srcFormat) && isBGR(dstFormat)) + || (isRGB(srcFormat) && isRGB(dstFormat))){ + switch(srcId | (dstId<<4)){ + case 0x34: conv= rgb16to15; break; + case 0x36: conv= rgb24to15; break; + case 0x38: conv= rgb32to15; break; + case 0x43: conv= rgb15to16; break; + case 0x46: conv= rgb24to16; break; + case 0x48: conv= rgb32to16; break; + case 0x63: conv= rgb15to24; break; + case 0x64: conv= rgb16to24; break; + case 0x68: conv= rgb32to24; break; + case 0x83: conv= rgb15to32; break; + case 0x84: conv= rgb16to32; break; + case 0x86: conv= rgb24to32; break; + default: av_log(c, AV_LOG_ERROR, "internal error %s -> %s converter\n", + sws_format_name(srcFormat), sws_format_name(dstFormat)); break; + } + }else if ( (isBGR(srcFormat) && isRGB(dstFormat)) + || (isRGB(srcFormat) && isBGR(dstFormat))){ + switch(srcId | (dstId<<4)){ + case 0x33: conv= rgb15tobgr15; break; + case 0x34: conv= rgb16tobgr15; break; + case 0x36: conv= rgb24tobgr15; break; + case 0x38: conv= rgb32tobgr15; break; + case 0x43: conv= rgb15tobgr16; break; + case 
0x44: conv= rgb16tobgr16; break; + case 0x46: conv= rgb24tobgr16; break; + case 0x48: conv= rgb32tobgr16; break; + case 0x63: conv= rgb15tobgr24; break; + case 0x64: conv= rgb16tobgr24; break; + case 0x66: conv= rgb24tobgr24; break; + case 0x68: conv= rgb32tobgr24; break; + case 0x83: conv= rgb15tobgr32; break; + case 0x84: conv= rgb16tobgr32; break; + case 0x86: conv= rgb24tobgr32; break; + case 0x88: conv= rgb32tobgr32; break; + default: av_log(c, AV_LOG_ERROR, "internal error %s -> %s converter\n", + sws_format_name(srcFormat), sws_format_name(dstFormat)); break; + } + }else{ + av_log(c, AV_LOG_ERROR, "internal error %s -> %s converter\n", + sws_format_name(srcFormat), sws_format_name(dstFormat)); + } + + if(conv) + { + uint8_t *srcPtr= src[0]; + if(srcFormat == PIX_FMT_RGB32_1 || srcFormat == PIX_FMT_BGR32_1) + srcPtr += ALT32_CORR; + + if (dstStride[0]*srcBpp == srcStride[0]*dstBpp && srcStride[0] > 0) + conv(srcPtr, dst[0] + dstStride[0]*srcSliceY, srcSliceH*srcStride[0]); + else + { + int i; + uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY; + + for (i=0; i<srcSliceH; i++) + { + conv(srcPtr, dstPtr, c->srcW*srcBpp); + srcPtr+= srcStride[0]; + dstPtr+= dstStride[0]; + } + } + } + return srcSliceH; +} + +static int bgr24toyv12Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + + rgb24toyv12( + src[0], + dst[0]+ srcSliceY *dstStride[0], + dst[1]+(srcSliceY>>1)*dstStride[1], + dst[2]+(srcSliceY>>1)*dstStride[2], + c->srcW, srcSliceH, + dstStride[0], dstStride[1], srcStride[0]); + return srcSliceH; +} + +static int yvu9toyv12Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + int i; + + /* copy Y */ + if (srcStride[0]==dstStride[0] && srcStride[0] > 0) + memcpy(dst[0]+ srcSliceY*dstStride[0], src[0], srcStride[0]*srcSliceH); + else{ + uint8_t *srcPtr= src[0]; + uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY; + + for (i=0; i<srcSliceH; i++) + { + memcpy(dstPtr, srcPtr, c->srcW); + srcPtr+= srcStride[0]; + dstPtr+= dstStride[0]; + } + } + + if (c->dstFormat==PIX_FMT_YUV420P){ + planar2x(src[1], dst[1], c->chrSrcW, c->chrSrcH, srcStride[1], dstStride[1]); + planar2x(src[2], dst[2], c->chrSrcW, c->chrSrcH, srcStride[2], dstStride[2]); + }else{ + planar2x(src[1], dst[2], c->chrSrcW, c->chrSrcH, srcStride[1], dstStride[2]); + planar2x(src[2], dst[1], c->chrSrcW, c->chrSrcH, srcStride[2], dstStride[1]); + } + return srcSliceH; +} + +/* unscaled copy like stuff (assumes nearly identical formats) */ +static int packedCopy(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]) +{ + if (dstStride[0]==srcStride[0] && srcStride[0] > 0) + memcpy(dst[0] + dstStride[0]*srcSliceY, src[0], srcSliceH*dstStride[0]); + else + { + int i; + uint8_t *srcPtr= src[0]; + uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY; + int length=0; + + /* universal length finder */ + while(length+c->srcW <= FFABS(dstStride[0]) + && length+c->srcW <= FFABS(srcStride[0])) length+= c->srcW; + assert(length!=0); + + for (i=0; i<srcSliceH; i++) + { + memcpy(dstPtr, srcPtr, length); + srcPtr+= srcStride[0]; + dstPtr+= dstStride[0]; + } + } + return srcSliceH; +} + +static int planarCopy(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]) +{ + int plane; + for (plane=0; plane<3; plane++) + { + int length= plane==0 ? 
c->srcW : -((-c->srcW )>>c->chrDstHSubSample); + int y= plane==0 ? srcSliceY: -((-srcSliceY)>>c->chrDstVSubSample); + int height= plane==0 ? srcSliceH: -((-srcSliceH)>>c->chrDstVSubSample); + + if ((isGray(c->srcFormat) || isGray(c->dstFormat)) && plane>0) + { + if (!isGray(c->dstFormat)) + memset(dst[plane], 128, dstStride[plane]*height); + } + else + { + if (dstStride[plane]==srcStride[plane] && srcStride[plane] > 0) + memcpy(dst[plane] + dstStride[plane]*y, src[plane], height*dstStride[plane]); + else + { + int i; + uint8_t *srcPtr= src[plane]; + uint8_t *dstPtr= dst[plane] + dstStride[plane]*y; + for (i=0; i<height; i++) + { + memcpy(dstPtr, srcPtr, length); + srcPtr+= srcStride[plane]; + dstPtr+= dstStride[plane]; + } + } + } + } + return srcSliceH; +} + +static int gray16togray(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + + int length= c->srcW; + int y= srcSliceY; + int height= srcSliceH; + int i, j; + uint8_t *srcPtr= src[0]; + uint8_t *dstPtr= dst[0] + dstStride[0]*y; + + if (!isGray(c->dstFormat)){ + int height= -((-srcSliceH)>>c->chrDstVSubSample); + memset(dst[1], 128, dstStride[1]*height); + memset(dst[2], 128, dstStride[2]*height); + } + if (c->srcFormat == PIX_FMT_GRAY16LE) srcPtr++; + for (i=0; i<height; i++) + { + for (j=0; j<length; j++) dstPtr[j] = srcPtr[j<<1]; + srcPtr+= srcStride[0]; + dstPtr+= dstStride[0]; + } + return srcSliceH; +} + +static int graytogray16(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + + int length= c->srcW; + int y= srcSliceY; + int height= srcSliceH; + int i, j; + uint8_t *srcPtr= src[0]; + uint8_t *dstPtr= dst[0] + dstStride[0]*y; + for (i=0; i<height; i++) + { + for (j=0; j<length; j++) + { + dstPtr[j<<1] = srcPtr[j]; + dstPtr[(j<<1)+1] = srcPtr[j]; + } + srcPtr+= srcStride[0]; + dstPtr+= dstStride[0]; + } + return srcSliceH; +} + +static int gray16swap(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + + int length= c->srcW; + int y= srcSliceY; + int height= srcSliceH; + int i, j; + uint16_t *srcPtr= (uint16_t*)src[0]; + uint16_t *dstPtr= (uint16_t*)(dst[0] + dstStride[0]*y/2); + for (i=0; i<height; i++) + { + for (j=0; j<length; j++) dstPtr[j] = bswap_16(srcPtr[j]); + srcPtr+= srcStride[0]/2; + dstPtr+= dstStride[0]/2; + } + return srcSliceH; +} + + +static void getSubSampleFactors(int *h, int *v, int format){ + switch(format){ + case PIX_FMT_UYVY422: + case PIX_FMT_YUYV422: + *h=1; + *v=0; + break; + case PIX_FMT_YUV420P: + case PIX_FMT_YUVA420P: + case PIX_FMT_GRAY16BE: + case PIX_FMT_GRAY16LE: + case PIX_FMT_GRAY8: //FIXME remove after different subsamplings are fully implemented + case PIX_FMT_NV12: + case PIX_FMT_NV21: + *h=1; + *v=1; + break; + case PIX_FMT_YUV440P: + *h=0; + *v=1; + break; + case PIX_FMT_YUV410P: + *h=2; + *v=2; + break; + case PIX_FMT_YUV444P: + *h=0; + *v=0; + break; + case PIX_FMT_YUV422P: + *h=1; + *v=0; + break; + case PIX_FMT_YUV411P: + *h=2; + *v=0; + break; + default: + *h=0; + *v=0; + break; + } +} + +static uint16_t roundToInt16(int64_t f){ + int r= (f + (1<<15))>>16; + if (r<-0x7FFF) return 0x8000; + else if (r> 0x7FFF) return 0x7FFF; + else return r; +} + +/** + * @param inv_table the yuv2rgb coefficients, normally ff_yuv2rgb_coeffs[x] + * @param fullRange if 1 then the luma range is 0..255 if 0 it is 16..235 + * @return -1 if not supported + */ +int sws_setColorspaceDetails(SwsContext *c, const int 
inv_table[4], int srcRange, const int table[4], int dstRange, int brightness, int contrast, int saturation){ + int64_t crv = inv_table[0]; + int64_t cbu = inv_table[1]; + int64_t cgu = -inv_table[2]; + int64_t cgv = -inv_table[3]; + int64_t cy = 1<<16; + int64_t oy = 0; + + memcpy(c->srcColorspaceTable, inv_table, sizeof(int)*4); + memcpy(c->dstColorspaceTable, table, sizeof(int)*4); + + c->brightness= brightness; + c->contrast = contrast; + c->saturation= saturation; + c->srcRange = srcRange; + c->dstRange = dstRange; + if (isYUV(c->dstFormat) || isGray(c->dstFormat)) return 0; + + c->uOffset= 0x0400040004000400LL; + c->vOffset= 0x0400040004000400LL; + + if (!srcRange){ + cy= (cy*255) / 219; + oy= 16<<16; + }else{ + crv= (crv*224) / 255; + cbu= (cbu*224) / 255; + cgu= (cgu*224) / 255; + cgv= (cgv*224) / 255; + } + + cy = (cy *contrast )>>16; + crv= (crv*contrast * saturation)>>32; + cbu= (cbu*contrast * saturation)>>32; + cgu= (cgu*contrast * saturation)>>32; + cgv= (cgv*contrast * saturation)>>32; + + oy -= 256*brightness; + + c->yCoeff= roundToInt16(cy *8192) * 0x0001000100010001ULL; + c->vrCoeff= roundToInt16(crv*8192) * 0x0001000100010001ULL; + c->ubCoeff= roundToInt16(cbu*8192) * 0x0001000100010001ULL; + c->vgCoeff= roundToInt16(cgv*8192) * 0x0001000100010001ULL; + c->ugCoeff= roundToInt16(cgu*8192) * 0x0001000100010001ULL; + c->yOffset= roundToInt16(oy * 8) * 0x0001000100010001ULL; + + c->yuv2rgb_y_coeff = (int16_t)roundToInt16(cy <<13); + c->yuv2rgb_y_offset = (int16_t)roundToInt16(oy << 9); + c->yuv2rgb_v2r_coeff= (int16_t)roundToInt16(crv<<13); + c->yuv2rgb_v2g_coeff= (int16_t)roundToInt16(cgv<<13); + c->yuv2rgb_u2g_coeff= (int16_t)roundToInt16(cgu<<13); + c->yuv2rgb_u2b_coeff= (int16_t)roundToInt16(cbu<<13); + + sws_yuv2rgb_c_init_tables(c, inv_table, srcRange, brightness, contrast, saturation); + //FIXME factorize + +#ifdef COMPILE_ALTIVEC + if (c->flags & SWS_CPU_CAPS_ALTIVEC) + sws_yuv2rgb_altivec_init_tables (c, inv_table, brightness, contrast, saturation); +#endif + return 0; +} + +/** + * @return -1 if not supported + */ +int sws_getColorspaceDetails(SwsContext *c, int **inv_table, int *srcRange, int **table, int *dstRange, int *brightness, int *contrast, int *saturation){ + if (isYUV(c->dstFormat) || isGray(c->dstFormat)) return -1; + + *inv_table = c->srcColorspaceTable; + *table = c->dstColorspaceTable; + *srcRange = c->srcRange; + *dstRange = c->dstRange; + *brightness= c->brightness; + *contrast = c->contrast; + *saturation= c->saturation; + + return 0; +} + +static int handle_jpeg(enum PixelFormat *format) +{ + switch (*format) { + case PIX_FMT_YUVJ420P: + *format = PIX_FMT_YUV420P; + return 1; + case PIX_FMT_YUVJ422P: + *format = PIX_FMT_YUV422P; + return 1; + case PIX_FMT_YUVJ444P: + *format = PIX_FMT_YUV444P; + return 1; + case PIX_FMT_YUVJ440P: + *format = PIX_FMT_YUV440P; + return 1; + default: + return 0; + } +} + +SwsContext *sws_getContext(int srcW, int srcH, enum PixelFormat srcFormat, int dstW, int dstH, enum PixelFormat dstFormat, int flags, + SwsFilter *srcFilter, SwsFilter *dstFilter, double *param){ + + SwsContext *c; + int i; + int usesVFilter, usesHFilter; + int unscaled, needsDither; + int srcRange, dstRange; + SwsFilter dummyFilter= {NULL, NULL, NULL, NULL}; +#if ARCH_X86 + if (flags & SWS_CPU_CAPS_MMX) + __asm__ volatile("emms\n\t"::: "memory"); +#endif + +#if !defined(RUNTIME_CPUDETECT) || !CONFIG_GPL //ensure that the flags match the compiled variant if cpudetect is off + flags &= 
~(SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_MMX2|SWS_CPU_CAPS_3DNOW|SWS_CPU_CAPS_ALTIVEC|SWS_CPU_CAPS_BFIN); +#if HAVE_MMX2 + flags |= SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_MMX2; +#elif HAVE_AMD3DNOW + flags |= SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_3DNOW; +#elif HAVE_MMX + flags |= SWS_CPU_CAPS_MMX; +#elif HAVE_ALTIVEC + flags |= SWS_CPU_CAPS_ALTIVEC; +#elif ARCH_BFIN + flags |= SWS_CPU_CAPS_BFIN; +#endif +#endif /* RUNTIME_CPUDETECT */ + if (clip_table[512] != 255) globalInit(); + if (!rgb15to16) sws_rgb2rgb_init(flags); + + unscaled = (srcW == dstW && srcH == dstH); + needsDither= (isBGR(dstFormat) || isRGB(dstFormat)) + && (fmt_depth(dstFormat))<24 + && ((fmt_depth(dstFormat))<(fmt_depth(srcFormat)) || (!(isRGB(srcFormat) || isBGR(srcFormat)))); + + srcRange = handle_jpeg(&srcFormat); + dstRange = handle_jpeg(&dstFormat); + + if (!isSupportedIn(srcFormat)) + { + av_log(NULL, AV_LOG_ERROR, "swScaler: %s is not supported as input pixel format\n", sws_format_name(srcFormat)); + return NULL; + } + if (!isSupportedOut(dstFormat)) + { + av_log(NULL, AV_LOG_ERROR, "swScaler: %s is not supported as output pixel format\n", sws_format_name(dstFormat)); + return NULL; + } + + i= flags & ( SWS_POINT + |SWS_AREA + |SWS_BILINEAR + |SWS_FAST_BILINEAR + |SWS_BICUBIC + |SWS_X + |SWS_GAUSS + |SWS_LANCZOS + |SWS_SINC + |SWS_SPLINE + |SWS_BICUBLIN); + if(!i || (i & (i-1))) + { + av_log(NULL, AV_LOG_ERROR, "swScaler: Exactly one scaler algorithm must be chosen\n"); + return NULL; + } + + /* sanity check */ + if (srcW<4 || srcH<1 || dstW<8 || dstH<1) //FIXME check if these are enough and try to lower them after fixing the relevant parts of the code + { + av_log(NULL, AV_LOG_ERROR, "swScaler: %dx%d -> %dx%d is invalid scaling dimension\n", + srcW, srcH, dstW, dstH); + return NULL; + } + if(srcW > VOFW || dstW > VOFW){ + av_log(NULL, AV_LOG_ERROR, "swScaler: Compile-time maximum width is "AV_STRINGIFY(VOFW)" change VOF/VOFW and recompile\n"); + return NULL; + } + + if (!dstFilter) dstFilter= &dummyFilter; + if (!srcFilter) srcFilter= &dummyFilter; + + c= av_mallocz(sizeof(SwsContext)); + + c->av_class = &sws_context_class; + c->srcW= srcW; + c->srcH= srcH; + c->dstW= dstW; + c->dstH= dstH; + c->lumXInc= ((srcW<<16) + (dstW>>1))/dstW; + c->lumYInc= ((srcH<<16) + (dstH>>1))/dstH; + c->flags= flags; + c->dstFormat= dstFormat; + c->srcFormat= srcFormat; + c->vRounder= 4* 0x0001000100010001ULL; + + usesHFilter= usesVFilter= 0; + if (dstFilter->lumV && dstFilter->lumV->length>1) usesVFilter=1; + if (dstFilter->lumH && dstFilter->lumH->length>1) usesHFilter=1; + if (dstFilter->chrV && dstFilter->chrV->length>1) usesVFilter=1; + if (dstFilter->chrH && dstFilter->chrH->length>1) usesHFilter=1; + if (srcFilter->lumV && srcFilter->lumV->length>1) usesVFilter=1; + if (srcFilter->lumH && srcFilter->lumH->length>1) usesHFilter=1; + if (srcFilter->chrV && srcFilter->chrV->length>1) usesVFilter=1; + if (srcFilter->chrH && srcFilter->chrH->length>1) usesHFilter=1; + + getSubSampleFactors(&c->chrSrcHSubSample, &c->chrSrcVSubSample, srcFormat); + getSubSampleFactors(&c->chrDstHSubSample, &c->chrDstVSubSample, dstFormat); + + // reuse chroma for 2 pixels RGB/BGR unless user wants full chroma interpolation + if ((isBGR(dstFormat) || isRGB(dstFormat)) && !(flags&SWS_FULL_CHR_H_INT)) c->chrDstHSubSample=1; + + // drop some chroma lines if the user wants it + c->vChrDrop= (flags&SWS_SRC_V_CHR_DROP_MASK)>>SWS_SRC_V_CHR_DROP_SHIFT; + c->chrSrcVSubSample+= c->vChrDrop; + + // drop every other pixel for chroma calculation unless user wants full chroma + if 
((isBGR(srcFormat) || isRGB(srcFormat)) && !(flags&SWS_FULL_CHR_H_INP) + && srcFormat!=PIX_FMT_RGB8 && srcFormat!=PIX_FMT_BGR8 + && srcFormat!=PIX_FMT_RGB4 && srcFormat!=PIX_FMT_BGR4 + && srcFormat!=PIX_FMT_RGB4_BYTE && srcFormat!=PIX_FMT_BGR4_BYTE + && ((dstW>>c->chrDstHSubSample) <= (srcW>>1) || (flags&(SWS_FAST_BILINEAR|SWS_POINT)))) + c->chrSrcHSubSample=1; + + if (param){ + c->param[0] = param[0]; + c->param[1] = param[1]; + }else{ + c->param[0] = + c->param[1] = SWS_PARAM_DEFAULT; + } + + c->chrIntHSubSample= c->chrDstHSubSample; + c->chrIntVSubSample= c->chrSrcVSubSample; + + // Note the -((-x)>>y) is so that we always round toward +inf. + c->chrSrcW= -((-srcW) >> c->chrSrcHSubSample); + c->chrSrcH= -((-srcH) >> c->chrSrcVSubSample); + c->chrDstW= -((-dstW) >> c->chrDstHSubSample); + c->chrDstH= -((-dstH) >> c->chrDstVSubSample); + + sws_setColorspaceDetails(c, ff_yuv2rgb_coeffs[SWS_CS_DEFAULT], srcRange, ff_yuv2rgb_coeffs[SWS_CS_DEFAULT] /* FIXME*/, dstRange, 0, 1<<16, 1<<16); + + /* unscaled special cases */ + if (unscaled && !usesHFilter && !usesVFilter && (srcRange == dstRange || isBGR(dstFormat) || isRGB(dstFormat))) + { + /* yv12_to_nv12 */ + if ((srcFormat == PIX_FMT_YUV420P || srcFormat == PIX_FMT_YUVA420P) && (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21)) + { + c->swScale= PlanarToNV12Wrapper; + } + /* yuv2bgr */ + if ((srcFormat==PIX_FMT_YUV420P || srcFormat==PIX_FMT_YUV422P || srcFormat==PIX_FMT_YUVA420P) && (isBGR(dstFormat) || isRGB(dstFormat)) + && !(flags & SWS_ACCURATE_RND) && !(dstH&1)) + { + c->swScale= sws_yuv2rgb_get_func_ptr(c); + } + + if (srcFormat==PIX_FMT_YUV410P && dstFormat==PIX_FMT_YUV420P && !(flags & SWS_BITEXACT)) + { + c->swScale= yvu9toyv12Wrapper; + } + + /* bgr24toYV12 */ + if (srcFormat==PIX_FMT_BGR24 && dstFormat==PIX_FMT_YUV420P && !(flags & SWS_ACCURATE_RND)) + c->swScale= bgr24toyv12Wrapper; + + /* RGB/BGR -> RGB/BGR (no dither needed forms) */ + if ( (isBGR(srcFormat) || isRGB(srcFormat)) + && (isBGR(dstFormat) || isRGB(dstFormat)) + && srcFormat != PIX_FMT_BGR8 && dstFormat != PIX_FMT_BGR8 + && srcFormat != PIX_FMT_RGB8 && dstFormat != PIX_FMT_RGB8 + && srcFormat != PIX_FMT_BGR4 && dstFormat != PIX_FMT_BGR4 + && srcFormat != PIX_FMT_RGB4 && dstFormat != PIX_FMT_RGB4 + && srcFormat != PIX_FMT_BGR4_BYTE && dstFormat != PIX_FMT_BGR4_BYTE + && srcFormat != PIX_FMT_RGB4_BYTE && dstFormat != PIX_FMT_RGB4_BYTE + && srcFormat != PIX_FMT_MONOBLACK && dstFormat != PIX_FMT_MONOBLACK + && srcFormat != PIX_FMT_MONOWHITE && dstFormat != PIX_FMT_MONOWHITE + && dstFormat != PIX_FMT_RGB32_1 + && dstFormat != PIX_FMT_BGR32_1 + && (!needsDither || (c->flags&(SWS_FAST_BILINEAR|SWS_POINT)))) + c->swScale= rgb2rgbWrapper; + + if ((usePal(srcFormat) && ( + dstFormat == PIX_FMT_RGB32 || + dstFormat == PIX_FMT_RGB32_1 || + dstFormat == PIX_FMT_RGB24 || + dstFormat == PIX_FMT_BGR32 || + dstFormat == PIX_FMT_BGR32_1 || + dstFormat == PIX_FMT_BGR24))) + c->swScale= pal2rgbWrapper; + + if (srcFormat == PIX_FMT_YUV422P) + { + if (dstFormat == PIX_FMT_YUYV422) + c->swScale= YUV422PToYuy2Wrapper; + else if (dstFormat == PIX_FMT_UYVY422) + c->swScale= YUV422PToUyvyWrapper; + } + + /* LQ converters if -sws 0 or -sws 4*/ + if (c->flags&(SWS_FAST_BILINEAR|SWS_POINT)){ + /* yv12_to_yuy2 */ + if (srcFormat == PIX_FMT_YUV420P || srcFormat == PIX_FMT_YUVA420P) + { + if (dstFormat == PIX_FMT_YUYV422) + c->swScale= PlanarToYuy2Wrapper; + else if (dstFormat == PIX_FMT_UYVY422) + c->swScale= PlanarToUyvyWrapper; + } + } + +#ifdef COMPILE_ALTIVEC + if ((c->flags & 
SWS_CPU_CAPS_ALTIVEC) && + !(c->flags & SWS_BITEXACT) && + srcFormat == PIX_FMT_YUV420P) { + // unscaled YV12 -> packed YUV, we want speed + if (dstFormat == PIX_FMT_YUYV422) + c->swScale= yv12toyuy2_unscaled_altivec; + else if (dstFormat == PIX_FMT_UYVY422) + c->swScale= yv12touyvy_unscaled_altivec; + } +#endif + + /* simple copy */ + if ( srcFormat == dstFormat + || (srcFormat == PIX_FMT_YUVA420P && dstFormat == PIX_FMT_YUV420P) + || (isPlanarYUV(srcFormat) && isGray(dstFormat)) + || (isPlanarYUV(dstFormat) && isGray(srcFormat))) + { + if (isPacked(c->srcFormat)) + c->swScale= packedCopy; + else /* Planar YUV or gray */ + c->swScale= planarCopy; + } + + /* gray16{le,be} conversions */ + if (isGray16(srcFormat) && (isPlanarYUV(dstFormat) || (dstFormat == PIX_FMT_GRAY8))) + { + c->swScale= gray16togray; + } + if ((isPlanarYUV(srcFormat) || (srcFormat == PIX_FMT_GRAY8)) && isGray16(dstFormat)) + { + c->swScale= graytogray16; + } + if (srcFormat != dstFormat && isGray16(srcFormat) && isGray16(dstFormat)) + { + c->swScale= gray16swap; + } + +#if ARCH_BFIN + if (flags & SWS_CPU_CAPS_BFIN) + ff_bfin_get_unscaled_swscale (c); +#endif + + if (c->swScale){ + if (flags&SWS_PRINT_INFO) + av_log(c, AV_LOG_INFO, "using unscaled %s -> %s special converter\n", + sws_format_name(srcFormat), sws_format_name(dstFormat)); + return c; + } + } + + if (flags & SWS_CPU_CAPS_MMX2) + { + c->canMMX2BeUsed= (dstW >=srcW && (dstW&31)==0 && (srcW&15)==0) ? 1 : 0; + if (!c->canMMX2BeUsed && dstW >=srcW && (srcW&15)==0 && (flags&SWS_FAST_BILINEAR)) + { + if (flags&SWS_PRINT_INFO) + av_log(c, AV_LOG_INFO, "output width is not a multiple of 32 -> no MMX2 scaler\n"); + } + if (usesHFilter) c->canMMX2BeUsed=0; + } + else + c->canMMX2BeUsed=0; + + c->chrXInc= ((c->chrSrcW<<16) + (c->chrDstW>>1))/c->chrDstW; + c->chrYInc= ((c->chrSrcH<<16) + (c->chrDstH>>1))/c->chrDstH; + + // match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src to pixel n-2 of dst + // but only for the FAST_BILINEAR mode otherwise do correct scaling + // n-2 is the last chrominance sample available + // this is not perfect, but no one should notice the difference, the more correct variant + // would be like the vertical one, but that would require some special code for the + // first and last pixel + if (flags&SWS_FAST_BILINEAR) + { + if (c->canMMX2BeUsed) + { + c->lumXInc+= 20; + c->chrXInc+= 20; + } + //we don't use the x86 asm scaler if MMX is available + else if (flags & SWS_CPU_CAPS_MMX) + { + c->lumXInc = ((srcW-2)<<16)/(dstW-2) - 20; + c->chrXInc = ((c->chrSrcW-2)<<16)/(c->chrDstW-2) - 20; + } + } + + /* precalculate horizontal scaler filter coefficients */ + { + const int filterAlign= + (flags & SWS_CPU_CAPS_MMX) ? 4 : + (flags & SWS_CPU_CAPS_ALTIVEC) ? 8 : + 1; + + initFilter(&c->hLumFilter, &c->hLumFilterPos, &c->hLumFilterSize, c->lumXInc, + srcW , dstW, filterAlign, 1<<14, + (flags&SWS_BICUBLIN) ? (flags|SWS_BICUBIC) : flags, + srcFilter->lumH, dstFilter->lumH, c->param); + initFilter(&c->hChrFilter, &c->hChrFilterPos, &c->hChrFilterSize, c->chrXInc, + c->chrSrcW, c->chrDstW, filterAlign, 1<<14, + (flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags, + srcFilter->chrH, dstFilter->chrH, c->param); + +#define MAX_FUNNY_CODE_SIZE 10000 +#if defined(COMPILE_MMX2) +// can't downscale !!! 
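+ // a note on the block below: the MMX2 fast-bilinear path generates its + // horizontal scaler machine code at runtime (initMMX2HScaler()), so + // funnyYCode/funnyUVCode must be writable now and executable when the + // scaler runs; mmap() with PROT_EXEC|PROT_READ|PROT_WRITE provides that + // where MAP_ANONYMOUS is available, plain av_malloc() is used otherwise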
+ if (c->canMMX2BeUsed && (flags & SWS_FAST_BILINEAR)) + { +#ifdef MAP_ANONYMOUS + c->funnyYCode = (uint8_t*)mmap(NULL, MAX_FUNNY_CODE_SIZE, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); + c->funnyUVCode = (uint8_t*)mmap(NULL, MAX_FUNNY_CODE_SIZE, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); +#else + c->funnyYCode = av_malloc(MAX_FUNNY_CODE_SIZE); + c->funnyUVCode = av_malloc(MAX_FUNNY_CODE_SIZE); +#endif + + c->lumMmx2Filter = av_malloc((dstW /8+8)*sizeof(int16_t)); + c->chrMmx2Filter = av_malloc((c->chrDstW /4+8)*sizeof(int16_t)); + c->lumMmx2FilterPos= av_malloc((dstW /2/8+8)*sizeof(int32_t)); + c->chrMmx2FilterPos= av_malloc((c->chrDstW/2/4+8)*sizeof(int32_t)); + + initMMX2HScaler( dstW, c->lumXInc, c->funnyYCode , c->lumMmx2Filter, c->lumMmx2FilterPos, 8); + initMMX2HScaler(c->chrDstW, c->chrXInc, c->funnyUVCode, c->chrMmx2Filter, c->chrMmx2FilterPos, 4); + } +#endif /* defined(COMPILE_MMX2) */ + } // initialize horizontal stuff + + + + /* precalculate vertical scaler filter coefficients */ + { + const int filterAlign= + (flags & SWS_CPU_CAPS_MMX) && (flags & SWS_ACCURATE_RND) ? 2 : + (flags & SWS_CPU_CAPS_ALTIVEC) ? 8 : + 1; + + initFilter(&c->vLumFilter, &c->vLumFilterPos, &c->vLumFilterSize, c->lumYInc, + srcH , dstH, filterAlign, (1<<12), + (flags&SWS_BICUBLIN) ? (flags|SWS_BICUBIC) : flags, + srcFilter->lumV, dstFilter->lumV, c->param); + initFilter(&c->vChrFilter, &c->vChrFilterPos, &c->vChrFilterSize, c->chrYInc, + c->chrSrcH, c->chrDstH, filterAlign, (1<<12), + (flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags, + srcFilter->chrV, dstFilter->chrV, c->param); + +#if HAVE_ALTIVEC + c->vYCoeffsBank = av_malloc(sizeof (vector signed short)*c->vLumFilterSize*c->dstH); + c->vCCoeffsBank = av_malloc(sizeof (vector signed short)*c->vChrFilterSize*c->chrDstH); + + for (i=0;i<c->vLumFilterSize*c->dstH;i++) { + int j; + short *p = (short *)&c->vYCoeffsBank[i]; + for (j=0;j<8;j++) + p[j] = c->vLumFilter[i]; + } + + for (i=0;i<c->vChrFilterSize*c->chrDstH;i++) { + int j; + short *p = (short *)&c->vCCoeffsBank[i]; + for (j=0;j<8;j++) + p[j] = c->vChrFilter[i]; + } +#endif + } + + // calculate buffer sizes so that they won't run out while handling these damn slices + c->vLumBufSize= c->vLumFilterSize; + c->vChrBufSize= c->vChrFilterSize; + for (i=0; i<dstH; i++) + { + int chrI= i*c->chrDstH / dstH; + int nextSlice= FFMAX(c->vLumFilterPos[i ] + c->vLumFilterSize - 1, + ((c->vChrFilterPos[chrI] + c->vChrFilterSize - 1)<<c->chrSrcVSubSample)); + + nextSlice>>= c->chrSrcVSubSample; + nextSlice<<= c->chrSrcVSubSample; + if (c->vLumFilterPos[i ] + c->vLumBufSize < nextSlice) + c->vLumBufSize= nextSlice - c->vLumFilterPos[i]; + if (c->vChrFilterPos[chrI] + c->vChrBufSize < (nextSlice>>c->chrSrcVSubSample)) + c->vChrBufSize= (nextSlice>>c->chrSrcVSubSample) - c->vChrFilterPos[chrI]; + } + + // allocate pixbufs (we use dynamic allocation because otherwise we would need to + c->lumPixBuf= av_malloc(c->vLumBufSize*2*sizeof(int16_t*)); + c->chrPixBuf= av_malloc(c->vChrBufSize*2*sizeof(int16_t*)); + //Note we need at least one pixel more at the end because of the MMX code (just in case someone wanna replace the 4000/8000) + /* align at 16 bytes for AltiVec */ + for (i=0; i<c->vLumBufSize; i++) + c->lumPixBuf[i]= c->lumPixBuf[i+c->vLumBufSize]= av_mallocz(VOF+1); + for (i=0; i<c->vChrBufSize; i++) + c->chrPixBuf[i]= c->chrPixBuf[i+c->vChrBufSize]= av_malloc((VOF+1)*2); + + //try to avoid drawing green stuff between the right end and the stride end + 
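// (the byte value 64 = 0x40 makes each int16_t sample 0x4040 = 16448, + // roughly 128<<7, i.e. grey chroma in the 15-bit intermediate format + // produced by the horizontal scaler) + 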
for (i=0; i<c->vChrBufSize; i++) memset(c->chrPixBuf[i], 64, (VOF+1)*2); + + assert(2*VOFW == VOF); + + assert(c->chrDstH <= dstH); + + if (flags&SWS_PRINT_INFO) + { +#ifdef DITHER1XBPP + const char *dither= " dithered"; +#else + const char *dither= ""; +#endif + if (flags&SWS_FAST_BILINEAR) + av_log(c, AV_LOG_INFO, "FAST_BILINEAR scaler, "); + else if (flags&SWS_BILINEAR) + av_log(c, AV_LOG_INFO, "BILINEAR scaler, "); + else if (flags&SWS_BICUBIC) + av_log(c, AV_LOG_INFO, "BICUBIC scaler, "); + else if (flags&SWS_X) + av_log(c, AV_LOG_INFO, "Experimental scaler, "); + else if (flags&SWS_POINT) + av_log(c, AV_LOG_INFO, "Nearest Neighbor / POINT scaler, "); + else if (flags&SWS_AREA) + av_log(c, AV_LOG_INFO, "Area Averaging scaler, "); + else if (flags&SWS_BICUBLIN) + av_log(c, AV_LOG_INFO, "luma BICUBIC / chroma BILINEAR scaler, "); + else if (flags&SWS_GAUSS) + av_log(c, AV_LOG_INFO, "Gaussian scaler, "); + else if (flags&SWS_SINC) + av_log(c, AV_LOG_INFO, "Sinc scaler, "); + else if (flags&SWS_LANCZOS) + av_log(c, AV_LOG_INFO, "Lanczos scaler, "); + else if (flags&SWS_SPLINE) + av_log(c, AV_LOG_INFO, "Bicubic spline scaler, "); + else + av_log(c, AV_LOG_INFO, "ehh flags invalid?! "); + + if (dstFormat==PIX_FMT_BGR555 || dstFormat==PIX_FMT_BGR565) + av_log(c, AV_LOG_INFO, "from %s to%s %s ", + sws_format_name(srcFormat), dither, sws_format_name(dstFormat)); + else + av_log(c, AV_LOG_INFO, "from %s to %s ", + sws_format_name(srcFormat), sws_format_name(dstFormat)); + + if (flags & SWS_CPU_CAPS_MMX2) + av_log(c, AV_LOG_INFO, "using MMX2\n"); + else if (flags & SWS_CPU_CAPS_3DNOW) + av_log(c, AV_LOG_INFO, "using 3DNOW\n"); + else if (flags & SWS_CPU_CAPS_MMX) + av_log(c, AV_LOG_INFO, "using MMX\n"); + else if (flags & SWS_CPU_CAPS_ALTIVEC) + av_log(c, AV_LOG_INFO, "using AltiVec\n"); + else + av_log(c, AV_LOG_INFO, "using C\n"); + } + + if (flags & SWS_PRINT_INFO) + { + if (flags & SWS_CPU_CAPS_MMX) + { + if (c->canMMX2BeUsed && (flags&SWS_FAST_BILINEAR)) + av_log(c, AV_LOG_VERBOSE, "using FAST_BILINEAR MMX2 scaler for horizontal scaling\n"); + else + { + if (c->hLumFilterSize==4) + av_log(c, AV_LOG_VERBOSE, "using 4-tap MMX scaler for horizontal luminance scaling\n"); + else if (c->hLumFilterSize==8) + av_log(c, AV_LOG_VERBOSE, "using 8-tap MMX scaler for horizontal luminance scaling\n"); + else + av_log(c, AV_LOG_VERBOSE, "using n-tap MMX scaler for horizontal luminance scaling\n"); + + if (c->hChrFilterSize==4) + av_log(c, AV_LOG_VERBOSE, "using 4-tap MMX scaler for horizontal chrominance scaling\n"); + else if (c->hChrFilterSize==8) + av_log(c, AV_LOG_VERBOSE, "using 8-tap MMX scaler for horizontal chrominance scaling\n"); + else + av_log(c, AV_LOG_VERBOSE, "using n-tap MMX scaler for horizontal chrominance scaling\n"); + } + } + else + { +#if ARCH_X86 + av_log(c, AV_LOG_VERBOSE, "using x86 asm scaler for horizontal scaling\n"); +#else + if (flags & SWS_FAST_BILINEAR) + av_log(c, AV_LOG_VERBOSE, "using FAST_BILINEAR C scaler for horizontal scaling\n"); + else + av_log(c, AV_LOG_VERBOSE, "using C scaler for horizontal scaling\n"); +#endif + } + if (isPlanarYUV(dstFormat)) + { + if (c->vLumFilterSize==1) + av_log(c, AV_LOG_VERBOSE, "using 1-tap %s \"scaler\" for vertical scaling (YV12 like)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C"); + else + av_log(c, AV_LOG_VERBOSE, "using n-tap %s scaler for vertical scaling (YV12 like)\n", (flags & SWS_CPU_CAPS_MMX) ? 
"MMX" : "C"); + } + else + { + if (c->vLumFilterSize==1 && c->vChrFilterSize==2) + av_log(c, AV_LOG_VERBOSE, "using 1-tap %s \"scaler\" for vertical luminance scaling (BGR)\n" + " 2-tap scaler for vertical chrominance scaling (BGR)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C"); + else if (c->vLumFilterSize==2 && c->vChrFilterSize==2) + av_log(c, AV_LOG_VERBOSE, "using 2-tap linear %s scaler for vertical scaling (BGR)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C"); + else + av_log(c, AV_LOG_VERBOSE, "using n-tap %s scaler for vertical scaling (BGR)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C"); + } + + if (dstFormat==PIX_FMT_BGR24) + av_log(c, AV_LOG_VERBOSE, "using %s YV12->BGR24 converter\n", + (flags & SWS_CPU_CAPS_MMX2) ? "MMX2" : ((flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C")); + else if (dstFormat==PIX_FMT_RGB32) + av_log(c, AV_LOG_VERBOSE, "using %s YV12->BGR32 converter\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C"); + else if (dstFormat==PIX_FMT_BGR565) + av_log(c, AV_LOG_VERBOSE, "using %s YV12->BGR16 converter\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C"); + else if (dstFormat==PIX_FMT_BGR555) + av_log(c, AV_LOG_VERBOSE, "using %s YV12->BGR15 converter\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C"); + + av_log(c, AV_LOG_VERBOSE, "%dx%d -> %dx%d\n", srcW, srcH, dstW, dstH); + } + if (flags & SWS_PRINT_INFO) + { + av_log(c, AV_LOG_DEBUG, "lum srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n", + c->srcW, c->srcH, c->dstW, c->dstH, c->lumXInc, c->lumYInc); + av_log(c, AV_LOG_DEBUG, "chr srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n", + c->chrSrcW, c->chrSrcH, c->chrDstW, c->chrDstH, c->chrXInc, c->chrYInc); + } + + c->swScale= getSwsFunc(flags); + return c; +} + +/** + * swscale wrapper, so we don't need to export the SwsContext. + * Assumes planar YUV to be in YUV order instead of YVU. 
+ */ +int sws_scale(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + int i; + uint8_t* src2[4]= {src[0], src[1], src[2]}; + + if (c->sliceDir == 0 && srcSliceY != 0 && srcSliceY + srcSliceH != c->srcH) { + av_log(c, AV_LOG_ERROR, "Slices start in the middle!\n"); + return 0; + } + if (c->sliceDir == 0) { + if (srcSliceY == 0) c->sliceDir = 1; else c->sliceDir = -1; + } + + if (usePal(c->srcFormat)){ + for (i=0; i<256; i++){ + int p, r, g, b,y,u,v; + if(c->srcFormat == PIX_FMT_PAL8){ + p=((uint32_t*)(src[1]))[i]; + r= (p>>16)&0xFF; + g= (p>> 8)&0xFF; + b= p &0xFF; + }else if(c->srcFormat == PIX_FMT_RGB8){ + r= (i>>5 )*36; + g= ((i>>2)&7)*36; + b= (i&3 )*85; + }else if(c->srcFormat == PIX_FMT_BGR8){ + b= (i>>6 )*85; + g= ((i>>3)&7)*36; + r= (i&7 )*36; + }else if(c->srcFormat == PIX_FMT_RGB4_BYTE){ + r= (i>>3 )*255; + g= ((i>>1)&3)*85; + b= (i&1 )*255; + }else { + assert(c->srcFormat == PIX_FMT_BGR4_BYTE); + b= (i>>3 )*255; + g= ((i>>1)&3)*85; + r= (i&1 )*255; + } + y= av_clip_uint8((RY*r + GY*g + BY*b + ( 33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT); + u= av_clip_uint8((RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT); + v= av_clip_uint8((RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT); + c->pal_yuv[i]= y + (u<<8) + (v<<16); + + + switch(c->dstFormat) { + case PIX_FMT_BGR32: +#ifndef WORDS_BIGENDIAN + case PIX_FMT_RGB24: +#endif + c->pal_rgb[i]= r + (g<<8) + (b<<16); + break; + case PIX_FMT_BGR32_1: +#ifdef WORDS_BIGENDIAN + case PIX_FMT_BGR24: +#endif + c->pal_rgb[i]= (r + (g<<8) + (b<<16)) << 8; + break; + case PIX_FMT_RGB32_1: +#ifdef WORDS_BIGENDIAN + case PIX_FMT_RGB24: +#endif + c->pal_rgb[i]= (b + (g<<8) + (r<<16)) << 8; + break; + case PIX_FMT_RGB32: +#ifndef WORDS_BIGENDIAN + case PIX_FMT_BGR24: +#endif + default: + c->pal_rgb[i]= b + (g<<8) + (r<<16); + } + } + } + + // copy strides, so they can safely be modified + if (c->sliceDir == 1) { + // slices go from top to bottom + int srcStride2[4]= {srcStride[0], srcStride[1], srcStride[2]}; + int dstStride2[4]= {dstStride[0], dstStride[1], dstStride[2]}; + return c->swScale(c, src2, srcStride2, srcSliceY, srcSliceH, dst, dstStride2); + } else { + // slices go from bottom to top => we flip the image internally + uint8_t* dst2[4]= {dst[0] + (c->dstH-1)*dstStride[0], + dst[1] + ((c->dstH>>c->chrDstVSubSample)-1)*dstStride[1], + dst[2] + ((c->dstH>>c->chrDstVSubSample)-1)*dstStride[2]}; + int srcStride2[4]= {-srcStride[0], -srcStride[1], -srcStride[2]}; + int dstStride2[4]= {-dstStride[0], -dstStride[1], -dstStride[2]}; + + src2[0] += (srcSliceH-1)*srcStride[0]; + if (!usePal(c->srcFormat)) + src2[1] += ((srcSliceH>>c->chrSrcVSubSample)-1)*srcStride[1]; + src2[2] += ((srcSliceH>>c->chrSrcVSubSample)-1)*srcStride[2]; + + return c->swScale(c, src2, srcStride2, c->srcH-srcSliceY-srcSliceH, srcSliceH, dst2, dstStride2); + } +} + +#if LIBSWSCALE_VERSION_MAJOR < 1 +int sws_scale_ordered(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + return sws_scale(c, src, srcStride, srcSliceY, srcSliceH, dst, dstStride); +} +#endif + +SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur, + float lumaSharpen, float chromaSharpen, + float chromaHShift, float chromaVShift, + int verbose) +{ + SwsFilter *filter= av_malloc(sizeof(SwsFilter)); + + if (lumaGBlur!=0.0){ + filter->lumH= sws_getGaussianVec(lumaGBlur, 3.0); + filter->lumV= sws_getGaussianVec(lumaGBlur, 3.0); + }else{ + filter->lumH= 
sws_getIdentityVec(); + filter->lumV= sws_getIdentityVec(); + } + + if (chromaGBlur!=0.0){ + filter->chrH= sws_getGaussianVec(chromaGBlur, 3.0); + filter->chrV= sws_getGaussianVec(chromaGBlur, 3.0); + }else{ + filter->chrH= sws_getIdentityVec(); + filter->chrV= sws_getIdentityVec(); + } + + if (chromaSharpen!=0.0){ + SwsVector *id= sws_getIdentityVec(); + sws_scaleVec(filter->chrH, -chromaSharpen); + sws_scaleVec(filter->chrV, -chromaSharpen); + sws_addVec(filter->chrH, id); + sws_addVec(filter->chrV, id); + sws_freeVec(id); + } + + if (lumaSharpen!=0.0){ + SwsVector *id= sws_getIdentityVec(); + sws_scaleVec(filter->lumH, -lumaSharpen); + sws_scaleVec(filter->lumV, -lumaSharpen); + sws_addVec(filter->lumH, id); + sws_addVec(filter->lumV, id); + sws_freeVec(id); + } + + if (chromaHShift != 0.0) + sws_shiftVec(filter->chrH, (int)(chromaHShift+0.5)); + + if (chromaVShift != 0.0) + sws_shiftVec(filter->chrV, (int)(chromaVShift+0.5)); + + sws_normalizeVec(filter->chrH, 1.0); + sws_normalizeVec(filter->chrV, 1.0); + sws_normalizeVec(filter->lumH, 1.0); + sws_normalizeVec(filter->lumV, 1.0); + + if (verbose) sws_printVec2(filter->chrH, NULL, AV_LOG_DEBUG); + if (verbose) sws_printVec2(filter->lumH, NULL, AV_LOG_DEBUG); + + return filter; +} + +SwsVector *sws_getGaussianVec(double variance, double quality){ + const int length= (int)(variance*quality + 0.5) | 1; + int i; + double *coeff= av_malloc(length*sizeof(double)); + double middle= (length-1)*0.5; + SwsVector *vec= av_malloc(sizeof(SwsVector)); + + vec->coeff= coeff; + vec->length= length; + + for (i=0; i<length; i++) + { + double dist= i-middle; + coeff[i]= exp(-dist*dist/(2*variance*variance)) / sqrt(2*variance*PI); + } + + sws_normalizeVec(vec, 1.0); + + return vec; +} + +SwsVector *sws_getConstVec(double c, int length){ + int i; + double *coeff= av_malloc(length*sizeof(double)); + SwsVector *vec= av_malloc(sizeof(SwsVector)); + + vec->coeff= coeff; + vec->length= length; + + for (i=0; i<length; i++) + coeff[i]= c; + + return vec; +} + + +SwsVector *sws_getIdentityVec(void){ + return sws_getConstVec(1.0, 1); +} + +double sws_dcVec(SwsVector *a){ + int i; + double sum=0; + + for (i=0; i<a->length; i++) + sum+= a->coeff[i]; + + return sum; +} + +void sws_scaleVec(SwsVector *a, double scalar){ + int i; + + for (i=0; i<a->length; i++) + a->coeff[i]*= scalar; +} + +void sws_normalizeVec(SwsVector *a, double height){ + sws_scaleVec(a, height/sws_dcVec(a)); +} + +static SwsVector *sws_getConvVec(SwsVector *a, SwsVector *b){ + int length= a->length + b->length - 1; + double *coeff= av_malloc(length*sizeof(double)); + int i, j; + SwsVector *vec= av_malloc(sizeof(SwsVector)); + + vec->coeff= coeff; + vec->length= length; + + for (i=0; i<length; i++) coeff[i]= 0.0; + + for (i=0; i<a->length; i++) + { + for (j=0; j<b->length; j++) + { + coeff[i+j]+= a->coeff[i]*b->coeff[j]; + } + } + + return vec; +} + +static SwsVector *sws_sumVec(SwsVector *a, SwsVector *b){ + int length= FFMAX(a->length, b->length); + double *coeff= av_malloc(length*sizeof(double)); + int i; + SwsVector *vec= av_malloc(sizeof(SwsVector)); + + vec->coeff= coeff; + vec->length= length; + + for (i=0; i<length; i++) coeff[i]= 0.0; + + for (i=0; i<a->length; i++) coeff[i + (length-1)/2 - (a->length-1)/2]+= a->coeff[i]; + for (i=0; i<b->length; i++) coeff[i + (length-1)/2 - (b->length-1)/2]+= b->coeff[i]; + + return vec; +} + +static SwsVector *sws_diffVec(SwsVector *a, SwsVector *b){ + int length= FFMAX(a->length, b->length); + double *coeff= av_malloc(length*sizeof(double)); + int i; + 
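// as in sws_sumVec() above, both inputs are centered on the result via + // the (length-1)/2 offsets before the per-coefficient subtraction + 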
SwsVector *vec= av_malloc(sizeof(SwsVector)); + + vec->coeff= coeff; + vec->length= length; + + for (i=0; i<length; i++) coeff[i]= 0.0; + + for (i=0; i<a->length; i++) coeff[i + (length-1)/2 - (a->length-1)/2]+= a->coeff[i]; + for (i=0; i<b->length; i++) coeff[i + (length-1)/2 - (b->length-1)/2]-= b->coeff[i]; + + return vec; +} + +/* shift left / or right if "shift" is negative */ +static SwsVector *sws_getShiftedVec(SwsVector *a, int shift){ + int length= a->length + FFABS(shift)*2; + double *coeff= av_malloc(length*sizeof(double)); + int i; + SwsVector *vec= av_malloc(sizeof(SwsVector)); + + vec->coeff= coeff; + vec->length= length; + + for (i=0; i<length; i++) coeff[i]= 0.0; + + for (i=0; i<a->length; i++) + { + coeff[i + (length-1)/2 - (a->length-1)/2 - shift]= a->coeff[i]; + } + + return vec; +} + +void sws_shiftVec(SwsVector *a, int shift){ + SwsVector *shifted= sws_getShiftedVec(a, shift); + av_free(a->coeff); + a->coeff= shifted->coeff; + a->length= shifted->length; + av_free(shifted); +} + +void sws_addVec(SwsVector *a, SwsVector *b){ + SwsVector *sum= sws_sumVec(a, b); + av_free(a->coeff); + a->coeff= sum->coeff; + a->length= sum->length; + av_free(sum); +} + +void sws_subVec(SwsVector *a, SwsVector *b){ + SwsVector *diff= sws_diffVec(a, b); + av_free(a->coeff); + a->coeff= diff->coeff; + a->length= diff->length; + av_free(diff); +} + +void sws_convVec(SwsVector *a, SwsVector *b){ + SwsVector *conv= sws_getConvVec(a, b); + av_free(a->coeff); + a->coeff= conv->coeff; + a->length= conv->length; + av_free(conv); +} + +SwsVector *sws_cloneVec(SwsVector *a){ + double *coeff= av_malloc(a->length*sizeof(double)); + int i; + SwsVector *vec= av_malloc(sizeof(SwsVector)); + + vec->coeff= coeff; + vec->length= a->length; + + for (i=0; i<a->length; i++) coeff[i]= a->coeff[i]; + + return vec; +} + +void sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level){ + int i; + double max=0; + double min=0; + double range; + + for (i=0; i<a->length; i++) + if (a->coeff[i]>max) max= a->coeff[i]; + + for (i=0; i<a->length; i++) + if (a->coeff[i]<min) min= a->coeff[i]; + + range= max - min; + + for (i=0; i<a->length; i++) + { + int x= (int)((a->coeff[i]-min)*60.0/range +0.5); + av_log(log_ctx, log_level, "%1.3f ", a->coeff[i]); + for (;x>0; x--) av_log(log_ctx, log_level, " "); + av_log(log_ctx, log_level, "|\n"); + } +} + +#if LIBSWSCALE_VERSION_MAJOR < 1 +void sws_printVec(SwsVector *a){ + sws_printVec2(a, NULL, AV_LOG_DEBUG); +} +#endif + +void sws_freeVec(SwsVector *a){ + if (!a) return; + av_freep(&a->coeff); + a->length=0; + av_free(a); +} + +void sws_freeFilter(SwsFilter *filter){ + if (!filter) return; + + if (filter->lumH) sws_freeVec(filter->lumH); + if (filter->lumV) sws_freeVec(filter->lumV); + if (filter->chrH) sws_freeVec(filter->chrH); + if (filter->chrV) sws_freeVec(filter->chrV); + av_free(filter); +} + + +void sws_freeContext(SwsContext *c){ + int i; + if (!c) return; + + if (c->lumPixBuf) + { + for (i=0; i<c->vLumBufSize; i++) + av_freep(&c->lumPixBuf[i]); + av_freep(&c->lumPixBuf); + } + + if (c->chrPixBuf) + { + for (i=0; i<c->vChrBufSize; i++) + av_freep(&c->chrPixBuf[i]); + av_freep(&c->chrPixBuf); + } + + av_freep(&c->vLumFilter); + av_freep(&c->vChrFilter); + av_freep(&c->hLumFilter); + av_freep(&c->hChrFilter); +#if HAVE_ALTIVEC + av_freep(&c->vYCoeffsBank); + av_freep(&c->vCCoeffsBank); +#endif + + av_freep(&c->vLumFilterPos); + av_freep(&c->vChrFilterPos); + av_freep(&c->hLumFilterPos); + av_freep(&c->hChrFilterPos); + +#if ARCH_X86 && CONFIG_GPL +#ifdef MAP_ANONYMOUS + 
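// release the runtime-generated scaler code the same way it was + // allocated in sws_getContext(): munmap() for the mmap()ed case, + // av_free() otherwise + 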
if (c->funnyYCode) munmap(c->funnyYCode, MAX_FUNNY_CODE_SIZE); + if (c->funnyUVCode) munmap(c->funnyUVCode, MAX_FUNNY_CODE_SIZE); +#else + av_free(c->funnyYCode); + av_free(c->funnyUVCode); +#endif + c->funnyYCode=NULL; + c->funnyUVCode=NULL; +#endif /* ARCH_X86 && CONFIG_GPL */ + + av_freep(&c->lumMmx2Filter); + av_freep(&c->chrMmx2Filter); + av_freep(&c->lumMmx2FilterPos); + av_freep(&c->chrMmx2FilterPos); + av_freep(&c->yuvTable); + + av_free(c); +} + +struct SwsContext *sws_getCachedContext(struct SwsContext *context, + int srcW, int srcH, enum PixelFormat srcFormat, + int dstW, int dstH, enum PixelFormat dstFormat, int flags, + SwsFilter *srcFilter, SwsFilter *dstFilter, double *param) +{ + static const double default_param[2] = {SWS_PARAM_DEFAULT, SWS_PARAM_DEFAULT}; + + if (!param) + param = default_param; + + if (context) { + if (context->srcW != srcW || context->srcH != srcH || + context->srcFormat != srcFormat || + context->dstW != dstW || context->dstH != dstH || + context->dstFormat != dstFormat || context->flags != flags || + context->param[0] != param[0] || context->param[1] != param[1]) + { + sws_freeContext(context); + context = NULL; + } + } + if (!context) { + return sws_getContext(srcW, srcH, srcFormat, + dstW, dstH, dstFormat, flags, + srcFilter, dstFilter, param); + } + return context; +} + diff --git a/libswscale/swscale.h b/libswscale/swscale.h new file mode 100644 index 0000000000..6efd90fcda --- /dev/null +++ b/libswscale/swscale.h @@ -0,0 +1,247 @@ +/* + * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWSCALE_SWSCALE_H +#define SWSCALE_SWSCALE_H + +/** + * @file libswscale/swscale.h + * @brief + * external api for the swscale stuff + */ + +#include "libavutil/avutil.h" + +#define LIBSWSCALE_VERSION_MAJOR 0 +#define LIBSWSCALE_VERSION_MINOR 7 +#define LIBSWSCALE_VERSION_MICRO 1 + +#define LIBSWSCALE_VERSION_INT AV_VERSION_INT(LIBSWSCALE_VERSION_MAJOR, \ + LIBSWSCALE_VERSION_MINOR, \ + LIBSWSCALE_VERSION_MICRO) +#define LIBSWSCALE_VERSION AV_VERSION(LIBSWSCALE_VERSION_MAJOR, \ + LIBSWSCALE_VERSION_MINOR, \ + LIBSWSCALE_VERSION_MICRO) +#define LIBSWSCALE_BUILD LIBSWSCALE_VERSION_INT + +#define LIBSWSCALE_IDENT "SwS" AV_STRINGIFY(LIBSWSCALE_VERSION) + +/** + * Returns the LIBSWSCALE_VERSION_INT constant. 
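+ * The value packs major, minor and micro versions as AV_VERSION_INT() + * does, so e.g. (swscale_version() >> 16) yields LIBSWSCALE_VERSION_MAJOR.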
+ */ +unsigned swscale_version(void); + +/* values for the flags, the stuff on the command line is different */ +#define SWS_FAST_BILINEAR 1 +#define SWS_BILINEAR 2 +#define SWS_BICUBIC 4 +#define SWS_X 8 +#define SWS_POINT 0x10 +#define SWS_AREA 0x20 +#define SWS_BICUBLIN 0x40 +#define SWS_GAUSS 0x80 +#define SWS_SINC 0x100 +#define SWS_LANCZOS 0x200 +#define SWS_SPLINE 0x400 + +#define SWS_SRC_V_CHR_DROP_MASK 0x30000 +#define SWS_SRC_V_CHR_DROP_SHIFT 16 + +#define SWS_PARAM_DEFAULT 123456 + +#define SWS_PRINT_INFO 0x1000 + +//the following 3 flags are not completely implemented +//internal chrominance subsampling info +#define SWS_FULL_CHR_H_INT 0x2000 +//input subsampling info +#define SWS_FULL_CHR_H_INP 0x4000 +#define SWS_DIRECT_BGR 0x8000 +#define SWS_ACCURATE_RND 0x40000 +#define SWS_BITEXACT 0x80000 + +#define SWS_CPU_CAPS_MMX 0x80000000 +#define SWS_CPU_CAPS_MMX2 0x20000000 +#define SWS_CPU_CAPS_3DNOW 0x40000000 +#define SWS_CPU_CAPS_ALTIVEC 0x10000000 +#define SWS_CPU_CAPS_BFIN 0x01000000 + +#define SWS_MAX_REDUCE_CUTOFF 0.002 + +#define SWS_CS_ITU709 1 +#define SWS_CS_FCC 4 +#define SWS_CS_ITU601 5 +#define SWS_CS_ITU624 5 +#define SWS_CS_SMPTE170M 5 +#define SWS_CS_SMPTE240M 7 +#define SWS_CS_DEFAULT 5 + + + +// when used for filters they must have an odd number of elements +// coeffs cannot be shared between vectors +typedef struct { + double *coeff; ///< pointer to the list of coefficients + int length; ///< number of coefficients in the vector +} SwsVector; + +// vectors can be shared +typedef struct { + SwsVector *lumH; + SwsVector *lumV; + SwsVector *chrH; + SwsVector *chrV; +} SwsFilter; + +struct SwsContext; + +void sws_freeContext(struct SwsContext *swsContext); + +/** + * Allocates and returns a SwsContext. You need it to perform + * scaling/conversion operations using sws_scale(). + * + * @param srcW the width of the source image + * @param srcH the height of the source image + * @param srcFormat the source image format + * @param dstW the width of the destination image + * @param dstH the height of the destination image + * @param dstFormat the destination image format + * @param flags specify which algorithm and options to use for rescaling + * @return a pointer to an allocated context, or NULL in case of error + */ +struct SwsContext *sws_getContext(int srcW, int srcH, enum PixelFormat srcFormat, int dstW, int dstH, enum PixelFormat dstFormat, int flags, + SwsFilter *srcFilter, SwsFilter *dstFilter, double *param); + +/** + * Scales the image slice in \p srcSlice and puts the resulting scaled + * slice in the image in \p dst. A slice is a sequence of consecutive + * rows in an image. 
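+ * + * Slices have to be provided in sequential order, either top-to-bottom or + * bottom-to-top; sws_scale() rejects slices that start in the middle of + * the image. + * + * A minimal usage sketch (hypothetical sizes, buffers and strides; error + * checking omitted): + * struct SwsContext *ctx = sws_getContext(w, h, PIX_FMT_YUV420P, + * w, h, PIX_FMT_RGB24, + * SWS_BILINEAR, NULL, NULL, NULL); + * sws_scale(ctx, src, srcStride, 0, h, dst, dstStride); + * sws_freeContext(ctx);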
+ * + * @param context the scaling context previously created with + * sws_getContext() + * @param srcSlice the array containing the pointers to the planes of + * the source slice + * @param srcStride the array containing the strides for each plane of + * the source image + * @param srcSliceY the position in the source image of the slice to + * process, that is the number (counted starting from + * zero) in the image of the first row of the slice + * @param srcSliceH the height of the source slice, that is the number + * of rows in the slice + * @param dst the array containing the pointers to the planes of + * the destination image + * @param dstStride the array containing the strides for each plane of + * the destination image + * @return the height of the output slice + */ +int sws_scale(struct SwsContext *context, uint8_t* srcSlice[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]); +#if LIBSWSCALE_VERSION_MAJOR < 1 +/** + * @deprecated Use sws_scale() instead. + */ +int sws_scale_ordered(struct SwsContext *context, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]) attribute_deprecated; +#endif + + +int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4], int srcRange, const int table[4], int dstRange, int brightness, int contrast, int saturation); +int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table, int *srcRange, int **table, int *dstRange, int *brightness, int *contrast, int *saturation); + +/** + * Returns a normalized Gaussian curve used to filter stuff; + * quality = 3 is high quality, lower values give lower quality. + */ +SwsVector *sws_getGaussianVec(double variance, double quality); + +/** + * Allocates and returns a vector with \p length coefficients, all + * with the same value \p c. + */ +SwsVector *sws_getConstVec(double c, int length); + +/** + * Allocates and returns a vector with just one coefficient, with + * value 1.0. + */ +SwsVector *sws_getIdentityVec(void); + +/** + * Scales all the coefficients of \p a by the \p scalar value. + */ +void sws_scaleVec(SwsVector *a, double scalar); + +/** + * Scales all the coefficients of \p a so that their sum equals \p + * height. + */ +void sws_normalizeVec(SwsVector *a, double height); +void sws_convVec(SwsVector *a, SwsVector *b); +void sws_addVec(SwsVector *a, SwsVector *b); +void sws_subVec(SwsVector *a, SwsVector *b); +void sws_shiftVec(SwsVector *a, int shift); + +/** + * Allocates and returns a clone of the vector \p a, that is a vector + * with the same coefficients as \p a. + */ +SwsVector *sws_cloneVec(SwsVector *a); + +#if LIBSWSCALE_VERSION_MAJOR < 1 +/** + * @deprecated Use sws_printVec2() instead. + */ +attribute_deprecated void sws_printVec(SwsVector *a); +#endif + +/** + * Prints with av_log() a textual representation of the vector \p a + * if \p log_level <= av_log_level. + */ +void sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level); + +void sws_freeVec(SwsVector *a); + +SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur, + float lumaSharpen, float chromaSharpen, + float chromaHShift, float chromaVShift, + int verbose); +void sws_freeFilter(SwsFilter *filter); + +/** + * Checks if \p context can be reused, otherwise reallocates a new + * one. + * + * If \p context is NULL, just calls sws_getContext() to get a new + * context. Otherwise, checks if the parameters are the ones already + * saved in \p context. If that is the case, returns the current + * context. 
Otherwise, frees \p context and gets a new context with + * the new parameters. + * + * Be warned that \p srcFilter and \p dstFilter are not checked, they + * are assumed to remain the same. + */ +struct SwsContext *sws_getCachedContext(struct SwsContext *context, + int srcW, int srcH, enum PixelFormat srcFormat, + int dstW, int dstH, enum PixelFormat dstFormat, int flags, + SwsFilter *srcFilter, SwsFilter *dstFilter, double *param); + +#endif /* SWSCALE_SWSCALE_H */ diff --git a/libswscale/swscale_altivec_template.c b/libswscale/swscale_altivec_template.c new file mode 100644 index 0000000000..a008b966e8 --- /dev/null +++ b/libswscale/swscale_altivec_template.c @@ -0,0 +1,538 @@ +/* + * AltiVec-enhanced yuv2yuvX + * + * Copyright (C) 2004 Romain Dolbeau <romain@dolbeau.org> + * based on the equivalent C code in swscale.c + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#define vzero vec_splat_s32(0) + +static inline void +altivec_packIntArrayToCharArray(int *val, uint8_t* dest, int dstW) { + register int i; + vector unsigned int altivec_vectorShiftInt19 = + vec_add(vec_splat_u32(10), vec_splat_u32(9)); + if ((unsigned long)dest % 16) { + /* badly aligned store, we force store alignment */ + /* and will handle load misalignment on val w/ vec_perm */ + vector unsigned char perm1; + vector signed int v1; + for (i = 0 ; (i < dstW) && + (((unsigned long)dest + i) % 16) ; i++) { + int t = val[i] >> 19; + dest[i] = (t < 0) ? 0 : ((t > 255) ? 
255 : t); + } + perm1 = vec_lvsl(i << 2, val); + v1 = vec_ld(i << 2, val); + for ( ; i < (dstW - 15); i+=16) { + int offset = i << 2; + vector signed int v2 = vec_ld(offset + 16, val); + vector signed int v3 = vec_ld(offset + 32, val); + vector signed int v4 = vec_ld(offset + 48, val); + vector signed int v5 = vec_ld(offset + 64, val); + vector signed int v12 = vec_perm(v1, v2, perm1); + vector signed int v23 = vec_perm(v2, v3, perm1); + vector signed int v34 = vec_perm(v3, v4, perm1); + vector signed int v45 = vec_perm(v4, v5, perm1); + + vector signed int vA = vec_sra(v12, altivec_vectorShiftInt19); + vector signed int vB = vec_sra(v23, altivec_vectorShiftInt19); + vector signed int vC = vec_sra(v34, altivec_vectorShiftInt19); + vector signed int vD = vec_sra(v45, altivec_vectorShiftInt19); + vector unsigned short vs1 = vec_packsu(vA, vB); + vector unsigned short vs2 = vec_packsu(vC, vD); + vector unsigned char vf = vec_packsu(vs1, vs2); + vec_st(vf, i, dest); + v1 = v5; + } + } else { // dest is properly aligned, great + for (i = 0; i < (dstW - 15); i+=16) { + int offset = i << 2; + vector signed int v1 = vec_ld(offset, val); + vector signed int v2 = vec_ld(offset + 16, val); + vector signed int v3 = vec_ld(offset + 32, val); + vector signed int v4 = vec_ld(offset + 48, val); + vector signed int v5 = vec_sra(v1, altivec_vectorShiftInt19); + vector signed int v6 = vec_sra(v2, altivec_vectorShiftInt19); + vector signed int v7 = vec_sra(v3, altivec_vectorShiftInt19); + vector signed int v8 = vec_sra(v4, altivec_vectorShiftInt19); + vector unsigned short vs1 = vec_packsu(v5, v6); + vector unsigned short vs2 = vec_packsu(v7, v8); + vector unsigned char vf = vec_packsu(vs1, vs2); + vec_st(vf, i, dest); + } + } + for ( ; i < dstW ; i++) { + int t = val[i] >> 19; + dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t); + } +} + +static inline void +yuv2yuvX_altivec_real(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize, + int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, + uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW) +{ + const vector signed int vini = {(1 << 18), (1 << 18), (1 << 18), (1 << 18)}; + register int i, j; + { + int __attribute__ ((aligned (16))) val[dstW]; + + for (i = 0; i < (dstW -7); i+=4) { + vec_st(vini, i << 2, val); + } + for (; i < dstW; i++) { + val[i] = (1 << 18); + } + + for (j = 0; j < lumFilterSize; j++) { + vector signed short l1, vLumFilter = vec_ld(j << 1, lumFilter); + vector unsigned char perm, perm0 = vec_lvsl(j << 1, lumFilter); + vLumFilter = vec_perm(vLumFilter, vLumFilter, perm0); + vLumFilter = vec_splat(vLumFilter, 0); // lumFilter[j] is loaded 8 times in vLumFilter + + perm = vec_lvsl(0, lumSrc[j]); + l1 = vec_ld(0, lumSrc[j]); + + for (i = 0; i < (dstW - 7); i+=8) { + int offset = i << 2; + vector signed short l2 = vec_ld((i << 1) + 16, lumSrc[j]); + + vector signed int v1 = vec_ld(offset, val); + vector signed int v2 = vec_ld(offset + 16, val); + + vector signed short ls = vec_perm(l1, l2, perm); // lumSrc[j][i] ... lumSrc[j][i+7] + + vector signed int i1 = vec_mule(vLumFilter, ls); + vector signed int i2 = vec_mulo(vLumFilter, ls); + + vector signed int vf1 = vec_mergeh(i1, i2); + vector signed int vf2 = vec_mergel(i1, i2); // lumSrc[j][i] * lumFilter[j] ... 
lumSrc[j][i+7] * lumFilter[j] + + vector signed int vo1 = vec_add(v1, vf1); + vector signed int vo2 = vec_add(v2, vf2); + + vec_st(vo1, offset, val); + vec_st(vo2, offset + 16, val); + + l1 = l2; + } + for ( ; i < dstW; i++) { + val[i] += lumSrc[j][i] * lumFilter[j]; + } + } + altivec_packIntArrayToCharArray(val, dest, dstW); + } + if (uDest != 0) { + int __attribute__ ((aligned (16))) u[chrDstW]; + int __attribute__ ((aligned (16))) v[chrDstW]; + + for (i = 0; i < (chrDstW -7); i+=4) { + vec_st(vini, i << 2, u); + vec_st(vini, i << 2, v); + } + for (; i < chrDstW; i++) { + u[i] = (1 << 18); + v[i] = (1 << 18); + } + + for (j = 0; j < chrFilterSize; j++) { + vector signed short l1, l1_V, vChrFilter = vec_ld(j << 1, chrFilter); + vector unsigned char perm, perm0 = vec_lvsl(j << 1, chrFilter); + vChrFilter = vec_perm(vChrFilter, vChrFilter, perm0); + vChrFilter = vec_splat(vChrFilter, 0); // chrFilter[j] is loaded 8 times in vChrFilter + + perm = vec_lvsl(0, chrSrc[j]); + l1 = vec_ld(0, chrSrc[j]); + l1_V = vec_ld(2048 << 1, chrSrc[j]); + + for (i = 0; i < (chrDstW - 7); i+=8) { + int offset = i << 2; + vector signed short l2 = vec_ld((i << 1) + 16, chrSrc[j]); + vector signed short l2_V = vec_ld(((i + 2048) << 1) + 16, chrSrc[j]); + + vector signed int v1 = vec_ld(offset, u); + vector signed int v2 = vec_ld(offset + 16, u); + vector signed int v1_V = vec_ld(offset, v); + vector signed int v2_V = vec_ld(offset + 16, v); + + vector signed short ls = vec_perm(l1, l2, perm); // chrSrc[j][i] ... chrSrc[j][i+7] + vector signed short ls_V = vec_perm(l1_V, l2_V, perm); // chrSrc[j][i+2048] ... chrSrc[j][i+2055] + + vector signed int i1 = vec_mule(vChrFilter, ls); + vector signed int i2 = vec_mulo(vChrFilter, ls); + vector signed int i1_V = vec_mule(vChrFilter, ls_V); + vector signed int i2_V = vec_mulo(vChrFilter, ls_V); + + vector signed int vf1 = vec_mergeh(i1, i2); + vector signed int vf2 = vec_mergel(i1, i2); // chrSrc[j][i] * chrFilter[j] ... chrSrc[j][i+7] * chrFilter[j] + vector signed int vf1_V = vec_mergeh(i1_V, i2_V); + vector signed int vf2_V = vec_mergel(i1_V, i2_V); // chrSrc[j][i] * chrFilter[j] ... 
chrSrc[j][i+7] * chrFilter[j] + + vector signed int vo1 = vec_add(v1, vf1); + vector signed int vo2 = vec_add(v2, vf2); + vector signed int vo1_V = vec_add(v1_V, vf1_V); + vector signed int vo2_V = vec_add(v2_V, vf2_V); + + vec_st(vo1, offset, u); + vec_st(vo2, offset + 16, u); + vec_st(vo1_V, offset, v); + vec_st(vo2_V, offset + 16, v); + + l1 = l2; + l1_V = l2_V; + } + for ( ; i < chrDstW; i++) { + u[i] += chrSrc[j][i] * chrFilter[j]; + v[i] += chrSrc[j][i + 2048] * chrFilter[j]; + } + } + altivec_packIntArrayToCharArray(u, uDest, chrDstW); + altivec_packIntArrayToCharArray(v, vDest, chrDstW); + } +} + +static inline void hScale_altivec_real(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc, int16_t *filter, int16_t *filterPos, int filterSize) { + register int i; + int __attribute__ ((aligned (16))) tempo[4]; + + if (filterSize % 4) { + for (i=0; i<dstW; i++) { + register int j; + register int srcPos = filterPos[i]; + register int val = 0; + for (j=0; j<filterSize; j++) { + val += ((int)src[srcPos + j])*filter[filterSize*i + j]; + } + dst[i] = FFMIN(val>>7, (1<<15)-1); + } + } + else + switch (filterSize) { + case 4: + { + for (i=0; i<dstW; i++) { + register int srcPos = filterPos[i]; + + vector unsigned char src_v0 = vec_ld(srcPos, src); + vector unsigned char src_v1, src_vF; + vector signed short src_v, filter_v; + vector signed int val_vEven, val_s; + if ((((int)src + srcPos)% 16) > 12) { + src_v1 = vec_ld(srcPos + 16, src); + } + src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src)); + + src_v = // vec_unpackh sign-extends... + (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF)); + // now put our elements in the even slots + src_v = vec_mergeh(src_v, (vector signed short)vzero); + + filter_v = vec_ld(i << 3, filter); + // The 3 above is 2 (filterSize == 4) + 1 (sizeof(short) == 2). + + // The neat trick: We only care for half the elements, + // high or low depending on (i<<3)%16 (it's 0 or 8 here), + // and we're going to use vec_mule, so we choose + // carefully how to "unpack" the elements into the even slots. + if ((i << 3) % 16) + filter_v = vec_mergel(filter_v, (vector signed short)vzero); + else + filter_v = vec_mergeh(filter_v, (vector signed short)vzero); + + val_vEven = vec_mule(src_v, filter_v); + val_s = vec_sums(val_vEven, vzero); + vec_st(val_s, 0, tempo); + dst[i] = FFMIN(tempo[3]>>7, (1<<15)-1); + } + } + break; + + case 8: + { + for (i=0; i<dstW; i++) { + register int srcPos = filterPos[i]; + + vector unsigned char src_v0 = vec_ld(srcPos, src); + vector unsigned char src_v1, src_vF; + vector signed short src_v, filter_v; + vector signed int val_v, val_s; + if ((((int)src + srcPos)% 16) > 8) { + src_v1 = vec_ld(srcPos + 16, src); + } + src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src)); + + src_v = // vec_unpackh sign-extends... + (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF)); + filter_v = vec_ld(i << 4, filter); + // the 4 above is 3 (filterSize == 8) + 1 (sizeof(short) == 2) + + val_v = vec_msums(src_v, filter_v, (vector signed int)vzero); + val_s = vec_sums(val_v, vzero); + vec_st(val_s, 0, tempo); + dst[i] = FFMIN(tempo[3]>>7, (1<<15)-1); + } + } + break; + + case 16: + { + for (i=0; i<dstW; i++) { + register int srcPos = filterPos[i]; + + vector unsigned char src_v0 = vec_ld(srcPos, src); + vector unsigned char src_v1 = vec_ld(srcPos + 16, src); + vector unsigned char src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src)); + + vector signed short src_vA = // vec_unpackh sign-extends... 
+ (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF)); + vector signed short src_vB = // vec_unpackh sign-extends... + (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF)); + + vector signed short filter_v0 = vec_ld(i << 5, filter); + vector signed short filter_v1 = vec_ld((i << 5) + 16, filter); + // the 5 above are 4 (filterSize == 16) + 1 (sizeof(short) == 2) + + vector signed int val_acc = vec_msums(src_vA, filter_v0, (vector signed int)vzero); + vector signed int val_v = vec_msums(src_vB, filter_v1, val_acc); + + vector signed int val_s = vec_sums(val_v, vzero); + + vec_st(val_s, 0, tempo); + dst[i] = FFMIN(tempo[3]>>7, (1<<15)-1); + } + } + break; + + default: + { + for (i=0; i<dstW; i++) { + register int j; + register int srcPos = filterPos[i]; + + vector signed int val_s, val_v = (vector signed int)vzero; + vector signed short filter_v0R = vec_ld(i * 2 * filterSize, filter); + vector unsigned char permF = vec_lvsl((i * 2 * filterSize), filter); + + vector unsigned char src_v0 = vec_ld(srcPos, src); + vector unsigned char permS = vec_lvsl(srcPos, src); + + for (j = 0 ; j < filterSize - 15; j += 16) { + vector unsigned char src_v1 = vec_ld(srcPos + j + 16, src); + vector unsigned char src_vF = vec_perm(src_v0, src_v1, permS); + + vector signed short src_vA = // vec_unpackh sign-extends... + (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF)); + vector signed short src_vB = // vec_unpackh sign-extends... + (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF)); + + vector signed short filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter); + vector signed short filter_v2R = vec_ld((i * 2 * filterSize) + (j * 2) + 32, filter); + vector signed short filter_v0 = vec_perm(filter_v0R, filter_v1R, permF); + vector signed short filter_v1 = vec_perm(filter_v1R, filter_v2R, permF); + + vector signed int val_acc = vec_msums(src_vA, filter_v0, val_v); + val_v = vec_msums(src_vB, filter_v1, val_acc); + + filter_v0R = filter_v2R; + src_v0 = src_v1; + } + + if (j < filterSize-7) { + // loading src_v0 is useless, it's already done above + //vector unsigned char src_v0 = vec_ld(srcPos + j, src); + vector unsigned char src_v1, src_vF; + vector signed short src_v, filter_v1R, filter_v; + if ((((int)src + srcPos)% 16) > 8) { + src_v1 = vec_ld(srcPos + j + 16, src); + } + src_vF = vec_perm(src_v0, src_v1, permS); + + src_v = // vec_unpackh sign-extends... 
+ (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF)); + // loading filter_v0R is useless, it's already done above + //vector signed short filter_v0R = vec_ld((i * 2 * filterSize) + j, filter); + filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter); + filter_v = vec_perm(filter_v0R, filter_v1R, permF); + + val_v = vec_msums(src_v, filter_v, val_v); + } + + val_s = vec_sums(val_v, vzero); + + vec_st(val_s, 0, tempo); + dst[i] = FFMIN(tempo[3]>>7, (1<<15)-1); + } + + } + } +} + +static inline int yv12toyuy2_unscaled_altivec(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dstParam[], int dstStride_a[]) { + uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY; + // yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]); + uint8_t *ysrc = src[0]; + uint8_t *usrc = src[1]; + uint8_t *vsrc = src[2]; + const int width = c->srcW; + const int height = srcSliceH; + const int lumStride = srcStride[0]; + const int chromStride = srcStride[1]; + const int dstStride = dstStride_a[0]; + const vector unsigned char yperm = vec_lvsl(0, ysrc); + const int vertLumPerChroma = 2; + register unsigned int y; + + if (width&15) { + yv12toyuy2(ysrc, usrc, vsrc, dst, c->srcW, srcSliceH, lumStride, chromStride, dstStride); + return srcSliceH; + } + + /* This code assumes: + + 1) dst is 16 bytes-aligned + 2) dstStride is a multiple of 16 + 3) width is a multiple of 16 + 4) lum & chrom stride are multiples of 8 + */ + + for (y=0; y<height; y++) { + int i; + for (i = 0; i < width - 31; i+= 32) { + const unsigned int j = i >> 1; + vector unsigned char v_yA = vec_ld(i, ysrc); + vector unsigned char v_yB = vec_ld(i + 16, ysrc); + vector unsigned char v_yC = vec_ld(i + 32, ysrc); + vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm); + vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm); + vector unsigned char v_uA = vec_ld(j, usrc); + vector unsigned char v_uB = vec_ld(j + 16, usrc); + vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc)); + vector unsigned char v_vA = vec_ld(j, vsrc); + vector unsigned char v_vB = vec_ld(j + 16, vsrc); + vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc)); + vector unsigned char v_uv_a = vec_mergeh(v_u, v_v); + vector unsigned char v_uv_b = vec_mergel(v_u, v_v); + vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a); + vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a); + vector unsigned char v_yuy2_2 = vec_mergeh(v_y2, v_uv_b); + vector unsigned char v_yuy2_3 = vec_mergel(v_y2, v_uv_b); + vec_st(v_yuy2_0, (i << 1), dst); + vec_st(v_yuy2_1, (i << 1) + 16, dst); + vec_st(v_yuy2_2, (i << 1) + 32, dst); + vec_st(v_yuy2_3, (i << 1) + 48, dst); + } + if (i < width) { + const unsigned int j = i >> 1; + vector unsigned char v_y1 = vec_ld(i, ysrc); + vector unsigned char v_u = vec_ld(j, usrc); + vector unsigned char v_v = vec_ld(j, vsrc); + vector unsigned char v_uv_a = vec_mergeh(v_u, v_v); + vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a); + vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a); + vec_st(v_yuy2_0, (i << 1), dst); + vec_st(v_yuy2_1, (i << 1) + 16, dst); + } + if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) { + usrc += chromStride; + vsrc += chromStride; + } + ysrc += lumStride; + dst += dstStride; + } + + return srcSliceH; +} + +static inline int yv12touyvy_unscaled_altivec(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dstParam[], int dstStride_a[]) { + uint8_t 
*dst=dstParam[0] + dstStride_a[0]*srcSliceY; + // yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]); + uint8_t *ysrc = src[0]; + uint8_t *usrc = src[1]; + uint8_t *vsrc = src[2]; + const int width = c->srcW; + const int height = srcSliceH; + const int lumStride = srcStride[0]; + const int chromStride = srcStride[1]; + const int dstStride = dstStride_a[0]; + const int vertLumPerChroma = 2; + const vector unsigned char yperm = vec_lvsl(0, ysrc); + register unsigned int y; + + if (width&15) { + yv12touyvy(ysrc, usrc, vsrc, dst, c->srcW, srcSliceH, lumStride, chromStride, dstStride); + return srcSliceH; + } + + /* This code assumes: + + 1) dst is 16 bytes-aligned + 2) dstStride is a multiple of 16 + 3) width is a multiple of 16 + 4) lum & chrom stride are multiples of 8 + */ + + for (y=0; y<height; y++) { + int i; + for (i = 0; i < width - 31; i+= 32) { + const unsigned int j = i >> 1; + vector unsigned char v_yA = vec_ld(i, ysrc); + vector unsigned char v_yB = vec_ld(i + 16, ysrc); + vector unsigned char v_yC = vec_ld(i + 32, ysrc); + vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm); + vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm); + vector unsigned char v_uA = vec_ld(j, usrc); + vector unsigned char v_uB = vec_ld(j + 16, usrc); + vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc)); + vector unsigned char v_vA = vec_ld(j, vsrc); + vector unsigned char v_vB = vec_ld(j + 16, vsrc); + vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc)); + vector unsigned char v_uv_a = vec_mergeh(v_u, v_v); + vector unsigned char v_uv_b = vec_mergel(v_u, v_v); + vector unsigned char v_uyvy_0 = vec_mergeh(v_uv_a, v_y1); + vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1); + vector unsigned char v_uyvy_2 = vec_mergeh(v_uv_b, v_y2); + vector unsigned char v_uyvy_3 = vec_mergel(v_uv_b, v_y2); + vec_st(v_uyvy_0, (i << 1), dst); + vec_st(v_uyvy_1, (i << 1) + 16, dst); + vec_st(v_uyvy_2, (i << 1) + 32, dst); + vec_st(v_uyvy_3, (i << 1) + 48, dst); + } + if (i < width) { + const unsigned int j = i >> 1; + vector unsigned char v_y1 = vec_ld(i, ysrc); + vector unsigned char v_u = vec_ld(j, usrc); + vector unsigned char v_v = vec_ld(j, vsrc); + vector unsigned char v_uv_a = vec_mergeh(v_u, v_v); + vector unsigned char v_uyvy_0 = vec_mergeh(v_uv_a, v_y1); + vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1); + vec_st(v_uyvy_0, (i << 1), dst); + vec_st(v_uyvy_1, (i << 1) + 16, dst); + } + if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) { + usrc += chromStride; + vsrc += chromStride; + } + ysrc += lumStride; + dst += dstStride; + } + return srcSliceH; +} diff --git a/libswscale/swscale_avoption.c b/libswscale/swscale_avoption.c new file mode 100644 index 0000000000..996843df1d --- /dev/null +++ b/libswscale/swscale_avoption.c @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "libavutil/avutil.h" +#include "libavcodec/opt.h" +#include "swscale.h" +#include "swscale_internal.h" + +static const char * sws_context_to_name(void * ptr) { + return "swscaler"; +} + +#define OFFSET(x) offsetof(SwsContext, x) +#define DEFAULT 0 +#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM + +static const AVOption options[] = { + { "sws_flags", "scaler/cpu flags", OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, 0, UINT_MAX, VE, "sws_flags" }, + { "fast_bilinear", "fast bilinear", 0, FF_OPT_TYPE_CONST, SWS_FAST_BILINEAR, INT_MIN, INT_MAX, VE, "sws_flags" }, + { "bilinear", "bilinear", 0, FF_OPT_TYPE_CONST, SWS_BILINEAR, INT_MIN, INT_MAX, VE, "sws_flags" }, + { "bicubic", "bicubic", 0, FF_OPT_TYPE_CONST, SWS_BICUBIC, INT_MIN, INT_MAX, VE, "sws_flags" }, + { "experimental", "experimental", 0, FF_OPT_TYPE_CONST, SWS_X, INT_MIN, INT_MAX, VE, "sws_flags" }, + { "neighbor", "nearest neighbor", 0, FF_OPT_TYPE_CONST, SWS_POINT, INT_MIN, INT_MAX, VE, "sws_flags" }, + { "area", "averaging area", 0, FF_OPT_TYPE_CONST, SWS_AREA, INT_MIN, INT_MAX, VE, "sws_flags" }, + { "bicublin", "luma bicubic, chroma bilinear", 0, FF_OPT_TYPE_CONST, SWS_BICUBLIN, INT_MIN, INT_MAX, VE, "sws_flags" }, + { "gauss", "gaussian", 0, FF_OPT_TYPE_CONST, SWS_GAUSS, INT_MIN, INT_MAX, VE, "sws_flags" }, + { "sinc", "sinc", 0, FF_OPT_TYPE_CONST, SWS_SINC, INT_MIN, INT_MAX, VE, "sws_flags" }, + { "lanczos", "lanczos", 0, FF_OPT_TYPE_CONST, SWS_LANCZOS, INT_MIN, INT_MAX, VE, "sws_flags" }, + { "spline", "natural bicubic spline", 0, FF_OPT_TYPE_CONST, SWS_SPLINE, INT_MIN, INT_MAX, VE, "sws_flags" }, + { "print_info", "print info", 0, FF_OPT_TYPE_CONST, SWS_PRINT_INFO, INT_MIN, INT_MAX, VE, "sws_flags" }, + { "accurate_rnd", "accurate rounding", 0, FF_OPT_TYPE_CONST, SWS_ACCURATE_RND, INT_MIN, INT_MAX, VE, "sws_flags" }, + { "mmx", "MMX SIMD acceleration", 0, FF_OPT_TYPE_CONST, SWS_CPU_CAPS_MMX, INT_MIN, INT_MAX, VE, "sws_flags" }, + { "mmx2", "MMX2 SIMD acceleration", 0, FF_OPT_TYPE_CONST, SWS_CPU_CAPS_MMX2, INT_MIN, INT_MAX, VE, "sws_flags" }, + { "3dnow", "3DNOW SIMD acceleration", 0, FF_OPT_TYPE_CONST, SWS_CPU_CAPS_3DNOW, INT_MIN, INT_MAX, VE, "sws_flags" }, + { "altivec", "AltiVec SIMD acceleration", 0, FF_OPT_TYPE_CONST, SWS_CPU_CAPS_ALTIVEC, INT_MIN, INT_MAX, VE, "sws_flags" }, + { "bfin", "Blackfin SIMD acceleration", 0, FF_OPT_TYPE_CONST, SWS_CPU_CAPS_BFIN, INT_MIN, INT_MAX, VE, "sws_flags" }, + { "full_chroma_int", "full chroma interpolation", 0 , FF_OPT_TYPE_CONST, SWS_FULL_CHR_H_INT, INT_MIN, INT_MAX, VE, "sws_flags" }, + { "full_chroma_inp", "full chroma input", 0 , FF_OPT_TYPE_CONST, SWS_FULL_CHR_H_INP, INT_MIN, INT_MAX, VE, "sws_flags" }, + { "bitexact", "", 0 , FF_OPT_TYPE_CONST, SWS_BITEXACT, INT_MIN, INT_MAX, VE, "sws_flags" }, + { NULL } +}; + +const AVClass sws_context_class = { "SWScaler", sws_context_to_name, options }; diff --git a/libswscale/swscale_bfin.c b/libswscale/swscale_bfin.c new file mode 100644 index 0000000000..ed7d9579b6 --- /dev/null +++ b/libswscale/swscale_bfin.c @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2007 Marc Hoffman <marc.hoffman@analog.com> + * + * Blackfin software video scaler operations + * + * This file is part of FFmpeg. 
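+ *
+ * The ff_bfin_* routines declared below carry the L1CODE attribute so
+ * that FDPIC builds keep them in on-chip L1 instruction memory
+ * (l1_text); on other builds the attribute expands to nothing.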
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <inttypes.h> +#include <assert.h> +#include "config.h" +#include <unistd.h> +#include "rgb2rgb.h" +#include "swscale.h" +#include "swscale_internal.h" + +#ifdef __FDPIC__ +#define L1CODE __attribute__ ((l1_text)) +#else +#define L1CODE +#endif + +int ff_bfin_uyvytoyv12 (const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, + long width, long height, + long lumStride, long chromStride, long srcStride) L1CODE; + +int ff_bfin_yuyvtoyv12 (const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, + long width, long height, + long lumStride, long chromStride, long srcStride) L1CODE; + +static int uyvytoyv12_unscaled (SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]) +{ + uint8_t *dsty = dst[0] + dstStride[0]*srcSliceY; + uint8_t *dstu = dst[1] + dstStride[1]*srcSliceY/2; + uint8_t *dstv = dst[2] + dstStride[2]*srcSliceY/2; + uint8_t *ip = src[0] + srcStride[0]*srcSliceY; + int w = dstStride[0]; + + ff_bfin_uyvytoyv12 (ip, dsty, dstu, dstv, w, srcSliceH, dstStride[0], dstStride[1], srcStride[0]); + + return srcSliceH; +} + +static int yuyvtoyv12_unscaled (SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]) +{ + uint8_t *dsty = dst[0] + dstStride[0]*srcSliceY; + uint8_t *dstu = dst[1] + dstStride[1]*srcSliceY/2; + uint8_t *dstv = dst[2] + dstStride[2]*srcSliceY/2; + uint8_t *ip = src[0] + srcStride[0]*srcSliceY; + int w = dstStride[0]; + + ff_bfin_yuyvtoyv12 (ip, dsty, dstu, dstv, w, srcSliceH, dstStride[0], dstStride[1], srcStride[0]); + + return srcSliceH; +} + + +void ff_bfin_get_unscaled_swscale (SwsContext *c) +{ + SwsFunc swScale = c->swScale; + if (c->flags & SWS_CPU_CAPS_BFIN) + if (c->dstFormat == PIX_FMT_YUV420P) + if (c->srcFormat == PIX_FMT_UYVY422) { + av_log (NULL, AV_LOG_VERBOSE, "selecting Blackfin optimized uyvytoyv12_unscaled\n"); + c->swScale = uyvytoyv12_unscaled; + } + if (c->dstFormat == PIX_FMT_YUV420P) + if (c->srcFormat == PIX_FMT_YUYV422) { + av_log (NULL, AV_LOG_VERBOSE, "selecting Blackfin optimized yuyvtoyv12_unscaled\n"); + c->swScale = yuyvtoyv12_unscaled; + } +} diff --git a/libswscale/swscale_internal.h b/libswscale/swscale_internal.h new file mode 100644 index 0000000000..cdf3754d14 --- /dev/null +++ b/libswscale/swscale_internal.h @@ -0,0 +1,324 @@ +/* + * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWSCALE_SWSCALE_INTERNAL_H +#define SWSCALE_SWSCALE_INTERNAL_H + +#include "config.h" + +#if HAVE_ALTIVEC_H +#include <altivec.h> +#endif + +#include "libavutil/avutil.h" + +#define STR(s) AV_TOSTRING(s) //AV_STRINGIFY is too long + +#define MAX_FILTER_SIZE 256 + +#define VOFW 2048 +#define VOF (VOFW*2) + +#ifdef WORDS_BIGENDIAN +#define ALT32_CORR (-1) +#else +#define ALT32_CORR 1 +#endif + +#if ARCH_X86_64 +# define APCK_PTR2 8 +# define APCK_COEF 16 +# define APCK_SIZE 24 +#else +# define APCK_PTR2 4 +# define APCK_COEF 8 +# define APCK_SIZE 16 +#endif + +struct SwsContext; + +typedef int (*SwsFunc)(struct SwsContext *context, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]); + +/* This struct should be aligned on at least a 32-byte boundary. */ +typedef struct SwsContext{ + /** + * info on struct for av_log + */ + const AVClass *av_class; + + /** + * Note that src, dst, srcStride, dstStride will be copied in the + * sws_scale() wrapper so they can be freely modified here. + */ + SwsFunc swScale; + int srcW, srcH, dstH; + int chrSrcW, chrSrcH, chrDstW, chrDstH; + int lumXInc, chrXInc; + int lumYInc, chrYInc; + enum PixelFormat dstFormat, srcFormat; ///< format 4:2:0 type is always YV12 + int origDstFormat, origSrcFormat; ///< format + int chrSrcHSubSample, chrSrcVSubSample; + int chrIntHSubSample, chrIntVSubSample; + int chrDstHSubSample, chrDstVSubSample; + int vChrDrop; + int sliceDir; + double param[2]; + + uint32_t pal_yuv[256]; + uint32_t pal_rgb[256]; + + int16_t **lumPixBuf; + int16_t **chrPixBuf; + int16_t *hLumFilter; + int16_t *hLumFilterPos; + int16_t *hChrFilter; + int16_t *hChrFilterPos; + int16_t *vLumFilter; + int16_t *vLumFilterPos; + int16_t *vChrFilter; + int16_t *vChrFilterPos; + + uint8_t formatConvBuffer[VOF]; //FIXME dynamic allocation, but we have to change a lot of code for this to be useful + + int hLumFilterSize; + int hChrFilterSize; + int vLumFilterSize; + int vChrFilterSize; + int vLumBufSize; + int vChrBufSize; + + uint8_t *funnyYCode; + uint8_t *funnyUVCode; + int32_t *lumMmx2FilterPos; + int32_t *chrMmx2FilterPos; + int16_t *lumMmx2Filter; + int16_t *chrMmx2Filter; + + int canMMX2BeUsed; + + int lastInLumBuf; + int lastInChrBuf; + int lumBufIndex; + int chrBufIndex; + int dstY; + int flags; + void * yuvTable; // pointer to the yuv->rgb table start so it can be freed() + uint8_t * table_rV[256]; + uint8_t * table_gU[256]; + int table_gV[256]; + uint8_t * table_bU[256]; + + //Colorspace stuff + int contrast, brightness, saturation; // for sws_getColorspaceDetails + int srcColorspaceTable[4]; + int dstColorspaceTable[4]; + int srcRange, dstRange; + int yuv2rgb_y_offset; + int yuv2rgb_y_coeff; + int yuv2rgb_v2r_coeff; + int yuv2rgb_v2g_coeff; + int yuv2rgb_u2g_coeff; + int yuv2rgb_u2b_coeff; + +#define RED_DITHER "0*8" +#define GREEN_DITHER "1*8" +#define BLUE_DITHER "2*8" +#define Y_COEFF "3*8" +#define VR_COEFF "4*8" +#define UB_COEFF "5*8" +#define VG_COEFF "6*8" +#define UG_COEFF "7*8" +#define Y_OFFSET "8*8" +#define U_OFFSET 
"9*8" +#define V_OFFSET "10*8" +#define LUM_MMX_FILTER_OFFSET "11*8" +#define CHR_MMX_FILTER_OFFSET "11*8+4*4*256" +#define DSTW_OFFSET "11*8+4*4*256*2" //do not change, it is hardcoded in the ASM +#define ESP_OFFSET "11*8+4*4*256*2+8" +#define VROUNDER_OFFSET "11*8+4*4*256*2+16" +#define U_TEMP "11*8+4*4*256*2+24" +#define V_TEMP "11*8+4*4*256*2+32" + + uint64_t redDither __attribute__((aligned(8))); + uint64_t greenDither __attribute__((aligned(8))); + uint64_t blueDither __attribute__((aligned(8))); + + uint64_t yCoeff __attribute__((aligned(8))); + uint64_t vrCoeff __attribute__((aligned(8))); + uint64_t ubCoeff __attribute__((aligned(8))); + uint64_t vgCoeff __attribute__((aligned(8))); + uint64_t ugCoeff __attribute__((aligned(8))); + uint64_t yOffset __attribute__((aligned(8))); + uint64_t uOffset __attribute__((aligned(8))); + uint64_t vOffset __attribute__((aligned(8))); + int32_t lumMmxFilter[4*MAX_FILTER_SIZE]; + int32_t chrMmxFilter[4*MAX_FILTER_SIZE]; + int dstW; + uint64_t esp __attribute__((aligned(8))); + uint64_t vRounder __attribute__((aligned(8))); + uint64_t u_temp __attribute__((aligned(8))); + uint64_t v_temp __attribute__((aligned(8))); + +#if HAVE_ALTIVEC + + vector signed short CY; + vector signed short CRV; + vector signed short CBU; + vector signed short CGU; + vector signed short CGV; + vector signed short OY; + vector unsigned short CSHIFT; + vector signed short *vYCoeffsBank, *vCCoeffsBank; + +#endif + + +#if ARCH_BFIN + uint32_t oy __attribute__((aligned(4))); + uint32_t oc __attribute__((aligned(4))); + uint32_t zero __attribute__((aligned(4))); + uint32_t cy __attribute__((aligned(4))); + uint32_t crv __attribute__((aligned(4))); + uint32_t rmask __attribute__((aligned(4))); + uint32_t cbu __attribute__((aligned(4))); + uint32_t bmask __attribute__((aligned(4))); + uint32_t cgu __attribute__((aligned(4))); + uint32_t cgv __attribute__((aligned(4))); + uint32_t gmask __attribute__((aligned(4))); +#endif + +#if HAVE_VIS + uint64_t sparc_coeffs[10] __attribute__((aligned(8))); +#endif + +} SwsContext; +//FIXME check init (where 0) + +SwsFunc sws_yuv2rgb_get_func_ptr (SwsContext *c); +int sws_yuv2rgb_c_init_tables (SwsContext *c, const int inv_table[4], int fullRange, int brightness, int contrast, int saturation); + +void sws_yuv2rgb_altivec_init_tables (SwsContext *c, const int inv_table[4],int brightness,int contrast, int saturation); +SwsFunc sws_yuv2rgb_init_altivec (SwsContext *c); +void altivec_yuv2packedX (SwsContext *c, + int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize, + int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, + uint8_t *dest, int dstW, int dstY); + +const char *sws_format_name(int format); + +//FIXME replace this with something faster +#define isPlanarYUV(x) ( \ + (x)==PIX_FMT_YUV410P \ + || (x)==PIX_FMT_YUV420P \ + || (x)==PIX_FMT_YUV411P \ + || (x)==PIX_FMT_YUV422P \ + || (x)==PIX_FMT_YUV444P \ + || (x)==PIX_FMT_YUV440P \ + || (x)==PIX_FMT_NV12 \ + || (x)==PIX_FMT_NV21 \ + ) +#define isYUV(x) ( \ + (x)==PIX_FMT_UYVY422 \ + || (x)==PIX_FMT_YUYV422 \ + || isPlanarYUV(x) \ + ) +#define isGray(x) ( \ + (x)==PIX_FMT_GRAY8 \ + || (x)==PIX_FMT_GRAY16BE \ + || (x)==PIX_FMT_GRAY16LE \ + ) +#define isGray16(x) ( \ + (x)==PIX_FMT_GRAY16BE \ + || (x)==PIX_FMT_GRAY16LE \ + ) +#define isRGB(x) ( \ + (x)==PIX_FMT_RGB32 \ + || (x)==PIX_FMT_RGB32_1 \ + || (x)==PIX_FMT_RGB24 \ + || (x)==PIX_FMT_RGB565 \ + || (x)==PIX_FMT_RGB555 \ + || (x)==PIX_FMT_RGB8 \ + || (x)==PIX_FMT_RGB4 \ + || (x)==PIX_FMT_RGB4_BYTE \ + || (x)==PIX_FMT_MONOBLACK \ + || 
(x)==PIX_FMT_MONOWHITE \ + ) +#define isBGR(x) ( \ + (x)==PIX_FMT_BGR32 \ + || (x)==PIX_FMT_BGR32_1 \ + || (x)==PIX_FMT_BGR24 \ + || (x)==PIX_FMT_BGR565 \ + || (x)==PIX_FMT_BGR555 \ + || (x)==PIX_FMT_BGR8 \ + || (x)==PIX_FMT_BGR4 \ + || (x)==PIX_FMT_BGR4_BYTE \ + || (x)==PIX_FMT_MONOBLACK \ + || (x)==PIX_FMT_MONOWHITE \ + ) +#define isALPHA(x) ( \ + (x)==PIX_FMT_BGR32 \ + || (x)==PIX_FMT_BGR32_1 \ + || (x)==PIX_FMT_RGB32 \ + || (x)==PIX_FMT_RGB32_1 \ + || (x)==PIX_FMT_YUVA420P \ + ) + +static inline int fmt_depth(int fmt) +{ + switch(fmt) { + case PIX_FMT_BGRA: + case PIX_FMT_ABGR: + case PIX_FMT_RGBA: + case PIX_FMT_ARGB: + return 32; + case PIX_FMT_BGR24: + case PIX_FMT_RGB24: + return 24; + case PIX_FMT_BGR565: + case PIX_FMT_RGB565: + case PIX_FMT_GRAY16BE: + case PIX_FMT_GRAY16LE: + return 16; + case PIX_FMT_BGR555: + case PIX_FMT_RGB555: + return 15; + case PIX_FMT_BGR8: + case PIX_FMT_RGB8: + return 8; + case PIX_FMT_BGR4: + case PIX_FMT_RGB4: + case PIX_FMT_BGR4_BYTE: + case PIX_FMT_RGB4_BYTE: + return 4; + case PIX_FMT_MONOBLACK: + case PIX_FMT_MONOWHITE: + return 1; + default: + return 0; + } +} + +extern const uint64_t ff_dither4[2]; +extern const uint64_t ff_dither8[2]; + +extern const AVClass sws_context_class; + +#endif /* SWSCALE_SWSCALE_INTERNAL_H */ diff --git a/libswscale/swscale_template.c b/libswscale/swscale_template.c new file mode 100644 index 0000000000..3262b6ee85 --- /dev/null +++ b/libswscale/swscale_template.c @@ -0,0 +1,3041 @@ +/* + * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + * The C code (not assembly, MMX, ...) of this file can be used + * under the LGPL license. + */ + +#undef REAL_MOVNTQ +#undef MOVNTQ +#undef PAVGB +#undef PREFETCH +#undef PREFETCHW +#undef EMMS +#undef SFENCE + +#if HAVE_AMD3DNOW +/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. 
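+   (femms is allowed to leave the register contents undefined rather
+   than restoring a defined x87 state, which is what makes it cheaper
+   on K6.)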
*/ +#define EMMS "femms" +#else +#define EMMS "emms" +#endif + +#if HAVE_AMD3DNOW +#define PREFETCH "prefetch" +#define PREFETCHW "prefetchw" +#elif HAVE_MMX2 +#define PREFETCH "prefetchnta" +#define PREFETCHW "prefetcht0" +#else +#define PREFETCH " # nop" +#define PREFETCHW " # nop" +#endif + +#if HAVE_MMX2 +#define SFENCE "sfence" +#else +#define SFENCE " # nop" +#endif + +#if HAVE_MMX2 +#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t" +#elif HAVE_AMD3DNOW +#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t" +#endif + +#if HAVE_MMX2 +#define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t" +#else +#define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t" +#endif +#define MOVNTQ(a,b) REAL_MOVNTQ(a,b) + +#if HAVE_ALTIVEC +#include "swscale_altivec_template.c" +#endif + +#define YSCALEYUV2YV12X(x, offset, dest, width) \ + __asm__ volatile(\ + "xor %%"REG_a", %%"REG_a" \n\t"\ + "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\ + "movq %%mm3, %%mm4 \n\t"\ + "lea " offset "(%0), %%"REG_d" \n\t"\ + "mov (%%"REG_d"), %%"REG_S" \n\t"\ + ASMALIGN(4) /* FIXME Unroll? */\ + "1: \n\t"\ + "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\ + "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\ + "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* srcData */\ + "add $16, %%"REG_d" \n\t"\ + "mov (%%"REG_d"), %%"REG_S" \n\t"\ + "test %%"REG_S", %%"REG_S" \n\t"\ + "pmulhw %%mm0, %%mm2 \n\t"\ + "pmulhw %%mm0, %%mm5 \n\t"\ + "paddw %%mm2, %%mm3 \n\t"\ + "paddw %%mm5, %%mm4 \n\t"\ + " jnz 1b \n\t"\ + "psraw $3, %%mm3 \n\t"\ + "psraw $3, %%mm4 \n\t"\ + "packuswb %%mm4, %%mm3 \n\t"\ + MOVNTQ(%%mm3, (%1, %%REGa))\ + "add $8, %%"REG_a" \n\t"\ + "cmp %2, %%"REG_a" \n\t"\ + "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\ + "movq %%mm3, %%mm4 \n\t"\ + "lea " offset "(%0), %%"REG_d" \n\t"\ + "mov (%%"REG_d"), %%"REG_S" \n\t"\ + "jb 1b \n\t"\ + :: "r" (&c->redDither),\ + "r" (dest), "g" (width)\ + : "%"REG_a, "%"REG_d, "%"REG_S\ + ); + +#define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \ + __asm__ volatile(\ + "lea " offset "(%0), %%"REG_d" \n\t"\ + "xor %%"REG_a", %%"REG_a" \n\t"\ + "pxor %%mm4, %%mm4 \n\t"\ + "pxor %%mm5, %%mm5 \n\t"\ + "pxor %%mm6, %%mm6 \n\t"\ + "pxor %%mm7, %%mm7 \n\t"\ + "mov (%%"REG_d"), %%"REG_S" \n\t"\ + ASMALIGN(4) \ + "1: \n\t"\ + "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* srcData */\ + "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\ + "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\ + "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm1 \n\t" /* srcData */\ + "movq %%mm0, %%mm3 \n\t"\ + "punpcklwd %%mm1, %%mm0 \n\t"\ + "punpckhwd %%mm1, %%mm3 \n\t"\ + "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\ + "pmaddwd %%mm1, %%mm0 \n\t"\ + "pmaddwd %%mm1, %%mm3 \n\t"\ + "paddd %%mm0, %%mm4 \n\t"\ + "paddd %%mm3, %%mm5 \n\t"\ + "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* srcData */\ + "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\ + "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\ + "test %%"REG_S", %%"REG_S" \n\t"\ + "movq %%mm2, %%mm0 \n\t"\ + "punpcklwd %%mm3, %%mm2 \n\t"\ + "punpckhwd %%mm3, %%mm0 \n\t"\ + "pmaddwd %%mm1, %%mm2 \n\t"\ + "pmaddwd %%mm1, %%mm0 \n\t"\ + "paddd %%mm2, %%mm6 \n\t"\ + "paddd %%mm0, %%mm7 \n\t"\ + " jnz 1b \n\t"\ + "psrad $16, %%mm4 \n\t"\ + "psrad $16, %%mm5 \n\t"\ + "psrad $16, %%mm6 \n\t"\ + "psrad $16, %%mm7 \n\t"\ + "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\ + "packssdw %%mm5, %%mm4 \n\t"\ + "packssdw %%mm7, %%mm6 \n\t"\ + "paddw %%mm0, %%mm4 \n\t"\ + "paddw %%mm0, %%mm6 \n\t"\ + "psraw $3, %%mm4 \n\t"\ + "psraw $3, %%mm6 
\n\t"\ + "packuswb %%mm6, %%mm4 \n\t"\ + MOVNTQ(%%mm4, (%1, %%REGa))\ + "add $8, %%"REG_a" \n\t"\ + "cmp %2, %%"REG_a" \n\t"\ + "lea " offset "(%0), %%"REG_d" \n\t"\ + "pxor %%mm4, %%mm4 \n\t"\ + "pxor %%mm5, %%mm5 \n\t"\ + "pxor %%mm6, %%mm6 \n\t"\ + "pxor %%mm7, %%mm7 \n\t"\ + "mov (%%"REG_d"), %%"REG_S" \n\t"\ + "jb 1b \n\t"\ + :: "r" (&c->redDither),\ + "r" (dest), "g" (width)\ + : "%"REG_a, "%"REG_d, "%"REG_S\ + ); + +#define YSCALEYUV2YV121 \ + "mov %2, %%"REG_a" \n\t"\ + ASMALIGN(4) /* FIXME Unroll? */\ + "1: \n\t"\ + "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\ + "movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"\ + "psraw $7, %%mm0 \n\t"\ + "psraw $7, %%mm1 \n\t"\ + "packuswb %%mm1, %%mm0 \n\t"\ + MOVNTQ(%%mm0, (%1, %%REGa))\ + "add $8, %%"REG_a" \n\t"\ + "jnc 1b \n\t" + +#define YSCALEYUV2YV121_ACCURATE \ + "mov %2, %%"REG_a" \n\t"\ + "pcmpeqw %%mm7, %%mm7 \n\t"\ + "psrlw $15, %%mm7 \n\t"\ + "psllw $6, %%mm7 \n\t"\ + ASMALIGN(4) /* FIXME Unroll? */\ + "1: \n\t"\ + "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\ + "movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"\ + "paddsw %%mm7, %%mm0 \n\t"\ + "paddsw %%mm7, %%mm1 \n\t"\ + "psraw $7, %%mm0 \n\t"\ + "psraw $7, %%mm1 \n\t"\ + "packuswb %%mm1, %%mm0 \n\t"\ + MOVNTQ(%%mm0, (%1, %%REGa))\ + "add $8, %%"REG_a" \n\t"\ + "jnc 1b \n\t" + +/* + :: "m" (-lumFilterSize), "m" (-chrFilterSize), + "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4), + "r" (dest), "m" (dstW), + "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize) + : "%eax", "%ebx", "%ecx", "%edx", "%esi" +*/ +#define YSCALEYUV2PACKEDX_UV \ + __asm__ volatile(\ + "xor %%"REG_a", %%"REG_a" \n\t"\ + ASMALIGN(4)\ + "nop \n\t"\ + "1: \n\t"\ + "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\ + "mov (%%"REG_d"), %%"REG_S" \n\t"\ + "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\ + "movq %%mm3, %%mm4 \n\t"\ + ASMALIGN(4)\ + "2: \n\t"\ + "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\ + "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\ + "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\ + "add $16, %%"REG_d" \n\t"\ + "mov (%%"REG_d"), %%"REG_S" \n\t"\ + "pmulhw %%mm0, %%mm2 \n\t"\ + "pmulhw %%mm0, %%mm5 \n\t"\ + "paddw %%mm2, %%mm3 \n\t"\ + "paddw %%mm5, %%mm4 \n\t"\ + "test %%"REG_S", %%"REG_S" \n\t"\ + " jnz 2b \n\t"\ + +#define YSCALEYUV2PACKEDX_YA(offset) \ + "lea "offset"(%0), %%"REG_d" \n\t"\ + "mov (%%"REG_d"), %%"REG_S" \n\t"\ + "movq "VROUNDER_OFFSET"(%0), %%mm1 \n\t"\ + "movq %%mm1, %%mm7 \n\t"\ + ASMALIGN(4)\ + "2: \n\t"\ + "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\ + "movq (%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y1srcData */\ + "movq 8(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* Y2srcData */\ + "add $16, %%"REG_d" \n\t"\ + "mov (%%"REG_d"), %%"REG_S" \n\t"\ + "pmulhw %%mm0, %%mm2 \n\t"\ + "pmulhw %%mm0, %%mm5 \n\t"\ + "paddw %%mm2, %%mm1 \n\t"\ + "paddw %%mm5, %%mm7 \n\t"\ + "test %%"REG_S", %%"REG_S" \n\t"\ + " jnz 2b \n\t"\ + +#define YSCALEYUV2PACKEDX \ + YSCALEYUV2PACKEDX_UV \ + YSCALEYUV2PACKEDX_YA(LUM_MMX_FILTER_OFFSET) \ + +#define YSCALEYUV2PACKEDX_END \ + :: "r" (&c->redDither), \ + "m" (dummy), "m" (dummy), "m" (dummy),\ + "r" (dest), "m" (dstW) \ + : "%"REG_a, "%"REG_d, "%"REG_S \ + ); + +#define YSCALEYUV2PACKEDX_ACCURATE_UV \ + __asm__ volatile(\ + "xor %%"REG_a", %%"REG_a" \n\t"\ + ASMALIGN(4)\ + "nop \n\t"\ + "1: \n\t"\ + "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\ + "mov (%%"REG_d"), %%"REG_S" \n\t"\ + "pxor %%mm4, %%mm4 \n\t"\ + "pxor %%mm5, %%mm5 \n\t"\ + "pxor %%mm6, %%mm6 \n\t"\ + "pxor %%mm7, %%mm7 \n\t"\ + ASMALIGN(4)\ + 
"2: \n\t"\ + "movq (%%"REG_S", %%"REG_a"), %%mm0 \n\t" /* UsrcData */\ + "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* VsrcData */\ + "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\ + "movq (%%"REG_S", %%"REG_a"), %%mm1 \n\t" /* UsrcData */\ + "movq %%mm0, %%mm3 \n\t"\ + "punpcklwd %%mm1, %%mm0 \n\t"\ + "punpckhwd %%mm1, %%mm3 \n\t"\ + "movq "STR(APCK_COEF)"(%%"REG_d"),%%mm1 \n\t" /* filterCoeff */\ + "pmaddwd %%mm1, %%mm0 \n\t"\ + "pmaddwd %%mm1, %%mm3 \n\t"\ + "paddd %%mm0, %%mm4 \n\t"\ + "paddd %%mm3, %%mm5 \n\t"\ + "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm3 \n\t" /* VsrcData */\ + "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\ + "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\ + "test %%"REG_S", %%"REG_S" \n\t"\ + "movq %%mm2, %%mm0 \n\t"\ + "punpcklwd %%mm3, %%mm2 \n\t"\ + "punpckhwd %%mm3, %%mm0 \n\t"\ + "pmaddwd %%mm1, %%mm2 \n\t"\ + "pmaddwd %%mm1, %%mm0 \n\t"\ + "paddd %%mm2, %%mm6 \n\t"\ + "paddd %%mm0, %%mm7 \n\t"\ + " jnz 2b \n\t"\ + "psrad $16, %%mm4 \n\t"\ + "psrad $16, %%mm5 \n\t"\ + "psrad $16, %%mm6 \n\t"\ + "psrad $16, %%mm7 \n\t"\ + "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\ + "packssdw %%mm5, %%mm4 \n\t"\ + "packssdw %%mm7, %%mm6 \n\t"\ + "paddw %%mm0, %%mm4 \n\t"\ + "paddw %%mm0, %%mm6 \n\t"\ + "movq %%mm4, "U_TEMP"(%0) \n\t"\ + "movq %%mm6, "V_TEMP"(%0) \n\t"\ + +#define YSCALEYUV2PACKEDX_ACCURATE_YA(offset) \ + "lea "offset"(%0), %%"REG_d" \n\t"\ + "mov (%%"REG_d"), %%"REG_S" \n\t"\ + "pxor %%mm1, %%mm1 \n\t"\ + "pxor %%mm5, %%mm5 \n\t"\ + "pxor %%mm7, %%mm7 \n\t"\ + "pxor %%mm6, %%mm6 \n\t"\ + ASMALIGN(4)\ + "2: \n\t"\ + "movq (%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* Y1srcData */\ + "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y2srcData */\ + "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\ + "movq (%%"REG_S", %%"REG_a", 2), %%mm4 \n\t" /* Y1srcData */\ + "movq %%mm0, %%mm3 \n\t"\ + "punpcklwd %%mm4, %%mm0 \n\t"\ + "punpckhwd %%mm4, %%mm3 \n\t"\ + "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm4 \n\t" /* filterCoeff */\ + "pmaddwd %%mm4, %%mm0 \n\t"\ + "pmaddwd %%mm4, %%mm3 \n\t"\ + "paddd %%mm0, %%mm1 \n\t"\ + "paddd %%mm3, %%mm5 \n\t"\ + "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* Y2srcData */\ + "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\ + "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\ + "test %%"REG_S", %%"REG_S" \n\t"\ + "movq %%mm2, %%mm0 \n\t"\ + "punpcklwd %%mm3, %%mm2 \n\t"\ + "punpckhwd %%mm3, %%mm0 \n\t"\ + "pmaddwd %%mm4, %%mm2 \n\t"\ + "pmaddwd %%mm4, %%mm0 \n\t"\ + "paddd %%mm2, %%mm7 \n\t"\ + "paddd %%mm0, %%mm6 \n\t"\ + " jnz 2b \n\t"\ + "psrad $16, %%mm1 \n\t"\ + "psrad $16, %%mm5 \n\t"\ + "psrad $16, %%mm7 \n\t"\ + "psrad $16, %%mm6 \n\t"\ + "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\ + "packssdw %%mm5, %%mm1 \n\t"\ + "packssdw %%mm6, %%mm7 \n\t"\ + "paddw %%mm0, %%mm1 \n\t"\ + "paddw %%mm0, %%mm7 \n\t"\ + "movq "U_TEMP"(%0), %%mm3 \n\t"\ + "movq "V_TEMP"(%0), %%mm4 \n\t"\ + +#define YSCALEYUV2PACKEDX_ACCURATE \ + YSCALEYUV2PACKEDX_ACCURATE_UV \ + YSCALEYUV2PACKEDX_ACCURATE_YA(LUM_MMX_FILTER_OFFSET) + +#define YSCALEYUV2RGBX \ + "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\ + "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\ + "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\ + "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\ + "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\ + "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\ +/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\ + "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\ + "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\ + "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\ + "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\ + 
"pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\ + "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\ +/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\ + "paddw %%mm3, %%mm4 \n\t"\ + "movq %%mm2, %%mm0 \n\t"\ + "movq %%mm5, %%mm6 \n\t"\ + "movq %%mm4, %%mm3 \n\t"\ + "punpcklwd %%mm2, %%mm2 \n\t"\ + "punpcklwd %%mm5, %%mm5 \n\t"\ + "punpcklwd %%mm4, %%mm4 \n\t"\ + "paddw %%mm1, %%mm2 \n\t"\ + "paddw %%mm1, %%mm5 \n\t"\ + "paddw %%mm1, %%mm4 \n\t"\ + "punpckhwd %%mm0, %%mm0 \n\t"\ + "punpckhwd %%mm6, %%mm6 \n\t"\ + "punpckhwd %%mm3, %%mm3 \n\t"\ + "paddw %%mm7, %%mm0 \n\t"\ + "paddw %%mm7, %%mm6 \n\t"\ + "paddw %%mm7, %%mm3 \n\t"\ + /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\ + "packuswb %%mm0, %%mm2 \n\t"\ + "packuswb %%mm6, %%mm5 \n\t"\ + "packuswb %%mm3, %%mm4 \n\t"\ + +#define REAL_YSCALEYUV2PACKED(index, c) \ + "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\ + "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1 \n\t"\ + "psraw $3, %%mm0 \n\t"\ + "psraw $3, %%mm1 \n\t"\ + "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c") \n\t"\ + "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c") \n\t"\ + "xor "#index", "#index" \n\t"\ + ASMALIGN(4)\ + "1: \n\t"\ + "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\ + "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\ + "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\ + "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\ + "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\ + "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\ + "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\ + "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\ + "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\ + "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\ + "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\ + "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\ + "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\ + "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\ + "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\ + "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\ + "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\ + "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\ + "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\ + "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\ + "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\ + "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\ + "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\ + "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\ + "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\ + +#define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c) + +#define REAL_YSCALEYUV2RGB_UV(index, c) \ + "xor "#index", "#index" \n\t"\ + ASMALIGN(4)\ + "1: \n\t"\ + "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\ + "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\ + "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\ + "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\ + "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\ + "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\ + "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\ + "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\ + "pmulhw 
%%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\ + "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\ + "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\ + "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\ + "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\ + "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\ + "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\ + "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\ + "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\ + "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\ + "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\ + /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\ + +#define REAL_YSCALEYUV2RGB_YA(index, c) \ + "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\ + "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\ + "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\ + "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\ + "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\ + "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\ + "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\ + "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\ + "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\ + "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\ + "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\ + "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\ + +#define REAL_YSCALEYUV2RGB_COEFF(c) \ + "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\ + "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\ + "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\ + "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\ + "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\ + "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\ + /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\ + "paddw %%mm3, %%mm4 \n\t"\ + "movq %%mm2, %%mm0 \n\t"\ + "movq %%mm5, %%mm6 \n\t"\ + "movq %%mm4, %%mm3 \n\t"\ + "punpcklwd %%mm2, %%mm2 \n\t"\ + "punpcklwd %%mm5, %%mm5 \n\t"\ + "punpcklwd %%mm4, %%mm4 \n\t"\ + "paddw %%mm1, %%mm2 \n\t"\ + "paddw %%mm1, %%mm5 \n\t"\ + "paddw %%mm1, %%mm4 \n\t"\ + "punpckhwd %%mm0, %%mm0 \n\t"\ + "punpckhwd %%mm6, %%mm6 \n\t"\ + "punpckhwd %%mm3, %%mm3 \n\t"\ + "paddw %%mm7, %%mm0 \n\t"\ + "paddw %%mm7, %%mm6 \n\t"\ + "paddw %%mm7, %%mm3 \n\t"\ + /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\ + "packuswb %%mm0, %%mm2 \n\t"\ + "packuswb %%mm6, %%mm5 \n\t"\ + "packuswb %%mm3, %%mm4 \n\t"\ + +#define YSCALEYUV2RGB_YA(index, c) REAL_YSCALEYUV2RGB_YA(index, c) + +#define YSCALEYUV2RGB(index, c) \ + REAL_YSCALEYUV2RGB_UV(index, c) \ + REAL_YSCALEYUV2RGB_YA(index, c) \ + REAL_YSCALEYUV2RGB_COEFF(c) + +#define REAL_YSCALEYUV2PACKED1(index, c) \ + "xor "#index", "#index" \n\t"\ + ASMALIGN(4)\ + "1: \n\t"\ + "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\ + "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\ + "psraw $7, %%mm3 \n\t" \ + "psraw $7, %%mm4 \n\t" \ + "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\ + "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\ + "psraw $7, %%mm1 \n\t" \ + "psraw $7, %%mm7 \n\t" \ + +#define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c) + +#define REAL_YSCALEYUV2RGB1(index, c) \ + "xor "#index", "#index" \n\t"\ + ASMALIGN(4)\ + "1: \n\t"\ + "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\ + "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\ + "psraw $4, %%mm3 \n\t" /* 
uvbuf0[eax] - uvbuf1[eax] >>4*/\ + "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\ + "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\ + "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\ + "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\ + "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\ + "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\ + "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\ + /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\ + "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\ + "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\ + "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\ + "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\ + "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\ + "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\ + "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\ + "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\ + "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\ + "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\ + /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\ + "paddw %%mm3, %%mm4 \n\t"\ + "movq %%mm2, %%mm0 \n\t"\ + "movq %%mm5, %%mm6 \n\t"\ + "movq %%mm4, %%mm3 \n\t"\ + "punpcklwd %%mm2, %%mm2 \n\t"\ + "punpcklwd %%mm5, %%mm5 \n\t"\ + "punpcklwd %%mm4, %%mm4 \n\t"\ + "paddw %%mm1, %%mm2 \n\t"\ + "paddw %%mm1, %%mm5 \n\t"\ + "paddw %%mm1, %%mm4 \n\t"\ + "punpckhwd %%mm0, %%mm0 \n\t"\ + "punpckhwd %%mm6, %%mm6 \n\t"\ + "punpckhwd %%mm3, %%mm3 \n\t"\ + "paddw %%mm7, %%mm0 \n\t"\ + "paddw %%mm7, %%mm6 \n\t"\ + "paddw %%mm7, %%mm3 \n\t"\ + /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\ + "packuswb %%mm0, %%mm2 \n\t"\ + "packuswb %%mm6, %%mm5 \n\t"\ + "packuswb %%mm3, %%mm4 \n\t"\ + +#define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c) + +#define REAL_YSCALEYUV2PACKED1b(index, c) \ + "xor "#index", "#index" \n\t"\ + ASMALIGN(4)\ + "1: \n\t"\ + "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\ + "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\ + "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\ + "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\ + "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\ + "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\ + "psrlw $8, %%mm3 \n\t" \ + "psrlw $8, %%mm4 \n\t" \ + "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\ + "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\ + "psraw $7, %%mm1 \n\t" \ + "psraw $7, %%mm7 \n\t" +#define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c) + +// do vertical chrominance interpolation +#define REAL_YSCALEYUV2RGB1b(index, c) \ + "xor "#index", "#index" \n\t"\ + ASMALIGN(4)\ + "1: \n\t"\ + "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\ + "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\ + "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\ + "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\ + "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\ + "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\ + "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\ + "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\ + "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\ + "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\ + "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\ + "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\ + "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\ + "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\ + /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\ + "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\ + "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\ + "psraw 
$4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\ + "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\ + "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\ + "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\ + "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\ + "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\ + "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\ + "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\ + /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\ + "paddw %%mm3, %%mm4 \n\t"\ + "movq %%mm2, %%mm0 \n\t"\ + "movq %%mm5, %%mm6 \n\t"\ + "movq %%mm4, %%mm3 \n\t"\ + "punpcklwd %%mm2, %%mm2 \n\t"\ + "punpcklwd %%mm5, %%mm5 \n\t"\ + "punpcklwd %%mm4, %%mm4 \n\t"\ + "paddw %%mm1, %%mm2 \n\t"\ + "paddw %%mm1, %%mm5 \n\t"\ + "paddw %%mm1, %%mm4 \n\t"\ + "punpckhwd %%mm0, %%mm0 \n\t"\ + "punpckhwd %%mm6, %%mm6 \n\t"\ + "punpckhwd %%mm3, %%mm3 \n\t"\ + "paddw %%mm7, %%mm0 \n\t"\ + "paddw %%mm7, %%mm6 \n\t"\ + "paddw %%mm7, %%mm3 \n\t"\ + /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\ + "packuswb %%mm0, %%mm2 \n\t"\ + "packuswb %%mm6, %%mm5 \n\t"\ + "packuswb %%mm3, %%mm4 \n\t"\ + +#define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c) + +#define REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) \ + "movq "#b", "#q2" \n\t" /* B */\ + "movq "#r", "#t" \n\t" /* R */\ + "punpcklbw "#g", "#b" \n\t" /* GBGBGBGB 0 */\ + "punpcklbw "#a", "#r" \n\t" /* ARARARAR 0 */\ + "punpckhbw "#g", "#q2" \n\t" /* GBGBGBGB 2 */\ + "punpckhbw "#a", "#t" \n\t" /* ARARARAR 2 */\ + "movq "#b", "#q0" \n\t" /* GBGBGBGB 0 */\ + "movq "#q2", "#q3" \n\t" /* GBGBGBGB 2 */\ + "punpcklwd "#r", "#q0" \n\t" /* ARGBARGB 0 */\ + "punpckhwd "#r", "#b" \n\t" /* ARGBARGB 1 */\ + "punpcklwd "#t", "#q2" \n\t" /* ARGBARGB 2 */\ + "punpckhwd "#t", "#q3" \n\t" /* ARGBARGB 3 */\ +\ + MOVNTQ( q0, (dst, index, 4))\ + MOVNTQ( b, 8(dst, index, 4))\ + MOVNTQ( q2, 16(dst, index, 4))\ + MOVNTQ( q3, 24(dst, index, 4))\ +\ + "add $8, "#index" \n\t"\ + "cmp "#dstw", "#index" \n\t"\ + " jb 1b \n\t" +#define WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) + +#define REAL_WRITERGB16(dst, dstw, index) \ + "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\ + "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\ + "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\ + "psrlq $3, %%mm2 \n\t"\ +\ + "movq %%mm2, %%mm1 \n\t"\ + "movq %%mm4, %%mm3 \n\t"\ +\ + "punpcklbw %%mm7, %%mm3 \n\t"\ + "punpcklbw %%mm5, %%mm2 \n\t"\ + "punpckhbw %%mm7, %%mm4 \n\t"\ + "punpckhbw %%mm5, %%mm1 \n\t"\ +\ + "psllq $3, %%mm3 \n\t"\ + "psllq $3, %%mm4 \n\t"\ +\ + "por %%mm3, %%mm2 \n\t"\ + "por %%mm4, %%mm1 \n\t"\ +\ + MOVNTQ(%%mm2, (dst, index, 2))\ + MOVNTQ(%%mm1, 8(dst, index, 2))\ +\ + "add $8, "#index" \n\t"\ + "cmp "#dstw", "#index" \n\t"\ + " jb 1b \n\t" +#define WRITERGB16(dst, dstw, index) REAL_WRITERGB16(dst, dstw, index) + +#define REAL_WRITERGB15(dst, dstw, index) \ + "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\ + "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\ + "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\ + "psrlq $3, %%mm2 \n\t"\ + "psrlq $1, %%mm5 \n\t"\ +\ + "movq %%mm2, %%mm1 \n\t"\ + "movq %%mm4, %%mm3 \n\t"\ +\ + "punpcklbw %%mm7, %%mm3 \n\t"\ + "punpcklbw %%mm5, %%mm2 \n\t"\ + "punpckhbw %%mm7, %%mm4 \n\t"\ + "punpckhbw %%mm5, %%mm1 \n\t"\ +\ + "psllq $2, %%mm3 \n\t"\ + "psllq $2, %%mm4 \n\t"\ +\ + "por %%mm3, %%mm2 \n\t"\ + "por %%mm4, %%mm1 \n\t"\ +\ + MOVNTQ(%%mm2, (dst, index, 2))\ + MOVNTQ(%%mm1, 8(dst, index, 2))\ +\ + "add $8, "#index" \n\t"\ + "cmp "#dstw", "#index" \n\t"\ + " jb 1b \n\t" +#define WRITERGB15(dst, dstw, index) 
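+ /* same scheme as WRITERGB16 above, but 5-5-5: green is masked to five
+    bits (bF8 rather than bFC) and red is pre-shifted right by one so
+    every field lands one bit lower in the output word */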
REAL_WRITERGB15(dst, dstw, index) + +#define WRITEBGR24OLD(dst, dstw, index) \ + /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\ + "movq %%mm2, %%mm1 \n\t" /* B */\ + "movq %%mm5, %%mm6 \n\t" /* R */\ + "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\ + "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\ + "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\ + "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\ + "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\ + "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\ + "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\ + "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\ + "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\ + "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\ +\ + "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\ + "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\ + "pand "MANGLE(bm00000111)", %%mm4 \n\t" /* 00000RGB 0 */\ + "pand "MANGLE(bm11111000)", %%mm0 \n\t" /* 00RGB000 0.5 */\ + "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\ + "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\ + "psllq $48, %%mm2 \n\t" /* GB000000 1 */\ + "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\ +\ + "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\ + "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\ + "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\ + "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\ + "pand "MANGLE(bm00001111)", %%mm2 \n\t" /* 0000RGBR 1 */\ + "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\ + "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\ + "pand "MANGLE(bm00000111)", %%mm4 \n\t" /* 00000RGB 2 */\ + "pand "MANGLE(bm11111000)", %%mm1 \n\t" /* 00RGB000 2.5 */\ + "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\ + "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\ + "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\ + "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\ +\ + "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\ + "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\ + "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\ + "pand "MANGLE(bm00000111)", %%mm5 \n\t" /* 00000RGB 3 */\ + "pand "MANGLE(bm11111000)", %%mm3 \n\t" /* 00RGB000 3.5 */\ + "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\ + "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\ + "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\ +\ + MOVNTQ(%%mm0, (dst))\ + MOVNTQ(%%mm2, 8(dst))\ + MOVNTQ(%%mm3, 16(dst))\ + "add $24, "#dst" \n\t"\ +\ + "add $8, "#index" \n\t"\ + "cmp "#dstw", "#index" \n\t"\ + " jb 1b \n\t" + +#define WRITEBGR24MMX(dst, dstw, index) \ + /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\ + "movq %%mm2, %%mm1 \n\t" /* B */\ + "movq %%mm5, %%mm6 \n\t" /* R */\ + "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\ + "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\ + "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\ + "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\ + "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\ + "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\ + "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\ + "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\ + "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\ + "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\ +\ + "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\ + "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\ + "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\ + "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\ +\ + "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\ + "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\ + "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\ + "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\ +\ + "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\ + "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\ + "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\ + "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\ +\ + "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\ + "movq %%mm2, 
%%mm6 \n\t" /* 0RGBRGB0 1 */\ + "psllq $40, %%mm2 \n\t" /* GB000000 1 */\ + "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\ + MOVNTQ(%%mm0, (dst))\ +\ + "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\ + "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\ + "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\ + "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\ + MOVNTQ(%%mm6, 8(dst))\ +\ + "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\ + "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\ + "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\ + MOVNTQ(%%mm5, 16(dst))\ +\ + "add $24, "#dst" \n\t"\ +\ + "add $8, "#index" \n\t"\ + "cmp "#dstw", "#index" \n\t"\ + " jb 1b \n\t" + +#define WRITEBGR24MMX2(dst, dstw, index) \ + /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\ + "movq "MANGLE(ff_M24A)", %%mm0 \n\t"\ + "movq "MANGLE(ff_M24C)", %%mm7 \n\t"\ + "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\ + "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\ + "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\ +\ + "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\ + "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\ + "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\ +\ + "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\ + "por %%mm1, %%mm6 \n\t"\ + "por %%mm3, %%mm6 \n\t"\ + MOVNTQ(%%mm6, (dst))\ +\ + "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\ + "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\ + "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\ + "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\ +\ + "pand "MANGLE(ff_M24B)", %%mm1 \n\t" /* B5 B4 B3 */\ + "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\ + "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\ +\ + "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\ + "por %%mm3, %%mm6 \n\t"\ + MOVNTQ(%%mm6, 8(dst))\ +\ + "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\ + "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\ + "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\ +\ + "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\ + "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\ + "pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7 R6 R5 */\ +\ + "por %%mm1, %%mm3 \n\t"\ + "por %%mm3, %%mm6 \n\t"\ + MOVNTQ(%%mm6, 16(dst))\ +\ + "add $24, "#dst" \n\t"\ +\ + "add $8, "#index" \n\t"\ + "cmp "#dstw", "#index" \n\t"\ + " jb 1b \n\t" + +#if HAVE_MMX2 +#undef WRITEBGR24 +#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index) +#else +#undef WRITEBGR24 +#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index) +#endif + +#define REAL_WRITEYUY2(dst, dstw, index) \ + "packuswb %%mm3, %%mm3 \n\t"\ + "packuswb %%mm4, %%mm4 \n\t"\ + "packuswb %%mm7, %%mm1 \n\t"\ + "punpcklbw %%mm4, %%mm3 \n\t"\ + "movq %%mm1, %%mm7 \n\t"\ + "punpcklbw %%mm3, %%mm1 \n\t"\ + "punpckhbw %%mm3, %%mm7 \n\t"\ +\ + MOVNTQ(%%mm1, (dst, index, 2))\ + MOVNTQ(%%mm7, 8(dst, index, 2))\ +\ + "add $8, "#index" \n\t"\ + "cmp "#dstw", "#index" \n\t"\ + " jb 1b \n\t" +#define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index) + + +static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize, + int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, + uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW) +{ +#if HAVE_MMX + if(!(c->flags & SWS_BITEXACT)){ + if (c->flags & SWS_ACCURATE_RND){ + if (uDest){ + YSCALEYUV2YV12X_ACCURATE( "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW) + YSCALEYUV2YV12X_ACCURATE(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW) + } + + YSCALEYUV2YV12X_ACCURATE("0", LUM_MMX_FILTER_OFFSET, dest, dstW) + 
}else{ + if (uDest){ + YSCALEYUV2YV12X( "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW) + YSCALEYUV2YV12X(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW) + } + + YSCALEYUV2YV12X("0", LUM_MMX_FILTER_OFFSET, dest, dstW) + } + return; + } +#endif +#if HAVE_ALTIVEC +yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize, + chrFilter, chrSrc, chrFilterSize, + dest, uDest, vDest, dstW, chrDstW); +#else //HAVE_ALTIVEC +yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize, + chrFilter, chrSrc, chrFilterSize, + dest, uDest, vDest, dstW, chrDstW); +#endif //!HAVE_ALTIVEC +} + +static inline void RENAME(yuv2nv12X)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize, + int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, + uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat) +{ +yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize, + chrFilter, chrSrc, chrFilterSize, + dest, uDest, dstW, chrDstW, dstFormat); +} + +static inline void RENAME(yuv2yuv1)(SwsContext *c, int16_t *lumSrc, int16_t *chrSrc, + uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW) +{ + int i; +#if HAVE_MMX + if(!(c->flags & SWS_BITEXACT)){ + long p= uDest ? 3 : 1; + uint8_t *src[3]= {lumSrc + dstW, chrSrc + chrDstW, chrSrc + VOFW + chrDstW}; + uint8_t *dst[3]= {dest, uDest, vDest}; + long counter[3] = {dstW, chrDstW, chrDstW}; + + if (c->flags & SWS_ACCURATE_RND){ + while(p--){ + __asm__ volatile( + YSCALEYUV2YV121_ACCURATE + :: "r" (src[p]), "r" (dst[p] + counter[p]), + "g" (-counter[p]) + : "%"REG_a + ); + } + }else{ + while(p--){ + __asm__ volatile( + YSCALEYUV2YV121 + :: "r" (src[p]), "r" (dst[p] + counter[p]), + "g" (-counter[p]) + : "%"REG_a + ); + } + } + return; + } +#endif + for (i=0; i<dstW; i++) + { + int val= (lumSrc[i]+64)>>7; + + if (val&256){ + if (val<0) val=0; + else val=255; + } + + dest[i]= val; + } + + if (uDest) + for (i=0; i<chrDstW; i++) + { + int u=(chrSrc[i ]+64)>>7; + int v=(chrSrc[i + VOFW]+64)>>7; + + if ((u|v)&256){ + if (u<0) u=0; + else if (u>255) u=255; + if (v<0) v=0; + else if (v>255) v=255; + } + + uDest[i]= u; + vDest[i]= v; + } +} + + +/** + * vertical scale YV12 to RGB + */ +static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize, + int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, + uint8_t *dest, long dstW, long dstY) +{ +#if HAVE_MMX + long dummy=0; + if(!(c->flags & SWS_BITEXACT)){ + if (c->flags & SWS_ACCURATE_RND){ + switch(c->dstFormat){ + case PIX_FMT_RGB32: + YSCALEYUV2PACKEDX_ACCURATE + YSCALEYUV2RGBX + "pcmpeqd %%mm7, %%mm7 \n\t" + WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6) + + YSCALEYUV2PACKEDX_END + return; + case PIX_FMT_BGR24: + YSCALEYUV2PACKEDX_ACCURATE + YSCALEYUV2RGBX + "pxor %%mm7, %%mm7 \n\t" + "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize + "add %4, %%"REG_c" \n\t" + WRITEBGR24(%%REGc, %5, %%REGa) + + + :: "r" (&c->redDither), + "m" (dummy), "m" (dummy), "m" (dummy), + "r" (dest), "m" (dstW) + : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S + ); + return; + case PIX_FMT_RGB555: + YSCALEYUV2PACKEDX_ACCURATE + YSCALEYUV2RGBX + "pxor %%mm7, %%mm7 \n\t" + /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ +#ifdef DITHER1XBPP + "paddusb "BLUE_DITHER"(%0), %%mm2\n\t" + "paddusb "GREEN_DITHER"(%0), %%mm4\n\t" + "paddusb "RED_DITHER"(%0), %%mm5\n\t" +#endif + + WRITERGB15(%4, %5, %%REGa) + YSCALEYUV2PACKEDX_END + return; + case PIX_FMT_RGB565: + YSCALEYUV2PACKEDX_ACCURATE + YSCALEYUV2RGBX + "pxor %%mm7, %%mm7 \n\t" + /* mm2=B, 
%%mm4=G, %%mm5=R, %%mm7=0 */ +#ifdef DITHER1XBPP + "paddusb "BLUE_DITHER"(%0), %%mm2\n\t" + "paddusb "GREEN_DITHER"(%0), %%mm4\n\t" + "paddusb "RED_DITHER"(%0), %%mm5\n\t" +#endif + + WRITERGB16(%4, %5, %%REGa) + YSCALEYUV2PACKEDX_END + return; + case PIX_FMT_YUYV422: + YSCALEYUV2PACKEDX_ACCURATE + /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ + + "psraw $3, %%mm3 \n\t" + "psraw $3, %%mm4 \n\t" + "psraw $3, %%mm1 \n\t" + "psraw $3, %%mm7 \n\t" + WRITEYUY2(%4, %5, %%REGa) + YSCALEYUV2PACKEDX_END + return; + } + }else{ + switch(c->dstFormat) + { + case PIX_FMT_RGB32: + YSCALEYUV2PACKEDX + YSCALEYUV2RGBX + "pcmpeqd %%mm7, %%mm7 \n\t" + WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6) + YSCALEYUV2PACKEDX_END + return; + case PIX_FMT_BGR24: + YSCALEYUV2PACKEDX + YSCALEYUV2RGBX + "pxor %%mm7, %%mm7 \n\t" + "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c" \n\t" //FIXME optimize + "add %4, %%"REG_c" \n\t" + WRITEBGR24(%%REGc, %5, %%REGa) + + :: "r" (&c->redDither), + "m" (dummy), "m" (dummy), "m" (dummy), + "r" (dest), "m" (dstW) + : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S + ); + return; + case PIX_FMT_RGB555: + YSCALEYUV2PACKEDX + YSCALEYUV2RGBX + "pxor %%mm7, %%mm7 \n\t" + /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ +#ifdef DITHER1XBPP + "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t" + "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t" + "paddusb "RED_DITHER"(%0), %%mm5 \n\t" +#endif + + WRITERGB15(%4, %5, %%REGa) + YSCALEYUV2PACKEDX_END + return; + case PIX_FMT_RGB565: + YSCALEYUV2PACKEDX + YSCALEYUV2RGBX + "pxor %%mm7, %%mm7 \n\t" + /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ +#ifdef DITHER1XBPP + "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t" + "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t" + "paddusb "RED_DITHER"(%0), %%mm5 \n\t" +#endif + + WRITERGB16(%4, %5, %%REGa) + YSCALEYUV2PACKEDX_END + return; + case PIX_FMT_YUYV422: + YSCALEYUV2PACKEDX + /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ + + "psraw $3, %%mm3 \n\t" + "psraw $3, %%mm4 \n\t" + "psraw $3, %%mm1 \n\t" + "psraw $3, %%mm7 \n\t" + WRITEYUY2(%4, %5, %%REGa) + YSCALEYUV2PACKEDX_END + return; + } + } + } +#endif /* HAVE_MMX */ +#if HAVE_ALTIVEC + /* The following list of supported dstFormat values should + match what's found in the body of altivec_yuv2packedX() */ + if (!(c->flags & SWS_BITEXACT) && + (c->dstFormat==PIX_FMT_ABGR || c->dstFormat==PIX_FMT_BGRA || + c->dstFormat==PIX_FMT_BGR24 || c->dstFormat==PIX_FMT_RGB24 || + c->dstFormat==PIX_FMT_RGBA || c->dstFormat==PIX_FMT_ARGB)) + altivec_yuv2packedX (c, lumFilter, lumSrc, lumFilterSize, + chrFilter, chrSrc, chrFilterSize, + dest, dstW, dstY); + else +#endif + yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize, + chrFilter, chrSrc, chrFilterSize, + dest, dstW, dstY); +} + +/** + * vertical bilinear scale YV12 to RGB + */ +static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1, + uint8_t *dest, int dstW, int yalpha, int uvalpha, int y) +{ + int yalpha1=4095- yalpha; + int uvalpha1=4095-uvalpha; + int i; + +#if HAVE_MMX + if(!(c->flags & SWS_BITEXACT)){ + switch(c->dstFormat) + { + //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :( + case PIX_FMT_RGB32: + __asm__ volatile( + "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"REG_b" \n\t" + "push %%"REG_BP" \n\t" + YSCALEYUV2RGB(%%REGBP, %5) + "pcmpeqd %%mm7, %%mm7 \n\t" + WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6) + "pop %%"REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + + :: "c" (buf0), "d" (buf1), "S" 
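+        /* the buffers are pinned to fixed registers ("c"=ecx, "d"=edx,
+           "S"=esi, "D"=edi, "a"=eax) because the template addresses them
+           positionally while ebx and ebp are saved and repurposed above */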
(uvbuf0), "D" (uvbuf1), "m" (dest), + "a" (&c->redDither) + ); + return; + case PIX_FMT_BGR24: + __asm__ volatile( + "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"REG_b" \n\t" + "push %%"REG_BP" \n\t" + YSCALEYUV2RGB(%%REGBP, %5) + "pxor %%mm7, %%mm7 \n\t" + WRITEBGR24(%%REGb, 8280(%5), %%REGBP) + "pop %%"REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), + "a" (&c->redDither) + ); + return; + case PIX_FMT_RGB555: + __asm__ volatile( + "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"REG_b" \n\t" + "push %%"REG_BP" \n\t" + YSCALEYUV2RGB(%%REGBP, %5) + "pxor %%mm7, %%mm7 \n\t" + /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ +#ifdef DITHER1XBPP + "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t" + "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t" + "paddusb "RED_DITHER"(%5), %%mm5 \n\t" +#endif + + WRITERGB15(%%REGb, 8280(%5), %%REGBP) + "pop %%"REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + + :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), + "a" (&c->redDither) + ); + return; + case PIX_FMT_RGB565: + __asm__ volatile( + "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"REG_b" \n\t" + "push %%"REG_BP" \n\t" + YSCALEYUV2RGB(%%REGBP, %5) + "pxor %%mm7, %%mm7 \n\t" + /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ +#ifdef DITHER1XBPP + "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t" + "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t" + "paddusb "RED_DITHER"(%5), %%mm5 \n\t" +#endif + + WRITERGB16(%%REGb, 8280(%5), %%REGBP) + "pop %%"REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), + "a" (&c->redDither) + ); + return; + case PIX_FMT_YUYV422: + __asm__ volatile( + "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"REG_b" \n\t" + "push %%"REG_BP" \n\t" + YSCALEYUV2PACKED(%%REGBP, %5) + WRITEYUY2(%%REGb, 8280(%5), %%REGBP) + "pop %%"REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), + "a" (&c->redDither) + ); + return; + default: break; + } + } +#endif //HAVE_MMX +YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C, YSCALE_YUV_2_GRAY16_2_C, YSCALE_YUV_2_MONO2_C) +} + +/** + * YV12 to RGB without scaling or interpolating + */ +static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1, + uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y) +{ + const int yalpha1=0; + int i; + + uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1 + const int yalpha= 4096; //FIXME ... 
+ + if (flags&SWS_FULL_CHR_H_INT) + { + RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, y); + return; + } + +#if HAVE_MMX + if(!(flags & SWS_BITEXACT)){ + if (uvalpha < 2048) // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster + { + switch(dstFormat) + { + case PIX_FMT_RGB32: + __asm__ volatile( + "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"REG_b" \n\t" + "push %%"REG_BP" \n\t" + YSCALEYUV2RGB1(%%REGBP, %5) + "pcmpeqd %%mm7, %%mm7 \n\t" + WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6) + "pop %%"REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + + :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), + "a" (&c->redDither) + ); + return; + case PIX_FMT_BGR24: + __asm__ volatile( + "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"REG_b" \n\t" + "push %%"REG_BP" \n\t" + YSCALEYUV2RGB1(%%REGBP, %5) + "pxor %%mm7, %%mm7 \n\t" + WRITEBGR24(%%REGb, 8280(%5), %%REGBP) + "pop %%"REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + + :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), + "a" (&c->redDither) + ); + return; + case PIX_FMT_RGB555: + __asm__ volatile( + "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"REG_b" \n\t" + "push %%"REG_BP" \n\t" + YSCALEYUV2RGB1(%%REGBP, %5) + "pxor %%mm7, %%mm7 \n\t" + /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ +#ifdef DITHER1XBPP + "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t" + "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t" + "paddusb "RED_DITHER"(%5), %%mm5 \n\t" +#endif + WRITERGB15(%%REGb, 8280(%5), %%REGBP) + "pop %%"REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + + :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), + "a" (&c->redDither) + ); + return; + case PIX_FMT_RGB565: + __asm__ volatile( + "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"REG_b" \n\t" + "push %%"REG_BP" \n\t" + YSCALEYUV2RGB1(%%REGBP, %5) + "pxor %%mm7, %%mm7 \n\t" + /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ +#ifdef DITHER1XBPP + "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t" + "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t" + "paddusb "RED_DITHER"(%5), %%mm5 \n\t" +#endif + + WRITERGB16(%%REGb, 8280(%5), %%REGBP) + "pop %%"REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + + :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), + "a" (&c->redDither) + ); + return; + case PIX_FMT_YUYV422: + __asm__ volatile( + "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"REG_b" \n\t" + "push %%"REG_BP" \n\t" + YSCALEYUV2PACKED1(%%REGBP, %5) + WRITEYUY2(%%REGb, 8280(%5), %%REGBP) + "pop %%"REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + + :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), + "a" (&c->redDither) + ); + return; + } + } + else + { + switch(dstFormat) + { + case PIX_FMT_RGB32: + __asm__ volatile( + "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"REG_b" \n\t" + "push %%"REG_BP" \n\t" + YSCALEYUV2RGB1b(%%REGBP, %5) + "pcmpeqd %%mm7, %%mm7 \n\t" + WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6) + "pop %%"REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + + :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), + "a" (&c->redDither) + ); + return; + case PIX_FMT_BGR24: + __asm__ volatile( + "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"REG_b" \n\t" + "push %%"REG_BP" \n\t" + YSCALEYUV2RGB1b(%%REGBP, %5) + "pxor %%mm7, %%mm7 \n\t" + WRITEBGR24(%%REGb, 8280(%5), %%REGBP) + "pop %%"REG_BP" \n\t" + "mov 
"ESP_OFFSET"(%5), %%"REG_b" \n\t" + + :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), + "a" (&c->redDither) + ); + return; + case PIX_FMT_RGB555: + __asm__ volatile( + "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"REG_b" \n\t" + "push %%"REG_BP" \n\t" + YSCALEYUV2RGB1b(%%REGBP, %5) + "pxor %%mm7, %%mm7 \n\t" + /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ +#ifdef DITHER1XBPP + "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t" + "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t" + "paddusb "RED_DITHER"(%5), %%mm5 \n\t" +#endif + WRITERGB15(%%REGb, 8280(%5), %%REGBP) + "pop %%"REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + + :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), + "a" (&c->redDither) + ); + return; + case PIX_FMT_RGB565: + __asm__ volatile( + "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"REG_b" \n\t" + "push %%"REG_BP" \n\t" + YSCALEYUV2RGB1b(%%REGBP, %5) + "pxor %%mm7, %%mm7 \n\t" + /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ +#ifdef DITHER1XBPP + "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t" + "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t" + "paddusb "RED_DITHER"(%5), %%mm5 \n\t" +#endif + + WRITERGB16(%%REGb, 8280(%5), %%REGBP) + "pop %%"REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + + :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), + "a" (&c->redDither) + ); + return; + case PIX_FMT_YUYV422: + __asm__ volatile( + "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"REG_b" \n\t" + "push %%"REG_BP" \n\t" + YSCALEYUV2PACKED1b(%%REGBP, %5) + WRITEYUY2(%%REGb, 8280(%5), %%REGBP) + "pop %%"REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + + :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), + "a" (&c->redDither) + ); + return; + } + } + } +#endif /* HAVE_MMX */ + if (uvalpha < 2048) + { + YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C, YSCALE_YUV_2_GRAY16_1_C, YSCALE_YUV_2_MONO2_C) + }else{ + YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C, YSCALE_YUV_2_GRAY16_1_C, YSCALE_YUV_2_MONO2_C) + } +} + +//FIXME yuy2* can read up to 7 samples too much + +static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused) +{ +#if HAVE_MMX + __asm__ volatile( + "movq "MANGLE(bm01010101)", %%mm2 \n\t" + "mov %0, %%"REG_a" \n\t" + "1: \n\t" + "movq (%1, %%"REG_a",2), %%mm0 \n\t" + "movq 8(%1, %%"REG_a",2), %%mm1 \n\t" + "pand %%mm2, %%mm0 \n\t" + "pand %%mm2, %%mm1 \n\t" + "packuswb %%mm1, %%mm0 \n\t" + "movq %%mm0, (%2, %%"REG_a") \n\t" + "add $8, %%"REG_a" \n\t" + " js 1b \n\t" + : : "g" (-width), "r" (src+width*2), "r" (dst+width) + : "%"REG_a + ); +#else + int i; + for (i=0; i<width; i++) + dst[i]= src[2*i]; +#endif +} + +static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused) +{ +#if HAVE_MMX + __asm__ volatile( + "movq "MANGLE(bm01010101)", %%mm4 \n\t" + "mov %0, %%"REG_a" \n\t" + "1: \n\t" + "movq (%1, %%"REG_a",4), %%mm0 \n\t" + "movq 8(%1, %%"REG_a",4), %%mm1 \n\t" + "psrlw $8, %%mm0 \n\t" + "psrlw $8, %%mm1 \n\t" + "packuswb %%mm1, %%mm0 \n\t" + "movq %%mm0, %%mm1 \n\t" + "psrlw $8, %%mm0 \n\t" + "pand %%mm4, %%mm1 \n\t" + "packuswb %%mm0, %%mm0 \n\t" + "packuswb %%mm1, %%mm1 \n\t" + "movd %%mm0, (%3, %%"REG_a") \n\t" + "movd %%mm1, (%2, %%"REG_a") \n\t" + "add $4, %%"REG_a" \n\t" + " js 1b \n\t" + : : "g" (-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width) + : "%"REG_a + ); +#else + int i; + for (i=0; i<width; i++) + { + dstU[i]= src1[4*i + 1]; + dstV[i]= src1[4*i + 3]; + 
} +#endif + assert(src1 == src2); +} + +/* This is almost identical to the previous, and exists only because + * yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses. */ +static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused) +{ +#if HAVE_MMX + __asm__ volatile( + "mov %0, %%"REG_a" \n\t" + "1: \n\t" + "movq (%1, %%"REG_a",2), %%mm0 \n\t" + "movq 8(%1, %%"REG_a",2), %%mm1 \n\t" + "psrlw $8, %%mm0 \n\t" + "psrlw $8, %%mm1 \n\t" + "packuswb %%mm1, %%mm0 \n\t" + "movq %%mm0, (%2, %%"REG_a") \n\t" + "add $8, %%"REG_a" \n\t" + " js 1b \n\t" + : : "g" (-width), "r" (src+width*2), "r" (dst+width) + : "%"REG_a + ); +#else + int i; + for (i=0; i<width; i++) + dst[i]= src[2*i+1]; +#endif +} + +static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused) +{ +#if HAVE_MMX + __asm__ volatile( + "movq "MANGLE(bm01010101)", %%mm4 \n\t" + "mov %0, %%"REG_a" \n\t" + "1: \n\t" + "movq (%1, %%"REG_a",4), %%mm0 \n\t" + "movq 8(%1, %%"REG_a",4), %%mm1 \n\t" + "pand %%mm4, %%mm0 \n\t" + "pand %%mm4, %%mm1 \n\t" + "packuswb %%mm1, %%mm0 \n\t" + "movq %%mm0, %%mm1 \n\t" + "psrlw $8, %%mm0 \n\t" + "pand %%mm4, %%mm1 \n\t" + "packuswb %%mm0, %%mm0 \n\t" + "packuswb %%mm1, %%mm1 \n\t" + "movd %%mm0, (%3, %%"REG_a") \n\t" + "movd %%mm1, (%2, %%"REG_a") \n\t" + "add $4, %%"REG_a" \n\t" + " js 1b \n\t" + : : "g" (-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width) + : "%"REG_a + ); +#else + int i; + for (i=0; i<width; i++) + { + dstU[i]= src1[4*i + 0]; + dstV[i]= src1[4*i + 2]; + } +#endif + assert(src1 == src2); +} + +#define BGR2Y(type, name, shr, shg, shb, maskr, maskg, maskb, RY, GY, BY, S)\ +static inline void RENAME(name)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)\ +{\ + int i;\ + for (i=0; i<width; i++)\ + {\ + int b= (((type*)src)[i]>>shb)&maskb;\ + int g= (((type*)src)[i]>>shg)&maskg;\ + int r= (((type*)src)[i]>>shr)&maskr;\ +\ + dst[i]= (((RY)*r + (GY)*g + (BY)*b + (33<<((S)-1)))>>(S));\ + }\ +} + +BGR2Y(uint32_t, bgr32ToY,16, 0, 0, 0x00FF, 0xFF00, 0x00FF, RY<< 8, GY , BY<< 8, RGB2YUV_SHIFT+8) +BGR2Y(uint32_t, rgb32ToY, 0, 0,16, 0x00FF, 0xFF00, 0x00FF, RY<< 8, GY , BY<< 8, RGB2YUV_SHIFT+8) +BGR2Y(uint16_t, bgr16ToY, 0, 0, 0, 0x001F, 0x07E0, 0xF800, RY<<11, GY<<5, BY , RGB2YUV_SHIFT+8) +BGR2Y(uint16_t, bgr15ToY, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, RY<<10, GY<<5, BY , RGB2YUV_SHIFT+7) +BGR2Y(uint16_t, rgb16ToY, 0, 0, 0, 0xF800, 0x07E0, 0x001F, RY , GY<<5, BY<<11, RGB2YUV_SHIFT+8) +BGR2Y(uint16_t, rgb15ToY, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, RY , GY<<5, BY<<10, RGB2YUV_SHIFT+7) + +#define BGR2UV(type, name, shr, shg, shb, maska, maskr, maskg, maskb, RU, GU, BU, RV, GV, BV, S)\ +static inline void RENAME(name)(uint8_t *dstU, uint8_t *dstV, uint8_t *src, uint8_t *dummy, long width, uint32_t *unused)\ +{\ + int i;\ + for (i=0; i<width; i++)\ + {\ + int b= (((type*)src)[i]&maskb)>>shb;\ + int g= (((type*)src)[i]&maskg)>>shg;\ + int r= (((type*)src)[i]&maskr)>>shr;\ +\ + dstU[i]= ((RU)*r + (GU)*g + (BU)*b + (257<<((S)-1)))>>(S);\ + dstV[i]= ((RV)*r + (GV)*g + (BV)*b + (257<<((S)-1)))>>(S);\ + }\ +}\ +static inline void RENAME(name ## _half)(uint8_t *dstU, uint8_t *dstV, uint8_t *src, uint8_t *dummy, long width, uint32_t *unused)\ +{\ + int i;\ + for (i=0; i<width; i++)\ + {\ + int pix0= ((type*)src)[2*i+0];\ + int pix1= ((type*)src)[2*i+1];\ + int g= (pix0&(maskg|maska))+(pix1&(maskg|maska));\ + int b= ((pix0+pix1-g)&(maskb|(2*maskb)))>>shb;\ + int r= 
((pix0+pix1-g)&(maskr|(2*maskr)))>>shr;\ + g&= maskg|(2*maskg);\ +\ + g>>=shg;\ +\ + dstU[i]= ((RU)*r + (GU)*g + (BU)*b + (257<<(S)))>>((S)+1);\ + dstV[i]= ((RV)*r + (GV)*g + (BV)*b + (257<<(S)))>>((S)+1);\ + }\ +} + +BGR2UV(uint32_t, bgr32ToUV,16, 0, 0, 0xFF000000, 0xFF0000, 0xFF00, 0x00FF, RU<< 8, GU , BU<< 8, RV<< 8, GV , BV<< 8, RGB2YUV_SHIFT+8) +BGR2UV(uint32_t, rgb32ToUV, 0, 0,16, 0xFF000000, 0x00FF, 0xFF00, 0xFF0000, RU<< 8, GU , BU<< 8, RV<< 8, GV , BV<< 8, RGB2YUV_SHIFT+8) +BGR2UV(uint16_t, bgr16ToUV, 0, 0, 0, 0, 0x001F, 0x07E0, 0xF800, RU<<11, GU<<5, BU , RV<<11, GV<<5, BV , RGB2YUV_SHIFT+8) +BGR2UV(uint16_t, bgr15ToUV, 0, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, RU<<10, GU<<5, BU , RV<<10, GV<<5, BV , RGB2YUV_SHIFT+7) +BGR2UV(uint16_t, rgb16ToUV, 0, 0, 0, 0, 0xF800, 0x07E0, 0x001F, RU , GU<<5, BU<<11, RV , GV<<5, BV<<11, RGB2YUV_SHIFT+8) +BGR2UV(uint16_t, rgb15ToUV, 0, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, RU , GU<<5, BU<<10, RV , GV<<5, BV<<10, RGB2YUV_SHIFT+7) + +#if HAVE_MMX +static inline void RENAME(bgr24ToY_mmx)(uint8_t *dst, uint8_t *src, long width, int srcFormat) +{ + + if(srcFormat == PIX_FMT_BGR24){ + __asm__ volatile( + "movq "MANGLE(ff_bgr24toY1Coeff)", %%mm5 \n\t" + "movq "MANGLE(ff_bgr24toY2Coeff)", %%mm6 \n\t" + : + ); + }else{ + __asm__ volatile( + "movq "MANGLE(ff_rgb24toY1Coeff)", %%mm5 \n\t" + "movq "MANGLE(ff_rgb24toY2Coeff)", %%mm6 \n\t" + : + ); + } + + __asm__ volatile( + "movq "MANGLE(ff_bgr24toYOffset)", %%mm4 \n\t" + "mov %2, %%"REG_a" \n\t" + "pxor %%mm7, %%mm7 \n\t" + "1: \n\t" + PREFETCH" 64(%0) \n\t" + "movd (%0), %%mm0 \n\t" + "movd 2(%0), %%mm1 \n\t" + "movd 6(%0), %%mm2 \n\t" + "movd 8(%0), %%mm3 \n\t" + "add $12, %0 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "punpcklbw %%mm7, %%mm1 \n\t" + "punpcklbw %%mm7, %%mm2 \n\t" + "punpcklbw %%mm7, %%mm3 \n\t" + "pmaddwd %%mm5, %%mm0 \n\t" + "pmaddwd %%mm6, %%mm1 \n\t" + "pmaddwd %%mm5, %%mm2 \n\t" + "pmaddwd %%mm6, %%mm3 \n\t" + "paddd %%mm1, %%mm0 \n\t" + "paddd %%mm3, %%mm2 \n\t" + "paddd %%mm4, %%mm0 \n\t" + "paddd %%mm4, %%mm2 \n\t" + "psrad $15, %%mm0 \n\t" + "psrad $15, %%mm2 \n\t" + "packssdw %%mm2, %%mm0 \n\t" + "packuswb %%mm0, %%mm0 \n\t" + "movd %%mm0, (%1, %%"REG_a") \n\t" + "add $4, %%"REG_a" \n\t" + " js 1b \n\t" + : "+r" (src) + : "r" (dst+width), "g" (-width) + : "%"REG_a + ); +} + +static inline void RENAME(bgr24ToUV_mmx)(uint8_t *dstU, uint8_t *dstV, uint8_t *src, long width, int srcFormat) +{ + __asm__ volatile( + "movq 24+%4, %%mm6 \n\t" + "mov %3, %%"REG_a" \n\t" + "pxor %%mm7, %%mm7 \n\t" + "1: \n\t" + PREFETCH" 64(%0) \n\t" + "movd (%0), %%mm0 \n\t" + "movd 2(%0), %%mm1 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "punpcklbw %%mm7, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm1, %%mm3 \n\t" + "pmaddwd %4, %%mm0 \n\t" + "pmaddwd 8+%4, %%mm1 \n\t" + "pmaddwd 16+%4, %%mm2 \n\t" + "pmaddwd %%mm6, %%mm3 \n\t" + "paddd %%mm1, %%mm0 \n\t" + "paddd %%mm3, %%mm2 \n\t" + + "movd 6(%0), %%mm1 \n\t" + "movd 8(%0), %%mm3 \n\t" + "add $12, %0 \n\t" + "punpcklbw %%mm7, %%mm1 \n\t" + "punpcklbw %%mm7, %%mm3 \n\t" + "movq %%mm1, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "pmaddwd %4, %%mm1 \n\t" + "pmaddwd 8+%4, %%mm3 \n\t" + "pmaddwd 16+%4, %%mm4 \n\t" + "pmaddwd %%mm6, %%mm5 \n\t" + "paddd %%mm3, %%mm1 \n\t" + "paddd %%mm5, %%mm4 \n\t" + + "movq "MANGLE(ff_bgr24toUVOffset)", %%mm3 \n\t" + "paddd %%mm3, %%mm0 \n\t" + "paddd %%mm3, %%mm2 \n\t" + "paddd %%mm3, %%mm1 \n\t" + "paddd %%mm3, %%mm4 \n\t" + "psrad $15, %%mm0 \n\t" + "psrad $15, %%mm2 \n\t" + "psrad $15, %%mm1 \n\t" + "psrad $15, %%mm4 \n\t" + "packssdw 
%%mm1, %%mm0 \n\t" + "packssdw %%mm4, %%mm2 \n\t" + "packuswb %%mm0, %%mm0 \n\t" + "packuswb %%mm2, %%mm2 \n\t" + "movd %%mm0, (%1, %%"REG_a") \n\t" + "movd %%mm2, (%2, %%"REG_a") \n\t" + "add $4, %%"REG_a" \n\t" + " js 1b \n\t" + : "+r" (src) + : "r" (dstU+width), "r" (dstV+width), "g" (-width), "m"(ff_bgr24toUV[srcFormat == PIX_FMT_RGB24][0]) + : "%"REG_a + ); +} +#endif + +static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused) +{ +#if HAVE_MMX + RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_BGR24); +#else + int i; + for (i=0; i<width; i++) + { + int b= src[i*3+0]; + int g= src[i*3+1]; + int r= src[i*3+2]; + + dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT); + } +#endif /* HAVE_MMX */ +} + +static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused) +{ +#if HAVE_MMX + RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_BGR24); +#else + int i; + for (i=0; i<width; i++) + { + int b= src1[3*i + 0]; + int g= src1[3*i + 1]; + int r= src1[3*i + 2]; + + dstU[i]= (RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT; + dstV[i]= (RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT; + } +#endif /* HAVE_MMX */ + assert(src1 == src2); +} + +static inline void RENAME(bgr24ToUV_half)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused) +{ + int i; + for (i=0; i<width; i++) + { + int b= src1[6*i + 0] + src1[6*i + 3]; + int g= src1[6*i + 1] + src1[6*i + 4]; + int r= src1[6*i + 2] + src1[6*i + 5]; + + dstU[i]= (RU*r + GU*g + BU*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1); + dstV[i]= (RV*r + GV*g + BV*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1); + } + assert(src1 == src2); +} + +static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused) +{ +#if HAVE_MMX + RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_RGB24); +#else + int i; + for (i=0; i<width; i++) + { + int r= src[i*3+0]; + int g= src[i*3+1]; + int b= src[i*3+2]; + + dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT); + } +#endif +} + +static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused) +{ +#if HAVE_MMX + assert(src1==src2); + RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_RGB24); +#else + int i; + assert(src1==src2); + for (i=0; i<width; i++) + { + int r= src1[3*i + 0]; + int g= src1[3*i + 1]; + int b= src1[3*i + 2]; + + dstU[i]= (RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT; + dstV[i]= (RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT; + } +#endif +} + +static inline void RENAME(rgb24ToUV_half)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused) +{ + int i; + assert(src1==src2); + for (i=0; i<width; i++) + { + int r= src1[6*i + 0] + src1[6*i + 3]; + int g= src1[6*i + 1] + src1[6*i + 4]; + int b= src1[6*i + 2] + src1[6*i + 5]; + + dstU[i]= (RU*r + GU*g + BU*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1); + dstV[i]= (RV*r + GV*g + BV*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1); + } +} + + +static inline void RENAME(palToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *pal) +{ + int i; + for (i=0; i<width; i++) + { + int d= src[i]; + + dst[i]= pal[d] & 0xFF; + } +} + +static inline void RENAME(palToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *pal) +{ + int i; + assert(src1 == src2); + for (i=0; i<width; i++) + { + int p= 
pal[src1[i]]; + + dstU[i]= p>>8; + dstV[i]= p>>16; + } +} + +static inline void RENAME(monowhite2Y)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused) +{ + int i, j; + for (i=0; i<width/8; i++){ + int d= ~src[i]; + for(j=0; j<8; j++) + dst[8*i+j]= ((d>>(7-j))&1)*255; + } +} + +static inline void RENAME(monoblack2Y)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused) +{ + int i, j; + for (i=0; i<width/8; i++){ + int d= src[i]; + for(j=0; j<8; j++) + dst[8*i+j]= ((d>>(7-j))&1)*255; + } +} + +// bilinear / bicubic scaling +static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc, + int16_t *filter, int16_t *filterPos, long filterSize) +{ +#if HAVE_MMX + assert(filterSize % 4 == 0 && filterSize>0); + if (filterSize==4) // Always true for upscaling, sometimes for down, too. + { + long counter= -2*dstW; + filter-= counter*2; + filterPos-= counter/2; + dst-= counter/2; + __asm__ volatile( +#if defined(PIC) + "push %%"REG_b" \n\t" +#endif + "pxor %%mm7, %%mm7 \n\t" + "push %%"REG_BP" \n\t" // we use 7 regs here ... + "mov %%"REG_a", %%"REG_BP" \n\t" + ASMALIGN(4) + "1: \n\t" + "movzwl (%2, %%"REG_BP"), %%eax \n\t" + "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t" + "movq (%1, %%"REG_BP", 4), %%mm1 \n\t" + "movq 8(%1, %%"REG_BP", 4), %%mm3 \n\t" + "movd (%3, %%"REG_a"), %%mm0 \n\t" + "movd (%3, %%"REG_b"), %%mm2 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "punpcklbw %%mm7, %%mm2 \n\t" + "pmaddwd %%mm1, %%mm0 \n\t" + "pmaddwd %%mm2, %%mm3 \n\t" + "movq %%mm0, %%mm4 \n\t" + "punpckldq %%mm3, %%mm0 \n\t" + "punpckhdq %%mm3, %%mm4 \n\t" + "paddd %%mm4, %%mm0 \n\t" + "psrad $7, %%mm0 \n\t" + "packssdw %%mm0, %%mm0 \n\t" + "movd %%mm0, (%4, %%"REG_BP") \n\t" + "add $4, %%"REG_BP" \n\t" + " jnc 1b \n\t" + + "pop %%"REG_BP" \n\t" +#if defined(PIC) + "pop %%"REG_b" \n\t" +#endif + : "+a" (counter) + : "c" (filter), "d" (filterPos), "S" (src), "D" (dst) +#if !defined(PIC) + : "%"REG_b +#endif + ); + } + else if (filterSize==8) + { + long counter= -2*dstW; + filter-= counter*4; + filterPos-= counter/2; + dst-= counter/2; + __asm__ volatile( +#if defined(PIC) + "push %%"REG_b" \n\t" +#endif + "pxor %%mm7, %%mm7 \n\t" + "push %%"REG_BP" \n\t" // we use 7 regs here ... 
+ "mov %%"REG_a", %%"REG_BP" \n\t" + ASMALIGN(4) + "1: \n\t" + "movzwl (%2, %%"REG_BP"), %%eax \n\t" + "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t" + "movq (%1, %%"REG_BP", 8), %%mm1 \n\t" + "movq 16(%1, %%"REG_BP", 8), %%mm3 \n\t" + "movd (%3, %%"REG_a"), %%mm0 \n\t" + "movd (%3, %%"REG_b"), %%mm2 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "punpcklbw %%mm7, %%mm2 \n\t" + "pmaddwd %%mm1, %%mm0 \n\t" + "pmaddwd %%mm2, %%mm3 \n\t" + + "movq 8(%1, %%"REG_BP", 8), %%mm1 \n\t" + "movq 24(%1, %%"REG_BP", 8), %%mm5 \n\t" + "movd 4(%3, %%"REG_a"), %%mm4 \n\t" + "movd 4(%3, %%"REG_b"), %%mm2 \n\t" + "punpcklbw %%mm7, %%mm4 \n\t" + "punpcklbw %%mm7, %%mm2 \n\t" + "pmaddwd %%mm1, %%mm4 \n\t" + "pmaddwd %%mm2, %%mm5 \n\t" + "paddd %%mm4, %%mm0 \n\t" + "paddd %%mm5, %%mm3 \n\t" + "movq %%mm0, %%mm4 \n\t" + "punpckldq %%mm3, %%mm0 \n\t" + "punpckhdq %%mm3, %%mm4 \n\t" + "paddd %%mm4, %%mm0 \n\t" + "psrad $7, %%mm0 \n\t" + "packssdw %%mm0, %%mm0 \n\t" + "movd %%mm0, (%4, %%"REG_BP") \n\t" + "add $4, %%"REG_BP" \n\t" + " jnc 1b \n\t" + + "pop %%"REG_BP" \n\t" +#if defined(PIC) + "pop %%"REG_b" \n\t" +#endif + : "+a" (counter) + : "c" (filter), "d" (filterPos), "S" (src), "D" (dst) +#if !defined(PIC) + : "%"REG_b +#endif + ); + } + else + { + uint8_t *offset = src+filterSize; + long counter= -2*dstW; + //filter-= counter*filterSize/2; + filterPos-= counter/2; + dst-= counter/2; + __asm__ volatile( + "pxor %%mm7, %%mm7 \n\t" + ASMALIGN(4) + "1: \n\t" + "mov %2, %%"REG_c" \n\t" + "movzwl (%%"REG_c", %0), %%eax \n\t" + "movzwl 2(%%"REG_c", %0), %%edx \n\t" + "mov %5, %%"REG_c" \n\t" + "pxor %%mm4, %%mm4 \n\t" + "pxor %%mm5, %%mm5 \n\t" + "2: \n\t" + "movq (%1), %%mm1 \n\t" + "movq (%1, %6), %%mm3 \n\t" + "movd (%%"REG_c", %%"REG_a"), %%mm0 \n\t" + "movd (%%"REG_c", %%"REG_d"), %%mm2 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "punpcklbw %%mm7, %%mm2 \n\t" + "pmaddwd %%mm1, %%mm0 \n\t" + "pmaddwd %%mm2, %%mm3 \n\t" + "paddd %%mm3, %%mm5 \n\t" + "paddd %%mm0, %%mm4 \n\t" + "add $8, %1 \n\t" + "add $4, %%"REG_c" \n\t" + "cmp %4, %%"REG_c" \n\t" + " jb 2b \n\t" + "add %6, %1 \n\t" + "movq %%mm4, %%mm0 \n\t" + "punpckldq %%mm5, %%mm4 \n\t" + "punpckhdq %%mm5, %%mm0 \n\t" + "paddd %%mm0, %%mm4 \n\t" + "psrad $7, %%mm4 \n\t" + "packssdw %%mm4, %%mm4 \n\t" + "mov %3, %%"REG_a" \n\t" + "movd %%mm4, (%%"REG_a", %0) \n\t" + "add $4, %0 \n\t" + " jnc 1b \n\t" + + : "+r" (counter), "+r" (filter) + : "m" (filterPos), "m" (dst), "m"(offset), + "m" (src), "r" (filterSize*2) + : "%"REG_a, "%"REG_c, "%"REG_d + ); + } +#else +#if HAVE_ALTIVEC + hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize); +#else + int i; + for (i=0; i<dstW; i++) + { + int j; + int srcPos= filterPos[i]; + int val=0; + //printf("filterPos: %d\n", filterPos[i]); + for (j=0; j<filterSize; j++) + { + //printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]); + val += ((int)src[srcPos + j])*filter[filterSize*i + j]; + } + //filter += hFilterSize; + dst[i] = FFMIN(val>>7, (1<<15)-1); // the cubic equation does overflow ... 
+ //dst[i] = val>>7; + } +#endif /* HAVE_ALTIVEC */ +#endif /* HAVE_MMX */ +} + // *** horizontal scale Y line to temp buffer +static inline void RENAME(hyscale)(SwsContext *c, uint16_t *dst, long dstWidth, uint8_t *src, int srcW, int xInc, + int flags, int canMMX2BeUsed, int16_t *hLumFilter, + int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode, + int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter, + int32_t *mmx2FilterPos, uint32_t *pal) +{ + if (srcFormat==PIX_FMT_YUYV422 || srcFormat==PIX_FMT_GRAY16BE) + { + RENAME(yuy2ToY)(formatConvBuffer, src, srcW, pal); + src= formatConvBuffer; + } + else if (srcFormat==PIX_FMT_UYVY422 || srcFormat==PIX_FMT_GRAY16LE) + { + RENAME(uyvyToY)(formatConvBuffer, src, srcW, pal); + src= formatConvBuffer; + } + else if (srcFormat==PIX_FMT_RGB32) + { + RENAME(bgr32ToY)(formatConvBuffer, src, srcW, pal); + src= formatConvBuffer; + } + else if (srcFormat==PIX_FMT_RGB32_1) + { + RENAME(bgr32ToY)(formatConvBuffer, src+ALT32_CORR, srcW, pal); + src= formatConvBuffer; + } + else if (srcFormat==PIX_FMT_BGR24) + { + RENAME(bgr24ToY)(formatConvBuffer, src, srcW, pal); + src= formatConvBuffer; + } + else if (srcFormat==PIX_FMT_BGR565) + { + RENAME(bgr16ToY)(formatConvBuffer, src, srcW, pal); + src= formatConvBuffer; + } + else if (srcFormat==PIX_FMT_BGR555) + { + RENAME(bgr15ToY)(formatConvBuffer, src, srcW, pal); + src= formatConvBuffer; + } + else if (srcFormat==PIX_FMT_BGR32) + { + RENAME(rgb32ToY)(formatConvBuffer, src, srcW, pal); + src= formatConvBuffer; + } + else if (srcFormat==PIX_FMT_BGR32_1) + { + RENAME(rgb32ToY)(formatConvBuffer, src+ALT32_CORR, srcW, pal); + src= formatConvBuffer; + } + else if (srcFormat==PIX_FMT_RGB24) + { + RENAME(rgb24ToY)(formatConvBuffer, src, srcW, pal); + src= formatConvBuffer; + } + else if (srcFormat==PIX_FMT_RGB565) + { + RENAME(rgb16ToY)(formatConvBuffer, src, srcW, pal); + src= formatConvBuffer; + } + else if (srcFormat==PIX_FMT_RGB555) + { + RENAME(rgb15ToY)(formatConvBuffer, src, srcW, pal); + src= formatConvBuffer; + } + else if (srcFormat==PIX_FMT_RGB8 || srcFormat==PIX_FMT_BGR8 || srcFormat==PIX_FMT_PAL8 || srcFormat==PIX_FMT_BGR4_BYTE || srcFormat==PIX_FMT_RGB4_BYTE) + { + RENAME(palToY)(formatConvBuffer, src, srcW, pal); + src= formatConvBuffer; + } + else if (srcFormat==PIX_FMT_MONOBLACK) + { + RENAME(monoblack2Y)(formatConvBuffer, src, srcW, pal); + src= formatConvBuffer; + } + else if (srcFormat==PIX_FMT_MONOWHITE) + { + RENAME(monowhite2Y)(formatConvBuffer, src, srcW, pal); + src= formatConvBuffer; + } + +#if HAVE_MMX + // Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one). 
+ if (!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed)) +#else + if (!(flags&SWS_FAST_BILINEAR)) +#endif + { + RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize); + } + else // fast bilinear upscale / crap downscale + { +#if ARCH_X86 +#if HAVE_MMX2 + int i; +#if defined(PIC) + uint64_t ebxsave __attribute__((aligned(8))); +#endif + if (canMMX2BeUsed) + { + __asm__ volatile( +#if defined(PIC) + "mov %%"REG_b", %5 \n\t" +#endif + "pxor %%mm7, %%mm7 \n\t" + "mov %0, %%"REG_c" \n\t" + "mov %1, %%"REG_D" \n\t" + "mov %2, %%"REG_d" \n\t" + "mov %3, %%"REG_b" \n\t" + "xor %%"REG_a", %%"REG_a" \n\t" // i + PREFETCH" (%%"REG_c") \n\t" + PREFETCH" 32(%%"REG_c") \n\t" + PREFETCH" 64(%%"REG_c") \n\t" + +#if ARCH_X86_64 + +#define FUNNY_Y_CODE \ + "movl (%%"REG_b"), %%esi \n\t"\ + "call *%4 \n\t"\ + "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\ + "add %%"REG_S", %%"REG_c" \n\t"\ + "add %%"REG_a", %%"REG_D" \n\t"\ + "xor %%"REG_a", %%"REG_a" \n\t"\ + +#else + +#define FUNNY_Y_CODE \ + "movl (%%"REG_b"), %%esi \n\t"\ + "call *%4 \n\t"\ + "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\ + "add %%"REG_a", %%"REG_D" \n\t"\ + "xor %%"REG_a", %%"REG_a" \n\t"\ + +#endif /* ARCH_X86_64 */ + +FUNNY_Y_CODE +FUNNY_Y_CODE +FUNNY_Y_CODE +FUNNY_Y_CODE +FUNNY_Y_CODE +FUNNY_Y_CODE +FUNNY_Y_CODE +FUNNY_Y_CODE + +#if defined(PIC) + "mov %5, %%"REG_b" \n\t" +#endif + :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos), + "m" (funnyYCode) +#if defined(PIC) + ,"m" (ebxsave) +#endif + : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D +#if !defined(PIC) + ,"%"REG_b +#endif + ); + for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128; + } + else + { +#endif /* HAVE_MMX2 */ + long xInc_shr16 = xInc >> 16; + uint16_t xInc_mask = xInc & 0xffff; + //NO MMX just normal asm ... 
+ __asm__ volatile( + "xor %%"REG_a", %%"REG_a" \n\t" // i + "xor %%"REG_d", %%"REG_d" \n\t" // xx + "xorl %%ecx, %%ecx \n\t" // 2*xalpha + ASMALIGN(4) + "1: \n\t" + "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx] + "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1] + "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx] + "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha + "shll $16, %%edi \n\t" + "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha) + "mov %1, %%"REG_D" \n\t" + "shrl $9, %%esi \n\t" + "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t" + "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF + "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry + + "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx] + "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1] + "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx] + "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha + "shll $16, %%edi \n\t" + "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha) + "mov %1, %%"REG_D" \n\t" + "shrl $9, %%esi \n\t" + "movw %%si, 2(%%"REG_D", %%"REG_a", 2) \n\t" + "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF + "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry + + + "add $2, %%"REG_a" \n\t" + "cmp %2, %%"REG_a" \n\t" + " jb 1b \n\t" + + + :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask) + : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi" + ); +#if HAVE_MMX2 + } //if MMX2 can't be used +#endif +#else + int i; + unsigned int xpos=0; + for (i=0;i<dstWidth;i++) + { + register unsigned int xx=xpos>>16; + register unsigned int xalpha=(xpos&0xFFFF)>>9; + dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha; + xpos+=xInc; + } +#endif /* ARCH_X86 */ + } + + if(c->srcRange != c->dstRange && !(isRGB(c->dstFormat) || isBGR(c->dstFormat))){ + int i; + //FIXME all pal and rgb srcFormats could do this conversion as well + //FIXME all scalers more complex than bilinear could do half of this transform + if(c->srcRange){ + for (i=0; i<dstWidth; i++) + dst[i]= (dst[i]*14071 + 33561947)>>14; + }else{ + for (i=0; i<dstWidth; i++) + dst[i]= (FFMIN(dst[i],30189)*19077 - 39057361)>>14; + } + } +} + +inline static void RENAME(hcscale)(SwsContext *c, uint16_t *dst, long dstWidth, uint8_t *src1, uint8_t *src2, + int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter, + int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode, + int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter, + int32_t *mmx2FilterPos, uint32_t *pal) +{ + if (srcFormat==PIX_FMT_YUYV422) + { + RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal); + src1= formatConvBuffer; + src2= formatConvBuffer+VOFW; + } + else if (srcFormat==PIX_FMT_UYVY422) + { + RENAME(uyvyToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal); + src1= formatConvBuffer; + src2= formatConvBuffer+VOFW; + } + else if (srcFormat==PIX_FMT_RGB32) + { + if(c->chrSrcHSubSample) + RENAME(bgr32ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal); + else + RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal); + src1= formatConvBuffer; + src2= formatConvBuffer+VOFW; + } + else if (srcFormat==PIX_FMT_RGB32_1) + { + if(c->chrSrcHSubSample) + RENAME(bgr32ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW, pal); + else + RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW, pal); + src1= formatConvBuffer; + src2= formatConvBuffer+VOFW; + } + else if 
(srcFormat==PIX_FMT_BGR24) + { + if(c->chrSrcHSubSample) + RENAME(bgr24ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal); + else + RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal); + src1= formatConvBuffer; + src2= formatConvBuffer+VOFW; + } + else if (srcFormat==PIX_FMT_BGR565) + { + if(c->chrSrcHSubSample) + RENAME(bgr16ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal); + else + RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal); + src1= formatConvBuffer; + src2= formatConvBuffer+VOFW; + } + else if (srcFormat==PIX_FMT_BGR555) + { + if(c->chrSrcHSubSample) + RENAME(bgr15ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal); + else + RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal); + src1= formatConvBuffer; + src2= formatConvBuffer+VOFW; + } + else if (srcFormat==PIX_FMT_BGR32) + { + if(c->chrSrcHSubSample) + RENAME(rgb32ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal); + else + RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal); + src1= formatConvBuffer; + src2= formatConvBuffer+VOFW; + } + else if (srcFormat==PIX_FMT_BGR32_1) + { + if(c->chrSrcHSubSample) + RENAME(rgb32ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW, pal); + else + RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW, pal); + src1= formatConvBuffer; + src2= formatConvBuffer+VOFW; + } + else if (srcFormat==PIX_FMT_RGB24) + { + if(c->chrSrcHSubSample) + RENAME(rgb24ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal); + else + RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal); + src1= formatConvBuffer; + src2= formatConvBuffer+VOFW; + } + else if (srcFormat==PIX_FMT_RGB565) + { + if(c->chrSrcHSubSample) + RENAME(rgb16ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal); + else + RENAME(rgb16ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal); + src1= formatConvBuffer; + src2= formatConvBuffer+VOFW; + } + else if (srcFormat==PIX_FMT_RGB555) + { + if(c->chrSrcHSubSample) + RENAME(rgb15ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal); + else + RENAME(rgb15ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal); + src1= formatConvBuffer; + src2= formatConvBuffer+VOFW; + } + else if (isGray(srcFormat) || srcFormat==PIX_FMT_MONOBLACK || srcFormat==PIX_FMT_MONOWHITE) + { + return; + } + else if (srcFormat==PIX_FMT_RGB8 || srcFormat==PIX_FMT_BGR8 || srcFormat==PIX_FMT_PAL8 || srcFormat==PIX_FMT_BGR4_BYTE || srcFormat==PIX_FMT_RGB4_BYTE) + { + RENAME(palToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal); + src1= formatConvBuffer; + src2= formatConvBuffer+VOFW; + } + +#if HAVE_MMX + // Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one). 
+ if (!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed)) +#else + if (!(flags&SWS_FAST_BILINEAR)) +#endif + { + RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize); + RENAME(hScale)(dst+VOFW, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize); + } + else // fast bilinear upscale / crap downscale + { +#if ARCH_X86 +#if HAVE_MMX2 + int i; +#if defined(PIC) + uint64_t ebxsave __attribute__((aligned(8))); +#endif + if (canMMX2BeUsed) + { + __asm__ volatile( +#if defined(PIC) + "mov %%"REG_b", %6 \n\t" +#endif + "pxor %%mm7, %%mm7 \n\t" + "mov %0, %%"REG_c" \n\t" + "mov %1, %%"REG_D" \n\t" + "mov %2, %%"REG_d" \n\t" + "mov %3, %%"REG_b" \n\t" + "xor %%"REG_a", %%"REG_a" \n\t" // i + PREFETCH" (%%"REG_c") \n\t" + PREFETCH" 32(%%"REG_c") \n\t" + PREFETCH" 64(%%"REG_c") \n\t" + +#if ARCH_X86_64 + +#define FUNNY_UV_CODE \ + "movl (%%"REG_b"), %%esi \n\t"\ + "call *%4 \n\t"\ + "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\ + "add %%"REG_S", %%"REG_c" \n\t"\ + "add %%"REG_a", %%"REG_D" \n\t"\ + "xor %%"REG_a", %%"REG_a" \n\t"\ + +#else + +#define FUNNY_UV_CODE \ + "movl (%%"REG_b"), %%esi \n\t"\ + "call *%4 \n\t"\ + "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\ + "add %%"REG_a", %%"REG_D" \n\t"\ + "xor %%"REG_a", %%"REG_a" \n\t"\ + +#endif /* ARCH_X86_64 */ + +FUNNY_UV_CODE +FUNNY_UV_CODE +FUNNY_UV_CODE +FUNNY_UV_CODE + "xor %%"REG_a", %%"REG_a" \n\t" // i + "mov %5, %%"REG_c" \n\t" // src + "mov %1, %%"REG_D" \n\t" // buf1 + "add $"AV_STRINGIFY(VOF)", %%"REG_D" \n\t" + PREFETCH" (%%"REG_c") \n\t" + PREFETCH" 32(%%"REG_c") \n\t" + PREFETCH" 64(%%"REG_c") \n\t" + +FUNNY_UV_CODE +FUNNY_UV_CODE +FUNNY_UV_CODE +FUNNY_UV_CODE + +#if defined(PIC) + "mov %6, %%"REG_b" \n\t" +#endif + :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos), + "m" (funnyUVCode), "m" (src2) +#if defined(PIC) + ,"m" (ebxsave) +#endif + : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D +#if !defined(PIC) + ,"%"REG_b +#endif + ); + for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) + { + //printf("%d %d %d\n", dstWidth, i, srcW); + dst[i] = src1[srcW-1]*128; + dst[i+VOFW] = src2[srcW-1]*128; + } + } + else + { +#endif /* HAVE_MMX2 */ + long xInc_shr16 = (long) (xInc >> 16); + uint16_t xInc_mask = xInc & 0xffff; + __asm__ volatile( + "xor %%"REG_a", %%"REG_a" \n\t" // i + "xor %%"REG_d", %%"REG_d" \n\t" // xx + "xorl %%ecx, %%ecx \n\t" // 2*xalpha + ASMALIGN(4) + "1: \n\t" + "mov %0, %%"REG_S" \n\t" + "movzbl (%%"REG_S", %%"REG_d"), %%edi \n\t" //src[xx] + "movzbl 1(%%"REG_S", %%"REG_d"), %%esi \n\t" //src[xx+1] + "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx] + "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha + "shll $16, %%edi \n\t" + "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha) + "mov %1, %%"REG_D" \n\t" + "shrl $9, %%esi \n\t" + "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t" + + "movzbl (%5, %%"REG_d"), %%edi \n\t" //src[xx] + "movzbl 1(%5, %%"REG_d"), %%esi \n\t" //src[xx+1] + "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx] + "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha + "shll $16, %%edi \n\t" + "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha) + "mov %1, %%"REG_D" \n\t" + "shrl $9, %%esi \n\t" + "movw %%si, "AV_STRINGIFY(VOF)"(%%"REG_D", %%"REG_a", 2) \n\t" + + "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF + "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry + "add $1, %%"REG_a" \n\t" + "cmp %2, %%"REG_a" \n\t" + " jb 1b \n\t" + +/* GCC 3.3 makes MPlayer crash on IA-32 machines when using "g" operand here, + 
which is needed to support GCC 4.0. */ +#if ARCH_X86_64 && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) + :: "m" (src1), "m" (dst), "g" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask), +#else + :: "m" (src1), "m" (dst), "m" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask), +#endif + "r" (src2) + : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi" + ); +#if HAVE_MMX2 + } //if MMX2 can't be used +#endif +#else + int i; + unsigned int xpos=0; + for (i=0;i<dstWidth;i++) + { + register unsigned int xx=xpos>>16; + register unsigned int xalpha=(xpos&0xFFFF)>>9; + dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha); + dst[i+VOFW]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha); + /* slower + dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha; + dst[i+VOFW]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha; + */ + xpos+=xInc; + } +#endif /* ARCH_X86 */ + } + if(c->srcRange != c->dstRange && !(isRGB(c->dstFormat) || isBGR(c->dstFormat))){ + int i; + //FIXME all pal and rgb srcFormats could do this conversion as well + //FIXME all scalers more complex than bilinear could do half of this transform + if(c->srcRange){ + for (i=0; i<dstWidth; i++){ + dst[i ]= (dst[i ]*1799 + 4081085)>>11; //1469 + dst[i+VOFW]= (dst[i+VOFW]*1799 + 4081085)>>11; //1469 + } + }else{ + for (i=0; i<dstWidth; i++){ + dst[i ]= (FFMIN(dst[i ],30775)*4663 - 9289992)>>12; //-264 + dst[i+VOFW]= (FFMIN(dst[i+VOFW],30775)*4663 - 9289992)>>12; //-264 + } + } + } +} + +static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + + /* load a few things into local vars to make the code more readable and faster */ + const int srcW= c->srcW; + const int dstW= c->dstW; + const int dstH= c->dstH; + const int chrDstW= c->chrDstW; + const int chrSrcW= c->chrSrcW; + const int lumXInc= c->lumXInc; + const int chrXInc= c->chrXInc; + const int dstFormat= c->dstFormat; + const int srcFormat= c->srcFormat; + const int flags= c->flags; + const int canMMX2BeUsed= c->canMMX2BeUsed; + int16_t *vLumFilterPos= c->vLumFilterPos; + int16_t *vChrFilterPos= c->vChrFilterPos; + int16_t *hLumFilterPos= c->hLumFilterPos; + int16_t *hChrFilterPos= c->hChrFilterPos; + int16_t *vLumFilter= c->vLumFilter; + int16_t *vChrFilter= c->vChrFilter; + int16_t *hLumFilter= c->hLumFilter; + int16_t *hChrFilter= c->hChrFilter; + int32_t *lumMmxFilter= c->lumMmxFilter; + int32_t *chrMmxFilter= c->chrMmxFilter; + const int vLumFilterSize= c->vLumFilterSize; + const int vChrFilterSize= c->vChrFilterSize; + const int hLumFilterSize= c->hLumFilterSize; + const int hChrFilterSize= c->hChrFilterSize; + int16_t **lumPixBuf= c->lumPixBuf; + int16_t **chrPixBuf= c->chrPixBuf; + const int vLumBufSize= c->vLumBufSize; + const int vChrBufSize= c->vChrBufSize; + uint8_t *funnyYCode= c->funnyYCode; + uint8_t *funnyUVCode= c->funnyUVCode; + uint8_t *formatConvBuffer= c->formatConvBuffer; + const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample; + const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample); + int lastDstY; + uint32_t *pal=c->pal_yuv; + + /* vars which will change and which we need to store back in the context */ + int dstY= c->dstY; + int lumBufIndex= c->lumBufIndex; + int chrBufIndex= c->chrBufIndex; + int lastInLumBuf= c->lastInLumBuf; + int lastInChrBuf= c->lastInChrBuf; + + if (isPacked(c->srcFormat)){ + src[0]= + src[1]= + src[2]= src[0]; + srcStride[0]= + srcStride[1]= + srcStride[2]= srcStride[0]; + } + srcStride[1]<<= c->vChrDrop; + srcStride[2]<<= c->vChrDrop; + + 
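swScale() below keeps the horizontally scaled lines in ring buffers: lumPixBuf/chrPixBuf are pointer arrays of 2*vLumBufSize and 2*vChrBufSize entries (see the asserts further down), set up at init time so the two halves alias the same lines and a vertical filter window of consecutive entries never has to wrap. A sketch of the window computation it performs (the helper is illustrative, not part of the source):

    /* mirrors the lumSrcPtr/chrSrcPtr arithmetic below: bufIndex is the ring
     * slot holding line lastInBuf; step back to firstSrcY and bias by bufSize
     * so the result stays inside the doubled pointer array */
    static int16_t **filter_window_sketch(int16_t **pixBuf, int bufSize,
                                          int bufIndex, int firstSrcY,
                                          int lastInBuf)
    {
        return pixBuf + bufIndex + firstSrcY - lastInBuf + bufSize;
    }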
//printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2], + // (int)dst[0], (int)dst[1], (int)dst[2]); + +#if 0 //self test FIXME move to a vfilter or something + { + static volatile int i=0; + i++; + if (srcFormat==PIX_FMT_YUV420P && i==1 && srcSliceH>= c->srcH) + selfTest(src, srcStride, c->srcW, c->srcH); + i--; + } +#endif + + //printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2], + //dstStride[0],dstStride[1],dstStride[2]); + + if (dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0) + { + static int warnedAlready=0; //FIXME move this into the context perhaps + if (flags & SWS_PRINT_INFO && !warnedAlready) + { + av_log(c, AV_LOG_WARNING, "Warning: dstStride is not aligned!\n" + " ->cannot do aligned memory accesses anymore\n"); + warnedAlready=1; + } + } + + /* Note the user might start scaling the picture in the middle so this + will not get executed. This is not really intended but works + currently, so people might do it. */ + if (srcSliceY ==0){ + lumBufIndex=0; + chrBufIndex=0; + dstY=0; + lastInLumBuf= -1; + lastInChrBuf= -1; + } + + lastDstY= dstY; + + for (;dstY < dstH; dstY++){ + unsigned char *dest =dst[0]+dstStride[0]*dstY; + const int chrDstY= dstY>>c->chrDstVSubSample; + unsigned char *uDest=dst[1]+dstStride[1]*chrDstY; + unsigned char *vDest=dst[2]+dstStride[2]*chrDstY; + + const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input + const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input + const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input + const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input + + //printf("dstY:%d dstH:%d firstLumSrcY:%d lastInLumBuf:%d vLumBufSize: %d vChrBufSize: %d slice: %d %d vLumFilterSize: %d firstChrSrcY: %d vChrFilterSize: %d c->chrSrcVSubSample: %d\n", + // dstY, dstH, firstLumSrcY, lastInLumBuf, vLumBufSize, vChrBufSize, srcSliceY, srcSliceH, vLumFilterSize, firstChrSrcY, vChrFilterSize, c->chrSrcVSubSample); + //handle holes (FAST_BILINEAR & weird filters) + if (firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1; + if (firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1; + //printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize); + assert(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1); + assert(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1); + + // Do we have enough lines in this slice to output the dstY line + if (lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample)) + { + //Do horizontal scaling + while(lastInLumBuf < lastLumSrcY) + { + uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0]; + lumBufIndex++; + //printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY); + assert(lumBufIndex < 2*vLumBufSize); + assert(lastInLumBuf + 1 - srcSliceY < srcSliceH); + assert(lastInLumBuf + 1 - srcSliceY >= 0); + //printf("%d %d\n", lumBufIndex, vLumBufSize); + RENAME(hyscale)(c, lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc, + flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize, + funnyYCode, c->srcFormat, formatConvBuffer, + c->lumMmx2Filter, c->lumMmx2FilterPos, pal); + lastInLumBuf++; + } + while(lastInChrBuf < lastChrSrcY) + { + uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1]; + uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2]; + chrBufIndex++; + assert(chrBufIndex < 2*vChrBufSize); + assert(lastInChrBuf + 1 - chrSrcSliceY 
< (chrSrcSliceH)); + assert(lastInChrBuf + 1 - chrSrcSliceY >= 0); + //FIXME replace parameters through context struct (some at least) + + if (!(isGray(srcFormat) || isGray(dstFormat))) + RENAME(hcscale)(c, chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc, + flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize, + funnyUVCode, c->srcFormat, formatConvBuffer, + c->chrMmx2Filter, c->chrMmx2FilterPos, pal); + lastInChrBuf++; + } + //wrap buf index around to stay inside the ring buffer + if (lumBufIndex >= vLumBufSize) lumBufIndex-= vLumBufSize; + if (chrBufIndex >= vChrBufSize) chrBufIndex-= vChrBufSize; + } + else // not enough lines left in this slice -> load the rest in the buffer + { + /* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n", + firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY, + lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize, + vChrBufSize, vLumBufSize);*/ + + //Do horizontal scaling + while(lastInLumBuf+1 < srcSliceY + srcSliceH) + { + uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0]; + lumBufIndex++; + assert(lumBufIndex < 2*vLumBufSize); + assert(lastInLumBuf + 1 - srcSliceY < srcSliceH); + assert(lastInLumBuf + 1 - srcSliceY >= 0); + RENAME(hyscale)(c, lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc, + flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize, + funnyYCode, c->srcFormat, formatConvBuffer, + c->lumMmx2Filter, c->lumMmx2FilterPos, pal); + lastInLumBuf++; + } + while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH)) + { + uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1]; + uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2]; + chrBufIndex++; + assert(chrBufIndex < 2*vChrBufSize); + assert(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH); + assert(lastInChrBuf + 1 - chrSrcSliceY >= 0); + + if (!(isGray(srcFormat) || isGray(dstFormat))) + RENAME(hcscale)(c, chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc, + flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize, + funnyUVCode, c->srcFormat, formatConvBuffer, + c->chrMmx2Filter, c->chrMmx2FilterPos, pal); + lastInChrBuf++; + } + //wrap buf index around to stay inside the ring buffer + if (lumBufIndex >= vLumBufSize) lumBufIndex-= vLumBufSize; + if (chrBufIndex >= vChrBufSize) chrBufIndex-= vChrBufSize; + break; //we can't output a dstY line so let's try with the next slice + } + +#if HAVE_MMX + c->blueDither= ff_dither8[dstY&1]; + if (c->dstFormat == PIX_FMT_RGB555 || c->dstFormat == PIX_FMT_BGR555) + c->greenDither= ff_dither8[dstY&1]; + else + c->greenDither= ff_dither4[dstY&1]; + c->redDither= ff_dither8[(dstY+1)&1]; +#endif + if (dstY < dstH-2) + { + int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize; + int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize; +#if HAVE_MMX + int i; + if (flags & SWS_ACCURATE_RND){ + int s= APCK_SIZE / 8; + for (i=0; i<vLumFilterSize; i+=2){ + *(void**)&lumMmxFilter[s*i ]= lumSrcPtr[i ]; + *(void**)&lumMmxFilter[s*i+APCK_PTR2/4 ]= lumSrcPtr[i+(vLumFilterSize>1)]; + lumMmxFilter[s*i+APCK_COEF/4 ]= + lumMmxFilter[s*i+APCK_COEF/4+1]= vLumFilter[dstY*vLumFilterSize + i ] + + (vLumFilterSize>1 ? 
vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0); + } + for (i=0; i<vChrFilterSize; i+=2){ + *(void**)&chrMmxFilter[s*i ]= chrSrcPtr[i ]; + *(void**)&chrMmxFilter[s*i+APCK_PTR2/4 ]= chrSrcPtr[i+(vChrFilterSize>1)]; + chrMmxFilter[s*i+APCK_COEF/4 ]= + chrMmxFilter[s*i+APCK_COEF/4+1]= vChrFilter[chrDstY*vChrFilterSize + i ] + + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0); + } + }else{ + for (i=0; i<vLumFilterSize; i++) + { + lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i]; + lumMmxFilter[4*i+1]= (uint64_t)lumSrcPtr[i] >> 32; + lumMmxFilter[4*i+2]= + lumMmxFilter[4*i+3]= + ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001; + } + for (i=0; i<vChrFilterSize; i++) + { + chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i]; + chrMmxFilter[4*i+1]= (uint64_t)chrSrcPtr[i] >> 32; + chrMmxFilter[4*i+2]= + chrMmxFilter[4*i+3]= + ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001; + } + } +#endif + if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){ + const int chrSkipMask= (1<<c->chrDstVSubSample)-1; + if (dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi + RENAME(yuv2nv12X)(c, + vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, + vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, + dest, uDest, dstW, chrDstW, dstFormat); + } + else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) //YV12 like + { + const int chrSkipMask= (1<<c->chrDstVSubSample)-1; + if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi + if (vLumFilterSize == 1 && vChrFilterSize == 1) // unscaled YV12 + { + int16_t *lumBuf = lumPixBuf[0]; + int16_t *chrBuf= chrPixBuf[0]; + RENAME(yuv2yuv1)(c, lumBuf, chrBuf, dest, uDest, vDest, dstW, chrDstW); + } + else //General YV12 + { + RENAME(yuv2yuvX)(c, + vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, + vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, + dest, uDest, vDest, dstW, chrDstW); + } + } + else + { + assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2); + assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2); + if (vLumFilterSize == 1 && vChrFilterSize == 2) //unscaled RGB + { + int chrAlpha= vChrFilter[2*dstY+1]; + if(flags & SWS_FULL_CHR_H_INT){ + yuv2rgbXinC_full(c, //FIXME write a packed1_full function + vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, + vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, + dest, dstW, dstY); + }else{ + RENAME(yuv2packed1)(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1), + dest, dstW, chrAlpha, dstFormat, flags, dstY); + } + } + else if (vLumFilterSize == 2 && vChrFilterSize == 2) //bilinear upscale RGB + { + int lumAlpha= vLumFilter[2*dstY+1]; + int chrAlpha= vChrFilter[2*dstY+1]; + lumMmxFilter[2]= + lumMmxFilter[3]= vLumFilter[2*dstY ]*0x10001; + chrMmxFilter[2]= + chrMmxFilter[3]= vChrFilter[2*chrDstY]*0x10001; + if(flags & SWS_FULL_CHR_H_INT){ + yuv2rgbXinC_full(c, //FIXME write a packed2_full function + vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, + vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, + dest, dstW, dstY); + }else{ + RENAME(yuv2packed2)(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1), + dest, dstW, lumAlpha, chrAlpha, dstY); + } + } + else //general RGB + { + if(flags & SWS_FULL_CHR_H_INT){ + yuv2rgbXinC_full(c, + vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, + vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, + dest, dstW, dstY); + }else{ + RENAME(yuv2packedX)(c, + 
vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, + vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, + dest, dstW, dstY); + } + } + } + } + else // hmm looks like we can't use MMX here without overwriting this array's tail + { + int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize; + int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize; + if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){ + const int chrSkipMask= (1<<c->chrDstVSubSample)-1; + if (dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi + yuv2nv12XinC( + vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, + vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, + dest, uDest, dstW, chrDstW, dstFormat); + } + else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) //YV12 + { + const int chrSkipMask= (1<<c->chrDstVSubSample)-1; + if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi + yuv2yuvXinC( + vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, + vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, + dest, uDest, vDest, dstW, chrDstW); + } + else + { + assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2); + assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2); + if(flags & SWS_FULL_CHR_H_INT){ + yuv2rgbXinC_full(c, + vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, + vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, + dest, dstW, dstY); + }else{ + yuv2packedXinC(c, + vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, + vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, + dest, dstW, dstY); + } + } + } + } + +#if HAVE_MMX + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); +#endif + /* store changed local vars back in the context */ + c->dstY= dstY; + c->lumBufIndex= lumBufIndex; + c->chrBufIndex= chrBufIndex; + c->lastInLumBuf= lastInLumBuf; + c->lastInChrBuf= lastInChrBuf; + + return dstY - lastDstY; +} diff --git a/libswscale/yuv2rgb.c b/libswscale/yuv2rgb.c new file mode 100644 index 0000000000..65af412c2c --- /dev/null +++ b/libswscale/yuv2rgb.c @@ -0,0 +1,684 @@ +/* + * software YUV to RGB converter + * + * Copyright (C) 2009 Konstantin Shishkov + * + * MMX/MMX2 template stuff (needed for fast movntq support), + * 1,4,8bpp support and context / deglobalize stuff + * by Michael Niedermayer (michaelni@gmx.at) + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <stdio.h> +#include <stdlib.h> +#include <inttypes.h> +#include <assert.h> + +#include "config.h" +#include "rgb2rgb.h" +#include "swscale.h" +#include "swscale_internal.h" + +#define DITHER1XBPP // only for MMX + +extern const uint8_t dither_8x8_32[8][8]; +extern const uint8_t dither_8x8_73[8][8]; +extern const uint8_t dither_8x8_220[8][8]; + +#if HAVE_MMX && CONFIG_GPL + +/* hope these constant values are cache line aligned */ +DECLARE_ASM_CONST(8, uint64_t, mmx_00ffw) = 0x00ff00ff00ff00ffULL; +DECLARE_ASM_CONST(8, uint64_t, mmx_redmask) = 0xf8f8f8f8f8f8f8f8ULL; +DECLARE_ASM_CONST(8, uint64_t, mmx_grnmask) = 0xfcfcfcfcfcfcfcfcULL; + +//MMX versions +#undef RENAME +#undef HAVE_MMX2 +#undef HAVE_AMD3DNOW +#define HAVE_MMX2 0 +#define HAVE_AMD3DNOW 0 +#define RENAME(a) a ## _MMX +#include "yuv2rgb_template.c" + +//MMX2 versions +#undef RENAME +#undef HAVE_MMX2 +#define HAVE_MMX2 1 +#define RENAME(a) a ## _MMX2 +#include "yuv2rgb_template.c" + +#endif /* HAVE_MMX && CONFIG_GPL */ + +const int32_t ff_yuv2rgb_coeffs[8][4] = { + {117504, 138453, 13954, 34903}, /* no sequence_display_extension */ + {117504, 138453, 13954, 34903}, /* ITU-R Rec. 709 (1990) */ + {104597, 132201, 25675, 53279}, /* unspecified */ + {104597, 132201, 25675, 53279}, /* reserved */ + {104448, 132798, 24759, 53109}, /* FCC */ + {104597, 132201, 25675, 53279}, /* ITU-R Rec. 624-4 System B, G */ + {104597, 132201, 25675, 53279}, /* SMPTE 170M */ + {117579, 136230, 16907, 35559} /* SMPTE 240M (1987) */ +}; + +#define LOADCHROMA(i) \ + U = pu[i]; \ + V = pv[i]; \ + r = (void *)c->table_rV[V]; \ + g = (void *)(c->table_gU[U] + c->table_gV[V]); \ + b = (void *)c->table_bU[U]; + +#define PUTRGB(dst,src,i,o) \ + Y = src[2*i+o]; \ + dst[2*i ] = r[Y] + g[Y] + b[Y]; \ + Y = src[2*i+1-o]; \ + dst[2*i+1] = r[Y] + g[Y] + b[Y]; + +#define PUTRGB24(dst,src,i) \ + Y = src[2*i]; \ + dst[6*i+0] = r[Y]; dst[6*i+1] = g[Y]; dst[6*i+2] = b[Y]; \ + Y = src[2*i+1]; \ + dst[6*i+3] = r[Y]; dst[6*i+4] = g[Y]; dst[6*i+5] = b[Y]; + +#define PUTBGR24(dst,src,i) \ + Y = src[2*i]; \ + dst[6*i+0] = b[Y]; dst[6*i+1] = g[Y]; dst[6*i+2] = r[Y]; \ + Y = src[2*i+1]; \ + dst[6*i+3] = b[Y]; dst[6*i+4] = g[Y]; dst[6*i+5] = r[Y]; + +#define YUV2RGBFUNC(func_name, dst_type) \ +static int func_name(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, \ + int srcSliceH, uint8_t* dst[], int dstStride[]){\ + int y;\ +\ + if (c->srcFormat == PIX_FMT_YUV422P) {\ + srcStride[1] *= 2;\ + srcStride[2] *= 2;\ + }\ + for (y=0; y<srcSliceH; y+=2) {\ + dst_type *dst_1 = (dst_type*)(dst[0] + (y+srcSliceY )*dstStride[0]);\ + dst_type *dst_2 = (dst_type*)(dst[0] + (y+srcSliceY+1)*dstStride[0]);\ + dst_type av_unused *r, *b;\ + dst_type *g;\ + uint8_t *py_1 = src[0] + y*srcStride[0];\ + uint8_t *py_2 = py_1 + srcStride[0];\ + uint8_t *pu = src[1] + (y>>1)*srcStride[1];\ + uint8_t *pv = src[2] + (y>>1)*srcStride[2];\ + unsigned int h_size = c->dstW>>3;\ + while (h_size--) {\ + int av_unused U, V;\ + int Y;\ + +#define ENDYUV2RGBLINE(dst_delta)\ + pu += 4;\ + pv += 4;\ + py_1 += 8;\ + py_2 += 8;\ + dst_1 += dst_delta;\ + dst_2 += dst_delta;\ + }\ + if (c->dstW & 4) {\ + int av_unused Y, U, V;\ + +#define ENDYUV2RGBFUNC()\ + }\ + }\ + return srcSliceH;\ +} + +#define CLOSEYUV2RGBFUNC(dst_delta)\ + ENDYUV2RGBLINE(dst_delta)\ + ENDYUV2RGBFUNC() 
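The converters that follow are all built from the macros above; per pixel the work is three table lookups plus two additions, because the tables (filled by sws_yuv2rgb_c_init_tables at the bottom of this file) hold each channel value already shifted to its final bit position. Hand-expanded for one 32 bpp pixel, LOADCHROMA + PUTRGB amount to roughly the sketch below (convert_pixel_32 is an illustrative name, not a function in this patch):

static uint32_t convert_pixel_32(const SwsContext *c, int Y, int U, int V)
{
    /* LOADCHROMA: pick per-chroma base pointers into the Y-indexed tables;
       green depends on both U and V, so its base is offset by table_gV[V] */
    const uint32_t *r = (const uint32_t *) c->table_rV[V];
    const uint32_t *g = (const uint32_t *)(c->table_gU[U] + c->table_gV[V]);
    const uint32_t *b = (const uint32_t *) c->table_bU[U];
    /* PUTRGB: at 32 bpp each lookup contributes disjoint bits (alpha is
       pre-added into the red table), so plain addition assembles the pixel */
    return r[Y] + g[Y] + b[Y];
}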
+ +YUV2RGBFUNC(yuv2rgb_c_32, uint32_t) + LOADCHROMA(0); + PUTRGB(dst_1,py_1,0,0); + PUTRGB(dst_2,py_2,0,1); + + LOADCHROMA(1); + PUTRGB(dst_2,py_2,1,1); + PUTRGB(dst_1,py_1,1,0); + + LOADCHROMA(2); + PUTRGB(dst_1,py_1,2,0); + PUTRGB(dst_2,py_2,2,1); + + LOADCHROMA(3); + PUTRGB(dst_2,py_2,3,1); + PUTRGB(dst_1,py_1,3,0); +ENDYUV2RGBLINE(8) + LOADCHROMA(0); + PUTRGB(dst_1,py_1,0,0); + PUTRGB(dst_2,py_2,0,1); + + LOADCHROMA(1); + PUTRGB(dst_2,py_2,1,1); + PUTRGB(dst_1,py_1,1,0); +ENDYUV2RGBFUNC() + +YUV2RGBFUNC(yuv2rgb_c_24_rgb, uint8_t) + LOADCHROMA(0); + PUTRGB24(dst_1,py_1,0); + PUTRGB24(dst_2,py_2,0); + + LOADCHROMA(1); + PUTRGB24(dst_2,py_2,1); + PUTRGB24(dst_1,py_1,1); + + LOADCHROMA(2); + PUTRGB24(dst_1,py_1,2); + PUTRGB24(dst_2,py_2,2); + + LOADCHROMA(3); + PUTRGB24(dst_2,py_2,3); + PUTRGB24(dst_1,py_1,3); +ENDYUV2RGBLINE(24) + LOADCHROMA(0); + PUTRGB24(dst_1,py_1,0); + PUTRGB24(dst_2,py_2,0); + + LOADCHROMA(1); + PUTRGB24(dst_2,py_2,1); + PUTRGB24(dst_1,py_1,1); +ENDYUV2RGBFUNC() + +// only trivial mods from yuv2rgb_c_24_rgb +YUV2RGBFUNC(yuv2rgb_c_24_bgr, uint8_t) + LOADCHROMA(0); + PUTBGR24(dst_1,py_1,0); + PUTBGR24(dst_2,py_2,0); + + LOADCHROMA(1); + PUTBGR24(dst_2,py_2,1); + PUTBGR24(dst_1,py_1,1); + + LOADCHROMA(2); + PUTBGR24(dst_1,py_1,2); + PUTBGR24(dst_2,py_2,2); + + LOADCHROMA(3); + PUTBGR24(dst_2,py_2,3); + PUTBGR24(dst_1,py_1,3); +ENDYUV2RGBLINE(24) + LOADCHROMA(0); + PUTBGR24(dst_1,py_1,0); + PUTBGR24(dst_2,py_2,0); + + LOADCHROMA(1); + PUTBGR24(dst_2,py_2,1); + PUTBGR24(dst_1,py_1,1); +ENDYUV2RGBFUNC() + +// This is exactly the same code as yuv2rgb_c_32 except for the types of +// r, g, b, dst_1, dst_2 +YUV2RGBFUNC(yuv2rgb_c_16, uint16_t) + LOADCHROMA(0); + PUTRGB(dst_1,py_1,0,0); + PUTRGB(dst_2,py_2,0,1); + + LOADCHROMA(1); + PUTRGB(dst_2,py_2,1,1); + PUTRGB(dst_1,py_1,1,0); + + LOADCHROMA(2); + PUTRGB(dst_1,py_1,2,0); + PUTRGB(dst_2,py_2,2,1); + + LOADCHROMA(3); + PUTRGB(dst_2,py_2,3,1); + PUTRGB(dst_1,py_1,3,0); +CLOSEYUV2RGBFUNC(8) + +// This is exactly the same code as yuv2rgb_c_32 except for the types of +// r, g, b, dst_1, dst_2 +YUV2RGBFUNC(yuv2rgb_c_8, uint8_t) + LOADCHROMA(0); + PUTRGB(dst_1,py_1,0,0); + PUTRGB(dst_2,py_2,0,1); + + LOADCHROMA(1); + PUTRGB(dst_2,py_2,1,1); + PUTRGB(dst_1,py_1,1,0); + + LOADCHROMA(2); + PUTRGB(dst_1,py_1,2,0); + PUTRGB(dst_2,py_2,2,1); + + LOADCHROMA(3); + PUTRGB(dst_2,py_2,3,1); + PUTRGB(dst_1,py_1,3,0); +CLOSEYUV2RGBFUNC(8) + +// r, g, b, dst_1, dst_2 +YUV2RGBFUNC(yuv2rgb_c_8_ordered_dither, uint8_t) + const uint8_t *d32 = dither_8x8_32[y&7]; + const uint8_t *d64 = dither_8x8_73[y&7]; +#define PUTRGB8(dst,src,i,o) \ + Y = src[2*i]; \ + dst[2*i] = r[Y+d32[0+o]] + g[Y+d32[0+o]] + b[Y+d64[0+o]]; \ + Y = src[2*i+1]; \ + dst[2*i+1] = r[Y+d32[1+o]] + g[Y+d32[1+o]] + b[Y+d64[1+o]]; + + LOADCHROMA(0); + PUTRGB8(dst_1,py_1,0,0); + PUTRGB8(dst_2,py_2,0,0+8); + + LOADCHROMA(1); + PUTRGB8(dst_2,py_2,1,2+8); + PUTRGB8(dst_1,py_1,1,2); + + LOADCHROMA(2); + PUTRGB8(dst_1,py_1,2,4); + PUTRGB8(dst_2,py_2,2,4+8); + + LOADCHROMA(3); + PUTRGB8(dst_2,py_2,3,6+8); + PUTRGB8(dst_1,py_1,3,6); +CLOSEYUV2RGBFUNC(8) + + +// This is exactly the same code as yuv2rgb_c_32 except for the types of +// r, g, b, dst_1, dst_2 +YUV2RGBFUNC(yuv2rgb_c_4, uint8_t) + int acc; +#define PUTRGB4(dst,src,i) \ + Y = src[2*i]; \ + acc = r[Y] + g[Y] + b[Y]; \ + Y = src[2*i+1]; \ + acc |= (r[Y] + g[Y] + b[Y])<<4; \ + dst[i] = acc; + + LOADCHROMA(0); + PUTRGB4(dst_1,py_1,0); + PUTRGB4(dst_2,py_2,0); + +
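/* PUTRGB4 packs two horizontally adjacent pixels into one output byte: + the first pixel in the low nibble, the second in the high nibble, each + nibble again being the sum of three table lookups */ + +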
LOADCHROMA(1); + PUTRGB4(dst_2,py_2,1); + PUTRGB4(dst_1,py_1,1); + + LOADCHROMA(2); + PUTRGB4(dst_1,py_1,2); + PUTRGB4(dst_2,py_2,2); + + LOADCHROMA(3); + PUTRGB4(dst_2,py_2,3); + PUTRGB4(dst_1,py_1,3); +CLOSEYUV2RGBFUNC(4) + +YUV2RGBFUNC(yuv2rgb_c_4_ordered_dither, uint8_t) + const uint8_t *d64 = dither_8x8_73[y&7]; + const uint8_t *d128 = dither_8x8_220[y&7]; + int acc; + +#define PUTRGB4D(dst,src,i,o) \ + Y = src[2*i]; \ + acc = r[Y+d128[0+o]] + g[Y+d64[0+o]] + b[Y+d128[0+o]]; \ + Y = src[2*i+1]; \ + acc |= (r[Y+d128[1+o]] + g[Y+d64[1+o]] + b[Y+d128[1+o]])<<4; \ + dst[i]= acc; + + LOADCHROMA(0); + PUTRGB4D(dst_1,py_1,0,0); + PUTRGB4D(dst_2,py_2,0,0+8); + + LOADCHROMA(1); + PUTRGB4D(dst_2,py_2,1,2+8); + PUTRGB4D(dst_1,py_1,1,2); + + LOADCHROMA(2); + PUTRGB4D(dst_1,py_1,2,4); + PUTRGB4D(dst_2,py_2,2,4+8); + + LOADCHROMA(3); + PUTRGB4D(dst_2,py_2,3,6+8); + PUTRGB4D(dst_1,py_1,3,6); +CLOSEYUV2RGBFUNC(4) + +// This is exactly the same code as yuv2rgb_c_32 except for the types of +// r, g, b, dst_1, dst_2 +YUV2RGBFUNC(yuv2rgb_c_4b, uint8_t) + LOADCHROMA(0); + PUTRGB(dst_1,py_1,0,0); + PUTRGB(dst_2,py_2,0,1); + + LOADCHROMA(1); + PUTRGB(dst_2,py_2,1,1); + PUTRGB(dst_1,py_1,1,0); + + LOADCHROMA(2); + PUTRGB(dst_1,py_1,2,0); + PUTRGB(dst_2,py_2,2,1); + + LOADCHROMA(3); + PUTRGB(dst_2,py_2,3,1); + PUTRGB(dst_1,py_1,3,0); +CLOSEYUV2RGBFUNC(8) + +YUV2RGBFUNC(yuv2rgb_c_4b_ordered_dither, uint8_t) + const uint8_t *d64 = dither_8x8_73[y&7]; + const uint8_t *d128 = dither_8x8_220[y&7]; + +#define PUTRGB4DB(dst,src,i,o) \ + Y = src[2*i]; \ + dst[2*i] = r[Y+d128[0+o]] + g[Y+d64[0+o]] + b[Y+d128[0+o]]; \ + Y = src[2*i+1]; \ + dst[2*i+1] = r[Y+d128[1+o]] + g[Y+d64[1+o]] + b[Y+d128[1+o]]; + + LOADCHROMA(0); + PUTRGB4DB(dst_1,py_1,0,0); + PUTRGB4DB(dst_2,py_2,0,0+8); + + LOADCHROMA(1); + PUTRGB4DB(dst_2,py_2,1,2+8); + PUTRGB4DB(dst_1,py_1,1,2); + + LOADCHROMA(2); + PUTRGB4DB(dst_1,py_1,2,4); + PUTRGB4DB(dst_2,py_2,2,4+8); + + LOADCHROMA(3); + PUTRGB4DB(dst_2,py_2,3,6+8); + PUTRGB4DB(dst_1,py_1,3,6); +CLOSEYUV2RGBFUNC(8) + +YUV2RGBFUNC(yuv2rgb_c_1_ordered_dither, uint8_t) + const uint8_t *d128 = dither_8x8_220[y&7]; + char out_1 = 0, out_2 = 0; + g= c->table_gU[128] + c->table_gV[128]; + +#define PUTRGB1(out,src,i,o) \ + Y = src[2*i]; \ + out+= out + g[Y+d128[0+o]]; \ + Y = src[2*i+1]; \ + out+= out + g[Y+d128[1+o]]; + + PUTRGB1(out_1,py_1,0,0); + PUTRGB1(out_2,py_2,0,0+8); + + PUTRGB1(out_2,py_2,1,2+8); + PUTRGB1(out_1,py_1,1,2); + + PUTRGB1(out_1,py_1,2,4); + PUTRGB1(out_2,py_2,2,4+8); + + PUTRGB1(out_2,py_2,3,6+8); + PUTRGB1(out_1,py_1,3,6); + + dst_1[0]= out_1; + dst_2[0]= out_2; +CLOSEYUV2RGBFUNC(1) + +SwsFunc sws_yuv2rgb_get_func_ptr(SwsContext *c) +{ + SwsFunc t = NULL; +#if (HAVE_MMX2 || HAVE_MMX) && CONFIG_GPL + if (c->flags & SWS_CPU_CAPS_MMX2) { + switch (c->dstFormat) { + case PIX_FMT_RGB32: return yuv420_rgb32_MMX2; + case PIX_FMT_BGR24: return yuv420_rgb24_MMX2; + case PIX_FMT_RGB565: return yuv420_rgb16_MMX2; + case PIX_FMT_RGB555: return yuv420_rgb15_MMX2; + } + } + if (c->flags & SWS_CPU_CAPS_MMX) { + switch (c->dstFormat) { + case PIX_FMT_RGB32: return yuv420_rgb32_MMX; + case PIX_FMT_BGR24: return yuv420_rgb24_MMX; + case PIX_FMT_RGB565: return yuv420_rgb16_MMX; + case PIX_FMT_RGB555: return yuv420_rgb15_MMX; + } + } +#endif +#if HAVE_VIS + t = sws_yuv2rgb_init_vis(c); +#endif +#if CONFIG_MLIB + t = sws_yuv2rgb_init_mlib(c); +#endif +#if HAVE_ALTIVEC && CONFIG_GPL + if (c->flags & SWS_CPU_CAPS_ALTIVEC) + t = sws_yuv2rgb_init_altivec(c); +#endif + +#if ARCH_BFIN + if (c->flags & 
SWS_CPU_CAPS_BFIN) + t = sws_ff_bfin_yuv2rgb_get_func_ptr(c); +#endif + + if (t) + return t; + + av_log(c, AV_LOG_WARNING, "No accelerated colorspace conversion found.\n"); + + switch (c->dstFormat) { + case PIX_FMT_BGR32_1: + case PIX_FMT_RGB32_1: + case PIX_FMT_BGR32: + case PIX_FMT_RGB32: return yuv2rgb_c_32; + case PIX_FMT_RGB24: return yuv2rgb_c_24_rgb; + case PIX_FMT_BGR24: return yuv2rgb_c_24_bgr; + case PIX_FMT_RGB565: + case PIX_FMT_BGR565: + case PIX_FMT_RGB555: + case PIX_FMT_BGR555: return yuv2rgb_c_16; + case PIX_FMT_RGB8: + case PIX_FMT_BGR8: return yuv2rgb_c_8_ordered_dither; + case PIX_FMT_RGB4: + case PIX_FMT_BGR4: return yuv2rgb_c_4_ordered_dither; + case PIX_FMT_RGB4_BYTE: + case PIX_FMT_BGR4_BYTE: return yuv2rgb_c_4b_ordered_dither; + case PIX_FMT_MONOBLACK: return yuv2rgb_c_1_ordered_dither; + default: + assert(0); + } + return NULL; +} + +static void fill_table(uint8_t* table[256], const int elemsize, const int inc, uint8_t *y_table) +{ + int i; + int64_t cb = 0; + + y_table -= elemsize * (inc >> 9); + + for (i = 0; i < 256; i++) { + table[i] = y_table + elemsize * (cb >> 16); + cb += inc; + } +} + +static void fill_gv_table(int table[256], const int elemsize, const int inc) +{ + int i; + int64_t cb = 0; + int off = -(inc >> 9); + + for (i = 0; i < 256; i++) { + table[i] = elemsize * (off + (cb >> 16)); + cb += inc; + } +} + +av_cold int sws_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4], int fullRange, + int brightness, int contrast, int saturation) +{ + const int isRgb = c->dstFormat==PIX_FMT_RGB32 + || c->dstFormat==PIX_FMT_RGB32_1 + || c->dstFormat==PIX_FMT_BGR24 + || c->dstFormat==PIX_FMT_RGB565 + || c->dstFormat==PIX_FMT_RGB555 + || c->dstFormat==PIX_FMT_RGB8 + || c->dstFormat==PIX_FMT_RGB4 + || c->dstFormat==PIX_FMT_RGB4_BYTE + || c->dstFormat==PIX_FMT_MONOBLACK; + const int bpp = fmt_depth(c->dstFormat); + uint8_t *y_table; + uint16_t *y_table16; + uint32_t *y_table32; + int i, base, rbase, gbase, bbase, abase; + const int yoffs = fullRange ? 384 : 326; + + int64_t crv = inv_table[0]; + int64_t cbu = inv_table[1]; + int64_t cgu = -inv_table[2]; + int64_t cgv = -inv_table[3]; + int64_t cy = 1<<16; + int64_t oy = 0; + + int64_t yb = 0; + + if (!fullRange) { + cy = (cy*255) / 219; + oy = 16<<16; + } else { + crv = (crv*224) / 255; + cbu = (cbu*224) / 255; + cgu = (cgu*224) / 255; + cgv = (cgv*224) / 255; + } + + cy = (cy *contrast ) >> 16; + crv = (crv*contrast * saturation) >> 32; + cbu = (cbu*contrast * saturation) >> 32; + cgu = (cgu*contrast * saturation) >> 32; + cgv = (cgv*contrast * saturation) >> 32; + oy -= 256*brightness; + + //scale coefficients by cy + crv = ((crv << 16) + 0x8000) / cy; + cbu = ((cbu << 16) + 0x8000) / cy; + cgu = ((cgu << 16) + 0x8000) / cy; + cgv = ((cgv << 16) + 0x8000) / cy; + + av_free(c->yuvTable); + + switch (bpp) { + case 1: + c->yuvTable = av_malloc(1024); + y_table = c->yuvTable; + yb = -(384<<16) - oy; + for (i = 0; i < 1024-110; i++) { + y_table[i+110] = av_clip_uint8((yb + 0x8000) >> 16) >> 7; + yb += cy; + } + fill_table(c->table_gU, 1, cgu, y_table + yoffs); + fill_gv_table(c->table_gV, 1, cgv); + break; + case 4: + case 4|128: + rbase = isRgb ? 3 : 0; + gbase = 1; + bbase = isRgb ? 
0 : 3; + c->yuvTable = av_malloc(1024*3); + y_table = c->yuvTable; + yb = -(384<<16) - oy; + for (i = 0; i < 1024-110; i++) { + int yval = av_clip_uint8((yb + 0x8000) >> 16); + y_table[i+110 ] = (yval >> 7) << rbase; + y_table[i+ 37+1024] = ((yval + 43) / 85) << gbase; + y_table[i+110+2048] = (yval >> 7) << bbase; + yb += cy; + } + fill_table(c->table_rV, 1, crv, y_table + yoffs); + fill_table(c->table_gU, 1, cgu, y_table + yoffs + 1024); + fill_table(c->table_bU, 1, cbu, y_table + yoffs + 2048); + fill_gv_table(c->table_gV, 1, cgv); + break; + case 8: + rbase = isRgb ? 5 : 0; + gbase = isRgb ? 2 : 3; + bbase = isRgb ? 0 : 6; + c->yuvTable = av_malloc(1024*3); + y_table = c->yuvTable; + yb = -(384<<16) - oy; + for (i = 0; i < 1024-38; i++) { + int yval = av_clip_uint8((yb + 0x8000) >> 16); + y_table[i+16 ] = ((yval + 18) / 36) << rbase; + y_table[i+16+1024] = ((yval + 18) / 36) << gbase; + y_table[i+37+2048] = ((yval + 43) / 85) << bbase; + yb += cy; + } + fill_table(c->table_rV, 1, crv, y_table + yoffs); + fill_table(c->table_gU, 1, cgu, y_table + yoffs + 1024); + fill_table(c->table_bU, 1, cbu, y_table + yoffs + 2048); + fill_gv_table(c->table_gV, 1, cgv); + break; + case 15: + case 16: + rbase = isRgb ? bpp - 5 : 0; + gbase = 5; + bbase = isRgb ? 0 : (bpp - 5); + c->yuvTable = av_malloc(1024*3*2); + y_table16 = c->yuvTable; + yb = -(384<<16) - oy; + for (i = 0; i < 1024; i++) { + uint8_t yval = av_clip_uint8((yb + 0x8000) >> 16); + y_table16[i ] = (yval >> 3) << rbase; + y_table16[i+1024] = (yval >> (18 - bpp)) << gbase; + y_table16[i+2048] = (yval >> 3) << bbase; + yb += cy; + } + fill_table(c->table_rV, 2, crv, y_table16 + yoffs); + fill_table(c->table_gU, 2, cgu, y_table16 + yoffs + 1024); + fill_table(c->table_bU, 2, cbu, y_table16 + yoffs + 2048); + fill_gv_table(c->table_gV, 2, cgv); + break; + case 24: + c->yuvTable = av_malloc(1024); + y_table = c->yuvTable; + yb = -(384<<16) - oy; + for (i = 0; i < 1024; i++) { + y_table[i] = av_clip_uint8((yb + 0x8000) >> 16); + yb += cy; + } + fill_table(c->table_rV, 1, crv, y_table + yoffs); + fill_table(c->table_gU, 1, cgu, y_table + yoffs); + fill_table(c->table_bU, 1, cbu, y_table + yoffs); + fill_gv_table(c->table_gV, 1, cgv); + break; + case 32: + base = (c->dstFormat == PIX_FMT_RGB32_1 || c->dstFormat == PIX_FMT_BGR32_1) ? 8 : 0; + rbase = base + (isRgb ? 16 : 0); + gbase = base + 8; + bbase = base + (isRgb ? 0 : 16); + abase = (base + 24) & 31; + c->yuvTable = av_malloc(1024*3*4); + y_table32 = c->yuvTable; + yb = -(384<<16) - oy; + for (i = 0; i < 1024; i++) { + uint8_t yval = av_clip_uint8((yb + 0x8000) >> 16); + y_table32[i ] = (yval << rbase) + (255 << abase); + y_table32[i+1024] = yval << gbase; + y_table32[i+2048] = yval << bbase; + yb += cy; + } + fill_table(c->table_rV, 4, crv, y_table32 + yoffs); + fill_table(c->table_gU, 4, cgu, y_table32 + yoffs + 1024); + fill_table(c->table_bU, 4, cbu, y_table32 + yoffs + 2048); + fill_gv_table(c->table_gV, 4, cgv); + break; + default: + c->yuvTable = NULL; + av_log(c, AV_LOG_ERROR, "%ibpp not supported by yuv2rgb\n", bpp); + return -1; + } + return 0; +} diff --git a/libswscale/yuv2rgb_altivec.c b/libswscale/yuv2rgb_altivec.c new file mode 100644 index 0000000000..b3a87a0360 --- /dev/null +++ b/libswscale/yuv2rgb_altivec.c @@ -0,0 +1,962 @@ +/* + * AltiVec acceleration for colorspace conversion + * + * copyright (C) 2004 Marc Hoffman <marc.hoffman@analog.com> + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/* +Convert I420 YV12 to RGB in various formats, + it rejects images that are not in 420 formats, + it rejects images that don't have widths of multiples of 16, + it rejects images that don't have heights of multiples of 2. +Rejection defers to the C simulation code. + +Lots of optimizations to be done here. + +1. Need to fix saturation code. I just couldn't get it to fly with packs + and adds, so we currently use max/min to clip. + +2. The inefficient use of chroma loading needs a bit of brushing up. + +3. Analysis of pipeline stalls needs to be done. Use Shark to identify + pipeline stalls. + + +MODIFIED to calculate coeffs from currently selected color space. +MODIFIED core to be a macro where you specify the output format. +ADDED UYVY conversion which is never called due to something in swscale. +CORRECTED algorithm selection to be strict on input formats. +ADDED runtime detection of AltiVec. + +ADDED altivec_yuv2packedX vertical scale + RGB converter + +March 27, 2004 +PERFORMANCE ANALYSIS + +The C version uses 25% of the processor or ~250 MIPS for D1 video rawvideo +used as a test. +The AltiVec version uses 10% of the processor or ~100 MIPS for D1 video, +same sequence. + +720 * 480 * 30 ~10MPS + +so we have roughly 10 clocks per pixel (~100 MIPS / ~10 MPS). This is too high, +something has to be wrong. + +OPTIMIZED clip codes to utilize vec_max and vec_packs, removing the +need for vec_min. + +OPTIMIZED DST OUTPUT cache/DMA controls. We are pretty much guaranteed to have +the input video frame; it was just decompressed, so it probably resides in L1 +caches. However, we are creating the output video stream. This needs to use the +DSTST instruction to optimize for the cache. We couple this with the fact that +we are not going to be visiting the input buffer again, so we mark it Least +Recently Used. This shaves 25% of the processor cycles off. + +Now memcpy is the largest MIPS consumer in the system, probably due +to the inefficient X11 stuff. + +GL libraries seem to be very slow on this machine, a 1.33 GHz PB running +Jaguar; this is not the case for my 1 GHz PB. I thought it might be +a versioning issue, however I have libGL.1.2.dylib for both +machines. (We need to figure this out now.) + +GL2 libraries work now with patch for RGB32. + +NOTE: quartz vo driver ARGB32_to_RGB24 consumes 30% of the processor. + +Integrated luma prescaling for saturation/contrast/brightness +adjustment.
+*/ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <inttypes.h> +#include <assert.h> +#include "config.h" +#include "rgb2rgb.h" +#include "swscale.h" +#include "swscale_internal.h" + +#undef PROFILE_THE_BEAST +#undef INC_SCALING + +typedef unsigned char ubyte; +typedef signed char sbyte; + + +/* RGB interleaver, 16 planar pels 8-bit samples per channel in + homogeneous vector registers x0,x1,x2 are interleaved with the + following technique: + + o0 = vec_mergeh (x0,x1); + o1 = vec_perm (o0, x2, perm_rgb_0); + o2 = vec_perm (o0, x2, perm_rgb_1); + o3 = vec_mergel (x0,x1); + o4 = vec_perm (o3,o2,perm_rgb_2); + o5 = vec_perm (o3,o2,perm_rgb_3); + + perm_rgb_0: o0(RG).h v1(B) --> o1* + 0 1 2 3 4 + rgbr|gbrg|brgb|rgbr + 0010 0100 1001 0010 + 0102 3145 2673 894A + + perm_rgb_1: o0(RG).h v1(B) --> o2 + 0 1 2 3 4 + gbrg|brgb|bbbb|bbbb + 0100 1001 1111 1111 + B5CD 6EF7 89AB CDEF + + perm_rgb_2: o3(RG).l o2(rgbB.l) --> o4* + 0 1 2 3 4 + gbrg|brgb|rgbr|gbrg + 1111 1111 0010 0100 + 89AB CDEF 0182 3945 + + perm_rgb_3: o3(RG).l o2(rgbB.l) --> o5* + 0 1 2 3 4 + brgb|rgbr|gbrg|brgb + 1001 0010 0100 1001 + a67b 89cA BdCD eEFf + +*/ +static +const vector unsigned char + perm_rgb_0 = {0x00,0x01,0x10,0x02,0x03,0x11,0x04,0x05, + 0x12,0x06,0x07,0x13,0x08,0x09,0x14,0x0a}, + perm_rgb_1 = {0x0b,0x15,0x0c,0x0d,0x16,0x0e,0x0f,0x17, + 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f}, + perm_rgb_2 = {0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17, + 0x00,0x01,0x18,0x02,0x03,0x19,0x04,0x05}, + perm_rgb_3 = {0x1a,0x06,0x07,0x1b,0x08,0x09,0x1c,0x0a, + 0x0b,0x1d,0x0c,0x0d,0x1e,0x0e,0x0f,0x1f}; + +#define vec_merge3(x2,x1,x0,y0,y1,y2) \ +do { \ + __typeof__(x0) o0,o2,o3; \ + o0 = vec_mergeh (x0,x1); \ + y0 = vec_perm (o0, x2, perm_rgb_0); \ + o2 = vec_perm (o0, x2, perm_rgb_1); \ + o3 = vec_mergel (x0,x1); \ + y1 = vec_perm (o3,o2,perm_rgb_2); \ + y2 = vec_perm (o3,o2,perm_rgb_3); \ +} while(0) + +#define vec_mstbgr24(x0,x1,x2,ptr) \ +do { \ + __typeof__(x0) _0,_1,_2; \ + vec_merge3 (x0,x1,x2,_0,_1,_2); \ + vec_st (_0, 0, ptr++); \ + vec_st (_1, 0, ptr++); \ + vec_st (_2, 0, ptr++); \ +} while (0); + +#define vec_mstrgb24(x0,x1,x2,ptr) \ +do { \ + __typeof__(x0) _0,_1,_2; \ + vec_merge3 (x2,x1,x0,_0,_1,_2); \ + vec_st (_0, 0, ptr++); \ + vec_st (_1, 0, ptr++); \ + vec_st (_2, 0, ptr++); \ +} while (0); + +/* pack the pixels in rgb0 format + msb R + lsb 0 +*/ +#define vec_mstrgb32(T,x0,x1,x2,x3,ptr) \ +do { \ + T _0,_1,_2,_3; \ + _0 = vec_mergeh (x0,x1); \ + _1 = vec_mergeh (x2,x3); \ + _2 = (T)vec_mergeh ((vector unsigned short)_0,(vector unsigned short)_1); \ + _3 = (T)vec_mergel ((vector unsigned short)_0,(vector unsigned short)_1); \ + vec_st (_2, 0*16, (T *)ptr); \ + vec_st (_3, 1*16, (T *)ptr); \ + _0 = vec_mergel (x0,x1); \ + _1 = vec_mergel (x2,x3); \ + _2 = (T)vec_mergeh ((vector unsigned short)_0,(vector unsigned short)_1); \ + _3 = (T)vec_mergel ((vector unsigned short)_0,(vector unsigned short)_1); \ + vec_st (_2, 2*16, (T *)ptr); \ + vec_st (_3, 3*16, (T *)ptr); \ + ptr += 4; \ +} while (0); + +/* + + | 1 0 1.4021 | | Y | + | 1 -0.3441 -0.7142 |x| Cb| + | 1 1.7718 0 | | Cr| + + + Y: [-128 127] + Cb/Cr : [-128 127] + + Typical YUV conversion works on Y: 0-255; this version has been optimized for JPEG decode.
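+ + In the code below this matrix is applied with vec_mradds, where + vec_mradds(x, c, a) computes saturate(((x * c + 0x4000) >> 15) + a). + The coefficients are stored as coeff * 2^13 (their 16.16 fixed-point + values shifted right by 3 in sws_yuv2rgb_altivec_init_tables) and the + chroma samples are pre-shifted left by CSHIFT (2), so the net chroma + scale after the >> 15 is exactly coeff.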
+ +*/ + + + + +#define vec_unh(x) \ + (vector signed short) \ + vec_perm(x,(__typeof__(x)){0}, \ + ((vector unsigned char){0x10,0x00,0x10,0x01,0x10,0x02,0x10,0x03,\ + 0x10,0x04,0x10,0x05,0x10,0x06,0x10,0x07})) +#define vec_unl(x) \ + (vector signed short) \ + vec_perm(x,(__typeof__(x)){0}, \ + ((vector unsigned char){0x10,0x08,0x10,0x09,0x10,0x0A,0x10,0x0B,\ + 0x10,0x0C,0x10,0x0D,0x10,0x0E,0x10,0x0F})) + +#define vec_clip_s16(x) \ + vec_max (vec_min (x, ((vector signed short){235,235,235,235,235,235,235,235})), \ + ((vector signed short){ 16, 16, 16, 16, 16, 16, 16, 16})) + +#define vec_packclp(x,y) \ + (vector unsigned char)vec_packs \ + ((vector unsigned short)vec_max (x,((vector signed short) {0})), \ + (vector unsigned short)vec_max (y,((vector signed short) {0}))) + +//#define out_pixels(a,b,c,ptr) vec_mstrgb32(__typeof__(a),((__typeof__ (a)){255}),a,a,a,ptr) + + +static inline void cvtyuvtoRGB (SwsContext *c, + vector signed short Y, vector signed short U, vector signed short V, + vector signed short *R, vector signed short *G, vector signed short *B) +{ + vector signed short vx,ux,uvx; + + Y = vec_mradds (Y, c->CY, c->OY); + U = vec_sub (U,(vector signed short) + vec_splat((vector signed short){128},0)); + V = vec_sub (V,(vector signed short) + vec_splat((vector signed short){128},0)); + + // ux = (CBU*(u<<c->CSHIFT)+0x4000)>>15; + ux = vec_sl (U, c->CSHIFT); + *B = vec_mradds (ux, c->CBU, Y); + + // vx = (CRV*(v<<c->CSHIFT)+0x4000)>>15; + vx = vec_sl (V, c->CSHIFT); + *R = vec_mradds (vx, c->CRV, Y); + + // uvx = ((CGU*u) + (CGV*v))>>15; + uvx = vec_mradds (U, c->CGU, Y); + *G = vec_mradds (V, c->CGV, uvx); +} + + +/* + ------------------------------------------------------------------------------ + CS converters + ------------------------------------------------------------------------------ +*/ + + +#define DEFCSP420_CVT(name,out_pixels) \ +static int altivec_##name (SwsContext *c, \ + unsigned char **in, int *instrides, \ + int srcSliceY, int srcSliceH, \ + unsigned char **oplanes, int *outstrides) \ +{ \ + int w = c->srcW; \ + int h = srcSliceH; \ + int i,j; \ + int instrides_scl[3]; \ + vector unsigned char y0,y1; \ + \ + vector signed char u,v; \ + \ + vector signed short Y0,Y1,Y2,Y3; \ + vector signed short U,V; \ + vector signed short vx,ux,uvx; \ + vector signed short vx0,ux0,uvx0; \ + vector signed short vx1,ux1,uvx1; \ + vector signed short R0,G0,B0; \ + vector signed short R1,G1,B1; \ + vector unsigned char R,G,B; \ + \ + vector unsigned char *y1ivP, *y2ivP, *uivP, *vivP; \ + vector unsigned char align_perm; \ + \ + vector signed short \ + lCY = c->CY, \ + lOY = c->OY, \ + lCRV = c->CRV, \ + lCBU = c->CBU, \ + lCGU = c->CGU, \ + lCGV = c->CGV; \ + \ + vector unsigned short lCSHIFT = c->CSHIFT; \ + \ + ubyte *y1i = in[0]; \ + ubyte *y2i = in[0]+instrides[0]; \ + ubyte *ui = in[1]; \ + ubyte *vi = in[2]; \ + \ + vector unsigned char *oute \ + = (vector unsigned char *) \ + (oplanes[0]+srcSliceY*outstrides[0]); \ + vector unsigned char *outo \ + = (vector unsigned char *) \ + (oplanes[0]+srcSliceY*outstrides[0]+outstrides[0]); \ + \ + \ + instrides_scl[0] = instrides[0]*2-w; /* the loop moves y{1,2}i by w */ \ + instrides_scl[1] = instrides[1]-w/2; /* the loop moves ui by w/2 */ \ + instrides_scl[2] = instrides[2]-w/2; /* the loop moves vi by w/2 */ \ + \ + \ + for (i=0;i<h/2;i++) { \ + vec_dstst (outo, (0x02000002|(((w*3+32)/32)<<16)), 0); \ + vec_dstst (oute, (0x02000002|(((w*3+32)/32)<<16)), 1); \ + \ + for (j=0;j<w/16;j++) { \ + \ + y1ivP = (vector unsigned char *)y1i; 
\ + y2ivP = (vector unsigned char *)y2i; \ + uivP = (vector unsigned char *)ui; \ + vivP = (vector unsigned char *)vi; \ + \ + align_perm = vec_lvsl (0, y1i); \ + y0 = (vector unsigned char) \ + vec_perm (y1ivP[0], y1ivP[1], align_perm); \ + \ + align_perm = vec_lvsl (0, y2i); \ + y1 = (vector unsigned char) \ + vec_perm (y2ivP[0], y2ivP[1], align_perm); \ + \ + align_perm = vec_lvsl (0, ui); \ + u = (vector signed char) \ + vec_perm (uivP[0], uivP[1], align_perm); \ + \ + align_perm = vec_lvsl (0, vi); \ + v = (vector signed char) \ + vec_perm (vivP[0], vivP[1], align_perm); \ + \ + u = (vector signed char) \ + vec_sub (u,(vector signed char) \ + vec_splat((vector signed char){128},0)); \ + v = (vector signed char) \ + vec_sub (v,(vector signed char) \ + vec_splat((vector signed char){128},0)); \ + \ + U = vec_unpackh (u); \ + V = vec_unpackh (v); \ + \ + \ + Y0 = vec_unh (y0); \ + Y1 = vec_unl (y0); \ + Y2 = vec_unh (y1); \ + Y3 = vec_unl (y1); \ + \ + Y0 = vec_mradds (Y0, lCY, lOY); \ + Y1 = vec_mradds (Y1, lCY, lOY); \ + Y2 = vec_mradds (Y2, lCY, lOY); \ + Y3 = vec_mradds (Y3, lCY, lOY); \ + \ + /* ux = (CBU*(u<<CSHIFT)+0x4000)>>15 */ \ + ux = vec_sl (U, lCSHIFT); \ + ux = vec_mradds (ux, lCBU, (vector signed short){0}); \ + ux0 = vec_mergeh (ux,ux); \ + ux1 = vec_mergel (ux,ux); \ + \ + /* vx = (CRV*(v<<CSHIFT)+0x4000)>>15; */ \ + vx = vec_sl (V, lCSHIFT); \ + vx = vec_mradds (vx, lCRV, (vector signed short){0}); \ + vx0 = vec_mergeh (vx,vx); \ + vx1 = vec_mergel (vx,vx); \ + \ + /* uvx = ((CGU*u) + (CGV*v))>>15 */ \ + uvx = vec_mradds (U, lCGU, (vector signed short){0}); \ + uvx = vec_mradds (V, lCGV, uvx); \ + uvx0 = vec_mergeh (uvx,uvx); \ + uvx1 = vec_mergel (uvx,uvx); \ + \ + R0 = vec_add (Y0,vx0); \ + G0 = vec_add (Y0,uvx0); \ + B0 = vec_add (Y0,ux0); \ + R1 = vec_add (Y1,vx1); \ + G1 = vec_add (Y1,uvx1); \ + B1 = vec_add (Y1,ux1); \ + \ + R = vec_packclp (R0,R1); \ + G = vec_packclp (G0,G1); \ + B = vec_packclp (B0,B1); \ + \ + out_pixels(R,G,B,oute); \ + \ + R0 = vec_add (Y2,vx0); \ + G0 = vec_add (Y2,uvx0); \ + B0 = vec_add (Y2,ux0); \ + R1 = vec_add (Y3,vx1); \ + G1 = vec_add (Y3,uvx1); \ + B1 = vec_add (Y3,ux1); \ + R = vec_packclp (R0,R1); \ + G = vec_packclp (G0,G1); \ + B = vec_packclp (B0,B1); \ + \ + \ + out_pixels(R,G,B,outo); \ + \ + y1i += 16; \ + y2i += 16; \ + ui += 8; \ + vi += 8; \ + \ + } \ + \ + outo += (outstrides[0])>>4; \ + oute += (outstrides[0])>>4; \ + \ + ui += instrides_scl[1]; \ + vi += instrides_scl[2]; \ + y1i += instrides_scl[0]; \ + y2i += instrides_scl[0]; \ + } \ + return srcSliceH; \ +} + + +#define out_abgr(a,b,c,ptr) vec_mstrgb32(__typeof__(a),((__typeof__ (a)){255}),c,b,a,ptr) +#define out_bgra(a,b,c,ptr) vec_mstrgb32(__typeof__(a),c,b,a,((__typeof__ (a)){255}),ptr) +#define out_rgba(a,b,c,ptr) vec_mstrgb32(__typeof__(a),a,b,c,((__typeof__ (a)){255}),ptr) +#define out_argb(a,b,c,ptr) vec_mstrgb32(__typeof__(a),((__typeof__ (a)){255}),a,b,c,ptr) +#define out_rgb24(a,b,c,ptr) vec_mstrgb24(a,b,c,ptr) +#define out_bgr24(a,b,c,ptr) vec_mstbgr24(a,b,c,ptr) + +DEFCSP420_CVT (yuv2_abgr, out_abgr) +#if 1 +DEFCSP420_CVT (yuv2_bgra, out_bgra) +#else +static int altivec_yuv2_bgra32 (SwsContext *c, + unsigned char **in, int *instrides, + int srcSliceY, int srcSliceH, + unsigned char **oplanes, int *outstrides) +{ + int w = c->srcW; + int h = srcSliceH; + int i,j; + int instrides_scl[3]; + vector unsigned char y0,y1; + + vector signed char u,v; + + vector signed short Y0,Y1,Y2,Y3; + vector signed short U,V; + vector signed short vx,ux,uvx; + vector signed 
short vx0,ux0,uvx0; + vector signed short vx1,ux1,uvx1; + vector signed short R0,G0,B0; + vector signed short R1,G1,B1; + vector unsigned char R,G,B; + + vector unsigned char *uivP, *vivP; + vector unsigned char align_perm; + + vector signed short + lCY = c->CY, + lOY = c->OY, + lCRV = c->CRV, + lCBU = c->CBU, + lCGU = c->CGU, + lCGV = c->CGV; + + vector unsigned short lCSHIFT = c->CSHIFT; + + ubyte *y1i = in[0]; + ubyte *y2i = in[0]+w; + ubyte *ui = in[1]; + ubyte *vi = in[2]; + + vector unsigned char *oute + = (vector unsigned char *) + (oplanes[0]+srcSliceY*outstrides[0]); + vector unsigned char *outo + = (vector unsigned char *) + (oplanes[0]+srcSliceY*outstrides[0]+outstrides[0]); + + + instrides_scl[0] = instrides[0]; + instrides_scl[1] = instrides[1]-w/2; /* the loop moves ui by w/2 */ + instrides_scl[2] = instrides[2]-w/2; /* the loop moves vi by w/2 */ + + + for (i=0;i<h/2;i++) { + vec_dstst (outo, (0x02000002|(((w*3+32)/32)<<16)), 0); + vec_dstst (oute, (0x02000002|(((w*3+32)/32)<<16)), 1); + + for (j=0;j<w/16;j++) { + + y0 = vec_ldl (0,y1i); + y1 = vec_ldl (0,y2i); + uivP = (vector unsigned char *)ui; + vivP = (vector unsigned char *)vi; + + align_perm = vec_lvsl (0, ui); + u = (vector signed char)vec_perm (uivP[0], uivP[1], align_perm); + + align_perm = vec_lvsl (0, vi); + v = (vector signed char)vec_perm (vivP[0], vivP[1], align_perm); + u = (vector signed char) + vec_sub (u,(vector signed char) + vec_splat((vector signed char){128},0)); + + v = (vector signed char) + vec_sub (v, (vector signed char) + vec_splat((vector signed char){128},0)); + + U = vec_unpackh (u); + V = vec_unpackh (v); + + + Y0 = vec_unh (y0); + Y1 = vec_unl (y0); + Y2 = vec_unh (y1); + Y3 = vec_unl (y1); + + Y0 = vec_mradds (Y0, lCY, lOY); + Y1 = vec_mradds (Y1, lCY, lOY); + Y2 = vec_mradds (Y2, lCY, lOY); + Y3 = vec_mradds (Y3, lCY, lOY); + + /* ux = (CBU*(u<<CSHIFT)+0x4000)>>15 */ + ux = vec_sl (U, lCSHIFT); + ux = vec_mradds (ux, lCBU, (vector signed short){0}); + ux0 = vec_mergeh (ux,ux); + ux1 = vec_mergel (ux,ux); + + /* vx = (CRV*(v<<CSHIFT)+0x4000)>>15; */ + vx = vec_sl (V, lCSHIFT); + vx = vec_mradds (vx, lCRV, (vector signed short){0}); + vx0 = vec_mergeh (vx,vx); + vx1 = vec_mergel (vx,vx); + /* uvx = ((CGU*u) + (CGV*v))>>15 */ + uvx = vec_mradds (U, lCGU, (vector signed short){0}); + uvx = vec_mradds (V, lCGV, uvx); + uvx0 = vec_mergeh (uvx,uvx); + uvx1 = vec_mergel (uvx,uvx); + R0 = vec_add (Y0,vx0); + G0 = vec_add (Y0,uvx0); + B0 = vec_add (Y0,ux0); + R1 = vec_add (Y1,vx1); + G1 = vec_add (Y1,uvx1); + B1 = vec_add (Y1,ux1); + R = vec_packclp (R0,R1); + G = vec_packclp (G0,G1); + B = vec_packclp (B0,B1); + + out_argb(R,G,B,oute); + R0 = vec_add (Y2,vx0); + G0 = vec_add (Y2,uvx0); + B0 = vec_add (Y2,ux0); + R1 = vec_add (Y3,vx1); + G1 = vec_add (Y3,uvx1); + B1 = vec_add (Y3,ux1); + R = vec_packclp (R0,R1); + G = vec_packclp (G0,G1); + B = vec_packclp (B0,B1); + + out_argb(R,G,B,outo); + y1i += 16; + y2i += 16; + ui += 8; + vi += 8; + + } + + outo += (outstrides[0])>>4; + oute += (outstrides[0])>>4; + + ui += instrides_scl[1]; + vi += instrides_scl[2]; + y1i += instrides_scl[0]; + y2i += instrides_scl[0]; + } + return srcSliceH; +} + +#endif + + +DEFCSP420_CVT (yuv2_rgba, out_rgba) +DEFCSP420_CVT (yuv2_argb, out_argb) +DEFCSP420_CVT (yuv2_rgb24, out_rgb24) +DEFCSP420_CVT (yuv2_bgr24, out_bgr24) + + +// uyvy|uyvy|uyvy|uyvy +// 0123 4567 89ab cdef +static +const vector unsigned char + demux_u = {0x10,0x00,0x10,0x00, + 0x10,0x04,0x10,0x04, + 0x10,0x08,0x10,0x08, + 0x10,0x0c,0x10,0x0c}, + demux_v = 
{0x10,0x02,0x10,0x02, + 0x10,0x06,0x10,0x06, + 0x10,0x0A,0x10,0x0A, + 0x10,0x0E,0x10,0x0E}, + demux_y = {0x10,0x01,0x10,0x03, + 0x10,0x05,0x10,0x07, + 0x10,0x09,0x10,0x0B, + 0x10,0x0D,0x10,0x0F}; + +/* + This is so I can play live CCIR raw video. +*/ +static int altivec_uyvy_rgb32 (SwsContext *c, + unsigned char **in, int *instrides, + int srcSliceY, int srcSliceH, + unsigned char **oplanes, int *outstrides) +{ + int w = c->srcW; + int h = srcSliceH; + int i,j; + vector unsigned char uyvy; + vector signed short Y,U,V; + vector signed short R0,G0,B0,R1,G1,B1; + vector unsigned char R,G,B; + vector unsigned char *out; + ubyte *img; + + img = in[0]; + out = (vector unsigned char *)(oplanes[0]+srcSliceY*outstrides[0]); + + for (i=0;i<h;i++) { + for (j=0;j<w/16;j++) { + uyvy = vec_ld (0, img); + U = (vector signed short) + vec_perm (uyvy, (vector unsigned char){0}, demux_u); + + V = (vector signed short) + vec_perm (uyvy, (vector unsigned char){0}, demux_v); + + Y = (vector signed short) + vec_perm (uyvy, (vector unsigned char){0}, demux_y); + + cvtyuvtoRGB (c, Y,U,V,&R0,&G0,&B0); + + uyvy = vec_ld (16, img); + U = (vector signed short) + vec_perm (uyvy, (vector unsigned char){0}, demux_u); + + V = (vector signed short) + vec_perm (uyvy, (vector unsigned char){0}, demux_v); + + Y = (vector signed short) + vec_perm (uyvy, (vector unsigned char){0}, demux_y); + + cvtyuvtoRGB (c, Y,U,V,&R1,&G1,&B1); + + R = vec_packclp (R0,R1); + G = vec_packclp (G0,G1); + B = vec_packclp (B0,B1); + + // vec_mstbgr24 (R,G,B, out); + out_rgba (R,G,B,out); + + img += 32; + } + } + return srcSliceH; +} + + + +/* Currently the acceleration routine only supports + inputs of widths a multiple of 16 + and heights a multiple of 2. + + So we just fall back to the C codes for this. +*/ +SwsFunc sws_yuv2rgb_init_altivec (SwsContext *c) +{ + if (!(c->flags & SWS_CPU_CAPS_ALTIVEC)) + return NULL; + + /* + and this seems not to matter too much; I tried a bunch of + videos with abnormal widths and MPlayer crashes elsewhere. + mplayer -vo x11 -rawvideo on:w=350:h=240 raw-350x240.eyuv + boom with X11 bad match. + + */ + if ((c->srcW & 0xf) != 0) return NULL; + + switch (c->srcFormat) { + case PIX_FMT_YUV410P: + case PIX_FMT_YUV420P: + /*case IMGFMT_CLPL: ???
*/ + case PIX_FMT_GRAY8: + case PIX_FMT_NV12: + case PIX_FMT_NV21: + if ((c->srcH & 0x1) != 0) + return NULL; + + switch(c->dstFormat){ + case PIX_FMT_RGB24: + av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space RGB24\n"); + return altivec_yuv2_rgb24; + case PIX_FMT_BGR24: + av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space BGR24\n"); + return altivec_yuv2_bgr24; + case PIX_FMT_ARGB: + av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space ARGB\n"); + return altivec_yuv2_argb; + case PIX_FMT_ABGR: + av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space ABGR\n"); + return altivec_yuv2_abgr; + case PIX_FMT_RGBA: + av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space RGBA\n"); + return altivec_yuv2_rgba; + case PIX_FMT_BGRA: + av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space BGRA\n"); + return altivec_yuv2_bgra; + default: return NULL; + } + break; + + case PIX_FMT_UYVY422: + switch(c->dstFormat){ + case PIX_FMT_BGR32: + av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space UYVY -> RGB32\n"); + return altivec_uyvy_rgb32; + default: return NULL; + } + break; + + } + return NULL; +} + +void sws_yuv2rgb_altivec_init_tables (SwsContext *c, const int inv_table[4],int brightness,int contrast, int saturation) +{ + union { + signed short tmp[8] __attribute__ ((aligned(16))); + vector signed short vec; + } buf; + + buf.tmp[0] = ((0xffffLL) * contrast>>8)>>9; //cy + buf.tmp[1] = -256*brightness; //oy + buf.tmp[2] = (inv_table[0]>>3) *(contrast>>16)*(saturation>>16); //crv + buf.tmp[3] = (inv_table[1]>>3) *(contrast>>16)*(saturation>>16); //cbu + buf.tmp[4] = -((inv_table[2]>>1)*(contrast>>16)*(saturation>>16)); //cgu + buf.tmp[5] = -((inv_table[3]>>1)*(contrast>>16)*(saturation>>16)); //cgv + + + c->CSHIFT = (vector unsigned short)vec_splat_u16(2); + c->CY = vec_splat ((vector signed short)buf.vec, 0); + c->OY = vec_splat ((vector signed short)buf.vec, 1); + c->CRV = vec_splat ((vector signed short)buf.vec, 2); + c->CBU = vec_splat ((vector signed short)buf.vec, 3); + c->CGU = vec_splat ((vector signed short)buf.vec, 4); + c->CGV = vec_splat ((vector signed short)buf.vec, 5); +#if 0 + { + int i; + char *v[6]={"cy","oy","crv","cbu","cgu","cgv"}; + for (i=0; i<6; i++) + printf("%s %d ", v[i],buf.tmp[i] ); + printf("\n"); + } +#endif + return; +} + + +void +altivec_yuv2packedX (SwsContext *c, + int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize, + int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, + uint8_t *dest, int dstW, int dstY) +{ + int i,j; + vector signed short X,X0,X1,Y0,U0,V0,Y1,U1,V1,U,V; + vector signed short R0,G0,B0,R1,G1,B1; + + vector unsigned char R,G,B; + vector unsigned char *out,*nout; + + vector signed short RND = vec_splat_s16(1<<3); + vector unsigned short SCL = vec_splat_u16(4); + unsigned long scratch[16] __attribute__ ((aligned (16))); + + vector signed short *YCoeffs, *CCoeffs; + + YCoeffs = c->vYCoeffsBank+dstY*lumFilterSize; + CCoeffs = c->vCCoeffsBank+dstY*chrFilterSize; + + out = (vector unsigned char *)dest; + + for (i=0; i<dstW; i+=16){ + Y0 = RND; + Y1 = RND; + /* extract 16 coeffs from lumSrc */ + for (j=0; j<lumFilterSize; j++) { + X0 = vec_ld (0, &lumSrc[j][i]); + X1 = vec_ld (16, &lumSrc[j][i]); + Y0 = vec_mradds (X0, YCoeffs[j], Y0); + Y1 = vec_mradds (X1, YCoeffs[j], Y1); + } + + U = RND; + V = RND; + /* extract 8 coeffs from U,V */ + for (j=0; j<chrFilterSize; j++) { + X = vec_ld (0, &chrSrc[j][i/2]); + U = vec_mradds (X, CCoeffs[j], U); + X = vec_ld (0, &chrSrc[j][i/2+2048]); + V = vec_mradds (X, CCoeffs[j], V); + } + + /* scale and clip signals */ + Y0 = vec_sra (Y0, SCL); + Y1 = 
vec_sra (Y1, SCL); + U = vec_sra (U, SCL); + V = vec_sra (V, SCL); + + Y0 = vec_clip_s16 (Y0); + Y1 = vec_clip_s16 (Y1); + U = vec_clip_s16 (U); + V = vec_clip_s16 (V); + + /* now we have + Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15 + U= u0 u1 u2 u3 u4 u5 u6 u7 V= v0 v1 v2 v3 v4 v5 v6 v7 + + Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15 + U0= u0 u0 u1 u1 u2 u2 u3 u3 U1= u4 u4 u5 u5 u6 u6 u7 u7 + V0= v0 v0 v1 v1 v2 v2 v3 v3 V1= v4 v4 v5 v5 v6 v6 v7 v7 + */ + + U0 = vec_mergeh (U,U); + V0 = vec_mergeh (V,V); + + U1 = vec_mergel (U,U); + V1 = vec_mergel (V,V); + + cvtyuvtoRGB (c, Y0,U0,V0,&R0,&G0,&B0); + cvtyuvtoRGB (c, Y1,U1,V1,&R1,&G1,&B1); + + R = vec_packclp (R0,R1); + G = vec_packclp (G0,G1); + B = vec_packclp (B0,B1); + + switch(c->dstFormat) { + case PIX_FMT_ABGR: out_abgr (R,G,B,out); break; + case PIX_FMT_BGRA: out_bgra (R,G,B,out); break; + case PIX_FMT_RGBA: out_rgba (R,G,B,out); break; + case PIX_FMT_ARGB: out_argb (R,G,B,out); break; + case PIX_FMT_RGB24: out_rgb24 (R,G,B,out); break; + case PIX_FMT_BGR24: out_bgr24 (R,G,B,out); break; + default: + { + /* If this is reached, the caller should have called yuv2packedXinC + instead. */ + static int printed_error_message; + if (!printed_error_message) { + av_log(c, AV_LOG_ERROR, "altivec_yuv2packedX doesn't support %s output\n", + sws_format_name(c->dstFormat)); + printed_error_message=1; + } + return; + } + } + } + + if (i < dstW) { + i -= 16; + + Y0 = RND; + Y1 = RND; + /* extract 16 coeffs from lumSrc */ + for (j=0; j<lumFilterSize; j++) { + X0 = vec_ld (0, &lumSrc[j][i]); + X1 = vec_ld (16, &lumSrc[j][i]); + Y0 = vec_mradds (X0, YCoeffs[j], Y0); + Y1 = vec_mradds (X1, YCoeffs[j], Y1); + } + + U = RND; + V = RND; + /* extract 8 coeffs from U,V */ + for (j=0; j<chrFilterSize; j++) { + X = vec_ld (0, &chrSrc[j][i/2]); + U = vec_mradds (X, CCoeffs[j], U); + X = vec_ld (0, &chrSrc[j][i/2+2048]); + V = vec_mradds (X, CCoeffs[j], V); + } + + /* scale and clip signals */ + Y0 = vec_sra (Y0, SCL); + Y1 = vec_sra (Y1, SCL); + U = vec_sra (U, SCL); + V = vec_sra (V, SCL); + + Y0 = vec_clip_s16 (Y0); + Y1 = vec_clip_s16 (Y1); + U = vec_clip_s16 (U); + V = vec_clip_s16 (V); + + /* now we have + Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15 + U = u0 u1 u2 u3 u4 u5 u6 u7 V = v0 v1 v2 v3 v4 v5 v6 v7 + + Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15 + U0= u0 u0 u1 u1 u2 u2 u3 u3 U1= u4 u4 u5 u5 u6 u6 u7 u7 + V0= v0 v0 v1 v1 v2 v2 v3 v3 V1= v4 v4 v5 v5 v6 v6 v7 v7 + */ + + U0 = vec_mergeh (U,U); + V0 = vec_mergeh (V,V); + + U1 = vec_mergel (U,U); + V1 = vec_mergel (V,V); + + cvtyuvtoRGB (c, Y0,U0,V0,&R0,&G0,&B0); + cvtyuvtoRGB (c, Y1,U1,V1,&R1,&G1,&B1); + + R = vec_packclp (R0,R1); + G = vec_packclp (G0,G1); + B = vec_packclp (B0,B1); + + nout = (vector unsigned char *)scratch; + switch(c->dstFormat) { + case PIX_FMT_ABGR: out_abgr (R,G,B,nout); break; + case PIX_FMT_BGRA: out_bgra (R,G,B,nout); break; + case PIX_FMT_RGBA: out_rgba (R,G,B,nout); break; + case PIX_FMT_ARGB: out_argb (R,G,B,nout); break; + case PIX_FMT_RGB24: out_rgb24 (R,G,B,nout); break; + case PIX_FMT_BGR24: out_bgr24 (R,G,B,nout); break; + default: + /* Unreachable, I think. 
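The main loop above has already returned, after logging an error, for any dstFormat it does not handle.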
*/ + av_log(c, AV_LOG_ERROR, "altivec_yuv2packedX doesn't support %s output\n", + sws_format_name(c->dstFormat)); + return; + } + + memcpy (&((uint32_t*)dest)[i], scratch, (dstW-i)*4); + } + +} diff --git a/libswscale/yuv2rgb_bfin.c b/libswscale/yuv2rgb_bfin.c new file mode 100644 index 0000000000..58cc5b6a35 --- /dev/null +++ b/libswscale/yuv2rgb_bfin.c @@ -0,0 +1,203 @@ +/* + * Copyright (C) 2007 Marc Hoffman <marc.hoffman@analog.com> + * + * Blackfin video color space converter operations + * convert I420 YV12 to RGB in various formats + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <inttypes.h> +#include <assert.h> +#include "config.h" +#include <unistd.h> +#include "rgb2rgb.h" +#include "swscale.h" +#include "swscale_internal.h" + +#ifdef __FDPIC__ +#define L1CODE __attribute__ ((l1_text)) +#else +#define L1CODE +#endif + +void ff_bfin_yuv2rgb555_line (uint8_t *Y, uint8_t *U, uint8_t *V, uint8_t *out, + int w, uint32_t *coeffs) L1CODE; + +void ff_bfin_yuv2rgb565_line (uint8_t *Y, uint8_t *U, uint8_t *V, uint8_t *out, + int w, uint32_t *coeffs) L1CODE; + +void ff_bfin_yuv2rgb24_line (uint8_t *Y, uint8_t *U, uint8_t *V, uint8_t *out, + int w, uint32_t *coeffs) L1CODE; + +typedef void (* ltransform)(uint8_t *Y, uint8_t *U, uint8_t *V, uint8_t *out, + int w, uint32_t *coeffs); + + +static void bfin_prepare_coefficients (SwsContext *c, int rgb, int masks) +{ + int oy; + oy = c->yOffset&0xffff; + oy = oy >> 3; // keep everything U8.0 for offset calculation + + c->oc = 128*0x01010101U; + c->oy = oy*0x01010101U; + + /* copy 64bit vector coeffs down to 32bit vector coeffs */ + c->cy = c->yCoeff; + c->zero = 0; + + if (rgb) { + c->crv = c->vrCoeff; + c->cbu = c->ubCoeff; + c->cgu = c->ugCoeff; + c->cgv = c->vgCoeff; + } else { + c->crv = c->ubCoeff; + c->cbu = c->vrCoeff; + c->cgu = c->vgCoeff; + c->cgv = c->ugCoeff; + } + + + if (masks == 555) { + c->rmask = 0x001f * 0x00010001U; + c->gmask = 0x03e0 * 0x00010001U; + c->bmask = 0x7c00 * 0x00010001U; + } else if (masks == 565) { + c->rmask = 0x001f * 0x00010001U; + c->gmask = 0x07e0 * 0x00010001U; + c->bmask = 0xf800 * 0x00010001U; + } +} + +static int core_yuv420_rgb (SwsContext *c, + uint8_t **in, int *instrides, + int srcSliceY, int srcSliceH, + uint8_t **oplanes, int *outstrides, + ltransform lcscf, int rgb, int masks) +{ + uint8_t *py,*pu,*pv,*op; + int w = instrides[0]; + int h2 = srcSliceH>>1; + int i; + + bfin_prepare_coefficients (c, rgb, masks); + + py = in[0]; + pu = in[1+(1^rgb)]; + pv = in[1+(0^rgb)]; + + op = oplanes[0] + srcSliceY*outstrides[0]; + + for (i=0;i<h2;i++) { + + lcscf (py, pu, pv, op, w, &c->oy); + + py += instrides[0]; + op += outstrides[0]; + + lcscf (py, pu, pv, op, w, &c->oy); + + py += instrides[0]; + pu += instrides[1]; +
pv += instrides[2]; + op += outstrides[0]; + } + + return srcSliceH; +} + + +static int bfin_yuv420_rgb555 (SwsContext *c, + uint8_t **in, int *instrides, + int srcSliceY, int srcSliceH, + uint8_t **oplanes, int *outstrides) +{ + return core_yuv420_rgb (c, in, instrides, srcSliceY, srcSliceH, oplanes, outstrides, + ff_bfin_yuv2rgb555_line, 1, 555); +} + +static int bfin_yuv420_bgr555 (SwsContext *c, + uint8_t **in, int *instrides, + int srcSliceY, int srcSliceH, + uint8_t **oplanes, int *outstrides) +{ + return core_yuv420_rgb (c, in, instrides, srcSliceY, srcSliceH, oplanes, outstrides, + ff_bfin_yuv2rgb555_line, 0, 555); +} + +static int bfin_yuv420_rgb24 (SwsContext *c, + uint8_t **in, int *instrides, + int srcSliceY, int srcSliceH, + uint8_t **oplanes, int *outstrides) +{ + return core_yuv420_rgb (c, in, instrides, srcSliceY, srcSliceH, oplanes, outstrides, + ff_bfin_yuv2rgb24_line, 1, 888); +} + +static int bfin_yuv420_bgr24 (SwsContext *c, + uint8_t **in, int *instrides, + int srcSliceY, int srcSliceH, + uint8_t **oplanes, int *outstrides) +{ + return core_yuv420_rgb (c, in, instrides, srcSliceY, srcSliceH, oplanes, outstrides, + ff_bfin_yuv2rgb24_line, 0, 888); +} + +static int bfin_yuv420_rgb565 (SwsContext *c, + uint8_t **in, int *instrides, + int srcSliceY, int srcSliceH, + uint8_t **oplanes, int *outstrides) +{ + return core_yuv420_rgb (c, in, instrides, srcSliceY, srcSliceH, oplanes, outstrides, + ff_bfin_yuv2rgb565_line, 1, 565); +} + +static int bfin_yuv420_bgr565 (SwsContext *c, + uint8_t **in, int *instrides, + int srcSliceY, int srcSliceH, + uint8_t **oplanes, int *outstrides) +{ + return core_yuv420_rgb (c, in, instrides, srcSliceY, srcSliceH, oplanes, outstrides, + ff_bfin_yuv2rgb565_line, 0, 565); +} + + +SwsFunc ff_bfin_yuv2rgb_get_func_ptr (SwsContext *c) +{ + SwsFunc f; + + switch(c->dstFormat) { + case PIX_FMT_RGB555: f = bfin_yuv420_rgb555; break; + case PIX_FMT_BGR555: f = bfin_yuv420_bgr555; break; + case PIX_FMT_RGB565: f = bfin_yuv420_rgb565; break; + case PIX_FMT_BGR565: f = bfin_yuv420_bgr565; break; + case PIX_FMT_RGB24: f = bfin_yuv420_rgb24; break; + case PIX_FMT_BGR24: f = bfin_yuv420_bgr24; break; + default: + return 0; + } + + av_log(c, AV_LOG_INFO, "BlackFin accelerated color space converter %s\n", + sws_format_name (c->dstFormat)); + + return f; +} diff --git a/libswscale/yuv2rgb_mlib.c b/libswscale/yuv2rgb_mlib.c new file mode 100644 index 0000000000..68247914e7 --- /dev/null +++ b/libswscale/yuv2rgb_mlib.c @@ -0,0 +1,85 @@ +/* + * software YUV to RGB converter using mediaLib + * + * Copyright (C) 2003 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <mlib_types.h> +#include <mlib_status.h> +#include <mlib_sys.h> +#include <mlib_video.h> +#include <inttypes.h> +#include <stdlib.h> +#include <assert.h> + +#include "swscale.h" + +static int mlib_YUV2ARGB420_32(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + if(c->srcFormat == PIX_FMT_YUV422P){ + srcStride[1] *= 2; + srcStride[2] *= 2; + } + + assert(srcStride[1] == srcStride[2]); + + mlib_VideoColorYUV2ARGB420(dst[0]+srcSliceY*dstStride[0], src[0], src[1], src[2], c->dstW, + srcSliceH, dstStride[0], srcStride[0], srcStride[1]); + return srcSliceH; +} + +static int mlib_YUV2ABGR420_32(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + if(c->srcFormat == PIX_FMT_YUV422P){ + srcStride[1] *= 2; + srcStride[2] *= 2; + } + + assert(srcStride[1] == srcStride[2]); + + mlib_VideoColorYUV2ABGR420(dst[0]+srcSliceY*dstStride[0], src[0], src[1], src[2], c->dstW, + srcSliceH, dstStride[0], srcStride[0], srcStride[1]); + return srcSliceH; +} + +static int mlib_YUV2RGB420_24(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + if(c->srcFormat == PIX_FMT_YUV422P){ + srcStride[1] *= 2; + srcStride[2] *= 2; + } + + assert(srcStride[1] == srcStride[2]); + + mlib_VideoColorYUV2RGB420(dst[0]+srcSliceY*dstStride[0], src[0], src[1], src[2], c->dstW, + srcSliceH, dstStride[0], srcStride[0], srcStride[1]); + return srcSliceH; +} + + +SwsFunc sws_yuv2rgb_init_mlib(SwsContext *c) +{ + switch(c->dstFormat){ + case PIX_FMT_RGB24: return mlib_YUV2RGB420_24; + case PIX_FMT_BGR32: return mlib_YUV2ARGB420_32; + case PIX_FMT_RGB32: return mlib_YUV2ABGR420_32; + default: return NULL; + } +} + diff --git a/libswscale/yuv2rgb_template.c b/libswscale/yuv2rgb_template.c new file mode 100644 index 0000000000..f55568b0ab --- /dev/null +++ b/libswscale/yuv2rgb_template.c @@ -0,0 +1,453 @@ +/* + * yuv2rgb_mmx.c, software YUV to RGB converter with Intel MMX "technology" + * + * Copyright (C) 2000, Silicon Integrated System Corp + * + * Author: Olie Lho <ollie@sis.com.tw> + * + * 15,24 bpp and dithering from Michael Niedermayer (michaelni@gmx.at) + * MMX/MMX2 Template stuff from Michael Niedermayer (needed for fast movntq support) + * context / deglobalize stuff by Michael Niedermayer + * + * This file is part of mpeg2dec, a free MPEG-2 video decoder + * + * mpeg2dec is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * mpeg2dec is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with mpeg2dec; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#undef MOVNTQ +#undef EMMS +#undef SFENCE + +#if HAVE_AMD3DNOW +/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. 
*/ +#define EMMS "femms" +#else +#define EMMS "emms" +#endif + +#if HAVE_MMX2 +#define MOVNTQ "movntq" +#define SFENCE "sfence" +#else +#define MOVNTQ "movq" +#define SFENCE "/nop" +#endif + +#define YUV2RGB \ + /* Do the multiply part of the conversion for even and odd pixels, + register usage: + mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels, + mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels, + mm6 -> Y even, mm7 -> Y odd */\ + /* convert the chroma part */\ + "punpcklbw %%mm4, %%mm0;" /* scatter 4 Cb 00 u3 00 u2 00 u1 00 u0 */ \ + "punpcklbw %%mm4, %%mm1;" /* scatter 4 Cr 00 v3 00 v2 00 v1 00 v0 */ \ +\ + "psllw $3, %%mm0;" /* Promote precision */ \ + "psllw $3, %%mm1;" /* Promote precision */ \ +\ + "psubsw "U_OFFSET"(%4), %%mm0;" /* Cb -= 128 */ \ + "psubsw "V_OFFSET"(%4), %%mm1;" /* Cr -= 128 */ \ +\ + "movq %%mm0, %%mm2;" /* Copy 4 Cb 00 u3 00 u2 00 u1 00 u0 */ \ + "movq %%mm1, %%mm3;" /* Copy 4 Cr 00 v3 00 v2 00 v1 00 v0 */ \ +\ + "pmulhw "UG_COEFF"(%4), %%mm2;" /* Mul Cb with green coeff -> Cb green */ \ + "pmulhw "VG_COEFF"(%4), %%mm3;" /* Mul Cr with green coeff -> Cr green */ \ +\ + "pmulhw "UB_COEFF"(%4), %%mm0;" /* Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 */\ + "pmulhw "VR_COEFF"(%4), %%mm1;" /* Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 */\ +\ + "paddsw %%mm3, %%mm2;" /* Cb green + Cr green -> Cgreen */\ +\ + /* convert the luma part */\ + "movq %%mm6, %%mm7;" /* Copy 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */\ + "pand "MANGLE(mmx_00ffw)", %%mm6;" /* get Y even 00 Y6 00 Y4 00 Y2 00 Y0 */\ +\ + "psrlw $8, %%mm7;" /* get Y odd 00 Y7 00 Y5 00 Y3 00 Y1 */\ +\ + "psllw $3, %%mm6;" /* Promote precision */\ + "psllw $3, %%mm7;" /* Promote precision */\ +\ + "psubw "Y_OFFSET"(%4), %%mm6;" /* Y -= 16 */\ + "psubw "Y_OFFSET"(%4), %%mm7;" /* Y -= 16 */\ +\ + "pmulhw "Y_COEFF"(%4), %%mm6;" /* Mul 4 Y even 00 y6 00 y4 00 y2 00 y0 */\ + "pmulhw "Y_COEFF"(%4), %%mm7;" /* Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1 */\ +\ + /* Do the addition part of the conversion for even and odd pixels, + register usage: + mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels, + mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels, + mm6 -> Y even, mm7 -> Y odd */\ + "movq %%mm0, %%mm3;" /* Copy Cblue */\ + "movq %%mm1, %%mm4;" /* Copy Cred */\ + "movq %%mm2, %%mm5;" /* Copy Cgreen */\ +\ + "paddsw %%mm6, %%mm0;" /* Y even + Cblue 00 B6 00 B4 00 B2 00 B0 */\ + "paddsw %%mm7, %%mm3;" /* Y odd + Cblue 00 B7 00 B5 00 B3 00 B1 */\ +\ + "paddsw %%mm6, %%mm1;" /* Y even + Cred 00 R6 00 R4 00 R2 00 R0 */\ + "paddsw %%mm7, %%mm4;" /* Y odd + Cred 00 R7 00 R5 00 R3 00 R1 */\ +\ + "paddsw %%mm6, %%mm2;" /* Y even + Cgreen 00 G6 00 G4 00 G2 00 G0 */\ + "paddsw %%mm7, %%mm5;" /* Y odd + Cgreen 00 G7 00 G5 00 G3 00 G1 */\ +\ + /* Limit RGB even to 0..255 */\ + "packuswb %%mm0, %%mm0;" /* B6 B4 B2 B0 B6 B4 B2 B0 */\ + "packuswb %%mm1, %%mm1;" /* R6 R4 R2 R0 R6 R4 R2 R0 */\ + "packuswb %%mm2, %%mm2;" /* G6 G4 G2 G0 G6 G4 G2 G0 */\ +\ + /* Limit RGB odd to 0..255 */\ + "packuswb %%mm3, %%mm3;" /* B7 B5 B3 B1 B7 B5 B3 B1 */\ + "packuswb %%mm4, %%mm4;" /* R7 R5 R3 R1 R7 R5 R3 R1 */\ + "packuswb %%mm5, %%mm5;" /* G7 G5 G3 G1 G7 G5 G3 G1 */\ +\ + /* Interleave RGB even and odd */\ + "punpcklbw %%mm3, %%mm0;" /* B7 B6 B5 B4 B3 B2 B1 B0 */\ + "punpcklbw %%mm4, %%mm1;" /* R7 R6 R5 R4 R3 R2 R1 R0 */\ + "punpcklbw %%mm5, %%mm2;" /* G7 G6 G5 G4 G3 G2 G1 G0 */\ + + +#define YUV422_UNSHIFT \ + if(c->srcFormat == PIX_FMT_YUV422P){ \ + srcStride[1] *= 2; \ + srcStride[2] *= 2; \ + } \ + +#define YUV2RGB_LOOP(depth) \ + h_size= (c->dstW+7)&~7; \ + 
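/* writing whole groups of 8 pixels per iteration must not overrun dstStride */ \ +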
if(h_size*depth > FFABS(dstStride[0])) h_size-=8; \ +\ + __asm__ volatile ("pxor %mm4, %mm4;" /* zero mm4 */ ); \ + for (y= 0; y<srcSliceH; y++ ) { \ + uint8_t *image = dst[0] + (y+srcSliceY)*dstStride[0]; \ + uint8_t *py = src[0] + y*srcStride[0]; \ + uint8_t *pu = src[1] + (y>>1)*srcStride[1]; \ + uint8_t *pv = src[2] + (y>>1)*srcStride[2]; \ + long index= -h_size/2; \ + +#define YUV2RGB_INIT \ + /* This MMX assembly code deals with a SINGLE scan line at a time, \ + * it converts 8 pixels in each iteration. */ \ + __asm__ volatile ( \ + /* load data for start of next scan line */ \ + "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ \ + "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ \ + "movq (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */ \ + /* \ + ".balign 16 \n\t" \ + */ \ + "1: \n\t" \ + /* No speed difference on my p3@500 with prefetch, \ + * if it is faster for anyone with -benchmark then tell me. \ + PREFETCH" 64(%0) \n\t" \ + PREFETCH" 64(%1) \n\t" \ + PREFETCH" 64(%2) \n\t" \ + */ \ + +#define YUV2RGB_ENDLOOP(depth) \ + "add $"AV_STRINGIFY(depth*8)", %1 \n\t" \ + "add $4, %0 \n\t" \ + " js 1b \n\t" \ +\ + : "+r" (index), "+r" (image) \ + : "r" (pu - index), "r" (pv - index), "r"(&c->redDither), "r" (py - 2*index) \ + ); \ + } \ + __asm__ volatile (EMMS); \ + return srcSliceH; \ + +static inline int RENAME(yuv420_rgb16)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + int y, h_size; + + YUV422_UNSHIFT + YUV2RGB_LOOP(2) + + c->blueDither= ff_dither8[y&1]; + c->greenDither= ff_dither4[y&1]; + c->redDither= ff_dither8[(y+1)&1]; + + YUV2RGB_INIT + YUV2RGB + +#ifdef DITHER1XBPP + "paddusb "BLUE_DITHER"(%4), %%mm0;" + "paddusb "GREEN_DITHER"(%4), %%mm2;" + "paddusb "RED_DITHER"(%4), %%mm1;" +#endif + /* mask unneeded bits off */ + "pand "MANGLE(mmx_redmask)", %%mm0;" /* b7b6b5b4 b3_0_0_0 b7b6b5b4 b3_0_0_0 */ + "pand "MANGLE(mmx_grnmask)", %%mm2;" /* g7g6g5g4 g3g2_0_0 g7g6g5g4 g3g2_0_0 */ + "pand "MANGLE(mmx_redmask)", %%mm1;" /* r7r6r5r4 r3_0_0_0 r7r6r5r4 r3_0_0_0 */ + + "psrlw $3, %%mm0;" /* 0_0_0_b7 b6b5b4b3 0_0_0_b7 b6b5b4b3 */ + "pxor %%mm4, %%mm4;" /* zero mm4 */ + + "movq %%mm0, %%mm5;" /* Copy B7-B0 */ + "movq %%mm2, %%mm7;" /* Copy G7-G0 */ + + /* convert RGB24 plane to RGB16 pack for pixel 0-3 */ + "punpcklbw %%mm4, %%mm2;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0 */ + "punpcklbw %%mm1, %%mm0;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */ + + "psllw $3, %%mm2;" /* 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0 */ + "por %%mm2, %%mm0;" /* r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 */ + + "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */ + MOVNTQ " %%mm0, (%1);" /* store pixel 0-3 */ + + /* convert RGB24 plane to RGB16 pack for pixel 0-3 */ + "punpckhbw %%mm4, %%mm7;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0 */ + "punpckhbw %%mm1, %%mm5;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */ + + "psllw $3, %%mm7;" /* 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0 */ + "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ + + "por %%mm7, %%mm5;" /* r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 */ + "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ + + MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */ + + YUV2RGB_ENDLOOP(2) +} + +static inline int RENAME(yuv420_rgb15)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + int y, h_size; + + YUV422_UNSHIFT + YUV2RGB_LOOP(2) + + c->blueDither= ff_dither8[y&1]; + c->greenDither= 
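+                    /* 15 bpp keeps 5 bits per channel, so green uses the
+                       same 8-level dither table as red and blue (contrast
+                       the rgb16 variant above, where the 6-bit green
+                       channel uses ff_dither4): */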
ff_dither8[y&1]; + c->redDither= ff_dither8[(y+1)&1]; + + YUV2RGB_INIT + YUV2RGB + +#ifdef DITHER1XBPP + "paddusb "BLUE_DITHER"(%4), %%mm0 \n\t" + "paddusb "GREEN_DITHER"(%4), %%mm2 \n\t" + "paddusb "RED_DITHER"(%4), %%mm1 \n\t" +#endif + + /* mask unneeded bits off */ + "pand "MANGLE(mmx_redmask)", %%mm0;" /* b7b6b5b4 b3_0_0_0 b7b6b5b4 b3_0_0_0 */ + "pand "MANGLE(mmx_redmask)", %%mm2;" /* g7g6g5g4 g3_0_0_0 g7g6g5g4 g3_0_0_0 */ + "pand "MANGLE(mmx_redmask)", %%mm1;" /* r7r6r5r4 r3_0_0_0 r7r6r5r4 r3_0_0_0 */ + + "psrlw $3, %%mm0;" /* 0_0_0_b7 b6b5b4b3 0_0_0_b7 b6b5b4b3 */ + "psrlw $1, %%mm1;" /* 0_r7r6r5 r4r3_0_0 0_r7r6r5 r4r3_0_0 */ + "pxor %%mm4, %%mm4;" /* zero mm4 */ + + "movq %%mm0, %%mm5;" /* Copy B7-B0 */ + "movq %%mm2, %%mm7;" /* Copy G7-G0 */ + + /* convert RGB24 plane to RGB16 pack for pixel 0-3 */ + "punpcklbw %%mm4, %%mm2;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3_0_0_0 */ + "punpcklbw %%mm1, %%mm0;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */ + + "psllw $2, %%mm2;" /* 0_0_0_0 0_0_g7g6 g5g4g3_0 0_0_0_0 */ + "por %%mm2, %%mm0;" /* 0_r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3 */ + + "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */ + MOVNTQ " %%mm0, (%1);" /* store pixel 0-3 */ + + /* convert RGB24 plane to RGB16 pack for pixel 0-3 */ + "punpckhbw %%mm4, %%mm7;" /* 0_0_0_0 0_0_0_0 0_g7g6g5 g4g3_0_0 */ + "punpckhbw %%mm1, %%mm5;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */ + + "psllw $2, %%mm7;" /* 0_0_0_0 0_0_g7g6 g5g4g3_0 0_0_0_0 */ + "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ + + "por %%mm7, %%mm5;" /* 0_r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3 */ + "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ + + MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */ + + YUV2RGB_ENDLOOP(2) +} + +static inline int RENAME(yuv420_rgb24)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + int y, h_size; + + YUV422_UNSHIFT + YUV2RGB_LOOP(3) + + YUV2RGB_INIT + YUV2RGB + /* mm0=B, %%mm2=G, %%mm1=R */ +#if HAVE_MMX2 + "movq "MANGLE(ff_M24A)", %%mm4 \n\t" + "movq "MANGLE(ff_M24C)", %%mm7 \n\t" + "pshufw $0x50, %%mm0, %%mm5 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */ + "pshufw $0x50, %%mm2, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */ + "pshufw $0x00, %%mm1, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */ + + "pand %%mm4, %%mm5 \n\t" /* B2 B1 B0 */ + "pand %%mm4, %%mm3 \n\t" /* G2 G1 G0 */ + "pand %%mm7, %%mm6 \n\t" /* R1 R0 */ + + "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */ + "por %%mm5, %%mm6 \n\t" + "por %%mm3, %%mm6 \n\t" + MOVNTQ" %%mm6, (%1) \n\t" + + "psrlq $8, %%mm2 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */ + "pshufw $0xA5, %%mm0, %%mm5 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */ + "pshufw $0x55, %%mm2, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */ + "pshufw $0xA5, %%mm1, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */ + + "pand "MANGLE(ff_M24B)", %%mm5 \n\t" /* B5 B4 B3 */ + "pand %%mm7, %%mm3 \n\t" /* G4 G3 */ + "pand %%mm4, %%mm6 \n\t" /* R4 R3 R2 */ + + "por %%mm5, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */ + "por %%mm3, %%mm6 \n\t" + MOVNTQ" %%mm6, 8(%1) \n\t" + + "pshufw $0xFF, %%mm0, %%mm5 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */ + "pshufw $0xFA, %%mm2, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */ + "pshufw $0xFA, %%mm1, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */ + "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ + + "pand %%mm7, %%mm5 \n\t" /* B7 B6 */ + "pand %%mm4, %%mm3 \n\t" /* G7 G6 G5 */ + "pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7 R6 R5 */ + "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ +\ + "por %%mm5, %%mm3 \n\t" + "por 
%%mm3, %%mm6 \n\t" + MOVNTQ" %%mm6, 16(%1) \n\t" + "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */ + "pxor %%mm4, %%mm4 \n\t" + +#else + + "pxor %%mm4, %%mm4 \n\t" + "movq %%mm0, %%mm5 \n\t" /* B */ + "movq %%mm1, %%mm6 \n\t" /* R */ + "punpcklbw %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */ + "punpcklbw %%mm4, %%mm1 \n\t" /* 0R0R0R0R 0 */ + "punpckhbw %%mm2, %%mm5 \n\t" /* GBGBGBGB 2 */ + "punpckhbw %%mm4, %%mm6 \n\t" /* 0R0R0R0R 2 */ + "movq %%mm0, %%mm7 \n\t" /* GBGBGBGB 0 */ + "movq %%mm5, %%mm3 \n\t" /* GBGBGBGB 2 */ + "punpcklwd %%mm1, %%mm7 \n\t" /* 0RGB0RGB 0 */ + "punpckhwd %%mm1, %%mm0 \n\t" /* 0RGB0RGB 1 */ + "punpcklwd %%mm6, %%mm5 \n\t" /* 0RGB0RGB 2 */ + "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */ + + "movq %%mm7, %%mm2 \n\t" /* 0RGB0RGB 0 */ + "movq %%mm0, %%mm6 \n\t" /* 0RGB0RGB 1 */ + "movq %%mm5, %%mm1 \n\t" /* 0RGB0RGB 2 */ + "movq %%mm3, %%mm4 \n\t" /* 0RGB0RGB 3 */ + + "psllq $40, %%mm7 \n\t" /* RGB00000 0 */ + "psllq $40, %%mm0 \n\t" /* RGB00000 1 */ + "psllq $40, %%mm5 \n\t" /* RGB00000 2 */ + "psllq $40, %%mm3 \n\t" /* RGB00000 3 */ + + "punpckhdq %%mm2, %%mm7 \n\t" /* 0RGBRGB0 0 */ + "punpckhdq %%mm6, %%mm0 \n\t" /* 0RGBRGB0 1 */ + "punpckhdq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */ + "punpckhdq %%mm4, %%mm3 \n\t" /* 0RGBRGB0 3 */ + + "psrlq $8, %%mm7 \n\t" /* 00RGBRGB 0 */ + "movq %%mm0, %%mm6 \n\t" /* 0RGBRGB0 1 */ + "psllq $40, %%mm0 \n\t" /* GB000000 1 */ + "por %%mm0, %%mm7 \n\t" /* GBRGBRGB 0 */ + MOVNTQ" %%mm7, (%1) \n\t" + + "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ + + "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */ + "movq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */ + "psllq $24, %%mm5 \n\t" /* BRGB0000 2 */ + "por %%mm5, %%mm6 \n\t" /* BRGBRGBR 1 */ + MOVNTQ" %%mm6, 8(%1) \n\t" + + "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */ + + "psrlq $40, %%mm1 \n\t" /* 000000RG 2 */ + "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */ + "por %%mm3, %%mm1 \n\t" /* RGBRGBRG 2 */ + MOVNTQ" %%mm1, 16(%1) \n\t" + + "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ + "pxor %%mm4, %%mm4 \n\t" +#endif + + YUV2RGB_ENDLOOP(3) +} + +#define RGB_PLANAR2PACKED32 \ + /* convert RGB plane to RGB packed format, \ + mm0 -> B, mm1 -> R, mm2 -> G, mm3 -> A, \ + mm4 -> GB, mm5 -> AR pixel 4-7, \ + mm6 -> GB, mm7 -> AR pixel 0-3 */ \ + "movq %%mm0, %%mm6;" /* B7 B6 B5 B4 B3 B2 B1 B0 */ \ + "movq %%mm1, %%mm7;" /* R7 R6 R5 R4 R3 R2 R1 R0 */ \ +\ + "movq %%mm0, %%mm4;" /* B7 B6 B5 B4 B3 B2 B1 B0 */ \ + "movq %%mm1, %%mm5;" /* R7 R6 R5 R4 R3 R2 R1 R0 */ \ +\ + "punpcklbw %%mm2, %%mm6;" /* G3 B3 G2 B2 G1 B1 G0 B0 */ \ + "punpcklbw %%mm3, %%mm7;" /* A3 R3 A2 R2 A1 R1 A0 R0 */ \ +\ + "punpcklwd %%mm7, %%mm6;" /* A1 R1 B1 G1 A0 R0 B0 G0 */ \ + MOVNTQ " %%mm6, (%1);" /* Store ARGB1 ARGB0 */ \ +\ + "movq %%mm0, %%mm6;" /* B7 B6 B5 B4 B3 B2 B1 B0 */ \ + "punpcklbw %%mm2, %%mm6;" /* G3 B3 G2 B2 G1 B1 G0 B0 */ \ +\ + "punpckhwd %%mm7, %%mm6;" /* A3 R3 G3 B3 A2 R2 B3 G2 */ \ + MOVNTQ " %%mm6, 8 (%1);" /* Store ARGB3 ARGB2 */ \ +\ + "punpckhbw %%mm2, %%mm4;" /* G7 B7 G6 B6 G5 B5 G4 B4 */ \ + "punpckhbw %%mm3, %%mm5;" /* A7 R7 A6 R6 A5 R5 A4 R4 */ \ +\ + "punpcklwd %%mm5, %%mm4;" /* A5 R5 B5 G5 A4 R4 B4 G4 */ \ + MOVNTQ " %%mm4, 16 (%1);" /* Store ARGB5 ARGB4 */ \ +\ + "movq %%mm0, %%mm4;" /* B7 B6 B5 B4 B3 B2 B1 B0 */ \ + "punpckhbw %%mm2, %%mm4;" /* G7 B7 G6 B6 G5 B5 G4 B4 */ \ +\ + "punpckhwd %%mm5, %%mm4;" /* A7 R7 G7 B7 A6 R6 B6 G6 */ \ + MOVNTQ " %%mm4, 24 (%1);" /* Store ARGB7 ARGB6 */ \ +\ + "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 
u0 */ \ + "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ \ +\ + "pxor %%mm4, %%mm4;" /* zero mm4 */ \ + "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */ \ + +static inline int RENAME(yuv420_rgb32)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + int y, h_size; + + YUV422_UNSHIFT + YUV2RGB_LOOP(4) + + YUV2RGB_INIT + YUV2RGB + "pcmpeqd %%mm3, %%mm3;" /* fill mm3 */ + RGB_PLANAR2PACKED32 + + YUV2RGB_ENDLOOP(4) +} diff --git a/libswscale/yuv2rgb_vis.c b/libswscale/yuv2rgb_vis.c new file mode 100644 index 0000000000..2e2737aa9f --- /dev/null +++ b/libswscale/yuv2rgb_vis.c @@ -0,0 +1,209 @@ +/* + * VIS optimized software YUV to RGB converter + * Copyright (c) 2007 Denes Balatoni <dbalatoni@programozo.hu> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <inttypes.h> +#include <stdlib.h> + +#include "swscale.h" +#include "swscale_internal.h" + +#define YUV2RGB_INIT \ + "wr %%g0, 0x10, %%gsr \n\t" \ + "ldd [%5], %%f32 \n\t" \ + "ldd [%5+8], %%f34 \n\t" \ + "ldd [%5+16], %%f36 \n\t" \ + "ldd [%5+24], %%f38 \n\t" \ + "ldd [%5+32], %%f40 \n\t" \ + "ldd [%5+40], %%f42 \n\t" \ + "ldd [%5+48], %%f44 \n\t" \ + "ldd [%5+56], %%f46 \n\t" \ + "ldd [%5+64], %%f48 \n\t" \ + "ldd [%5+72], %%f50 \n\t" + +#define YUV2RGB_KERNEL \ + /* ^^^^ f0=Y f3=u f5=v */ \ + "fmul8x16 %%f3, %%f48, %%f6 \n\t" \ + "fmul8x16 %%f19, %%f48, %%f22 \n\t" \ + "fmul8x16 %%f5, %%f44, %%f8 \n\t" \ + "fmul8x16 %%f21, %%f44, %%f24 \n\t" \ + "fmul8x16 %%f0, %%f42, %%f0 \n\t" \ + "fmul8x16 %%f16, %%f42, %%f16 \n\t" \ + "fmul8x16 %%f3, %%f50, %%f2 \n\t" \ + "fmul8x16 %%f19, %%f50, %%f18 \n\t" \ + "fmul8x16 %%f5, %%f46, %%f4 \n\t" \ + "fmul8x16 %%f21, %%f46, %%f20 \n\t" \ + \ + "fpsub16 %%f6, %%f34, %%f6 \n\t" /* 1 */ \ + "fpsub16 %%f22, %%f34, %%f22 \n\t" /* 1 */ \ + "fpsub16 %%f8, %%f38, %%f8 \n\t" /* 3 */ \ + "fpsub16 %%f24, %%f38, %%f24 \n\t" /* 3 */ \ + "fpsub16 %%f0, %%f32, %%f0 \n\t" /* 0 */ \ + "fpsub16 %%f16, %%f32, %%f16 \n\t" /* 0 */ \ + "fpsub16 %%f2, %%f36, %%f2 \n\t" /* 2 */ \ + "fpsub16 %%f18, %%f36, %%f18 \n\t" /* 2 */ \ + "fpsub16 %%f4, %%f40, %%f4 \n\t" /* 4 */ \ + "fpsub16 %%f20, %%f40, %%f20 \n\t" /* 4 */ \ + \ + "fpadd16 %%f0, %%f8, %%f8 \n\t" /* Gt */ \ + "fpadd16 %%f16, %%f24, %%f24 \n\t" /* Gt */ \ + "fpadd16 %%f0, %%f4, %%f4 \n\t" /* R */ \ + "fpadd16 %%f16, %%f20, %%f20 \n\t" /* R */ \ + "fpadd16 %%f0, %%f6, %%f6 \n\t" /* B */ \ + "fpadd16 %%f16, %%f22, %%f22 \n\t" /* B */ \ + "fpadd16 %%f8, %%f2, %%f2 \n\t" /* G */ \ + "fpadd16 %%f24, %%f18, %%f18 \n\t" /* G */ \ + \ + "fpack16 %%f4, %%f4 \n\t" \ + "fpack16 %%f20, %%f20 \n\t" \ + "fpack16 %%f6, %%f6 \n\t" \ + "fpack16 %%f22, %%f22 \n\t" \ + "fpack16 %%f2, %%f2 \n\t" \ + "fpack16 %%f18, %%f18 \n\t" + + + +// FIXME: must be changed to set 
alpha to 255 instead of 0 +static int vis_420P_ARGB32(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + int y, out1, out2, out3, out4, out5, out6; + + for(y=0;y < srcSliceH;++y) { + __asm__ volatile ( + YUV2RGB_INIT + "wr %%g0, 0xd2, %%asi \n\t" /* ASI_FL16_P */ + "1: \n\t" + "ldda [%1] %%asi, %%f2 \n\t" + "ldda [%1+2] %%asi, %%f18 \n\t" + "ldda [%2] %%asi, %%f4 \n\t" + "ldda [%2+2] %%asi, %%f20 \n\t" + "ld [%0], %%f0 \n\t" + "ld [%0+4], %%f16 \n\t" + "fpmerge %%f3, %%f3, %%f2 \n\t" + "fpmerge %%f19, %%f19, %%f18 \n\t" + "fpmerge %%f5, %%f5, %%f4 \n\t" + "fpmerge %%f21, %%f21, %%f20 \n\t" + YUV2RGB_KERNEL + "fzero %%f0 \n\t" + "fpmerge %%f4, %%f6, %%f8 \n\t" // r,b,t1 + "fpmerge %%f20, %%f22, %%f24 \n\t" // r,b,t1 + "fpmerge %%f0, %%f2, %%f10 \n\t" // 0,g,t2 + "fpmerge %%f0, %%f18, %%f26 \n\t" // 0,g,t2 + "fpmerge %%f10, %%f8, %%f4 \n\t" // t2,t1,msb + "fpmerge %%f26, %%f24, %%f20 \n\t" // t2,t1,msb + "fpmerge %%f11, %%f9, %%f6 \n\t" // t2,t1,lsb + "fpmerge %%f27, %%f25, %%f22 \n\t" // t2,t1,lsb + "std %%f4, [%3] \n\t" + "std %%f20, [%3+16] \n\t" + "std %%f6, [%3+8] \n\t" + "std %%f22, [%3+24] \n\t" + + "add %0, 8, %0 \n\t" + "add %1, 4, %1 \n\t" + "add %2, 4, %2 \n\t" + "subcc %4, 8, %4 \n\t" + "bne 1b \n\t" + "add %3, 32, %3 \n\t" //delay slot + : "=r" (out1), "=r" (out2), "=r" (out3), "=r" (out4), "=r" (out5), "=r" (out6) + : "0" (src[0]+(y+srcSliceY)*srcStride[0]), "1" (src[1]+((y+srcSliceY)>>1)*srcStride[1]), + "2" (src[2]+((y+srcSliceY)>>1)*srcStride[2]), "3" (dst[0]+(y+srcSliceY)*dstStride[0]), + "4" (c->dstW), + "5" (c->sparc_coeffs) + ); + } + + return srcSliceH; +} + +// FIXME: must be changed to set alpha to 255 instead of 0 +static int vis_422P_ARGB32(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + int y, out1, out2, out3, out4, out5, out6; + + for(y=0;y < srcSliceH;++y) { + __asm__ volatile ( + YUV2RGB_INIT + "wr %%g0, 0xd2, %%asi \n\t" /* ASI_FL16_P */ + "1: \n\t" + "ldda [%1] %%asi, %%f2 \n\t" + "ldda [%1+2] %%asi, %%f18 \n\t" + "ldda [%2] %%asi, %%f4 \n\t" + "ldda [%2+2] %%asi, %%f20 \n\t" + "ld [%0], %%f0 \n\t" + "ld [%0+4], %%f16 \n\t" + "fpmerge %%f3, %%f3, %%f2 \n\t" + "fpmerge %%f19, %%f19, %%f18 \n\t" + "fpmerge %%f5, %%f5, %%f4 \n\t" + "fpmerge %%f21, %%f21, %%f20 \n\t" + YUV2RGB_KERNEL + "fzero %%f0 \n\t" + "fpmerge %%f4, %%f6, %%f8 \n\t" // r,b,t1 + "fpmerge %%f20, %%f22, %%f24 \n\t" // r,b,t1 + "fpmerge %%f0, %%f2, %%f10 \n\t" // 0,g,t2 + "fpmerge %%f0, %%f18, %%f26 \n\t" // 0,g,t2 + "fpmerge %%f10, %%f8, %%f4 \n\t" // t2,t1,msb + "fpmerge %%f26, %%f24, %%f20 \n\t" // t2,t1,msb + "fpmerge %%f11, %%f9, %%f6 \n\t" // t2,t1,lsb + "fpmerge %%f27, %%f25, %%f22 \n\t" // t2,t1,lsb + "std %%f4, [%3] \n\t" + "std %%f20, [%3+16] \n\t" + "std %%f6, [%3+8] \n\t" + "std %%f22, [%3+24] \n\t" + + "add %0, 8, %0 \n\t" + "add %1, 4, %1 \n\t" + "add %2, 4, %2 \n\t" + "subcc %4, 8, %4 \n\t" + "bne 1b \n\t" + "add %3, 32, %3 \n\t" //delay slot + : "=r" (out1), "=r" (out2), "=r" (out3), "=r" (out4), "=r" (out5), "=r" (out6) + : "0" (src[0]+(y+srcSliceY)*srcStride[0]), "1" (src[1]+(y+srcSliceY)*srcStride[1]), + "2" (src[2]+(y+srcSliceY)*srcStride[2]), "3" (dst[0]+(y+srcSliceY)*dstStride[0]), + "4" (c->dstW), + "5" (c->sparc_coeffs) + ); + } + + return srcSliceH; +} + +SwsFunc sws_yuv2rgb_init_vis(SwsContext *c) { + c->sparc_coeffs[5]=c->yCoeff; + c->sparc_coeffs[6]=c->vgCoeff; + c->sparc_coeffs[7]=c->vrCoeff; + c->sparc_coeffs[8]=c->ubCoeff; + 
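+    /* sparc_coeffs[5..9] hold the packed multiplier vectors that        */
+    /* YUV2RGB_INIT loads into %f42..%f50; sparc_coeffs[0..4], filled    */
+    /* in below, hold the bias terms (offset*coeff >> 11, kept to 16     */
+    /* bits) replicated into all four 16-bit lanes for the fpsub16       */
+    /* steps of YUV2RGB_KERNEL.                                          */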
c->sparc_coeffs[9]=c->ugCoeff;
+
+    c->sparc_coeffs[0]=(((int16_t)c->yOffset*(int16_t)c->yCoeff >>11) & 0xffff) * 0x0001000100010001ULL;
+    c->sparc_coeffs[1]=(((int16_t)c->uOffset*(int16_t)c->ubCoeff>>11) & 0xffff) * 0x0001000100010001ULL;
+    c->sparc_coeffs[2]=(((int16_t)c->uOffset*(int16_t)c->ugCoeff>>11) & 0xffff) * 0x0001000100010001ULL;
+    c->sparc_coeffs[3]=(((int16_t)c->vOffset*(int16_t)c->vgCoeff>>11) & 0xffff) * 0x0001000100010001ULL;
+    c->sparc_coeffs[4]=(((int16_t)c->vOffset*(int16_t)c->vrCoeff>>11) & 0xffff) * 0x0001000100010001ULL;
+
+    if (c->dstFormat == PIX_FMT_RGB32 && c->srcFormat == PIX_FMT_YUV422P && (c->dstW & 7)==0) {
+        av_log(c, AV_LOG_INFO, "SPARC VIS accelerated YUV422P -> RGB32 (WARNING: alpha value is wrong)\n");
+        return vis_422P_ARGB32;
+    }
+    else if (c->dstFormat == PIX_FMT_RGB32 && c->srcFormat == PIX_FMT_YUV420P && (c->dstW & 7)==0) {
+        av_log(c, AV_LOG_INFO, "SPARC VIS accelerated YUV420P -> RGB32 (WARNING: alpha value is wrong)\n");
+        return vis_420P_ARGB32;
+    }
+    return NULL;
+}
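For reference, every conversion function in this commit implements the same
libswscale slice callback: it consumes srcSliceH rows of the source planes,
writes the corresponding rows of dst[0] starting at line srcSliceY, and
returns srcSliceH. The sketch below is a hypothetical scalar reference for
that contract (YUV420P to packed RGB24, BT.601-style fixed-point math); the
function name and the 8-fraction-bit coefficients are illustrative only and
are not part of the commit, but the saturation mirrors what packuswb (MMX)
and fpack16 (VIS) do in the accelerated paths above.

    #include <stdint.h>

    static int scalar_yuv420_to_rgb24(uint8_t *src[], int srcStride[],
                                      int srcSliceY, int srcSliceH,
                                      uint8_t *dst[], int dstStride[],
                                      int width)
    {
        for (int y = 0; y < srcSliceH; y++) {
            uint8_t *py  = src[0] + y * srcStride[0];
            uint8_t *pu  = src[1] + (y >> 1) * srcStride[1]; /* chroma is */
            uint8_t *pv  = src[2] + (y >> 1) * srcStride[2]; /* 2x2 subsampled */
            uint8_t *out = dst[0] + (y + srcSliceY) * dstStride[0];

            for (int x = 0; x < width; x++) {
                int Y = (py[x] - 16) * 298;  /* luma scale, 8 fraction bits */
                int U =  pu[x >> 1] - 128;
                int V =  pv[x >> 1] - 128;
                int r = (Y           + 409 * V + 128) >> 8;
                int g = (Y - 100 * U - 208 * V + 128) >> 8;
                int b = (Y + 516 * U           + 128) >> 8;
                /* saturate to 0..255, as packuswb/fpack16 do */
                out[3*x + 0] = r < 0 ? 0 : r > 255 ? 255 : r;
                out[3*x + 1] = g < 0 ? 0 : g > 255 ? 255 : g;
                out[3*x + 2] = b < 0 ? 0 : b > 255 ? 255 : b;
            }
        }
        return srcSliceH;
    }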