;******************************************************************************
;* x86 optimized Format Conversion Utils
;* Copyright (c) 2008 Loren Merritt
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_TEXT

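; CVTPS2PI below abstracts over the two ISAs that can convert two packed
; single-precision floats to two packed int32 in an MMX register: cvtps2pi
; (SSE) rounds according to the current MXCSR mode, while pf2id (3DNow!)
; truncates toward zero, so the two paths can round differently.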
%macro CVTPS2PI 2
%if cpuflag(sse)
    cvtps2pi %1, %2
%elif cpuflag(3dnow)
    pf2id %1, %2
%endif
%endmacro

;------------------------------------------------------------------------------
; void ff_int32_to_float_fmul_scalar(float *dst, const int32_t *src, float mul,
;                                    int len);
;------------------------------------------------------------------------------
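; As a reference, a scalar C sketch of what this kernel computes (not part
; of the build; names follow the prototype above, and len is assumed to be
; a multiple of 8 to match the 32-byte loop below):
;     for (int i = 0; i < len; i++)
;         dst[i] = src[i] * mul;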
%macro INT32_TO_FLOAT_FMUL_SCALAR 1
%if UNIX64
cglobal int32_to_float_fmul_scalar, 3, 3, %1, dst, src, len
%else
cglobal int32_to_float_fmul_scalar, 4, 4, %1, dst, src, mul, len
%endif
%if WIN64
    SWAP 0, 2                 ; mul arrives in xmm2 on WIN64; move it to m0
%elif ARCH_X86_32
    movss   m0, mulm          ; on x86_32 mul is passed on the stack
%endif
    SPLATD  m0                ; broadcast mul to all lanes
    shl     lenq, 2           ; len in elements -> len in bytes
    add     srcq, lenq
    add     dstq, lenq
    neg     lenq              ; loop from -len*4 up to 0
.loop:
%if cpuflag(sse2)
    cvtdq2ps  m1, [srcq+lenq   ]
    cvtdq2ps  m2, [srcq+lenq+16]
%else
    cvtpi2ps  m1, [srcq+lenq   ]  ; each cvtpi2ps converts two int32 into
    cvtpi2ps  m3, [srcq+lenq+ 8]  ; the low half of an xmm register;
    cvtpi2ps  m2, [srcq+lenq+16]  ; movlhps then merges two halves into
    cvtpi2ps  m4, [srcq+lenq+24]  ; four packed floats
    movlhps   m1, m3
    movlhps   m2, m4
%endif
    mulps     m1, m0
    mulps     m2, m0
    mova  [dstq+lenq   ], m1
    mova  [dstq+lenq+16], m2
    add     lenq, 32
    jl .loop
    REP_RET
%endmacro

INIT_XMM sse
INT32_TO_FLOAT_FMUL_SCALAR 5
INIT_XMM sse2
INT32_TO_FLOAT_FMUL_SCALAR 3


;------------------------------------------------------------------------------
; void ff_float_to_int16(int16_t *dst, const float *src, long len);
;------------------------------------------------------------------------------
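; Scalar C sketch of the conversion (for reference only, not part of the
; build; rounding follows the current MXCSR mode, normally round-to-nearest,
; and packssdw saturates to the int16 range, hence the av_clip_int16 and
; lrintf pairing assumed here; len is assumed to be a multiple of 8):
;     for (long i = 0; i < len; i++)
;         dst[i] = av_clip_int16(lrintf(src[i]));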
%macro FLOAT_TO_INT16 1
cglobal float_to_int16, 3, 3, %1, dst, src, len
    add       lenq, lenq      ; len in elements -> bytes of int16 output
    lea       srcq, [srcq+2*lenq] ; floats are twice as wide as int16
    add       dstq, lenq
    neg       lenq
.loop:
%if cpuflag(sse2)
    cvtps2dq    m0, [srcq+2*lenq   ] ; 4 floats -> 4 int32, MXCSR rounding
    cvtps2dq    m1, [srcq+2*lenq+16]
    packssdw    m0, m1               ; 8 int32 -> 8 int16 with saturation
    mova  [dstq+lenq], m0
%else
    CVTPS2PI    m0, [srcq+2*lenq   ]
    CVTPS2PI    m1, [srcq+2*lenq+ 8]
    CVTPS2PI    m2, [srcq+2*lenq+16]
    CVTPS2PI    m3, [srcq+2*lenq+24]
    packssdw    m0, m1
    packssdw    m2, m3
    mova  [dstq+lenq  ], m0
    mova  [dstq+lenq+8], m2
%endif
    add       lenq, 16
    js .loop
%if mmsize == 8
    emms
%endif
    REP_RET
%endmacro

INIT_XMM sse2
FLOAT_TO_INT16 2
INIT_MMX sse
FLOAT_TO_INT16 0
INIT_MMX 3dnow
FLOAT_TO_INT16 0

;------------------------------------------------------------------------------
; void ff_float_to_int16_step(int16_t *dst, const float *src, long len, long step);
;------------------------------------------------------------------------------
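; Same conversion as ff_float_to_int16, but the output is written with a
; stride of step int16 elements; each iteration converts eight samples and
; scatters them using the precomputed step multiples. Scalar C sketch
; (reference only):
;     for (long i = 0; i < len; i++)
;         dst[i * step] = av_clip_int16(lrintf(src[i]));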
%macro FLOAT_TO_INT16_STEP 1
cglobal float_to_int16_step, 4, 7, %1, dst, src, len, step, step3, v1, v2
    add       lenq, lenq      ; len in elements -> bytes of int16 output
    lea       srcq, [srcq+2*lenq]
    lea     step3q, [stepq*3] ; precompute 3*step for the scatter stores
    neg       lenq
.loop:
%if cpuflag(sse2)
    cvtps2dq    m0, [srcq+2*lenq   ]
    cvtps2dq    m1, [srcq+2*lenq+16]
    packssdw    m0, m1
    ; scatter the eight packed int16 samples to dst with a stride of step
    ; elements, four 16-bit stores per group
    movd       v1d, m0
    psrldq      m0, 4
    movd       v2d, m0
    psrldq      m0, 4
    mov     [dstq], v1w
    mov  [dstq+stepq*4], v2w
    shr        v1d, 16
    shr        v2d, 16
    mov  [dstq+stepq*2], v1w
    mov  [dstq+step3q*2], v2w
    lea       dstq, [dstq+stepq*8]
    movd       v1d, m0
    psrldq      m0, 4
    movd       v2d, m0
    mov     [dstq], v1w
    mov  [dstq+stepq*4], v2w
    shr        v1d, 16
    shr        v2d, 16
    mov  [dstq+stepq*2], v1w
    mov  [dstq+step3q*2], v2w
    lea       dstq, [dstq+stepq*8]
%else
    CVTPS2PI    m0, [srcq+2*lenq   ]
    CVTPS2PI    m1, [srcq+2*lenq+ 8]
    CVTPS2PI    m2, [srcq+2*lenq+16]
    CVTPS2PI    m3, [srcq+2*lenq+24]
    packssdw    m0, m1
    packssdw    m2, m3
    movd       v1d, m0
    psrlq       m0, 32
    movd       v2d, m0
    mov     [dstq], v1w
    mov  [dstq+stepq*4], v2w
    shr        v1d, 16
    shr        v2d, 16
    mov  [dstq+stepq*2], v1w
    mov  [dstq+step3q*2], v2w
    lea       dstq, [dstq+stepq*8]
    movd       v1d, m2
    psrlq       m2, 32
    movd       v2d, m2
    mov     [dstq], v1w
    mov  [dstq+stepq*4], v2w
    shr        v1d, 16
    shr        v2d, 16
    mov  [dstq+stepq*2], v1w
    mov  [dstq+step3q*2], v2w
    lea       dstq, [dstq+stepq*8]
%endif
    add       lenq, 16
    js .loop
%if mmsize == 8
    emms
%endif
    REP_RET
%endmacro

INIT_XMM sse2
FLOAT_TO_INT16_STEP 2
INIT_MMX sse
FLOAT_TO_INT16_STEP 0
INIT_MMX 3dnow
FLOAT_TO_INT16_STEP 0

;-------------------------------------------------------------------------------
; void ff_float_to_int16_interleave2(int16_t *dst, const float **src, long len);
;-------------------------------------------------------------------------------
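; Converts and interleaves two channels into one int16 stream. Scalar C
; sketch (reference only; src holds the two channel pointers, and len is
; assumed to be a multiple of the per-iteration sample count):
;     for (long i = 0; i < len; i++) {
;         dst[2 * i    ] = av_clip_int16(lrintf(src[0][i]));
;         dst[2 * i + 1] = av_clip_int16(lrintf(src[1][i]));
;     }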
%macro FLOAT_TO_INT16_INTERLEAVE2 0
cglobal float_to_int16_interleave2, 3, 4, 2, dst, src0, src1, len
    lea      lenq, [4*r2q]         ; r2q holds len; convert to bytes
    mov     src1q, [src0q+gprsize] ; src[1]
    mov     src0q, [src0q]         ; src[0]
    add      dstq, lenq
    add     src0q, lenq
    add     src1q, lenq
    neg      lenq
.loop:
%if cpuflag(sse2)
    cvtps2dq   m0, [src0q+lenq]   ; a0 a1 a2 a3 (int32)
    cvtps2dq   m1, [src1q+lenq]   ; b0 b1 b2 b3 (int32)
    packssdw   m0, m1             ; a0 a1 a2 a3 b0 b1 b2 b3 (int16, saturated)
    movhlps    m1, m0             ; b0 b1 b2 b3 in the low half
    punpcklwd  m0, m1             ; a0 b0 a1 b1 a2 b2 a3 b3
    mova  [dstq+lenq], m0
%else
    CVTPS2PI   m0, [src0q+lenq  ]
    CVTPS2PI   m1, [src0q+lenq+8]
    CVTPS2PI   m2, [src1q+lenq  ]
    CVTPS2PI   m3, [src1q+lenq+8]
    packssdw   m0, m1
    packssdw   m2, m3
    mova       m1, m0
    punpcklwd  m0, m2
    punpckhwd  m1, m2
    mova  [dstq+lenq  ], m0
    mova  [dstq+lenq+8], m1
%endif
    add      lenq, 16
    js .loop
%if mmsize == 8
    emms
%endif
    REP_RET
%endmacro

INIT_MMX 3dnow
FLOAT_TO_INT16_INTERLEAVE2
INIT_MMX sse
FLOAT_TO_INT16_INTERLEAVE2
INIT_XMM sse2
FLOAT_TO_INT16_INTERLEAVE2

;-----------------------------------------------------------------------------
; void ff_float_to_int16_interleave6(int16_t *dst, const float **src, int len)
;-----------------------------------------------------------------------------
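; Converts and interleaves six channels; each iteration handles two samples
; per channel (the register comments below label the channels a..f and the
; samples 0/1). Scalar C sketch (reference only; len is assumed even):
;     for (int i = 0; i < len; i++)
;         for (int c = 0; c < 6; c++)
;             dst[6 * i + c] = av_clip_int16(lrintf(src[c][i]));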
%macro FLOAT_TO_INT16_INTERLEAVE6 0
cglobal float_to_int16_interleave6, 2, 8, 0, dst, src, src1, src2, src3, src4, src5, len
%if ARCH_X86_64
    mov     lend, r2d
%else
    %define lend dword r2m
%endif
    mov src1q, [srcq+1*gprsize]
    mov src2q, [srcq+2*gprsize]
    mov src3q, [srcq+3*gprsize]
    mov src4q, [srcq+4*gprsize]
    mov src5q, [srcq+5*gprsize]
    mov srcq,  [srcq]
    ; keep channels 1-5 as offsets from src[0] so that advancing srcq alone
    ; steps through all six channels
    sub src1q, srcq
    sub src2q, srcq
    sub src3q, srcq
    sub src4q, srcq
    sub src5q, srcq
.loop:
    CVTPS2PI   mm0, [srcq]
    CVTPS2PI   mm1, [srcq+src1q]
    CVTPS2PI   mm2, [srcq+src2q]
    CVTPS2PI   mm3, [srcq+src3q]
    CVTPS2PI   mm4, [srcq+src4q]
    CVTPS2PI   mm5, [srcq+src5q]
    ; transpose the int16 pairs so two samples of all six channels come out
    ; in interleaved order
    packssdw   mm0, mm3           ; a0 a1 d0 d1
    packssdw   mm1, mm4           ; b0 b1 e0 e1
    packssdw   mm2, mm5           ; c0 c1 f0 f1
    PSWAPD     mm3, mm0           ; d0 d1 a0 a1
    punpcklwd  mm0, mm1           ; a0 b0 a1 b1
    punpckhwd  mm1, mm2           ; e0 f0 e1 f1
    punpcklwd  mm2, mm3           ; c0 d0 c1 d1
    PSWAPD     mm3, mm0           ; a1 b1 a0 b0
    punpckldq  mm0, mm2           ; a0 b0 c0 d0
    punpckhdq  mm2, mm1           ; c1 d1 e1 f1
    punpckldq  mm1, mm3           ; e0 f0 a1 b1
    movq [dstq   ], mm0           ; a0 b0 c0 d0
    movq [dstq+16], mm2           ; c1 d1 e1 f1
    movq [dstq+ 8], mm1           ; e0 f0 a1 b1
    add srcq, 8
    add dstq, 24
    sub lend, 2
    jg .loop
    emms
    RET
%endmacro ; FLOAT_TO_INT16_INTERLEAVE6

INIT_MMX sse
FLOAT_TO_INT16_INTERLEAVE6
INIT_MMX 3dnow
FLOAT_TO_INT16_INTERLEAVE6
INIT_MMX 3dnowext
FLOAT_TO_INT16_INTERLEAVE6

;-----------------------------------------------------------------------------
; void ff_float_interleave6(float *dst, const float **src, unsigned int len);
;-----------------------------------------------------------------------------

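; Interleaves six float channels without conversion. Scalar C sketch
; (reference only; the register comments in the SSE path below label the
; channels a..f and the samples 0..3):
;     for (unsigned int i = 0; i < len; i++)
;         for (int c = 0; c < 6; c++)
;             dst[6 * i + c] = src[c][i];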
%macro FLOAT_INTERLEAVE6 1
cglobal float_interleave6, 2, 8, %1, dst, src, src1, src2, src3, src4, src5, len
%if ARCH_X86_64
    mov     lend, r2d
%else
    %define lend dword r2m
%endif
    mov    src1q, [srcq+1*gprsize]
    mov    src2q, [srcq+2*gprsize]
    mov    src3q, [srcq+3*gprsize]
    mov    src4q, [srcq+4*gprsize]
    mov    src5q, [srcq+5*gprsize]
    mov     srcq, [srcq]
    sub    src1q, srcq
    sub    src2q, srcq
    sub    src3q, srcq
    sub    src4q, srcq
    sub    src5q, srcq
.loop:
%if cpuflag(sse)
    movaps    m0, [srcq]          ; a0 a1 a2 a3
    movaps    m1, [srcq+src1q]    ; b0 b1 b2 b3
    movaps    m2, [srcq+src2q]    ; c0 c1 c2 c3
    movaps    m3, [srcq+src3q]    ; d0 d1 d2 d3
    movaps    m4, [srcq+src4q]    ; e0 e1 e2 e3
    movaps    m5, [srcq+src5q]    ; f0 f1 f2 f3

    SBUTTERFLYPS 0, 1, 6          ; m0 = a0 b0 a1 b1, m1 = a2 b2 a3 b3
    SBUTTERFLYPS 2, 3, 6          ; m2 = c0 d0 c1 d1, m3 = c2 d2 c3 d3
    SBUTTERFLYPS 4, 5, 6          ; m4 = e0 f0 e1 f1, m5 = e2 f2 e3 f3

    movaps    m6, m4              ; e0 f0 e1 f1
    shufps    m4, m0, 0xe4        ; e0 f0 a1 b1
    movlhps   m0, m2              ; a0 b0 c0 d0
    movhlps   m6, m2              ; c1 d1 e1 f1
    movaps [dstq   ], m0
    movaps [dstq+16], m4
    movaps [dstq+32], m6

    movaps    m6, m5              ; e2 f2 e3 f3
    shufps    m5, m1, 0xe4        ; e2 f2 a3 b3
    movlhps   m1, m3              ; a2 b2 c2 d2
    movhlps   m6, m3              ; c3 d3 e3 f3
    movaps [dstq+48], m1
    movaps [dstq+64], m5
    movaps [dstq+80], m6
%else ; mmx
    movq       m0, [srcq]
    movq       m1, [srcq+src1q]
    movq       m2, [srcq+src2q]
    movq       m3, [srcq+src3q]
    movq       m4, [srcq+src4q]
    movq       m5, [srcq+src5q]

    SBUTTERFLY dq, 0, 1, 6
    SBUTTERFLY dq, 2, 3, 6
    SBUTTERFLY dq, 4, 5, 6
    movq [dstq   ], m0
    movq [dstq+ 8], m2
    movq [dstq+16], m4
    movq [dstq+24], m1
    movq [dstq+32], m3
    movq [dstq+40], m5
%endif
    add      srcq, mmsize
    add      dstq, mmsize*6
    sub      lend, mmsize/4
    jg .loop
%if mmsize == 8
    emms
%endif
    REP_RET
%endmacro

INIT_MMX mmx
FLOAT_INTERLEAVE6 0
INIT_XMM sse
FLOAT_INTERLEAVE6 7

;-----------------------------------------------------------------------------
; void ff_float_interleave2(float *dst, const float **src, unsigned int len);
;-----------------------------------------------------------------------------

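; Interleaves two float channels without conversion. Scalar C sketch
; (reference only):
;     for (unsigned int i = 0; i < len; i++) {
;         dst[2 * i    ] = src[0][i];
;         dst[2 * i + 1] = src[1][i];
;     }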
%macro FLOAT_INTERLEAVE2 1
cglobal float_interleave2, 3, 4, %1, dst, src, len, src1
    mov     src1q, [srcq+gprsize]
    mov      srcq, [srcq        ]
    sub     src1q, srcq
.loop:
    mova       m0, [srcq             ]
    mova       m1, [srcq+src1q       ]
    mova       m3, [srcq      +mmsize]
    mova       m4, [srcq+src1q+mmsize]

    mova       m2, m0
    PUNPCKLDQ  m0, m1             ; interleave low halves of the two channels
    PUNPCKHDQ  m2, m1             ; interleave high halves

    mova       m1, m3
    PUNPCKLDQ  m3, m4
    PUNPCKHDQ  m1, m4

    mova  [dstq         ], m0
    mova  [dstq+1*mmsize], m2
    mova  [dstq+2*mmsize], m3
    mova  [dstq+3*mmsize], m1

    add      srcq, mmsize*2
    add      dstq, mmsize*4
    sub      lend, mmsize/2
    jg .loop
%if mmsize == 8
    emms
%endif
    REP_RET
%endmacro

INIT_MMX mmx
%define PUNPCKLDQ punpckldq
%define PUNPCKHDQ punpckhdq
FLOAT_INTERLEAVE2 0
INIT_XMM sse
%define PUNPCKLDQ unpcklps
%define PUNPCKHDQ unpckhps
FLOAT_INTERLEAVE2 5