;******************************************************************************
;* SIMD lossless video DSP utils
;* Copyright (c) 2008 Loren Merritt
;* Copyright (c) 2014 Michael Niedermayer
;* Copyright (c) 2017 Jokyo Images
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"

SECTION_RODATA
cextern pb_15
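; Note: pb_zzzzzzzz77777777 declares only 8 bytes; when loaded as a 16-byte
; constant (see VBROADCASTI128 below) it intentionally runs into pb_7,
; giving 8 x -1 followed by 8 x 7.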
pb_zzzzzzzz77777777: times 8 db -1
pb_7: times 8 db 7
pb_ef: times 8 db 14,15
pb_67: times 8 db 6, 7
pb_zzzz3333zzzzbbbb: db -1,-1,-1,-1,3,3,3,3,-1,-1,-1,-1,11,11,11,11
pb_zz11zz55zz99zzdd: db -1,-1,1,1,-1,-1,5,5,-1,-1,9,9,-1,-1,13,13
pb_zzzz2323zzzzabab: db -1,-1,-1,-1, 2, 3, 2, 3,-1,-1,-1,-1,10,11,10,11
pb_zzzzzzzz67676767: db -1,-1,-1,-1,-1,-1,-1,-1, 6, 7, 6, 7, 6, 7, 6, 7

SECTION .text
;------------------------------------------------------------------------------
; void ff_add_median_pred(uint8_t *dst, const uint8_t *top,
; const uint8_t *diff, int w,
; int *left, int *left_top)
;------------------------------------------------------------------------------
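; Roughly equivalent scalar sketch (comments only, byte arithmetic wraps
; modulo 256): each output is the residual plus the median of left, top and
; left + top - topleft, with left/topleft carried across the call:
;
;     l = *left; tl = *left_top;
;     for (i = 0; i < w; i++) {
;         pred = median(l, top[i], l + top[i] - tl);
;         l    = dst[i] = diff[i] + pred;
;         tl   = top[i];
;     }
;     *left = l; *left_top = tl;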
INIT_XMM sse2
cglobal add_median_pred, 6,6,8, dst, top, diff, w, left, left_top
movu m0, [topq]
mova m2, m0
movd m4, [left_topq]
LSHIFT m2, 1
mova m1, m0
por m4, m2
movd m3, [leftq]
psubb m0, m4 ; t-tl
add dstq, wq
add topq, wq
add diffq, wq
neg wq
jmp .skip
.loop:
movu m4, [topq+wq]
mova m0, m4
LSHIFT m4, 1
por m4, m1
mova m1, m0 ; t
psubb m0, m4 ; t-tl
.skip:
movu m2, [diffq+wq]
%assign i 0
%rep mmsize
mova m4, m0
paddb m4, m3 ; t-tl+l
mova m5, m3
pmaxub m3, m1
pminub m5, m1
pminub m3, m4
pmaxub m3, m5 ; median
paddb m3, m2 ; +residual
%if i==0
mova m7, m3
LSHIFT m7, mmsize-1
%else
mova m6, m3
RSHIFT m7, 1
LSHIFT m6, mmsize-1
por m7, m6
%endif
%if i<mmsize-1
RSHIFT m0, 1
RSHIFT m1, 1
RSHIFT m2, 1
%endif
%assign i i+1
%endrep
movu [dstq+wq], m7
add wq, mmsize
jl .loop
movzx r2d, byte [dstq-1]
mov [leftq], r2d
movzx r2d, byte [topq-1]
mov [left_topq], r2d
RET
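
; ADD_LEFT_LOOP computes a per-register byte prefix sum in log2 steps rather
; than a serial carry:
;   1. psllw+paddb folds each even byte into the following odd byte
;   2. pshufb with pb_zz11zz55zz99zzdd propagates bytes 1/5/9/13 two bytes up
;   3. pshufb with pb_zzzz3333zzzzbbbb propagates bytes 3/11 four bytes up
;   4. (xmm and up) pshufb with pb_zzzzzzzz77777777 propagates byte 7 into the
;      high eight bytes
; xm0 carries the running "left" value; pshufb xm0, xm5 broadcasts the last
; decoded byte to every position before the next block (and, for ymm, before
; the second 128-bit lane).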
%macro ADD_LEFT_LOOP 2 ; %1 = dst_is_aligned, %2 = src_is_aligned
add srcq, wq
add dstq, wq
neg wq
%%.loop:
pshufb xm0, xm5
%if %2
mova m1, [srcq+wq]
%else
movu m1, [srcq+wq]
%endif
psllw m2, m1, 8
paddb m1, m2
pshufb m2, m1, m3
paddb m1, m2
pshufb m2, m1, m4
paddb m1, m2
%if mmsize >= 16
pshufb m2, m1, m6
paddb m1, m2
%endif
paddb xm0, xm1
%if %1
mova [dstq+wq], xm0
%else
movq [dstq+wq], xm0
movhps [dstq+wq+8], xm0
%endif
%if mmsize == 32
vextracti128 xm2, m1, 1 ; get second lane of the ymm
pshufb xm0, xm5 ; broadcast the last byte of the first lane to all positions
paddb xm0, xm2
; store the second lane
%if %1
mova [dstq+wq+16], xm0
%else
movq [dstq+wq+16], xm0
movhps [dstq+wq+16+8], xm0
%endif
%endif
add wq, mmsize
jl %%.loop
%if mmsize == 32
movzx eax, byte [dstq - 1]
%else
mov eax, mmsize-1
sub eax, wd
movd m1, eax
pshufb m0, m1
movd eax, m0
%endif
RET
%endmacro
;------------------------------------------------------------------------------
; int ff_add_left_pred(uint8_t *dst, const uint8_t *src, int w, int left)
;------------------------------------------------------------------------------
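; Roughly equivalent scalar sketch (comments only): a running byte sum that
; wraps modulo 256 and returns the final accumulator:
;
;     acc = left;
;     for (i = 0; i < w; i++) {
;         acc = (acc + src[i]) & 0xFF;
;         dst[i] = acc;
;     }
;     return acc;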
INIT_MMX ssse3
cglobal add_left_pred, 3,3,7, dst, src, w, left
.skip_prologue:
mova m5, [pb_7]
mova m4, [pb_zzzz3333zzzzbbbb]
mova m3, [pb_zz11zz55zz99zzdd]
movd m0, leftm
psllq m0, 56
ADD_LEFT_LOOP 1, 1
%macro ADD_LEFT_PRED_UNALIGNED 0
cglobal add_left_pred_unaligned, 3,3,7, dst, src, w, left
mova xm5, [pb_15]
VBROADCASTI128 m6, [pb_zzzzzzzz77777777]
VBROADCASTI128 m4, [pb_zzzz3333zzzzbbbb]
VBROADCASTI128 m3, [pb_zz11zz55zz99zzdd]
movd xm0, leftm
pslldq xm0, 15
test srcq, mmsize - 1
jnz .src_unaligned
test dstq, mmsize - 1
jnz .dst_unaligned
ADD_LEFT_LOOP 1, 1
.dst_unaligned:
ADD_LEFT_LOOP 0, 1
.src_unaligned:
ADD_LEFT_LOOP 0, 0
%endmacro
INIT_XMM ssse3
ADD_LEFT_PRED_UNALIGNED
%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
ADD_LEFT_PRED_UNALIGNED
%endif
;------------------------------------------------------------------------------
; void ff_add_bytes(uint8_t *dst, uint8_t *src, ptrdiff_t w);
;------------------------------------------------------------------------------
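; Roughly equivalent scalar sketch (comments only):
;
;     for (i = 0; i < w; i++)
;         dst[i] += src[i];
;
; The SIMD loop below handles 2*mmsize bytes per iteration; the remainder is
; done byte by byte.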
%macro ADD_BYTES 0
cglobal add_bytes, 3,4,2, dst, src, w, size
mov sizeq, wq
and sizeq, -2*mmsize
jz .2
add dstq, sizeq
add srcq, sizeq
neg sizeq
.1:
mova m0, [srcq + sizeq]
mova m1, [srcq + sizeq + mmsize]
paddb m0, [dstq + sizeq]
paddb m1, [dstq + sizeq + mmsize]
mova [dstq + sizeq], m0
mova [dstq + sizeq + mmsize], m1
add sizeq, 2*mmsize
jl .1
.2:
and wq, 2*mmsize-1
jz .end
add dstq, wq
add srcq, wq
neg wq
.3:
mov sizeb, [srcq + wq]
add [dstq + wq], sizeb
inc wq
jl .3
.end:
RET
%endmacro
INIT_XMM sse2
ADD_BYTES
%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
ADD_BYTES
%endif
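
; ADD_HFYU_LEFT_LOOP_INT16 applies the same log-step in-register prefix sum
; as ADD_LEFT_LOOP, but on 16-bit elements: pslld+paddw forms 2-element sums,
; pshufb with pb_zzzz2323zzzzabab extends them to 4 elements, and (for xmm)
; pb_zzzzzzzz67676767 extends them to 8; the result is ANDed with the
; broadcast mask so samples stay within their bit depth.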
%macro ADD_HFYU_LEFT_LOOP_INT16 2 ; %1 = dst alignment (a/u), %2 = src alignment (a/u)
add wd, wd
add srcq, wq
add dstq, wq
neg wq
%%.loop:
mov%2 m1, [srcq+wq]
mova m2, m1
pslld m1, 16
paddw m1, m2
mova m2, m1
pshufb m1, m3
paddw m1, m2
pshufb m0, m5
%if mmsize == 16
mova m2, m1
pshufb m1, m4
paddw m1, m2
%endif
paddw m0, m1
pand m0, m7
%ifidn %1, a
mova [dstq+wq], m0
%else
movq [dstq+wq], m0
movhps [dstq+wq+8], m0
%endif
add wq, mmsize
jl %%.loop
mov eax, mmsize-1
sub eax, wd
mov wd, eax
shl wd, 8
lea eax, [wd+eax-1]
movd m1, eax
pshufb m0, m1
movd eax, m0
RET
%endmacro
;---------------------------------------------------------------------------------------------
; int add_left_pred_int16(uint16_t *dst, const uint16_t *src, unsigned mask, int w, int left)
;---------------------------------------------------------------------------------------------
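; Roughly equivalent scalar sketch (comments only): a running 16-bit sum
; clipped to the sample range by mask, returning the final accumulator:
;
;     acc = left;
;     for (i = 0; i < w; i++) {
;         acc = (acc + src[i]) & mask;
;         dst[i] = acc;
;     }
;     return acc;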
INIT_MMX ssse3
cglobal add_left_pred_int16, 4,4,8, dst, src, mask, w, left
.skip_prologue:
mova m5, [pb_67]
mova m3, [pb_zzzz2323zzzzabab]
movd m0, leftm
psllq m0, 48
movd m7, maskm
SPLATW m7, m7
ADD_HFYU_LEFT_LOOP_INT16 a, a
INIT_XMM ssse3
cglobal add_left_pred_int16_unaligned, 4,4,8, dst, src, mask, w, left
mova m5, [pb_ef]
mova m4, [pb_zzzzzzzz67676767]
mova m3, [pb_zzzz2323zzzzabab]
movd m0, leftm
pslldq m0, 14
movd m7, maskm
SPLATW m7, m7
test srcq, 15
jnz .src_unaligned
test dstq, 15
jnz .dst_unaligned
ADD_HFYU_LEFT_LOOP_INT16 a, a
.dst_unaligned:
ADD_HFYU_LEFT_LOOP_INT16 u, a
.src_unaligned:
ADD_HFYU_LEFT_LOOP_INT16 u, u
;---------------------------------------------------------------------------------------------
; void add_gradient_pred(uint8_t *src, const ptrdiff_t stride, const ptrdiff_t width)
;---------------------------------------------------------------------------------------------
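; Roughly equivalent scalar sketch (comments only), using the names from the
; loop comments below (A = top, B = top-left, C = decoded left neighbour);
; the prediction A + C - B is applied in place with byte wraparound:
;
;     for (x = 0; x < width; x++) {
;         A = src[x - stride];
;         B = src[x - stride - 1];
;         C = src[x - 1];
;         src[x] = (src[x] + A + C - B) & 0xFF;
;     }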
%macro ADD_GRADIENT_PRED 0
cglobal add_gradient_pred, 3,4,5, src, stride, width, tmp
mova xm0, [pb_15]
; load src[-1] (initial left neighbour) into xm1
movd xm1, [srcq-1]
%if cpuflag(avx2)
vpbroadcastb xm1, xm1
%else
pxor xm2, xm2
pshufb xm1, xm2
%endif
add srcq, widthq
neg widthq
neg strideq
.loop:
lea tmpq, [srcq + strideq]
mova m2, [tmpq + widthq] ; A = src[x-stride]
movu m3, [tmpq + widthq - 1] ; B = src[x - (stride + 1)]
mova m4, [srcq + widthq] ; current val (src[x])
psubb m2, m3 ; A - B
; prefix sum A-B
pslldq m3, m2, 1
paddb m2, m3
pslldq m3, m2, 2
paddb m2, m3
pslldq m3, m2, 4
paddb m2, m3
pslldq m3, m2, 8
paddb m2, m3
; prefix sum current val
pslldq m3, m4, 1
paddb m4, m3
pslldq m3, m4, 2
paddb m4, m3
pslldq m3, m4, 4
paddb m4, m3
pslldq m3, m4, 8
paddb m4, m3
; combine the two prefix sums
paddb m2, m4 ; current + (A - B)
paddb xm1, xm2 ; += C
mova [srcq + widthq], xm1 ; store
pshufb xm1, xm0 ; broadcast the last byte to all positions of xm1
%if mmsize == 32
vextracti128 xm2, m2, 1 ; get second lane of the ymm
paddb xm1, xm2 ; += C
mova [srcq + widthq + 16], xm1 ; store
pshufb xm1, xm0 ; broadcast the last byte to all positions of xm1
%endif
add widthq, mmsize
jl .loop
RET
%endmacro
INIT_XMM ssse3
ADD_GRADIENT_PRED
%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
ADD_GRADIENT_PRED
%endif