; *****************************************************************************
; * Provide SIMD optimizations for add_residual functions for HEVC decoding
; * Copyright (c) 2014 Pierre-Edouard LEPERE
; *
; * This file is part of FFmpeg.
; *
; * FFmpeg is free software; you can redistribute it and/or
; * modify it under the terms of the GNU Lesser General Public
; * License as published by the Free Software Foundation; either
; * version 2.1 of the License, or (at your option) any later version.
; *
; * FFmpeg is distributed in the hope that it will be useful,
; * but WITHOUT ANY WARRANTY; without even the implied warranty of
; * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; * Lesser General Public License for more details.
; *
; * You should have received a copy of the GNU Lesser General Public
; * License along with FFmpeg; if not, write to the Free Software
; * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
; ******************************************************************************
%include "libavutil/x86/x86util.asm"

SECTION .text

cextern pw_1023
%define max_pixels_10 pw_1023

; the add_res macros and functions were largely inspired by h264_idct.asm from the x264 project
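;
; The 8-bit functions below work around the lack of a packed "add signed
; word to unsigned byte with clipping" instruction: each residual is split
; into its positive part and the negation of its negative part, both packed
; to unsigned bytes, then applied with saturating byte arithmetic. Per
; pixel this is roughly (a sketch with a hypothetical sat_u8 helper, not
; the exact C reference in hevcdsp_template.c):
;
;     pos    = FFMAX( res[x], 0);          ; packuswb clamps to [0, 255]
;     neg    = FFMAX(-res[x], 0);
;     dst[x] = sat_u8(dst[x] + pos - neg); ; paddusb/psubusb saturate
;
; which is equivalent to dst[x] = av_clip_uint8(dst[x] + res[x]).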
%macro ADD_RES_MMX_4_8 0
    mova              m2, [r1]
    mova              m4, [r1+8]
    pxor              m3, m3
    psubw             m3, m2
    packuswb          m2, m2
    packuswb          m3, m3
    pxor              m5, m5
    psubw             m5, m4
    packuswb          m4, m4
    packuswb          m5, m5

    movh              m0, [r0]
    movh              m1, [r0+r2]
    paddusb           m0, m2
    paddusb           m1, m4
    psubusb           m0, m3
    psubusb           m1, m5
    movh              [r0], m0
    movh              [r0+r2], m1
%endmacro

INIT_MMX mmxext
; void ff_hevc_add_residual_4_8_mmxext(uint8_t *dst, int16_t *res, ptrdiff_t stride)
cglobal hevc_add_residual_4_8, 3, 4, 6
    ADD_RES_MMX_4_8
    add               r1, 16
    lea               r0, [r0+r2*2]
    ADD_RES_MMX_4_8
    RET
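
; ADD_RES_SSE_8_8 covers four 8-pixel rows per expansion: two rows share
; each XMM register (movq fills the low half, movhps the high half), so a
; single paddusb/psubusb pair updates 16 destination pixels at once. The
; caller must have r3 = 3*stride.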
%macro ADD_RES_SSE_8_8 0
    pxor              m3, m3
    mova              m4, [r1]
    mova              m6, [r1+16]
    mova              m0, [r1+32]
    mova              m2, [r1+48]
    psubw             m5, m3, m4
    psubw             m7, m3, m6
    psubw             m1, m3, m0
    packuswb          m4, m0
    packuswb          m5, m1
    psubw             m3, m2
    packuswb          m6, m2
    packuswb          m7, m3

    movq              m0, [r0]
    movq              m1, [r0+r2]
    movhps            m0, [r0+r2*2]
    movhps            m1, [r0+r3]
    paddusb           m0, m4
    paddusb           m1, m6
    psubusb           m0, m5
    psubusb           m1, m7
    movq              [r0], m0
    movq              [r0+r2], m1
    movhps            [r0+r2*2], m0
    movhps            [r0+r3], m1
%endmacro
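
; ADD_RES_SSE_16_32_8 %1: byte offset into the residual buffer; %2, %3: the
; two destination rows (or row halves) written by this expansion. m0 must
; be zero on entry. Under AVX2 the extra vinserti128 loads fill the high
; lanes so each YMM register covers 32 coefficients; the SSE2 path spells
; out mova+psubw where AVX can use the three-operand form directly.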
%macro ADD_RES_SSE_16_32_8 3
    mova             xm2, [r1+%1]
    mova             xm6, [r1+%1+16]
%if cpuflag(avx2)
    vinserti128       m2, m2, [r1+%1+32], 1
    vinserti128       m6, m6, [r1+%1+48], 1
%endif
%if cpuflag(avx)
    psubw             m1, m0, m2
    psubw             m5, m0, m6
%else
    mova              m1, m0
    mova              m5, m0
    psubw             m1, m2
    psubw             m5, m6
%endif
    packuswb          m2, m6
    packuswb          m1, m5

    mova             xm4, [r1+%1+mmsize*2]
    mova             xm6, [r1+%1+mmsize*2+16]
%if cpuflag(avx2)
    vinserti128       m4, m4, [r1+%1+96], 1
    vinserti128       m6, m6, [r1+%1+112], 1
%endif
%if cpuflag(avx)
    psubw             m3, m0, m4
    psubw             m5, m0, m6
%else
    mova              m3, m0
    mova              m5, m0
    psubw             m3, m4
    psubw             m5, m6
%endif
    packuswb          m4, m6
    packuswb          m3, m5

    paddusb           m2, [%2]
    paddusb           m4, [%3]
    psubusb           m2, m1
    psubusb           m4, m3
    mova             [%2], m2
    mova             [%3], m4
%endmacro

%macro TRANSFORM_ADD_8 0
; void ff_hevc_add_residual_8_8_<opt>(uint8_t *dst, int16_t *res, ptrdiff_t stride)
cglobal hevc_add_residual_8_8, 3, 4, 8
    lea               r3, [r2*3]
    ADD_RES_SSE_8_8
    add               r1, 64
    lea               r0, [r0+r2*4]
    ADD_RES_SSE_8_8
    RET

; void ff_hevc_add_residual_16_8_<opt>(uint8_t *dst, int16_t *res, ptrdiff_t stride)
cglobal hevc_add_residual_16_8, 3, 4, 7
    pxor              m0, m0
    lea               r3, [r2*3]
    ADD_RES_SSE_16_32_8  0, r0,      r0+r2
    ADD_RES_SSE_16_32_8 64, r0+r2*2, r0+r3
%rep 3
    add               r1, 128
    lea               r0, [r0+r2*4]
    ADD_RES_SSE_16_32_8  0, r0,      r0+r2
    ADD_RES_SSE_16_32_8 64, r0+r2*2, r0+r3
%endrep
    RET

; void ff_hevc_add_residual_32_8_<opt>(uint8_t *dst, int16_t *res, ptrdiff_t stride)
cglobal hevc_add_residual_32_8, 3, 4, 7
    pxor              m0, m0
    ADD_RES_SSE_16_32_8  0, r0,    r0+16
    ADD_RES_SSE_16_32_8 64, r0+r2, r0+r2+16
%rep 15
    add               r1, 128
    lea               r0, [r0+r2*2]
    ADD_RES_SSE_16_32_8  0, r0,    r0+16
    ADD_RES_SSE_16_32_8 64, r0+r2, r0+r2+16
%endrep
    RET
%endmacro
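
; TRANSFORM_ADD_8 is expanded twice below: INIT_XMM from x86inc.asm rebinds
; the m# registers and instruction forms, so the same body yields both an
; SSE2 and a VEX-encoded AVX flavour of each function.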
INIT_XMM sse2
TRANSFORM_ADD_8
INIT_XMM avx
TRANSFORM_ADD_8

%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
; void ff_hevc_add_residual_32_8_avx2(uint8_t *dst, int16_t *res, ptrdiff_t stride)
cglobal hevc_add_residual_32_8, 3, 4, 7
    pxor              m0, m0
    lea               r3, [r2*3]
    ADD_RES_SSE_16_32_8   0, r0,      r0+r2
    ADD_RES_SSE_16_32_8 128, r0+r2*2, r0+r3
%rep 7
    add               r1, 256
    lea               r0, [r0+r2*4]
    ADD_RES_SSE_16_32_8   0, r0,      r0+r2
    ADD_RES_SSE_16_32_8 128, r0+r2*2, r0+r3
%endrep
    RET
%endif
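
; 10-bit variants: samples are 16-bit words, so the residual can be added
; with a plain paddw and the result clamped with CLIPW (pmaxsw/pminsw
; against a zero register and max_pixels_10 == 1023); no positive/negative
; split is needed. Per pixel, roughly (a sketch of the C reference):
;
;     dst[x] = av_clip(dst[x] + res[x], 0, 1023);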
%macro ADD_RES_SSE_8_10 4
    mova              m0, [%4]
    mova              m1, [%4+16]
    mova              m2, [%4+32]
    mova              m3, [%4+48]
    paddw             m0, [%1+0]
    paddw             m1, [%1+%2]
    paddw             m2, [%1+%2*2]
    paddw             m3, [%1+%3]
    CLIPW             m0, m4, m5
    CLIPW             m1, m4, m5
    CLIPW             m2, m4, m5
    CLIPW             m3, m4, m5
    mova              [%1+0], m0
    mova              [%1+%2], m1
    mova              [%1+%2*2], m2
    mova              [%1+%3], m3
%endmacro

%macro ADD_RES_MMX_4_10 3
    mova              m0, [%1+0]
    mova              m1, [%1+%2]
    paddw             m0, [%3]
    paddw             m1, [%3+8]
    CLIPW             m0, m2, m3
    CLIPW             m1, m2, m3
    mova              [%1+0], m0
    mova              [%1+%2], m1
%endmacro

%macro ADD_RES_SSE_16_10 3
    mova              m0, [%3]
    mova              m1, [%3+16]
    mova              m2, [%3+32]
    mova              m3, [%3+48]
    paddw             m0, [%1]
    paddw             m1, [%1+16]
    paddw             m2, [%1+%2]
    paddw             m3, [%1+%2+16]
    CLIPW             m0, m4, m5
    CLIPW             m1, m4, m5
    CLIPW             m2, m4, m5
    CLIPW             m3, m4, m5
    mova              [%1], m0
    mova              [%1+16], m1
    mova              [%1+%2], m2
    mova              [%1+%2+16], m3
%endmacro

%macro ADD_RES_SSE_32_10 2
    mova              m0, [%2]
    mova              m1, [%2+16]
    mova              m2, [%2+32]
    mova              m3, [%2+48]
    paddw             m0, [%1]
    paddw             m1, [%1+16]
    paddw             m2, [%1+32]
    paddw             m3, [%1+48]
    CLIPW             m0, m4, m5
    CLIPW             m1, m4, m5
    CLIPW             m2, m4, m5
    CLIPW             m3, m4, m5
    mova              [%1], m0
    mova              [%1+16], m1
    mova              [%1+32], m2
    mova              [%1+48], m3
%endmacro

%macro ADD_RES_AVX2_16_10 4
    mova              m0, [%4]
    mova              m1, [%4+32]
    mova              m2, [%4+64]
    mova              m3, [%4+96]
    paddw             m0, [%1+0]
    paddw             m1, [%1+%2]
    paddw             m2, [%1+%2*2]
    paddw             m3, [%1+%3]
    CLIPW             m0, m4, m5
    CLIPW             m1, m4, m5
    CLIPW             m2, m4, m5
    CLIPW             m3, m4, m5
    mova              [%1+0], m0
    mova              [%1+%2], m1
    mova              [%1+%2*2], m2
    mova              [%1+%3], m3
%endmacro

%macro ADD_RES_AVX2_32_10 3
    mova              m0, [%3]
    mova              m1, [%3+32]
    mova              m2, [%3+64]
    mova              m3, [%3+96]
    paddw             m0, [%1]
    paddw             m1, [%1+32]
    paddw             m2, [%1+%2]
    paddw             m3, [%1+%2+32]
    CLIPW             m0, m4, m5
    CLIPW             m1, m4, m5
    CLIPW             m2, m4, m5
    CLIPW             m3, m4, m5
    mova              [%1], m0
    mova              [%1+32], m1
    mova              [%1+%2], m2
    mova              [%1+%2+32], m3
%endmacro

; void ff_hevc_add_residual_<4|8|16|32>_10(pixel *dst, int16_t *block, ptrdiff_t stride)
INIT_MMX mmxext
cglobal hevc_add_residual_4_10, 3, 4, 6
    pxor              m2, m2
    mova              m3, [max_pixels_10]
    ADD_RES_MMX_4_10  r0, r2, r1
    add               r1, 16
    lea               r0, [r0+r2*2]
    ADD_RES_MMX_4_10  r0, r2, r1
    RET

INIT_XMM sse2
cglobal hevc_add_residual_8_10, 3, 4, 6
    pxor              m4, m4
    mova              m5, [max_pixels_10]
    lea               r3, [r2*3]
    ADD_RES_SSE_8_10  r0, r2, r3, r1
    lea               r0, [r0+r2*4]
    add               r1, 64
    ADD_RES_SSE_8_10  r0, r2, r3, r1
    RET

cglobal hevc_add_residual_16_10, 3, 4, 6
    pxor              m4, m4
    mova              m5, [max_pixels_10]
    ADD_RES_SSE_16_10 r0, r2, r1
%rep 7
    lea               r0, [r0+r2*2]
    add               r1, 64
    ADD_RES_SSE_16_10 r0, r2, r1
%endrep
    RET

cglobal hevc_add_residual_32_10, 3, 4, 6
    pxor              m4, m4
    mova              m5, [max_pixels_10]
    ADD_RES_SSE_32_10 r0, r1
%rep 31
    lea               r0, [r0+r2]
    add               r1, 64
    ADD_RES_SSE_32_10 r0, r1
%endrep
    RET

%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
cglobal hevc_add_residual_16_10, 3, 4, 6
    pxor              m4, m4
    mova              m5, [max_pixels_10]
    lea               r3, [r2*3]
    ADD_RES_AVX2_16_10 r0, r2, r3, r1
%rep 3
    lea               r0, [r0+r2*4]
    add               r1, 128
    ADD_RES_AVX2_16_10 r0, r2, r3, r1
%endrep
    RET

cglobal hevc_add_residual_32_10, 3, 4, 6
    pxor              m4, m4
    mova              m5, [max_pixels_10]
    ADD_RES_AVX2_32_10 r0, r2, r1
%rep 15
    lea               r0, [r0+r2*2]
    add               r1, 128
    ADD_RES_AVX2_32_10 r0, r2, r1
%endrep
    RET
%endif ;HAVE_AVX2_EXTERNAL