;*****************************************************************************
;* MMX/SSE2/AVX-optimized 10-bit H.264 chroma MC code
;*****************************************************************************
;* Copyright (C) 2005-2011 x264 project
;*
;* Authors: Daniel Kang <daniel.d.kang@gmail.com>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "x86inc.asm"
%include "x86util.asm"
SECTION_RODATA
cextern pw_4
cextern pw_8
cextern pw_32
cextern pw_64
SECTION .text
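; Fast path for mx == my == 0: plain 8-pixel-wide row copies (or averages,
; when CHROMAMC_AVG is plugged in), unrolled four rows per iteration.
; The loop below assumes h is a multiple of 4.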
%macro MV0_PIXELS_MC8 0
lea r4, [r2*3 ]
lea r5, [r2*4 ]
.next4rows:
movu m0, [r1 ]
movu m1, [r1+r2 ]
CHROMAMC_AVG m0, [r0 ]
CHROMAMC_AVG m1, [r0+r2 ]
mova [r0 ], m0
mova [r0+r2 ], m1
movu m0, [r1+r2*2]
movu m1, [r1+r4 ]
CHROMAMC_AVG m0, [r0+r2*2]
CHROMAMC_AVG m1, [r0+r4 ]
mova [r0+r2*2], m0
mova [r0+r4 ], m1
add r1, r5
add r0, r5
sub r3d, 4
jne .next4rows
%endmacro
;-----------------------------------------------------------------------------
; void put/avg_h264_chroma_mc8(pixel *dst, pixel *src, int stride, int h, int mx, int my)
;-----------------------------------------------------------------------------
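; As a reference, a minimal scalar sketch of the computation the SIMD code
; below implements (illustrative only, not part of the build; "stride" is in
; pixels here, while the assembly works with byte offsets):
;
;   const int A = (8 - mx) * (8 - my), B = mx * (8 - my),
;             C = (8 - mx) * my,       D = mx * my;
;   for (int i = 0; i < h; i++) {
;       for (int j = 0; j < 8; j++)
;           dst[j] = (A * src[j]          + B * src[j + 1] +
;                     C * src[j + stride] + D * src[j + stride + 1] + 32) >> 6;
;       dst += stride;
;       src += stride;
;   }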
%macro CHROMA_MC8 2
; put/avg_h264_chroma_mc8_*(pixel *dst /* aligned 16 */, pixel *src /* unaligned */,
;                           int stride /* in bytes */, int h, int mx, int my)
cglobal %1_h264_chroma_mc8_10_%2, 6,7,8
movsxdifnidn r2, r2d
mov r6d, r5d
or r6d, r4d
jne .at_least_one_non_zero
; mx == 0 AND my == 0 - no filter needed
MV0_PIXELS_MC8
REP_RET
.at_least_one_non_zero:
mov r6d, 2
test r5d, r5d
je .x_interpolation
mov r6, r2 ; dxy = mx ? 2 : stride (one 10-bit pixel = 2 bytes)
test r4d, r4d
jne .xy_interpolation
.x_interpolation:
; mx == 0 XOR my == 0 - 1 dimensional filter only
or r4d, r5d ; x + y
movd m5, r4d
mova m4, [pw_8]
mova m6, [pw_4] ; mm6 = rnd >> 3
SPLATW m5, m5 ; mm5 = B = x
psubw m4, m5 ; mm4 = A = 8-x
.next1drow:
movu m0, [r1 ] ; mm0 = src[0..7]
movu m2, [r1+r6] ; mm2 = src[1..8]
pmullw m0, m4 ; mm0 = A * src[0..7]
pmullw m2, m5 ; mm2 = B * src[1..8]
paddw m0, m6
paddw m0, m2
psrlw m0, 3
CHROMAMC_AVG m0, [r0]
mova [r0], m0 ; dst[0..7] = (A * src[0..7] + B * src[1..8] + (rnd >> 3)) >> 3
add r0, r2
add r1, r2
dec r3d
jne .next1drow
REP_RET
.xy_interpolation: ; general case, bilinear
movd m4, r4m ; x
movd m6, r5m ; y
SPLATW m4, m4 ; mm4 = x words
SPLATW m6, m6 ; mm6 = y words
psllw m5, m4, 3 ; mm5 = 8x
pmullw m4, m6 ; mm4 = x * y
psllw m6, 3 ; mm6 = 8y
paddw m1, m5, m6 ; mm1 = 8x+8y
mova m7, m4 ; mm7 = D = x * y
psubw m5, m4 ; mm5 = B = 8x - xy
psubw m6, m4 ; mm6 = C = 8y - xy
paddw m4, [pw_64]
psubw m4, m1 ; mm4 = A = xy - (8x+8y) + 64
movu m0, [r1 ] ; mm0 = src[0..7]
movu m1, [r1+2] ; mm1 = src[1..8]
.next2drow:
add r1, r2
pmullw m2, m0, m4
pmullw m1, m5
paddw m2, m1 ; mm2 = A * src[0..7] + B * src[1..8]
movu m0, [r1]
movu m1, [r1+2]
pmullw m3, m0, m6
paddw m2, m3 ; mm2 += C * src[stride+0..7]
pmullw m3, m1, m7
paddw m2, m3 ; mm2 += D * src[stride+1..8]
paddw m2, [pw_32]
psrlw m2, 6
CHROMAMC_AVG m2, [r0]
mova [r0], m2 ; dst[0..7] = (mm2 + 32) >> 6
add r0, r2
dec r3d
jne .next2drow
REP_RET
%endmacro
;-----------------------------------------------------------------------------
; void put/avg_h264_chroma_mc4(pixel *dst, pixel *src, int stride, int h, int mx, int my)
;-----------------------------------------------------------------------------
;TODO: xmm mc4
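; One MC4_OP invocation emits one 4-pixel output row. It computes the new
; row's horizontal blend h_cur[j] = (8-mx)*src[j] + mx*src[j+1], keeps it in
; %1 for the next call, and blends it vertically with the previous row's
; blend passed in %2:
;   dst[j] = ((8-my) * h_prev[j] + my * h_cur[j] + 32) >> 6
; which is algebraically the same 2D filter as in the mc8 sketch above.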
%macro MC4_OP 2
movq %1, [r1 ]             ; %1 = src[0..3]
movq m1, [r1+2]            ; m1 = src[1..4]
add r1, r2
pmullw %1, m4              ; %1 = (8-x) * src[0..3]
pmullw m1, m2              ; m1 = x * src[1..4]
paddw m1, %1               ; m1 = h_cur, this row's horizontal blend
mova %1, m1                ; save h_cur in %1 for the next MC4_OP call
pmullw %2, m5              ; %2 = (8-y) * h_prev
pmullw m1, m3              ; m1 = y * h_cur
paddw %2, [pw_32]          ; rounding constant
paddw m1, %2
psrlw m1, 6                ; m1 = ((8-y)*h_prev + y*h_cur + 32) >> 6
CHROMAMC_AVG m1, %2, [r0]
movq [r0], m1
add r0, r2
%endmacro
%macro CHROMA_MC4 2
cglobal %1_h264_chroma_mc4_10_%2, 6,6,7
movsxdifnidn r2, r2d
movd m2, r4m ; x
movd m3, r5m ; y
mova m4, [pw_8]
mova m5, m4
SPLATW m2, m2              ; m2 = x words
SPLATW m3, m3              ; m3 = y words
psubw m4, m2               ; m4 = 8-x
psubw m5, m3               ; m5 = 8-y
movq m0, [r1 ]             ; prime the loop: first row's horizontal blend
movq m6, [r1+2]
add r1, r2
pmullw m0, m4
pmullw m6, m2
paddw m6, m0               ; m6 = h_prev going into the first MC4_OP
.next2rows:
MC4_OP m0, m6
MC4_OP m6, m0
sub r3d, 2
jnz .next2rows
REP_RET
%endmacro
;-----------------------------------------------------------------------------
; void put/avg_h264_chroma_mc2(pixel *dst, pixel *src, int stride, int h, int mx, int my)
;-----------------------------------------------------------------------------
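; mc2 packs the four filter coefficients into doubleword pairs so a single
; pmaddwd per row produces both output pixels at once:
;   pmaddwd(src[0,1,1,2], {A,B,A,B}) = {A*src[0] + B*src[1],
;                                       A*src[1] + B*src[2]}
; with {C,D,C,D} doing the same for the next row.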
%macro CHROMA_MC2 2
cglobal %1_h264_chroma_mc2_10_%2, 6,7
movsxdifnidn r2, r2d
mov r6d, r4d
shl r4d, 16
sub r4d, r6d
add r4d, 8
imul r5d, r4d ; x*y<<16 | y*(8-x)
shl r4d, 3
sub r4d, r5d ; x*(8-y)<<16 | (8-x)*(8-y)
movd m5, r4d
movd m6, r5d
punpckldq m5, m5 ; mm5 = {A,B,A,B}
punpckldq m6, m6 ; mm6 = {C,D,C,D}
pxor m7, m7
pshufw m2, [r1], 0x94 ; mm2 = src[0,1,1,2]
.nextrow:
add r1, r2
movq m1, m2
pmaddwd m1, m5 ; mm1 = A * src[0,1] + B * src[1,2]
pshufw m0, [r1], 0x94 ; mm0 = src[0,1,1,2]
movq m2, m0
pmaddwd m0, m6
paddw m1, [pw_32]
paddw m1, m0 ; mm1 += C * src[0,1] + D * src[1,2]
psrlw m1, 6
packssdw m1, m7
CHROMAMC_AVG m1, m3, [r0]
movd [r0], m1
add r0, r2
dec r3d
jnz .nextrow
REP_RET
%endmacro
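; CHROMAMC_AVG is the hook that differentiates the put and avg variants:
; NOTHING leaves the computed pixels as-is (put), while AVG optionally loads
; the existing destination (third operand) and averages with it via pavgw,
; i.e. dst = (dst + result + 1) >> 1 per word (avg).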
%macro NOTHING 2-3
%endmacro
%macro AVG 2-3
%if %0==3
movq %2, %3
%endif
PAVG %1, %2
%endmacro
%define CHROMAMC_AVG NOTHING
INIT_XMM
CHROMA_MC8 put, sse2
%ifdef HAVE_AVX
INIT_AVX
CHROMA_MC8 put, avx
%endif
INIT_MMX
CHROMA_MC4 put, mmxext
CHROMA_MC2 put, mmxext
%define CHROMAMC_AVG AVG
%define PAVG pavgw
INIT_XMM
CHROMA_MC8 avg, sse2
%ifdef HAVE_AVX
INIT_AVX
CHROMA_MC8 avg, avx
%endif
INIT_MMX
CHROMA_MC4 avg, mmxext
CHROMA_MC2 avg, mmxext