;*****************************************************************************
;* x86-optimized functions for ssim filter
;*
;* Copyright (C) 2015 Ronald S. Bultje <rsbultje@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

pw_1: times 8 dw 1
ssim_c1: times 4 dd 416 ;(.01*.01*255*255*64 + .5)
ssim_c2: times 4 dd 235963 ;(.03*.03*255*255*64*63 + .5)
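; SSIM stabilizing constants, cf. Wang et al.: C1 = (K1*L)^2 and
; C2 = (K2*L)^2 with K1 = 0.01, K2 = 0.03 and dynamic range L = 255,
; here pre-scaled (by 64 resp. 64*63) to match the fixed-point sums over
; 8x8 = 64 pixel windows that ssim_end_line feeds them into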

SECTION .text

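; ssim_4x4_line(buf, buf_stride, ref, ref_stride, sums, w):
; for each of the w horizontally consecutive 4x4 blocks, accumulate four
; 32-bit sums over the block's 16 pixel pairs (a from buf, b from ref)
; and store them as consecutive {s1, s2, ss, s12} dword records in sums.
; Roughly the following scalar logic (a hedged sketch of the intended
; behaviour, not copied from the C version in vf_ssim.c):
;
;   for (int z = 0; z < w; z++) {
;       int s1 = 0, s2 = 0, ss = 0, s12 = 0;
;       for (int y = 0; y < 4; y++)
;           for (int x = 0; x < 4; x++) {
;               int a = buf[4 * z + x + y * buf_stride];
;               int b = ref[4 * z + x + y * ref_stride];
;               s1  += a;
;               s2  += b;
;               ss  += a * a + b * b;
;               s12 += a * b;
;           }
;       sums[4 * z + 0] = s1;
;       sums[4 * z + 1] = s2;
;       sums[4 * z + 2] = ss;
;       sums[4 * z + 3] = s12;
;   }
;
; The SIMD code below handles two blocks ("a" and "b" in the register
; notes) per loop iteration.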
%macro SSIM_4X4_LINE 1
%if ARCH_X86_64
cglobal ssim_4x4_line, 6, 8, %1, buf, buf_stride, ref, ref_stride, sums, w, buf_stride3, ref_stride3
%else
cglobal ssim_4x4_line, 5, 7, %1, buf, buf_stride, ref, ref_stride, sums, buf_stride3, ref_stride3
%define wd r5mp
%endif
    lea     ref_stride3q, [ref_strideq*3]
    lea     buf_stride3q, [buf_strideq*3]
%if notcpuflag(xop)
    pxor              m7, m7                    ; zero, for byte->word unpacking
    mova             m15, [pw_1]                ; for horizontal word sums
%endif

.loop:
%if cpuflag(xop)
    pmovzxbw          m0, [bufq+buf_strideq*0]
    pmovzxbw          m1, [refq+ref_strideq*0]
    pmaddwd           m4, m0, m0
    pmaddwd           m6, m0, m1
    pmovzxbw          m2, [bufq+buf_strideq*1]
    vpmadcswd         m4, m1, m1, m4
    pmovzxbw          m3, [refq+ref_strideq*1]
    paddw             m0, m2
    vpmadcswd         m4, m2, m2, m4
    vpmadcswd         m6, m2, m3, m6
    paddw             m1, m3
    vpmadcswd         m4, m3, m3, m4

    pmovzxbw          m2, [bufq+buf_strideq*2]
    pmovzxbw          m3, [refq+ref_strideq*2]
    vpmadcswd         m4, m2, m2, m4
    vpmadcswd         m6, m2, m3, m6
    pmovzxbw          m5, [bufq+buf_stride3q]
    pmovzxbw          m7, [refq+ref_stride3q]
    vpmadcswd         m4, m3, m3, m4
    vpmadcswd         m6, m5, m7, m6
    paddw             m0, m2
    paddw             m1, m3
    vpmadcswd         m4, m5, m5, m4
    paddw             m0, m5
    paddw             m1, m7
    vpmadcswd         m4, m7, m7, m4
%else
    movh              m0, [bufq+buf_strideq*0]  ; a1
    movh              m1, [refq+ref_strideq*0]  ; b1
    movh              m2, [bufq+buf_strideq*1]  ; a2
    movh              m3, [refq+ref_strideq*1]  ; b2
    punpcklbw         m0, m7                    ; s1 [word]
    punpcklbw         m1, m7                    ; s2 [word]
    punpcklbw         m2, m7                    ; s1 [word]
    punpcklbw         m3, m7                    ; s2 [word]
    pmaddwd           m4, m0, m0                ; a1 * a1
    pmaddwd           m5, m1, m1                ; b1 * b1
    pmaddwd           m8, m2, m2                ; a2 * a2
    pmaddwd           m9, m3, m3                ; b2 * b2
    paddd             m4, m5                    ; ss
    paddd             m8, m9                    ; ss
    pmaddwd           m6, m0, m1                ; a1 * b1 = ss12
    pmaddwd           m5, m2, m3                ; a2 * b2 = ss12
    paddw             m0, m2
    paddw             m1, m3
    paddd             m6, m5                    ; s12
    paddd             m4, m8                    ; ss

    movh              m2, [bufq+buf_strideq*2]  ; a3
    movh              m3, [refq+ref_strideq*2]  ; b3
    movh              m5, [bufq+buf_stride3q]   ; a4
    movh              m8, [refq+ref_stride3q]   ; b4
    punpcklbw         m2, m7                    ; s1 [word]
    punpcklbw         m3, m7                    ; s2 [word]
    punpcklbw         m5, m7                    ; s1 [word]
    punpcklbw         m8, m7                    ; s2 [word]
    pmaddwd           m9, m2, m2                ; a3 * a3
    pmaddwd          m10, m3, m3                ; b3 * b3
    pmaddwd          m12, m5, m5                ; a4 * a4
    pmaddwd          m13, m8, m8                ; b4 * b4
    pmaddwd          m11, m2, m3                ; a3 * b3 = ss12
    pmaddwd          m14, m5, m8                ; a4 * b4 = ss12
    paddd             m9, m10
    paddd            m12, m13
    paddw             m0, m2
    paddw             m1, m3
    paddw             m0, m5
    paddw             m1, m8
    paddd             m6, m11
    paddd             m4, m9
    paddd             m6, m14
    paddd             m4, m12
%endif

    ; m0 = [word] s1 a,a,a,a,b,b,b,b
    ; m1 = [word] s2 a,a,a,a,b,b,b,b
    ; m4 = [dword] ss a,a,b,b
    ; m6 = [dword] s12 a,a,b,b
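    ; the shuffles below reduce these to one {s1, s2, ss, s12} dword record
    ; per block: words are summed horizontally (pmaddwd against pw_1, or
    ; vphaddwq on XOP), dwords pairwise, and the results are packed so that
    ; m0 holds block a's record and m1 block b's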

%if cpuflag(xop)
    vphaddwq          m0, m0                    ; [dword] s1  a, 0, b, 0
    vphaddwq          m1, m1                    ; [dword] s2  a, 0, b, 0
    vphadddq          m4, m4                    ; [dword] ss  a, 0, b, 0
    vphadddq          m6, m6                    ; [dword] s12 a, 0, b, 0
    punpckhdq         m2, m0, m1                ; [dword] s1  b, s2 b, 0, 0
    punpckldq         m0, m1                    ; [dword] s1  a, s2 a, 0, 0
    punpckhdq         m3, m4, m6                ; [dword] ss  b, s12 b, 0, 0
    punpckldq         m4, m6                    ; [dword] ss  a, s12 a, 0, 0
    punpcklqdq        m1, m2, m3                ; [dword] b s1, s2, ss, s12
    punpcklqdq        m0, m4                    ; [dword] a s1, s2, ss, s12
%else
    pmaddwd           m0, m15                   ; [dword] s1 a,a,b,b
    pmaddwd           m1, m15                   ; [dword] s2 a,a,b,b
    phaddd            m0, m4                    ; [dword] s1 a, b, ss a, b
    phaddd            m1, m6                    ; [dword] s2 a, b, s12 a, b
    punpckhdq         m2, m0, m1                ; [dword] ss a, s12 a, ss b, s12 b
    punpckldq         m0, m1                    ; [dword] s1 a, s2 a, s1 b, s2 b
    punpckhqdq        m1, m0, m2                ; [dword] b s1, s2, ss, s12
    punpcklqdq        m0, m2                    ; [dword] a s1, s2, ss, s12
%endif

    mova  [sumsq+     0], m0
    mova  [sumsq+mmsize], m1

    add             bufq, mmsize/2              ; advance 2 blocks (8 pixels)
    add             refq, mmsize/2
    add            sumsq, mmsize*2              ; 2 records of 4 dwords each
    sub               wd, mmsize/8              ; 2 blocks per iteration
    jg .loop
    RET
%endmacro

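; the ssse3 variant uses 16 XMM registers (up to m15), so it is built only
; on x86-64; the xop variant fits in m0-m7 and also works on x86-32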
%if ARCH_X86_64
INIT_XMM ssse3
SSIM_4X4_LINE 16
%endif
%if HAVE_XOP_EXTERNAL
INIT_XMM xop
SSIM_4X4_LINE 8
%endif

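; ssim_end_line(sum0, sum1, w):
; combine two adjacent lines of 4x4-block sums into overlapping 8x8
; windows (window i uses blocks i and i+1 of both lines), evaluate the
; SSIM formula per window, and return the float sum over all w windows.
; Roughly the following scalar logic (a hedged sketch; sum0/sum1 viewed
; as arrays of {s1, s2, ss, s12} records):
;
;   float ssim = 0.;
;   for (int i = 0; i < w; i++) {
;       float fs1  = sum0[i][0] + sum0[i+1][0] + sum1[i][0] + sum1[i+1][0];
;       float fs2  = sum0[i][1] + sum0[i+1][1] + sum1[i][1] + sum1[i+1][1];
;       float fss  = sum0[i][2] + sum0[i+1][2] + sum1[i][2] + sum1[i+1][2];
;       float fs12 = sum0[i][3] + sum0[i+1][3] + sum1[i][3] + sum1[i+1][3];
;       float vars  = fss  * 64 - fs1 * fs1 - fs2 * fs2;
;       float covar = fs12 * 64 - fs1 * fs2;
;       ssim += (2 * fs1 * fs2 + ssim_c1) * (2 * covar + ssim_c2)
;             / ((fs1 * fs1 + fs2 * fs2 + ssim_c1) * (vars + ssim_c2));
;   }
;   return ssim;
;
; The SIMD version processes four windows per iteration and subtracts the
; lanes it over-computed in the final, partial iteration.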
INIT_XMM sse4
cglobal ssim_end_line, 3, 3, 6, sum0, sum1, w
    pxor              m0, m0                    ; running ssim sum (4 float lanes)
.loop:
    mova              m1, [sum0q+mmsize*0]      ; {s1,s2,ss,s12} of block 0, line 0
    mova              m2, [sum0q+mmsize*1]      ; block 1
    mova              m3, [sum0q+mmsize*2]      ; block 2
    mova              m4, [sum0q+mmsize*3]      ; block 3
    paddd             m1, [sum1q+mmsize*0]      ; + block 0, line 1
    paddd             m2, [sum1q+mmsize*1]
    paddd             m3, [sum1q+mmsize*2]
    paddd             m4, [sum1q+mmsize*3]
    paddd             m1, m2                    ; window 0 = blocks 0+1
    paddd             m2, m3                    ; window 1 = blocks 1+2
    paddd             m3, m4                    ; window 2 = blocks 2+3
    paddd             m4, [sum0q+mmsize*4]      ; window 3 = blocks 3+4
    paddd             m4, [sum1q+mmsize*4]
    TRANSPOSE4x4D      1, 2, 3, 4, 5

    ; m1 = fs1, m2 = fs2, m3 = fss, m4 = fs12
    pslld             m3, 6                     ; fss * 64
    pslld             m4, 6                     ; fs12 * 64
    pmulld            m5, m1, m2                ; fs1 * fs2
    pmulld            m1, m1                    ; fs1 * fs1
    pmulld            m2, m2                    ; fs2 * fs2
    psubd             m3, m1
    psubd             m4, m5                    ; covariance
    psubd             m3, m2                    ; variance

    ; m1 = fs1 * fs1, m2 = fs2 * fs2, m3 = variance, m4 = covariance, m5 = fs1 * fs2
    paddd             m4, m4                    ; 2 * covariance
    paddd             m5, m5                    ; 2 * fs1 * fs2
    paddd             m1, m2                    ; fs1 * fs1 + fs2 * fs2
    paddd             m3, [ssim_c2]             ; variance + ssim_c2
    paddd             m4, [ssim_c2]             ; 2 * covariance + ssim_c2
    paddd             m5, [ssim_c1]             ; 2 * fs1 * fs2 + ssim_c1
    paddd             m1, [ssim_c1]             ; fs1 * fs1 + fs2 * fs2 + ssim_c1
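    ; m5 = luminance numerator, m4 = contrast numerator,
    ; m1 = luminance denominator, m3 = contrast denominator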

    ; convert to float
    cvtdq2ps          m3, m3
    cvtdq2ps          m4, m4
    cvtdq2ps          m5, m5
    cvtdq2ps          m1, m1
    mulps             m4, m5
    mulps             m3, m1
    divps             m4, m3                    ; ssim_endl
    addps             m0, m4                    ; ssim
    add            sum0q, mmsize*4
    add            sum1q, mmsize*4
    sub               wd, 4                     ; 4 windows per iteration
    jg .loop

    ; subtract the windows we computed past the end of the line
    test              wd, wd
    jz .end
    add               wd, 4
    test              wd, 2
    jz .skip2
    psrldq            m4, 8
.skip2:
    test              wd, 1
    jz .skip1
    psrldq            m4, 4
.skip1:
    subps             m0, m4

.end:
    movhlps           m4, m0                    ; horizontal sum of m0's 4 lanes
    addps             m0, m4
    movss             m4, m0
    shufps            m0, m0, 1
    addss             m0, m4
%if ARCH_X86_32
    movss            r0m, m0                    ; x86-32 returns floats in st0,
    fld             r0mp                        ; so bounce through the stack
%endif
    RET