; /*
; * SIMD optimized idct functions for HEVC decoding
; * Copyright (c) 2014 Pierre-Edouard LEPERE
; * Copyright (c) 2014 James Almer
; *
; * This file is part of FFmpeg.
; *
; * FFmpeg is free software; you can redistribute it and/or
; * modify it under the terms of the GNU Lesser General Public
; * License as published by the Free Software Foundation; either
; * version 2.1 of the License, or (at your option) any later version.
; *
; * FFmpeg is distributed in the hope that it will be useful,
; * but WITHOUT ANY WARRANTY; without even the implied warranty of
; * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; * Lesser General Public License for more details.
; *
; * You should have received a copy of the GNU Lesser General Public
; * License along with FFmpeg; if not, write to the Free Software
; * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
; */

%include "libavutil/x86/x86util.asm"

SECTION .text

; void ff_hevc_idctHxW_dc_{8,10,12}_<opt>(int16_t *coeffs)
; %1 = HxW
; %2 = number of loops
; %3 = bitdepth
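;
; Each generated function fills the whole %1 x %1 block of coefficients with
; the rounded, right-shifted DC value. As a rough C sketch of the computation
; (not the exact FFmpeg C fallback; 'size' stands for %1 and 'bd' for the
; bit depth %3):
;
;     int dc = (coeffs[0] + (1 << (14 - bd)) + 1) >> (15 - bd);
;     for (int i = 0; i < size * size; i++)
;         coeffs[i] = dc;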
%macro IDCT_DC 3
cglobal hevc_idct%1x%1_dc_%3, 1, 2, 1, coeff, tmp
    movsx             tmpq, word [coeffq]
    add               tmpw, ((1 << 14-%3) + 1)
    sar               tmpw, (15-%3)
    movd               xm0, tmpd
    SPLATW              m0, xm0
    DEFINE_ARGS coeff, cnt
    mov               cntd, %2
.loop:
    mova [coeffq+mmsize*0], m0
    mova [coeffq+mmsize*1], m0
    mova [coeffq+mmsize*2], m0
    mova [coeffq+mmsize*3], m0
    mova [coeffq+mmsize*4], m0
    mova [coeffq+mmsize*5], m0
    mova [coeffq+mmsize*6], m0
    mova [coeffq+mmsize*7], m0
    add             coeffq, mmsize*8
    dec               cntd
    jg .loop
    RET
%endmacro

; %1 = HxW
; %2 = bitdepth
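; This no-loop variant covers the blocks that fit in at most eight vector
; stores: 4x4 with 8-byte MMX registers (four stores) and 8x8 with 16-byte
; XMM registers (eight stores), so no loop counter is needed; the
; %if mmsize == 16 guard below emits the upper four stores only for the
; XMM case.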
%macro IDCT_DC_NL 2 ; No loop
cglobal hevc_idct%1x%1_dc_%2, 1, 2, 1, coeff, tmp
    movsx             tmpq, word [coeffq]
    add               tmpw, ((1 << 14-%2) + 1)
    sar               tmpw, (15-%2)
    movd                m0, tmpd
    SPLATW              m0, xm0
    mova [coeffq+mmsize*0], m0
    mova [coeffq+mmsize*1], m0
    mova [coeffq+mmsize*2], m0
    mova [coeffq+mmsize*3], m0
%if mmsize == 16
    mova [coeffq+mmsize*4], m0
    mova [coeffq+mmsize*5], m0
    mova [coeffq+mmsize*6], m0
    mova [coeffq+mmsize*7], m0
%endif
    RET
%endmacro
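
; For the looped variant, the number of iterations (%2) is chosen so that
; loops * 8 * mmsize bytes covers the whole block of int16_t coefficients:
;
;     loops = size * size * 2 / (mmsize * 8)
;
; e.g. a 32x32 block holds 2048 bytes, giving 16 iterations with SSE2
; (mmsize = 16) and 8 with AVX2 (mmsize = 32).
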
; 8-bit
INIT_MMX mmxext
IDCT_DC_NL  4,      8
IDCT_DC     8,  2,  8

INIT_XMM sse2
IDCT_DC_NL  8,      8
IDCT_DC    16,  4,  8
IDCT_DC    32, 16,  8

%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
IDCT_DC    16,  2,  8
IDCT_DC    32,  8,  8
%endif ;HAVE_AVX2_EXTERNAL

; 10-bit
INIT_MMX mmxext
IDCT_DC_NL  4,     10
IDCT_DC     8,  2, 10

INIT_XMM sse2
IDCT_DC_NL  8,     10
IDCT_DC    16,  4, 10
IDCT_DC    32, 16, 10

%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
IDCT_DC    16,  2, 10
IDCT_DC    32,  8, 10
%endif ;HAVE_AVX2_EXTERNAL

; 12-bit
INIT_MMX mmxext
IDCT_DC_NL  4,     12
IDCT_DC     8,  2, 12

INIT_XMM sse2
IDCT_DC_NL  8,     12
IDCT_DC    16,  4, 12
IDCT_DC    32, 16, 12

%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
IDCT_DC    16,  2, 12
IDCT_DC    32,  8, 12
%endif ;HAVE_AVX2_EXTERNAL