/*
 * Alpha optimized DSP utils
 * Copyright (c) 2002 Falk Hueffner <falk@debian.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * These functions are scheduled for pca56. They should work
 * reasonably well on ev6, though.
 */

#include "regdef.h"

/* Some nicer register names.  */
#define ta t10
#define tb t11
#define tc t12
#define td AT
/* Danger: these overlap with the argument list and the return value */
#define te a5
#define tf a4
#define tg a3
#define th v0

        .set noat
        .set noreorder
        .arch pca56
        .text

/************************************************************************
 * void put_pixels_axp_asm(uint8_t *block, const uint8_t *pixels,
 *                         int line_size, int h)
 */
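/*
 * For reference, a plain C sketch of what this routine does (kept in a
 * comment so the file still assembles; the function name is only for
 * illustration).  Note that the asm copies four rows per iteration, so
 * h is assumed to be a multiple of 4.
 *
 *     static void put_pixels8_c_sketch(uint8_t *block, const uint8_t *pixels,
 *                                      int line_size, int h)
 *     {
 *         int i;
 *         for (i = 0; i < h; i++) {
 *             memcpy(block, pixels, 8);   // copy one 8-pixel row
 *             block  += line_size;
 *             pixels += line_size;
 *         }
 *     }
 */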
        .align 6
        .globl put_pixels_axp_asm
        .ent put_pixels_axp_asm
put_pixels_axp_asm:
        .frame sp, 0, ra
        .prologue 0

#if CONFIG_GPROF
        lda     AT, _mcount
        jsr     AT, (AT), _mcount
#endif

        and     a1, 7, t0
        beq     t0, $aligned

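        /* Unaligned source: each 8-byte row is covered by two ldq_u
           loads that are merged with extql/extqh + or.  The extract
           offset comes from the low three bits of a1; this relies on
           line_size being a multiple of 8, so those bits do not change
           while a1 is advanced from row to row.  Four rows are copied
           per iteration. */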
        .align 4
$unaligned:
        ldq_u   t0, 0(a1)
        ldq_u   t1, 8(a1)
        addq    a1, a2, a1
        nop

        ldq_u   t2, 0(a1)
        ldq_u   t3, 8(a1)
        addq    a1, a2, a1
        nop

        ldq_u   t4, 0(a1)
        ldq_u   t5, 8(a1)
        addq    a1, a2, a1
        nop

        ldq_u   t6, 0(a1)
        ldq_u   t7, 8(a1)
        extql   t0, a1, t0
        addq    a1, a2, a1

        extqh   t1, a1, t1
        addq    a0, a2, t8
        extql   t2, a1, t2
        addq    t8, a2, t9

        extqh   t3, a1, t3
        addq    t9, a2, ta
        extql   t4, a1, t4
        or      t0, t1, t0

        extqh   t5, a1, t5
        or      t2, t3, t2
        extql   t6, a1, t6
        or      t4, t5, t4

        extqh   t7, a1, t7
        or      t6, t7, t6
        stq     t0, 0(a0)
        stq     t2, 0(t8)

        stq     t4, 0(t9)
        subq    a3, 4, a3
        stq     t6, 0(ta)
        addq    ta, a2, a0

        bne     a3, $unaligned
        ret

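        /* Aligned source: plain ldq/stq, four rows per iteration
           (stq needs an 8-byte aligned destination, which both paths
           assume). */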
        .align 4
$aligned:
        ldq     t0, 0(a1)
        addq    a1, a2, a1
        ldq     t1, 0(a1)
        addq    a1, a2, a1

        ldq     t2, 0(a1)
        addq    a1, a2, a1
        ldq     t3, 0(a1)

        addq    a0, a2, t4
        addq    a1, a2, a1
        addq    t4, a2, t5
        subq    a3, 4, a3

        stq     t0, 0(a0)
        addq    t5, a2, t6
        stq     t1, 0(t4)
        addq    t6, a2, a0

        stq     t2, 0(t5)
        stq     t3, 0(t6)

        bne     a3, $aligned
        ret
        .end put_pixels_axp_asm

/************************************************************************
 * void put_pixels_clamped_mvi_asm(const DCTELEM *block, uint8_t *pixels,
 *                                 int line_size)
 */
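/*
 * For reference, a plain C sketch of what this routine does (kept in a
 * comment, function name only for illustration; DCTELEM is assumed to
 * be a 16-bit type and the block an 8x8 array of coefficients).  The
 * asm clamps and packs two rows per loop iteration with
 * maxsw4/minsw4/pkwb.
 *
 *     static void put_pixels_clamped_c_sketch(const DCTELEM *block,
 *                                             uint8_t *pixels, int line_size)
 *     {
 *         int i, j;
 *         for (i = 0; i < 8; i++) {
 *             for (j = 0; j < 8; j++) {
 *                 int v = block[i * 8 + j];
 *                 pixels[j] = v < 0 ? 0 : v > 255 ? 255 : v;  // clamp to 8 bits
 *             }
 *             pixels += line_size;
 *         }
 *     }
 */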
        .align 6
        .globl put_pixels_clamped_mvi_asm
        .ent put_pixels_clamped_mvi_asm
put_pixels_clamped_mvi_asm:
        .frame sp, 0, ra
        .prologue 0

#if CONFIG_GPROF
        lda     AT, _mcount
        jsr     AT, (AT), _mcount
#endif

        lda     t8, -1
        lda     t9, 8           # loop counter
        zap     t8, 0xaa, t8    # 0x00ff00ff00ff00ff

        .align 4
1:      ldq     t0,  0(a0)
        ldq     t1,  8(a0)
        ldq     t2, 16(a0)
        ldq     t3, 24(a0)

        maxsw4  t0, zero, t0
        subq    t9, 2, t9
        maxsw4  t1, zero, t1
        lda     a0, 32(a0)

        maxsw4  t2, zero, t2
        addq    a1, a2, ta
        maxsw4  t3, zero, t3
        minsw4  t0, t8, t0

        minsw4  t1, t8, t1
        minsw4  t2, t8, t2
        minsw4  t3, t8, t3
        pkwb    t0, t0

        pkwb    t1, t1
        pkwb    t2, t2
        pkwb    t3, t3
        stl     t0, 0(a1)

        stl     t1, 4(a1)
        addq    ta, a2, a1
        stl     t2, 0(ta)
        stl     t3, 4(ta)

        bne     t9, 1b
        ret
        .end put_pixels_clamped_mvi_asm

/************************************************************************
 * void add_pixels_clamped_mvi_asm(const DCTELEM *block, uint8_t *pixels,
 *                                 int line_size)
 */
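/*
 * For reference, a plain C sketch of what this routine does (kept in a
 * comment, function name only for illustration): each 16-bit
 * coefficient is added to the corresponding pixel and the sum is
 * clamped back into the 0..255 range.
 *
 *     static void add_pixels_clamped_c_sketch(const DCTELEM *block,
 *                                             uint8_t *pixels, int line_size)
 *     {
 *         int i, j;
 *         for (i = 0; i < 8; i++) {
 *             for (j = 0; j < 8; j++) {
 *                 int v = block[i * 8 + j] + pixels[j];
 *                 pixels[j] = v < 0 ? 0 : v > 255 ? 255 : v;  // clamp to 8 bits
 *             }
 *             pixels += line_size;
 *         }
 *     }
 */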
        .align 6
        .globl add_pixels_clamped_mvi_asm
        .ent add_pixels_clamped_mvi_asm
add_pixels_clamped_mvi_asm:
        .frame sp, 0, ra
        .prologue 0

#if CONFIG_GPROF
        lda     AT, _mcount
        jsr     AT, (AT), _mcount
#endif

        lda     t1, -1
        lda     th, 8
        zap     t1, 0x33, tg
        nop

        srl     tg, 1, t0
        xor     tg, t0, tg      # 0x8000800080008000
        zap     t1, 0xaa, tf    # 0x00ff00ff00ff00ff

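        /* Each ldq picks up four 16-bit coefficients.  To add them to
           the unpacked (unpkbw) pixel words with one addq and without
           a carry spilling from one word into the next, the sign bits
           (mask tg) are stripped off before the add and xor'ed back in
           afterwards; maxsw4/minsw4 then clamp every word to 0..255
           before pkwb repacks the four bytes. */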
        .align 4
1:      ldl     t1, 0(a1)       # pix0 (try to hit cache line soon)
        ldl     t4, 4(a1)       # pix1
        addq    a1, a2, te      # pixels += line_size
        ldq     t0, 0(a0)       # shorts0

        ldl     t7, 0(te)       # pix2 (try to hit cache line soon)
        ldl     ta, 4(te)       # pix3
        ldq     t3, 8(a0)       # shorts1
        ldq     t6, 16(a0)      # shorts2

        ldq     t9, 24(a0)      # shorts3
        unpkbw  t1, t1          # 0 0 (quarter/op no.)
        and     t0, tg, t2      # 0 1
        unpkbw  t4, t4          # 1 0

        bic     t0, tg, t0      # 0 2
        unpkbw  t7, t7          # 2 0
        and     t3, tg, t5      # 1 1
        addq    t0, t1, t0      # 0 3

        xor     t0, t2, t0      # 0 4
        unpkbw  ta, ta          # 3 0
        and     t6, tg, t8      # 2 1
        maxsw4  t0, zero, t0    # 0 5

        bic     t3, tg, t3      # 1 2
        bic     t6, tg, t6      # 2 2
        minsw4  t0, tf, t0      # 0 6
        addq    t3, t4, t3      # 1 3

        pkwb    t0, t0          # 0 7
        xor     t3, t5, t3      # 1 4
        maxsw4  t3, zero, t3    # 1 5
        addq    t6, t7, t6      # 2 3

        xor     t6, t8, t6      # 2 4
        and     t9, tg, tb      # 3 1
        minsw4  t3, tf, t3      # 1 6
        bic     t9, tg, t9      # 3 2

        maxsw4  t6, zero, t6    # 2 5
        addq    t9, ta, t9      # 3 3
        stl     t0, 0(a1)       # 0 8
        minsw4  t6, tf, t6      # 2 6

        xor     t9, tb, t9      # 3 4
        maxsw4  t9, zero, t9    # 3 5
        lda     a0, 32(a0)      # block += 16;
        pkwb    t3, t3          # 1 7

        minsw4  t9, tf, t9      # 3 6
        subq    th, 2, th
        pkwb    t6, t6          # 2 7
        pkwb    t9, t9          # 3 7

        stl     t3, 4(a1)       # 1 8
        addq    te, a2, a1      # pixels += line_size
        stl     t6, 0(te)       # 2 8
        stl     t9, 4(te)       # 3 8

        bne     th, 1b
        ret
        .end add_pixels_clamped_mvi_asm