1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
|
/*
* Simple IDCT
*
* Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at>
* Copyright (c) 2007 Mans Rullgard <mru@inprovide.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
 * Fixed-point IDCT coefficients: Wi = cos(i*M_PI/16)*sqrt(2)*(1<<14), rounded.
 * NOTE(review): W4 is 16383 rather than the exact 16384 -- presumably chosen
 * to avoid accumulator overflow; confirm against the C reference simple_idct.
 */
#define W1 22725 /* cos(1*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W2 21407 /* cos(2*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W3 19266 /* cos(3*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W4 16383 /* cos(4*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W5 12873 /* cos(5*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W6 8867 /* cos(6*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W7 4520 /* cos(7*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define ROW_SHIFT 11 /* descale shift after the row (first) pass */
#define COL_SHIFT 20 /* descale shift after the column (second) pass */
/* Coefficient pairs packed into halfwords for the SMUAD/SMLAD/SMUSDX
   dual-16 multiply instructions (low | high << 16). */
#define W13 (W1 | (W3 << 16))
#define W26 (W2 | (W6 << 16))
#define W42 (W4 | (W2 << 16))
#define W42n (-W4&0xffff | (-W2 << 16))
#define W46 (W4 | (W6 << 16))
#define W57 (W5 | (W7 << 16))
.text
.align
/* Literal pool; loaded PC-relative ([pc, #(wXX-.-8)]) by the code below,
   so it must stay within load range of its users. */
w13: .long W13
w26: .long W26
w42: .long W42
w42n: .long W42n
w46: .long W46
w57: .long W57
/*
 * Start a row transform: even-part accumulators A0-A3 from row[0]/row[2]
 * and the first odd-part terms B0/B3 from row[1]/row[3].
 * shift = left-shift amount; the rounding bias 1 << (shift-1) is folded
 * into A0-A3 through the accumulate operand (a2).
 * In:  a3 = row[2,0], a4 = row[3,1]
 * Out: v1-v4 = partial A0-A3 (biased), v5 = partial B0, fp = partial B3,
 *      ip = w13, lr = w46, v7 = w57 (consumed by idct_row/idct_row4)
 */
.macro idct_row_start shift
ldr ip, [pc, #(w42-.-8)] /* ip = W4 | (W2 << 16) */
ldr lr, [pc, #(w46-.-8)] /* lr = W4 | (W6 << 16) */
ldr v7, [pc, #(w57-.-8)] /* v7 = W5 | (W7 << 16) */
mov a2, #(1<<(\shift-1)) /* rounding bias */
smlad v1, a3, ip, a2 /* v1 = A0 = W4*row[0] + W2*row[2] + bias */
smlsd v4, a3, ip, a2 /* v4 = A3 = W4*row[0] - W2*row[2] + bias */
ldr ip, [pc, #(w13-.-8)] /* ip = W1 | (W3 << 16) */
smlad v2, a3, lr, a2 /* v2 = A1 = W4*row[0] + W6*row[2] + bias */
smlsd v3, a3, lr, a2 /* v3 = A2 = W4*row[0] - W6*row[2] + bias */
smusdx fp, a4, v7 /* fp = B3 = W7*row[1] - W5*row[3] */
smuad v5, a4, ip /* v5 = B0 = W1*row[1] + W3*row[3] */
.endm
/*
Compute partial IDCT of single row.
shift = left-shift amount
a1 = source address
a3 = row[2,0] <= 2 cycles
a4 = row[3,1]
ip = w42 <= 2 cycles
Output in registers v1--v8
*/
/*
 * Complete a full-row transform after idct_row_start: fold row[4..7]
 * into the even part A0-A3 and the odd part B0-B3.
 * Note B1 is kept NEGATED in v6 (v6 = -B1); idct_finish* compensate.
 * In:  a1 = source, a4 = row[3,1], ip = w13, v7 = w57,
 *      v1-v5/fp partial sums from idct_row_start
 * Out: v1-v4 = A0-A3, v5 = B0, v6 = -B1, v7 = B2, fp = B3
 */
.macro idct_row shift
ldr lr, [a1, #12] /* lr = row[7,5] */
pkhtb a3, ip, v7, asr #16 /* a3 = W7 | (W3 << 16) */
pkhbt a2, ip, v7, lsl #16 /* a2 = W1 | (W5 << 16) */
smusdx v6, a3, a4 /* v6 = -B1 = W7*row[3] - W3*row[1] */
smlad v5, lr, v7, v5 /* B0 += W5*row[5] + W7*row[7] */
smusdx v7, a4, a2 /* v7 = B2 = W5*row[1] - W1*row[3] */
ldr a4, [pc, #(w42n-.-8)] /* a4 = -W4 | (-W2 << 16) */
smlad v7, lr, a3, v7 /* B2 += W7*row[5] + W3*row[7] */
ldr a3, [a1, #4] /* a3 = row[6,4] */
smlsdx fp, lr, ip, fp /* B3 += W3*row[5] - W1*row[7] */
ldr ip, [pc, #(w46-.-8)] /* ip = W4 | (W6 << 16) */
smlad v6, lr, a2, v6 /* B1 -= W1*row[5] + W5*row[7] */
smlad v2, a3, a4, v2 /* A1 += -W4*row[4] - W2*row[6] */
smlsd v3, a3, a4, v3 /* A2 += -W4*row[4] + W2*row[6] */
smlad v1, a3, ip, v1 /* A0 += W4*row[4] + W6*row[6] */
smlsd v4, a3, ip, v4 /* A3 += W4*row[4] - W6*row[6] */
.endm
/*
Compute partial IDCT of half row.
shift = left-shift amount
a3 = row[2,0]
a4 = row[3,1]
ip = w42
Output in registers v1--v8
*/
/*
 * Half-row variant: row[4..7] known to be zero, so only B1/B2 remain
 * (A0-A3, B0 and B3 were already produced by idct_row_start).
 * As in idct_row, B1 is left NEGATED in v6.
 * In:  a4 = row[3,1], ip = w13, v7 = w57
 * Out: v6 = -B1, v7 = B2
 */
.macro idct_row4 shift
pkhtb a3, ip, v7, asr #16 /* a3 = W7 | (W3 << 16) */
pkhbt a2, ip, v7, lsl #16 /* a2 = W1 | (W5 << 16) */
smusdx v6, a3, a4 /* v6 = -B1 = W7*row[3] - W3*row[1] */
smusdx v7, a4, a2 /* v7 = B2 = W5*row[1] - W1*row[3] */
.endm
/*
Compute final part of IDCT single row without shift.
Input in registers v1--v8
Output in registers ip, v1--v3, lr, v5--v7
*/
.macro idct_finish
/* Butterfly A +/- B without shifting. v6 holds -B1 on entry, hence the
   swapped sub/add for the A1 pair. */
add ip, v1, v5 /* ip = A0 + B0 */
sub lr, v1, v5 /* lr = A0 - B0 */
sub v1, v2, v6 /* v1 = A1 + B1 */
add v5, v2, v6 /* v5 = A1 - B1 */
add v2, v3, v7 /* v2 = A2 + B2 */
sub v6, v3, v7 /* v6 = A2 - B2 */
add v3, v4, fp /* v3 = A3 + B3 */
sub v7, v4, fp /* v7 = A3 - B3 */
.endm
/*
Compute final part of IDCT single row.
shift = right-shift amount
Input/output in registers v1--v8
*/
.macro idct_finish_shift shift
/* Butterfly A +/- B, arithmetic right shift by \shift. v6 holds -B1,
   hence the swapped sub/add for the A1 pair. */
add a4, v1, v5 /* a4 = A0 + B0 */
sub a3, v1, v5 /* a3 = A0 - B0 */
mov v1, a4, asr #\shift
mov v5, a3, asr #\shift
sub a4, v2, v6 /* a4 = A1 + B1 */
add a3, v2, v6 /* a3 = A1 - B1 */
mov v2, a4, asr #\shift
mov v6, a3, asr #\shift
add a4, v3, v7 /* a4 = A2 + B2 */
sub a3, v3, v7 /* a3 = A2 - B2 */
mov v3, a4, asr #\shift
mov v7, a3, asr #\shift
add a4, v4, fp /* a4 = A3 + B3 */
sub a3, v4, fp /* a3 = A3 - B3 */
mov v4, a4, asr #\shift
mov fp, a3, asr #\shift
.endm
/*
Compute final part of IDCT single row, saturating results at 8 bits.
shift = right-shift amount
Input/output in registers v1--v8
*/
.macro idct_finish_shift_sat shift
/* Butterfly A +/- B, shift right by \shift and saturate to [0,255]
   (usat #8). v6 holds -B1, hence the swapped sub/add for the A1 pair. */
add a4, v1, v5 /* a4 = A0 + B0 */
sub ip, v1, v5 /* ip = A0 - B0 */
usat v1, #8, a4, asr #\shift
usat v5, #8, ip, asr #\shift
sub a4, v2, v6 /* a4 = A1 + B1 */
add ip, v2, v6 /* ip = A1 - B1 */
usat v2, #8, a4, asr #\shift
usat v6, #8, ip, asr #\shift
add a4, v3, v7 /* a4 = A2 + B2 */
sub ip, v3, v7 /* ip = A2 - B2 */
usat v3, #8, a4, asr #\shift
usat v7, #8, ip, asr #\shift
add a4, v4, fp /* a4 = A3 + B3 */
sub ip, v4, fp /* ip = A3 - B3 */
usat v4, #8, a4, asr #\shift
usat fp, #8, ip, asr #\shift
.endm
/*
Compute IDCT of single row, storing as column.
a1 = source
a2 = dest
*/
.align
.func idct_row_armv6
/*
 * One-row IDCT, result stored transposed (as a column of the 16-bit
 * destination block, element stride 16 = one dest row per element).
 * Fast paths:
 *   1: row[1..7] all zero -> every output is row[0] << 3
 *   3: row[4..7] zero     -> second accumulation half skipped
 * Clobbers a2-a4, v1-v7, ip, lr, fp.
 */
idct_row_armv6:
ldr fp, [a1, #12] /* fp = row[7,5] */
ldr v7, [a1, #4] /* v7 = row[6,4] */
ldr a4, [a1, #8] /* a4 = row[3,1] */
ldr a3, [a1] /* a3 = row[2,0] */
mov ip, #(1<<(ROW_SHIFT-1)) /* rounding bias */
orrs v5, fp, v7 /* v5 = row[4..7] combined */
cmpeq v5, a4 /* ... then Z iff row[1],row[3] also zero */
cmpeq v5, a3, lsr #16 /* ... then Z iff row[2] also zero */
beq 1f /* DC-only fast path */
cmp v5, #0 /* flags live until "beq 3f": Z iff row[4..7]==0 */
stmfd sp!, {a2, lr} /* save dest pointer and return address */
ldr v5, [pc, #(w42-.-8)] /* v5 = W4 | (W2 << 16) */
ldr v6, [pc, #(w46-.-8)] /* v6 = W4 | (W6 << 16) */
ldr v7, [pc, #(w57-.-8)] /* v7 = W5 | (W7 << 16) */
smlad v1, a3, v5, ip /* v1 = A0 = W4*row[0] + W2*row[2] + bias */
smlsd v4, a3, v5, ip /* v4 = A3 = W4*row[0] - W2*row[2] + bias */
ldr a2, [pc, #(w13-.-8)] /* a2 = W1 | (W3 << 16) */
smlad v2, a3, v6, ip /* v2 = A1 = W4*row[0] + W6*row[2] + bias */
smlsd v3, a3, v6, ip /* v3 = A2 = W4*row[0] - W6*row[2] + bias */
smusdx lr, a4, v7 /* lr = B3 = W7*row[1] - W5*row[3] */
smuad v5, a4, a2 /* v5 = B0 = W1*row[1] + W3*row[3] */
pkhtb a3, a2, v7, asr #16 /* a3 = W7 | (W3 << 16) */
pkhbt ip, a2, v7, lsl #16 /* ip = W1 | (W5 << 16) */
smusdx v6, a3, a4 /* v6 = -B1 = W7*row[3] - W3*row[1] */
smusdx a4, a4, ip /* a4 = B2 = W5*row[1] - W1*row[3] */
beq 3f /* row[4..7]==0: half-row path (flags from cmp above) */
smlad v5, fp, v7, v5 /* B0 += W5*row[5] + W7*row[7] */
smlad v7, fp, a3, a4 /* v7 = B2 += W7*row[5] + W3*row[7] */
ldr a4, [pc, #(w42n-.-8)] /* a4 = -W4 | (-W2 << 16) */
ldr a3, [a1, #4] /* a3 = row[6,4] */
smlsdx lr, fp, a2, lr /* B3 += W3*row[5] - W1*row[7] */
ldr a2, [pc, #(w46-.-8)] /* a2 = W4 | (W6 << 16) */
smlad v6, fp, ip, v6 /* B1 -= W1*row[5] + W5*row[7] */
smlad v2, a3, a4, v2 /* A1 += -W4*row[4] - W2*row[6] */
smlsd v3, a3, a4, v3 /* A2 += -W4*row[4] + W2*row[6] */
smlad v1, a3, a2, v1 /* A0 += W4*row[4] + W6*row[6] */
smlsd v4, a3, a2, v4 /* A3 += W4*row[4] - W6*row[6] */
ldr a2, [sp], #4 /* restore dest pointer */
/* Butterfly and descale; v6 = -B1, hence swapped sub/add for A1. */
add a4, v1, v5 /* a4 = A0 + B0 */
sub a3, v1, v5 /* a3 = A0 - B0 */
mov v1, a4, asr #ROW_SHIFT
mov v5, a3, asr #ROW_SHIFT
sub a4, v2, v6 /* a4 = A1 + B1 */
add a3, v2, v6 /* a3 = A1 - B1 */
mov v2, a4, asr #ROW_SHIFT
mov v6, a3, asr #ROW_SHIFT
add a4, v3, v7 /* a4 = A2 + B2 */
sub a3, v3, v7 /* a3 = A2 - B2 */
mov v3, a4, asr #ROW_SHIFT
mov v7, a3, asr #ROW_SHIFT
add a4, v4, lr /* a4 = A3 + B3 */
sub a3, v4, lr /* a3 = A3 - B3 */
mov v4, a4, asr #ROW_SHIFT
mov fp, a3, asr #ROW_SHIFT
/* Store transposed; offsets are 16-bit elements (x2 bytes). */
strh v1, [a2]
strh v2, [a2, #(16*2)]
strh v3, [a2, #(16*4)]
strh v4, [a2, #(16*6)]
strh fp, [a2, #(16*1)]
strh v7, [a2, #(16*3)]
strh v6, [a2, #(16*5)]
strh v5, [a2, #(16*7)]
ldr pc, [sp], #4 /* return: pop saved lr into pc */
3: ldr a2, [sp], #4 /* half-row path: restore dest pointer */
add v7, v1, v5 /* v7 = A0 + B0 */
sub a3, v1, v5 /* a3 = A0 - B0 */
mov v1, v7, asr #ROW_SHIFT
mov v5, a3, asr #ROW_SHIFT
sub v7, v2, v6 /* v7 = A1 + B1 */
add a3, v2, v6 /* a3 = A1 - B1 */
mov v2, v7, asr #ROW_SHIFT
mov v6, a3, asr #ROW_SHIFT
add v7, v3, a4 /* v7 = A2 + B2 (B2 still in a4 here) */
sub a3, v3, a4 /* a3 = A2 - B2 */
mov v3, v7, asr #ROW_SHIFT
mov v7, a3, asr #ROW_SHIFT
add a4, v4, lr /* a4 = A3 + B3 */
sub a3, v4, lr /* a3 = A3 - B3 */
mov v4, a4, asr #ROW_SHIFT
mov fp, a3, asr #ROW_SHIFT
strh v1, [a2]
strh v2, [a2, #(16*2)]
strh v3, [a2, #(16*4)]
strh v4, [a2, #(16*6)]
strh fp, [a2, #(16*1)]
strh v7, [a2, #(16*3)]
strh v6, [a2, #(16*5)]
strh v5, [a2, #(16*7)]
ldr pc, [sp], #4 /* return: pop saved lr into pc */
1: mov a3, a3, lsl #3 /* DC-only: out = row[0] << 3 replicated */
strh a3, [a2]
strh a3, [a2, #(16*2)]
strh a3, [a2, #(16*4)]
strh a3, [a2, #(16*6)]
strh a3, [a2, #(16*1)]
strh a3, [a2, #(16*3)]
strh a3, [a2, #(16*5)]
strh a3, [a2, #(16*7)]
mov pc, lr /* lr untouched on this path: plain return */
.endfunc
/*
Compute IDCT of single column, read as row.
a1 = source
a2 = dest
*/
.align
.func idct_col_armv6
/*
 * One-column IDCT (column stored as a row by the row pass), result
 * stored back transposed as a 16-bit column (element stride 16).
 * In: a1 = source row, a2 = dest.  Clobbers a2-a4, v1-v7, ip, lr, fp.
 */
idct_col_armv6:
stmfd sp!, {a2, lr} /* save dest pointer and return address */
ldr a3, [a1] /* a3 = row[2,0] */
ldr a4, [a1, #8] /* a4 = row[3,1] */
idct_row_start COL_SHIFT
idct_row COL_SHIFT
ldr a2, [sp], #4 /* restore dest pointer */
idct_finish_shift COL_SHIFT
/* Output order v1,v2,v3,v4,fp,v7,v6,v5 = out0..out7. */
strh v1, [a2]
strh v2, [a2, #(16*1)]
strh v3, [a2, #(16*2)]
strh v4, [a2, #(16*3)]
strh fp, [a2, #(16*4)]
strh v7, [a2, #(16*5)]
strh v6, [a2, #(16*6)]
strh v5, [a2, #(16*7)]
ldr pc, [sp], #4 /* return: pop saved lr into pc */
.endfunc
/*
Compute IDCT of single column, read as row, store saturated 8-bit.
a1 = source
a2 = dest
a3 = line size
*/
.align
.func idct_col_put_armv6
/*
 * One-column IDCT storing saturated 8-bit pixels down a dest column.
 * In: a1 = source row, a2 = dest, a3 = line size.
 * On exit a2 is rewound to the top of the column (sub a3 << 3).
 */
idct_col_put_armv6:
stmfd sp!, {a2, a3, lr} /* save dest, line size, return address */
ldr a3, [a1] /* a3 = row[2,0] */
ldr a4, [a1, #8] /* a4 = row[3,1] */
idct_row_start COL_SHIFT
idct_row COL_SHIFT
ldmfd sp!, {a2, a3} /* restore dest and line size */
idct_finish_shift_sat COL_SHIFT
/* Store out0..out7 = v1,v2,v3,v4,fp,v7,v6,v5 down the column. */
strb v1, [a2], a3
strb v2, [a2], a3
strb v3, [a2], a3
strb v4, [a2], a3
strb fp, [a2], a3
strb v7, [a2], a3
strb v6, [a2], a3
strb v5, [a2], a3
sub a2, a2, a3, lsl #3 /* rewind dest to top of column */
ldr pc, [sp], #4 /* return: pop saved lr into pc */
.endfunc
/*
Compute IDCT of single column, read as row, add/store saturated 8-bit.
a1 = source
a2 = dest
a3 = line size
*/
.align
.func idct_col_add_armv6
/*
 * One-column IDCT added to existing 8-bit pixels, saturated to [0,255].
 * In: a1 = source row, a2 = dest, a3 = line size.
 * After idct_finish: ip,v1,v2,v3 = out0..out3 and v7,v6,v5,lr = out4..out7
 * (still unshifted; descaled by asr #COL_SHIFT at each add below).
 * Pixel loads are interleaved with the stores to hide load latency.
 * On exit a2 is rewound to the top of the column.
 */
idct_col_add_armv6:
stmfd sp!, {a2, a3, lr} /* save dest, line size, return address */
ldr a3, [a1] /* a3 = row[2,0] */
ldr a4, [a1, #8] /* a4 = row[3,1] */
idct_row_start COL_SHIFT
idct_row COL_SHIFT
ldmfd sp!, {a2, a3} /* restore dest and line size */
idct_finish
ldrb a4, [a2] /* a4 = pixel row 0 */
ldrb v4, [a2, a3] /* v4 = pixel row 1 */
ldrb fp, [a2, a3, lsl #2] /* NOTE(review): fp is reloaded below before
                             any use -- appears redundant (preload?);
                             confirm intent */
add ip, a4, ip, asr #COL_SHIFT /* out0 + pixel row 0 */
usat ip, #8, ip
add v1, v4, v1, asr #COL_SHIFT /* out1 + pixel row 1 */
strb ip, [a2], a3 /* store row 0, advance */
ldrb ip, [a2, a3] /* ip = pixel row 2 */
usat v1, #8, v1
ldrb fp, [a2, a3, lsl #2] /* fp = pixel row 5 */
add v2, ip, v2, asr #COL_SHIFT /* out2 + pixel row 2 */
usat v2, #8, v2
strb v1, [a2], a3 /* store row 1, advance */
ldrb a4, [a2, a3] /* a4 = pixel row 3 */
ldrb ip, [a2, a3, lsl #2] /* ip = pixel row 6 */
strb v2, [a2], a3 /* store row 2, advance */
ldrb v4, [a2, a3] /* v4 = pixel row 4 */
ldrb v1, [a2, a3, lsl #2] /* v1 = pixel row 7 */
add v3, a4, v3, asr #COL_SHIFT /* out3 + pixel row 3 */
usat v3, #8, v3
add v7, v4, v7, asr #COL_SHIFT /* out4 + pixel row 4 */
usat v7, #8, v7
add v6, fp, v6, asr #COL_SHIFT /* out5 + pixel row 5 */
usat v6, #8, v6
add v5, ip, v5, asr #COL_SHIFT /* out6 + pixel row 6 */
usat v5, #8, v5
add lr, v1, lr, asr #COL_SHIFT /* out7 + pixel row 7 */
usat lr, #8, lr
strb v3, [a2], a3 /* store rows 3..7 */
strb v7, [a2], a3
strb v6, [a2], a3
strb v5, [a2], a3
strb lr, [a2], a3
sub a2, a2, a3, lsl #3 /* rewind dest to top of column */
ldr pc, [sp], #4 /* return: pop saved lr into pc */
.endfunc
/*
Compute 8 IDCT row transforms.
func = IDCT row->col function
width = width of columns in bytes
*/
/*
 * Call \func on all 8 rows.  a1 walks the rows in the order
 * 0,2,4,6,1,3,5,7 (steps of 16*2 bytes with two corrections) while a2
 * advances by \width per call; together with the interleaved element
 * layout the two passes assume (see the row[2,0]/row[3,1] load comments
 * -- presumably matched by the IDCT coefficient permutation set up by
 * the caller; confirm against the dsputil init code), this yields the
 * standard 2-D IDCT.  On exit a1 is restored to its entry value; a2 is
 * left pointing at the last column.
 */
.macro idct_rows func width
bl \func
add a1, a1, #(16*2)
add a2, a2, #\width
bl \func
add a1, a1, #(16*2)
add a2, a2, #\width
bl \func
add a1, a1, #(16*2)
add a2, a2, #\width
bl \func
sub a1, a1, #(16*5) /* jump back to row 1 */
add a2, a2, #\width
bl \func
add a1, a1, #(16*2)
add a2, a2, #\width
bl \func
add a1, a1, #(16*2)
add a2, a2, #\width
bl \func
add a1, a1, #(16*2)
add a2, a2, #\width
bl \func
sub a1, a1, #(16*7) /* restore a1 to its entry value */
.endm
.align
.global ff_simple_idct_armv6
.func ff_simple_idct_armv6
/* void ff_simple_idct_armv6(DCTELEM *data); */
/*
 * In-place 2-D IDCT: row pass writes a transposed 8x8 int16 block into a
 * 128-byte stack scratch buffer, column pass transforms it back into data.
 */
ff_simple_idct_armv6:
stmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, lr} /* save callee-saved regs */
sub sp, sp, #128 /* 8x8 int16 transpose buffer */
mov a2, sp /* rows: data -> scratch */
idct_rows idct_row_armv6, 2
mov a2, a1 /* cols: scratch -> data */
mov a1, sp
idct_rows idct_col_armv6, 2
add sp, sp, #128 /* drop scratch buffer */
ldmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
.endfunc
.align
.global ff_simple_idct_add_armv6
.func ff_simple_idct_add_armv6
/* ff_simple_idct_add_armv6(uint8_t *dest, int line_size, DCTELEM *data); */
/*
 * 2-D IDCT of data, added to the 8x8 pixel block at dest with saturation.
 * dest/line_size are stashed on the stack beneath the scratch buffer and
 * reloaded for the column pass.
 */
ff_simple_idct_add_armv6:
stmfd sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr} /* also stash dest, line_size */
sub sp, sp, #128 /* 8x8 int16 transpose buffer */
mov a1, a3 /* rows: data -> scratch */
mov a2, sp
idct_rows idct_row_armv6, 2
mov a1, sp /* cols: scratch -> dest pixels */
ldr a2, [sp, #128] /* a2 = saved dest */
ldr a3, [sp, #(128+4)] /* a3 = saved line_size */
idct_rows idct_col_add_armv6, 1
add sp, sp, #(128+8) /* drop scratch + stashed args */
ldmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
.endfunc
.align
.global ff_simple_idct_put_armv6
.func ff_simple_idct_put_armv6
/* ff_simple_idct_put_armv6(uint8_t *dest, int line_size, DCTELEM *data); */
/*
 * 2-D IDCT of data, stored as saturated 8-bit pixels into the 8x8 block
 * at dest.  dest/line_size are stashed on the stack beneath the scratch
 * buffer and reloaded for the column pass.
 */
ff_simple_idct_put_armv6:
stmfd sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr} /* also stash dest, line_size */
sub sp, sp, #128 /* 8x8 int16 transpose buffer */
mov a1, a3 /* rows: data -> scratch */
mov a2, sp
idct_rows idct_row_armv6, 2
mov a1, sp /* cols: scratch -> dest pixels */
ldr a2, [sp, #128] /* a2 = saved dest */
ldr a3, [sp, #(128+4)] /* a3 = saved line_size */
idct_rows idct_col_put_armv6, 1
add sp, sp, #(128+8) /* drop scratch + stashed args */
ldmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
.endfunc
|