/*
 * simple math operations
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_X86_MATHOPS_H
#define AVCODEC_X86_MATHOPS_H

#include "config.h"
#include "libavutil/common.h"

#if ARCH_X86_32
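
/* MULL(a, b, shift): (int)(((int64_t)a * b) >> shift); imull leaves the
 * 64-bit product in edx:eax and shrdl shifts that pair down into eax.
 * The "i" constraint requires shift to be a compile-time constant.
 * The "#define NAME NAME" pattern tells the generic libavcodec/mathops.h
 * to skip its portable C fallback for this operation. */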
#define MULL MULL
static av_always_inline av_const int MULL(int a, int b, unsigned shift)
{
    int rt, dummy;
    __asm__ (
        "imull %3               \n\t"
        "shrdl %4, %%edx, %%eax \n\t"
        :"=a"(rt), "=d"(dummy)
        :"a"(a), "rm"(b), "i"(shift)
    );
    return rt;
}
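
/* MULH(a, b): high 32 bits of the signed 64-bit product, i.e.
 * (int)(((int64_t)a * b) >> 32); imull leaves them in edx, which is
 * returned directly while the clobbered eax goes into dummy. */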
#define MULH MULH
static av_always_inline av_const int MULH(int a, int b)
{
    int rt, dummy;
    __asm__ (
        "imull %3"
        :"=d"(rt), "=a"(dummy)
        :"a"(a), "rm"(b)
    );
    return rt;
}
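
/* MUL64(a, b): full 64-bit signed product of two 32-bit integers,
 * equivalent to (int64_t)a * b; "=A" binds the result to edx:eax. */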
#define MUL64 MUL64
static av_always_inline av_const int64_t MUL64(int a, int b)
{
    int64_t rt;
    __asm__ (
        "imull %2"
        :"=A"(rt)
        :"a"(a), "rm"(b)
    );
    return rt;
}

#endif /* ARCH_X86_32 */

#if HAVE_CMOV
/* median of 3 */
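/* i.e. min(max(a, b), max(min(a, b), c)), computed branchlessly with cmov */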
#define mid_pred mid_pred
static inline av_const int mid_pred(int a, int b, int c)
{
    int i=b;
    __asm__ volatile(
        "cmp    %2, %1 \n\t"
        "cmovg  %1, %0 \n\t"
        "cmovg  %2, %1 \n\t"
        "cmp    %3, %1 \n\t"
        "cmovl  %3, %1 \n\t"
        "cmp    %1, %0 \n\t"
        "cmovg  %1, %0 \n\t"
        :"+&r"(i), "+&r"(a)
        :"r"(b), "r"(c)
    );
    return i;
}
#endif /* HAVE_CMOV */

#if HAVE_CMOV
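/* COPY3_IF_LT(x, y, a, b, c, d): if (y < x) { x = y; a = b; c = d; },
 * done with a single compare and three conditional moves, no branch */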
#define COPY3_IF_LT(x, y, a, b, c, d)\
__asm__ volatile(\
"cmpl %0, %3 \n\t"\
"cmovl %3, %0 \n\t"\
"cmovl %4, %1 \n\t"\
"cmovl %5, %2 \n\t"\
: "+&r" (x), "+&r" (a), "+r" (c)\
: "r" (y), "r" (b), "r" (d)\
);
#endif /* HAVE_CMOV */

/* NEG_SSR32(a, s) is a >> (32 - s), NEG_USR32 the unsigned equivalent.
 * x86 masks shift counts to 5 bits, so shifting by -s gives the same
 * result and avoids computing the +32 explicitly (an optimization gcc
 * should ideally do on its own ...) */
#define NEG_SSR32 NEG_SSR32
static inline int32_t NEG_SSR32( int32_t a, int8_t s){
__asm__ ("sarl %1, %0\n\t"
: "+r" (a)
: "ic" ((uint8_t)(-s))
);
return a;
}
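
/* unsigned variant: logical shift (shrl) instead of arithmetic (sarl) */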
#define NEG_USR32 NEG_USR32
static inline uint32_t NEG_USR32(uint32_t a, int8_t s){
__asm__ ("shrl %1, %0\n\t"
: "+r" (a)
: "ic" ((uint8_t)(-s))
);
return a;
}

#endif /* AVCODEC_X86_MATHOPS_H */