path: root/libavutil/arm/intmath.h

/*
 * Copyright (c) 2010 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVUTIL_ARM_INTMATH_H
#define AVUTIL_ARM_INTMATH_H

#include <stdint.h>

#include "config.h"
#include "libavutil/attributes.h"

#if HAVE_INLINE_ASM

#if HAVE_ARMV6

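/*
 * FASTDIV(a, b) approximates a / b by multiplying with a precomputed
 * reciprocal: ff_inverse[b] (declared by the including header) holds
 * approximately 2^32 / b, so the quotient is the high 32 bits of
 * a * ff_inverse[b].  cmp and ldr fetch the reciprocal while comparing
 * b against 2; for b <= 2 the conditional lsr returns a >> 1, otherwise
 * smmul takes the high half of the 32x32->64-bit product.  Defining
 * FASTDIV as itself lets #ifndef checks in the including header detect
 * that an arch-specific version exists.
 */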
#define FASTDIV FASTDIV
static av_always_inline av_const int FASTDIV(int a, int b)
{
    int r, t;
    __asm__ volatile("cmp     %3, #2               \n\t"
                     "ldr     %1, [%4, %3, lsl #2] \n\t"
                     "lsrle   %0, %2, #1           \n\t"
                     "smmulgt %0, %1, %2           \n\t"
                     : "=&r"(r), "=&r"(t) : "r"(a), "r"(b), "r"(ff_inverse));
    return r;
}

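/*
 * The ARMv6 usat/ssat instructions saturate a value to an arbitrary
 * unsigned/signed bit width in a single instruction, replacing the
 * compare-and-select sequences of the generic C clip functions.
 */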
#define av_clip_uint8 av_clip_uint8_arm
static av_always_inline av_const uint8_t av_clip_uint8_arm(int a)
{
    unsigned x;
    __asm__ volatile ("usat %0, #8,  %1" : "=r"(x) : "r"(a));
    return x;
}

#define av_clip_int8 av_clip_int8_arm
static av_always_inline av_const int8_t av_clip_int8_arm(int a)
{
    int x;
    __asm__ volatile ("ssat %0, #8,  %1" : "=r"(x) : "r"(a));
    return x;
}

#define av_clip_uint16 av_clip_uint16_arm
static av_always_inline av_const uint16_t av_clip_uint16_arm(int a)
{
    unsigned x;
    __asm__ volatile ("usat %0, #16, %1" : "=r"(x) : "r"(a));
    return x;
}

#define av_clip_int16 av_clip_int16_arm
static av_always_inline av_const int16_t av_clip_int16_arm(int a)
{
    int x;
    __asm__ volatile ("ssat %0, #16, %1" : "=r"(x) : "r"(a));
    return x;
}

#else /* HAVE_ARMV6 */

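/*
 * Pre-ARMv6 cores lack smmul, so the same reciprocal division is done
 * with umull: the full 64-bit product lands in t (low word) and
 * r (high word), and the returned high word equals
 * (a * ff_inverse[b]) >> 32.
 */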
#define FASTDIV FASTDIV
static av_always_inline av_const int FASTDIV(int a, int b)
{
    int r, t;
    __asm__ volatile("umull %1, %0, %2, %3"
                     : "=&r"(r), "=&r"(t) : "r"(a), "r"(ff_inverse[b]));
    return r;
}

#endif /* HAVE_ARMV6 */

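/*
 * Branch-free 64-bit to 32-bit saturation.  %Q2/%R2 are the low/high
 * words of the 64-bit input.  adds tests whether the high word is a
 * pure sign extension of the low word (hi + (lo >> 31) == 0); if so,
 * the low word is returned unchanged.  Otherwise mvn builds INT32_MAX
 * and eor flips it to INT32_MIN when the high word is negative.
 */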
#define av_clipl_int32 av_clipl_int32_arm
static av_always_inline av_const int32_t av_clipl_int32_arm(int64_t a)
{
    int x, y;
    __asm__ volatile ("adds   %1, %R2, %Q2, lsr #31  \n\t"
                      "mvnne  %1, #1<<31             \n\t"
                      "moveq  %0, %Q2                \n\t"
                      "eorne  %0, %1,  %R2, asr #31  \n\t"
                      : "=r"(x), "=&r"(y) : "r"(a));
    return x;
}

#endif /* HAVE_INLINE_ASM */

#endif /* AVUTIL_ARM_INTMATH_H */