path: root/contrib/libs/llvm12/lib/Target/AArch64/AArch64CompressJumpTables.cpp
//==-- AArch64CompressJumpTables.cpp - Compress jump tables for AArch64 --====//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
// This pass looks at the basic blocks each jump-table refers to and works out
// whether they can be emitted in a compressed form (with 8 or 16-bit
// entries). If so, it changes the opcode and flags them in the associated
// AArch64FunctionInfo.
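// A table is compressed only when the lowest-addressed target block is within
// ADR range (+/-1MB) of the jump-table dispatch instruction and the byte span
// between the lowest and highest targets, scaled by the 4-byte instruction
// size, fits in the narrower entry width.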
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "aarch64-jump-tables"

STATISTIC(NumJT8, "Number of jump-tables with 1-byte entries");
STATISTIC(NumJT16, "Number of jump-tables with 2-byte entries");
STATISTIC(NumJT32, "Number of jump-tables with 4-byte entries");

namespace {
class AArch64CompressJumpTables : public MachineFunctionPass {
  const TargetInstrInfo *TII;
  MachineFunction *MF;
  SmallVector<int, 8> BlockInfo;

  /// Returns the size in bytes of the block \p MBB, or None if we couldn't
  /// get a safe upper bound.
  Optional<int> computeBlockSize(MachineBasicBlock &MBB);

  /// Gather per-block offset information about the function; returns false if
  /// we can't perform this optimization (e.g. a block size we can't bound).
  bool scanFunction();

  bool compressJumpTable(MachineInstr &MI, int Offset);

public:
  static char ID;
  AArch64CompressJumpTables() : MachineFunctionPass(ID) {
    initializeAArch64CompressJumpTablesPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }
  StringRef getPassName() const override {
    return "AArch64 Compress Jump Tables";
  }
};
char AArch64CompressJumpTables::ID = 0;
} // namespace

INITIALIZE_PASS(AArch64CompressJumpTables, DEBUG_TYPE,
                "AArch64 compress jump tables pass", false, false)

Optional<int>
AArch64CompressJumpTables::computeBlockSize(MachineBasicBlock &MBB) {
  int Size = 0;
  for (const MachineInstr &MI : MBB) {
    // Inline asm may contain directives like .byte which we don't currently
    // have the ability to parse accurately. To be safe, just avoid computing
    // a size and bail out.
    if (MI.getOpcode() == AArch64::INLINEASM ||
        MI.getOpcode() == AArch64::INLINEASM_BR)
      return None;
    Size += TII->getInstSizeInBytes(MI);
  }
  return Size;
}

bool AArch64CompressJumpTables::scanFunction() {
  BlockInfo.clear();
  BlockInfo.resize(MF->getNumBlockIDs());

  unsigned Offset = 0;
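  // Lay the function out block by block, recording each block's aligned byte
  // offset from the start of the function. Bail out if any block's size can't
  // be computed safely.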
  for (MachineBasicBlock &MBB : *MF) {
    const Align Alignment = MBB.getAlignment();
    unsigned AlignedOffset;
    if (Alignment == Align(1))
      AlignedOffset = Offset;
    else
      AlignedOffset = alignTo(Offset, Alignment);
    BlockInfo[MBB.getNumber()] = AlignedOffset;
    auto BlockSize = computeBlockSize(MBB);
    if (!BlockSize)
      return false;
    Offset = AlignedOffset + *BlockSize;
  }
  return true;
}

bool AArch64CompressJumpTables::compressJumpTable(MachineInstr &MI,
                                                  int Offset) {
  if (MI.getOpcode() != AArch64::JumpTableDest32)
    return false;

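  // Operand 4 of the JumpTableDest32 pseudo is the jump-table index.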
  int JTIdx = MI.getOperand(4).getIndex();
  auto &JTInfo = *MF->getJumpTableInfo();
  const MachineJumpTableEntry &JT = JTInfo.getJumpTables()[JTIdx];

  // The jump-table might have been optimized away.
  if (JT.MBBs.empty())
    return false;

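  // Find the lowest- and highest-addressed target blocks. MinBlock becomes the
  // base that compressed entries are emitted relative to, and the span between
  // the two bounds decides which entry width is sufficient.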
  int MaxOffset = std::numeric_limits<int>::min(),
      MinOffset = std::numeric_limits<int>::max();
  MachineBasicBlock *MinBlock = nullptr;
  for (auto *Block : JT.MBBs) {
    int BlockOffset = BlockInfo[Block->getNumber()];
    assert(BlockOffset % 4 == 0 && "misaligned basic block");

    MaxOffset = std::max(MaxOffset, BlockOffset);
    if (BlockOffset <= MinOffset) {
      MinOffset = BlockOffset;
      MinBlock = Block;
    }
  }
  assert(MinBlock && "Failed to find minimum offset block");

  // The ADR instruction needed to calculate the address of the first reachable
  // basic block can address +/-1MB, i.e. a 21-bit signed byte offset.
  if (!isInt<21>(MinOffset - Offset)) {
    ++NumJT32;
    return false;
  }

  int Span = MaxOffset - MinOffset;
  auto *AFI = MF->getInfo<AArch64FunctionInfo>();
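  // Target blocks are 4-byte aligned (asserted above), so entries can be
  // stored in instruction units; Span / 4 is what must fit in 8 or 16 bits.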
  if (isUInt<8>(Span / 4)) {
    AFI->setJumpTableEntryInfo(JTIdx, 1, MinBlock->getSymbol());
    MI.setDesc(TII->get(AArch64::JumpTableDest8));
    ++NumJT8;
    return true;
  }
  if (isUInt<16>(Span / 4)) {
    AFI->setJumpTableEntryInfo(JTIdx, 2, MinBlock->getSymbol());
    MI.setDesc(TII->get(AArch64::JumpTableDest16));
    ++NumJT16;
    return true;
  }

  ++NumJT32;
  return false;
}

bool AArch64CompressJumpTables::runOnMachineFunction(MachineFunction &MFIn) {
  bool Changed = false;
  MF = &MFIn;

  const auto &ST = MF->getSubtarget<AArch64Subtarget>();
  TII = ST.getInstrInfo();

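  // Some subtargets prefer fixed-size (32-bit) jump-table entries; only
  // compress anyway when the function is optimizing for minimum size.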
  if (ST.force32BitJumpTables() && !MF->getFunction().hasMinSize())
    return false;

  if (!scanFunction())
    return false;

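  // Visit every instruction, tracking its byte offset from the start of the
  // function so compressJumpTable can measure ADR reachability from the
  // dispatch point.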
  for (MachineBasicBlock &MBB : *MF) {
    int Offset = BlockInfo[MBB.getNumber()];
    for (MachineInstr &MI : MBB) {
      Changed |= compressJumpTable(MI, Offset);
      Offset += TII->getInstSizeInBytes(MI);
    }
  }

  return Changed;
}

FunctionPass *llvm::createAArch64CompressJumpTablesPass() {
  return new AArch64CompressJumpTables();
}