//===- LowerSwitch.cpp - Eliminate Switch instructions --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The LowerSwitch transformation rewrites switch instructions into a sequence
// of branches, which allows targets to avoid implementing the switch
// instruction directly until it is convenient to do so.
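//
// For example (illustrative only), a switch such as
//
//   switch i32 %val, label %default [ i32 0, label %case0
//                                     i32 1, label %case1 ]
//
// is rewritten into a tree of conditional branches that compare %val against
// the case values (or contiguous case ranges) and branch to %default when no
// case matches.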
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/LowerSwitch.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <vector>
using namespace llvm;
#define DEBUG_TYPE "lower-switch"
namespace {
struct IntRange {
int64_t Low, High;
};
} // end anonymous namespace
namespace {
// Return true iff R is covered by Ranges.
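// For example (illustrative): R = [3, 4] is covered by {[0, 1], [3, 7]},
// while R = [2, 4] is not, because no single range contains it.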
bool IsInRanges(const IntRange &R, const std::vector<IntRange> &Ranges) {
// Note: Ranges must be sorted, non-overlapping and non-adjacent.
// Find the first range whose High field is >= R.High,
// then check if the Low field is <= R.Low. If so, we
// have a Range that covers R.
auto I = llvm::lower_bound(
Ranges, R, [](IntRange A, IntRange B) { return A.High < B.High; });
return I != Ranges.end() && I->Low <= R.Low;
}
struct CaseRange {
ConstantInt *Low;
ConstantInt *High;
BasicBlock *BB;
CaseRange(ConstantInt *low, ConstantInt *high, BasicBlock *bb)
: Low(low), High(high), BB(bb) {}
};
using CaseVector = std::vector<CaseRange>;
using CaseItr = std::vector<CaseRange>::iterator;
/// The comparison function for sorting the switch case values in the vector.
/// WARNING: Case ranges should be disjoint!
struct CaseCmp {
bool operator()(const CaseRange &C1, const CaseRange &C2) {
const ConstantInt *CI1 = cast<const ConstantInt>(C1.Low);
const ConstantInt *CI2 = cast<const ConstantInt>(C2.High);
return CI1->getValue().slt(CI2->getValue());
}
};
/// Used for debugging purposes.
LLVM_ATTRIBUTE_USED
raw_ostream &operator<<(raw_ostream &O, const CaseVector &C) {
O << "[";
for (CaseVector::const_iterator B = C.begin(), E = C.end(); B != E;) {
O << "[" << B->Low->getValue() << ", " << B->High->getValue() << "]";
if (++B != E)
O << ", ";
}
return O << "]";
}
/// Update the first occurrence of the "switch statement" BB in the PHI
/// node with the "new" BB. The other occurrences will:
///
/// 1) Be updated by subsequent calls to this function. Switch statements may
/// have more than one outgoing edge into the same BB if they all have the same
/// value. When the switch statement is converted, these incoming edges now
/// come from multiple BBs.
/// 2) Be removed if subsequent incoming values now share the same case, i.e.,
/// multiple outgoing edges are condensed into one. This is necessary to keep
/// the number of PHI entries equal to the number of branches to SuccBB.
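///
/// For example (illustrative): if two cases with the same incoming value both
/// branched to SuccBB and are merged into one case range, one of the two PHI
/// entries for OrigBB must be removed so that SuccBB again has exactly one
/// entry per incoming edge.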
void FixPhis(
BasicBlock *SuccBB, BasicBlock *OrigBB, BasicBlock *NewBB,
const unsigned NumMergedCases = std::numeric_limits<unsigned>::max()) {
for (BasicBlock::iterator I = SuccBB->begin(),
IE = SuccBB->getFirstNonPHI()->getIterator();
I != IE; ++I) {
PHINode *PN = cast<PHINode>(I);
// Only update the first occurrence.
unsigned Idx = 0, E = PN->getNumIncomingValues();
unsigned LocalNumMergedCases = NumMergedCases;
for (; Idx != E; ++Idx) {
if (PN->getIncomingBlock(Idx) == OrigBB) {
PN->setIncomingBlock(Idx, NewBB);
break;
}
}
// Remove additional occurrences coming from condensed cases and keep the
// number of incoming values equal to the number of branches to SuccBB.
SmallVector<unsigned, 8> Indices;
for (++Idx; LocalNumMergedCases > 0 && Idx < E; ++Idx)
if (PN->getIncomingBlock(Idx) == OrigBB) {
Indices.push_back(Idx);
LocalNumMergedCases--;
}
    // Remove the incoming values in reverse order so that removing one entry
    // does not invalidate the indices of the entries still to be removed.
for (unsigned III : llvm::reverse(Indices))
PN->removeIncomingValue(III);
}
}
/// Create a new leaf block for the binary lookup tree. It checks if the
/// switch's value == the case's value. If not, then it jumps to the default
/// branch. At this point in the tree, the value can't be another valid case
/// value, so the jump to the "default" branch is warranted.
BasicBlock *NewLeafBlock(CaseRange &Leaf, Value *Val, ConstantInt *LowerBound,
ConstantInt *UpperBound, BasicBlock *OrigBlock,
BasicBlock *Default) {
Function *F = OrigBlock->getParent();
BasicBlock *NewLeaf = BasicBlock::Create(Val->getContext(), "LeafBlock");
F->getBasicBlockList().insert(++OrigBlock->getIterator(), NewLeaf);
// Emit comparison
ICmpInst *Comp = nullptr;
if (Leaf.Low == Leaf.High) {
// Make the seteq instruction...
Comp =
new ICmpInst(*NewLeaf, ICmpInst::ICMP_EQ, Val, Leaf.Low, "SwitchLeaf");
} else {
// Make range comparison
if (Leaf.Low == LowerBound) {
// Val >= Min && Val <= Hi --> Val <= Hi
Comp = new ICmpInst(*NewLeaf, ICmpInst::ICMP_SLE, Val, Leaf.High,
"SwitchLeaf");
} else if (Leaf.High == UpperBound) {
// Val <= Max && Val >= Lo --> Val >= Lo
Comp = new ICmpInst(*NewLeaf, ICmpInst::ICMP_SGE, Val, Leaf.Low,
"SwitchLeaf");
} else if (Leaf.Low->isZero()) {
// Val >= 0 && Val <= Hi --> Val <=u Hi
Comp = new ICmpInst(*NewLeaf, ICmpInst::ICMP_ULE, Val, Leaf.High,
"SwitchLeaf");
} else {
// Emit V-Lo <=u Hi-Lo
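      // E.g. (illustrative): for the range [5, 9], V-5 <=u 4 holds exactly
      // when 5 <= V <= 9, because values below 5 wrap around to large
      // unsigned values.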
Constant *NegLo = ConstantExpr::getNeg(Leaf.Low);
Instruction *Add = BinaryOperator::CreateAdd(
Val, NegLo, Val->getName() + ".off", NewLeaf);
Constant *UpperBound = ConstantExpr::getAdd(NegLo, Leaf.High);
Comp = new ICmpInst(*NewLeaf, ICmpInst::ICMP_ULE, Add, UpperBound,
"SwitchLeaf");
}
}
// Make the conditional branch...
BasicBlock *Succ = Leaf.BB;
BranchInst::Create(Succ, Default, Comp, NewLeaf);
// If there were any PHI nodes in this successor, rewrite one entry
// from OrigBlock to come from NewLeaf.
for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
PHINode *PN = cast<PHINode>(I);
    // Remove all but one incoming entry from the cluster
uint64_t Range = Leaf.High->getSExtValue() - Leaf.Low->getSExtValue();
for (uint64_t j = 0; j < Range; ++j) {
PN->removeIncomingValue(OrigBlock);
}
int BlockIdx = PN->getBasicBlockIndex(OrigBlock);
assert(BlockIdx != -1 && "Switch didn't go to this successor??");
PN->setIncomingBlock((unsigned)BlockIdx, NewLeaf);
}
return NewLeaf;
}
/// Convert the switch statement into a binary search over the case values.
/// The function recursively builds this tree. LowerBound and UpperBound are
/// used to keep track of the bounds for Val that have already been checked by
/// a block emitted by one of the previous calls to SwitchConvert in the call
/// stack.
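///
/// For example (illustrative): for case ranges {[1,1], [5,5], [10,10]} the
/// pivot is [5,5], so the root node branches on Val < 5, with the left
/// subtree handling [1,1] and the right subtree handling [5,5] and [10,10].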
BasicBlock *SwitchConvert(CaseItr Begin, CaseItr End, ConstantInt *LowerBound,
ConstantInt *UpperBound, Value *Val,
BasicBlock *Predecessor, BasicBlock *OrigBlock,
BasicBlock *Default,
const std::vector<IntRange> &UnreachableRanges) {
assert(LowerBound && UpperBound && "Bounds must be initialized");
unsigned Size = End - Begin;
if (Size == 1) {
    // Check if the case range exactly matches the already-checked upper and
    // lower bounds. If it does, we can avoid emitting the code that checks
    // whether the value actually falls in the range, because the bounds
    // already guarantee it.
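    // E.g. (illustrative): if the checked bounds are already [3, 5] and the
    // case range is [3, 5], the leaf can branch to the case block directly.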
    if (Begin->Low == LowerBound && Begin->High == UpperBound) {
      unsigned NumMergedCases =
          UpperBound->getSExtValue() - LowerBound->getSExtValue();
FixPhis(Begin->BB, OrigBlock, Predecessor, NumMergedCases);
return Begin->BB;
}
return NewLeafBlock(*Begin, Val, LowerBound, UpperBound, OrigBlock,
Default);
}
unsigned Mid = Size / 2;
std::vector<CaseRange> LHS(Begin, Begin + Mid);
LLVM_DEBUG(dbgs() << "LHS: " << LHS << "\n");
std::vector<CaseRange> RHS(Begin + Mid, End);
LLVM_DEBUG(dbgs() << "RHS: " << RHS << "\n");
CaseRange &Pivot = *(Begin + Mid);
LLVM_DEBUG(dbgs() << "Pivot ==> [" << Pivot.Low->getValue() << ", "
<< Pivot.High->getValue() << "]\n");
  // NewLowerBound here should never be the smallest representable integer:
  // it is computed from a case range that is never the first one, so there
  // is always a case range with a smaller value to its left.
  ConstantInt *NewLowerBound = Pivot.Low;
  // Because NewLowerBound is never the smallest representable integer, it is
  // safe to subtract one here.
ConstantInt *NewUpperBound = ConstantInt::get(NewLowerBound->getContext(),
NewLowerBound->getValue() - 1);
if (!UnreachableRanges.empty()) {
// Check if the gap between LHS's highest and NewLowerBound is unreachable.
int64_t GapLow = LHS.back().High->getSExtValue() + 1;
int64_t GapHigh = NewLowerBound->getSExtValue() - 1;
IntRange Gap = { GapLow, GapHigh };
if (GapHigh >= GapLow && IsInRanges(Gap, UnreachableRanges))
NewUpperBound = LHS.back().High;
}
LLVM_DEBUG(dbgs() << "LHS Bounds ==> [" << LowerBound->getSExtValue() << ", "
<< NewUpperBound->getSExtValue() << "]\n"
<< "RHS Bounds ==> [" << NewLowerBound->getSExtValue()
<< ", " << UpperBound->getSExtValue() << "]\n");
  // Create a new node that checks if the value is < pivot. Go to the left
  // branch if it is, and to the right branch if not.
  Function *F = OrigBlock->getParent();
  BasicBlock *NewNode = BasicBlock::Create(Val->getContext(), "NodeBlock");
  ICmpInst *Comp =
      new ICmpInst(ICmpInst::ICMP_SLT, Val, Pivot.Low, "Pivot");
BasicBlock *LBranch =
SwitchConvert(LHS.begin(), LHS.end(), LowerBound, NewUpperBound, Val,
NewNode, OrigBlock, Default, UnreachableRanges);
BasicBlock *RBranch =
SwitchConvert(RHS.begin(), RHS.end(), NewLowerBound, UpperBound, Val,
NewNode, OrigBlock, Default, UnreachableRanges);
F->getBasicBlockList().insert(++OrigBlock->getIterator(), NewNode);
NewNode->getInstList().push_back(Comp);
BranchInst::Create(LBranch, RBranch, Comp, NewNode);
return NewNode;
}
/// Transform the simple list of \p SI's cases into a list of CaseRanges,
/// \p Cases.
/// \post \p Cases does not contain references to \p SI's default BB.
/// \returns The number of \p SI's cases that do not reference \p SI's default
/// BB.
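///
/// For example (illustrative): cases 1 -> BBA, 2 -> BBA, 4 -> BBB become the
/// case ranges [1, 2] -> BBA and [4, 4] -> BBB.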
unsigned Clusterify(CaseVector &Cases, SwitchInst *SI) {
unsigned NumSimpleCases = 0;
// Start with "simple" cases
for (auto Case : SI->cases()) {
if (Case.getCaseSuccessor() == SI->getDefaultDest())
continue;
Cases.push_back(CaseRange(Case.getCaseValue(), Case.getCaseValue(),
Case.getCaseSuccessor()));
++NumSimpleCases;
}
llvm::sort(Cases, CaseCmp());
  // Merge cases into clusters
if (Cases.size() >= 2) {
CaseItr I = Cases.begin();
for (CaseItr J = std::next(I), E = Cases.end(); J != E; ++J) {
      int64_t nextValue = J->Low->getSExtValue();
      int64_t currentValue = I->High->getSExtValue();
      BasicBlock *nextBB = J->BB;
      BasicBlock *currentBB = I->BB;
// If the two neighboring cases go to the same destination, merge them
// into a single case.
assert(nextValue > currentValue && "Cases should be strictly ascending");
if ((nextValue == currentValue + 1) && (currentBB == nextBB)) {
I->High = J->High;
// FIXME: Combine branch weights.
} else if (++I != J) {
*I = *J;
}
}
Cases.erase(std::next(I), Cases.end());
}
return NumSimpleCases;
}
/// Replace the specified switch instruction with a sequence of chained if-then
/// instructions forming a balanced binary search.
void ProcessSwitchInst(SwitchInst *SI,
SmallPtrSetImpl<BasicBlock *> &DeleteList,
AssumptionCache *AC, LazyValueInfo *LVI) {
BasicBlock *OrigBlock = SI->getParent();
Function *F = OrigBlock->getParent();
Value *Val = SI->getCondition(); // The value we are switching on...
  BasicBlock *Default = SI->getDefaultDest();
// Don't handle unreachable blocks. If there are successors with phis, this
// would leave them behind with missing predecessors.
if ((OrigBlock != &F->getEntryBlock() && pred_empty(OrigBlock)) ||
OrigBlock->getSinglePredecessor() == OrigBlock) {
DeleteList.insert(OrigBlock);
return;
}
// Prepare cases vector.
CaseVector Cases;
const unsigned NumSimpleCases = Clusterify(Cases, SI);
LLVM_DEBUG(dbgs() << "Clusterify finished. Total clusters: " << Cases.size()
<< ". Total non-default cases: " << NumSimpleCases
<< "\nCase clusters: " << Cases << "\n");
// If there is only the default destination, just branch.
if (Cases.empty()) {
BranchInst::Create(Default, OrigBlock);
    // Remove all but one of the references to OrigBlock from Default's PHIs.
FixPhis(Default, OrigBlock, OrigBlock);
SI->eraseFromParent();
return;
}
ConstantInt *LowerBound = nullptr;
ConstantInt *UpperBound = nullptr;
bool DefaultIsUnreachableFromSwitch = false;
if (isa<UnreachableInst>(Default->getFirstNonPHIOrDbg())) {
// Make the bounds tightly fitted around the case value range, because we
// know that the value passed to the switch must be exactly one of the case
// values.
LowerBound = Cases.front().Low;
UpperBound = Cases.back().High;
DefaultIsUnreachableFromSwitch = true;
} else {
    // Constraining the range of the value being switched over helps eliminate
    // unreachable BBs and minimize the number of `add` instructions
    // NewLeafBlock ends up emitting. Running CorrelatedValuePropagation after
    // LowerSwitch isn't as good, and is also much more expensive in terms of
    // compile time, for the following reasons:
    // 1. it processes many kinds of instructions, not just switches;
    // 2. even if limited to icmp instructions only, it will have to process
    //    roughly C icmps per switch, where C is the number of cases in the
    //    switch, while LowerSwitch only needs to call LVI once per switch.
const DataLayout &DL = F->getParent()->getDataLayout();
KnownBits Known = computeKnownBits(Val, DL, /*Depth=*/0, AC, SI);
// TODO Shouldn't this create a signed range?
ConstantRange KnownBitsRange =
ConstantRange::fromKnownBits(Known, /*IsSigned=*/false);
const ConstantRange LVIRange = LVI->getConstantRange(Val, SI);
ConstantRange ValRange = KnownBitsRange.intersectWith(LVIRange);
// We delegate removal of unreachable non-default cases to other passes. In
// the unlikely event that some of them survived, we just conservatively
// maintain the invariant that all the cases lie between the bounds. This
// may, however, still render the default case effectively unreachable.
APInt Low = Cases.front().Low->getValue();
APInt High = Cases.back().High->getValue();
APInt Min = APIntOps::smin(ValRange.getSignedMin(), Low);
APInt Max = APIntOps::smax(ValRange.getSignedMax(), High);
LowerBound = ConstantInt::get(SI->getContext(), Min);
UpperBound = ConstantInt::get(SI->getContext(), Max);
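    // The default is unreachable from the switch when the NumSimpleCases
    // distinct case values densely cover [Min, Max]. E.g. (illustrative):
    // cases 0, 1, 2, 3 with ValRange = [0, 3] give Min = 0, Max = 3 and
    // NumSimpleCases = 4, so Min + (NumSimpleCases - 1) == Max and the
    // default cannot be taken.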
DefaultIsUnreachableFromSwitch = (Min + (NumSimpleCases - 1) == Max);
}
std::vector<IntRange> UnreachableRanges;
if (DefaultIsUnreachableFromSwitch) {
DenseMap<BasicBlock *, unsigned> Popularity;
unsigned MaxPop = 0;
BasicBlock *PopSucc = nullptr;
IntRange R = {std::numeric_limits<int64_t>::min(),
std::numeric_limits<int64_t>::max()};
UnreachableRanges.push_back(R);
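    // E.g. (illustrative): for case clusters [0, 3] and [7, 9] the loop below
    // produces the unreachable ranges [INT64_MIN, -1], [4, 6] and
    // [10, INT64_MAX].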
for (const auto &I : Cases) {
int64_t Low = I.Low->getSExtValue();
int64_t High = I.High->getSExtValue();
IntRange &LastRange = UnreachableRanges.back();
if (LastRange.Low == Low) {
// There is nothing left of the previous range.
UnreachableRanges.pop_back();
} else {
// Terminate the previous range.
assert(Low > LastRange.Low);
LastRange.High = Low - 1;
}
if (High != std::numeric_limits<int64_t>::max()) {
IntRange R = { High + 1, std::numeric_limits<int64_t>::max() };
UnreachableRanges.push_back(R);
}
// Count popularity.
int64_t N = High - Low + 1;
unsigned &Pop = Popularity[I.BB];
if ((Pop += N) > MaxPop) {
MaxPop = Pop;
PopSucc = I.BB;
}
}
#ifndef NDEBUG
/* UnreachableRanges should be sorted and the ranges non-adjacent. */
for (auto I = UnreachableRanges.begin(), E = UnreachableRanges.end();
I != E; ++I) {
assert(I->Low <= I->High);
auto Next = I + 1;
if (Next != E) {
assert(Next->Low > I->High);
}
}
#endif
// As the default block in the switch is unreachable, update the PHI nodes
// (remove all of the references to the default block) to reflect this.
const unsigned NumDefaultEdges = SI->getNumCases() + 1 - NumSimpleCases;
for (unsigned I = 0; I < NumDefaultEdges; ++I)
Default->removePredecessor(OrigBlock);
// Use the most popular block as the new default, reducing the number of
// cases.
assert(MaxPop > 0 && PopSucc);
Default = PopSucc;
llvm::erase_if(Cases,
[PopSucc](const CaseRange &R) { return R.BB == PopSucc; });
// If there are no cases left, just branch.
if (Cases.empty()) {
BranchInst::Create(Default, OrigBlock);
SI->eraseFromParent();
// As all the cases have been replaced with a single branch, only keep
// one entry in the PHI nodes.
      for (unsigned I = 0; I < MaxPop - 1; ++I)
PopSucc->removePredecessor(OrigBlock);
return;
}
    // If the condition was a PHI node with the switch block as a predecessor,
    // removing predecessors may have caused the condition to be erased.
    // Re-fetch the condition value here to protect against that.
Val = SI->getCondition();
}
  // Create a new, empty default block so that the new hierarchy of
  // if-then statements goes to it and the PHI nodes are happy.
BasicBlock *NewDefault = BasicBlock::Create(SI->getContext(), "NewDefault");
F->getBasicBlockList().insert(Default->getIterator(), NewDefault);
BranchInst::Create(Default, NewDefault);
BasicBlock *SwitchBlock =
SwitchConvert(Cases.begin(), Cases.end(), LowerBound, UpperBound, Val,
OrigBlock, OrigBlock, NewDefault, UnreachableRanges);
// If there are entries in any PHI nodes for the default edge, make sure
// to update them as well.
FixPhis(Default, OrigBlock, NewDefault);
// Branch to our shiny new if-then stuff...
BranchInst::Create(SwitchBlock, OrigBlock);
// We are now done with the switch instruction, delete it.
BasicBlock *OldDefault = SI->getDefaultDest();
OrigBlock->getInstList().erase(SI);
  // If the Default block has no more predecessors, just add it to DeleteList.
if (pred_empty(OldDefault))
DeleteList.insert(OldDefault);
}
bool LowerSwitch(Function &F, LazyValueInfo *LVI, AssumptionCache *AC) {
bool Changed = false;
SmallPtrSet<BasicBlock *, 8> DeleteList;
// We use make_early_inc_range here so that we don't traverse new blocks.
for (BasicBlock &Cur : llvm::make_early_inc_range(F)) {
// If the block is a dead Default block that will be deleted later, don't
// waste time processing it.
if (DeleteList.count(&Cur))
continue;
if (SwitchInst *SI = dyn_cast<SwitchInst>(Cur.getTerminator())) {
Changed = true;
ProcessSwitchInst(SI, DeleteList, AC, LVI);
}
}
for (BasicBlock *BB : DeleteList) {
LVI->eraseBlock(BB);
DeleteDeadBlock(BB);
}
return Changed;
}
/// Replace all SwitchInst instructions with chained branch instructions.
class LowerSwitchLegacyPass : public FunctionPass {
public:
// Pass identification, replacement for typeid
static char ID;
LowerSwitchLegacyPass() : FunctionPass(ID) {
initializeLowerSwitchLegacyPassPass(*PassRegistry::getPassRegistry());
}
bool runOnFunction(Function &F) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<LazyValueInfoWrapperPass>();
}
};
} // end anonymous namespace
char LowerSwitchLegacyPass::ID = 0;
// Publicly exposed interface to pass...
char &llvm::LowerSwitchID = LowerSwitchLegacyPass::ID;
INITIALIZE_PASS_BEGIN(LowerSwitchLegacyPass, "lowerswitch",
"Lower SwitchInst's to branches", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LazyValueInfoWrapperPass)
INITIALIZE_PASS_END(LowerSwitchLegacyPass, "lowerswitch",
"Lower SwitchInst's to branches", false, false)
// createLowerSwitchPass - Interface to this file...
FunctionPass *llvm::createLowerSwitchPass() {
return new LowerSwitchLegacyPass();
}
bool LowerSwitchLegacyPass::runOnFunction(Function &F) {
LazyValueInfo *LVI = &getAnalysis<LazyValueInfoWrapperPass>().getLVI();
auto *ACT = getAnalysisIfAvailable<AssumptionCacheTracker>();
AssumptionCache *AC = ACT ? &ACT->getAssumptionCache(F) : nullptr;
return LowerSwitch(F, LVI, AC);
}
PreservedAnalyses LowerSwitchPass::run(Function &F,
FunctionAnalysisManager &AM) {
LazyValueInfo *LVI = &AM.getResult<LazyValueAnalysis>(F);
AssumptionCache *AC = AM.getCachedResult<AssumptionAnalysis>(F);
return LowerSwitch(F, LVI, AC) ? PreservedAnalyses::none()
: PreservedAnalyses::all();
}