//=== AArch64PostLegalizerCombiner.cpp --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Post-legalization combines on generic MachineInstrs.
///
/// The combines here must preserve instruction legality.
///
/// Lowering combines (e.g. pseudo matching) should be handled by
/// AArch64PostLegalizerLowering.
///
/// Combines which don't rely on instruction legality should go in the
/// AArch64PreLegalizerCombiner.
///
//===----------------------------------------------------------------------===//

#include "AArch64TargetMachine.h"
#include "llvm/CodeGen/GlobalISel/Combiner.h"
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/Support/Debug.h"
#define DEBUG_TYPE "aarch64-postlegalizer-combiner"
using namespace llvm;
using namespace MIPatternMatch;
/// This combine tries do what performExtractVectorEltCombine does in SDAG.
/// Rewrite for pairwise fadd pattern
///   (s32 (g_extract_vector_elt
///           (g_fadd (vXs32 Other)
///                   (g_vector_shuffle (vXs32 Other) undef <1,X,...> )) 0))
/// ->
///   (s32 (g_fadd (g_extract_vector_elt (vXs32 Other) 0)
///                (g_extract_vector_elt (vXs32 Other) 1)))
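///
/// For example (schematic MIR; the register names and the <2 x s32> type are
/// illustrative only):
///   %shuf:_(<2 x s32>) = G_SHUFFLE_VECTOR %vec(<2 x s32>), %undef, shufflemask(1, undef)
///   %sum:_(<2 x s32>) = G_FADD %vec, %shuf
///   %res:_(s32) = G_EXTRACT_VECTOR_ELT %sum(<2 x s32>), %zero(s64)
/// becomes
///   %e0:_(s32) = G_EXTRACT_VECTOR_ELT %vec(<2 x s32>), %zero(s64)
///   %e1:_(s32) = G_EXTRACT_VECTOR_ELT %vec(<2 x s32>), %one(s64)
///   %res:_(s32) = G_FADD %e0, %e1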
bool matchExtractVecEltPairwiseAdd(
MachineInstr &MI, MachineRegisterInfo &MRI,
std::tuple<unsigned, LLT, Register> &MatchInfo) {
Register Src1 = MI.getOperand(1).getReg();
Register Src2 = MI.getOperand(2).getReg();
LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
auto Cst = getIConstantVRegValWithLookThrough(Src2, MRI);
if (!Cst || Cst->Value != 0)
return false;
// SDAG also checks for FullFP16, but this looks to be beneficial anyway.
// Now check for an fadd operation. TODO: expand this for integer add?
auto *FAddMI = getOpcodeDef(TargetOpcode::G_FADD, Src1, MRI);
if (!FAddMI)
return false;
  // If we add support for integer add, we must restrict these types to s64.
unsigned DstSize = DstTy.getSizeInBits();
if (DstSize != 16 && DstSize != 32 && DstSize != 64)
return false;
Register Src1Op1 = FAddMI->getOperand(1).getReg();
Register Src1Op2 = FAddMI->getOperand(2).getReg();
MachineInstr *Shuffle =
getOpcodeDef(TargetOpcode::G_SHUFFLE_VECTOR, Src1Op2, MRI);
MachineInstr *Other = MRI.getVRegDef(Src1Op1);
if (!Shuffle) {
Shuffle = getOpcodeDef(TargetOpcode::G_SHUFFLE_VECTOR, Src1Op1, MRI);
Other = MRI.getVRegDef(Src1Op2);
}
// We're looking for a shuffle that moves the second element to index 0.
if (Shuffle && Shuffle->getOperand(3).getShuffleMask()[0] == 1 &&
Other == MRI.getVRegDef(Shuffle->getOperand(1).getReg())) {
std::get<0>(MatchInfo) = TargetOpcode::G_FADD;
std::get<1>(MatchInfo) = DstTy;
std::get<2>(MatchInfo) = Other->getOperand(0).getReg();
return true;
}
return false;
}
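
/// Build the replacement for the pairwise-add pattern matched above: extract
/// elements 0 and 1 of the source vector and add them with a scalar G_FADD.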
bool applyExtractVecEltPairwiseAdd(
MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &B,
std::tuple<unsigned, LLT, Register> &MatchInfo) {
unsigned Opc = std::get<0>(MatchInfo);
assert(Opc == TargetOpcode::G_FADD && "Unexpected opcode!");
// We want to generate two extracts of elements 0 and 1, and add them.
LLT Ty = std::get<1>(MatchInfo);
Register Src = std::get<2>(MatchInfo);
LLT s64 = LLT::scalar(64);
B.setInstrAndDebugLoc(MI);
auto Elt0 = B.buildExtractVectorElement(Ty, Src, B.buildConstant(s64, 0));
auto Elt1 = B.buildExtractVectorElement(Ty, Src, B.buildConstant(s64, 1));
B.buildInstr(Opc, {MI.getOperand(0).getReg()}, {Elt0, Elt1});
MI.eraseFromParent();
return true;
}

static bool isSignExtended(Register R, MachineRegisterInfo &MRI) {
// TODO: check if extended build vector as well.
unsigned Opc = MRI.getVRegDef(R)->getOpcode();
return Opc == TargetOpcode::G_SEXT || Opc == TargetOpcode::G_SEXT_INREG;
}

static bool isZeroExtended(Register R, MachineRegisterInfo &MRI) {
// TODO: check if extended build vector as well.
return MRI.getVRegDef(R)->getOpcode() == TargetOpcode::G_ZEXT;
}
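
/// Try to rewrite a G_MUL by a constant into a cheaper sequence of shifts and
/// adds/subs (ported from the equivalent SelectionDAG combine in
/// AArch64ISelLowering). On success, the rewrite is recorded in \p ApplyFn and
/// later emitted by applyAArch64MulConstCombine.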
bool matchAArch64MulConstCombine(
MachineInstr &MI, MachineRegisterInfo &MRI,
std::function<void(MachineIRBuilder &B, Register DstReg)> &ApplyFn) {
assert(MI.getOpcode() == TargetOpcode::G_MUL);
Register LHS = MI.getOperand(1).getReg();
Register RHS = MI.getOperand(2).getReg();
Register Dst = MI.getOperand(0).getReg();
const LLT Ty = MRI.getType(LHS);
// The below optimizations require a constant RHS.
auto Const = getIConstantVRegValWithLookThrough(RHS, MRI);
if (!Const)
return false;
const APInt ConstValue = Const->Value.sextOrSelf(Ty.getSizeInBits());
// The following code is ported from AArch64ISelLowering.
// Multiplication of a power of two plus/minus one can be done more
  // cheaply as a shift+add/sub. For now, this is true unilaterally. If
// future CPUs have a cheaper MADD instruction, this may need to be
// gated on a subtarget feature. For Cyclone, 32-bit MADD is 4 cycles and
// 64-bit is 5 cycles, so this is always a win.
// More aggressively, some multiplications N0 * C can be lowered to
// shift+add+shift if the constant C = A * B where A = 2^N + 1 and B = 2^M,
// e.g. 6=3*2=(2+1)*2.
// TODO: consider lowering more cases, e.g. C = 14, -6, -14 or even 45
  // which equals (1+2)*16-(1+2).
// TrailingZeroes is used to test if the mul can be lowered to
// shift+add+shift.
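  //
  // Worked example (illustrative): C = 6 = (2^1 + 1) * 2^1, so x * 6 can be
  // emitted as shift+add+shift:
  //   t = (x << 1) + x    ; x * 3
  //   r = t << 1          ; x * 6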
unsigned TrailingZeroes = ConstValue.countTrailingZeros();
if (TrailingZeroes) {
// Conservatively do not lower to shift+add+shift if the mul might be
// folded into smul or umul.
if (MRI.hasOneNonDBGUse(LHS) &&
(isSignExtended(LHS, MRI) || isZeroExtended(LHS, MRI)))
return false;
// Conservatively do not lower to shift+add+shift if the mul might be
// folded into madd or msub.
if (MRI.hasOneNonDBGUse(Dst)) {
MachineInstr &UseMI = *MRI.use_instr_begin(Dst);
unsigned UseOpc = UseMI.getOpcode();
if (UseOpc == TargetOpcode::G_ADD || UseOpc == TargetOpcode::G_PTR_ADD ||
UseOpc == TargetOpcode::G_SUB)
return false;
}
}
// Use ShiftedConstValue instead of ConstValue to support both shift+add/sub
// and shift+add+shift.
APInt ShiftedConstValue = ConstValue.ashr(TrailingZeroes);
unsigned ShiftAmt, AddSubOpc;
// Is the shifted value the LHS operand of the add/sub?
bool ShiftValUseIsLHS = true;
// Do we need to negate the result?
bool NegateResult = false;
if (ConstValue.isNonNegative()) {
// (mul x, 2^N + 1) => (add (shl x, N), x)
// (mul x, 2^N - 1) => (sub (shl x, N), x)
// (mul x, (2^N + 1) * 2^M) => (shl (add (shl x, N), x), M)
APInt SCVMinus1 = ShiftedConstValue - 1;
APInt CVPlus1 = ConstValue + 1;
if (SCVMinus1.isPowerOf2()) {
ShiftAmt = SCVMinus1.logBase2();
AddSubOpc = TargetOpcode::G_ADD;
} else if (CVPlus1.isPowerOf2()) {
ShiftAmt = CVPlus1.logBase2();
AddSubOpc = TargetOpcode::G_SUB;
} else
return false;
} else {
// (mul x, -(2^N - 1)) => (sub x, (shl x, N))
// (mul x, -(2^N + 1)) => - (add (shl x, N), x)
APInt CVNegPlus1 = -ConstValue + 1;
APInt CVNegMinus1 = -ConstValue - 1;
if (CVNegPlus1.isPowerOf2()) {
ShiftAmt = CVNegPlus1.logBase2();
AddSubOpc = TargetOpcode::G_SUB;
ShiftValUseIsLHS = false;
} else if (CVNegMinus1.isPowerOf2()) {
ShiftAmt = CVNegMinus1.logBase2();
AddSubOpc = TargetOpcode::G_ADD;
NegateResult = true;
} else
return false;
}
if (NegateResult && TrailingZeroes)
return false;
ApplyFn = [=](MachineIRBuilder &B, Register DstReg) {
auto Shift = B.buildConstant(LLT::scalar(64), ShiftAmt);
auto ShiftedVal = B.buildShl(Ty, LHS, Shift);
Register AddSubLHS = ShiftValUseIsLHS ? ShiftedVal.getReg(0) : LHS;
Register AddSubRHS = ShiftValUseIsLHS ? LHS : ShiftedVal.getReg(0);
auto Res = B.buildInstr(AddSubOpc, {Ty}, {AddSubLHS, AddSubRHS});
assert(!(NegateResult && TrailingZeroes) &&
"NegateResult and TrailingZeroes cannot both be true for now.");
// Negate the result.
if (NegateResult) {
B.buildSub(DstReg, B.buildConstant(Ty, 0), Res);
return;
}
// Shift the result.
if (TrailingZeroes) {
B.buildShl(DstReg, Res, B.buildConstant(LLT::scalar(64), TrailingZeroes));
return;
}
B.buildCopy(DstReg, Res.getReg(0));
};
return true;
}
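
/// Emit the sequence recorded by matchAArch64MulConstCombine in place of the
/// original G_MUL, then erase it.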
bool applyAArch64MulConstCombine(
MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &B,
std::function<void(MachineIRBuilder &B, Register DstReg)> &ApplyFn) {
B.setInstrAndDebugLoc(MI);
ApplyFn(B, MI.getOperand(0).getReg());
MI.eraseFromParent();
return true;
}

/// Try to fold a G_MERGE_VALUES of 2 s32 sources, where the second source
/// is a zero, into a G_ZEXT of the first.
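///
/// E.g. %d:_(s64) = G_MERGE_VALUES %a:_(s32), %zero:_(s32), where %zero is a
/// G_CONSTANT of 0, becomes %d:_(s64) = G_ZEXT %a:_(s32).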
bool matchFoldMergeToZext(MachineInstr &MI, MachineRegisterInfo &MRI) {
auto &Merge = cast<GMerge>(MI);
LLT SrcTy = MRI.getType(Merge.getSourceReg(0));
if (SrcTy != LLT::scalar(32) || Merge.getNumSources() != 2)
return false;
return mi_match(Merge.getSourceReg(1), MRI, m_SpecificICst(0));
}

void applyFoldMergeToZext(MachineInstr &MI, MachineRegisterInfo &MRI,
MachineIRBuilder &B, GISelChangeObserver &Observer) {
// Mutate %d(s64) = G_MERGE_VALUES %a(s32), 0(s32)
// ->
// %d(s64) = G_ZEXT %a(s32)
Observer.changingInstr(MI);
MI.setDesc(B.getTII().get(TargetOpcode::G_ZEXT));
MI.RemoveOperand(2);
Observer.changedInstr(MI);
}

/// \returns True if a G_ANYEXT instruction \p MI should be mutated to a G_ZEXT
/// instruction.
static bool matchMutateAnyExtToZExt(MachineInstr &MI,
                                    MachineRegisterInfo &MRI) {
// If this is coming from a scalar compare then we can use a G_ZEXT instead of
// a G_ANYEXT:
//
// %cmp:_(s32) = G_[I|F]CMP ... <-- produces 0/1.
// %ext:_(s64) = G_ANYEXT %cmp(s32)
//
// By doing this, we can leverage more KnownBits combines.
assert(MI.getOpcode() == TargetOpcode::G_ANYEXT);
Register Dst = MI.getOperand(0).getReg();
Register Src = MI.getOperand(1).getReg();
return MRI.getType(Dst).isScalar() &&
mi_match(Src, MRI,
m_any_of(m_GICmp(m_Pred(), m_Reg(), m_Reg()),
m_GFCmp(m_Pred(), m_Reg(), m_Reg())));
}

static void applyMutateAnyExtToZExt(MachineInstr &MI, MachineRegisterInfo &MRI,
MachineIRBuilder &B,
GISelChangeObserver &Observer) {
Observer.changingInstr(MI);
MI.setDesc(B.getTII().get(TargetOpcode::G_ZEXT));
Observer.changedInstr(MI);
}

/// Match a 128-bit store of zero and split it into two 64-bit stores, for
/// size/performance reasons.
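///
/// Schematically (register names here are illustrative only):
///   G_STORE %zerovec(<2 x s64>), %ptr(p0)
/// becomes
///   %highptr:_(p0) = G_PTR_ADD %ptr, 8
///   G_STORE %zero(s64), %ptr(p0)
///   G_STORE %zero(s64), %highptr(p0)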
static bool matchSplitStoreZero128(MachineInstr &MI, MachineRegisterInfo &MRI) {
GStore &Store = cast<GStore>(MI);
if (!Store.isSimple())
return false;
LLT ValTy = MRI.getType(Store.getValueReg());
if (!ValTy.isVector() || ValTy.getSizeInBits() != 128)
return false;
if (ValTy.getSizeInBits() != Store.getMemSizeInBits())
return false; // Don't split truncating stores.
if (!MRI.hasOneNonDBGUse(Store.getValueReg()))
return false;
auto MaybeCst = isConstantOrConstantSplatVector(
*MRI.getVRegDef(Store.getValueReg()), MRI);
return MaybeCst && MaybeCst->isZero();
}

static void applySplitStoreZero128(MachineInstr &MI, MachineRegisterInfo &MRI,
MachineIRBuilder &B,
GISelChangeObserver &Observer) {
B.setInstrAndDebugLoc(MI);
GStore &Store = cast<GStore>(MI);
assert(MRI.getType(Store.getValueReg()).isVector() &&
"Expected a vector store value");
LLT NewTy = LLT::scalar(64);
Register PtrReg = Store.getPointerReg();
auto Zero = B.buildConstant(NewTy, 0);
auto HighPtr = B.buildPtrAdd(MRI.getType(PtrReg), PtrReg,
B.buildConstant(LLT::scalar(64), 8));
auto &MF = *MI.getMF();
auto *LowMMO = MF.getMachineMemOperand(&Store.getMMO(), 0, NewTy);
auto *HighMMO = MF.getMachineMemOperand(&Store.getMMO(), 8, NewTy);
B.buildStore(Zero, PtrReg, *LowMMO);
B.buildStore(Zero, HighPtr, *HighMMO);
Store.eraseFromParent();
}

#define AARCH64POSTLEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_DEPS
#include "AArch64GenPostLegalizeGICombiner.inc"
#undef AARCH64POSTLEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_DEPS

namespace {
#define AARCH64POSTLEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_H
#include "AArch64GenPostLegalizeGICombiner.inc"
#undef AARCH64POSTLEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_H
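
/// Glue between the generated rule set (AArch64GenPostLegalizeGICombiner.inc)
/// and the generic Combiner driver; carries the analyses (known bits,
/// dominator tree) that the combines may query.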
class AArch64PostLegalizerCombinerInfo : public CombinerInfo {
GISelKnownBits *KB;
MachineDominatorTree *MDT;
public:
AArch64GenPostLegalizerCombinerHelperRuleConfig GeneratedRuleCfg;
AArch64PostLegalizerCombinerInfo(bool EnableOpt, bool OptSize, bool MinSize,
GISelKnownBits *KB,
MachineDominatorTree *MDT)
: CombinerInfo(/*AllowIllegalOps*/ true, /*ShouldLegalizeIllegal*/ false,
/*LegalizerInfo*/ nullptr, EnableOpt, OptSize, MinSize),
KB(KB), MDT(MDT) {
if (!GeneratedRuleCfg.parseCommandLineOption())
report_fatal_error("Invalid rule identifier");
}
virtual bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
MachineIRBuilder &B) const override;
};

bool AArch64PostLegalizerCombinerInfo::combine(GISelChangeObserver &Observer,
MachineInstr &MI,
MachineIRBuilder &B) const {
const auto *LI =
MI.getParent()->getParent()->getSubtarget().getLegalizerInfo();
CombinerHelper Helper(Observer, B, KB, MDT, LI);
AArch64GenPostLegalizerCombinerHelper Generated(GeneratedRuleCfg);
return Generated.tryCombineAll(Observer, MI, B, Helper);
}

#define AARCH64POSTLEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_CPP
#include "AArch64GenPostLegalizeGICombiner.inc"
#undef AARCH64POSTLEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_CPP
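
/// MachineFunction pass that runs the post-legalization combines over every
/// instruction in the function via the generic GlobalISel Combiner.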
class AArch64PostLegalizerCombiner : public MachineFunctionPass {
public:
static char ID;
AArch64PostLegalizerCombiner(bool IsOptNone = false);
StringRef getPassName() const override {
return "AArch64PostLegalizerCombiner";
}
bool runOnMachineFunction(MachineFunction &MF) override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
private:
bool IsOptNone;
};
} // end anonymous namespace

void AArch64PostLegalizerCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetPassConfig>();
AU.setPreservesCFG();
getSelectionDAGFallbackAnalysisUsage(AU);
AU.addRequired<GISelKnownBitsAnalysis>();
AU.addPreserved<GISelKnownBitsAnalysis>();
if (!IsOptNone) {
AU.addRequired<MachineDominatorTree>();
AU.addPreserved<MachineDominatorTree>();
AU.addRequired<GISelCSEAnalysisWrapperPass>();
AU.addPreserved<GISelCSEAnalysisWrapperPass>();
}
MachineFunctionPass::getAnalysisUsage(AU);
}

AArch64PostLegalizerCombiner::AArch64PostLegalizerCombiner(bool IsOptNone)
: MachineFunctionPass(ID), IsOptNone(IsOptNone) {
initializeAArch64PostLegalizerCombinerPass(*PassRegistry::getPassRegistry());
}

bool AArch64PostLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
if (MF.getProperties().hasProperty(
MachineFunctionProperties::Property::FailedISel))
return false;
assert(MF.getProperties().hasProperty(
MachineFunctionProperties::Property::Legalized) &&
"Expected a legalized function?");
auto *TPC = &getAnalysis<TargetPassConfig>();
const Function &F = MF.getFunction();
bool EnableOpt =
MF.getTarget().getOptLevel() != CodeGenOpt::None && !skipFunction(F);
GISelKnownBits *KB = &getAnalysis<GISelKnownBitsAnalysis>().get(MF);
MachineDominatorTree *MDT =
IsOptNone ? nullptr : &getAnalysis<MachineDominatorTree>();
AArch64PostLegalizerCombinerInfo PCInfo(EnableOpt, F.hasOptSize(),
F.hasMinSize(), KB, MDT);
GISelCSEAnalysisWrapper &Wrapper =
getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
auto *CSEInfo = &Wrapper.get(TPC->getCSEConfig());
Combiner C(PCInfo, TPC);
return C.combineMachineInstrs(MF, CSEInfo);
}

char AArch64PostLegalizerCombiner::ID = 0;
INITIALIZE_PASS_BEGIN(AArch64PostLegalizerCombiner, DEBUG_TYPE,
"Combine AArch64 MachineInstrs after legalization", false,
false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelKnownBitsAnalysis)
INITIALIZE_PASS_END(AArch64PostLegalizerCombiner, DEBUG_TYPE,
"Combine AArch64 MachineInstrs after legalization", false,
                    false)

namespace llvm {
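/// Factory hook used by the AArch64 target's pass pipeline to create this
/// pass (IsOptNone disables the analyses that are only worthwhile when
/// optimizing).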
FunctionPass *createAArch64PostLegalizerCombiner(bool IsOptNone) {
return new AArch64PostLegalizerCombiner(IsOptNone);
}
} // end namespace llvm