author     shadchin <shadchin@yandex-team.ru>  2022-02-10 16:44:30 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:44:30 +0300
commit     2598ef1d0aee359b4b6d5fdd1758916d5907d04f (patch)
tree       012bb94d777798f1f56ac1cec429509766d05181 /contrib/libs/llvm12/lib/CodeGen/PeepholeOptimizer.cpp
parent     6751af0b0c1b952fede40b19b71da8025b5d8bcf (diff)
download   ydb-2598ef1d0aee359b4b6d5fdd1758916d5907d04f.tar.gz
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/libs/llvm12/lib/CodeGen/PeepholeOptimizer.cpp')
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/PeepholeOptimizer.cpp | 158
1 file changed, 79 insertions, 79 deletions
diff --git a/contrib/libs/llvm12/lib/CodeGen/PeepholeOptimizer.cpp b/contrib/libs/llvm12/lib/CodeGen/PeepholeOptimizer.cpp
index 34ac396c04..f519182d07 100644
--- a/contrib/libs/llvm12/lib/CodeGen/PeepholeOptimizer.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/PeepholeOptimizer.cpp
@@ -178,11 +178,11 @@ namespace {
}
}
- MachineFunctionProperties getRequiredProperties() const override {
- return MachineFunctionProperties()
- .set(MachineFunctionProperties::Property::IsSSA);
- }
-
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties()
+ .set(MachineFunctionProperties::Property::IsSSA);
+ }
+
/// Track Def -> Use info used for rewriting copies.
using RewriteMapTy = SmallDenseMap<RegSubRegPair, ValueTrackerResult>;
@@ -201,39 +201,39 @@ namespace {
SmallPtrSetImpl<MachineInstr *> &LocalMIs);
bool optimizeRecurrence(MachineInstr &PHI);
bool findNextSource(RegSubRegPair RegSubReg, RewriteMapTy &RewriteMap);
- bool isMoveImmediate(MachineInstr &MI, SmallSet<Register, 4> &ImmDefRegs,
- DenseMap<Register, MachineInstr *> &ImmDefMIs);
- bool foldImmediate(MachineInstr &MI, SmallSet<Register, 4> &ImmDefRegs,
- DenseMap<Register, MachineInstr *> &ImmDefMIs);
+ bool isMoveImmediate(MachineInstr &MI, SmallSet<Register, 4> &ImmDefRegs,
+ DenseMap<Register, MachineInstr *> &ImmDefMIs);
+ bool foldImmediate(MachineInstr &MI, SmallSet<Register, 4> &ImmDefRegs,
+ DenseMap<Register, MachineInstr *> &ImmDefMIs);
/// Finds recurrence cycles, but only ones that are formulated around
/// a def operand and a use operand that are tied. If there is a use
/// operand commutable with the tied use operand, find recurrence cycle
/// along that operand as well.
- bool findTargetRecurrence(Register Reg,
- const SmallSet<Register, 2> &TargetReg,
+ bool findTargetRecurrence(Register Reg,
+ const SmallSet<Register, 2> &TargetReg,
RecurrenceCycle &RC);
/// If copy instruction \p MI is a virtual register copy, track it in
- /// the set \p CopyMIs. If this virtual register was previously seen as a
- /// copy, replace the uses of this copy with the previously seen copy's
- /// destination register.
+ /// the set \p CopyMIs. If this virtual register was previously seen as a
+ /// copy, replace the uses of this copy with the previously seen copy's
+ /// destination register.
bool foldRedundantCopy(MachineInstr &MI,
- DenseMap<RegSubRegPair, MachineInstr *> &CopyMIs);
+ DenseMap<RegSubRegPair, MachineInstr *> &CopyMIs);
/// Is the register \p Reg a non-allocatable physical register?
- bool isNAPhysCopy(Register Reg);
+ bool isNAPhysCopy(Register Reg);
/// If copy instruction \p MI is a non-allocatable virtual<->physical
/// register copy, track it in the \p NAPhysToVirtMIs map. If this
/// non-allocatable physical register was previously copied to a virtual
/// register and hasn't been clobbered, the virt->phys copy can be
/// deleted.
- bool foldRedundantNAPhysCopy(
- MachineInstr &MI, DenseMap<Register, MachineInstr *> &NAPhysToVirtMIs);
+ bool foldRedundantNAPhysCopy(
+ MachineInstr &MI, DenseMap<Register, MachineInstr *> &NAPhysToVirtMIs);
bool isLoadFoldable(MachineInstr &MI,
- SmallSet<Register, 16> &FoldAsLoadDefCandidates);
+ SmallSet<Register, 16> &FoldAsLoadDefCandidates);
/// Check whether \p MI is understood by the register coalescer
/// but may require some rewriting.
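Several declarations in this hunk key their maps on a (register, sub-register) pair: RewriteMapTy maps RegSubRegPair to ValueTrackerResult, and foldRedundantCopy keys its CopyMIs map the same way, so copies from %0:sub1 and %0:sub2 are tracked independently. A minimal standalone sketch of such a key (toy types invented for illustration; real LLVM uses DenseMap with a DenseMapInfo specialization):

    #include <map>

    // Toy stand-in for the RegSubRegPair key used above. Real code uses
    // DenseMap; a std::map with operator< keeps this sketch self-contained.
    struct ToyRegSubRegPair {
      unsigned Reg = 0;     // virtual or physical register number
      unsigned SubReg = 0;  // 0 means "the full register"

      bool operator<(const ToyRegSubRegPair &O) const {
        return Reg != O.Reg ? Reg < O.Reg : SubReg < O.SubReg;
      }
    };

    int main() {
      // Distinct sub-registers of the same register are distinct keys,
      // which is why CopyMIs can tell COPY %0:sub1 from COPY %0:sub2.
      std::map<ToyRegSubRegPair, int> CopySeen;
      CopySeen[{5, 1}] = 1; // first copy from %5:sub1
      CopySeen[{5, 2}] = 2; // a copy from %5:sub2 is a different key
      return CopySeen.size() == 2 ? 0 : 1;
    }

Keying on the pair rather than the register alone is what lets the pass treat each sub-register lane as its own value.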
@@ -294,7 +294,7 @@ namespace {
public:
ValueTrackerResult() = default;
- ValueTrackerResult(Register Reg, unsigned SubReg) {
+ ValueTrackerResult(Register Reg, unsigned SubReg) {
addSource(Reg, SubReg);
}
@@ -308,11 +308,11 @@ namespace {
Inst = nullptr;
}
- void addSource(Register SrcReg, unsigned SrcSubReg) {
+ void addSource(Register SrcReg, unsigned SrcSubReg) {
RegSrcs.push_back(RegSubRegPair(SrcReg, SrcSubReg));
}
- void setSource(int Idx, Register SrcReg, unsigned SrcSubReg) {
+ void setSource(int Idx, Register SrcReg, unsigned SrcSubReg) {
assert(Idx < getNumSources() && "Reg pair source out of index");
RegSrcs[Idx] = RegSubRegPair(SrcReg, SrcSubReg);
}
@@ -323,7 +323,7 @@ namespace {
return RegSrcs[Idx];
}
- Register getSrcReg(int Idx) const {
+ Register getSrcReg(int Idx) const {
assert(Idx < getNumSources() && "Reg source out of index");
return RegSrcs[Idx].Reg;
}
@@ -376,7 +376,7 @@ namespace {
unsigned DefSubReg;
/// The register where the value can be found.
- Register Reg;
+ Register Reg;
/// MachineRegisterInfo used to perform tracking.
const MachineRegisterInfo &MRI;
@@ -418,11 +418,11 @@ namespace {
/// Indeed, when \p Reg is a physical register that constructor does not
/// know which definition of \p Reg it should track.
/// Use the next constructor to track a physical register.
- ValueTracker(Register Reg, unsigned DefSubReg,
+ ValueTracker(Register Reg, unsigned DefSubReg,
const MachineRegisterInfo &MRI,
const TargetInstrInfo *TII = nullptr)
: DefSubReg(DefSubReg), Reg(Reg), MRI(MRI), TII(TII) {
- if (!Reg.isPhysical()) {
+ if (!Reg.isPhysical()) {
Def = MRI.getVRegDef(Reg);
DefIdx = MRI.def_begin(Reg).getOperandNo();
}
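The constructor in this hunk seeds Def/DefIdx only for virtual registers: in SSA machine IR a virtual register has exactly one definition, which MRI.getVRegDef can return, while a physical register may be redefined arbitrarily often, so this constructor cannot pick a definition for it (hence the separate constructor mentioned in the comment). A toy sketch of that asymmetry (invented types; not LLVM's MachineRegisterInfo):

    #include <cassert>
    #include <vector>

    // Toy instruction record: the single def a tracker may follow.
    struct ToyInst { unsigned DefReg; };

    // In SSA form each virtual register has one defining instruction, so a
    // tracker can be seeded from a def table. Physical registers have no
    // such unique entry: callers must hand the tracker a concrete
    // definition instead (mirroring ValueTracker's second constructor).
    struct ToyValueTracker {
      const ToyInst *Def = nullptr;

      ToyValueTracker(unsigned Reg, bool IsPhysical,
                      const std::vector<ToyInst> &VRegDefs) {
        if (!IsPhysical)          // mirrors `if (!Reg.isPhysical())`
          Def = &VRegDefs[Reg];   // unique SSA def, like MRI.getVRegDef(Reg)
        // For a physical register Def stays null: ambiguous without help.
      }
    };

    int main() {
      std::vector<ToyInst> Defs = {{0}, {1}, {2}};
      ToyValueTracker Virt(/*Reg=*/1, /*IsPhysical=*/false, Defs);
      ToyValueTracker Phys(/*Reg=*/1, /*IsPhysical=*/true, Defs);
      assert(Virt.Def && !Phys.Def);
      return 0;
    }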
@@ -827,7 +827,7 @@ public:
/// Rewrite the current source with \p NewReg and \p NewSubReg if possible.
/// \return True if the rewriting was possible, false otherwise.
- virtual bool RewriteCurrentSource(Register NewReg, unsigned NewSubReg) = 0;
+ virtual bool RewriteCurrentSource(Register NewReg, unsigned NewSubReg) = 0;
};
/// Rewriter for COPY instructions.
@@ -855,7 +855,7 @@ public:
return true;
}
- bool RewriteCurrentSource(Register NewReg, unsigned NewSubReg) override {
+ bool RewriteCurrentSource(Register NewReg, unsigned NewSubReg) override {
if (CurrentSrcIdx != 1)
return false;
MachineOperand &MOSrc = CopyLike.getOperand(CurrentSrcIdx);
@@ -900,7 +900,7 @@ public:
return true;
}
- bool RewriteCurrentSource(Register NewReg, unsigned NewSubReg) override {
+ bool RewriteCurrentSource(Register NewReg, unsigned NewSubReg) override {
return false;
}
};
@@ -944,7 +944,7 @@ public:
return true;
}
- bool RewriteCurrentSource(Register NewReg, unsigned NewSubReg) override {
+ bool RewriteCurrentSource(Register NewReg, unsigned NewSubReg) override {
if (CurrentSrcIdx != 2)
return false;
// We are rewriting the inserted reg.
@@ -991,7 +991,7 @@ public:
return true;
}
- bool RewriteCurrentSource(Register NewReg, unsigned NewSubReg) override {
+ bool RewriteCurrentSource(Register NewReg, unsigned NewSubReg) override {
// The only source we can rewrite is the input register.
if (CurrentSrcIdx != 1)
return false;
@@ -1069,7 +1069,7 @@ public:
return MODef.getSubReg() == 0;
}
- bool RewriteCurrentSource(Register NewReg, unsigned NewSubReg) override {
+ bool RewriteCurrentSource(Register NewReg, unsigned NewSubReg) override {
// We cannot rewrite out of bound operands.
// Moreover, rewritable sources are at odd positions.
if ((CurrentSrcIdx & 1) != 1 || CurrentSrcIdx > CopyLike.getNumOperands())
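The parity check above encodes the REG_SEQUENCE-style operand layout: operand 0 is the definition, source registers sit at the odd indices, and each source is immediately followed by its sub-register index at the next even index, so only odd positions are legal rewrite targets. A small sketch of walking that layout (toy operand list, not MachineInstr):

    #include <cstdio>
    #include <vector>

    // Toy REG_SEQUENCE operand list: [def, src1, subidx1, src2, subidx2].
    // Only odd positions hold rewritable source registers, which is what
    // the `(CurrentSrcIdx & 1) != 1` guard above is checking.
    int main() {
      std::vector<unsigned> Ops = {/*def=*/10, /*src=*/1, /*sub=*/3,
                                   /*src=*/2, /*sub=*/4};
      for (unsigned Idx = 1; Idx < Ops.size(); Idx += 2)
        std::printf("source %%%u with subreg index %u\n",
                    Ops[Idx], Ops[Idx + 1]);
      return 0;
    }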
@@ -1315,7 +1315,7 @@ bool PeepholeOptimizer::optimizeUncoalescableCopy(
/// We only fold loads to virtual registers and the virtual register defined
/// has a single user.
bool PeepholeOptimizer::isLoadFoldable(
- MachineInstr &MI, SmallSet<Register, 16> &FoldAsLoadDefCandidates) {
+ MachineInstr &MI, SmallSet<Register, 16> &FoldAsLoadDefCandidates) {
if (!MI.canFoldAsLoad() || !MI.mayLoad())
return false;
const MCInstrDesc &MCID = MI.getDesc();
@@ -1326,7 +1326,7 @@ bool PeepholeOptimizer::isLoadFoldable(
// To reduce compilation time, we check MRI->hasOneNonDBGUser when inserting
// loads. It should be checked when processing uses of the load, since
// uses can be removed during peephole.
- if (Reg.isVirtual() && !MI.getOperand(0).getSubReg() &&
+ if (Reg.isVirtual() && !MI.getOperand(0).getSubReg() &&
MRI->hasOneNonDBGUser(Reg)) {
FoldAsLoadDefCandidates.insert(Reg);
return true;
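isLoadFoldable records a load as a folding candidate only when it defines a full (no sub-register) virtual register with a single non-debug user; as the comment notes, the hasOneNonDBGUser test here is just a compile-time filter and must be rechecked when the use is actually visited, since uses can disappear during peephole. A toy version of the candidate filter (invented field names; the real checks live on MachineInstr and MRI):

    #include <set>

    // Toy model of a load's destination operand.
    struct ToyLoad {
      bool CanFoldAsLoad;  // target says the load may fold into its user
      unsigned DefReg;     // register defined by operand 0
      bool IsVirtual;      // virtual (foldable) vs. physical
      unsigned SubReg;     // non-zero subreg defs are not tracked
      unsigned NonDbgUses; // number of non-debug users right now
    };

    // Mirrors the shape of PeepholeOptimizer::isLoadFoldable: insert into
    // the candidate set only when every precondition holds.
    static bool isLoadFoldable(const ToyLoad &MI,
                               std::set<unsigned> &Candidates) {
      if (!MI.CanFoldAsLoad)
        return false;
      if (!MI.IsVirtual || MI.SubReg != 0 || MI.NonDbgUses != 1)
        return false;
      Candidates.insert(MI.DefReg);
      return true;
    }

    int main() {
      std::set<unsigned> FoldAsLoadDefCandidates;
      ToyLoad L{true, /*DefReg=*/7, true, 0, 1};
      return isLoadFoldable(L, FoldAsLoadDefCandidates) ? 0 : 1;
    }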
@@ -1335,15 +1335,15 @@ bool PeepholeOptimizer::isLoadFoldable(
}
bool PeepholeOptimizer::isMoveImmediate(
- MachineInstr &MI, SmallSet<Register, 4> &ImmDefRegs,
- DenseMap<Register, MachineInstr *> &ImmDefMIs) {
+ MachineInstr &MI, SmallSet<Register, 4> &ImmDefRegs,
+ DenseMap<Register, MachineInstr *> &ImmDefMIs) {
const MCInstrDesc &MCID = MI.getDesc();
if (!MI.isMoveImmediate())
return false;
if (MCID.getNumDefs() != 1)
return false;
Register Reg = MI.getOperand(0).getReg();
- if (Reg.isVirtual()) {
+ if (Reg.isVirtual()) {
ImmDefMIs.insert(std::make_pair(Reg, &MI));
ImmDefRegs.insert(Reg);
return true;
@@ -1355,19 +1355,19 @@ bool PeepholeOptimizer::isMoveImmediate(
/// Try folding register operands that are defined by move immediate
/// instructions, i.e. a trivial constant folding optimization, if
/// and only if the def and use are in the same BB.
-bool PeepholeOptimizer::foldImmediate(
- MachineInstr &MI, SmallSet<Register, 4> &ImmDefRegs,
- DenseMap<Register, MachineInstr *> &ImmDefMIs) {
+bool PeepholeOptimizer::foldImmediate(
+ MachineInstr &MI, SmallSet<Register, 4> &ImmDefRegs,
+ DenseMap<Register, MachineInstr *> &ImmDefMIs) {
for (unsigned i = 0, e = MI.getDesc().getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || MO.isDef())
continue;
Register Reg = MO.getReg();
- if (!Reg.isVirtual())
+ if (!Reg.isVirtual())
continue;
if (ImmDefRegs.count(Reg) == 0)
continue;
- DenseMap<Register, MachineInstr *>::iterator II = ImmDefMIs.find(Reg);
+ DenseMap<Register, MachineInstr *>::iterator II = ImmDefMIs.find(Reg);
assert(II != ImmDefMIs.end() && "couldn't find immediate definition");
if (TII->FoldImmediate(MI, *II->second, Reg, MRI)) {
++NumImmFold;
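Together, isMoveImmediate and foldImmediate form a small per-block constant propagation: the first records every `reg = MOV imm` in ImmDefRegs/ImmDefMIs, and the second, for each later same-block use of such a register, asks the target (TII->FoldImmediate) whether the immediate can be folded into the user. A self-contained sketch of the two phases (toy IR and names; the actual fold is target-specific):

    #include <map>
    #include <optional>
    #include <set>
    #include <vector>

    struct ToyInst {
      bool IsMoveImm = false;          // `%dst = MOV imm`
      unsigned DstReg = 0;
      long Imm = 0;
      std::vector<unsigned> UseRegs;   // registers read by this instruction
      std::optional<long> FoldedImm;   // set when an operand got folded
    };

    int main() {
      // Phase 1 analogue of isMoveImmediate: remember immediate-defining
      // registers. Phase 2 analogue of foldImmediate: rewrite their users.
      std::set<unsigned> ImmDefRegs;
      std::map<unsigned, const ToyInst *> ImmDefMIs;

      std::vector<ToyInst> Block = {
          {true, /*DstReg=*/1, /*Imm=*/42, {}, {}},   // %1 = MOV 42
          {false, 2, 0, {1}, {}},                     // %2 = ADD %1, ...
      };

      for (ToyInst &MI : Block) {
        if (MI.IsMoveImm) {
          ImmDefRegs.insert(MI.DstReg);
          ImmDefMIs[MI.DstReg] = &MI;
          continue;
        }
        for (unsigned Reg : MI.UseRegs)
          if (ImmDefRegs.count(Reg))              // use of a known immediate
            MI.FoldedImm = ImmDefMIs[Reg]->Imm;   // stand-in for FoldImmediate
      }
      return Block[1].FoldedImm == 42 ? 0 : 1;
    }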
@@ -1391,30 +1391,30 @@ bool PeepholeOptimizer::foldImmediate(
// %2 = COPY %0:sub1
//
// Should replace %2 uses with %1:sub1
-bool PeepholeOptimizer::foldRedundantCopy(
- MachineInstr &MI, DenseMap<RegSubRegPair, MachineInstr *> &CopyMIs) {
+bool PeepholeOptimizer::foldRedundantCopy(
+ MachineInstr &MI, DenseMap<RegSubRegPair, MachineInstr *> &CopyMIs) {
assert(MI.isCopy() && "expected a COPY machine instruction");
Register SrcReg = MI.getOperand(1).getReg();
- unsigned SrcSubReg = MI.getOperand(1).getSubReg();
- if (!SrcReg.isVirtual())
+ unsigned SrcSubReg = MI.getOperand(1).getSubReg();
+ if (!SrcReg.isVirtual())
return false;
Register DstReg = MI.getOperand(0).getReg();
- if (!DstReg.isVirtual())
+ if (!DstReg.isVirtual())
return false;
- RegSubRegPair SrcPair(SrcReg, SrcSubReg);
-
- if (CopyMIs.insert(std::make_pair(SrcPair, &MI)).second) {
+ RegSubRegPair SrcPair(SrcReg, SrcSubReg);
+
+ if (CopyMIs.insert(std::make_pair(SrcPair, &MI)).second) {
// First copy of this reg seen.
return false;
}
- MachineInstr *PrevCopy = CopyMIs.find(SrcPair)->second;
+ MachineInstr *PrevCopy = CopyMIs.find(SrcPair)->second;
- assert(SrcSubReg == PrevCopy->getOperand(1).getSubReg() &&
- "Unexpected mismatching subreg!");
+ assert(SrcSubReg == PrevCopy->getOperand(1).getSubReg() &&
+ "Unexpected mismatching subreg!");
Register PrevDstReg = PrevCopy->getOperand(0).getReg();
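The map lookup above makes the comment's example concrete: the first `%1 = COPY %0:sub1` claims the (source reg, subreg) key, and a later `%2 = COPY %0:sub1` is redundant because uses of %2 can be redirected to %1. A toy rendering of just that bookkeeping (invented types; the real code also verifies register-class compatibility before replacing uses):

    #include <map>
    #include <utility>

    using RegSubReg = std::pair<unsigned, unsigned>; // (src reg, src subreg)

    struct ToyCopy {
      unsigned DstReg;
      RegSubReg Src;
    };

    // Returns the register that uses of MI.DstReg should be redirected to,
    // or 0 if MI is the first copy from its source and must be kept.
    static unsigned foldRedundantCopy(const ToyCopy &MI,
                                      std::map<RegSubReg, ToyCopy> &CopyMIs) {
      auto [It, Inserted] = CopyMIs.insert({MI.Src, MI});
      if (Inserted)
        return 0;                 // first copy of this (reg, subreg) seen
      return It->second.DstReg;   // reuse the previous copy's destination
    }

    int main() {
      std::map<RegSubReg, ToyCopy> CopyMIs;
      ToyCopy C1{/*DstReg=*/1, {0, 1}};  // %1 = COPY %0:sub1
      ToyCopy C2{/*DstReg=*/2, {0, 1}};  // %2 = COPY %0:sub1  (redundant)
      foldRedundantCopy(C1, CopyMIs);
      unsigned ReplaceWith = foldRedundantCopy(C2, CopyMIs);
      return ReplaceWith == 1 ? 0 : 1;   // uses of %2 become uses of %1
    }

Insert-then-inspect is the natural shape here: a single map operation both registers a first copy and detects a repeat.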
@@ -1432,12 +1432,12 @@ bool PeepholeOptimizer::foldRedundantCopy(
return true;
}
-bool PeepholeOptimizer::isNAPhysCopy(Register Reg) {
- return Reg.isPhysical() && !MRI->isAllocatable(Reg);
+bool PeepholeOptimizer::isNAPhysCopy(Register Reg) {
+ return Reg.isPhysical() && !MRI->isAllocatable(Reg);
}
bool PeepholeOptimizer::foldRedundantNAPhysCopy(
- MachineInstr &MI, DenseMap<Register, MachineInstr *> &NAPhysToVirtMIs) {
+ MachineInstr &MI, DenseMap<Register, MachineInstr *> &NAPhysToVirtMIs) {
assert(MI.isCopy() && "expected a COPY machine instruction");
if (DisableNAPhysCopyOpt)
@@ -1446,17 +1446,17 @@ bool PeepholeOptimizer::foldRedundantNAPhysCopy(
Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
if (isNAPhysCopy(SrcReg) && Register::isVirtualRegister(DstReg)) {
- // %vreg = COPY $physreg
+ // %vreg = COPY $physreg
// Avoid using a datastructure which can track multiple live non-allocatable
// phys->virt copies since LLVM doesn't seem to do this.
NAPhysToVirtMIs.insert({SrcReg, &MI});
return false;
}
- if (!(SrcReg.isVirtual() && isNAPhysCopy(DstReg)))
+ if (!(SrcReg.isVirtual() && isNAPhysCopy(DstReg)))
return false;
- // $physreg = COPY %vreg
+ // $physreg = COPY %vreg
auto PrevCopy = NAPhysToVirtMIs.find(DstReg);
if (PrevCopy == NAPhysToVirtMIs.end()) {
// We can't remove the copy: there was an intervening clobber of the
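The shape being removed here is a round trip through a non-allocatable physical register: a recorded `%vreg = COPY $physreg` followed, with no intervening clobber of $physreg, by `$physreg = COPY %vreg`, whose effect is a no-op. A standalone sketch of the tracking, clobber invalidation, and deletion decision (toy event model; clobbers mirror the regmask handling later in runOnMachineFunction):

    #include <map>

    // Toy events in one basic block, in program order.
    enum class Ev { PhysToVirt, Clobber, VirtToPhys };

    int main() {
      // NAPhysToVirtMIs analogue: phys reg -> virt reg it was last copied to.
      std::map<unsigned, unsigned> NAPhysToVirt;
      int Deleted = 0;

      struct Step { Ev E; unsigned Phys; unsigned Virt; };
      Step Block[] = {
          {Ev::PhysToVirt, /*Phys=*/100, /*Virt=*/1}, // %1 = COPY $p100
          {Ev::VirtToPhys, 100, 1},                   // $p100 = COPY %1 (dead)
          {Ev::Clobber, 100, 0},                      // call clobbers $p100
          {Ev::VirtToPhys, 100, 1},                   // must stay: value changed
      };
      for (const Step &S : Block) {
        switch (S.E) {
        case Ev::PhysToVirt:
          NAPhysToVirt[S.Phys] = S.Virt;
          break;
        case Ev::Clobber:                 // mirrors the regmask invalidation
          NAPhysToVirt.erase(S.Phys);
          break;
        case Ev::VirtToPhys: {
          auto It = NAPhysToVirt.find(S.Phys);
          if (It != NAPhysToVirt.end() && It->second == S.Virt)
            ++Deleted;                    // $phys already holds this value
          break;
        }
        }
      }
      return Deleted == 1 ? 0 : 1;
    }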
@@ -1486,11 +1486,11 @@ bool PeepholeOptimizer::foldRedundantNAPhysCopy(
/// \brief Returns true if \p MO is a virtual register operand.
static bool isVirtualRegisterOperand(MachineOperand &MO) {
- return MO.isReg() && MO.getReg().isVirtual();
+ return MO.isReg() && MO.getReg().isVirtual();
}
bool PeepholeOptimizer::findTargetRecurrence(
- Register Reg, const SmallSet<Register, 2> &TargetRegs,
+ Register Reg, const SmallSet<Register, 2> &TargetRegs,
RecurrenceCycle &RC) {
// Recurrence found if Reg is in TargetRegs.
if (TargetRegs.count(Reg))
@@ -1561,7 +1561,7 @@ bool PeepholeOptimizer::findTargetRecurrence(
/// %1 of ADD instruction, the redundant move instruction can be
/// avoided.
bool PeepholeOptimizer::optimizeRecurrence(MachineInstr &PHI) {
- SmallSet<Register, 2> TargetRegs;
+ SmallSet<Register, 2> TargetRegs;
for (unsigned Idx = 1; Idx < PHI.getNumOperands(); Idx += 2) {
MachineOperand &MO = PHI.getOperand(Idx);
assert(isVirtualRegisterOperand(MO) && "Invalid PHI instruction");
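optimizeRecurrence targets a cycle where a PHI feeds an instruction whose result feeds the PHI again; when the instruction's destination is tied to one source operand but the recurring value arrives on the other, commutable, operand, swapping the operands keeps the recurrence in the tied position and lets the register allocator avoid a copy. A sketch of only the commutation decision (toy two-address form; LLVM does this via findTargetRecurrence and TII->commuteInstruction):

    #include <set>

    // Toy binary op: Dst is tied to Src0 (two-address form), and Src0/Src1
    // are commutable.
    struct ToyAdd {
      unsigned Dst, Src0, Src1;
      bool Commuted = false;
    };

    // If the recurring PHI value sits in the non-tied operand, commute so
    // it lands in the tied one; no extra copy is then needed in the loop.
    static void placeRecurrenceOnTiedOperand(
        ToyAdd &MI, const std::set<unsigned> &TargetRegs) {
      if (!TargetRegs.count(MI.Src0) && TargetRegs.count(MI.Src1)) {
        unsigned Tmp = MI.Src0;
        MI.Src0 = MI.Src1;    // recurrence now flows through the tied operand
        MI.Src1 = Tmp;
        MI.Commuted = true;
      }
    }

    int main() {
      // %1 = PHI(%0, %2); %2 = ADD %3, %1  -- %1 recurs but is not tied.
      std::set<unsigned> TargetRegs = {1};   // PHI inputs, as collected above
      ToyAdd Add{/*Dst=*/2, /*Src0=*/3, /*Src1=*/1};
      placeRecurrenceOnTiedOperand(Add, TargetRegs);
      return (Add.Commuted && Add.Src0 == 1) ? 0 : 1;
    }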
@@ -1617,20 +1617,20 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
// during the scan, if a MI is not in the set, it is assumed to be located
// after. Newly created MIs have to be inserted in the set as well.
SmallPtrSet<MachineInstr*, 16> LocalMIs;
- SmallSet<Register, 4> ImmDefRegs;
- DenseMap<Register, MachineInstr *> ImmDefMIs;
- SmallSet<Register, 16> FoldAsLoadDefCandidates;
+ SmallSet<Register, 4> ImmDefRegs;
+ DenseMap<Register, MachineInstr *> ImmDefMIs;
+ SmallSet<Register, 16> FoldAsLoadDefCandidates;
// Track when a non-allocatable physical register is copied to a virtual
// register so that useless moves can be removed.
//
- // $physreg is the map index; MI is the last valid `%vreg = COPY $physreg`
- // without any intervening re-definition of $physreg.
- DenseMap<Register, MachineInstr *> NAPhysToVirtMIs;
+ // $physreg is the map index; MI is the last valid `%vreg = COPY $physreg`
+ // without any intervening re-definition of $physreg.
+ DenseMap<Register, MachineInstr *> NAPhysToVirtMIs;
- // Set of pairs of virtual registers and their subregs that are copied
- // from.
- DenseMap<RegSubRegPair, MachineInstr *> CopySrcMIs;
+ // Set of pairs of virtual registers and their subregs that are copied
+ // from.
+ DenseMap<RegSubRegPair, MachineInstr *> CopySrcMIs;
bool IsLoopHeader = MLI->isLoopHeader(&MBB);
@@ -1641,10 +1641,10 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
++MII;
LocalMIs.insert(MI);
- // Skip debug instructions. They should not affect this peephole
- // optimization.
+ // Skip debug instructions. They should not affect this peephole
+ // optimization.
if (MI->isDebugInstr())
- continue;
+ continue;
if (MI->isPosition())
continue;
@@ -1674,7 +1674,7 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
} else if (MO.isRegMask()) {
const uint32_t *RegMask = MO.getRegMask();
for (auto &RegMI : NAPhysToVirtMIs) {
- Register Def = RegMI.first;
+ Register Def = RegMI.first;
if (MachineOperand::clobbersPhysReg(RegMask, Def)) {
LLVM_DEBUG(dbgs()
<< "NAPhysCopy: invalidating because of " << *MI);
@@ -1719,8 +1719,8 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
continue;
}
- if (MI->isCopy() && (foldRedundantCopy(*MI, CopySrcMIs) ||
- foldRedundantNAPhysCopy(*MI, NAPhysToVirtMIs))) {
+ if (MI->isCopy() && (foldRedundantCopy(*MI, CopySrcMIs) ||
+ foldRedundantNAPhysCopy(*MI, NAPhysToVirtMIs))) {
LocalMIs.erase(MI);
LLVM_DEBUG(dbgs() << "Deleting redundant copy: " << *MI << "\n");
MI->eraseFromParent();
@@ -1758,13 +1758,13 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
const MachineOperand &MOp = MI->getOperand(i);
if (!MOp.isReg())
continue;
- Register FoldAsLoadDefReg = MOp.getReg();
+ Register FoldAsLoadDefReg = MOp.getReg();
if (FoldAsLoadDefCandidates.count(FoldAsLoadDefReg)) {
// We need to fold load after optimizeCmpInstr, since
// optimizeCmpInstr can enable folding by converting SUB to CMP.
// Save FoldAsLoadDefReg because optimizeLoadInstr() resets it and
// we need it for markUsesInDebugValueAsUndef().
- Register FoldedReg = FoldAsLoadDefReg;
+ Register FoldedReg = FoldAsLoadDefReg;
MachineInstr *DefMI = nullptr;
if (MachineInstr *FoldMI =
TII->optimizeLoadInstr(*MI, MRI, FoldAsLoadDefReg, DefMI)) {
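The comment in this last hunk captures two ordering constraints worth spelling out: load folding must run after optimizeCmpInstr, which can convert a SUB into a CMP and thereby expose a foldable load, and FoldAsLoadDefReg must be saved into FoldedReg first because optimizeLoadInstr resets its in/out argument while the original register is still needed for markUsesInDebugValueAsUndef. The save-before-reset pattern in isolation (toy callee with invented names):

    #include <cassert>

    // Toy stand-in for TII->optimizeLoadInstr: on success it consumes and
    // resets its in/out register argument, just as the real call resets
    // FoldAsLoadDefReg.
    static bool toyOptimizeLoadInstr(unsigned &FoldAsLoadDefReg) {
      FoldAsLoadDefReg = 0; // callee resets the register it folded
      return true;
    }

    int main() {
      unsigned FoldAsLoadDefReg = 7;
      // Save the value first: the caller still needs it after the fold to
      // mark debug uses of the folded register as undef.
      unsigned FoldedReg = FoldAsLoadDefReg;
      if (toyOptimizeLoadInstr(FoldAsLoadDefReg)) {
        assert(FoldAsLoadDefReg == 0 && "callee reset its argument");
        assert(FoldedReg == 7 && "saved copy survives for debug fixup");
      }
      return 0;
    }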