author     shadchin <shadchin@yandex-team.ru>   2022-02-10 16:44:39 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>   2022-02-10 16:44:39 +0300
commit     e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0 (patch)
tree       64175d5cadab313b3e7039ebaa06c5bc3295e274 /contrib/libs/llvm12/lib/CodeGen/InlineSpiller.cpp
parent     2598ef1d0aee359b4b6d5fdd1758916d5907d04f (diff)
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/libs/llvm12/lib/CodeGen/InlineSpiller.cpp')
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/InlineSpiller.cpp | 166
1 file changed, 83 insertions(+), 83 deletions(-)
diff --git a/contrib/libs/llvm12/lib/CodeGen/InlineSpiller.cpp b/contrib/libs/llvm12/lib/CodeGen/InlineSpiller.cpp
index ab43fb9db9..876e1d3f93 100644
--- a/contrib/libs/llvm12/lib/CodeGen/InlineSpiller.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/InlineSpiller.cpp
@@ -153,7 +153,7 @@ public:
unsigned Original);
bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
void hoistAllSpills();
- void LRE_DidCloneVirtReg(Register, Register) override;
+ void LRE_DidCloneVirtReg(Register, Register) override;
};
class InlineSpiller : public Spiller {
@@ -269,14 +269,14 @@ static Register isFullCopyOf(const MachineInstr &MI, Register Reg) {
return Register();
}
-static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) {
- for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
- const MachineOperand &MO = MI.getOperand(I);
- if (MO.isReg() && MO.isDef() && Register::isVirtualRegister(MO.getReg()))
- LIS.getInterval(MO.getReg());
- }
-}
-
+static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) {
+ for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
+ const MachineOperand &MO = MI.getOperand(I);
+ if (MO.isReg() && MO.isDef() && Register::isVirtualRegister(MO.getReg()))
+ LIS.getInterval(MO.getReg());
+ }
+}
+
/// isSnippet - Identify if a live interval is a snippet that should be spilled.
/// It is assumed that SnipLI is a virtual register with the same original as
/// Edit->getReg().
@@ -297,9 +297,9 @@ bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
// Check that all uses satisfy our criteria.
for (MachineRegisterInfo::reg_instr_nodbg_iterator
- RI = MRI.reg_instr_nodbg_begin(SnipLI.reg()),
- E = MRI.reg_instr_nodbg_end();
- RI != E;) {
+ RI = MRI.reg_instr_nodbg_begin(SnipLI.reg()),
+ E = MRI.reg_instr_nodbg_end();
+ RI != E;) {
MachineInstr &MI = *RI++;
// Allow copies to/from Reg.
@@ -308,11 +308,11 @@ bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
// Allow stack slot loads.
int FI;
- if (SnipLI.reg() == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
+ if (SnipLI.reg() == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
continue;
// Allow stack slot stores.
- if (SnipLI.reg() == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
+ if (SnipLI.reg() == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
continue;
// Allow a single additional instruction.
@@ -418,21 +418,21 @@ bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
MII = DefMI;
++MII;
}
- MachineInstrSpan MIS(MII, MBB);
+ MachineInstrSpan MIS(MII, MBB);
// Insert spill without kill flag immediately after def.
TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
MRI.getRegClass(SrcReg), &TRI);
- LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
- for (const MachineInstr &MI : make_range(MIS.begin(), MII))
- getVDefInterval(MI, LIS);
+ LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
+ for (const MachineInstr &MI : make_range(MIS.begin(), MII))
+ getVDefInterval(MI, LIS);
--MII; // Point to store instruction.
LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);
- // If only 1 store instruction is required for the spill, add it
- // to the mergeable list. In X86 AMX, 2 instructions are required to store.
- // We disable the merge for this case.
- if (MIS.begin() == MII)
- HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
+ // If only 1 store instruction is required for the spill, add it
+ // to the mergeable list. In X86 AMX, 2 instructions are required to store.
+ // We disable the merge for this case.
+ if (MIS.begin() == MII)
+ HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
++NumSpills;
return true;
}
@@ -448,7 +448,7 @@ void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
do {
LiveInterval *LI;
std::tie(LI, VNI) = WorkList.pop_back_val();
- Register Reg = LI->reg();
+ Register Reg = LI->reg();
LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
<< VNI->def << " in " << *LI << '\n');
@@ -527,7 +527,7 @@ void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
if (!SnippetCopies.count(MI))
continue;
LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
- assert(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy");
+ assert(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy");
VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
assert(SnipVNI && "Snippet undefined before copy");
WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
@@ -572,7 +572,7 @@ bool InlineSpiller::canGuaranteeAssignmentAfterRemat(Register VReg,
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
// Analyze instruction
SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
- VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, VirtReg.reg(), &Ops);
+ VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, VirtReg.reg(), &Ops);
if (!RI.Reads)
return false;
@@ -584,7 +584,7 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
LLVM_DEBUG(dbgs() << "\tadding <undef> flags: ");
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
- if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg())
+ if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg())
MO.setIsUndef();
}
LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI);
@@ -624,7 +624,7 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
// If we can't guarantee that we'll be able to actually assign the new vreg,
// we can't remat.
- if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg(), MI)) {
+ if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg(), MI)) {
markValueUsed(&VirtReg, ParentVNI);
LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
return false;
@@ -649,7 +649,7 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
// Replace operands
for (const auto &OpPair : Ops) {
MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
- if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg()) {
+ if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg()) {
MO.setReg(NewVReg);
MO.setIsKill();
}
@@ -826,14 +826,14 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
bool WasCopy = MI->isCopy();
Register ImpReg;
- // TII::foldMemoryOperand will do what we need here for statepoint
- // (fold load into use and remove corresponding def). We will replace
- // uses of removed def with loads (spillAroundUses).
- // For that to work we need to untie def and use to pass it through
- // foldMemoryOperand and signal foldPatchpoint that it is allowed to
- // fold them.
- bool UntieRegs = MI->getOpcode() == TargetOpcode::STATEPOINT;
-
+ // TII::foldMemoryOperand will do what we need here for statepoint
+ // (fold load into use and remove corresponding def). We will replace
+ // uses of removed def with loads (spillAroundUses).
+ // For that to work we need to untie def and use to pass it through
+ // foldMemoryOperand and signal foldPatchpoint that it is allowed to
+ // fold them.
+ bool UntieRegs = MI->getOpcode() == TargetOpcode::STATEPOINT;
+
// Spill subregs if the target allows it.
// We always want to spill subregs for stackmap/patchpoint pseudos.
bool SpillSubRegs = TII.isSubregFoldable() ||
@@ -859,7 +859,7 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
if (LoadMI && MO.isDef())
return false;
// Tied use operands should not be passed to foldMemoryOperand.
- if (UntieRegs || !MI->isRegTiedToDefOperand(Idx))
+ if (UntieRegs || !MI->isRegTiedToDefOperand(Idx))
FoldOps.push_back(Idx);
}
@@ -870,31 +870,31 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
MachineInstrSpan MIS(MI, MI->getParent());
- SmallVector<std::pair<unsigned, unsigned> > TiedOps;
- if (UntieRegs)
- for (unsigned Idx : FoldOps) {
- MachineOperand &MO = MI->getOperand(Idx);
- if (!MO.isTied())
- continue;
- unsigned Tied = MI->findTiedOperandIdx(Idx);
- if (MO.isUse())
- TiedOps.emplace_back(Tied, Idx);
- else {
- assert(MO.isDef() && "Tied to not use and def?");
- TiedOps.emplace_back(Idx, Tied);
- }
- MI->untieRegOperand(Idx);
- }
-
+ SmallVector<std::pair<unsigned, unsigned> > TiedOps;
+ if (UntieRegs)
+ for (unsigned Idx : FoldOps) {
+ MachineOperand &MO = MI->getOperand(Idx);
+ if (!MO.isTied())
+ continue;
+ unsigned Tied = MI->findTiedOperandIdx(Idx);
+ if (MO.isUse())
+ TiedOps.emplace_back(Tied, Idx);
+ else {
+ assert(MO.isDef() && "Tied to not use and def?");
+ TiedOps.emplace_back(Idx, Tied);
+ }
+ MI->untieRegOperand(Idx);
+ }
+
MachineInstr *FoldMI =
LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
: TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM);
- if (!FoldMI) {
- // Re-tie operands.
- for (auto Tied : TiedOps)
- MI->tieOperands(Tied.first, Tied.second);
+ if (!FoldMI) {
+ // Re-tie operands.
+ for (auto Tied : TiedOps)
+ MI->tieOperands(Tied.first, Tied.second);
return false;
- }
+ }
// Remove LIS for any dead defs in the original MI not in FoldMI.
for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
@@ -913,7 +913,7 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
// FoldMI does not define this physreg. Remove the LI segment.
assert(MO->isDead() && "Cannot fold physreg def");
SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
- LIS.removePhysRegDefAt(Reg.asMCReg(), Idx);
+ LIS.removePhysRegDefAt(Reg.asMCReg(), Idx);
}
int FI;
@@ -950,11 +950,11 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
++NumFolded;
else if (Ops.front().second == 0) {
++NumSpills;
- // If only 1 store instruction is required for the spill, add it
- // to the mergeable list. In X86 AMX, 2 instructions are required to store.
- // We disable the merge for this case.
- if (std::distance(MIS.begin(), MIS.end()) <= 1)
- HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
+ // If only 1 store instruction is required for the spill, add it
+ // to the mergeable list. In X86 AMX, 2 instructions are required to store.
+ // We disable the merge for this case.
+ if (std::distance(MIS.begin(), MIS.end()) <= 1)
+ HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
} else
++NumReloads;
return true;
@@ -1001,7 +1001,7 @@ void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
MachineInstrSpan MIS(MI, &MBB);
MachineBasicBlock::iterator SpillBefore = std::next(MI);
bool IsRealSpill = isRealSpill(*MI);
-
+
if (IsRealSpill)
TII.storeRegToStackSlot(MBB, SpillBefore, NewVReg, isKill, StackSlot,
MRI.getRegClass(NewVReg), &TRI);
@@ -1015,16 +1015,16 @@ void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
MachineBasicBlock::iterator Spill = std::next(MI);
LIS.InsertMachineInstrRangeInMaps(Spill, MIS.end());
- for (const MachineInstr &MI : make_range(Spill, MIS.end()))
- getVDefInterval(MI, LIS);
+ for (const MachineInstr &MI : make_range(Spill, MIS.end()))
+ getVDefInterval(MI, LIS);
LLVM_DEBUG(
dumpMachineInstrRangeWithSlotIndex(Spill, MIS.end(), LIS, "spill"));
++NumSpills;
- // If only 1 store instruction is required for the spill, add it
- // to the mergeable list. In X86 AMX, 2 instructions are required to store.
- // We disable the merge for this case.
- if (IsRealSpill && std::distance(Spill, MIS.end()) <= 1)
+ // If only 1 store instruction is required for the spill, add it
+ // to the mergeable list. In X86 AMX, 2 instructions are required to store.
+ // We disable the merge for this case.
+ if (IsRealSpill && std::distance(Spill, MIS.end()) <= 1)
HSpiller.addToMergeableSpills(*Spill, StackSlot, Original);
}
@@ -1214,7 +1214,7 @@ void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
// save a copy of LiveInterval in StackSlotToOrigLI because the original
// LiveInterval may be cleared after all its references are spilled.
if (StackSlotToOrigLI.find(StackSlot) == StackSlotToOrigLI.end()) {
- auto LI = std::make_unique<LiveInterval>(OrigLI.reg(), OrigLI.weight());
+ auto LI = std::make_unique<LiveInterval>(OrigLI.reg(), OrigLI.weight());
LI->assign(OrigLI, Allocator);
StackSlotToOrigLI[StackSlot] = std::move(LI);
}
@@ -1242,7 +1242,7 @@ bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
MachineBasicBlock &BB, Register &LiveReg) {
SlotIndex Idx;
- Register OrigReg = OrigLI.reg();
+ Register OrigReg = OrigLI.reg();
MachineBasicBlock::iterator MI = IPA.getLastInsertPointIter(OrigLI, BB);
if (MI != BB.end())
Idx = LIS.getInstructionIndex(*MI);
@@ -1570,13 +1570,13 @@ void HoistSpillHelper::hoistAllSpills() {
for (auto const &Insert : SpillsToIns) {
MachineBasicBlock *BB = Insert.first;
Register LiveReg = Insert.second;
- MachineBasicBlock::iterator MII = IPA.getLastInsertPointIter(OrigLI, *BB);
- MachineInstrSpan MIS(MII, BB);
- TII.storeRegToStackSlot(*BB, MII, LiveReg, false, Slot,
+ MachineBasicBlock::iterator MII = IPA.getLastInsertPointIter(OrigLI, *BB);
+ MachineInstrSpan MIS(MII, BB);
+ TII.storeRegToStackSlot(*BB, MII, LiveReg, false, Slot,
MRI.getRegClass(LiveReg), &TRI);
- LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
- for (const MachineInstr &MI : make_range(MIS.begin(), MII))
- getVDefInterval(MI, LIS);
+ LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
+ for (const MachineInstr &MI : make_range(MIS.begin(), MII))
+ getVDefInterval(MI, LIS);
++NumSpills;
}
@@ -1596,13 +1596,13 @@ void HoistSpillHelper::hoistAllSpills() {
/// For VirtReg clone, the \p New register should have the same physreg or
/// stackslot as the \p Old register.
-void HoistSpillHelper::LRE_DidCloneVirtReg(Register New, Register Old) {
+void HoistSpillHelper::LRE_DidCloneVirtReg(Register New, Register Old) {
if (VRM.hasPhys(Old))
VRM.assignVirt2Phys(New, VRM.getPhys(Old));
else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
else
llvm_unreachable("VReg should be assigned either physreg or stackslot");
- if (VRM.hasShape(Old))
- VRM.assignVirt2Shape(New, VRM.getShape(Old));
+ if (VRM.hasShape(Old))
+ VRM.assignVirt2Shape(New, VRM.getShape(Old));
}