author | shadchin <shadchin@yandex-team.ru> | 2022-02-10 16:44:39 +0300
---|---|---
committer | Daniil Cherednik <dcherednik@yandex-team.ru> | 2022-02-10 16:44:39 +0300
commit | e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0 (patch) |
tree | 64175d5cadab313b3e7039ebaa06c5bc3295e274 /contrib/libs/llvm12/lib/CodeGen/MachineSink.cpp |
parent | 2598ef1d0aee359b4b6d5fdd1758916d5907d04f (diff) |
download | ydb-e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0.tar.gz |
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/libs/llvm12/lib/CodeGen/MachineSink.cpp')
-rw-r--r-- | contrib/libs/llvm12/lib/CodeGen/MachineSink.cpp | 512 |
1 file changed, 256 insertions, 256 deletions
diff --git a/contrib/libs/llvm12/lib/CodeGen/MachineSink.cpp b/contrib/libs/llvm12/lib/CodeGen/MachineSink.cpp index 34aca6e2d3..378df1b75e 100644 --- a/contrib/libs/llvm12/lib/CodeGen/MachineSink.cpp +++ b/contrib/libs/llvm12/lib/CodeGen/MachineSink.cpp @@ -34,8 +34,8 @@ #include "llvm/CodeGen/MachineOperand.h" #include "llvm/CodeGen/MachinePostDominators.h" #include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/RegisterClassInfo.h" -#include "llvm/CodeGen/RegisterPressure.h" +#include "llvm/CodeGen/RegisterClassInfo.h" +#include "llvm/CodeGen/RegisterPressure.h" #include "llvm/CodeGen/TargetInstrInfo.h" #include "llvm/CodeGen/TargetRegisterInfo.h" #include "llvm/CodeGen/TargetSubtargetInfo.h" @@ -79,18 +79,18 @@ static cl::opt<unsigned> SplitEdgeProbabilityThreshold( "splitted critical edge"), cl::init(40), cl::Hidden); -static cl::opt<unsigned> SinkLoadInstsPerBlockThreshold( - "machine-sink-load-instrs-threshold", - cl::desc("Do not try to find alias store for a load if there is a in-path " - "block whose instruction number is higher than this threshold."), - cl::init(2000), cl::Hidden); - -static cl::opt<unsigned> SinkLoadBlocksThreshold( - "machine-sink-load-blocks-threshold", - cl::desc("Do not try to find alias store for a load if the block number in " - "the straight line is higher than this threshold."), - cl::init(20), cl::Hidden); - +static cl::opt<unsigned> SinkLoadInstsPerBlockThreshold( + "machine-sink-load-instrs-threshold", + cl::desc("Do not try to find alias store for a load if there is a in-path " + "block whose instruction number is higher than this threshold."), + cl::init(2000), cl::Hidden); + +static cl::opt<unsigned> SinkLoadBlocksThreshold( + "machine-sink-load-blocks-threshold", + cl::desc("Do not try to find alias store for a load if the block number in " + "the straight line is higher than this threshold."), + cl::init(20), cl::Hidden); + STATISTIC(NumSunk, "Number of machine instructions sunk"); STATISTIC(NumSplit, "Number of critical edges split"); STATISTIC(NumCoalesces, "Number of copies coalesced"); @@ -108,7 +108,7 @@ namespace { MachineBlockFrequencyInfo *MBFI; const MachineBranchProbabilityInfo *MBPI; AliasAnalysis *AA; - RegisterClassInfo RegClassInfo; + RegisterClassInfo RegClassInfo; // Remember which edges have been considered for breaking. SmallSet<std::pair<MachineBasicBlock*, MachineBasicBlock*>, 8> @@ -142,15 +142,15 @@ namespace { /// current block. DenseSet<DebugVariable> SeenDbgVars; - std::map<std::pair<MachineBasicBlock *, MachineBasicBlock *>, bool> - HasStoreCache; - std::map<std::pair<MachineBasicBlock *, MachineBasicBlock *>, - std::vector<MachineInstr *>> - StoreInstrCache; - - /// Cached BB's register pressure. - std::map<MachineBasicBlock *, std::vector<unsigned>> CachedRegisterPressure; - + std::map<std::pair<MachineBasicBlock *, MachineBasicBlock *>, bool> + HasStoreCache; + std::map<std::pair<MachineBasicBlock *, MachineBasicBlock *>, + std::vector<MachineInstr *>> + StoreInstrCache; + + /// Cached BB's register pressure. + std::map<MachineBasicBlock *, std::vector<unsigned>> CachedRegisterPressure; + public: static char ID; // Pass identification @@ -183,9 +183,9 @@ namespace { MachineBasicBlock *From, MachineBasicBlock *To); - bool hasStoreBetween(MachineBasicBlock *From, MachineBasicBlock *To, - MachineInstr &MI); - + bool hasStoreBetween(MachineBasicBlock *From, MachineBasicBlock *To, + MachineInstr &MI); + /// Postpone the splitting of the given critical /// edge (\p From, \p To). 
/// @@ -211,12 +211,12 @@ namespace { /// to the copy source. void SalvageUnsunkDebugUsersOfCopy(MachineInstr &, MachineBasicBlock *TargetBlock); - bool AllUsesDominatedByBlock(Register Reg, MachineBasicBlock *MBB, - MachineBasicBlock *DefMBB, bool &BreakPHIEdge, - bool &LocalUse) const; + bool AllUsesDominatedByBlock(Register Reg, MachineBasicBlock *MBB, + MachineBasicBlock *DefMBB, bool &BreakPHIEdge, + bool &LocalUse) const; MachineBasicBlock *FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB, bool &BreakPHIEdge, AllSuccsCache &AllSuccessors); - bool isProfitableToSinkTo(Register Reg, MachineInstr &MI, + bool isProfitableToSinkTo(Register Reg, MachineInstr &MI, MachineBasicBlock *MBB, MachineBasicBlock *SuccToSinkTo, AllSuccsCache &AllSuccessors); @@ -227,8 +227,8 @@ namespace { SmallVector<MachineBasicBlock *, 4> & GetAllSortedSuccessors(MachineInstr &MI, MachineBasicBlock *MBB, AllSuccsCache &AllSuccessors) const; - - std::vector<unsigned> &getBBRegisterPressure(MachineBasicBlock &MBB); + + std::vector<unsigned> &getBBRegisterPressure(MachineBasicBlock &MBB); }; } // end anonymous namespace @@ -282,11 +282,11 @@ bool MachineSinking::PerformTrivialForwardCoalescing(MachineInstr &MI, /// occur in blocks dominated by the specified block. If any use is in the /// definition block, then return false since it is never legal to move def /// after uses. -bool MachineSinking::AllUsesDominatedByBlock(Register Reg, - MachineBasicBlock *MBB, - MachineBasicBlock *DefMBB, - bool &BreakPHIEdge, - bool &LocalUse) const { +bool MachineSinking::AllUsesDominatedByBlock(Register Reg, + MachineBasicBlock *MBB, + MachineBasicBlock *DefMBB, + bool &BreakPHIEdge, + bool &LocalUse) const { assert(Register::isVirtualRegister(Reg) && "Only makes sense for vregs"); // Ignore debug uses because debug info doesn't affect the code. @@ -355,7 +355,7 @@ bool MachineSinking::runOnMachineFunction(MachineFunction &MF) { MBFI = UseBlockFreqInfo ? &getAnalysis<MachineBlockFrequencyInfo>() : nullptr; MBPI = &getAnalysis<MachineBranchProbabilityInfo>(); AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); - RegClassInfo.runOnMachineFunction(MF); + RegClassInfo.runOnMachineFunction(MF); bool EverMadeChange = false; @@ -376,9 +376,9 @@ bool MachineSinking::runOnMachineFunction(MachineFunction &MF) { << printMBBReference(*Pair.first) << " -- " << printMBBReference(*NewSucc) << " -- " << printMBBReference(*Pair.second) << '\n'); - if (MBFI) - MBFI->onEdgeSplit(*Pair.first, *NewSucc, *MBPI); - + if (MBFI) + MBFI->onEdgeSplit(*Pair.first, *NewSucc, *MBPI); + MadeChange = true; ++NumSplit; } else @@ -389,9 +389,9 @@ bool MachineSinking::runOnMachineFunction(MachineFunction &MF) { EverMadeChange = true; } - HasStoreCache.clear(); - StoreInstrCache.clear(); - + HasStoreCache.clear(); + StoreInstrCache.clear(); + // Now clear any kill flags for recorded registers. for (auto I : RegsToClearKillFlags) MRI->clearKillFlags(I); @@ -449,8 +449,8 @@ bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) { SeenDbgUsers.clear(); SeenDbgVars.clear(); - // recalculate the bb register pressure after sinking one BB. - CachedRegisterPressure.clear(); + // recalculate the bb register pressure after sinking one BB. 
+ CachedRegisterPressure.clear(); return MadeChange; } @@ -462,7 +462,7 @@ void MachineSinking::ProcessDbgInst(MachineInstr &MI) { DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(), MI.getDebugLoc()->getInlinedAt()); - bool SeenBefore = SeenDbgVars.contains(Var); + bool SeenBefore = SeenDbgVars.contains(Var); MachineOperand &MO = MI.getDebugOperand(0); if (MO.isReg() && MO.getReg().isVirtual()) @@ -593,44 +593,44 @@ bool MachineSinking::PostponeSplitCriticalEdge(MachineInstr &MI, return true; } -std::vector<unsigned> & -MachineSinking::getBBRegisterPressure(MachineBasicBlock &MBB) { - // Currently to save compiling time, MBB's register pressure will not change - // in one ProcessBlock iteration because of CachedRegisterPressure. but MBB's - // register pressure is changed after sinking any instructions into it. - // FIXME: need a accurate and cheap register pressure estiminate model here. - auto RP = CachedRegisterPressure.find(&MBB); - if (RP != CachedRegisterPressure.end()) - return RP->second; - - RegionPressure Pressure; - RegPressureTracker RPTracker(Pressure); - - // Initialize the register pressure tracker. - RPTracker.init(MBB.getParent(), &RegClassInfo, nullptr, &MBB, MBB.end(), - /*TrackLaneMasks*/ false, /*TrackUntiedDefs=*/true); - - for (MachineBasicBlock::iterator MII = MBB.instr_end(), - MIE = MBB.instr_begin(); - MII != MIE; --MII) { - MachineInstr &MI = *std::prev(MII); - if (MI.isDebugValue() || MI.isDebugLabel()) - continue; - RegisterOperands RegOpers; - RegOpers.collect(MI, *TRI, *MRI, false, false); - RPTracker.recedeSkipDebugValues(); - assert(&*RPTracker.getPos() == &MI && "RPTracker sync error!"); - RPTracker.recede(RegOpers); - } - - RPTracker.closeRegion(); - auto It = CachedRegisterPressure.insert( - std::make_pair(&MBB, RPTracker.getPressure().MaxSetPressure)); - return It.first->second; -} - +std::vector<unsigned> & +MachineSinking::getBBRegisterPressure(MachineBasicBlock &MBB) { + // Currently to save compiling time, MBB's register pressure will not change + // in one ProcessBlock iteration because of CachedRegisterPressure. but MBB's + // register pressure is changed after sinking any instructions into it. + // FIXME: need a accurate and cheap register pressure estiminate model here. + auto RP = CachedRegisterPressure.find(&MBB); + if (RP != CachedRegisterPressure.end()) + return RP->second; + + RegionPressure Pressure; + RegPressureTracker RPTracker(Pressure); + + // Initialize the register pressure tracker. + RPTracker.init(MBB.getParent(), &RegClassInfo, nullptr, &MBB, MBB.end(), + /*TrackLaneMasks*/ false, /*TrackUntiedDefs=*/true); + + for (MachineBasicBlock::iterator MII = MBB.instr_end(), + MIE = MBB.instr_begin(); + MII != MIE; --MII) { + MachineInstr &MI = *std::prev(MII); + if (MI.isDebugValue() || MI.isDebugLabel()) + continue; + RegisterOperands RegOpers; + RegOpers.collect(MI, *TRI, *MRI, false, false); + RPTracker.recedeSkipDebugValues(); + assert(&*RPTracker.getPos() == &MI && "RPTracker sync error!"); + RPTracker.recede(RegOpers); + } + + RPTracker.closeRegion(); + auto It = CachedRegisterPressure.insert( + std::make_pair(&MBB, RPTracker.getPressure().MaxSetPressure)); + return It.first->second; +} + /// isProfitableToSinkTo - Return true if it is profitable to sink MI. 
-bool MachineSinking::isProfitableToSinkTo(Register Reg, MachineInstr &MI, +bool MachineSinking::isProfitableToSinkTo(Register Reg, MachineInstr &MI, MachineBasicBlock *MBB, MachineBasicBlock *SuccToSinkTo, AllSuccsCache &AllSuccessors) { @@ -666,73 +666,73 @@ bool MachineSinking::isProfitableToSinkTo(Register Reg, MachineInstr &MI, FindSuccToSinkTo(MI, SuccToSinkTo, BreakPHIEdge, AllSuccessors)) return isProfitableToSinkTo(Reg, MI, SuccToSinkTo, MBB2, AllSuccessors); - MachineLoop *ML = LI->getLoopFor(MBB); - - // If the instruction is not inside a loop, it is not profitable to sink MI to - // a post dominate block SuccToSinkTo. - if (!ML) - return false; - - auto isRegisterPressureSetExceedLimit = [&](const TargetRegisterClass *RC) { - unsigned Weight = TRI->getRegClassWeight(RC).RegWeight; - const int *PS = TRI->getRegClassPressureSets(RC); - // Get register pressure for block SuccToSinkTo. - std::vector<unsigned> BBRegisterPressure = - getBBRegisterPressure(*SuccToSinkTo); - for (; *PS != -1; PS++) - // check if any register pressure set exceeds limit in block SuccToSinkTo - // after sinking. - if (Weight + BBRegisterPressure[*PS] >= - TRI->getRegPressureSetLimit(*MBB->getParent(), *PS)) - return true; - return false; - }; - - // If this instruction is inside a loop and sinking this instruction can make - // more registers live range shorten, it is still prifitable. - for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { - const MachineOperand &MO = MI.getOperand(i); - // Ignore non-register operands. - if (!MO.isReg()) - continue; - Register Reg = MO.getReg(); - if (Reg == 0) - continue; - - // Don't handle physical register. - if (Register::isPhysicalRegister(Reg)) - return false; - - // Users for the defs are all dominated by SuccToSinkTo. - if (MO.isDef()) { - // This def register's live range is shortened after sinking. - bool LocalUse = false; - if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, MBB, BreakPHIEdge, - LocalUse)) - return false; - } else { - MachineInstr *DefMI = MRI->getVRegDef(Reg); - // DefMI is defined outside of loop. There should be no live range - // impact for this operand. Defination outside of loop means: - // 1: defination is outside of loop. - // 2: defination is in this loop, but it is a PHI in the loop header. - if (LI->getLoopFor(DefMI->getParent()) != ML || - (DefMI->isPHI() && LI->isLoopHeader(DefMI->getParent()))) - continue; - // The DefMI is defined inside the loop. - // If sinking this operand makes some register pressure set exceed limit, - // it is not profitable. - if (isRegisterPressureSetExceedLimit(MRI->getRegClass(Reg))) { - LLVM_DEBUG(dbgs() << "register pressure exceed limit, not profitable."); - return false; - } - } - } - - // If MI is in loop and all its operands are alive across the whole loop or if - // no operand sinking make register pressure set exceed limit, it is - // profitable to sink MI. - return true; + MachineLoop *ML = LI->getLoopFor(MBB); + + // If the instruction is not inside a loop, it is not profitable to sink MI to + // a post dominate block SuccToSinkTo. + if (!ML) + return false; + + auto isRegisterPressureSetExceedLimit = [&](const TargetRegisterClass *RC) { + unsigned Weight = TRI->getRegClassWeight(RC).RegWeight; + const int *PS = TRI->getRegClassPressureSets(RC); + // Get register pressure for block SuccToSinkTo. 
+ std::vector<unsigned> BBRegisterPressure = + getBBRegisterPressure(*SuccToSinkTo); + for (; *PS != -1; PS++) + // check if any register pressure set exceeds limit in block SuccToSinkTo + // after sinking. + if (Weight + BBRegisterPressure[*PS] >= + TRI->getRegPressureSetLimit(*MBB->getParent(), *PS)) + return true; + return false; + }; + + // If this instruction is inside a loop and sinking this instruction can make + // more registers live range shorten, it is still prifitable. + for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { + const MachineOperand &MO = MI.getOperand(i); + // Ignore non-register operands. + if (!MO.isReg()) + continue; + Register Reg = MO.getReg(); + if (Reg == 0) + continue; + + // Don't handle physical register. + if (Register::isPhysicalRegister(Reg)) + return false; + + // Users for the defs are all dominated by SuccToSinkTo. + if (MO.isDef()) { + // This def register's live range is shortened after sinking. + bool LocalUse = false; + if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, MBB, BreakPHIEdge, + LocalUse)) + return false; + } else { + MachineInstr *DefMI = MRI->getVRegDef(Reg); + // DefMI is defined outside of loop. There should be no live range + // impact for this operand. Defination outside of loop means: + // 1: defination is outside of loop. + // 2: defination is in this loop, but it is a PHI in the loop header. + if (LI->getLoopFor(DefMI->getParent()) != ML || + (DefMI->isPHI() && LI->isLoopHeader(DefMI->getParent()))) + continue; + // The DefMI is defined inside the loop. + // If sinking this operand makes some register pressure set exceed limit, + // it is not profitable. + if (isRegisterPressureSetExceedLimit(MRI->getRegClass(Reg))) { + LLVM_DEBUG(dbgs() << "register pressure exceed limit, not profitable."); + return false; + } + } + } + + // If MI is in loop and all its operands are alive across the whole loop or if + // no operand sinking make register pressure set exceed limit, it is + // profitable to sink MI. + return true; } /// Get the sorted sequence of successors for this MachineBasicBlock, possibly @@ -745,7 +745,7 @@ MachineSinking::GetAllSortedSuccessors(MachineInstr &MI, MachineBasicBlock *MBB, if (Succs != AllSuccessors.end()) return Succs->second; - SmallVector<MachineBasicBlock *, 4> AllSuccs(MBB->successors()); + SmallVector<MachineBasicBlock *, 4> AllSuccs(MBB->successors()); // Handle cases where sinking can happen but where the sink point isn't a // successor. For example: @@ -1007,97 +1007,97 @@ static void performSink(MachineInstr &MI, MachineBasicBlock &SuccToSinkTo, } } -/// hasStoreBetween - check if there is store betweeen straight line blocks From -/// and To. -bool MachineSinking::hasStoreBetween(MachineBasicBlock *From, - MachineBasicBlock *To, MachineInstr &MI) { - // Make sure From and To are in straight line which means From dominates To - // and To post dominates From. - if (!DT->dominates(From, To) || !PDT->dominates(To, From)) - return true; - - auto BlockPair = std::make_pair(From, To); - - // Does these two blocks pair be queried before and have a definite cached - // result? 
- if (HasStoreCache.find(BlockPair) != HasStoreCache.end()) - return HasStoreCache[BlockPair]; - - if (StoreInstrCache.find(BlockPair) != StoreInstrCache.end()) - return llvm::any_of(StoreInstrCache[BlockPair], [&](MachineInstr *I) { - return I->mayAlias(AA, MI, false); - }); - - bool SawStore = false; - bool HasAliasedStore = false; - DenseSet<MachineBasicBlock *> HandledBlocks; - DenseSet<MachineBasicBlock *> HandledDomBlocks; - // Go through all reachable blocks from From. - for (MachineBasicBlock *BB : depth_first(From)) { - // We insert the instruction at the start of block To, so no need to worry - // about stores inside To. - // Store in block From should be already considered when just enter function - // SinkInstruction. - if (BB == To || BB == From) - continue; - - // We already handle this BB in previous iteration. - if (HandledBlocks.count(BB)) - continue; - - HandledBlocks.insert(BB); - // To post dominates BB, it must be a path from block From. - if (PDT->dominates(To, BB)) { - if (!HandledDomBlocks.count(BB)) - HandledDomBlocks.insert(BB); - - // If this BB is too big or the block number in straight line between From - // and To is too big, stop searching to save compiling time. - if (BB->size() > SinkLoadInstsPerBlockThreshold || - HandledDomBlocks.size() > SinkLoadBlocksThreshold) { - for (auto *DomBB : HandledDomBlocks) { - if (DomBB != BB && DT->dominates(DomBB, BB)) - HasStoreCache[std::make_pair(DomBB, To)] = true; - else if(DomBB != BB && DT->dominates(BB, DomBB)) - HasStoreCache[std::make_pair(From, DomBB)] = true; - } - HasStoreCache[BlockPair] = true; - return true; - } - - for (MachineInstr &I : *BB) { - // Treat as alias conservatively for a call or an ordered memory - // operation. - if (I.isCall() || I.hasOrderedMemoryRef()) { - for (auto *DomBB : HandledDomBlocks) { - if (DomBB != BB && DT->dominates(DomBB, BB)) - HasStoreCache[std::make_pair(DomBB, To)] = true; - else if(DomBB != BB && DT->dominates(BB, DomBB)) - HasStoreCache[std::make_pair(From, DomBB)] = true; - } - HasStoreCache[BlockPair] = true; - return true; - } - - if (I.mayStore()) { - SawStore = true; - // We still have chance to sink MI if all stores between are not - // aliased to MI. - // Cache all store instructions, so that we don't need to go through - // all From reachable blocks for next load instruction. - if (I.mayAlias(AA, MI, false)) - HasAliasedStore = true; - StoreInstrCache[BlockPair].push_back(&I); - } - } - } - } - // If there is no store at all, cache the result. - if (!SawStore) - HasStoreCache[BlockPair] = false; - return HasAliasedStore; -} - +/// hasStoreBetween - check if there is store betweeen straight line blocks From +/// and To. +bool MachineSinking::hasStoreBetween(MachineBasicBlock *From, + MachineBasicBlock *To, MachineInstr &MI) { + // Make sure From and To are in straight line which means From dominates To + // and To post dominates From. + if (!DT->dominates(From, To) || !PDT->dominates(To, From)) + return true; + + auto BlockPair = std::make_pair(From, To); + + // Does these two blocks pair be queried before and have a definite cached + // result? 
+ if (HasStoreCache.find(BlockPair) != HasStoreCache.end()) + return HasStoreCache[BlockPair]; + + if (StoreInstrCache.find(BlockPair) != StoreInstrCache.end()) + return llvm::any_of(StoreInstrCache[BlockPair], [&](MachineInstr *I) { + return I->mayAlias(AA, MI, false); + }); + + bool SawStore = false; + bool HasAliasedStore = false; + DenseSet<MachineBasicBlock *> HandledBlocks; + DenseSet<MachineBasicBlock *> HandledDomBlocks; + // Go through all reachable blocks from From. + for (MachineBasicBlock *BB : depth_first(From)) { + // We insert the instruction at the start of block To, so no need to worry + // about stores inside To. + // Store in block From should be already considered when just enter function + // SinkInstruction. + if (BB == To || BB == From) + continue; + + // We already handle this BB in previous iteration. + if (HandledBlocks.count(BB)) + continue; + + HandledBlocks.insert(BB); + // To post dominates BB, it must be a path from block From. + if (PDT->dominates(To, BB)) { + if (!HandledDomBlocks.count(BB)) + HandledDomBlocks.insert(BB); + + // If this BB is too big or the block number in straight line between From + // and To is too big, stop searching to save compiling time. + if (BB->size() > SinkLoadInstsPerBlockThreshold || + HandledDomBlocks.size() > SinkLoadBlocksThreshold) { + for (auto *DomBB : HandledDomBlocks) { + if (DomBB != BB && DT->dominates(DomBB, BB)) + HasStoreCache[std::make_pair(DomBB, To)] = true; + else if(DomBB != BB && DT->dominates(BB, DomBB)) + HasStoreCache[std::make_pair(From, DomBB)] = true; + } + HasStoreCache[BlockPair] = true; + return true; + } + + for (MachineInstr &I : *BB) { + // Treat as alias conservatively for a call or an ordered memory + // operation. + if (I.isCall() || I.hasOrderedMemoryRef()) { + for (auto *DomBB : HandledDomBlocks) { + if (DomBB != BB && DT->dominates(DomBB, BB)) + HasStoreCache[std::make_pair(DomBB, To)] = true; + else if(DomBB != BB && DT->dominates(BB, DomBB)) + HasStoreCache[std::make_pair(From, DomBB)] = true; + } + HasStoreCache[BlockPair] = true; + return true; + } + + if (I.mayStore()) { + SawStore = true; + // We still have chance to sink MI if all stores between are not + // aliased to MI. + // Cache all store instructions, so that we don't need to go through + // all From reachable blocks for next load instruction. + if (I.mayAlias(AA, MI, false)) + HasAliasedStore = true; + StoreInstrCache[BlockPair].push_back(&I); + } + } + } + } + // If there is no store at all, cache the result. + if (!SawStore) + HasStoreCache[BlockPair] = false; + return HasAliasedStore; +} + /// SinkInstruction - Determine whether it is safe to sink the specified machine /// instruction out of its current block into a successor. bool MachineSinking::SinkInstruction(MachineInstr &MI, bool &SawStore, @@ -1158,9 +1158,9 @@ bool MachineSinking::SinkInstruction(MachineInstr &MI, bool &SawStore, // We cannot sink a load across a critical edge - there may be stores in // other code paths. bool TryBreak = false; - bool Store = - MI.mayLoad() ? hasStoreBetween(ParentBlock, SuccToSinkTo, MI) : true; - if (!MI.isSafeToMove(AA, Store)) { + bool Store = + MI.mayLoad() ? 
hasStoreBetween(ParentBlock, SuccToSinkTo, MI) : true; + if (!MI.isSafeToMove(AA, Store)) { LLVM_DEBUG(dbgs() << " *** NOTE: Won't sink load along critical edge.\n"); TryBreak = true; } @@ -1491,9 +1491,9 @@ static bool hasRegisterDependency(MachineInstr *MI, return HasRegDependency; } -static SmallSet<MCRegister, 4> getRegUnits(MCRegister Reg, - const TargetRegisterInfo *TRI) { - SmallSet<MCRegister, 4> RegUnits; +static SmallSet<MCRegister, 4> getRegUnits(MCRegister Reg, + const TargetRegisterInfo *TRI) { + SmallSet<MCRegister, 4> RegUnits; for (auto RI = MCRegUnitIterator(Reg, TRI); RI.isValid(); ++RI) RegUnits.insert(*RI); return RegUnits; @@ -1543,8 +1543,8 @@ bool PostRAMachineSinking::tryToSinkCopy(MachineBasicBlock &CurBB, continue; // Record debug use of each reg unit. - SmallSet<MCRegister, 4> Units = getRegUnits(MO.getReg(), TRI); - for (MCRegister Reg : Units) + SmallSet<MCRegister, 4> Units = getRegUnits(MO.getReg(), TRI); + for (MCRegister Reg : Units) SeenDbgInstrs[Reg].push_back(MI); } continue; @@ -1592,13 +1592,13 @@ bool PostRAMachineSinking::tryToSinkCopy(MachineBasicBlock &CurBB, if (!MO.isReg() || !MO.isDef()) continue; - SmallSet<MCRegister, 4> Units = getRegUnits(MO.getReg(), TRI); - for (MCRegister Reg : Units) + SmallSet<MCRegister, 4> Units = getRegUnits(MO.getReg(), TRI); + for (MCRegister Reg : Units) for (auto *MI : SeenDbgInstrs.lookup(Reg)) DbgValsToSinkSet.insert(MI); } - SmallVector<MachineInstr *, 4> DbgValsToSink(DbgValsToSinkSet.begin(), - DbgValsToSinkSet.end()); + SmallVector<MachineInstr *, 4> DbgValsToSink(DbgValsToSinkSet.begin(), + DbgValsToSinkSet.end()); // Clear the kill flag if SrcReg is killed between MI and the end of the // block. |
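
The code touched above relies on a simple memoization pattern in two places: `CachedRegisterPressure` caches the per-block register pressure computed by `getBBRegisterPressure()`, and `HasStoreCache`/`StoreInstrCache` cache per-block-pair store queries used by `hasStoreBetween()`. In each case an expensive query is computed once, stored in a `std::map`, and the cache is cleared once sinking may have invalidated it. The following is a minimal standalone sketch of that pattern only, not the pass itself; `BlockId` and `computePressure` are hypothetical stand-ins for `MachineBasicBlock *` and the `RegPressureTracker` walk.

```cpp
#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

// Hypothetical stand-ins for MachineBasicBlock* and the bottom-up
// RegPressureTracker walk performed in getBBRegisterPressure().
using BlockId = uint64_t;

static std::vector<unsigned> computePressure(BlockId B) {
  // Placeholder for the expensive per-block pressure computation.
  return {static_cast<unsigned>(B % 7), static_cast<unsigned>(B % 13)};
}

class PressureCache {
  std::map<BlockId, std::vector<unsigned>> Cached;

public:
  // Mirrors getBBRegisterPressure(): return the cached value if present,
  // otherwise compute it once and remember it.
  const std::vector<unsigned> &get(BlockId B) {
    auto It = Cached.find(B);
    if (It != Cached.end())
      return It->second;
    return Cached.emplace(B, computePressure(B)).first->second;
  }

  // Mirrors CachedRegisterPressure.clear() at the end of ProcessBlock():
  // sinking changes block contents, so stale entries are dropped wholesale.
  void invalidate() { Cached.clear(); }
};

int main() {
  PressureCache C;
  std::cout << C.get(42)[0] << '\n'; // computed
  std::cout << C.get(42)[0] << '\n'; // served from the cache
  C.invalidate();                    // after a ProcessBlock-style pass
  std::cout << C.get(42)[0] << '\n'; // recomputed
}
```

Dropping the whole cache after each `ProcessBlock` iteration (and the store caches once per function) trades some precision for compile time, as the FIXME inside `getBBRegisterPressure()` acknowledges.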