author | shadchin <shadchin@yandex-team.ru> | 2022-02-10 16:44:30 +0300
committer | Daniil Cherednik <dcherednik@yandex-team.ru> | 2022-02-10 16:44:30 +0300
commit | 2598ef1d0aee359b4b6d5fdd1758916d5907d04f (patch)
tree | 012bb94d777798f1f56ac1cec429509766d05181 /contrib/libs/llvm12/lib/CodeGen/MachineInstr.cpp
parent | 6751af0b0c1b952fede40b19b71da8025b5d8bcf (diff)
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/libs/llvm12/lib/CodeGen/MachineInstr.cpp')
-rw-r--r-- | contrib/libs/llvm12/lib/CodeGen/MachineInstr.cpp | 238
1 file changed, 119 insertions(+), 119 deletions(-)
diff --git a/contrib/libs/llvm12/lib/CodeGen/MachineInstr.cpp b/contrib/libs/llvm12/lib/CodeGen/MachineInstr.cpp
index b6cfd7dcbf..c6dd640fa1 100644
--- a/contrib/libs/llvm12/lib/CodeGen/MachineInstr.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/MachineInstr.cpp
@@ -34,7 +34,7 @@
 #include "llvm/CodeGen/MachineOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/PseudoSourceValue.h"
-#include "llvm/CodeGen/StackMaps.h"
+#include "llvm/CodeGen/StackMaps.h"
 #include "llvm/CodeGen/TargetInstrInfo.h"
 #include "llvm/CodeGen/TargetRegisterInfo.h"
 #include "llvm/CodeGen/TargetSubtargetInfo.h"
@@ -117,7 +117,7 @@ void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) {
 /// the MCInstrDesc.
 MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &tid,
                            DebugLoc dl, bool NoImp)
-    : MCID(&tid), debugLoc(std::move(dl)), DebugInstrNum(0) {
+    : MCID(&tid), debugLoc(std::move(dl)), DebugInstrNum(0) {
   assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
 
   // Reserve space for the expected number of operands.
@@ -131,12 +131,12 @@ MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &tid,
     addImplicitDefUseOperands(MF);
 }
 
-/// MachineInstr ctor - Copies MachineInstr arg exactly.
-/// Does not copy the number from debug instruction numbering, to preserve
-/// uniqueness.
+/// MachineInstr ctor - Copies MachineInstr arg exactly.
+/// Does not copy the number from debug instruction numbering, to preserve
+/// uniqueness.
 MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
-    : MCID(&MI.getDesc()), Info(MI.Info), debugLoc(MI.getDebugLoc()),
-      DebugInstrNum(0) {
+    : MCID(&MI.getDesc()), Info(MI.Info), debugLoc(MI.getDebugLoc()),
+      DebugInstrNum(0) {
   assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
 
   CapOperands = OperandCapacity::get(MI.getNumOperands());
@@ -150,10 +150,10 @@ MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
   setFlags(MI.Flags);
 }
 
-void MachineInstr::moveBefore(MachineInstr *MovePos) {
-  MovePos->getParent()->splice(MovePos, getParent(), getIterator());
-}
-
+void MachineInstr::moveBefore(MachineInstr *MovePos) {
+  MovePos->getParent()->splice(MovePos, getParent(), getIterator());
+}
+
 /// getRegInfo - If this instruction is embedded into a MachineFunction,
 /// return the MachineRegisterInfo object for the current function, otherwise
 /// return null.
@@ -711,7 +711,7 @@ bool MachineInstr::isCandidateForCallSiteEntry(QueryType Type) const {
   case TargetOpcode::PATCHPOINT:
   case TargetOpcode::STACKMAP:
   case TargetOpcode::STATEPOINT:
-  case TargetOpcode::FENTRY_CALL:
+  case TargetOpcode::FENTRY_CALL:
     return false;
   }
   return true;
@@ -841,27 +841,27 @@ const DILabel *MachineInstr::getDebugLabel() const {
 }
 
 const MachineOperand &MachineInstr::getDebugVariableOp() const {
-  assert((isDebugValue() || isDebugRef()) && "not a DBG_VALUE");
+  assert((isDebugValue() || isDebugRef()) && "not a DBG_VALUE");
   return getOperand(2);
 }
 
 MachineOperand &MachineInstr::getDebugVariableOp() {
-  assert((isDebugValue() || isDebugRef()) && "not a DBG_VALUE");
+  assert((isDebugValue() || isDebugRef()) && "not a DBG_VALUE");
   return getOperand(2);
 }
 
 const DILocalVariable *MachineInstr::getDebugVariable() const {
-  assert((isDebugValue() || isDebugRef()) && "not a DBG_VALUE");
+  assert((isDebugValue() || isDebugRef()) && "not a DBG_VALUE");
   return cast<DILocalVariable>(getOperand(2).getMetadata());
 }
 
 MachineOperand &MachineInstr::getDebugExpressionOp() {
-  assert((isDebugValue() || isDebugRef()) && "not a DBG_VALUE");
+  assert((isDebugValue() || isDebugRef()) && "not a DBG_VALUE");
   return getOperand(3);
 }
 
 const DIExpression *MachineInstr::getDebugExpression() const {
-  assert((isDebugValue() || isDebugRef()) && "not a DBG_VALUE");
+  assert((isDebugValue() || isDebugRef()) && "not a DBG_VALUE");
   return cast<DIExpression>(getOperand(3).getMetadata());
 }
 
@@ -1100,12 +1100,12 @@ void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
   if (DefIdx < TiedMax)
     UseMO.TiedTo = DefIdx + 1;
   else {
-    // Inline asm can use the group descriptors to find tied operands,
-    // statepoint tied operands are trivial to match (1-1 reg def with reg use),
-    // but on normal instruction, the tied def must be within the first TiedMax
+    // Inline asm can use the group descriptors to find tied operands,
+    // statepoint tied operands are trivial to match (1-1 reg def with reg use),
+    // but on normal instruction, the tied def must be within the first TiedMax
     // operands.
-    assert((isInlineAsm() || getOpcode() == TargetOpcode::STATEPOINT) &&
-           "DefIdx out of range");
+    assert((isInlineAsm() || getOpcode() == TargetOpcode::STATEPOINT) &&
+           "DefIdx out of range");
     UseMO.TiedTo = TiedMax;
   }
 
@@ -1125,7 +1125,7 @@ unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
     return MO.TiedTo - 1;
 
   // Uses on normal instructions can be out of range.
-  if (!isInlineAsm() && getOpcode() != TargetOpcode::STATEPOINT) {
+  if (!isInlineAsm() && getOpcode() != TargetOpcode::STATEPOINT) {
     // Normal tied defs must be in the 0..TiedMax-1 range.
     if (MO.isUse())
       return TiedMax - 1;
@@ -1138,25 +1138,25 @@ unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
     llvm_unreachable("Can't find tied use");
   }
 
-  if (getOpcode() == TargetOpcode::STATEPOINT) {
-    // In STATEPOINT defs correspond 1-1 to GC pointer operands passed
-    // on registers.
-    StatepointOpers SO(this);
-    unsigned CurUseIdx = SO.getFirstGCPtrIdx();
-    assert(CurUseIdx != -1U && "only gc pointer statepoint operands can be tied");
-    unsigned NumDefs = getNumDefs();
-    for (unsigned CurDefIdx = 0; CurDefIdx < NumDefs; ++CurDefIdx) {
-      while (!getOperand(CurUseIdx).isReg())
-        CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
-      if (OpIdx == CurDefIdx)
-        return CurUseIdx;
-      if (OpIdx == CurUseIdx)
-        return CurDefIdx;
-      CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
-    }
-    llvm_unreachable("Can't find tied use");
-  }
-
+  if (getOpcode() == TargetOpcode::STATEPOINT) {
+    // In STATEPOINT defs correspond 1-1 to GC pointer operands passed
+    // on registers.
+    StatepointOpers SO(this);
+    unsigned CurUseIdx = SO.getFirstGCPtrIdx();
+    assert(CurUseIdx != -1U && "only gc pointer statepoint operands can be tied");
+    unsigned NumDefs = getNumDefs();
+    for (unsigned CurDefIdx = 0; CurDefIdx < NumDefs; ++CurDefIdx) {
+      while (!getOperand(CurUseIdx).isReg())
+        CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
+      if (OpIdx == CurDefIdx)
+        return CurUseIdx;
+      if (OpIdx == CurUseIdx)
+        return CurDefIdx;
+      CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
+    }
+    llvm_unreachable("Can't find tied use");
+  }
+
   // Now deal with inline asm by parsing the operand group descriptor flags.
   // Find the beginning of each operand group.
   SmallVector<unsigned, 8> GroupIdx;
@@ -1240,7 +1240,7 @@ bool MachineInstr::isSafeToMove(AAResults *AA, bool &SawStore) const {
 
   // See if this instruction does a load. If so, we have to guarantee that the
   // loaded value doesn't change between the load and the its intended
-  // destination. The check for isInvariantLoad gives the target the chance to
+  // destination. The check for isInvariantLoad gives the target the chance to
   // classify the load as always returning a constant, e.g. a constant pool
   // load.
   if (mayLoad() && !isDereferenceableInvariantLoad(AA))
@@ -1251,21 +1251,21 @@ bool MachineInstr::isSafeToMove(AAResults *AA, bool &SawStore) const {
   return true;
 }
 
-static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI, AAResults *AA,
-                                 bool UseTBAA, const MachineMemOperand *MMOa,
-                                 const MachineMemOperand *MMOb) {
-  // The following interface to AA is fashioned after DAGCombiner::isAlias and
-  // operates with MachineMemOperand offset with some important assumptions:
+static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI, AAResults *AA,
+                                 bool UseTBAA, const MachineMemOperand *MMOa,
+                                 const MachineMemOperand *MMOb) {
+  // The following interface to AA is fashioned after DAGCombiner::isAlias and
+  // operates with MachineMemOperand offset with some important assumptions:
   //   - LLVM fundamentally assumes flat address spaces.
-  //   - MachineOperand offset can *only* result from legalization and cannot
-  //     affect queries other than the trivial case of overlap checking.
-  //   - These offsets never wrap and never step outside of allocated objects.
+  //   - MachineOperand offset can *only* result from legalization and cannot
+  //     affect queries other than the trivial case of overlap checking.
+  //   - These offsets never wrap and never step outside of allocated objects.
   //   - There should never be any negative offsets here.
   //
   // FIXME: Modify API to hide this math from "user"
-  // Even before we go to AA we can reason locally about some memory objects. It
-  // can save compile time, and possibly catch some corner cases not currently
-  // covered.
+  // Even before we go to AA we can reason locally about some memory objects. It
+  // can save compile time, and possibly catch some corner cases not currently
+  // covered.
 
   int64_t OffsetA = MMOa->getOffset();
   int64_t OffsetB = MMOb->getOffset();
@@ -1307,63 +1307,63 @@ static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI, AAResults *AA,
   assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
   assert((OffsetB >= 0) && "Negative MachineMemOperand offset");
 
-  int64_t OverlapA =
-      KnownWidthA ? WidthA + OffsetA - MinOffset : MemoryLocation::UnknownSize;
-  int64_t OverlapB =
-      KnownWidthB ? WidthB + OffsetB - MinOffset : MemoryLocation::UnknownSize;
+  int64_t OverlapA =
+      KnownWidthA ? WidthA + OffsetA - MinOffset : MemoryLocation::UnknownSize;
+  int64_t OverlapB =
+      KnownWidthB ? WidthB + OffsetB - MinOffset : MemoryLocation::UnknownSize;
 
   AliasResult AAResult = AA->alias(
-      MemoryLocation(ValA, OverlapA, UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
+      MemoryLocation(ValA, OverlapA, UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
       MemoryLocation(ValB, OverlapB, UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
 
   return (AAResult != NoAlias);
 }
 
-bool MachineInstr::mayAlias(AAResults *AA, const MachineInstr &Other,
-                            bool UseTBAA) const {
-  const MachineFunction *MF = getMF();
-  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
-  const MachineFrameInfo &MFI = MF->getFrameInfo();
-
-  // Exclude call instruction which may alter the memory but can not be handled
-  // by this function.
-  if (isCall() || Other.isCall())
-    return true;
-
-  // If neither instruction stores to memory, they can't alias in any
-  // meaningful way, even if they read from the same address.
-  if (!mayStore() && !Other.mayStore())
-    return false;
-
-  // Both instructions must be memory operations to be able to alias.
-  if (!mayLoadOrStore() || !Other.mayLoadOrStore())
-    return false;
-
-  // Let the target decide if memory accesses cannot possibly overlap.
-  if (TII->areMemAccessesTriviallyDisjoint(*this, Other))
-    return false;
-
-  // Memory operations without memory operands may access anything. Be
-  // conservative and assume `MayAlias`.
-  if (memoperands_empty() || Other.memoperands_empty())
-    return true;
-
-  // Skip if there are too many memory operands.
-  auto NumChecks = getNumMemOperands() * Other.getNumMemOperands();
-  if (NumChecks > TII->getMemOperandAACheckLimit())
-    return true;
-
-  // Check each pair of memory operands from both instructions, which can't
-  // alias only if all pairs won't alias.
-  for (auto *MMOa : memoperands())
-    for (auto *MMOb : Other.memoperands())
-      if (MemOperandsHaveAlias(MFI, AA, UseTBAA, MMOa, MMOb))
-        return true;
-
-  return false;
-}
-
+bool MachineInstr::mayAlias(AAResults *AA, const MachineInstr &Other,
+                            bool UseTBAA) const {
+  const MachineFunction *MF = getMF();
+  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
+  const MachineFrameInfo &MFI = MF->getFrameInfo();
+
+  // Exclude call instruction which may alter the memory but can not be handled
+  // by this function.
+  if (isCall() || Other.isCall())
+    return true;
+
+  // If neither instruction stores to memory, they can't alias in any
+  // meaningful way, even if they read from the same address.
+  if (!mayStore() && !Other.mayStore())
+    return false;
+
+  // Both instructions must be memory operations to be able to alias.
+  if (!mayLoadOrStore() || !Other.mayLoadOrStore())
+    return false;
+
+  // Let the target decide if memory accesses cannot possibly overlap.
+  if (TII->areMemAccessesTriviallyDisjoint(*this, Other))
+    return false;
+
+  // Memory operations without memory operands may access anything. Be
+  // conservative and assume `MayAlias`.
+  if (memoperands_empty() || Other.memoperands_empty())
+    return true;
+
+  // Skip if there are too many memory operands.
+  auto NumChecks = getNumMemOperands() * Other.getNumMemOperands();
+  if (NumChecks > TII->getMemOperandAACheckLimit())
+    return true;
+
+  // Check each pair of memory operands from both instructions, which can't
+  // alias only if all pairs won't alias.
+  for (auto *MMOa : memoperands())
+    for (auto *MMOb : Other.memoperands())
+      if (MemOperandsHaveAlias(MFI, AA, UseTBAA, MMOa, MMOb))
+        return true;
+
+  return false;
+}
+
 /// hasOrderedMemoryRef - Return true if this instruction may have an ordered
 /// or volatile memory reference, or if the information describing the memory
 /// reference is not available. Return false if it is known to have no ordered
@@ -1462,8 +1462,8 @@ bool MachineInstr::hasUnmodeledSideEffects() const {
 }
 
 bool MachineInstr::isLoadFoldBarrier() const {
-  return mayStore() || isCall() ||
-         (hasUnmodeledSideEffects() && !isPseudoProbe());
+  return mayStore() || isCall() ||
+         (hasUnmodeledSideEffects() && !isPseudoProbe());
 }
 
 /// allDefsAreDead - Return true if all the defs of this instruction are dead.
@@ -1492,8 +1492,8 @@ void MachineInstr::copyImplicitOps(MachineFunction &MF,
 
 bool MachineInstr::hasComplexRegisterTies() const {
   const MCInstrDesc &MCID = getDesc();
-  if (MCID.Opcode == TargetOpcode::STATEPOINT)
-    return true;
+  if (MCID.Opcode == TargetOpcode::STATEPOINT)
+    return true;
   for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
     const auto &Operand = getOperand(I);
     if (!Operand.isReg() || Operand.isDef())
@@ -1800,12 +1800,12 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
     HeapAllocMarker->printAsOperand(OS, MST);
   }
 
-  if (DebugInstrNum) {
-    if (!FirstOp)
-      OS << ",";
-    OS << " debug-instr-number " << DebugInstrNum;
-  }
-
+  if (DebugInstrNum) {
+    if (!FirstOp)
+      OS << ",";
+    OS << " debug-instr-number " << DebugInstrNum;
+  }
+
   if (!SkipDebugLoc) {
     if (const DebugLoc &DL = getDebugLoc()) {
       if (!FirstOp)
@@ -2280,9 +2280,9 @@ MachineInstr::getFoldedRestoreSize(const TargetInstrInfo *TII) const {
     return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
   return None;
 }
-
-unsigned MachineInstr::getDebugInstrNum() {
-  if (DebugInstrNum == 0)
-    DebugInstrNum = getParent()->getParent()->getNewDebugInstrNum();
-  return DebugInstrNum;
-}
+
+unsigned MachineInstr::getDebugInstrNum() {
+  if (DebugInstrNum == 0)
+    DebugInstrNum = getParent()->getParent()->getNewDebugInstrNum();
+  return DebugInstrNum;
+}
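Note on the mayAlias/MemOperandsHaveAlias code touched above: it reports aliasing unless every pair of memory operands from the two instructions is provably disjoint. The following is a minimal standalone sketch of that pairwise-check pattern only; MemRef, mayOverlap, and instrsMayAlias are hypothetical stand-ins, not LLVM API, and the real code defers each per-pair decision to AliasAnalysis rather than a simple interval test.

// Hypothetical sketch of the "check every pair of memory operands" pattern.
// MemRef stands in for MachineMemOperand (base object, offset, width);
// mayOverlap stands in for the AA-backed per-pair query.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct MemRef {
  const void *Base; // underlying object
  int64_t Offset;   // byte offset within that object
  int64_t Width;    // access size in bytes; -1 means unknown
};

// Conservative per-pair test: distinct bases are assumed disjoint,
// unknown widths force a "may overlap" answer.
static bool mayOverlap(const MemRef &A, const MemRef &B) {
  if (A.Base != B.Base)
    return false;
  if (A.Width < 0 || B.Width < 0)
    return true;
  int64_t EndA = A.Offset + A.Width;
  int64_t EndB = B.Offset + B.Width;
  return std::max(A.Offset, B.Offset) < std::min(EndA, EndB);
}

// Two instructions may alias only if some pair of their memory operands
// may overlap; with no memory info at all, assume the worst.
static bool instrsMayAlias(const std::vector<MemRef> &IA,
                           const std::vector<MemRef> &IB) {
  if (IA.empty() || IB.empty())
    return true;
  for (const MemRef &A : IA)
    for (const MemRef &B : IB)
      if (mayOverlap(A, B))
        return true;
  return false;
}

int main() {
  int Buf[8] = {};
  std::vector<MemRef> Store{{Buf, 0, 4}}; // writes Buf bytes [0,4)
  std::vector<MemRef> Load{{Buf, 4, 4}};  // reads  Buf bytes [4,8)
  std::cout << std::boolalpha << instrsMayAlias(Store, Load) << "\n"; // prints false
}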