author    shadchin <shadchin@yandex-team.ru>    2022-02-10 16:44:39 +0300
committer Daniil Cherednik <dcherednik@yandex-team.ru>    2022-02-10 16:44:39 +0300
commit    e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0 (patch)
tree      64175d5cadab313b3e7039ebaa06c5bc3295e274 /contrib/libs/llvm12/lib/Target/PowerPC/PPCInstrInfo.h
parent    2598ef1d0aee359b4b6d5fdd1758916d5907d04f (diff)
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/libs/llvm12/lib/Target/PowerPC/PPCInstrInfo.h')
-rw-r--r--  contrib/libs/llvm12/lib/Target/PowerPC/PPCInstrInfo.h  160
1 file changed, 80 insertions(+), 80 deletions(-)
diff --git a/contrib/libs/llvm12/lib/Target/PowerPC/PPCInstrInfo.h b/contrib/libs/llvm12/lib/Target/PowerPC/PPCInstrInfo.h
index 1f4fc87086..c6ef1742b7 100644
--- a/contrib/libs/llvm12/lib/Target/PowerPC/PPCInstrInfo.h
+++ b/contrib/libs/llvm12/lib/Target/PowerPC/PPCInstrInfo.h
@@ -123,72 +123,72 @@ enum SpillOpcodeKey {
SOK_VectorFloat8Spill,
SOK_VectorFloat4Spill,
SOK_SpillToVSR,
- SOK_PairedVecSpill,
- SOK_AccumulatorSpill,
- SOK_UAccumulatorSpill,
+ SOK_PairedVecSpill,
+ SOK_AccumulatorSpill,
+ SOK_UAccumulatorSpill,
SOK_SPESpill,
SOK_LastOpcodeSpill // This must be last on the enum.
};
// Define list of load and store spill opcodes.
-#define NoInstr PPC::INSTRUCTION_LIST_END
+#define NoInstr PPC::INSTRUCTION_LIST_END
#define Pwr8LoadOpcodes \
{ \
PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR, \
PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXVD2X, PPC::LXSDX, PPC::LXSSPX, \
- PPC::SPILLTOVSR_LD, NoInstr, NoInstr, NoInstr, PPC::EVLDD \
+ PPC::SPILLTOVSR_LD, NoInstr, NoInstr, NoInstr, PPC::EVLDD \
}
#define Pwr9LoadOpcodes \
{ \
PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR, \
PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXV, PPC::DFLOADf64, \
- PPC::DFLOADf32, PPC::SPILLTOVSR_LD, NoInstr, NoInstr, NoInstr, NoInstr \
+ PPC::DFLOADf32, PPC::SPILLTOVSR_LD, NoInstr, NoInstr, NoInstr, NoInstr \
+ }
+
+#define Pwr10LoadOpcodes \
+ { \
+ PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR, \
+ PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXV, PPC::DFLOADf64, \
+ PPC::DFLOADf32, PPC::SPILLTOVSR_LD, PPC::LXVP, PPC::RESTORE_ACC, \
+ PPC::RESTORE_UACC, NoInstr \
}
-#define Pwr10LoadOpcodes \
- { \
- PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR, \
- PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXV, PPC::DFLOADf64, \
- PPC::DFLOADf32, PPC::SPILLTOVSR_LD, PPC::LXVP, PPC::RESTORE_ACC, \
- PPC::RESTORE_UACC, NoInstr \
- }
-
#define Pwr8StoreOpcodes \
{ \
PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR, PPC::SPILL_CRBIT, \
- PPC::STVX, PPC::STXVD2X, PPC::STXSDX, PPC::STXSSPX, \
- PPC::SPILLTOVSR_ST, NoInstr, NoInstr, NoInstr, PPC::EVSTDD \
+ PPC::STVX, PPC::STXVD2X, PPC::STXSDX, PPC::STXSSPX, \
+ PPC::SPILLTOVSR_ST, NoInstr, NoInstr, NoInstr, PPC::EVSTDD \
}
#define Pwr9StoreOpcodes \
{ \
PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR, PPC::SPILL_CRBIT, \
PPC::STVX, PPC::STXV, PPC::DFSTOREf64, PPC::DFSTOREf32, \
- PPC::SPILLTOVSR_ST, NoInstr, NoInstr, NoInstr, NoInstr \
+ PPC::SPILLTOVSR_ST, NoInstr, NoInstr, NoInstr, NoInstr \
+ }
+
+#define Pwr10StoreOpcodes \
+ { \
+ PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR, PPC::SPILL_CRBIT, \
+ PPC::STVX, PPC::STXV, PPC::DFSTOREf64, PPC::DFSTOREf32, \
+ PPC::SPILLTOVSR_ST, PPC::STXVP, PPC::SPILL_ACC, PPC::SPILL_UACC, \
+ NoInstr \
}
-#define Pwr10StoreOpcodes \
- { \
- PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR, PPC::SPILL_CRBIT, \
- PPC::STVX, PPC::STXV, PPC::DFSTOREf64, PPC::DFSTOREf32, \
- PPC::SPILLTOVSR_ST, PPC::STXVP, PPC::SPILL_ACC, PPC::SPILL_UACC, \
- NoInstr \
- }
-
// Initialize arrays for load and store spill opcodes on supported subtargets.
#define StoreOpcodesForSpill \
- { Pwr8StoreOpcodes, Pwr9StoreOpcodes, Pwr10StoreOpcodes }
+ { Pwr8StoreOpcodes, Pwr9StoreOpcodes, Pwr10StoreOpcodes }
#define LoadOpcodesForSpill \
- { Pwr8LoadOpcodes, Pwr9LoadOpcodes, Pwr10LoadOpcodes }
+ { Pwr8LoadOpcodes, Pwr9LoadOpcodes, Pwr10LoadOpcodes }
class PPCSubtarget;
class PPCInstrInfo : public PPCGenInstrInfo {
PPCSubtarget &Subtarget;
const PPCRegisterInfo RI;
- const unsigned StoreSpillOpcodesArray[3][SOK_LastOpcodeSpill] =
+ const unsigned StoreSpillOpcodesArray[3][SOK_LastOpcodeSpill] =
StoreOpcodesForSpill;
- const unsigned LoadSpillOpcodesArray[3][SOK_LastOpcodeSpill] =
+ const unsigned LoadSpillOpcodesArray[3][SOK_LastOpcodeSpill] =
LoadOpcodesForSpill;
void StoreRegToStackSlot(MachineFunction &MF, unsigned SrcReg, bool isKill,
@@ -246,17 +246,17 @@ class PPCInstrInfo : public PPCGenInstrInfo {
unsigned getSpillTarget() const;
const unsigned *getStoreOpcodesForSpillArray() const;
const unsigned *getLoadOpcodesForSpillArray() const;
- unsigned getSpillIndex(const TargetRegisterClass *RC) const;
+ unsigned getSpillIndex(const TargetRegisterClass *RC) const;
int16_t getFMAOpIdxInfo(unsigned Opcode) const;
void reassociateFMA(MachineInstr &Root, MachineCombinerPattern Pattern,
SmallVectorImpl<MachineInstr *> &InsInstrs,
SmallVectorImpl<MachineInstr *> &DelInstrs,
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;
- bool isLoadFromConstantPool(MachineInstr *I) const;
- Register
- generateLoadForNewConst(unsigned Idx, MachineInstr *MI, Type *Ty,
- SmallVectorImpl<MachineInstr *> &InsInstrs) const;
- const Constant *getConstantFromConstantPool(MachineInstr *I) const;
+ bool isLoadFromConstantPool(MachineInstr *I) const;
+ Register
+ generateLoadForNewConst(unsigned Idx, MachineInstr *MI, Type *Ty,
+ SmallVectorImpl<MachineInstr *> &InsInstrs) const;
+ const Constant *getConstantFromConstantPool(MachineInstr *I) const;
virtual void anchor();
protected:
@@ -291,10 +291,10 @@ public:
}
static bool isSameClassPhysRegCopy(unsigned Opcode) {
- unsigned CopyOpcodes[] = {PPC::OR, PPC::OR8, PPC::FMR,
- PPC::VOR, PPC::XXLOR, PPC::XXLORf,
- PPC::XSCPSGNDP, PPC::MCRF, PPC::CROR,
- PPC::EVOR, -1U};
+ unsigned CopyOpcodes[] = {PPC::OR, PPC::OR8, PPC::FMR,
+ PPC::VOR, PPC::XXLOR, PPC::XXLORf,
+ PPC::XSCPSGNDP, PPC::MCRF, PPC::CROR,
+ PPC::EVOR, -1U};
for (int i = 0; CopyOpcodes[i] != -1U; i++)
if (Opcode == CopyOpcodes[i])
return true;
@@ -348,30 +348,30 @@ public:
/// chain ending in \p Root. All potential patterns are output in the \p
/// P array.
bool getFMAPatterns(MachineInstr &Root,
- SmallVectorImpl<MachineCombinerPattern> &P,
- bool DoRegPressureReduce) const;
+ SmallVectorImpl<MachineCombinerPattern> &P,
+ bool DoRegPressureReduce) const;
/// Return true when there is potentially a faster code sequence
/// for an instruction chain ending in <Root>. All potential patterns are
/// output in the <Pattern> array.
- bool getMachineCombinerPatterns(MachineInstr &Root,
- SmallVectorImpl<MachineCombinerPattern> &P,
- bool DoRegPressureReduce) const override;
-
- /// On PowerPC, we leverage machine combiner pass to reduce register pressure
- /// when the register pressure is high for one BB.
- /// Return true if register pressure for \p MBB is high and ABI is supported
- /// to reduce register pressure. Otherwise return false.
- bool
- shouldReduceRegisterPressure(MachineBasicBlock *MBB,
- RegisterClassInfo *RegClassInfo) const override;
-
- /// Fixup the placeholders we put in genAlternativeCodeSequence() for
- /// MachineCombiner.
- void
- finalizeInsInstrs(MachineInstr &Root, MachineCombinerPattern &P,
- SmallVectorImpl<MachineInstr *> &InsInstrs) const override;
-
+ bool getMachineCombinerPatterns(MachineInstr &Root,
+ SmallVectorImpl<MachineCombinerPattern> &P,
+ bool DoRegPressureReduce) const override;
+
+ /// On PowerPC, we leverage machine combiner pass to reduce register pressure
+ /// when the register pressure is high for one BB.
+ /// Return true if register pressure for \p MBB is high and ABI is supported
+ /// to reduce register pressure. Otherwise return false.
+ bool
+ shouldReduceRegisterPressure(MachineBasicBlock *MBB,
+ RegisterClassInfo *RegClassInfo) const override;
+
+ /// Fixup the placeholders we put in genAlternativeCodeSequence() for
+ /// MachineCombiner.
+ void
+ finalizeInsInstrs(MachineInstr &Root, MachineCombinerPattern &P,
+ SmallVectorImpl<MachineInstr *> &InsInstrs) const override;
+
bool isAssociativeAndCommutative(const MachineInstr &Inst) const override;
/// On PowerPC, we try to reassociate FMA chain which will increase
@@ -503,18 +503,18 @@ public:
// Predication support.
bool isPredicated(const MachineInstr &MI) const override;
- bool isSchedulingBoundary(const MachineInstr &MI,
- const MachineBasicBlock *MBB,
- const MachineFunction &MF) const override;
-
+ bool isSchedulingBoundary(const MachineInstr &MI,
+ const MachineBasicBlock *MBB,
+ const MachineFunction &MF) const override;
+
bool PredicateInstruction(MachineInstr &MI,
ArrayRef<MachineOperand> Pred) const override;
bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
ArrayRef<MachineOperand> Pred2) const override;
- bool ClobbersPredicate(MachineInstr &MI, std::vector<MachineOperand> &Pred,
- bool SkipDead) const override;
+ bool ClobbersPredicate(MachineInstr &MI, std::vector<MachineOperand> &Pred,
+ bool SkipDead) const override;
// Comparison optimization.
@@ -534,20 +534,20 @@ public:
int64_t &Offset, unsigned &Width,
const TargetRegisterInfo *TRI) const;
- /// Get the base operand and byte offset of an instruction that reads/writes
- /// memory.
- bool getMemOperandsWithOffsetWidth(
- const MachineInstr &LdSt,
- SmallVectorImpl<const MachineOperand *> &BaseOps, int64_t &Offset,
- bool &OffsetIsScalable, unsigned &Width,
- const TargetRegisterInfo *TRI) const override;
-
- /// Returns true if the two given memory operations should be scheduled
- /// adjacent.
- bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
- ArrayRef<const MachineOperand *> BaseOps2,
- unsigned NumLoads, unsigned NumBytes) const override;
-
+ /// Get the base operand and byte offset of an instruction that reads/writes
+ /// memory.
+ bool getMemOperandsWithOffsetWidth(
+ const MachineInstr &LdSt,
+ SmallVectorImpl<const MachineOperand *> &BaseOps, int64_t &Offset,
+ bool &OffsetIsScalable, unsigned &Width,
+ const TargetRegisterInfo *TRI) const override;
+
+ /// Returns true if the two given memory operations should be scheduled
+ /// adjacent.
+ bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
+ ArrayRef<const MachineOperand *> BaseOps2,
+ unsigned NumLoads, unsigned NumBytes) const override;
+
/// Return true if two MIs access different memory addresses and false
/// otherwise
bool
@@ -605,7 +605,7 @@ public:
bool convertToImmediateForm(MachineInstr &MI,
MachineInstr **KilledDef = nullptr) const;
bool foldFrameOffset(MachineInstr &MI) const;
- bool combineRLWINM(MachineInstr &MI, MachineInstr **ToErase = nullptr) const;
+ bool combineRLWINM(MachineInstr &MI, MachineInstr **ToErase = nullptr) const;
bool isADDIInstrEligibleForFolding(MachineInstr &ADDIMI, int64_t &Imm) const;
bool isADDInstrEligibleForFolding(MachineInstr &ADDMI) const;
bool isImmInstrEligibleForFolding(MachineInstr &MI, unsigned &BaseReg,
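
Note on the spill tables touched above: StoreSpillOpcodesArray and LoadSpillOpcodesArray are indexed first by spill target (Pwr8, Pwr9, Pwr10) and then by SpillOpcodeKey, which is why the enum comment insists SOK_LastOpcodeSpill stays last (it doubles as the column count). A minimal standalone sketch of that two-level lookup, using hypothetical stand-in opcode values rather than the real PPC::* enumerators (getStoreOpcodeForSpill is an illustrative helper, not an LLVM API):

// Illustrative sketch only -- not LLVM code. It mirrors the shape of
// StoreSpillOpcodesArray[3][SOK_LastOpcodeSpill] with a trimmed-down key
// enum and made-up opcode values, to show how the two-level lookup works.
#include <cstdio>

enum SpillOpcodeKey {
  SOK_Int4Spill,
  SOK_Int8Spill,
  SOK_VSXVectorSpill,
  SOK_LastOpcodeSpill // Must be last: it doubles as the column count.
};

// Hypothetical stand-ins for PPC::STW, PPC::STD, PPC::STXVD2X, PPC::STXV.
enum : unsigned { STW = 100, STD = 101, STXVD2X = 102, STXV = 103 };

// Row 0 = Pwr8, row 1 = Pwr9, row 2 = Pwr10 (cf. getSpillTarget()).
static const unsigned StoreSpillOpcodes[3][SOK_LastOpcodeSpill] = {
    {STW, STD, STXVD2X}, // Pwr8 spills VSX vectors with stxvd2x
    {STW, STD, STXV},    // Pwr9 and later use stxv
    {STW, STD, STXV},
};

// Analogue of getStoreOpcodesForSpillArray() + getSpillIndex(RC): pick the
// subtarget row, then index it with the key derived from the register class.
unsigned getStoreOpcodeForSpill(unsigned SpillTarget, SpillOpcodeKey Key) {
  return StoreSpillOpcodes[SpillTarget][Key];
}

int main() {
  // E.g. spilling a VSX vector register on a Power9-class subtarget.
  std::printf("opcode = %u\n", getStoreOpcodeForSpill(1, SOK_VSXVectorSpill));
  return 0;
}

In the real header the Pwr10 rows additionally carry paired-vector and accumulator entries (PPC::LXVP/PPC::STXVP, PPC::RESTORE_ACC/PPC::SPILL_ACC, PPC::RESTORE_UACC/PPC::SPILL_UACC), with NoInstr marking slots a given subtarget cannot spill directly.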