author    | shadchin <shadchin@yandex-team.ru>           | 2022-02-10 16:44:30 +0300
committer | Daniil Cherednik <dcherednik@yandex-team.ru>  | 2022-02-10 16:44:30 +0300
commit    | 2598ef1d0aee359b4b6d5fdd1758916d5907d04f (patch)
tree      | 012bb94d777798f1f56ac1cec429509766d05181 /contrib/libs/llvm12/lib/CodeGen/GlobalISel/Utils.cpp
parent    | 6751af0b0c1b952fede40b19b71da8025b5d8bcf (diff)
download  | ydb-2598ef1d0aee359b4b6d5fdd1758916d5907d04f.tar.gz
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/libs/llvm12/lib/CodeGen/GlobalISel/Utils.cpp')
-rw-r--r-- | contrib/libs/llvm12/lib/CodeGen/GlobalISel/Utils.cpp | 656
1 file changed, 328 insertions, 328 deletions
diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/Utils.cpp b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/Utils.cpp
index cd24832244..2adc30eacc 100644
--- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/Utils.cpp
@@ -11,11 +11,11 @@
 #include "llvm/CodeGen/GlobalISel/Utils.h"
 #include "llvm/ADT/APFloat.h"
-#include "llvm/ADT/APInt.h"
-#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/Optional.h"
 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
-#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
+#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
 #include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
 #include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -23,16 +23,16 @@
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/StackProtector.h"
 #include "llvm/CodeGen/TargetInstrInfo.h"
-#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/CodeGen/TargetLowering.h"
 #include "llvm/CodeGen/TargetPassConfig.h"
 #include "llvm/CodeGen/TargetRegisterInfo.h"
 #include "llvm/IR/Constants.h"
-#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetMachine.h"
 
 #define DEBUG_TYPE "globalisel-utils"
 
 using namespace llvm;
-using namespace MIPatternMatch;
+using namespace MIPatternMatch;
 
 Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
                                    const TargetInstrInfo &TII,
@@ -48,7 +48,7 @@ Register llvm::constrainOperandRegClass(
     const MachineFunction &MF, const TargetRegisterInfo &TRI,
     MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
     const RegisterBankInfo &RBI, MachineInstr &InsertPt,
-    const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
+    const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
   Register Reg = RegMO.getReg();
   // Assume physical registers are properly constrained.
   assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");
@@ -69,13 +69,13 @@ Register llvm::constrainOperandRegClass(
               TII.get(TargetOpcode::COPY), Reg)
           .addReg(ConstrainedReg);
     }
-    if (GISelChangeObserver *Observer = MF.getObserver()) {
-      Observer->changingInstr(*RegMO.getParent());
-    }
-    RegMO.setReg(ConstrainedReg);
-    if (GISelChangeObserver *Observer = MF.getObserver()) {
-      Observer->changedInstr(*RegMO.getParent());
-    }
+    if (GISelChangeObserver *Observer = MF.getObserver()) {
+      Observer->changingInstr(*RegMO.getParent());
+    }
+    RegMO.setReg(ConstrainedReg);
+    if (GISelChangeObserver *Observer = MF.getObserver()) {
+      Observer->changedInstr(*RegMO.getParent());
+    }
   } else {
     if (GISelChangeObserver *Observer = MF.getObserver()) {
       if (!RegMO.isDef()) {
@@ -93,7 +93,7 @@ Register llvm::constrainOperandRegClass(
     const MachineFunction &MF, const TargetRegisterInfo &TRI,
     MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
     const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
-    MachineOperand &RegMO, unsigned OpIdx) {
+    MachineOperand &RegMO, unsigned OpIdx) {
   Register Reg = RegMO.getReg();
   // Assume physical registers are properly constrained.
   assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");
@@ -163,7 +163,7 @@ bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
     // If the operand is a vreg, we should constrain its regclass, and only
     // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
-    constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);
+    constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);
 
     // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
     // done.
@@ -192,14 +192,14 @@ bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
 
 bool llvm::isTriviallyDead(const MachineInstr &MI,
                            const MachineRegisterInfo &MRI) {
-  // FIXME: This logical is mostly duplicated with
-  // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
-  // MachineInstr::isLabel?
-
-  // Don't delete frame allocation labels.
-  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
-    return false;
-
+  // FIXME: This logical is mostly duplicated with
+  // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
+  // MachineInstr::isLabel?
+
+  // Don't delete frame allocation labels.
+  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
+    return false;
+
   // If we can move an instruction, we can remove it. Otherwise, it has
   // a side-effect of some sort.
   bool SawStore = false;
@@ -262,8 +262,8 @@ void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
   reportGISelFailure(MF, TPC, MORE, R);
 }
 
-Optional<APInt> llvm::getConstantVRegVal(Register VReg,
-                                         const MachineRegisterInfo &MRI) {
+Optional<APInt> llvm::getConstantVRegVal(Register VReg,
+                                         const MachineRegisterInfo &MRI) {
   Optional<ValueAndVReg> ValAndVReg =
       getConstantVRegValWithLookThrough(VReg, MRI, /*LookThroughInstrs*/ false);
   assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
@@ -273,17 +273,17 @@ Optional<APInt> llvm::getConstantVRegVal(Register VReg,
   return ValAndVReg->Value;
 }
 
-Optional<int64_t> llvm::getConstantVRegSExtVal(Register VReg,
-                                               const MachineRegisterInfo &MRI) {
-  Optional<APInt> Val = getConstantVRegVal(VReg, MRI);
-  if (Val && Val->getBitWidth() <= 64)
-    return Val->getSExtValue();
-  return None;
-}
-
+Optional<int64_t> llvm::getConstantVRegSExtVal(Register VReg,
+                                               const MachineRegisterInfo &MRI) {
+  Optional<APInt> Val = getConstantVRegVal(VReg, MRI);
+  if (Val && Val->getBitWidth() <= 64)
+    return Val->getSExtValue();
+  return None;
+}
+
 Optional<ValueAndVReg> llvm::getConstantVRegValWithLookThrough(
     Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
-    bool HandleFConstant, bool LookThroughAnyExt) {
+    bool HandleFConstant, bool LookThroughAnyExt) {
   SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
   MachineInstr *MI;
   auto IsConstantOpcode = [HandleFConstant](unsigned Opcode) {
@@ -310,10 +310,10 @@ Optional<ValueAndVReg> llvm::getConstantVRegValWithLookThrough(
   while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI->getOpcode()) &&
          LookThroughInstrs) {
     switch (MI->getOpcode()) {
-    case TargetOpcode::G_ANYEXT:
-      if (!LookThroughAnyExt)
-        return None;
-      LLVM_FALLTHROUGH;
+    case TargetOpcode::G_ANYEXT:
+      if (!LookThroughAnyExt)
+        return None;
+      LLVM_FALLTHROUGH;
     case TargetOpcode::G_TRUNC:
     case TargetOpcode::G_SEXT:
     case TargetOpcode::G_ZEXT:
@@ -347,7 +347,7 @@ Optional<ValueAndVReg> llvm::getConstantVRegValWithLookThrough(
     case TargetOpcode::G_TRUNC:
       Val = Val.trunc(OpcodeAndSize.second);
      break;
-    case TargetOpcode::G_ANYEXT:
+    case TargetOpcode::G_ANYEXT:
     case TargetOpcode::G_SEXT:
       Val = Val.sext(OpcodeAndSize.second);
      break;
@@ -357,10 +357,10 @@ Optional<ValueAndVReg> llvm::getConstantVRegValWithLookThrough(
     }
   }
 
-  return ValueAndVReg{Val, VReg};
+  return ValueAndVReg{Val, VReg};
 }
 
-const ConstantFP *
+const ConstantFP *
 llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
   MachineInstr *MI = MRI.getVRegDef(VReg);
   if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
@@ -368,8 +368,8 @@ llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
   return MI->getOperand(1).getFPImm();
 }
 
-Optional<DefinitionAndSourceRegister>
-llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
+Optional<DefinitionAndSourceRegister>
+llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
   Register DefSrcReg = Reg;
   auto *DefMI = MRI.getVRegDef(Reg);
   auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
@@ -378,7 +378,7 @@ llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
   while (DefMI->getOpcode() == TargetOpcode::COPY) {
     Register SrcReg = DefMI->getOperand(1).getReg();
     auto SrcTy = MRI.getType(SrcReg);
-    if (!SrcTy.isValid())
+    if (!SrcTy.isValid())
       break;
     DefMI = MRI.getVRegDef(SrcReg);
     DefSrcReg = SrcReg;
@@ -386,8 +386,8 @@ llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
   return DefinitionAndSourceRegister{DefMI, DefSrcReg};
 }
 
-MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
-                                         const MachineRegisterInfo &MRI) {
+MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
+                                         const MachineRegisterInfo &MRI) {
   Optional<DefinitionAndSourceRegister> DefSrcReg =
       getDefSrcRegIgnoringCopies(Reg, MRI);
   return DefSrcReg ? DefSrcReg->MI : nullptr;
@@ -400,8 +400,8 @@ Register llvm::getSrcRegIgnoringCopies(Register Reg,
   return DefSrcReg ? DefSrcReg->Reg : Register();
 }
 
-MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
-                                 const MachineRegisterInfo &MRI) {
+MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
+                                 const MachineRegisterInfo &MRI) {
   MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
   return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
 }
@@ -430,8 +430,8 @@ Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1,
   if (!MaybeOp1Cst)
     return None;
 
-  const APInt &C1 = *MaybeOp1Cst;
-  const APInt &C2 = *MaybeOp2Cst;
+  const APInt &C1 = *MaybeOp1Cst;
+  const APInt &C2 = *MaybeOp2Cst;
   switch (Opcode) {
   default:
     break;
@@ -480,8 +480,8 @@ bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
   if (!DefMI)
     return false;
 
-  const TargetMachine& TM = DefMI->getMF()->getTarget();
-  if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
+  const TargetMachine& TM = DefMI->getMF()->getTarget();
+  if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
     return true;
 
   if (SNaN) {
@@ -512,40 +512,40 @@ Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
   return Align(1);
 }
 
-Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
-                                        const TargetInstrInfo &TII,
-                                        MCRegister PhysReg,
-                                        const TargetRegisterClass &RC,
-                                        LLT RegTy) {
-  DebugLoc DL; // FIXME: Is no location the right choice?
-  MachineBasicBlock &EntryMBB = MF.front();
-  MachineRegisterInfo &MRI = MF.getRegInfo();
-  Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
-  if (LiveIn) {
-    MachineInstr *Def = MRI.getVRegDef(LiveIn);
-    if (Def) {
-      // FIXME: Should the verifier check this is in the entry block?
-      assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
-      return LiveIn;
-    }
-
-    // It's possible the incoming argument register and copy was added during
-    // lowering, but later deleted due to being/becoming dead. If this happens,
-    // re-insert the copy.
-  } else {
-    // The live in register was not present, so add it.
-    LiveIn = MF.addLiveIn(PhysReg, &RC);
-    if (RegTy.isValid())
-      MRI.setType(LiveIn, RegTy);
-  }
-
-  BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
-      .addReg(PhysReg);
-  if (!EntryMBB.isLiveIn(PhysReg))
-    EntryMBB.addLiveIn(PhysReg);
-  return LiveIn;
-}
-
+Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
+                                        const TargetInstrInfo &TII,
+                                        MCRegister PhysReg,
+                                        const TargetRegisterClass &RC,
+                                        LLT RegTy) {
+  DebugLoc DL; // FIXME: Is no location the right choice?
+  MachineBasicBlock &EntryMBB = MF.front();
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
+  if (LiveIn) {
+    MachineInstr *Def = MRI.getVRegDef(LiveIn);
+    if (Def) {
+      // FIXME: Should the verifier check this is in the entry block?
+      assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
+      return LiveIn;
+    }
+
+    // It's possible the incoming argument register and copy was added during
+    // lowering, but later deleted due to being/becoming dead. If this happens,
+    // re-insert the copy.
+  } else {
+    // The live in register was not present, so add it.
+    LiveIn = MF.addLiveIn(PhysReg, &RC);
+    if (RegTy.isValid())
+      MRI.setType(LiveIn, RegTy);
+  }
+
+  BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
+      .addReg(PhysReg);
+  if (!EntryMBB.isLiveIn(PhysReg))
+    EntryMBB.addLiveIn(PhysReg);
+  return LiveIn;
+}
+
 Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
                                         uint64_t Imm,
                                         const MachineRegisterInfo &MRI) {
@@ -554,262 +554,262 @@ Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
   switch (Opcode) {
   default:
     break;
-  case TargetOpcode::G_SEXT_INREG: {
-    LLT Ty = MRI.getType(Op1);
-    return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
-  }
+  case TargetOpcode::G_SEXT_INREG: {
+    LLT Ty = MRI.getType(Op1);
+    return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
+  }
   }
 
   return None;
 }
 
-bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
-                                  GISelKnownBits *KB) {
-  Optional<DefinitionAndSourceRegister> DefSrcReg =
-      getDefSrcRegIgnoringCopies(Reg, MRI);
-  if (!DefSrcReg)
-    return false;
-
-  const MachineInstr &MI = *DefSrcReg->MI;
-  const LLT Ty = MRI.getType(Reg);
-
-  switch (MI.getOpcode()) {
-  case TargetOpcode::G_CONSTANT: {
-    unsigned BitWidth = Ty.getScalarSizeInBits();
-    const ConstantInt *CI = MI.getOperand(1).getCImm();
-    return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
-  }
-  case TargetOpcode::G_SHL: {
-    // A left-shift of a constant one will have exactly one bit set because
-    // shifting the bit off the end is undefined.
-
-    // TODO: Constant splat
-    if (auto ConstLHS = getConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
-      if (*ConstLHS == 1)
-        return true;
-    }
-
-    break;
-  }
-  case TargetOpcode::G_LSHR: {
-    if (auto ConstLHS = getConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
-      if (ConstLHS->isSignMask())
-        return true;
-    }
-
-    break;
-  }
-  default:
-    break;
-  }
-
-  // TODO: Are all operands of a build vector constant powers of two?
-  if (!KB)
-    return false;
-
-  // More could be done here, though the above checks are enough
-  // to handle some common cases.
-
-  // Fall back to computeKnownBits to catch other known cases.
-  KnownBits Known = KB->getKnownBits(Reg);
-  return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
-}
-
+bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
+                                  GISelKnownBits *KB) {
+  Optional<DefinitionAndSourceRegister> DefSrcReg =
+      getDefSrcRegIgnoringCopies(Reg, MRI);
+  if (!DefSrcReg)
+    return false;
+
+  const MachineInstr &MI = *DefSrcReg->MI;
+  const LLT Ty = MRI.getType(Reg);
+
+  switch (MI.getOpcode()) {
+  case TargetOpcode::G_CONSTANT: {
+    unsigned BitWidth = Ty.getScalarSizeInBits();
+    const ConstantInt *CI = MI.getOperand(1).getCImm();
+    return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
+  }
+  case TargetOpcode::G_SHL: {
+    // A left-shift of a constant one will have exactly one bit set because
+    // shifting the bit off the end is undefined.
+
+    // TODO: Constant splat
+    if (auto ConstLHS = getConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
+      if (*ConstLHS == 1)
+        return true;
+    }
+
+    break;
+  }
+  case TargetOpcode::G_LSHR: {
+    if (auto ConstLHS = getConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
+      if (ConstLHS->isSignMask())
+        return true;
+    }
+
+    break;
+  }
+  default:
+    break;
+  }
+
+  // TODO: Are all operands of a build vector constant powers of two?
+  if (!KB)
+    return false;
+
+  // More could be done here, though the above checks are enough
+  // to handle some common cases.
+
+  // Fall back to computeKnownBits to catch other known cases.
+  KnownBits Known = KB->getKnownBits(Reg);
+  return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
+}
+
 void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
   AU.addPreserved<StackProtector>();
 }
 
-static unsigned getLCMSize(unsigned OrigSize, unsigned TargetSize) {
-  unsigned Mul = OrigSize * TargetSize;
-  unsigned GCDSize = greatestCommonDivisor(OrigSize, TargetSize);
-  return Mul / GCDSize;
-}
-
-LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
-  const unsigned OrigSize = OrigTy.getSizeInBits();
-  const unsigned TargetSize = TargetTy.getSizeInBits();
-
-  if (OrigSize == TargetSize)
-    return OrigTy;
-
-  if (OrigTy.isVector()) {
-    const LLT OrigElt = OrigTy.getElementType();
-
-    if (TargetTy.isVector()) {
-      const LLT TargetElt = TargetTy.getElementType();
-
-      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
-        int GCDElts = greatestCommonDivisor(OrigTy.getNumElements(),
-                                            TargetTy.getNumElements());
-        // Prefer the original element type.
-        int Mul = OrigTy.getNumElements() * TargetTy.getNumElements();
-        return LLT::vector(Mul / GCDElts, OrigTy.getElementType());
-      }
-    } else {
-      if (OrigElt.getSizeInBits() == TargetSize)
-        return OrigTy;
-    }
-
-    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
-    return LLT::vector(LCMSize / OrigElt.getSizeInBits(), OrigElt);
+static unsigned getLCMSize(unsigned OrigSize, unsigned TargetSize) {
+  unsigned Mul = OrigSize * TargetSize;
+  unsigned GCDSize = greatestCommonDivisor(OrigSize, TargetSize);
+  return Mul / GCDSize;
+}
+
+LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
+  const unsigned OrigSize = OrigTy.getSizeInBits();
+  const unsigned TargetSize = TargetTy.getSizeInBits();
+
+  if (OrigSize == TargetSize)
+    return OrigTy;
+
+  if (OrigTy.isVector()) {
+    const LLT OrigElt = OrigTy.getElementType();
+
+    if (TargetTy.isVector()) {
+      const LLT TargetElt = TargetTy.getElementType();
+
+      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
+        int GCDElts = greatestCommonDivisor(OrigTy.getNumElements(),
+                                            TargetTy.getNumElements());
+        // Prefer the original element type.
+        int Mul = OrigTy.getNumElements() * TargetTy.getNumElements();
+        return LLT::vector(Mul / GCDElts, OrigTy.getElementType());
+      }
+    } else {
+      if (OrigElt.getSizeInBits() == TargetSize)
+        return OrigTy;
+    }
+
+    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
+    return LLT::vector(LCMSize / OrigElt.getSizeInBits(), OrigElt);
   }
 
-  if (TargetTy.isVector()) {
-    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
-    return LLT::vector(LCMSize / OrigSize, OrigTy);
+  if (TargetTy.isVector()) {
+    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
+    return LLT::vector(LCMSize / OrigSize, OrigTy);
   }
 
-  unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
-
-  // Preserve pointer types.
-  if (LCMSize == OrigSize)
-    return OrigTy;
-  if (LCMSize == TargetSize)
-    return TargetTy;
-
-  return LLT::scalar(LCMSize);
-}
-
-LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
-  const unsigned OrigSize = OrigTy.getSizeInBits();
-  const unsigned TargetSize = TargetTy.getSizeInBits();
-
-  if (OrigSize == TargetSize)
-    return OrigTy;
-
-  if (OrigTy.isVector()) {
-    LLT OrigElt = OrigTy.getElementType();
-    if (TargetTy.isVector()) {
-      LLT TargetElt = TargetTy.getElementType();
-      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
-        int GCD = greatestCommonDivisor(OrigTy.getNumElements(),
-                                        TargetTy.getNumElements());
-        return LLT::scalarOrVector(GCD, OrigElt);
-      }
-    } else {
-      // If the source is a vector of pointers, return a pointer element.
-      if (OrigElt.getSizeInBits() == TargetSize)
-        return OrigElt;
-    }
-
-    unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
-    if (GCD == OrigElt.getSizeInBits())
-      return OrigElt;
-
-    // If we can't produce the original element type, we have to use a smaller
-    // scalar.
-    if (GCD < OrigElt.getSizeInBits())
-      return LLT::scalar(GCD);
-    return LLT::vector(GCD / OrigElt.getSizeInBits(), OrigElt);
-  }
-
-  if (TargetTy.isVector()) {
-    // Try to preserve the original element type.
-    LLT TargetElt = TargetTy.getElementType();
-    if (TargetElt.getSizeInBits() == OrigSize)
-      return OrigTy;
-  }
-
-  unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
-  return LLT::scalar(GCD);
-}
-
-Optional<int> llvm::getSplatIndex(MachineInstr &MI) {
-  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
-         "Only G_SHUFFLE_VECTOR can have a splat index!");
-  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
-  auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });
-
-  // If all elements are undefined, this shuffle can be considered a splat.
-  // Return 0 for better potential for callers to simplify.
-  if (FirstDefinedIdx == Mask.end())
-    return 0;
-
-  // Make sure all remaining elements are either undef or the same
-  // as the first non-undef value.
-  int SplatValue = *FirstDefinedIdx;
-  if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
-             [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
-    return None;
-
-  return SplatValue;
-}
-
-static bool isBuildVectorOp(unsigned Opcode) {
-  return Opcode == TargetOpcode::G_BUILD_VECTOR ||
-         Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
-}
-
-// TODO: Handle mixed undef elements.
-static bool isBuildVectorConstantSplat(const MachineInstr &MI,
-                                       const MachineRegisterInfo &MRI,
-                                       int64_t SplatValue) {
-  if (!isBuildVectorOp(MI.getOpcode()))
-    return false;
-
-  const unsigned NumOps = MI.getNumOperands();
-  for (unsigned I = 1; I != NumOps; ++I) {
-    Register Element = MI.getOperand(I).getReg();
-    if (!mi_match(Element, MRI, m_SpecificICst(SplatValue)))
-      return false;
+  unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
+
+  // Preserve pointer types.
+  if (LCMSize == OrigSize)
+    return OrigTy;
+  if (LCMSize == TargetSize)
+    return TargetTy;
+
+  return LLT::scalar(LCMSize);
+}
+
+LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
+  const unsigned OrigSize = OrigTy.getSizeInBits();
+  const unsigned TargetSize = TargetTy.getSizeInBits();
+
+  if (OrigSize == TargetSize)
+    return OrigTy;
+
+  if (OrigTy.isVector()) {
+    LLT OrigElt = OrigTy.getElementType();
+    if (TargetTy.isVector()) {
+      LLT TargetElt = TargetTy.getElementType();
+      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
+        int GCD = greatestCommonDivisor(OrigTy.getNumElements(),
+                                        TargetTy.getNumElements());
+        return LLT::scalarOrVector(GCD, OrigElt);
+      }
+    } else {
+      // If the source is a vector of pointers, return a pointer element.
+      if (OrigElt.getSizeInBits() == TargetSize)
+        return OrigElt;
+    }
+
+    unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
+    if (GCD == OrigElt.getSizeInBits())
+      return OrigElt;
+
+    // If we can't produce the original element type, we have to use a smaller
+    // scalar.
+    if (GCD < OrigElt.getSizeInBits())
+      return LLT::scalar(GCD);
+    return LLT::vector(GCD / OrigElt.getSizeInBits(), OrigElt);
+  }
+
+  if (TargetTy.isVector()) {
+    // Try to preserve the original element type.
+    LLT TargetElt = TargetTy.getElementType();
+    if (TargetElt.getSizeInBits() == OrigSize)
+      return OrigTy;
+  }
+
+  unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
+  return LLT::scalar(GCD);
+}
+
+Optional<int> llvm::getSplatIndex(MachineInstr &MI) {
+  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
+         "Only G_SHUFFLE_VECTOR can have a splat index!");
+  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
+  auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });
+
+  // If all elements are undefined, this shuffle can be considered a splat.
+  // Return 0 for better potential for callers to simplify.
+  if (FirstDefinedIdx == Mask.end())
+    return 0;
+
+  // Make sure all remaining elements are either undef or the same
+  // as the first non-undef value.
+  int SplatValue = *FirstDefinedIdx;
+  if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
+             [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
+    return None;
+
+  return SplatValue;
+}
+
+static bool isBuildVectorOp(unsigned Opcode) {
+  return Opcode == TargetOpcode::G_BUILD_VECTOR ||
+         Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
+}
+
+// TODO: Handle mixed undef elements.
+static bool isBuildVectorConstantSplat(const MachineInstr &MI,
+                                       const MachineRegisterInfo &MRI,
+                                       int64_t SplatValue) {
+  if (!isBuildVectorOp(MI.getOpcode()))
+    return false;
+
+  const unsigned NumOps = MI.getNumOperands();
+  for (unsigned I = 1; I != NumOps; ++I) {
+    Register Element = MI.getOperand(I).getReg();
+    if (!mi_match(Element, MRI, m_SpecificICst(SplatValue)))
+      return false;
   }
 
-  return true;
-}
-
-Optional<int64_t>
-llvm::getBuildVectorConstantSplat(const MachineInstr &MI,
-                                  const MachineRegisterInfo &MRI) {
-  if (!isBuildVectorOp(MI.getOpcode()))
-    return None;
-
-  const unsigned NumOps = MI.getNumOperands();
-  Optional<int64_t> Scalar;
-  for (unsigned I = 1; I != NumOps; ++I) {
-    Register Element = MI.getOperand(I).getReg();
-    int64_t ElementValue;
-    if (!mi_match(Element, MRI, m_ICst(ElementValue)))
-      return None;
-    if (!Scalar)
-      Scalar = ElementValue;
-    else if (*Scalar != ElementValue)
-      return None;
+  return true;
+}
+
+Optional<int64_t>
+llvm::getBuildVectorConstantSplat(const MachineInstr &MI,
+                                  const MachineRegisterInfo &MRI) {
+  if (!isBuildVectorOp(MI.getOpcode()))
+    return None;
+
+  const unsigned NumOps = MI.getNumOperands();
+  Optional<int64_t> Scalar;
+  for (unsigned I = 1; I != NumOps; ++I) {
+    Register Element = MI.getOperand(I).getReg();
+    int64_t ElementValue;
+    if (!mi_match(Element, MRI, m_ICst(ElementValue)))
+      return None;
+    if (!Scalar)
+      Scalar = ElementValue;
+    else if (*Scalar != ElementValue)
+      return None;
   }
 
-  return Scalar;
-}
-
-bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
-                                 const MachineRegisterInfo &MRI) {
-  return isBuildVectorConstantSplat(MI, MRI, 0);
-}
-
-bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
-                                const MachineRegisterInfo &MRI) {
-  return isBuildVectorConstantSplat(MI, MRI, -1);
-}
-
-bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
-                          bool IsFP) {
-  switch (TLI.getBooleanContents(IsVector, IsFP)) {
-  case TargetLowering::UndefinedBooleanContent:
-    return Val & 0x1;
-  case TargetLowering::ZeroOrOneBooleanContent:
-    return Val == 1;
-  case TargetLowering::ZeroOrNegativeOneBooleanContent:
-    return Val == -1;
+  return Scalar;
+}
+
+bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
+                                 const MachineRegisterInfo &MRI) {
+  return isBuildVectorConstantSplat(MI, MRI, 0);
+}
+
+bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
+                                const MachineRegisterInfo &MRI) {
+  return isBuildVectorConstantSplat(MI, MRI, -1);
+}
+
+bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
+                          bool IsFP) {
+  switch (TLI.getBooleanContents(IsVector, IsFP)) {
+  case TargetLowering::UndefinedBooleanContent:
+    return Val & 0x1;
+  case TargetLowering::ZeroOrOneBooleanContent:
+    return Val == 1;
+  case TargetLowering::ZeroOrNegativeOneBooleanContent:
+    return Val == -1;
   }
   llvm_unreachable("Invalid boolean contents");
 }
 
-int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
-                             bool IsFP) {
-  switch (TLI.getBooleanContents(IsVector, IsFP)) {
-  case TargetLowering::UndefinedBooleanContent:
-  case TargetLowering::ZeroOrOneBooleanContent:
-    return 1;
-  case TargetLowering::ZeroOrNegativeOneBooleanContent:
-    return -1;
-  }
-  llvm_unreachable("Invalid boolean contents");
-}
+int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
+                             bool IsFP) {
+  switch (TLI.getBooleanContents(IsVector, IsFP)) {
+  case TargetLowering::UndefinedBooleanContent:
+  case TargetLowering::ZeroOrOneBooleanContent:
+    return 1;
+  case TargetLowering::ZeroOrNegativeOneBooleanContent:
+    return -1;
+  }
+  llvm_unreachable("Invalid boolean contents");
+}
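Note: every removed line above is re-added with identical text; the commit only restores per-line authorship (git blame) information, so the file contents do not change. For reference, the sketch below shows how a few of the GlobalISel helpers touched here are typically called. It is a minimal illustration, not part of this commit: it relies only on the LLVM 12 declarations visible in the diff, while the helper isOneOrAllOnesSplat and its Reg/MRI inputs are hypothetical.

    // Illustrative only. Uses utilities from llvm/CodeGen/GlobalISel/Utils.h
    // as declared in LLVM 12; the surrounding helper is hypothetical.
    #include "llvm/CodeGen/GlobalISel/Utils.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"

    using namespace llvm;

    static bool isOneOrAllOnesSplat(Register Reg,
                                    const MachineRegisterInfo &MRI) {
      // Look through COPY/G_TRUNC/G_SEXT/G_ZEXT for a defining G_CONSTANT
      // and check for the scalar constant 1.
      if (auto ValAndVReg = getConstantVRegValWithLookThrough(Reg, MRI))
        return ValAndVReg->Value.isOneValue();

      // Otherwise, accept a G_BUILD_VECTOR whose elements are all -1.
      if (MachineInstr *Def = getDefIgnoringCopies(Reg, MRI))
        return isBuildVectorAllOnes(*Def, MRI);

      return false;
    }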