author      shadchin <shadchin@yandex-team.ru>            2022-02-10 16:44:39 +0300
committer   Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:44:39 +0300
commit      e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0 (patch)
tree        64175d5cadab313b3e7039ebaa06c5bc3295e274 /contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel
parent      2598ef1d0aee359b4b6d5fdd1758916d5907d04f (diff)
download    ydb-e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0.tar.gz
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel')
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CSEInfo.h | 8
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CallLowering.h | 260
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CombinerHelper.h | 526
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h | 2
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelKnownBits.h | 16
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/IRTranslator.h | 160
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelector.h | 68
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h | 132
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h | 210
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h | 136
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h | 140
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Localizer.h | 10
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h | 202
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h | 320
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h | 24
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Utils.h | 208
16 files changed, 1211 insertions, 1211 deletions
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CSEInfo.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CSEInfo.h
index 92da54451d..83db4418b3 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CSEInfo.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CSEInfo.h
@@ -25,10 +25,10 @@
#include "llvm/CodeGen/GlobalISel/GISelWorkList.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Support/Allocator.h"
-#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/CodeGen.h"
namespace llvm {
-class MachineBasicBlock;
+class MachineBasicBlock;
/// A class that wraps MachineInstrs and derives from FoldingSetNode in order to
/// be uniqued in a CSEMap. The tradeoff here is extra memory allocations for
@@ -189,8 +189,8 @@ public:
const GISelInstProfileBuilder &addNodeIDRegNum(Register Reg) const;
- const GISelInstProfileBuilder &addNodeIDReg(Register Reg) const;
-
+ const GISelInstProfileBuilder &addNodeIDReg(Register Reg) const;
+
const GISelInstProfileBuilder &addNodeIDImmediate(int64_t Imm) const;
const GISelInstProfileBuilder &
addNodeIDMBB(const MachineBasicBlock *MBB) const;
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CallLowering.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CallLowering.h
index 842d7cf08b..549f200269 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CallLowering.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -24,11 +24,11 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
-#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetCallingConv.h"
-#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
-#include "llvm/IR/Type.h"
+#include "llvm/IR/Type.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include <cstdint>
@@ -39,7 +39,7 @@ namespace llvm {
class CallBase;
class DataLayout;
class Function;
-class FunctionLoweringInfo;
+class FunctionLoweringInfo;
class MachineIRBuilder;
struct MachinePointerInfo;
class MachineRegisterInfo;
@@ -51,20 +51,20 @@ class CallLowering {
virtual void anchor();
public:
- struct BaseArgInfo {
- Type *Ty;
- SmallVector<ISD::ArgFlagsTy, 4> Flags;
- bool IsFixed;
-
- BaseArgInfo(Type *Ty,
- ArrayRef<ISD::ArgFlagsTy> Flags = ArrayRef<ISD::ArgFlagsTy>(),
- bool IsFixed = true)
- : Ty(Ty), Flags(Flags.begin(), Flags.end()), IsFixed(IsFixed) {}
-
- BaseArgInfo() : Ty(nullptr), IsFixed(false) {}
- };
-
- struct ArgInfo : public BaseArgInfo {
+ struct BaseArgInfo {
+ Type *Ty;
+ SmallVector<ISD::ArgFlagsTy, 4> Flags;
+ bool IsFixed;
+
+ BaseArgInfo(Type *Ty,
+ ArrayRef<ISD::ArgFlagsTy> Flags = ArrayRef<ISD::ArgFlagsTy>(),
+ bool IsFixed = true)
+ : Ty(Ty), Flags(Flags.begin(), Flags.end()), IsFixed(IsFixed) {}
+
+ BaseArgInfo() : Ty(nullptr), IsFixed(false) {}
+ };
+
+ struct ArgInfo : public BaseArgInfo {
SmallVector<Register, 4> Regs;
// If the argument had to be split into multiple parts according to the
// target calling convention, then this contains the original vregs
@@ -74,7 +74,7 @@ public:
ArgInfo(ArrayRef<Register> Regs, Type *Ty,
ArrayRef<ISD::ArgFlagsTy> Flags = ArrayRef<ISD::ArgFlagsTy>(),
bool IsFixed = true)
- : BaseArgInfo(Ty, Flags, IsFixed), Regs(Regs.begin(), Regs.end()) {
+ : BaseArgInfo(Ty, Flags, IsFixed), Regs(Regs.begin(), Regs.end()) {
if (!Regs.empty() && Flags.empty())
this->Flags.push_back(ISD::ArgFlagsTy());
// FIXME: We should have just one way of saying "no register".
@@ -83,7 +83,7 @@ public:
"only void types should have no register");
}
- ArgInfo() : BaseArgInfo() {}
+ ArgInfo() : BaseArgInfo() {}
};
struct CallLoweringInfo {
@@ -119,15 +119,15 @@ public:
/// True if the call is to a vararg function.
bool IsVarArg = false;
-
- /// True if the function's return value can be lowered to registers.
- bool CanLowerReturn = true;
-
- /// VReg to hold the hidden sret parameter.
- Register DemoteRegister;
-
- /// The stack index for sret demotion.
- int DemoteStackIndex;
+
+ /// True if the function's return value can be lowered to registers.
+ bool CanLowerReturn = true;
+
+ /// VReg to hold the hidden sret parameter.
+ Register DemoteRegister;
+
+ /// The stack index for sret demotion.
+ int DemoteStackIndex;
};
/// Argument handling is mostly uniform between the four places that
@@ -137,18 +137,18 @@ public:
/// argument should go, exactly what happens can vary slightly. This
/// class abstracts the differences.
struct ValueHandler {
- ValueHandler(bool IsIncoming, MachineIRBuilder &MIRBuilder,
- MachineRegisterInfo &MRI, CCAssignFn *AssignFn)
- : MIRBuilder(MIRBuilder), MRI(MRI), AssignFn(AssignFn),
- IsIncomingArgumentHandler(IsIncoming) {}
+ ValueHandler(bool IsIncoming, MachineIRBuilder &MIRBuilder,
+ MachineRegisterInfo &MRI, CCAssignFn *AssignFn)
+ : MIRBuilder(MIRBuilder), MRI(MRI), AssignFn(AssignFn),
+ IsIncomingArgumentHandler(IsIncoming) {}
virtual ~ValueHandler() = default;
/// Returns true if the handler is dealing with incoming arguments,
/// i.e. those that move values from some physical location to vregs.
- bool isIncomingArgumentHandler() const {
- return IsIncomingArgumentHandler;
- }
+ bool isIncomingArgumentHandler() const {
+ return IsIncomingArgumentHandler;
+ }
/// Materialize a VReg containing the address of the specified
/// stack-based object. This is either based on a FrameIndex or
@@ -176,7 +176,7 @@ public:
virtual void assignValueToAddress(const ArgInfo &Arg, Register Addr,
uint64_t Size, MachinePointerInfo &MPO,
CCValAssign &VA) {
- assert(Arg.Regs.size() == 1);
+ assert(Arg.Regs.size() == 1);
assignValueToAddress(Arg.Regs[0], Addr, Size, MPO, VA);
}
@@ -207,22 +207,22 @@ public:
CCAssignFn *AssignFn;
private:
- bool IsIncomingArgumentHandler;
+ bool IsIncomingArgumentHandler;
virtual void anchor();
};
- struct IncomingValueHandler : public ValueHandler {
- IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
- CCAssignFn *AssignFn)
- : ValueHandler(true, MIRBuilder, MRI, AssignFn) {}
- };
-
- struct OutgoingValueHandler : public ValueHandler {
- OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
- CCAssignFn *AssignFn)
- : ValueHandler(false, MIRBuilder, MRI, AssignFn) {}
- };
-
+ struct IncomingValueHandler : public ValueHandler {
+ IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
+ CCAssignFn *AssignFn)
+ : ValueHandler(true, MIRBuilder, MRI, AssignFn) {}
+ };
+
+ struct OutgoingValueHandler : public ValueHandler {
+ OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
+ CCAssignFn *AssignFn)
+ : ValueHandler(false, MIRBuilder, MRI, AssignFn) {}
+ };
+
protected:
/// Getter for generic TargetLowering class.
const TargetLowering *getTLI() const {
@@ -235,17 +235,17 @@ protected:
return static_cast<const XXXTargetLowering *>(TLI);
}
- /// \returns Flags corresponding to the attributes on the \p ArgIdx-th
- /// parameter of \p Call.
- ISD::ArgFlagsTy getAttributesForArgIdx(const CallBase &Call,
- unsigned ArgIdx) const;
-
- /// Adds flags to \p Flags based off of the attributes in \p Attrs.
- /// \p OpIdx is the index in \p Attrs to add flags from.
- void addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags,
- const AttributeList &Attrs,
- unsigned OpIdx) const;
-
+ /// \returns Flags corresponding to the attributes on the \p ArgIdx-th
+ /// parameter of \p Call.
+ ISD::ArgFlagsTy getAttributesForArgIdx(const CallBase &Call,
+ unsigned ArgIdx) const;
+
+ /// Adds flags to \p Flags based off of the attributes in \p Attrs.
+ /// \p OpIdx is the index in \p Attrs to add flags from.
+ void addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags,
+ const AttributeList &Attrs,
+ unsigned OpIdx) const;
+
template <typename FuncInfoTy>
void setArgFlags(ArgInfo &Arg, unsigned OpIdx, const DataLayout &DL,
const FuncInfoTy &FuncInfo) const;
@@ -269,7 +269,7 @@ protected:
MachineIRBuilder &MIRBuilder) const;
/// Invoke Handler::assignArg on each of the given \p Args and then use
- /// \p Handler to move them to the assigned locations.
+ /// \p Handler to move them to the assigned locations.
///
/// \return True if everything has succeeded, false otherwise.
bool handleAssignments(MachineIRBuilder &MIRBuilder,
@@ -289,14 +289,14 @@ protected:
CCAssignFn &AssignFnFixed,
CCAssignFn &AssignFnVarArg) const;
- /// Check whether parameters to a call that are passed in callee saved
- /// registers are the same as from the calling function. This needs to be
- /// checked for tail call eligibility.
- bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
- const uint32_t *CallerPreservedMask,
- const SmallVectorImpl<CCValAssign> &ArgLocs,
- const SmallVectorImpl<ArgInfo> &OutVals) const;
-
+ /// Check whether parameters to a call that are passed in callee saved
+ /// registers are the same as from the calling function. This needs to be
+ /// checked for tail call eligibility.
+ bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
+ const uint32_t *CallerPreservedMask,
+ const SmallVectorImpl<CCValAssign> &ArgLocs,
+ const SmallVectorImpl<ArgInfo> &OutVals) const;
+
/// \returns True if the calling convention for a callee and its caller pass
/// results in the same way. Typically used for tail call eligibility checks.
///
@@ -327,73 +327,73 @@ public:
return false;
}
- /// Load the returned value from the stack into virtual registers in \p VRegs.
- /// It uses the frame index \p FI and the start offset from \p DemoteReg.
- /// The loaded data size will be determined from \p RetTy.
- void insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy,
- ArrayRef<Register> VRegs, Register DemoteReg,
- int FI) const;
-
- /// Store the return value given by \p VRegs into stack starting at the offset
- /// specified in \p DemoteReg.
- void insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy,
- ArrayRef<Register> VRegs, Register DemoteReg) const;
-
- /// Insert the hidden sret ArgInfo to the beginning of \p SplitArgs.
- /// This function should be called from the target specific
- /// lowerFormalArguments when \p F requires the sret demotion.
- void insertSRetIncomingArgument(const Function &F,
- SmallVectorImpl<ArgInfo> &SplitArgs,
- Register &DemoteReg, MachineRegisterInfo &MRI,
- const DataLayout &DL) const;
-
- /// For the call-base described by \p CB, insert the hidden sret ArgInfo to
- /// the OrigArgs field of \p Info.
- void insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder,
- const CallBase &CB,
- CallLoweringInfo &Info) const;
-
- /// \return True if the return type described by \p Outs can be returned
- /// without performing sret demotion.
- bool checkReturn(CCState &CCInfo, SmallVectorImpl<BaseArgInfo> &Outs,
- CCAssignFn *Fn) const;
-
- /// Get the type and the ArgFlags for the split components of \p RetTy as
- /// returned by \c ComputeValueVTs.
- void getReturnInfo(CallingConv::ID CallConv, Type *RetTy, AttributeList Attrs,
- SmallVectorImpl<BaseArgInfo> &Outs,
- const DataLayout &DL) const;
-
- /// Toplevel function to check the return type based on the target calling
- /// convention. \return True if the return value of \p MF can be returned
- /// without performing sret demotion.
- bool checkReturnTypeForCallConv(MachineFunction &MF) const;
-
- /// This hook must be implemented to check whether the return values
- /// described by \p Outs can fit into the return registers. If false
- /// is returned, an sret-demotion is performed.
- virtual bool canLowerReturn(MachineFunction &MF, CallingConv::ID CallConv,
- SmallVectorImpl<BaseArgInfo> &Outs,
- bool IsVarArg) const {
- return true;
- }
-
+ /// Load the returned value from the stack into virtual registers in \p VRegs.
+ /// It uses the frame index \p FI and the start offset from \p DemoteReg.
+ /// The loaded data size will be determined from \p RetTy.
+ void insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy,
+ ArrayRef<Register> VRegs, Register DemoteReg,
+ int FI) const;
+
+ /// Store the return value given by \p VRegs into stack starting at the offset
+ /// specified in \p DemoteReg.
+ void insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy,
+ ArrayRef<Register> VRegs, Register DemoteReg) const;
+
+ /// Insert the hidden sret ArgInfo to the beginning of \p SplitArgs.
+ /// This function should be called from the target specific
+ /// lowerFormalArguments when \p F requires the sret demotion.
+ void insertSRetIncomingArgument(const Function &F,
+ SmallVectorImpl<ArgInfo> &SplitArgs,
+ Register &DemoteReg, MachineRegisterInfo &MRI,
+ const DataLayout &DL) const;
+
+ /// For the call-base described by \p CB, insert the hidden sret ArgInfo to
+ /// the OrigArgs field of \p Info.
+ void insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder,
+ const CallBase &CB,
+ CallLoweringInfo &Info) const;
+
+ /// \return True if the return type described by \p Outs can be returned
+ /// without performing sret demotion.
+ bool checkReturn(CCState &CCInfo, SmallVectorImpl<BaseArgInfo> &Outs,
+ CCAssignFn *Fn) const;
+
+ /// Get the type and the ArgFlags for the split components of \p RetTy as
+ /// returned by \c ComputeValueVTs.
+ void getReturnInfo(CallingConv::ID CallConv, Type *RetTy, AttributeList Attrs,
+ SmallVectorImpl<BaseArgInfo> &Outs,
+ const DataLayout &DL) const;
+
+ /// Toplevel function to check the return type based on the target calling
+ /// convention. \return True if the return value of \p MF can be returned
+ /// without performing sret demotion.
+ bool checkReturnTypeForCallConv(MachineFunction &MF) const;
+
+ /// This hook must be implemented to check whether the return values
+ /// described by \p Outs can fit into the return registers. If false
+ /// is returned, an sret-demotion is performed.
+ virtual bool canLowerReturn(MachineFunction &MF, CallingConv::ID CallConv,
+ SmallVectorImpl<BaseArgInfo> &Outs,
+ bool IsVarArg) const {
+ return true;
+ }
+
/// This hook must be implemented to lower outgoing return values, described
/// by \p Val, into the specified virtual registers \p VRegs.
/// This hook is used by GlobalISel.
///
- /// \p FLI is required for sret demotion.
- ///
+ /// \p FLI is required for sret demotion.
+ ///
/// \p SwiftErrorVReg is non-zero if the function has a swifterror parameter
/// that needs to be implicitly returned.
///
/// \return True if the lowering succeeds, false otherwise.
virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
- ArrayRef<Register> VRegs, FunctionLoweringInfo &FLI,
+ ArrayRef<Register> VRegs, FunctionLoweringInfo &FLI,
Register SwiftErrorVReg) const {
if (!supportSwiftError()) {
assert(SwiftErrorVReg == 0 && "attempt to use unsupported swifterror");
- return lowerReturn(MIRBuilder, Val, VRegs, FLI);
+ return lowerReturn(MIRBuilder, Val, VRegs, FLI);
}
return false;
}
@@ -401,8 +401,8 @@ public:
/// This hook behaves as the extended lowerReturn function, but for targets
/// that do not support swifterror value promotion.
virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
- ArrayRef<Register> VRegs,
- FunctionLoweringInfo &FLI) const {
+ ArrayRef<Register> VRegs,
+ FunctionLoweringInfo &FLI) const {
return false;
}
@@ -415,13 +415,13 @@ public:
/// the second in \c VRegs[1], and so on. For each argument, there will be one
/// register for each non-aggregate type, as returned by \c computeValueLLTs.
/// \p MIRBuilder is set to the proper insertion for the argument
- /// lowering. \p FLI is required for sret demotion.
+ /// lowering. \p FLI is required for sret demotion.
///
/// \return True if the lowering succeeded, false otherwise.
virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder,
const Function &F,
- ArrayRef<ArrayRef<Register>> VRegs,
- FunctionLoweringInfo &FLI) const {
+ ArrayRef<ArrayRef<Register>> VRegs,
+ FunctionLoweringInfo &FLI) const {
return false;
}
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 7e482d9b7d..1d29a2ddc8 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -24,8 +24,8 @@
#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINER_HELPER_H
#define LLVM_CODEGEN_GLOBALISEL_COMBINER_HELPER_H
-#include "llvm/ADT/APFloat.h"
-#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/Support/Alignment.h"
@@ -34,15 +34,15 @@ namespace llvm {
class GISelChangeObserver;
class MachineIRBuilder;
-class MachineInstrBuilder;
+class MachineInstrBuilder;
class MachineRegisterInfo;
class MachineInstr;
class MachineOperand;
class GISelKnownBits;
class MachineDominatorTree;
class LegalizerInfo;
-struct LegalityQuery;
-class TargetLowering;
+struct LegalityQuery;
+class TargetLowering;
struct PreferredTuple {
LLT Ty; // The result type of the extend.
@@ -62,37 +62,37 @@ struct PtrAddChain {
Register Base;
};
-struct RegisterImmPair {
- Register Reg;
- int64_t Imm;
-};
-
-struct ShiftOfShiftedLogic {
- MachineInstr *Logic;
- MachineInstr *Shift2;
- Register LogicNonShiftReg;
- uint64_t ValSum;
-};
-
-using OperandBuildSteps =
- SmallVector<std::function<void(MachineInstrBuilder &)>, 4>;
-struct InstructionBuildSteps {
- unsigned Opcode = 0; /// The opcode for the produced instruction.
- OperandBuildSteps OperandFns; /// Operands to be added to the instruction.
- InstructionBuildSteps() = default;
- InstructionBuildSteps(unsigned Opcode, const OperandBuildSteps &OperandFns)
- : Opcode(Opcode), OperandFns(OperandFns) {}
-};
-
-struct InstructionStepsMatchInfo {
- /// Describes instructions to be built during a combine.
- SmallVector<InstructionBuildSteps, 2> InstrsToBuild;
- InstructionStepsMatchInfo() = default;
- InstructionStepsMatchInfo(
- std::initializer_list<InstructionBuildSteps> InstrsToBuild)
- : InstrsToBuild(InstrsToBuild) {}
-};
-
+struct RegisterImmPair {
+ Register Reg;
+ int64_t Imm;
+};
+
+struct ShiftOfShiftedLogic {
+ MachineInstr *Logic;
+ MachineInstr *Shift2;
+ Register LogicNonShiftReg;
+ uint64_t ValSum;
+};
+
+using OperandBuildSteps =
+ SmallVector<std::function<void(MachineInstrBuilder &)>, 4>;
+struct InstructionBuildSteps {
+ unsigned Opcode = 0; /// The opcode for the produced instruction.
+ OperandBuildSteps OperandFns; /// Operands to be added to the instruction.
+ InstructionBuildSteps() = default;
+ InstructionBuildSteps(unsigned Opcode, const OperandBuildSteps &OperandFns)
+ : Opcode(Opcode), OperandFns(OperandFns) {}
+};
+
+struct InstructionStepsMatchInfo {
+ /// Describes instructions to be built during a combine.
+ SmallVector<InstructionBuildSteps, 2> InstrsToBuild;
+ InstructionStepsMatchInfo() = default;
+ InstructionStepsMatchInfo(
+ std::initializer_list<InstructionBuildSteps> InstrsToBuild)
+ : InstrsToBuild(InstrsToBuild) {}
+};
+
class CombinerHelper {
protected:
MachineIRBuilder &Builder;
@@ -112,12 +112,12 @@ public:
return KB;
}
- const TargetLowering &getTargetLowering() const;
-
- /// \return true if the combine is running prior to legalization, or if \p
- /// Query is legal on the target.
- bool isLegalOrBeforeLegalizer(const LegalityQuery &Query) const;
-
+ const TargetLowering &getTargetLowering() const;
+
+ /// \return true if the combine is running prior to legalization, or if \p
+ /// Query is legal on the target.
+ bool isLegalOrBeforeLegalizer(const LegalityQuery &Query) const;
+
/// MachineRegisterInfo::replaceRegWith() and inform the observer of the changes
void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg, Register ToReg) const;
@@ -156,18 +156,18 @@ public:
bool matchCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo);
void applyCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo);
- bool matchSextTruncSextLoad(MachineInstr &MI);
- bool applySextTruncSextLoad(MachineInstr &MI);
+ bool matchSextTruncSextLoad(MachineInstr &MI);
+ bool applySextTruncSextLoad(MachineInstr &MI);
- /// Match sext_inreg(load p), imm -> sextload p
- bool matchSextInRegOfLoad(MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo);
- bool applySextInRegOfLoad(MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo);
+ /// Match sext_inreg(load p), imm -> sextload p
+ bool matchSextInRegOfLoad(MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo);
+ bool applySextInRegOfLoad(MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo);
+
+ /// If a brcond's true block is not the fallthrough, make it so by inverting
+ /// the condition and swapping operands.
+ bool matchOptBrCondByInvertingCond(MachineInstr &MI);
+ void applyOptBrCondByInvertingCond(MachineInstr &MI);
- /// If a brcond's true block is not the fallthrough, make it so by inverting
- /// the condition and swapping operands.
- bool matchOptBrCondByInvertingCond(MachineInstr &MI);
- void applyOptBrCondByInvertingCond(MachineInstr &MI);
-
/// If \p MI is G_CONCAT_VECTORS, try to combine it.
/// Returns true if MI changed.
/// Right now, we support:
@@ -243,28 +243,28 @@ public:
bool matchPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo);
bool applyPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo);
- /// Fold (shift (shift base, x), y) -> (shift base (x+y))
- bool matchShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo);
- bool applyShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo);
-
- /// If we have a shift-by-constant of a bitwise logic op that itself has a
- /// shift-by-constant operand with identical opcode, we may be able to convert
- /// that into 2 independent shifts followed by the logic op.
- bool matchShiftOfShiftedLogic(MachineInstr &MI,
- ShiftOfShiftedLogic &MatchInfo);
- bool applyShiftOfShiftedLogic(MachineInstr &MI,
- ShiftOfShiftedLogic &MatchInfo);
-
+ /// Fold (shift (shift base, x), y) -> (shift base (x+y))
+ bool matchShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo);
+ bool applyShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo);
+
+ /// If we have a shift-by-constant of a bitwise logic op that itself has a
+ /// shift-by-constant operand with identical opcode, we may be able to convert
+ /// that into 2 independent shifts followed by the logic op.
+ bool matchShiftOfShiftedLogic(MachineInstr &MI,
+ ShiftOfShiftedLogic &MatchInfo);
+ bool applyShiftOfShiftedLogic(MachineInstr &MI,
+ ShiftOfShiftedLogic &MatchInfo);
+
/// Transform a multiply by a power-of-2 value to a left shift.
bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);
bool applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);
- // Transform a G_SHL with an extended source into a narrower shift if
- // possible.
- bool matchCombineShlOfExtend(MachineInstr &MI, RegisterImmPair &MatchData);
- bool applyCombineShlOfExtend(MachineInstr &MI,
- const RegisterImmPair &MatchData);
-
+ // Transform a G_SHL with an extended source into a narrower shift if
+ // possible.
+ bool matchCombineShlOfExtend(MachineInstr &MI, RegisterImmPair &MatchData);
+ bool applyCombineShlOfExtend(MachineInstr &MI,
+ const RegisterImmPair &MatchData);
+
/// Reduce a shift by a constant to an unmerge and a shift on a half sized
/// type. This will not produce a shift smaller than \p TargetShiftSize.
bool matchCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftSize,
@@ -272,86 +272,86 @@ public:
bool applyCombineShiftToUnmerge(MachineInstr &MI, const unsigned &ShiftVal);
bool tryCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftAmount);
- /// Transform <ty,...> G_UNMERGE(G_MERGE ty X, Y, Z) -> ty X, Y, Z.
- bool
- matchCombineUnmergeMergeToPlainValues(MachineInstr &MI,
- SmallVectorImpl<Register> &Operands);
- bool
- applyCombineUnmergeMergeToPlainValues(MachineInstr &MI,
- SmallVectorImpl<Register> &Operands);
-
- /// Transform G_UNMERGE Constant -> Constant1, Constant2, ...
- bool matchCombineUnmergeConstant(MachineInstr &MI,
- SmallVectorImpl<APInt> &Csts);
- bool applyCombineUnmergeConstant(MachineInstr &MI,
- SmallVectorImpl<APInt> &Csts);
-
- /// Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
- bool matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);
- bool applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);
-
- /// Transform X, Y = G_UNMERGE(G_ZEXT(Z)) -> X = G_ZEXT(Z); Y = G_CONSTANT 0
- bool matchCombineUnmergeZExtToZExt(MachineInstr &MI);
- bool applyCombineUnmergeZExtToZExt(MachineInstr &MI);
-
- /// Transform fp_instr(cst) to constant result of the fp operation.
- bool matchCombineConstantFoldFpUnary(MachineInstr &MI,
- Optional<APFloat> &Cst);
- bool applyCombineConstantFoldFpUnary(MachineInstr &MI,
- Optional<APFloat> &Cst);
-
- /// Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address space.
- bool matchCombineI2PToP2I(MachineInstr &MI, Register &Reg);
- bool applyCombineI2PToP2I(MachineInstr &MI, Register &Reg);
-
- /// Transform PtrToInt(IntToPtr(x)) to x.
- bool matchCombineP2IToI2P(MachineInstr &MI, Register &Reg);
- bool applyCombineP2IToI2P(MachineInstr &MI, Register &Reg);
-
- /// Transform G_ADD (G_PTRTOINT x), y -> G_PTRTOINT (G_PTR_ADD x, y)
- /// Transform G_ADD y, (G_PTRTOINT x) -> G_PTRTOINT (G_PTR_ADD x, y)
- bool matchCombineAddP2IToPtrAdd(MachineInstr &MI,
- std::pair<Register, bool> &PtrRegAndCommute);
- bool applyCombineAddP2IToPtrAdd(MachineInstr &MI,
- std::pair<Register, bool> &PtrRegAndCommute);
-
- // Transform G_PTR_ADD (G_PTRTOINT C1), C2 -> C1 + C2
- bool matchCombineConstPtrAddToI2P(MachineInstr &MI, int64_t &NewCst);
- bool applyCombineConstPtrAddToI2P(MachineInstr &MI, int64_t &NewCst);
-
- /// Transform anyext(trunc(x)) to x.
- bool matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);
- bool applyCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);
-
- /// Transform [asz]ext([asz]ext(x)) to [asz]ext x.
- bool matchCombineExtOfExt(MachineInstr &MI,
- std::tuple<Register, unsigned> &MatchInfo);
- bool applyCombineExtOfExt(MachineInstr &MI,
- std::tuple<Register, unsigned> &MatchInfo);
-
- /// Transform fneg(fneg(x)) to x.
- bool matchCombineFNegOfFNeg(MachineInstr &MI, Register &Reg);
-
- /// Match fabs(fabs(x)) to fabs(x).
- bool matchCombineFAbsOfFAbs(MachineInstr &MI, Register &Src);
- bool applyCombineFAbsOfFAbs(MachineInstr &MI, Register &Src);
-
- /// Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
- bool matchCombineTruncOfExt(MachineInstr &MI,
- std::pair<Register, unsigned> &MatchInfo);
- bool applyCombineTruncOfExt(MachineInstr &MI,
- std::pair<Register, unsigned> &MatchInfo);
-
- /// Transform trunc (shl x, K) to shl (trunc x),
- /// K => K < VT.getScalarSizeInBits().
- bool matchCombineTruncOfShl(MachineInstr &MI,
- std::pair<Register, Register> &MatchInfo);
- bool applyCombineTruncOfShl(MachineInstr &MI,
- std::pair<Register, Register> &MatchInfo);
-
- /// Transform G_MUL(x, -1) to G_SUB(0, x)
- bool applyCombineMulByNegativeOne(MachineInstr &MI);
-
+ /// Transform <ty,...> G_UNMERGE(G_MERGE ty X, Y, Z) -> ty X, Y, Z.
+ bool
+ matchCombineUnmergeMergeToPlainValues(MachineInstr &MI,
+ SmallVectorImpl<Register> &Operands);
+ bool
+ applyCombineUnmergeMergeToPlainValues(MachineInstr &MI,
+ SmallVectorImpl<Register> &Operands);
+
+ /// Transform G_UNMERGE Constant -> Constant1, Constant2, ...
+ bool matchCombineUnmergeConstant(MachineInstr &MI,
+ SmallVectorImpl<APInt> &Csts);
+ bool applyCombineUnmergeConstant(MachineInstr &MI,
+ SmallVectorImpl<APInt> &Csts);
+
+ /// Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
+ bool matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);
+ bool applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);
+
+ /// Transform X, Y = G_UNMERGE(G_ZEXT(Z)) -> X = G_ZEXT(Z); Y = G_CONSTANT 0
+ bool matchCombineUnmergeZExtToZExt(MachineInstr &MI);
+ bool applyCombineUnmergeZExtToZExt(MachineInstr &MI);
+
+ /// Transform fp_instr(cst) to constant result of the fp operation.
+ bool matchCombineConstantFoldFpUnary(MachineInstr &MI,
+ Optional<APFloat> &Cst);
+ bool applyCombineConstantFoldFpUnary(MachineInstr &MI,
+ Optional<APFloat> &Cst);
+
+ /// Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address space.
+ bool matchCombineI2PToP2I(MachineInstr &MI, Register &Reg);
+ bool applyCombineI2PToP2I(MachineInstr &MI, Register &Reg);
+
+ /// Transform PtrToInt(IntToPtr(x)) to x.
+ bool matchCombineP2IToI2P(MachineInstr &MI, Register &Reg);
+ bool applyCombineP2IToI2P(MachineInstr &MI, Register &Reg);
+
+ /// Transform G_ADD (G_PTRTOINT x), y -> G_PTRTOINT (G_PTR_ADD x, y)
+ /// Transform G_ADD y, (G_PTRTOINT x) -> G_PTRTOINT (G_PTR_ADD x, y)
+ bool matchCombineAddP2IToPtrAdd(MachineInstr &MI,
+ std::pair<Register, bool> &PtrRegAndCommute);
+ bool applyCombineAddP2IToPtrAdd(MachineInstr &MI,
+ std::pair<Register, bool> &PtrRegAndCommute);
+
+ // Transform G_PTR_ADD (G_PTRTOINT C1), C2 -> C1 + C2
+ bool matchCombineConstPtrAddToI2P(MachineInstr &MI, int64_t &NewCst);
+ bool applyCombineConstPtrAddToI2P(MachineInstr &MI, int64_t &NewCst);
+
+ /// Transform anyext(trunc(x)) to x.
+ bool matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);
+ bool applyCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);
+
+ /// Transform [asz]ext([asz]ext(x)) to [asz]ext x.
+ bool matchCombineExtOfExt(MachineInstr &MI,
+ std::tuple<Register, unsigned> &MatchInfo);
+ bool applyCombineExtOfExt(MachineInstr &MI,
+ std::tuple<Register, unsigned> &MatchInfo);
+
+ /// Transform fneg(fneg(x)) to x.
+ bool matchCombineFNegOfFNeg(MachineInstr &MI, Register &Reg);
+
+ /// Match fabs(fabs(x)) to fabs(x).
+ bool matchCombineFAbsOfFAbs(MachineInstr &MI, Register &Src);
+ bool applyCombineFAbsOfFAbs(MachineInstr &MI, Register &Src);
+
+ /// Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
+ bool matchCombineTruncOfExt(MachineInstr &MI,
+ std::pair<Register, unsigned> &MatchInfo);
+ bool applyCombineTruncOfExt(MachineInstr &MI,
+ std::pair<Register, unsigned> &MatchInfo);
+
+ /// Transform trunc (shl x, K) to shl (trunc x),
+ /// K => K < VT.getScalarSizeInBits().
+ bool matchCombineTruncOfShl(MachineInstr &MI,
+ std::pair<Register, Register> &MatchInfo);
+ bool applyCombineTruncOfShl(MachineInstr &MI,
+ std::pair<Register, Register> &MatchInfo);
+
+ /// Transform G_MUL(x, -1) to G_SUB(0, x)
+ bool applyCombineMulByNegativeOne(MachineInstr &MI);
+
/// Return true if any explicit use operand on \p MI is defined by a
/// G_IMPLICIT_DEF.
bool matchAnyExplicitUseIsUndef(MachineInstr &MI);
@@ -366,13 +366,13 @@ public:
/// Return true if a G_STORE instruction \p MI is storing an undef value.
bool matchUndefStore(MachineInstr &MI);
- /// Return true if a G_SELECT instruction \p MI has an undef comparison.
- bool matchUndefSelectCmp(MachineInstr &MI);
-
- /// Return true if a G_SELECT instruction \p MI has a constant comparison. If
- /// true, \p OpIdx will store the operand index of the known selected value.
- bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx);
-
+ /// Return true if a G_SELECT instruction \p MI has an undef comparison.
+ bool matchUndefSelectCmp(MachineInstr &MI);
+
+ /// Return true if a G_SELECT instruction \p MI has a constant comparison. If
+ /// true, \p OpIdx will store the operand index of the known selected value.
+ bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx);
+
/// Replace an instruction with a G_FCONSTANT with value \p C.
bool replaceInstWithFConstant(MachineInstr &MI, double C);
@@ -385,9 +385,9 @@ public:
/// Delete \p MI and replace all of its uses with its \p OpIdx-th operand.
bool replaceSingleDefInstWithOperand(MachineInstr &MI, unsigned OpIdx);
- /// Delete \p MI and replace all of its uses with \p Replacement.
- bool replaceSingleDefInstWithReg(MachineInstr &MI, Register Replacement);
-
+ /// Delete \p MI and replace all of its uses with \p Replacement.
+ bool replaceSingleDefInstWithReg(MachineInstr &MI, Register Replacement);
+
/// Return true if \p MOP1 and \p MOP2 are register operands are defined by
/// equivalent instructions.
bool matchEqualDefs(const MachineOperand &MOP1, const MachineOperand &MOP2);
@@ -405,12 +405,12 @@ public:
/// Check if operand \p OpIdx is zero.
bool matchOperandIsZero(MachineInstr &MI, unsigned OpIdx);
- /// Check if operand \p OpIdx is undef.
- bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx);
-
- /// Check if operand \p OpIdx is known to be a power of 2.
- bool matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI, unsigned OpIdx);
-
+ /// Check if operand \p OpIdx is undef.
+ bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx);
+
+ /// Check if operand \p OpIdx is known to be a power of 2.
+ bool matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI, unsigned OpIdx);
+
/// Erase \p MI
bool eraseInst(MachineInstr &MI);
@@ -420,79 +420,79 @@ public:
bool applySimplifyAddToSub(MachineInstr &MI,
std::tuple<Register, Register> &MatchInfo);
- /// Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
- bool
- matchHoistLogicOpWithSameOpcodeHands(MachineInstr &MI,
- InstructionStepsMatchInfo &MatchInfo);
-
- /// Replace \p MI with a series of instructions described in \p MatchInfo.
- bool applyBuildInstructionSteps(MachineInstr &MI,
- InstructionStepsMatchInfo &MatchInfo);
-
- /// Match ashr (shl x, C), C -> sext_inreg (C)
- bool matchAshrShlToSextInreg(MachineInstr &MI,
- std::tuple<Register, int64_t> &MatchInfo);
- bool applyAshShlToSextInreg(MachineInstr &MI,
- std::tuple<Register, int64_t> &MatchInfo);
- /// \return true if \p MI is a G_AND instruction whose operands are x and y
- /// where x & y == x or x & y == y. (E.g., one of operands is all-ones value.)
- ///
- /// \param [in] MI - The G_AND instruction.
- /// \param [out] Replacement - A register the G_AND should be replaced with on
- /// success.
- bool matchRedundantAnd(MachineInstr &MI, Register &Replacement);
-
- /// \return true if \p MI is a G_OR instruction whose operands are x and y
- /// where x | y == x or x | y == y. (E.g., one of operands is all-zeros
- /// value.)
- ///
- /// \param [in] MI - The G_OR instruction.
- /// \param [out] Replacement - A register the G_OR should be replaced with on
- /// success.
- bool matchRedundantOr(MachineInstr &MI, Register &Replacement);
-
- /// \return true if \p MI is a G_SEXT_INREG that can be erased.
- bool matchRedundantSExtInReg(MachineInstr &MI);
-
- /// Combine inverting a result of a compare into the opposite cond code.
- bool matchNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);
- bool applyNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);
-
- /// Fold (xor (and x, y), y) -> (and (not x), y)
- ///{
- bool matchXorOfAndWithSameReg(MachineInstr &MI,
- std::pair<Register, Register> &MatchInfo);
- bool applyXorOfAndWithSameReg(MachineInstr &MI,
- std::pair<Register, Register> &MatchInfo);
- ///}
-
- /// Combine G_PTR_ADD with nullptr to G_INTTOPTR
- bool matchPtrAddZero(MachineInstr &MI);
- bool applyPtrAddZero(MachineInstr &MI);
-
- /// Combine G_UREM x, (known power of 2) to an add and bitmasking.
- bool applySimplifyURemByPow2(MachineInstr &MI);
-
- bool matchCombineInsertVecElts(MachineInstr &MI,
- SmallVectorImpl<Register> &MatchInfo);
-
- bool applyCombineInsertVecElts(MachineInstr &MI,
- SmallVectorImpl<Register> &MatchInfo);
-
- /// Match expression trees of the form
- ///
- /// \code
- /// sN *a = ...
- /// sM val = a[0] | (a[1] << N) | (a[2] << 2N) | (a[3] << 3N) ...
- /// \endcode
- ///
- /// And check if the tree can be replaced with a M-bit load + possibly a
- /// bswap.
- bool matchLoadOrCombine(MachineInstr &MI,
- std::function<void(MachineIRBuilder &)> &MatchInfo);
- bool applyLoadOrCombine(MachineInstr &MI,
- std::function<void(MachineIRBuilder &)> &MatchInfo);
-
+ /// Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
+ bool
+ matchHoistLogicOpWithSameOpcodeHands(MachineInstr &MI,
+ InstructionStepsMatchInfo &MatchInfo);
+
+ /// Replace \p MI with a series of instructions described in \p MatchInfo.
+ bool applyBuildInstructionSteps(MachineInstr &MI,
+ InstructionStepsMatchInfo &MatchInfo);
+
+ /// Match ashr (shl x, C), C -> sext_inreg (C)
+ bool matchAshrShlToSextInreg(MachineInstr &MI,
+ std::tuple<Register, int64_t> &MatchInfo);
+ bool applyAshShlToSextInreg(MachineInstr &MI,
+ std::tuple<Register, int64_t> &MatchInfo);
+ /// \return true if \p MI is a G_AND instruction whose operands are x and y
+ /// where x & y == x or x & y == y. (E.g., one of operands is all-ones value.)
+ ///
+ /// \param [in] MI - The G_AND instruction.
+ /// \param [out] Replacement - A register the G_AND should be replaced with on
+ /// success.
+ bool matchRedundantAnd(MachineInstr &MI, Register &Replacement);
+
+ /// \return true if \p MI is a G_OR instruction whose operands are x and y
+ /// where x | y == x or x | y == y. (E.g., one of operands is all-zeros
+ /// value.)
+ ///
+ /// \param [in] MI - The G_OR instruction.
+ /// \param [out] Replacement - A register the G_OR should be replaced with on
+ /// success.
+ bool matchRedundantOr(MachineInstr &MI, Register &Replacement);
+
+ /// \return true if \p MI is a G_SEXT_INREG that can be erased.
+ bool matchRedundantSExtInReg(MachineInstr &MI);
+
+ /// Combine inverting a result of a compare into the opposite cond code.
+ bool matchNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);
+ bool applyNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);
+
+ /// Fold (xor (and x, y), y) -> (and (not x), y)
+ ///{
+ bool matchXorOfAndWithSameReg(MachineInstr &MI,
+ std::pair<Register, Register> &MatchInfo);
+ bool applyXorOfAndWithSameReg(MachineInstr &MI,
+ std::pair<Register, Register> &MatchInfo);
+ ///}
+
+ /// Combine G_PTR_ADD with nullptr to G_INTTOPTR
+ bool matchPtrAddZero(MachineInstr &MI);
+ bool applyPtrAddZero(MachineInstr &MI);
+
+ /// Combine G_UREM x, (known power of 2) to an add and bitmasking.
+ bool applySimplifyURemByPow2(MachineInstr &MI);
+
+ bool matchCombineInsertVecElts(MachineInstr &MI,
+ SmallVectorImpl<Register> &MatchInfo);
+
+ bool applyCombineInsertVecElts(MachineInstr &MI,
+ SmallVectorImpl<Register> &MatchInfo);
+
+ /// Match expression trees of the form
+ ///
+ /// \code
+ /// sN *a = ...
+ /// sM val = a[0] | (a[1] << N) | (a[2] << 2N) | (a[3] << 3N) ...
+ /// \endcode
+ ///
+ /// And check if the tree can be replaced with a M-bit load + possibly a
+ /// bswap.
+ bool matchLoadOrCombine(MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo);
+ bool applyLoadOrCombine(MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo);
+
/// Try to transform \p MI by using all of the above
/// combine functions. Returns true if changed.
bool tryCombine(MachineInstr &MI);
@@ -521,30 +521,30 @@ private:
/// \returns true if a candidate is found.
bool findPreIndexCandidate(MachineInstr &MI, Register &Addr, Register &Base,
Register &Offset);
-
- /// Helper function for matchLoadOrCombine. Searches for Registers
- /// which may have been produced by a load instruction + some arithmetic.
- ///
- /// \param [in] Root - The search root.
- ///
- /// \returns The Registers found during the search.
- Optional<SmallVector<Register, 8>>
- findCandidatesForLoadOrCombine(const MachineInstr *Root) const;
-
- /// Helper function for matchLoadOrCombine.
- ///
- /// Checks if every register in \p RegsToVisit is defined by a load
- /// instruction + some arithmetic.
- ///
- /// \param [out] MemOffset2Idx - Maps the byte positions each load ends up
- /// at to the index of the load.
- /// \param [in] MemSizeInBits - The number of bits each load should produce.
- ///
- /// \returns The lowest-index load found and the lowest index on success.
- Optional<std::pair<MachineInstr *, int64_t>> findLoadOffsetsForLoadOrCombine(
- SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
- const SmallVector<Register, 8> &RegsToVisit,
- const unsigned MemSizeInBits);
+
+ /// Helper function for matchLoadOrCombine. Searches for Registers
+ /// which may have been produced by a load instruction + some arithmetic.
+ ///
+ /// \param [in] Root - The search root.
+ ///
+ /// \returns The Registers found during the search.
+ Optional<SmallVector<Register, 8>>
+ findCandidatesForLoadOrCombine(const MachineInstr *Root) const;
+
+ /// Helper function for matchLoadOrCombine.
+ ///
+ /// Checks if every register in \p RegsToVisit is defined by a load
+ /// instruction + some arithmetic.
+ ///
+ /// \param [out] MemOffset2Idx - Maps the byte positions each load ends up
+ /// at to the index of the load.
+ /// \param [in] MemSizeInBits - The number of bits each load should produce.
+ ///
+ /// \returns The lowest-index load found and the lowest index on success.
+ Optional<std::pair<MachineInstr *, int64_t>> findLoadOffsetsForLoadOrCombine(
+ SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
+ const SmallVector<Register, 8> &RegsToVisit,
+ const unsigned MemSizeInBits);
};
} // namespace llvm
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h
index 3f2f27e157..0833e960fe 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h
@@ -58,7 +58,7 @@ public:
/// For convenience, finishedChangingAllUsesOfReg() will report the completion
/// of the changes. The use list may change between this call and
/// finishedChangingAllUsesOfReg().
- void changingAllUsesOfReg(const MachineRegisterInfo &MRI, Register Reg);
+ void changingAllUsesOfReg(const MachineRegisterInfo &MRI, Register Reg);
/// All instructions reported as changing by changingAllUsesOfReg() have
/// finished being changed.
void finishedChangingAllUsesOfReg();
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelKnownBits.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelKnownBits.h
index 87b74ea403..452ddd17c0 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelKnownBits.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelKnownBits.h
@@ -20,7 +20,7 @@
#ifndef LLVM_CODEGEN_GLOBALISEL_KNOWNBITSINFO_H
#define LLVM_CODEGEN_GLOBALISEL_KNOWNBITSINFO_H
-#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/Register.h"
@@ -41,13 +41,13 @@ class GISelKnownBits : public GISelChangeObserver {
/// Cache maintained during a computeKnownBits request.
SmallDenseMap<Register, KnownBits, 16> ComputeKnownBitsCache;
- void computeKnownBitsMin(Register Src0, Register Src1, KnownBits &Known,
- const APInt &DemandedElts,
- unsigned Depth = 0);
-
- unsigned computeNumSignBitsMin(Register Src0, Register Src1,
- const APInt &DemandedElts, unsigned Depth = 0);
-
+ void computeKnownBitsMin(Register Src0, Register Src1, KnownBits &Known,
+ const APInt &DemandedElts,
+ unsigned Depth = 0);
+
+ unsigned computeNumSignBitsMin(Register Src0, Register Src1,
+ const APInt &DemandedElts, unsigned Depth = 0);
+
public:
GISelKnownBits(MachineFunction &MF, unsigned MaxDepth = 6);
virtual ~GISelKnownBits() = default;
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index ec913aa7b9..6c1ac8e115 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -27,14 +27,14 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/CodeGen/FunctionLoweringInfo.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/SwiftErrorValueTracking.h"
#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Allocator.h"
-#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/CodeGen.h"
#include <memory>
#include <utility>
@@ -45,7 +45,7 @@ class BasicBlock;
class CallInst;
class CallLowering;
class Constant;
-class ConstrainedFPIntrinsic;
+class ConstrainedFPIntrinsic;
class DataLayout;
class Instruction;
class MachineBasicBlock;
@@ -226,14 +226,14 @@ private:
/// Translate an LLVM string intrinsic (memcpy, memset, ...).
bool translateMemFunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
- unsigned Opcode);
+ unsigned Opcode);
void getStackGuard(Register DstReg, MachineIRBuilder &MIRBuilder);
bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
MachineIRBuilder &MIRBuilder);
- bool translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
- MachineIRBuilder &MIRBuilder);
+ bool translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
+ MachineIRBuilder &MIRBuilder);
/// Helper function for translateSimpleIntrinsic.
/// \return The generic opcode for \p IntrinsicID if \p IntrinsicID is a
@@ -267,19 +267,19 @@ private:
/// \pre \p U is a call instruction.
bool translateCall(const User &U, MachineIRBuilder &MIRBuilder);
- /// When an invoke or a cleanupret unwinds to the next EH pad, there are
- /// many places it could ultimately go. In the IR, we have a single unwind
- /// destination, but in the machine CFG, we enumerate all the possible blocks.
- /// This function skips over imaginary basic blocks that hold catchswitch
- /// instructions, and finds all the "real" machine
- /// basic block destinations. As those destinations may not be successors of
- /// EHPadBB, here we also calculate the edge probability to those
- /// destinations. The passed-in Prob is the edge probability to EHPadBB.
- bool findUnwindDestinations(
- const BasicBlock *EHPadBB, BranchProbability Prob,
- SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
- &UnwindDests);
-
+ /// When an invoke or a cleanupret unwinds to the next EH pad, there are
+ /// many places it could ultimately go. In the IR, we have a single unwind
+ /// destination, but in the machine CFG, we enumerate all the possible blocks.
+ /// This function skips over imaginary basic blocks that hold catchswitch
+ /// instructions, and finds all the "real" machine
+ /// basic block destinations. As those destinations may not be successors of
+ /// EHPadBB, here we also calculate the edge probability to those
+ /// destinations. The passed-in Prob is the edge probability to EHPadBB.
+ bool findUnwindDestinations(
+ const BasicBlock *EHPadBB, BranchProbability Prob,
+ SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
+ &UnwindDests);
+
bool translateInvoke(const User &U, MachineIRBuilder &MIRBuilder);
bool translateCallBr(const User &U, MachineIRBuilder &MIRBuilder);
@@ -311,37 +311,37 @@ private:
/// MachineBasicBlocks for the function have been created.
void finishPendingPhis();
- /// Translate \p Inst into a unary operation \p Opcode.
- /// \pre \p U is a unary operation.
- bool translateUnaryOp(unsigned Opcode, const User &U,
- MachineIRBuilder &MIRBuilder);
-
+ /// Translate \p Inst into a unary operation \p Opcode.
+ /// \pre \p U is a unary operation.
+ bool translateUnaryOp(unsigned Opcode, const User &U,
+ MachineIRBuilder &MIRBuilder);
+
/// Translate \p Inst into a binary operation \p Opcode.
/// \pre \p U is a binary operation.
bool translateBinaryOp(unsigned Opcode, const User &U,
MachineIRBuilder &MIRBuilder);
- /// If the set of cases should be emitted as a series of branches, return
- /// true. If we should emit this as a bunch of and/or'd together conditions,
- /// return false.
- bool shouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases);
- /// Helper method for findMergedConditions.
- /// This function emits a branch and is used at the leaves of an OR or an
- /// AND operator tree.
- void emitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- MachineBasicBlock *CurBB,
- MachineBasicBlock *SwitchBB,
- BranchProbability TProb,
- BranchProbability FProb, bool InvertCond);
- /// Used during condbr translation to find trees of conditions that can be
- /// optimized.
- void findMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
- MachineBasicBlock *SwitchBB,
- Instruction::BinaryOps Opc, BranchProbability TProb,
- BranchProbability FProb, bool InvertCond);
-
+ /// If the set of cases should be emitted as a series of branches, return
+ /// true. If we should emit this as a bunch of and/or'd together conditions,
+ /// return false.
+ bool shouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases);
+ /// Helper method for findMergedConditions.
+ /// This function emits a branch and is used at the leaves of an OR or an
+ /// AND operator tree.
+ void emitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ MachineBasicBlock *CurBB,
+ MachineBasicBlock *SwitchBB,
+ BranchProbability TProb,
+ BranchProbability FProb, bool InvertCond);
+ /// Used during condbr translation to find trees of conditions that can be
+ /// optimized.
+ void findMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
+ MachineBasicBlock *SwitchBB,
+ Instruction::BinaryOps Opc, BranchProbability TProb,
+ BranchProbability FProb, bool InvertCond);
+
/// Translate branch (br) instruction.
/// \pre \p U is a branch instruction.
bool translateBr(const User &U, MachineIRBuilder &MIRBuilder);
@@ -355,23 +355,23 @@ private:
void emitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB,
MachineIRBuilder &MIB);
- /// Generate for for the BitTest header block, which precedes each sequence of
- /// BitTestCases.
- void emitBitTestHeader(SwitchCG::BitTestBlock &BTB,
- MachineBasicBlock *SwitchMBB);
- /// Generate code to produces one "bit test" for a given BitTestCase \p B.
- void emitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB,
- BranchProbability BranchProbToNext, Register Reg,
- SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);
-
- bool lowerJumpTableWorkItem(
- SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
- MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
- MachineIRBuilder &MIB, MachineFunction::iterator BBI,
- BranchProbability UnhandledProbs, SwitchCG::CaseClusterIt I,
- MachineBasicBlock *Fallthrough, bool FallthroughUnreachable);
-
- bool lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I, Value *Cond,
+ /// Generate for for the BitTest header block, which precedes each sequence of
+ /// BitTestCases.
+ void emitBitTestHeader(SwitchCG::BitTestBlock &BTB,
+ MachineBasicBlock *SwitchMBB);
+ /// Generate code to produces one "bit test" for a given BitTestCase \p B.
+ void emitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB,
+ BranchProbability BranchProbToNext, Register Reg,
+ SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);
+
+ bool lowerJumpTableWorkItem(
+ SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
+ MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
+ MachineIRBuilder &MIB, MachineFunction::iterator BBI,
+ BranchProbability UnhandledProbs, SwitchCG::CaseClusterIt I,
+ MachineBasicBlock *Fallthrough, bool FallthroughUnreachable);
+
+ bool lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I, Value *Cond,
MachineBasicBlock *Fallthrough,
bool FallthroughUnreachable,
BranchProbability UnhandledProbs,
@@ -379,14 +379,14 @@ private:
MachineIRBuilder &MIB,
MachineBasicBlock *SwitchMBB);
- bool lowerBitTestWorkItem(
- SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
- MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
- MachineIRBuilder &MIB, MachineFunction::iterator BBI,
- BranchProbability DefaultProb, BranchProbability UnhandledProbs,
- SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
- bool FallthroughUnreachable);
-
+ bool lowerBitTestWorkItem(
+ SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
+ MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
+ MachineIRBuilder &MIB, MachineFunction::iterator BBI,
+ BranchProbability DefaultProb, BranchProbability UnhandledProbs,
+ SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
+ bool FallthroughUnreachable);
+
bool lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond,
MachineBasicBlock *SwitchMBB,
MachineBasicBlock *DefaultMBB,
@@ -497,9 +497,9 @@ private:
bool translateFAdd(const User &U, MachineIRBuilder &MIRBuilder) {
return translateBinaryOp(TargetOpcode::G_FADD, U, MIRBuilder);
}
- bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
- return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
- }
+ bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
+ }
bool translateFMul(const User &U, MachineIRBuilder &MIRBuilder) {
return translateBinaryOp(TargetOpcode::G_FMUL, U, MIRBuilder);
}
@@ -578,8 +578,8 @@ private:
/// Current target configuration. Controls how the pass handles errors.
const TargetPassConfig *TPC;
- CodeGenOpt::Level OptLevel;
-
+ CodeGenOpt::Level OptLevel;
+
/// Current optimization remark emitter. Used to report failures.
std::unique_ptr<OptimizationRemarkEmitter> ORE;
@@ -679,12 +679,12 @@ private:
BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
const MachineBasicBlock *Dst) const;
- void addSuccessorWithProb(
- MachineBasicBlock *Src, MachineBasicBlock *Dst,
- BranchProbability Prob = BranchProbability::getUnknown());
+ void addSuccessorWithProb(
+ MachineBasicBlock *Src, MachineBasicBlock *Dst,
+ BranchProbability Prob = BranchProbability::getUnknown());
public:
- IRTranslator(CodeGenOpt::Level OptLevel = CodeGenOpt::None);
+ IRTranslator(CodeGenOpt::Level OptLevel = CodeGenOpt::None);
StringRef getPassName() const override { return "IRTranslator"; }
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelector.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
index d61e273ce1..61123ff85f 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
@@ -119,14 +119,14 @@ enum {
/// - InsnID - Instruction ID
/// - Expected opcode
GIM_CheckOpcode,
-
- /// Check the opcode on the specified instruction, checking 2 acceptable
- /// alternatives.
- /// - InsnID - Instruction ID
- /// - Expected opcode
- /// - Alternative expected opcode
- GIM_CheckOpcodeIsEither,
-
+
+ /// Check the opcode on the specified instruction, checking 2 acceptable
+ /// alternatives.
+ /// - InsnID - Instruction ID
+ /// - Expected opcode
+ /// - Alternative expected opcode
+ GIM_CheckOpcodeIsEither,
+
/// Check the instruction has the right number of operands
/// - InsnID - Instruction ID
/// - Expected number of operands
@@ -179,15 +179,15 @@ enum {
GIM_CheckMemorySizeEqualToLLT,
GIM_CheckMemorySizeLessThanLLT,
GIM_CheckMemorySizeGreaterThanLLT,
-
- /// Check if this is a vector that can be treated as a vector splat
- /// constant. This is valid for both G_BUILD_VECTOR as well as
- /// G_BUILD_VECTOR_TRUNC. For AllOnes, this refers to the individual bits,
- /// so the splat element is -1.
- /// - InsnID - Instruction ID
- GIM_CheckIsBuildVectorAllOnes,
- GIM_CheckIsBuildVectorAllZeros,
-
+
+ /// Check if this is a vector that can be treated as a vector splat
+ /// constant. This is valid for both G_BUILD_VECTOR as well as
+ /// G_BUILD_VECTOR_TRUNC. For AllOnes, this refers to the individual bits,
+ /// so the splat element is -1.
+ /// - InsnID - Instruction ID
+ GIM_CheckIsBuildVectorAllOnes,
+ GIM_CheckIsBuildVectorAllZeros,
+
/// Check a generic C++ instruction predicate
/// - InsnID - Instruction ID
/// - PredicateID - The ID of the predicate function to call
@@ -261,15 +261,15 @@ enum {
/// - OtherOpIdx - Other operand index
GIM_CheckIsSameOperand,
- /// Predicates with 'let PredicateCodeUsesOperands = 1' need to examine some
- /// named operands that will be recorded in RecordedOperands. Names of these
- /// operands are referenced in predicate argument list. Emitter determines
- /// StoreIdx (corresponds to the order in which names appear in argument list).
- /// - InsnID - Instruction ID
- /// - OpIdx - Operand index
- /// - StoreIdx - Store location in RecordedOperands.
- GIM_RecordNamedOperand,
-
+ /// Predicates with 'let PredicateCodeUsesOperands = 1' need to examine some
+ /// named operands that will be recorded in RecordedOperands. Names of these
+ /// operands are referenced in predicate argument list. Emitter determines
+ /// StoreIdx (corresponds to the order in which names appear in argument list).
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - StoreIdx - Store location in RecordedOperands.
+ GIM_RecordNamedOperand,
+
/// Fail the current try-block, or completely fail to match if there is no
/// current try-block.
GIM_Reject,
@@ -462,11 +462,11 @@ protected:
std::vector<ComplexRendererFns::value_type> Renderers;
RecordedMIVector MIs;
DenseMap<unsigned, unsigned> TempRegisters;
- /// Named operands that predicate with 'let PredicateCodeUsesOperands = 1'
- /// referenced in its argument list. Operands are inserted at index set by
- /// emitter, it corresponds to the order in which names appear in argument
- /// list. Currently such predicates don't have more than 3 arguments.
- std::array<const MachineOperand *, 3> RecordedOperands;
+ /// Named operands that predicate with 'let PredicateCodeUsesOperands = 1'
+ /// referenced in its argument list. Operands are inserted at index set by
+ /// emitter, it corresponds to the order in which names appear in argument
+ /// list. Currently such predicates don't have more than 3 arguments.
+ std::array<const MachineOperand *, 3> RecordedOperands;
MatcherState(unsigned MaxRenderers);
};
@@ -527,9 +527,9 @@ protected:
llvm_unreachable(
"Subclasses must override this with a tablegen-erated function");
}
- virtual bool testMIPredicate_MI(
- unsigned, const MachineInstr &,
- const std::array<const MachineOperand *, 3> &Operands) const {
+ virtual bool testMIPredicate_MI(
+ unsigned, const MachineInstr &,
+ const std::array<const MachineOperand *, 3> &Operands) const {
llvm_unreachable(
"Subclasses must override this with a tablegen-erated function");
}
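The two matcher opcodes added above are consumed from the tablegen-erated MatchTable as flat runs of int64_t entries, in the operand order their comments describe. A hypothetical table fragment, assuming the usual emitted form (the concrete opcodes and indices below are invented for illustration):

    // GIM_CheckOpcodeIsEither: InsnID, expected opcode, alternative opcode.
    GIM_CheckOpcodeIsEither, /*MI*/0,
        TargetOpcode::G_BUILD_VECTOR, TargetOpcode::G_BUILD_VECTOR_TRUNC,
    // GIM_RecordNamedOperand: InsnID, OpIdx, StoreIdx into RecordedOperands.
    GIM_RecordNamedOperand, /*MI*/0, /*OpIdx*/1, /*StoreIdx*/0,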
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
index b5885ff663..e3202fc976 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
@@ -161,26 +161,26 @@ bool InstructionSelector::executeMatchTable(
break;
}
- case GIM_CheckOpcode:
- case GIM_CheckOpcodeIsEither: {
+ case GIM_CheckOpcode:
+ case GIM_CheckOpcodeIsEither: {
int64_t InsnID = MatchTable[CurrentIdx++];
- int64_t Expected0 = MatchTable[CurrentIdx++];
- int64_t Expected1 = -1;
- if (MatcherOpcode == GIM_CheckOpcodeIsEither)
- Expected1 = MatchTable[CurrentIdx++];
+ int64_t Expected0 = MatchTable[CurrentIdx++];
+ int64_t Expected1 = -1;
+ if (MatcherOpcode == GIM_CheckOpcodeIsEither)
+ Expected1 = MatchTable[CurrentIdx++];
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
unsigned Opcode = State.MIs[InsnID]->getOpcode();
DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckOpcode(MIs[" << InsnID
- << "], ExpectedOpcode=" << Expected0;
- if (MatcherOpcode == GIM_CheckOpcodeIsEither)
- dbgs() << " || " << Expected1;
- dbgs() << ") // Got=" << Opcode << "\n";
- );
-
- if (Opcode != Expected0 && Opcode != Expected1) {
+ dbgs() << CurrentIdx << ": GIM_CheckOpcode(MIs[" << InsnID
+ << "], ExpectedOpcode=" << Expected0;
+ if (MatcherOpcode == GIM_CheckOpcodeIsEither)
+ dbgs() << " || " << Expected1;
+ dbgs() << ") // Got=" << Opcode << "\n";
+ );
+
+ if (Opcode != Expected0 && Opcode != Expected1) {
if (handleReject() == RejectAndGiveUp)
return false;
}
@@ -207,7 +207,7 @@ bool InstructionSelector::executeMatchTable(
CurrentIdx = MatchTable[CurrentIdx + (Opcode - LowerBound)];
if (!CurrentIdx) {
CurrentIdx = Default;
- break;
+ break;
}
OnFailResumeAt.push_back(Default);
break;
@@ -335,35 +335,35 @@ bool InstructionSelector::executeMatchTable(
return false;
break;
}
- case GIM_CheckIsBuildVectorAllOnes:
- case GIM_CheckIsBuildVectorAllZeros: {
- int64_t InsnID = MatchTable[CurrentIdx++];
-
- DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
- dbgs() << CurrentIdx
- << ": GIM_CheckBuildVectorAll{Zeros|Ones}(MIs["
- << InsnID << "])\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
-
- const MachineInstr *MI = State.MIs[InsnID];
- assert((MI->getOpcode() == TargetOpcode::G_BUILD_VECTOR ||
- MI->getOpcode() == TargetOpcode::G_BUILD_VECTOR_TRUNC) &&
- "Expected G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC");
-
- if (MatcherOpcode == GIM_CheckIsBuildVectorAllOnes) {
- if (!isBuildVectorAllOnes(*MI, MRI)) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- }
- } else {
- if (!isBuildVectorAllZeros(*MI, MRI)) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- }
- }
-
- break;
- }
+ case GIM_CheckIsBuildVectorAllOnes:
+ case GIM_CheckIsBuildVectorAllZeros: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx
+ << ": GIM_CheckBuildVectorAll{Zeros|Ones}(MIs["
+ << InsnID << "])\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+ const MachineInstr *MI = State.MIs[InsnID];
+ assert((MI->getOpcode() == TargetOpcode::G_BUILD_VECTOR ||
+ MI->getOpcode() == TargetOpcode::G_BUILD_VECTOR_TRUNC) &&
+ "Expected G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC");
+
+ if (MatcherOpcode == GIM_CheckIsBuildVectorAllOnes) {
+ if (!isBuildVectorAllOnes(*MI, MRI)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ } else {
+ if (!isBuildVectorAllZeros(*MI, MRI)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ }
+
+ break;
+ }
case GIM_CheckCxxInsnPredicate: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t Predicate = MatchTable[CurrentIdx++];
@@ -374,8 +374,8 @@ bool InstructionSelector::executeMatchTable(
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
assert(Predicate > GIPFP_MI_Invalid && "Expected a valid predicate");
- if (!testMIPredicate_MI(Predicate, *State.MIs[InsnID],
- State.RecordedOperands))
+ if (!testMIPredicate_MI(Predicate, *State.MIs[InsnID],
+ State.RecordedOperands))
if (handleReject() == RejectAndGiveUp)
return false;
break;
@@ -625,20 +625,20 @@ bool InstructionSelector::executeMatchTable(
break;
}
- case GIM_RecordNamedOperand: {
- int64_t InsnID = MatchTable[CurrentIdx++];
- int64_t OpIdx = MatchTable[CurrentIdx++];
- uint64_t StoreIdx = MatchTable[CurrentIdx++];
-
- DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
- dbgs() << CurrentIdx << ": GIM_RecordNamedOperand(MIs["
- << InsnID << "]->getOperand(" << OpIdx
- << "), StoreIdx=" << StoreIdx << ")\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- assert(StoreIdx < State.RecordedOperands.size() && "Index out of range");
- State.RecordedOperands[StoreIdx] = &State.MIs[InsnID]->getOperand(OpIdx);
- break;
- }
+ case GIM_RecordNamedOperand: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ uint64_t StoreIdx = MatchTable[CurrentIdx++];
+
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_RecordNamedOperand(MIs["
+ << InsnID << "]->getOperand(" << OpIdx
+ << "), StoreIdx=" << StoreIdx << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ assert(StoreIdx < State.RecordedOperands.size() && "Index out of range");
+ State.RecordedOperands[StoreIdx] = &State.MIs[InsnID]->getOperand(OpIdx);
+ break;
+ }
case GIM_CheckRegBankForClass: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t OpIdx = MatchTable[CurrentIdx++];
@@ -1065,12 +1065,12 @@ bool InstructionSelector::executeMatchTable(
int64_t OpIdx = MatchTable[CurrentIdx++];
int64_t RCEnum = MatchTable[CurrentIdx++];
assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
- MachineInstr &I = *OutMIs[InsnID].getInstr();
- MachineFunction &MF = *I.getParent()->getParent();
- MachineRegisterInfo &MRI = MF.getRegInfo();
- const TargetRegisterClass &RC = *TRI.getRegClass(RCEnum);
- MachineOperand &MO = I.getOperand(OpIdx);
- constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, RC, MO);
+ MachineInstr &I = *OutMIs[InsnID].getInstr();
+ MachineFunction &MF = *I.getParent()->getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const TargetRegisterClass &RC = *TRI.getRegClass(RCEnum);
+ MachineOperand &MO = I.getOperand(OpIdx);
+ constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, RC, MO);
DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
dbgs() << CurrentIdx << ": GIR_ConstrainOperandRC(OutMIs["
<< InsnID << "], " << OpIdx << ", " << RCEnum
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
index 0d476efb2c..9bed6d039e 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
@@ -112,23 +112,23 @@ public:
Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
// zext(trunc x) - > and (aext/copy/trunc x), mask
- // zext(sext x) -> and (sext x), mask
+ // zext(sext x) -> and (sext x), mask
Register TruncSrc;
- Register SextSrc;
- if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))) ||
- mi_match(SrcReg, MRI, m_GSExt(m_Reg(SextSrc)))) {
+ Register SextSrc;
+ if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))) ||
+ mi_match(SrcReg, MRI, m_GSExt(m_Reg(SextSrc)))) {
LLT DstTy = MRI.getType(DstReg);
if (isInstUnsupported({TargetOpcode::G_AND, {DstTy}}) ||
isConstantUnsupported(DstTy))
return false;
LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
LLT SrcTy = MRI.getType(SrcReg);
- APInt MaskVal = APInt::getAllOnesValue(SrcTy.getScalarSizeInBits());
- auto Mask = Builder.buildConstant(
- DstTy, MaskVal.zext(DstTy.getScalarSizeInBits()));
- auto Extended = SextSrc ? Builder.buildSExtOrTrunc(DstTy, SextSrc) :
- Builder.buildAnyExtOrTrunc(DstTy, TruncSrc);
- Builder.buildAnd(DstReg, Extended, Mask);
+ APInt MaskVal = APInt::getAllOnesValue(SrcTy.getScalarSizeInBits());
+ auto Mask = Builder.buildConstant(
+ DstTy, MaskVal.zext(DstTy.getScalarSizeInBits()));
+ auto Extended = SextSrc ? Builder.buildSExtOrTrunc(DstTy, SextSrc) :
+ Builder.buildAnyExtOrTrunc(DstTy, TruncSrc);
+ Builder.buildAnd(DstReg, Extended, Mask);
markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
return true;
}
@@ -493,7 +493,7 @@ public:
MachineRegisterInfo &MRI,
MachineIRBuilder &Builder,
SmallVectorImpl<Register> &UpdatedDefs,
- GISelChangeObserver &Observer) {
+ GISelChangeObserver &Observer) {
if (!llvm::canReplaceReg(DstReg, SrcReg, MRI)) {
Builder.buildCopy(DstReg, SrcReg);
UpdatedDefs.push_back(DstReg);
@@ -513,78 +513,78 @@ public:
Observer.changedInstr(*UseMI);
}
- /// Return the operand index in \p MI that defines \p Def
- static unsigned getDefIndex(const MachineInstr &MI, Register SearchDef) {
- unsigned DefIdx = 0;
- for (const MachineOperand &Def : MI.defs()) {
- if (Def.getReg() == SearchDef)
- break;
- ++DefIdx;
- }
-
- return DefIdx;
- }
-
- bool tryCombineUnmergeValues(MachineInstr &MI,
- SmallVectorImpl<MachineInstr *> &DeadInsts,
- SmallVectorImpl<Register> &UpdatedDefs,
- GISelChangeObserver &Observer) {
+ /// Return the operand index in \p MI that defines \p Def
+ static unsigned getDefIndex(const MachineInstr &MI, Register SearchDef) {
+ unsigned DefIdx = 0;
+ for (const MachineOperand &Def : MI.defs()) {
+ if (Def.getReg() == SearchDef)
+ break;
+ ++DefIdx;
+ }
+
+ return DefIdx;
+ }
+
+ bool tryCombineUnmergeValues(MachineInstr &MI,
+ SmallVectorImpl<MachineInstr *> &DeadInsts,
+ SmallVectorImpl<Register> &UpdatedDefs,
+ GISelChangeObserver &Observer) {
assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);
unsigned NumDefs = MI.getNumOperands() - 1;
- Register SrcReg = MI.getOperand(NumDefs).getReg();
- MachineInstr *SrcDef = getDefIgnoringCopies(SrcReg, MRI);
+ Register SrcReg = MI.getOperand(NumDefs).getReg();
+ MachineInstr *SrcDef = getDefIgnoringCopies(SrcReg, MRI);
if (!SrcDef)
return false;
LLT OpTy = MRI.getType(MI.getOperand(NumDefs).getReg());
LLT DestTy = MRI.getType(MI.getOperand(0).getReg());
-
- if (SrcDef->getOpcode() == TargetOpcode::G_UNMERGE_VALUES) {
- // %0:_(<4 x s16>) = G_FOO
- // %1:_(<2 x s16>), %2:_(<2 x s16>) = G_UNMERGE_VALUES %0
- // %3:_(s16), %4:_(s16) = G_UNMERGE_VALUES %1
- //
- // %3:_(s16), %4:_(s16), %5:_(s16), %6:_(s16) = G_UNMERGE_VALUES %0
- const unsigned NumSrcOps = SrcDef->getNumOperands();
- Register SrcUnmergeSrc = SrcDef->getOperand(NumSrcOps - 1).getReg();
- LLT SrcUnmergeSrcTy = MRI.getType(SrcUnmergeSrc);
-
- // If we need to decrease the number of vector elements in the result type
- // of an unmerge, this would involve the creation of an equivalent unmerge
- // to copy back to the original result registers.
- LegalizeActionStep ActionStep = LI.getAction(
- {TargetOpcode::G_UNMERGE_VALUES, {OpTy, SrcUnmergeSrcTy}});
- switch (ActionStep.Action) {
- case LegalizeActions::Lower:
- case LegalizeActions::Unsupported:
- break;
- case LegalizeActions::FewerElements:
- case LegalizeActions::NarrowScalar:
- if (ActionStep.TypeIdx == 1)
- return false;
- break;
- default:
- return false;
- }
-
- Builder.setInstrAndDebugLoc(MI);
- auto NewUnmerge = Builder.buildUnmerge(DestTy, SrcUnmergeSrc);
-
- // TODO: Should we try to process out the other defs now? If the other
- // defs of the source unmerge are also unmerged, we end up with a separate
- // unmerge for each one.
- unsigned SrcDefIdx = getDefIndex(*SrcDef, SrcReg);
- for (unsigned I = 0; I != NumDefs; ++I) {
- Register Def = MI.getOperand(I).getReg();
- replaceRegOrBuildCopy(Def, NewUnmerge.getReg(SrcDefIdx * NumDefs + I),
- MRI, Builder, UpdatedDefs, Observer);
- }
-
- markInstAndDefDead(MI, *SrcDef, DeadInsts, SrcDefIdx);
- return true;
- }
-
+
+ if (SrcDef->getOpcode() == TargetOpcode::G_UNMERGE_VALUES) {
+ // %0:_(<4 x s16>) = G_FOO
+ // %1:_(<2 x s16>), %2:_(<2 x s16>) = G_UNMERGE_VALUES %0
+ // %3:_(s16), %4:_(s16) = G_UNMERGE_VALUES %1
+ //
+ // %3:_(s16), %4:_(s16), %5:_(s16), %6:_(s16) = G_UNMERGE_VALUES %0
+ const unsigned NumSrcOps = SrcDef->getNumOperands();
+ Register SrcUnmergeSrc = SrcDef->getOperand(NumSrcOps - 1).getReg();
+ LLT SrcUnmergeSrcTy = MRI.getType(SrcUnmergeSrc);
+
+ // If we need to decrease the number of vector elements in the result type
+ // of an unmerge, this would involve the creation of an equivalent unmerge
+ // to copy back to the original result registers.
+ LegalizeActionStep ActionStep = LI.getAction(
+ {TargetOpcode::G_UNMERGE_VALUES, {OpTy, SrcUnmergeSrcTy}});
+ switch (ActionStep.Action) {
+ case LegalizeActions::Lower:
+ case LegalizeActions::Unsupported:
+ break;
+ case LegalizeActions::FewerElements:
+ case LegalizeActions::NarrowScalar:
+ if (ActionStep.TypeIdx == 1)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ Builder.setInstrAndDebugLoc(MI);
+ auto NewUnmerge = Builder.buildUnmerge(DestTy, SrcUnmergeSrc);
+
+ // TODO: Should we try to process out the other defs now? If the other
+ // defs of the source unmerge are also unmerged, we end up with a separate
+ // unmerge for each one.
+ unsigned SrcDefIdx = getDefIndex(*SrcDef, SrcReg);
+ for (unsigned I = 0; I != NumDefs; ++I) {
+ Register Def = MI.getOperand(I).getReg();
+ replaceRegOrBuildCopy(Def, NewUnmerge.getReg(SrcDefIdx * NumDefs + I),
+ MRI, Builder, UpdatedDefs, Observer);
+ }
+
+ markInstAndDefDead(MI, *SrcDef, DeadInsts, SrcDefIdx);
+ return true;
+ }
+
MachineInstr *MergeI = SrcDef;
unsigned ConvertOp = 0;
@@ -812,12 +812,12 @@ public:
Changed = tryCombineSExt(MI, DeadInsts, UpdatedDefs);
break;
case TargetOpcode::G_UNMERGE_VALUES:
- Changed =
- tryCombineUnmergeValues(MI, DeadInsts, UpdatedDefs, WrapperObserver);
+ Changed =
+ tryCombineUnmergeValues(MI, DeadInsts, UpdatedDefs, WrapperObserver);
break;
case TargetOpcode::G_MERGE_VALUES:
- case TargetOpcode::G_BUILD_VECTOR:
- case TargetOpcode::G_CONCAT_VECTORS:
+ case TargetOpcode::G_BUILD_VECTOR:
+ case TargetOpcode::G_CONCAT_VECTORS:
// If any of the users of this merge are an unmerge, then add them to the
// artifact worklist in case there's folding that can be done looking up.
for (MachineInstr &U : MRI.use_instructions(MI.getOperand(0).getReg())) {
@@ -901,8 +901,8 @@ private:
/// dead.
/// MI is not marked dead.
void markDefDead(MachineInstr &MI, MachineInstr &DefMI,
- SmallVectorImpl<MachineInstr *> &DeadInsts,
- unsigned DefIdx = 0) {
+ SmallVectorImpl<MachineInstr *> &DeadInsts,
+ unsigned DefIdx = 0) {
// Collect all the copy instructions that are made dead, due to deleting
// this instruction. Collect all of them until the Trunc(DefMI).
// Eg,
@@ -929,27 +929,27 @@ private:
break;
PrevMI = TmpDef;
}
-
- if (PrevMI == &DefMI) {
- unsigned I = 0;
- bool IsDead = true;
- for (MachineOperand &Def : DefMI.defs()) {
- if (I != DefIdx) {
- if (!MRI.use_empty(Def.getReg())) {
- IsDead = false;
- break;
- }
- } else {
- if (!MRI.hasOneUse(DefMI.getOperand(DefIdx).getReg()))
- break;
- }
-
- ++I;
- }
-
- if (IsDead)
- DeadInsts.push_back(&DefMI);
- }
+
+ if (PrevMI == &DefMI) {
+ unsigned I = 0;
+ bool IsDead = true;
+ for (MachineOperand &Def : DefMI.defs()) {
+ if (I != DefIdx) {
+ if (!MRI.use_empty(Def.getReg())) {
+ IsDead = false;
+ break;
+ }
+ } else {
+ if (!MRI.hasOneUse(DefMI.getOperand(DefIdx).getReg()))
+ break;
+ }
+
+ ++I;
+ }
+
+ if (IsDead)
+ DeadInsts.push_back(&DefMI);
+ }
}
/// Mark MI as dead. If a def of one of MI's operands, DefMI, would also be
@@ -958,10 +958,10 @@ private:
/// copies in between the extends and the truncs, and this attempts to collect
/// the in between copies if they're dead.
void markInstAndDefDead(MachineInstr &MI, MachineInstr &DefMI,
- SmallVectorImpl<MachineInstr *> &DeadInsts,
- unsigned DefIdx = 0) {
+ SmallVectorImpl<MachineInstr *> &DeadInsts,
+ unsigned DefIdx = 0) {
DeadInsts.push_back(&MI);
- markDefDead(MI, DefMI, DeadInsts, DefIdx);
+ markDefDead(MI, DefMI, DeadInsts, DefIdx);
}
/// Erase the dead instructions in the list and call the observer hooks.
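The zext(trunc x) combine at the top of this file's changes replaces the extend/truncate pair with an extend-or-copy of the original source followed by a mask of the narrow width. A small before/after sketch in generic MIR, with invented virtual registers and types:

    // Before:
    //   %1:_(s16) = G_TRUNC %0:_(s32)
    //   %2:_(s32) = G_ZEXT %1:_(s16)
    // After (the any-extend of %0 folds to a COPY since the sizes match):
    //   %3:_(s32) = COPY %0:_(s32)
    //   %4:_(s32) = G_CONSTANT i32 65535
    //   %2:_(s32) = G_AND %3:_(s32), %4:_(s32)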
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index 050df84035..bf45c5e673 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -39,7 +39,7 @@ class LegalizerInfo;
class Legalizer;
class MachineRegisterInfo;
class GISelChangeObserver;
-class TargetLowering;
+class TargetLowering;
class LegalizerHelper {
public:
@@ -53,7 +53,7 @@ public:
private:
MachineRegisterInfo &MRI;
const LegalizerInfo &LI;
- const TargetLowering &TLI;
+ const TargetLowering &TLI;
public:
enum LegalizeResult {
@@ -71,7 +71,7 @@ public:
/// Expose LegalizerInfo so the clients can re-use.
const LegalizerInfo &getLegalizerInfo() const { return LI; }
- const TargetLowering &getTargetLowering() const { return TLI; }
+ const TargetLowering &getTargetLowering() const { return TLI; }
LegalizerHelper(MachineFunction &MF, GISelChangeObserver &Observer,
MachineIRBuilder &B);
@@ -164,10 +164,10 @@ public:
/// def by inserting a G_BITCAST from \p CastTy
void bitcastDst(MachineInstr &MI, LLT CastTy, unsigned OpIdx);
- /// Widen \p OrigReg to \p WideTy by merging to a wider type, padding with
- /// G_IMPLICIT_DEF, and producing dead results.
- Register widenWithUnmerge(LLT WideTy, Register OrigReg);
-
+ /// Widen \p OrigReg to \p WideTy by merging to a wider type, padding with
+ /// G_IMPLICIT_DEF, and producing dead results.
+ Register widenWithUnmerge(LLT WideTy, Register OrigReg);
+
private:
LegalizeResult
widenScalarMergeValues(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
@@ -177,10 +177,10 @@ private:
widenScalarExtract(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
LegalizeResult
widenScalarInsert(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
- LegalizeResult widenScalarAddoSubo(MachineInstr &MI, unsigned TypeIdx,
- LLT WideTy);
- LegalizeResult widenScalarAddSubShlSat(MachineInstr &MI, unsigned TypeIdx,
- LLT WideTy);
+ LegalizeResult widenScalarAddoSubo(MachineInstr &MI, unsigned TypeIdx,
+ LLT WideTy);
+ LegalizeResult widenScalarAddSubShlSat(MachineInstr &MI, unsigned TypeIdx,
+ LLT WideTy);
/// Helper function to split a wide generic register into bitwise blocks with
/// the given Type (which implies the number of blocks needed). The generic
@@ -207,19 +207,19 @@ private:
LLT PartTy, ArrayRef<Register> PartRegs,
LLT LeftoverTy = LLT(), ArrayRef<Register> LeftoverRegs = {});
- /// Unmerge \p SrcReg into smaller sized values, and append them to \p
- /// Parts. The elements of \p Parts will be the greatest common divisor type
- /// of \p DstTy, \p NarrowTy and the type of \p SrcReg. This will compute and
- /// return the GCD type.
+ /// Unmerge \p SrcReg into smaller sized values, and append them to \p
+ /// Parts. The elements of \p Parts will be the greatest common divisor type
+ /// of \p DstTy, \p NarrowTy and the type of \p SrcReg. This will compute and
+ /// return the GCD type.
LLT extractGCDType(SmallVectorImpl<Register> &Parts, LLT DstTy,
LLT NarrowTy, Register SrcReg);
- /// Unmerge \p SrcReg into \p GCDTy typed registers. This will append all of
- /// the unpacked registers to \p Parts. This version is if the common unmerge
- /// type is already known.
- void extractGCDType(SmallVectorImpl<Register> &Parts, LLT GCDTy,
- Register SrcReg);
-
+ /// Unmerge \p SrcReg into \p GCDTy typed registers. This will append all of
+ /// the unpacked registers to \p Parts. This version is if the common unmerge
+ /// type is already known.
+ void extractGCDType(SmallVectorImpl<Register> &Parts, LLT GCDTy,
+ Register SrcReg);
+
/// Produce a merge of values in \p VRegs to define \p DstReg. Perform a merge
/// from the least common multiple type, and convert as appropriate to \p
/// DstReg.
@@ -252,23 +252,23 @@ private:
ArrayRef<Register> Src1Regs,
ArrayRef<Register> Src2Regs, LLT NarrowTy);
- void changeOpcode(MachineInstr &MI, unsigned NewOpcode);
-
+ void changeOpcode(MachineInstr &MI, unsigned NewOpcode);
+
public:
- /// Return the alignment to use for a stack temporary object with the given
- /// type.
- Align getStackTemporaryAlignment(LLT Type, Align MinAlign = Align()) const;
-
- /// Create a stack temporary based on the size in bytes and the alignment
- MachineInstrBuilder createStackTemporary(TypeSize Bytes, Align Alignment,
- MachinePointerInfo &PtrInfo);
-
- /// Get a pointer to vector element \p Index located in memory for a vector of
- /// type \p VecTy starting at a base address of \p VecPtr. If \p Index is out
- /// of bounds the returned pointer is unspecified, but will be within the
- /// vector bounds.
- Register getVectorElementPointer(Register VecPtr, LLT VecTy, Register Index);
-
+ /// Return the alignment to use for a stack temporary object with the given
+ /// type.
+ Align getStackTemporaryAlignment(LLT Type, Align MinAlign = Align()) const;
+
+ /// Create a stack temporary based on the size in bytes and the alignment
+ MachineInstrBuilder createStackTemporary(TypeSize Bytes, Align Alignment,
+ MachinePointerInfo &PtrInfo);
+
+ /// Get a pointer to vector element \p Index located in memory for a vector of
+ /// type \p VecTy starting at a base address of \p VecPtr. If \p Index is out
+ /// of bounds the returned pointer is unspecified, but will be within the
+ /// vector bounds.
+ Register getVectorElementPointer(Register VecPtr, LLT VecTy, Register Index);
+
LegalizeResult fewerElementsVectorImplicitDef(MachineInstr &MI,
unsigned TypeIdx, LLT NarrowTy);
@@ -296,11 +296,11 @@ public:
LegalizeResult fewerElementsVectorUnmergeValues(MachineInstr &MI,
unsigned TypeIdx,
LLT NarrowTy);
- LegalizeResult fewerElementsVectorMerge(MachineInstr &MI, unsigned TypeIdx,
- LLT NarrowTy);
- LegalizeResult fewerElementsVectorExtractInsertVectorElt(MachineInstr &MI,
- unsigned TypeIdx,
- LLT NarrowTy);
+ LegalizeResult fewerElementsVectorMerge(MachineInstr &MI, unsigned TypeIdx,
+ LLT NarrowTy);
+ LegalizeResult fewerElementsVectorExtractInsertVectorElt(MachineInstr &MI,
+ unsigned TypeIdx,
+ LLT NarrowTy);
LegalizeResult
reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy);
@@ -323,7 +323,7 @@ public:
LegalizeResult narrowScalarShift(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
LegalizeResult narrowScalarMul(MachineInstr &MI, LLT Ty);
- LegalizeResult narrowScalarFPTOI(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+ LegalizeResult narrowScalarFPTOI(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
LegalizeResult narrowScalarExtract(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
LegalizeResult narrowScalarInsert(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
@@ -334,52 +334,52 @@ public:
LegalizeResult narrowScalarCTTZ(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
LegalizeResult narrowScalarCTPOP(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
- /// Perform Bitcast legalize action on G_EXTRACT_VECTOR_ELT.
- LegalizeResult bitcastExtractVectorElt(MachineInstr &MI, unsigned TypeIdx,
- LLT CastTy);
-
- /// Perform Bitcast legalize action on G_INSERT_VECTOR_ELT.
- LegalizeResult bitcastInsertVectorElt(MachineInstr &MI, unsigned TypeIdx,
- LLT CastTy);
-
+ /// Perform Bitcast legalize action on G_EXTRACT_VECTOR_ELT.
+ LegalizeResult bitcastExtractVectorElt(MachineInstr &MI, unsigned TypeIdx,
+ LLT CastTy);
+
+ /// Perform Bitcast legalize action on G_INSERT_VECTOR_ELT.
+ LegalizeResult bitcastInsertVectorElt(MachineInstr &MI, unsigned TypeIdx,
+ LLT CastTy);
+
LegalizeResult lowerBitcast(MachineInstr &MI);
- LegalizeResult lowerLoad(MachineInstr &MI);
- LegalizeResult lowerStore(MachineInstr &MI);
- LegalizeResult lowerBitCount(MachineInstr &MI);
+ LegalizeResult lowerLoad(MachineInstr &MI);
+ LegalizeResult lowerStore(MachineInstr &MI);
+ LegalizeResult lowerBitCount(MachineInstr &MI);
LegalizeResult lowerU64ToF32BitOps(MachineInstr &MI);
- LegalizeResult lowerUITOFP(MachineInstr &MI);
- LegalizeResult lowerSITOFP(MachineInstr &MI);
- LegalizeResult lowerFPTOUI(MachineInstr &MI);
+ LegalizeResult lowerUITOFP(MachineInstr &MI);
+ LegalizeResult lowerSITOFP(MachineInstr &MI);
+ LegalizeResult lowerFPTOUI(MachineInstr &MI);
LegalizeResult lowerFPTOSI(MachineInstr &MI);
LegalizeResult lowerFPTRUNC_F64_TO_F16(MachineInstr &MI);
- LegalizeResult lowerFPTRUNC(MachineInstr &MI);
- LegalizeResult lowerFPOWI(MachineInstr &MI);
+ LegalizeResult lowerFPTRUNC(MachineInstr &MI);
+ LegalizeResult lowerFPOWI(MachineInstr &MI);
- LegalizeResult lowerMinMax(MachineInstr &MI);
- LegalizeResult lowerFCopySign(MachineInstr &MI);
+ LegalizeResult lowerMinMax(MachineInstr &MI);
+ LegalizeResult lowerFCopySign(MachineInstr &MI);
LegalizeResult lowerFMinNumMaxNum(MachineInstr &MI);
LegalizeResult lowerFMad(MachineInstr &MI);
LegalizeResult lowerIntrinsicRound(MachineInstr &MI);
LegalizeResult lowerFFloor(MachineInstr &MI);
LegalizeResult lowerMergeValues(MachineInstr &MI);
LegalizeResult lowerUnmergeValues(MachineInstr &MI);
- LegalizeResult lowerExtractInsertVectorElt(MachineInstr &MI);
+ LegalizeResult lowerExtractInsertVectorElt(MachineInstr &MI);
LegalizeResult lowerShuffleVector(MachineInstr &MI);
LegalizeResult lowerDynStackAlloc(MachineInstr &MI);
LegalizeResult lowerExtract(MachineInstr &MI);
LegalizeResult lowerInsert(MachineInstr &MI);
LegalizeResult lowerSADDO_SSUBO(MachineInstr &MI);
- LegalizeResult lowerAddSubSatToMinMax(MachineInstr &MI);
- LegalizeResult lowerAddSubSatToAddoSubo(MachineInstr &MI);
- LegalizeResult lowerShlSat(MachineInstr &MI);
+ LegalizeResult lowerAddSubSatToMinMax(MachineInstr &MI);
+ LegalizeResult lowerAddSubSatToAddoSubo(MachineInstr &MI);
+ LegalizeResult lowerShlSat(MachineInstr &MI);
LegalizeResult lowerBswap(MachineInstr &MI);
LegalizeResult lowerBitreverse(MachineInstr &MI);
LegalizeResult lowerReadWriteRegister(MachineInstr &MI);
- LegalizeResult lowerSMULH_UMULH(MachineInstr &MI);
- LegalizeResult lowerSelect(MachineInstr &MI);
-
+ LegalizeResult lowerSMULH_UMULH(MachineInstr &MI);
+ LegalizeResult lowerSelect(MachineInstr &MI);
+
};
/// Helper function that creates a libcall to the given \p Name using the given
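The newly public stack-temporary helpers are typically used together when a lowering has to go through memory, for example a variable-index vector element access. A rough sketch under the assumption that the LegalizerHelper instance is named Helper and that VecTy/IdxReg describe the vector being lowered (all of these names are illustrative):

    // Reserve a stack slot big enough and suitably aligned for the vector.
    MachinePointerInfo PtrInfo;
    Align VecAlign = Helper.getStackTemporaryAlignment(VecTy);
    auto StackTemp = Helper.createStackTemporary(
        TypeSize::Fixed(VecTy.getSizeInBytes()), VecAlign, PtrInfo);
    Register VecPtr = StackTemp.getReg(0);
    // Address one element; an out-of-bounds index yields an unspecified
    // pointer that still stays within the vector's storage.
    Register EltPtr = Helper.getVectorElementPointer(VecPtr, VecTy, IdxReg);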
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
index ff313f43c0..0ae41c1a8d 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
@@ -189,7 +189,7 @@ struct TypePairAndMemDesc {
MemSize == Other.MemSize;
}
- /// \returns true if this memory access is legal for the access described
+ /// \returns true if this memory access is legal for the access described
/// by \p Other (The alignment is sufficient for the size and result type).
bool isCompatible(const TypePairAndMemDesc &Other) const {
return Type0 == Other.Type0 && Type1 == Other.Type1 &&
@@ -224,19 +224,19 @@ Predicate any(Predicate P0, Predicate P1, Args... args) {
return any(any(P0, P1), args...);
}
-/// True iff the given type index is the specified type.
+/// True iff the given type index is the specified type.
LegalityPredicate typeIs(unsigned TypeIdx, LLT TypesInit);
/// True iff the given type index is one of the specified types.
LegalityPredicate typeInSet(unsigned TypeIdx,
std::initializer_list<LLT> TypesInit);
-
-/// True iff the given type index is not the specified type.
-inline LegalityPredicate typeIsNot(unsigned TypeIdx, LLT Type) {
- return [=](const LegalityQuery &Query) {
- return Query.Types[TypeIdx] != Type;
- };
-}
-
+
+/// True iff the given type index is not the specified type.
+inline LegalityPredicate typeIsNot(unsigned TypeIdx, LLT Type) {
+ return [=](const LegalityQuery &Query) {
+ return Query.Types[TypeIdx] != Type;
+ };
+}
+
/// True iff the given types for the given pair of type indexes is one of the
/// specified type pairs.
LegalityPredicate
@@ -322,11 +322,11 @@ LegalizeMutation changeElementTo(unsigned TypeIdx, unsigned FromTypeIdx);
/// Keep the same scalar or element type as the given type.
LegalizeMutation changeElementTo(unsigned TypeIdx, LLT Ty);
-/// Change the scalar size or element size to have the same scalar size as type
-/// index \p FromIndex. Unlike changeElementTo, this discards pointer types and
-/// only changes the size.
-LegalizeMutation changeElementSizeTo(unsigned TypeIdx, unsigned FromTypeIdx);
-
+/// Change the scalar size or element size to have the same scalar size as type
+/// index \p FromIndex. Unlike changeElementTo, this discards pointer types and
+/// only changes the size.
+LegalizeMutation changeElementSizeTo(unsigned TypeIdx, unsigned FromTypeIdx);
+
/// Widen the scalar type or vector element type for the given type index to the
/// next power of 2.
LegalizeMutation widenScalarOrEltToNextPow2(unsigned TypeIdx, unsigned Min = 0);
@@ -635,7 +635,7 @@ public:
/// The instruction is lowered when type index 0 is any type in the given
/// list. Keep type index 0 as the same type.
LegalizeRuleSet &lowerFor(std::initializer_list<LLT> Types) {
- return actionFor(LegalizeAction::Lower, Types);
+ return actionFor(LegalizeAction::Lower, Types);
}
/// The instruction is lowered when type index 0 is any type in the given
/// list.
@@ -646,7 +646,7 @@ public:
/// The instruction is lowered when type indexes 0 and 1 is any type pair in
/// the given list. Keep type index 0 as the same type.
LegalizeRuleSet &lowerFor(std::initializer_list<std::pair<LLT, LLT>> Types) {
- return actionFor(LegalizeAction::Lower, Types);
+ return actionFor(LegalizeAction::Lower, Types);
}
/// The instruction is lowered when type indexes 0 and 1 is any type pair in
/// the given list.
@@ -671,15 +671,15 @@ public:
Types2);
}
- /// The instruction is emitted as a library call.
- LegalizeRuleSet &libcall() {
- using namespace LegalizeMutations;
- // We have no choice but conservatively assume that predicate-less lowering
- // properly handles all type indices by design:
- markAllIdxsAsCovered();
- return actionIf(LegalizeAction::Libcall, always);
- }
-
+ /// The instruction is emitted as a library call.
+ LegalizeRuleSet &libcall() {
+ using namespace LegalizeMutations;
+ // We have no choice but conservatively assume that predicate-less lowering
+ // properly handles all type indices by design:
+ markAllIdxsAsCovered();
+ return actionIf(LegalizeAction::Libcall, always);
+ }
+
/// Like legalIf, but for the Libcall action.
LegalizeRuleSet &libcallIf(LegalityPredicate Predicate) {
// We have no choice but conservatively assume that a libcall with a
@@ -722,13 +722,13 @@ public:
markAllIdxsAsCovered();
return actionIf(LegalizeAction::NarrowScalar, Predicate, Mutation);
}
- /// Narrow the scalar, specified in mutation, when type indexes 0 and 1 is any
- /// type pair in the given list.
- LegalizeRuleSet &
- narrowScalarFor(std::initializer_list<std::pair<LLT, LLT>> Types,
- LegalizeMutation Mutation) {
- return actionFor(LegalizeAction::NarrowScalar, Types, Mutation);
- }
+ /// Narrow the scalar, specified in mutation, when type indexes 0 and 1 is any
+ /// type pair in the given list.
+ LegalizeRuleSet &
+ narrowScalarFor(std::initializer_list<std::pair<LLT, LLT>> Types,
+ LegalizeMutation Mutation) {
+ return actionFor(LegalizeAction::NarrowScalar, Types, Mutation);
+ }
/// Add more elements to reach the type selected by the mutation if the
/// predicate is true.
@@ -833,13 +833,13 @@ public:
LegalizeMutations::scalarize(TypeIdx));
}
- LegalizeRuleSet &scalarizeIf(LegalityPredicate Predicate, unsigned TypeIdx) {
- using namespace LegalityPredicates;
- return actionIf(LegalizeAction::FewerElements,
- all(Predicate, isVector(typeIdx(TypeIdx))),
- LegalizeMutations::scalarize(TypeIdx));
- }
-
+ LegalizeRuleSet &scalarizeIf(LegalityPredicate Predicate, unsigned TypeIdx) {
+ using namespace LegalityPredicates;
+ return actionIf(LegalizeAction::FewerElements,
+ all(Predicate, isVector(typeIdx(TypeIdx))),
+ LegalizeMutations::scalarize(TypeIdx));
+ }
+
/// Ensure the scalar or element is at least as wide as Ty.
LegalizeRuleSet &minScalarOrElt(unsigned TypeIdx, const LLT Ty) {
using namespace LegalityPredicates;
@@ -897,10 +897,10 @@ public:
return actionIf(
LegalizeAction::NarrowScalar,
[=](const LegalityQuery &Query) {
- const LLT QueryTy = Query.Types[TypeIdx];
- return QueryTy.isScalar() &&
- QueryTy.getSizeInBits() > Ty.getSizeInBits() &&
- Predicate(Query);
+ const LLT QueryTy = Query.Types[TypeIdx];
+ return QueryTy.isScalar() &&
+ QueryTy.getSizeInBits() > Ty.getSizeInBits() &&
+ Predicate(Query);
},
changeElementTo(typeIdx(TypeIdx), Ty));
}
@@ -926,27 +926,27 @@ public:
return Query.Types[LargeTypeIdx].getScalarSizeInBits() >
Query.Types[TypeIdx].getSizeInBits();
},
- LegalizeMutations::changeElementSizeTo(TypeIdx, LargeTypeIdx));
- }
-
- /// Narrow the scalar to match the size of another.
- LegalizeRuleSet &maxScalarSameAs(unsigned TypeIdx, unsigned NarrowTypeIdx) {
- typeIdx(TypeIdx);
- return narrowScalarIf(
+ LegalizeMutations::changeElementSizeTo(TypeIdx, LargeTypeIdx));
+ }
+
+ /// Narrow the scalar to match the size of another.
+ LegalizeRuleSet &maxScalarSameAs(unsigned TypeIdx, unsigned NarrowTypeIdx) {
+ typeIdx(TypeIdx);
+ return narrowScalarIf(
[=](const LegalityQuery &Query) {
- return Query.Types[NarrowTypeIdx].getScalarSizeInBits() <
- Query.Types[TypeIdx].getSizeInBits();
- },
- LegalizeMutations::changeElementSizeTo(TypeIdx, NarrowTypeIdx));
- }
-
- /// Change the type \p TypeIdx to have the same scalar size as type \p
- /// SameSizeIdx.
- LegalizeRuleSet &scalarSameSizeAs(unsigned TypeIdx, unsigned SameSizeIdx) {
- return minScalarSameAs(TypeIdx, SameSizeIdx)
- .maxScalarSameAs(TypeIdx, SameSizeIdx);
- }
-
+ return Query.Types[NarrowTypeIdx].getScalarSizeInBits() <
+ Query.Types[TypeIdx].getSizeInBits();
+ },
+ LegalizeMutations::changeElementSizeTo(TypeIdx, NarrowTypeIdx));
+ }
+
+ /// Change the type \p TypeIdx to have the same scalar size as type \p
+ /// SameSizeIdx.
+ LegalizeRuleSet &scalarSameSizeAs(unsigned TypeIdx, unsigned SameSizeIdx) {
+ return minScalarSameAs(TypeIdx, SameSizeIdx)
+ .maxScalarSameAs(TypeIdx, SameSizeIdx);
+ }
+
/// Conditionally widen the scalar or elt to match the size of another.
LegalizeRuleSet &minScalarEltSameAsIf(LegalityPredicate Predicate,
unsigned TypeIdx, unsigned LargeTypeIdx) {
@@ -1264,12 +1264,12 @@ public:
bool isLegal(const LegalityQuery &Query) const {
return getAction(Query).Action == LegalizeAction::Legal;
}
-
- bool isLegalOrCustom(const LegalityQuery &Query) const {
- auto Action = getAction(Query).Action;
- return Action == LegalizeAction::Legal || Action == LegalizeAction::Custom;
- }
-
+
+ bool isLegalOrCustom(const LegalityQuery &Query) const {
+ auto Action = getAction(Query).Action;
+ return Action == LegalizeAction::Legal || Action == LegalizeAction::Custom;
+ }
+
bool isLegal(const MachineInstr &MI, const MachineRegisterInfo &MRI) const;
bool isLegalOrCustom(const MachineInstr &MI,
const MachineRegisterInfo &MRI) const;
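The rule-set additions in this file (typeIsNot, libcall, narrowScalarFor, scalarizeIf, maxScalarSameAs/scalarSameSizeAs) are meant to be chained from a target's action-definition builders. A hedged sketch of such a chain; the opcode, types, and choice of rules are arbitrary and not taken from any particular target:

    using namespace LegalityPredicates;
    const LLT S32 = LLT::scalar(32);
    const LLT S64 = LLT::scalar(64);
    const LLT S128 = LLT::scalar(128);
    getActionDefinitionsBuilder(TargetOpcode::G_MUL)
        .legalFor({S32, S64})               // native multiplies
        .libcallFor({S128})                 // emitted as a runtime library call
        .scalarizeIf(typeIsNot(0, S32), 0)  // break stray vectors into scalars
        .clampScalar(0, S32, S64);          // widen/narrow everything else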
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Localizer.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Localizer.h
index 65f63dc8bc..d1716931e6 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Localizer.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Localizer.h
@@ -71,11 +71,11 @@ private:
typedef SmallSetVector<MachineInstr *, 32> LocalizedSetVecT;
- /// If \p Op is a phi operand and not unique in that phi, that is,
- /// there are other operands in the phi with the same register,
- /// return true.
- bool isNonUniquePhiValue(MachineOperand &Op) const;
-
+ /// If \p Op is a phi operand and not unique in that phi, that is,
+ /// there are other operands in the phi with the same register,
+ /// return true.
+ bool isNonUniquePhiValue(MachineOperand &Op) const;
+
/// Do inter-block localization from the entry block.
bool localizeInterBlock(MachineFunction &MF,
LocalizedSetVecT &LocalizedInstrs);
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
index 13085d3d44..223f61ccc5 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
@@ -46,25 +46,25 @@ inline OneUse_match<SubPat> m_OneUse(const SubPat &SP) {
return SP;
}
-template <typename SubPatternT> struct OneNonDBGUse_match {
- SubPatternT SubPat;
- OneNonDBGUse_match(const SubPatternT &SP) : SubPat(SP) {}
-
- bool match(const MachineRegisterInfo &MRI, Register Reg) {
- return MRI.hasOneNonDBGUse(Reg) && SubPat.match(MRI, Reg);
- }
-};
-
-template <typename SubPat>
-inline OneNonDBGUse_match<SubPat> m_OneNonDBGUse(const SubPat &SP) {
- return SP;
-}
-
+template <typename SubPatternT> struct OneNonDBGUse_match {
+ SubPatternT SubPat;
+ OneNonDBGUse_match(const SubPatternT &SP) : SubPat(SP) {}
+
+ bool match(const MachineRegisterInfo &MRI, Register Reg) {
+ return MRI.hasOneNonDBGUse(Reg) && SubPat.match(MRI, Reg);
+ }
+};
+
+template <typename SubPat>
+inline OneNonDBGUse_match<SubPat> m_OneNonDBGUse(const SubPat &SP) {
+ return SP;
+}
+
struct ConstantMatch {
int64_t &CR;
ConstantMatch(int64_t &C) : CR(C) {}
bool match(const MachineRegisterInfo &MRI, Register Reg) {
- if (auto MaybeCst = getConstantVRegSExtVal(Reg, MRI)) {
+ if (auto MaybeCst = getConstantVRegSExtVal(Reg, MRI)) {
CR = *MaybeCst;
return true;
}
@@ -74,29 +74,29 @@ struct ConstantMatch {
inline ConstantMatch m_ICst(int64_t &Cst) { return ConstantMatch(Cst); }
-/// Matcher for a specific constant value.
-struct SpecificConstantMatch {
- int64_t RequestedVal;
- SpecificConstantMatch(int64_t RequestedVal) : RequestedVal(RequestedVal) {}
- bool match(const MachineRegisterInfo &MRI, Register Reg) {
- int64_t MatchedVal;
- return mi_match(Reg, MRI, m_ICst(MatchedVal)) && MatchedVal == RequestedVal;
- }
-};
-
-/// Matches a constant equal to \p RequestedValue.
-inline SpecificConstantMatch m_SpecificICst(int64_t RequestedValue) {
- return SpecificConstantMatch(RequestedValue);
-}
-
-///{
-/// Convenience matchers for specific integer values.
-inline SpecificConstantMatch m_ZeroInt() { return SpecificConstantMatch(0); }
-inline SpecificConstantMatch m_AllOnesInt() {
- return SpecificConstantMatch(-1);
-}
-///}
-
+/// Matcher for a specific constant value.
+struct SpecificConstantMatch {
+ int64_t RequestedVal;
+ SpecificConstantMatch(int64_t RequestedVal) : RequestedVal(RequestedVal) {}
+ bool match(const MachineRegisterInfo &MRI, Register Reg) {
+ int64_t MatchedVal;
+ return mi_match(Reg, MRI, m_ICst(MatchedVal)) && MatchedVal == RequestedVal;
+ }
+};
+
+/// Matches a constant equal to \p RequestedValue.
+inline SpecificConstantMatch m_SpecificICst(int64_t RequestedValue) {
+ return SpecificConstantMatch(RequestedValue);
+}
+
+///{
+/// Convenience matchers for specific integer values.
+inline SpecificConstantMatch m_ZeroInt() { return SpecificConstantMatch(0); }
+inline SpecificConstantMatch m_AllOnesInt() {
+ return SpecificConstantMatch(-1);
+}
+///}
+
// TODO: Rework this for different kinds of MachineOperand.
// Currently assumes the Src for a match is a register.
// We might want to support taking in some MachineOperands and call getReg on
@@ -242,12 +242,12 @@ m_GAdd(const LHS &L, const RHS &R) {
}
template <typename LHS, typename RHS>
-inline BinaryOp_match<LHS, RHS, TargetOpcode::G_PTR_ADD, true>
-m_GPtrAdd(const LHS &L, const RHS &R) {
- return BinaryOp_match<LHS, RHS, TargetOpcode::G_PTR_ADD, true>(L, R);
-}
-
-template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_PTR_ADD, true>
+m_GPtrAdd(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, TargetOpcode::G_PTR_ADD, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, TargetOpcode::G_SUB> m_GSub(const LHS &L,
const RHS &R) {
return BinaryOp_match<LHS, RHS, TargetOpcode::G_SUB>(L, R);
@@ -284,12 +284,12 @@ m_GAnd(const LHS &L, const RHS &R) {
}
template <typename LHS, typename RHS>
-inline BinaryOp_match<LHS, RHS, TargetOpcode::G_XOR, true>
-m_GXor(const LHS &L, const RHS &R) {
- return BinaryOp_match<LHS, RHS, TargetOpcode::G_XOR, true>(L, R);
-}
-
-template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_XOR, true>
+m_GXor(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, TargetOpcode::G_XOR, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, TargetOpcode::G_OR, true> m_GOr(const LHS &L,
const RHS &R) {
return BinaryOp_match<LHS, RHS, TargetOpcode::G_OR, true>(L, R);
@@ -307,12 +307,12 @@ m_GLShr(const LHS &L, const RHS &R) {
return BinaryOp_match<LHS, RHS, TargetOpcode::G_LSHR, false>(L, R);
}
-template <typename LHS, typename RHS>
-inline BinaryOp_match<LHS, RHS, TargetOpcode::G_ASHR, false>
-m_GAShr(const LHS &L, const RHS &R) {
- return BinaryOp_match<LHS, RHS, TargetOpcode::G_ASHR, false>(L, R);
-}
-
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_ASHR, false>
+m_GAShr(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, TargetOpcode::G_ASHR, false>(L, R);
+}
+
// Helper for unary instructions (G_[ZSA]EXT/G_TRUNC) etc
template <typename SrcTy, unsigned Opcode> struct UnaryOp_match {
SrcTy L;
@@ -446,51 +446,51 @@ struct CheckType {
inline CheckType m_SpecificType(LLT Ty) { return Ty; }
-template <typename Src0Ty, typename Src1Ty, typename Src2Ty, unsigned Opcode>
-struct TernaryOp_match {
- Src0Ty Src0;
- Src1Ty Src1;
- Src2Ty Src2;
-
- TernaryOp_match(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2)
- : Src0(Src0), Src1(Src1), Src2(Src2) {}
- template <typename OpTy>
- bool match(const MachineRegisterInfo &MRI, OpTy &&Op) {
- MachineInstr *TmpMI;
- if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
- if (TmpMI->getOpcode() == Opcode && TmpMI->getNumOperands() == 4) {
- return (Src0.match(MRI, TmpMI->getOperand(1).getReg()) &&
- Src1.match(MRI, TmpMI->getOperand(2).getReg()) &&
- Src2.match(MRI, TmpMI->getOperand(3).getReg()));
- }
- }
- return false;
- }
-};
-template <typename Src0Ty, typename Src1Ty, typename Src2Ty>
-inline TernaryOp_match<Src0Ty, Src1Ty, Src2Ty,
- TargetOpcode::G_INSERT_VECTOR_ELT>
-m_GInsertVecElt(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2) {
- return TernaryOp_match<Src0Ty, Src1Ty, Src2Ty,
- TargetOpcode::G_INSERT_VECTOR_ELT>(Src0, Src1, Src2);
-}
-
-/// Matches a register negated by a G_SUB.
-/// G_SUB 0, %negated_reg
-template <typename SrcTy>
-inline BinaryOp_match<SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB>
-m_Neg(const SrcTy &&Src) {
- return m_GSub(m_ZeroInt(), Src);
-}
-
-/// Matches a register not-ed by a G_XOR.
-/// G_XOR %not_reg, -1
-template <typename SrcTy>
-inline BinaryOp_match<SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true>
-m_Not(const SrcTy &&Src) {
- return m_GXor(Src, m_AllOnesInt());
-}
-
+template <typename Src0Ty, typename Src1Ty, typename Src2Ty, unsigned Opcode>
+struct TernaryOp_match {
+ Src0Ty Src0;
+ Src1Ty Src1;
+ Src2Ty Src2;
+
+ TernaryOp_match(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2)
+ : Src0(Src0), Src1(Src1), Src2(Src2) {}
+ template <typename OpTy>
+ bool match(const MachineRegisterInfo &MRI, OpTy &&Op) {
+ MachineInstr *TmpMI;
+ if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
+ if (TmpMI->getOpcode() == Opcode && TmpMI->getNumOperands() == 4) {
+ return (Src0.match(MRI, TmpMI->getOperand(1).getReg()) &&
+ Src1.match(MRI, TmpMI->getOperand(2).getReg()) &&
+ Src2.match(MRI, TmpMI->getOperand(3).getReg()));
+ }
+ }
+ return false;
+ }
+};
+template <typename Src0Ty, typename Src1Ty, typename Src2Ty>
+inline TernaryOp_match<Src0Ty, Src1Ty, Src2Ty,
+ TargetOpcode::G_INSERT_VECTOR_ELT>
+m_GInsertVecElt(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2) {
+ return TernaryOp_match<Src0Ty, Src1Ty, Src2Ty,
+ TargetOpcode::G_INSERT_VECTOR_ELT>(Src0, Src1, Src2);
+}
+
+/// Matches a register negated by a G_SUB.
+/// G_SUB 0, %negated_reg
+template <typename SrcTy>
+inline BinaryOp_match<SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB>
+m_Neg(const SrcTy &&Src) {
+ return m_GSub(m_ZeroInt(), Src);
+}
+
+/// Matches a register not-ed by a G_XOR.
+/// G_XOR %not_reg, -1
+template <typename SrcTy>
+inline BinaryOp_match<SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true>
+m_Not(const SrcTy &&Src) {
+ return m_GXor(Src, m_AllOnesInt());
+}
+
} // namespace GMIPatternMatch
} // namespace llvm
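The new matchers compose with mi_match in the same way as the existing binary-op matchers. A short sketch of how a combine might use m_Not and m_GAShr with m_ICst (the register names and the surrounding combine are invented; it assumes the pattern-match namespace is in scope):

    Register NotSrc, ShiftSrc;
    int64_t ShAmt;
    // Matches DstReg = G_XOR %x, -1 and captures %x in NotSrc.
    if (mi_match(DstReg, MRI, m_Not(m_Reg(NotSrc)))) {
      // ... fold the bitwise-not ...
    }
    // Matches DstReg = G_ASHR %y, <constant> and captures the shift amount.
    if (mi_match(DstReg, MRI, m_GAShr(m_Reg(ShiftSrc), m_ICst(ShAmt)))) {
      // ... use ShAmt ...
    }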
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index 324d80e16c..6e3f7cdc26 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -25,10 +25,10 @@
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/TargetOpcodes.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
-#include "llvm/IR/Module.h"
+#include "llvm/IR/Module.h"
namespace llvm {
@@ -231,7 +231,7 @@ class MachineIRBuilder {
protected:
void validateTruncExt(const LLT Dst, const LLT Src, bool IsExtend);
- void validateUnaryOp(const LLT Res, const LLT Op0);
+ void validateUnaryOp(const LLT Res, const LLT Op0);
void validateBinaryOp(const LLT Res, const LLT Op0, const LLT Op1);
void validateShiftOp(const LLT Res, const LLT Op0, const LLT Op1);
@@ -259,11 +259,11 @@ public:
setDebugLoc(MI.getDebugLoc());
}
- MachineIRBuilder(MachineInstr &MI, GISelChangeObserver &Observer) :
- MachineIRBuilder(MI) {
- setChangeObserver(Observer);
- }
-
+ MachineIRBuilder(MachineInstr &MI, GISelChangeObserver &Observer) :
+ MachineIRBuilder(MI) {
+ setChangeObserver(Observer);
+ }
+
virtual ~MachineIRBuilder() = default;
MachineIRBuilder(const MachineIRBuilderState &BState) : State(BState) {}
@@ -743,7 +743,7 @@ public:
/// depend on bit 0 (for now).
///
/// \return The newly created instruction.
- MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest);
+ MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest);
/// Build and insert G_BRINDIRECT \p Tgt
///
@@ -827,18 +827,18 @@ public:
///
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr,
- MachineMemOperand &MMO) {
- return buildLoadInstr(TargetOpcode::G_LOAD, Res, Addr, MMO);
- }
-
- /// Build and insert a G_LOAD instruction, while constructing the
- /// MachineMemOperand.
- MachineInstrBuilder
- buildLoad(const DstOp &Res, const SrcOp &Addr, MachinePointerInfo PtrInfo,
- Align Alignment,
- MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
- const AAMDNodes &AAInfo = AAMDNodes());
-
+ MachineMemOperand &MMO) {
+ return buildLoadInstr(TargetOpcode::G_LOAD, Res, Addr, MMO);
+ }
+
+ /// Build and insert a G_LOAD instruction, while constructing the
+ /// MachineMemOperand.
+ MachineInstrBuilder
+ buildLoad(const DstOp &Res, const SrcOp &Addr, MachinePointerInfo PtrInfo,
+ Align Alignment,
+ MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
+ const AAMDNodes &AAInfo = AAMDNodes());
+
/// Build and insert `Res = <opcode> Addr, MMO`.
///
/// Loads the value stored at \p Addr. Puts the result in \p Res.
@@ -871,14 +871,14 @@ public:
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr,
MachineMemOperand &MMO);
- /// Build and insert a G_STORE instruction, while constructing the
- /// MachineMemOperand.
- MachineInstrBuilder
- buildStore(const SrcOp &Val, const SrcOp &Addr, MachinePointerInfo PtrInfo,
- Align Alignment,
- MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
- const AAMDNodes &AAInfo = AAMDNodes());
-
+ /// Build and insert a G_STORE instruction, while constructing the
+ /// MachineMemOperand.
+ MachineInstrBuilder
+ buildStore(const SrcOp &Val, const SrcOp &Addr, MachinePointerInfo PtrInfo,
+ Align Alignment,
+ MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
+ const AAMDNodes &AAInfo = AAMDNodes());
+
/// Build and insert `Res0, ... = G_EXTRACT Src, Idx0`.
///
/// \pre setBasicBlock or setMI must have been called.
@@ -970,23 +970,23 @@ public:
MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res,
ArrayRef<Register> Ops);
- /// Build and insert a vector splat of a scalar \p Src using a
- /// G_INSERT_VECTOR_ELT and G_SHUFFLE_VECTOR idiom.
- ///
- /// \pre setBasicBlock or setMI must have been called.
- /// \pre \p Src must have the same type as the element type of \p Dst
- ///
- /// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildShuffleSplat(const DstOp &Res, const SrcOp &Src);
-
- /// Build and insert \p Res = G_SHUFFLE_VECTOR \p Src1, \p Src2, \p Mask
- ///
- /// \pre setBasicBlock or setMI must have been called.
- ///
- /// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1,
- const SrcOp &Src2, ArrayRef<int> Mask);
-
+ /// Build and insert a vector splat of a scalar \p Src using a
+ /// G_INSERT_VECTOR_ELT and G_SHUFFLE_VECTOR idiom.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Src must have the same type as the element type of \p Dst
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildShuffleSplat(const DstOp &Res, const SrcOp &Src);
+
+ /// Build and insert \p Res = G_SHUFFLE_VECTOR \p Src1, \p Src2, \p Mask
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1,
+ const SrcOp &Src2, ArrayRef<int> Mask);
+
/// Build and insert \p Res = G_CONCAT_VECTORS \p Op0, ...
///
/// G_CONCAT_VECTORS creates a vector from the concatenation of 2 or more
@@ -1570,13 +1570,13 @@ public:
return buildInstr(TargetOpcode::G_FSUB, {Dst}, {Src0, Src1}, Flags);
}
- /// Build and insert \p Res = G_FDIV \p Op0, \p Op1
- MachineInstrBuilder buildFDiv(const DstOp &Dst, const SrcOp &Src0,
- const SrcOp &Src1,
- Optional<unsigned> Flags = None) {
- return buildInstr(TargetOpcode::G_FDIV, {Dst}, {Src0, Src1}, Flags);
- }
-
+ /// Build and insert \p Res = G_FDIV \p Op0, \p Op1
+ MachineInstrBuilder buildFDiv(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_FDIV, {Dst}, {Src0, Src1}, Flags);
+ }
+
/// Build and insert \p Res = G_FMA \p Op0, \p Op1, \p Op2
MachineInstrBuilder buildFMA(const DstOp &Dst, const SrcOp &Src0,
const SrcOp &Src1, const SrcOp &Src2,
@@ -1639,13 +1639,13 @@ public:
return buildInstr(TargetOpcode::G_FEXP2, {Dst}, {Src}, Flags);
}
- /// Build and insert \p Dst = G_FPOW \p Src0, \p Src1
- MachineInstrBuilder buildFPow(const DstOp &Dst, const SrcOp &Src0,
- const SrcOp &Src1,
- Optional<unsigned> Flags = None) {
- return buildInstr(TargetOpcode::G_FPOW, {Dst}, {Src0, Src1}, Flags);
- }
-
+ /// Build and insert \p Dst = G_FPOW \p Src0, \p Src1
+ MachineInstrBuilder buildFPow(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_FPOW, {Dst}, {Src0, Src1}, Flags);
+ }
+
/// Build and insert \p Res = G_FCOPYSIGN \p Op0, \p Op1
MachineInstrBuilder buildFCopysign(const DstOp &Dst, const SrcOp &Src0,
const SrcOp &Src1) {
@@ -1696,11 +1696,11 @@ public:
return buildInstr(TargetOpcode::G_UMAX, {Dst}, {Src0, Src1});
}
- /// Build and insert \p Dst = G_ABS \p Src
- MachineInstrBuilder buildAbs(const DstOp &Dst, const SrcOp &Src) {
- return buildInstr(TargetOpcode::G_ABS, {Dst}, {Src});
- }
-
+ /// Build and insert \p Dst = G_ABS \p Src
+ MachineInstrBuilder buildAbs(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_ABS, {Dst}, {Src});
+ }
+
/// Build and insert \p Res = G_JUMP_TABLE \p JTI
///
/// G_JUMP_TABLE sets \p Res to the address of the jump table specified by
@@ -1709,101 +1709,101 @@ public:
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI);
- /// Build and insert \p Res = G_VECREDUCE_SEQ_FADD \p ScalarIn, \p VecIn
- ///
- /// \p ScalarIn is the scalar accumulator input to start the sequential
- /// reduction operation of \p VecIn.
- MachineInstrBuilder buildVecReduceSeqFAdd(const DstOp &Dst,
- const SrcOp &ScalarIn,
- const SrcOp &VecIn) {
- return buildInstr(TargetOpcode::G_VECREDUCE_SEQ_FADD, {Dst},
- {ScalarIn, {VecIn}});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_SEQ_FMUL \p ScalarIn, \p VecIn
- ///
- /// \p ScalarIn is the scalar accumulator input to start the sequential
- /// reduction operation of \p VecIn.
- MachineInstrBuilder buildVecReduceSeqFMul(const DstOp &Dst,
- const SrcOp &ScalarIn,
- const SrcOp &VecIn) {
- return buildInstr(TargetOpcode::G_VECREDUCE_SEQ_FMUL, {Dst},
- {ScalarIn, {VecIn}});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_FADD \p Src
- ///
- /// \p ScalarIn is the scalar accumulator input to the reduction operation of
- /// \p VecIn.
- MachineInstrBuilder buildVecReduceFAdd(const DstOp &Dst,
- const SrcOp &ScalarIn,
- const SrcOp &VecIn) {
- return buildInstr(TargetOpcode::G_VECREDUCE_FADD, {Dst}, {ScalarIn, VecIn});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_FMUL \p Src
- ///
- /// \p ScalarIn is the scalar accumulator input to the reduction operation of
- /// \p VecIn.
- MachineInstrBuilder buildVecReduceFMul(const DstOp &Dst,
- const SrcOp &ScalarIn,
- const SrcOp &VecIn) {
- return buildInstr(TargetOpcode::G_VECREDUCE_FMUL, {Dst}, {ScalarIn, VecIn});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_FMAX \p Src
- MachineInstrBuilder buildVecReduceFMax(const DstOp &Dst, const SrcOp &Src) {
- return buildInstr(TargetOpcode::G_VECREDUCE_FMAX, {Dst}, {Src});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_FMIN \p Src
- MachineInstrBuilder buildVecReduceFMin(const DstOp &Dst, const SrcOp &Src) {
- return buildInstr(TargetOpcode::G_VECREDUCE_FMIN, {Dst}, {Src});
- }
- /// Build and insert \p Res = G_VECREDUCE_ADD \p Src
- MachineInstrBuilder buildVecReduceAdd(const DstOp &Dst, const SrcOp &Src) {
- return buildInstr(TargetOpcode::G_VECREDUCE_ADD, {Dst}, {Src});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_MUL \p Src
- MachineInstrBuilder buildVecReduceMul(const DstOp &Dst, const SrcOp &Src) {
- return buildInstr(TargetOpcode::G_VECREDUCE_MUL, {Dst}, {Src});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_AND \p Src
- MachineInstrBuilder buildVecReduceAnd(const DstOp &Dst, const SrcOp &Src) {
- return buildInstr(TargetOpcode::G_VECREDUCE_AND, {Dst}, {Src});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_OR \p Src
- MachineInstrBuilder buildVecReduceOr(const DstOp &Dst, const SrcOp &Src) {
- return buildInstr(TargetOpcode::G_VECREDUCE_OR, {Dst}, {Src});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_XOR \p Src
- MachineInstrBuilder buildVecReduceXor(const DstOp &Dst, const SrcOp &Src) {
- return buildInstr(TargetOpcode::G_VECREDUCE_XOR, {Dst}, {Src});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_SMAX \p Src
- MachineInstrBuilder buildVecReduceSMax(const DstOp &Dst, const SrcOp &Src) {
- return buildInstr(TargetOpcode::G_VECREDUCE_SMAX, {Dst}, {Src});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_SMIN \p Src
- MachineInstrBuilder buildVecReduceSMin(const DstOp &Dst, const SrcOp &Src) {
- return buildInstr(TargetOpcode::G_VECREDUCE_SMIN, {Dst}, {Src});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_UMAX \p Src
- MachineInstrBuilder buildVecReduceUMax(const DstOp &Dst, const SrcOp &Src) {
- return buildInstr(TargetOpcode::G_VECREDUCE_UMAX, {Dst}, {Src});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_UMIN \p Src
- MachineInstrBuilder buildVecReduceUMin(const DstOp &Dst, const SrcOp &Src) {
- return buildInstr(TargetOpcode::G_VECREDUCE_UMIN, {Dst}, {Src});
- }
+ /// Build and insert \p Res = G_VECREDUCE_SEQ_FADD \p ScalarIn, \p VecIn
+ ///
+ /// \p ScalarIn is the scalar accumulator input to start the sequential
+ /// reduction operation of \p VecIn.
+ MachineInstrBuilder buildVecReduceSeqFAdd(const DstOp &Dst,
+ const SrcOp &ScalarIn,
+ const SrcOp &VecIn) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_SEQ_FADD, {Dst},
+ {ScalarIn, {VecIn}});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_SEQ_FMUL \p ScalarIn, \p VecIn
+ ///
+ /// \p ScalarIn is the scalar accumulator input to start the sequential
+ /// reduction operation of \p VecIn.
+ MachineInstrBuilder buildVecReduceSeqFMul(const DstOp &Dst,
+ const SrcOp &ScalarIn,
+ const SrcOp &VecIn) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_SEQ_FMUL, {Dst},
+ {ScalarIn, {VecIn}});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_FADD \p Src
+ ///
+ /// \p ScalarIn is the scalar accumulator input to the reduction operation of
+ /// \p VecIn.
+ MachineInstrBuilder buildVecReduceFAdd(const DstOp &Dst,
+ const SrcOp &ScalarIn,
+ const SrcOp &VecIn) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_FADD, {Dst}, {ScalarIn, VecIn});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_FMUL \p Src
+ ///
+ /// \p ScalarIn is the scalar accumulator input to the reduction operation of
+ /// \p VecIn.
+ MachineInstrBuilder buildVecReduceFMul(const DstOp &Dst,
+ const SrcOp &ScalarIn,
+ const SrcOp &VecIn) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_FMUL, {Dst}, {ScalarIn, VecIn});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_FMAX \p Src
+ MachineInstrBuilder buildVecReduceFMax(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_FMAX, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_FMIN \p Src
+ MachineInstrBuilder buildVecReduceFMin(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_FMIN, {Dst}, {Src});
+ }
+ /// Build and insert \p Res = G_VECREDUCE_ADD \p Src
+ MachineInstrBuilder buildVecReduceAdd(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_ADD, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_MUL \p Src
+ MachineInstrBuilder buildVecReduceMul(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_MUL, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_AND \p Src
+ MachineInstrBuilder buildVecReduceAnd(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_AND, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_OR \p Src
+ MachineInstrBuilder buildVecReduceOr(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_OR, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_XOR \p Src
+ MachineInstrBuilder buildVecReduceXor(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_XOR, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_SMAX \p Src
+ MachineInstrBuilder buildVecReduceSMax(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_SMAX, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_SMIN \p Src
+ MachineInstrBuilder buildVecReduceSMin(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_SMIN, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_UMAX \p Src
+ MachineInstrBuilder buildVecReduceUMax(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_UMAX, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_UMIN \p Src
+ MachineInstrBuilder buildVecReduceUMin(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_UMIN, {Dst}, {Src});
+ }
virtual MachineInstrBuilder buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
ArrayRef<SrcOp> SrcOps,
Optional<unsigned> Flags = None);
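The reduction builders above are thin wrappers over the generic buildInstr; a small sketch, assuming a configured MachineIRBuilder, a <4 x s32> integer vector and a <4 x s32> FP vector plus a starting accumulator:

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;

// Sum all lanes into one s32 scalar with G_VECREDUCE_ADD; the sequential FP
// form additionally threads the starting accumulator through the reduction.
Register sumLanes(MachineIRBuilder &MIB, Register IntVec,
                  Register FPStart, Register FPVec) {
  LLT S32 = LLT::scalar(32);
  MIB.buildVecReduceSeqFAdd(S32, FPStart, FPVec); // ordered G_VECREDUCE_SEQ_FADD
  return MIB.buildVecReduceAdd(S32, IntVec).getReg(0);
}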
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
index baff41e3c3..0dbd1ecffe 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
@@ -111,37 +111,37 @@ public:
/// Currently the TableGen-like file would look like:
/// \code
/// PartialMapping[] = {
- /// /*32-bit add*/ {0, 32, GPR}, // Scalar entry repeated for first
- /// // vec elt.
- /// /*2x32-bit add*/ {0, 32, GPR}, {32, 32, GPR},
- /// /*<2x32-bit> vadd*/ {0, 64, VPR}
+ /// /*32-bit add*/ {0, 32, GPR}, // Scalar entry repeated for first
+ /// // vec elt.
+ /// /*2x32-bit add*/ {0, 32, GPR}, {32, 32, GPR},
+ /// /*<2x32-bit> vadd*/ {0, 64, VPR}
/// }; // PartialMapping duplicated.
///
/// ValueMapping[] {
- /// /*plain 32-bit add*/ {&PartialMapping[0], 1},
+ /// /*plain 32-bit add*/ {&PartialMapping[0], 1},
/// /*expanded vadd on 2xadd*/ {&PartialMapping[1], 2},
- /// /*plain <2x32-bit> vadd*/ {&PartialMapping[3], 1}
+ /// /*plain <2x32-bit> vadd*/ {&PartialMapping[3], 1}
/// };
/// \endcode
///
/// With the array of pointer, we would have:
/// \code
/// PartialMapping[] = {
- /// /*32-bit add lower */ { 0, 32, GPR},
+ /// /*32-bit add lower */ { 0, 32, GPR},
/// /*32-bit add upper */ {32, 32, GPR},
- /// /*<2x32-bit> vadd */ { 0, 64, VPR}
+ /// /*<2x32-bit> vadd */ { 0, 64, VPR}
/// }; // No more duplication.
///
/// BreakDowns[] = {
- /// /*AddBreakDown*/ &PartialMapping[0],
+ /// /*AddBreakDown*/ &PartialMapping[0],
/// /*2xAddBreakDown*/ &PartialMapping[0], &PartialMapping[1],
- /// /*VAddBreakDown*/ &PartialMapping[2]
+ /// /*VAddBreakDown*/ &PartialMapping[2]
/// }; // Addresses of PartialMapping duplicated (smaller).
///
/// ValueMapping[] {
- /// /*plain 32-bit add*/ {&BreakDowns[0], 1},
+ /// /*plain 32-bit add*/ {&BreakDowns[0], 1},
/// /*expanded vadd on 2xadd*/ {&BreakDowns[1], 2},
- /// /*plain <2x32-bit> vadd*/ {&BreakDowns[3], 1}
+ /// /*plain <2x32-bit> vadd*/ {&BreakDowns[3], 1}
/// };
/// \endcode
///
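In C++ the scheme sketched above maps onto RegisterBankInfo::PartialMapping and RegisterBankInfo::ValueMapping, where each ValueMapping points at NumBreakDowns consecutive PartialMapping entries. A sketch with two hypothetical register banks GPRBank and VPRBank (real targets generate these tables from TableGen):

#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
using namespace llvm;

extern const RegisterBank GPRBank, VPRBank; // hypothetical target banks

static const RegisterBankInfo::PartialMapping PartMappings[] = {
    /*32-bit add lower*/ {0, 32, GPRBank},
    /*32-bit add upper*/ {32, 32, GPRBank},
    /*<2x32-bit> vadd */ {0, 64, VPRBank},
};

static const RegisterBankInfo::ValueMapping ValMappings[] = {
    /*plain 32-bit add*/        {&PartMappings[0], 1},
    /*vadd expanded on 2 adds*/ {&PartMappings[0], 2},
    /*plain <2x32-bit> vadd*/   {&PartMappings[2], 1},
};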
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Utils.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Utils.h
index 6059e13234..d07bcae8e7 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Utils.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -25,12 +25,12 @@
#include "llvm/CodeGen/Register.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/LowLevelTypeImpl.h"
-#include <cstdint>
+#include <cstdint>
namespace llvm {
class AnalysisUsage;
-class GISelKnownBits;
+class GISelKnownBits;
class MachineFunction;
class MachineInstr;
class MachineOperand;
@@ -41,7 +41,7 @@ class MachineRegisterInfo;
class MCInstrDesc;
class RegisterBankInfo;
class TargetInstrInfo;
-class TargetLowering;
+class TargetLowering;
class TargetPassConfig;
class TargetRegisterInfo;
class TargetRegisterClass;
@@ -59,10 +59,10 @@ Register constrainRegToClass(MachineRegisterInfo &MRI,
/// Constrain the Register operand OpIdx, so that it is now constrained to the
/// TargetRegisterClass passed as an argument (RegClass).
-/// If this fails, create a new virtual register in the correct class and insert
-/// a COPY before \p InsertPt if it is a use or after if it is a definition.
-/// In both cases, the function also updates the register of RegMo. The debug
-/// location of \p InsertPt is used for the new copy.
+/// If this fails, create a new virtual register in the correct class and insert
+/// a COPY before \p InsertPt if it is a use or after if it is a definition.
+/// In both cases, the function also updates the register of RegMo. The debug
+/// location of \p InsertPt is used for the new copy.
///
/// \return The virtual register constrained to the right register class.
Register constrainOperandRegClass(const MachineFunction &MF,
@@ -72,13 +72,13 @@ Register constrainOperandRegClass(const MachineFunction &MF,
const RegisterBankInfo &RBI,
MachineInstr &InsertPt,
const TargetRegisterClass &RegClass,
- MachineOperand &RegMO);
+ MachineOperand &RegMO);
-/// Try to constrain Reg so that it is usable by argument OpIdx of the provided
-/// MCInstrDesc \p II. If this fails, create a new virtual register in the
-/// correct class and insert a COPY before \p InsertPt if it is a use or after
-/// if it is a definition. In both cases, the function also updates the register
-/// of RegMo.
+/// Try to constrain Reg so that it is usable by argument OpIdx of the provided
+/// MCInstrDesc \p II. If this fails, create a new virtual register in the
+/// correct class and insert a COPY before \p InsertPt if it is a use or after
+/// if it is a definition. In both cases, the function also updates the register
+/// of RegMo.
/// This is equivalent to constrainOperandRegClass(..., RegClass, ...)
/// with RegClass obtained from the MCInstrDesc. The debug location of \p
/// InsertPt is used for the new copy.
@@ -90,7 +90,7 @@ Register constrainOperandRegClass(const MachineFunction &MF,
const TargetInstrInfo &TII,
const RegisterBankInfo &RBI,
MachineInstr &InsertPt, const MCInstrDesc &II,
- MachineOperand &RegMO, unsigned OpIdx);
+ MachineOperand &RegMO, unsigned OpIdx);
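A sketch of the typical call site, assuming a selector that has just rewritten MI to a target opcode; constrainSelectedInstRegOperands (declared below) performs the same step for every operand:

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineInstr.h"
using namespace llvm;

// Constrain only the def operand of MI; a COPY is emitted and the operand is
// rewritten to a fresh vreg if the existing one cannot be constrained in place.
void constrainDef(MachineFunction &MF, const TargetRegisterInfo &TRI,
                  MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
                  const RegisterBankInfo &RBI, MachineInstr &MI) {
  constrainOperandRegClass(MF, TRI, MRI, TII, RBI, MI, MI.getDesc(),
                           MI.getOperand(0), /*OpIdx=*/0);
}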
/// Mutate the newly-selected instruction \p I to constrain its (possibly
/// generic) virtual register operands to the instruction's register class.
@@ -131,19 +131,19 @@ void reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
MachineOptimizationRemarkEmitter &MORE,
MachineOptimizationRemarkMissed &R);
-/// If \p VReg is defined by a G_CONSTANT, return the corresponding value.
-Optional<APInt> getConstantVRegVal(Register VReg,
- const MachineRegisterInfo &MRI);
-
+/// If \p VReg is defined by a G_CONSTANT, return the corresponding value.
+Optional<APInt> getConstantVRegVal(Register VReg,
+ const MachineRegisterInfo &MRI);
+
/// If \p VReg is defined by a G_CONSTANT fits in int64_t
/// returns it.
-Optional<int64_t> getConstantVRegSExtVal(Register VReg,
- const MachineRegisterInfo &MRI);
-
+Optional<int64_t> getConstantVRegSExtVal(Register VReg,
+ const MachineRegisterInfo &MRI);
+
/// Simple struct used to hold a constant integer value and a virtual
/// register.
struct ValueAndVReg {
- APInt Value;
+ APInt Value;
Register VReg;
};
/// If \p VReg is defined by a statically evaluable chain of
@@ -153,13 +153,13 @@ struct ValueAndVReg {
/// When \p LookThroughInstrs == false this function behaves like
/// getConstantVRegVal.
/// When \p HandleFConstants == false the function bails on G_FCONSTANTs.
-/// When \p LookThroughAnyExt == true the function treats G_ANYEXT same as
-/// G_SEXT.
+/// When \p LookThroughAnyExt == true the function treats G_ANYEXT same as
+/// G_SEXT.
Optional<ValueAndVReg>
getConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI,
bool LookThroughInstrs = true,
- bool HandleFConstants = true,
- bool LookThroughAnyExt = false);
+ bool HandleFConstants = true,
+ bool LookThroughAnyExt = false);
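A short sketch of how a combine might use the constant-lookup helpers above; the predicate names are made up:

#include "llvm/CodeGen/GlobalISel/Utils.h"
using namespace llvm;

// True when Reg, possibly behind copies and extensions, is a G_CONSTANT zero.
bool isConstantZero(Register Reg, const MachineRegisterInfo &MRI) {
  if (auto ValAndVReg = getConstantVRegValWithLookThrough(Reg, MRI))
    return ValAndVReg->Value.isNullValue();
  return false;
}

// Variant that only needs the value when it fits in 64 bits.
bool getSmallConstant(Register Reg, const MachineRegisterInfo &MRI,
                      int64_t &Out) {
  if (auto Cst = getConstantVRegSExtVal(Reg, MRI)) {
    Out = *Cst;
    return true;
  }
  return false;
}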
const ConstantFP* getConstantFPVRegVal(Register VReg,
const MachineRegisterInfo &MRI);
@@ -169,20 +169,20 @@ const ConstantFP* getConstantFPVRegVal(Register VReg,
MachineInstr *getOpcodeDef(unsigned Opcode, Register Reg,
const MachineRegisterInfo &MRI);
-/// Simple struct used to hold a Register value and the instruction which
-/// defines it.
-struct DefinitionAndSourceRegister {
- MachineInstr *MI;
- Register Reg;
-};
-
-/// Find the def instruction for \p Reg, and underlying value Register folding
-/// away any copies.
-Optional<DefinitionAndSourceRegister>
-getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);
-
-/// Find the def instruction for \p Reg, folding away any trivial copies. May
-/// return nullptr if \p Reg is not a generic virtual register.
+/// Simple struct used to hold a Register value and the instruction which
+/// defines it.
+struct DefinitionAndSourceRegister {
+ MachineInstr *MI;
+ Register Reg;
+};
+
+/// Find the def instruction for \p Reg, and underlying value Register folding
+/// away any copies.
+Optional<DefinitionAndSourceRegister>
+getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);
+
+/// Find the def instruction for \p Reg, folding away any trivial copies. May
+/// return nullptr if \p Reg is not a generic virtual register.
MachineInstr *getDefIgnoringCopies(Register Reg,
const MachineRegisterInfo &MRI);
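A sketch using the copy-folding helpers above, assuming a combine that wants to inspect the real producer of a value; the predicate is illustrative:

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetOpcodes.h"
using namespace llvm;

// True when Reg, after stripping trivial copies, is defined by G_IMPLICIT_DEF.
bool isUltimatelyUndef(Register Reg, const MachineRegisterInfo &MRI) {
  if (const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI))
    return Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF;
  return false;
}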
@@ -207,12 +207,12 @@ Optional<APInt> ConstantFoldBinOp(unsigned Opcode, const Register Op1,
Optional<APInt> ConstantFoldExtOp(unsigned Opcode, const Register Op1,
uint64_t Imm, const MachineRegisterInfo &MRI);
-/// Test if the given value is known to have exactly one bit set. This differs
-/// from computeKnownBits in that it doesn't necessarily determine which bit is
-/// set.
-bool isKnownToBeAPowerOfTwo(Register Val, const MachineRegisterInfo &MRI,
- GISelKnownBits *KnownBits = nullptr);
-
+/// Test if the given value is known to have exactly one bit set. This differs
+/// from computeKnownBits in that it doesn't necessarily determine which bit is
+/// set.
+bool isKnownToBeAPowerOfTwo(Register Val, const MachineRegisterInfo &MRI,
+ GISelKnownBits *KnownBits = nullptr);
+
/// Returns true if \p Val can be assumed to never be a NaN. If \p SNaN is true,
/// this returns if \p Val can be assumed to never be a signaling NaN.
bool isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
@@ -225,66 +225,66 @@ inline bool isKnownNeverSNaN(Register Val, const MachineRegisterInfo &MRI) {
Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO);
-/// Return a virtual register corresponding to the incoming argument register \p
-/// PhysReg. This register is expected to have class \p RC, and optional type \p
-/// RegTy. This assumes all references to the register will use the same type.
-///
-/// If there is an existing live-in argument register, it will be returned.
-/// This will also ensure there is a valid copy
-Register getFunctionLiveInPhysReg(MachineFunction &MF, const TargetInstrInfo &TII,
- MCRegister PhysReg,
- const TargetRegisterClass &RC,
- LLT RegTy = LLT());
-
-/// Return the least common multiple type of \p OrigTy and \p TargetTy, by changing the
-/// number of vector elements or scalar bitwidth. The intent is a
-/// G_MERGE_VALUES, G_BUILD_VECTOR, or G_CONCAT_VECTORS can be constructed from
-/// \p OrigTy elements, and unmerged into \p TargetTy
-LLVM_READNONE
-LLT getLCMType(LLT OrigTy, LLT TargetTy);
-
-/// Return a type where the total size is the greatest common divisor of \p
-/// OrigTy and \p TargetTy. This will try to either change the number of vector
-/// elements, or bitwidth of scalars. The intent is the result type can be used
-/// as the result of a G_UNMERGE_VALUES from \p OrigTy, and then some
-/// combination of G_MERGE_VALUES, G_BUILD_VECTOR and G_CONCAT_VECTORS (possibly
-/// with intermediate casts) can re-form \p TargetTy.
-///
-/// If these are vectors with different element types, this will try to produce
-/// a vector with a compatible total size, but the element type of \p OrigTy. If
-/// this can't be satisfied, this will produce a scalar smaller than the
-/// original vector elements.
-///
-/// In the worst case, this returns LLT::scalar(1)
-LLVM_READNONE
+/// Return a virtual register corresponding to the incoming argument register \p
+/// PhysReg. This register is expected to have class \p RC, and optional type \p
+/// RegTy. This assumes all references to the register will use the same type.
+///
+/// If there is an existing live-in argument register, it will be returned.
+/// This will also ensure there is a valid copy
+Register getFunctionLiveInPhysReg(MachineFunction &MF, const TargetInstrInfo &TII,
+ MCRegister PhysReg,
+ const TargetRegisterClass &RC,
+ LLT RegTy = LLT());
+
+/// Return the least common multiple type of \p OrigTy and \p TargetTy, by changing the
+/// number of vector elements or scalar bitwidth. The intent is a
+/// G_MERGE_VALUES, G_BUILD_VECTOR, or G_CONCAT_VECTORS can be constructed from
+/// \p OrigTy elements, and unmerged into \p TargetTy
+LLVM_READNONE
+LLT getLCMType(LLT OrigTy, LLT TargetTy);
+
+/// Return a type where the total size is the greatest common divisor of \p
+/// OrigTy and \p TargetTy. This will try to either change the number of vector
+/// elements, or bitwidth of scalars. The intent is the result type can be used
+/// as the result of a G_UNMERGE_VALUES from \p OrigTy, and then some
+/// combination of G_MERGE_VALUES, G_BUILD_VECTOR and G_CONCAT_VECTORS (possibly
+/// with intermediate casts) can re-form \p TargetTy.
+///
+/// If these are vectors with different element types, this will try to produce
+/// a vector with a compatible total size, but the element type of \p OrigTy. If
+/// this can't be satisfied, this will produce a scalar smaller than the
+/// original vector elements.
+///
+/// In the worst case, this returns LLT::scalar(1)
+LLVM_READNONE
LLT getGCDType(LLT OrigTy, LLT TargetTy);
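A small sketch of how the two helpers above are meant to pair up, assuming simple scalar inputs; results for mixed vector cases follow the rules documented above rather than this comment:

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/Support/LowLevelTypeImpl.h"
using namespace llvm;

// How many GCD-typed pieces are needed to cover the LCM type; e.g. for
// OrigTy = s32 and TargetTy = s64 the cover is s64 split into two s32 pieces.
unsigned numCoveringPieces(LLT OrigTy, LLT TargetTy) {
  LLT CoverTy = getLCMType(OrigTy, TargetTy); // merge/concat target type
  LLT PieceTy = getGCDType(OrigTy, TargetTy); // unmerge result type
  return CoverTy.getSizeInBits() / PieceTy.getSizeInBits();
}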
-/// \returns The splat index of a G_SHUFFLE_VECTOR \p MI when \p MI is a splat.
-/// If \p MI is not a splat, returns None.
-Optional<int> getSplatIndex(MachineInstr &MI);
-
-/// Returns a scalar constant of a G_BUILD_VECTOR splat if it exists.
-Optional<int64_t> getBuildVectorConstantSplat(const MachineInstr &MI,
- const MachineRegisterInfo &MRI);
-
-/// Return true if the specified instruction is a G_BUILD_VECTOR or
-/// G_BUILD_VECTOR_TRUNC where all of the elements are 0 or undef.
-bool isBuildVectorAllZeros(const MachineInstr &MI,
- const MachineRegisterInfo &MRI);
-
-/// Return true if the specified instruction is a G_BUILD_VECTOR or
-/// G_BUILD_VECTOR_TRUNC where all of the elements are ~0 or undef.
-bool isBuildVectorAllOnes(const MachineInstr &MI,
- const MachineRegisterInfo &MRI);
-
-/// Returns true if given the TargetLowering's boolean contents information,
-/// the value \p Val contains a true value.
-bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
- bool IsFP);
-
-/// Returns an integer representing true, as defined by the
-/// TargetBooleanContents.
-int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP);
+/// \returns The splat index of a G_SHUFFLE_VECTOR \p MI when \p MI is a splat.
+/// If \p MI is not a splat, returns None.
+Optional<int> getSplatIndex(MachineInstr &MI);
+
+/// Returns a scalar constant of a G_BUILD_VECTOR splat if it exists.
+Optional<int64_t> getBuildVectorConstantSplat(const MachineInstr &MI,
+ const MachineRegisterInfo &MRI);
+
+/// Return true if the specified instruction is a G_BUILD_VECTOR or
+/// G_BUILD_VECTOR_TRUNC where all of the elements are 0 or undef.
+bool isBuildVectorAllZeros(const MachineInstr &MI,
+ const MachineRegisterInfo &MRI);
+
+/// Return true if the specified instruction is a G_BUILD_VECTOR or
+/// G_BUILD_VECTOR_TRUNC where all of the elements are ~0 or undef.
+bool isBuildVectorAllOnes(const MachineInstr &MI,
+ const MachineRegisterInfo &MRI);
+
+/// Returns true if given the TargetLowering's boolean contents information,
+/// the value \p Val contains a true value.
+bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
+ bool IsFP);
+
+/// Returns an integer representing true, as defined by the
+/// TargetBooleanContents.
+int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP);
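A sketch tying the boolean-contents helpers together, assuming access to the function's TargetLowering; whether the boolean is vector or FP-derived comes from the surrounding combine:

#include "llvm/CodeGen/GlobalISel/Utils.h"
using namespace llvm;

// True when Reg holds the target's canonical "true" for a boolean of the
// given kind (scalar/vector, integer/FP compare).
bool isCanonicalTrue(Register Reg, const MachineRegisterInfo &MRI,
                     const TargetLowering &TLI, bool IsVector, bool IsFP) {
  if (auto Cst = getConstantVRegSExtVal(Reg, MRI))
    return isConstTrueVal(TLI, *Cst, IsVector, IsFP);
  return false;
}
// The constant to materialize when synthesizing such a boolean is
// getICmpTrueVal(TLI, IsVector, IsFP).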
} // End namespace llvm.
#endif