author     Devtools Arcadia <arcadia-devtools@yandex-team.ru>  2022-02-07 18:08:42 +0300
committer  Devtools Arcadia <arcadia-devtools@mous.vla.yp-c.yandex.net>  2022-02-07 18:08:42 +0300
commit     1110808a9d39d4b808aef724c861a2e1a38d2a69 (patch)
tree       e26c9fed0de5d9873cce7e00bc214573dc2195b7 /contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel
download   ydb-1110808a9d39d4b808aef724c861a2e1a38d2a69.tar.gz

intermediate changes
ref:cde9a383711a11544ce7e107a78147fb96cc4029
Diffstat (limited to 'contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel')
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CSEInfo.h | 253
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CSEMIRBuilder.h | 120
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CallLowering.h | 474
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Combiner.h | 57
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CombinerHelper.h | 555
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CombinerInfo.h | 83
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h | 83
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h | 150
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelKnownBits.h | 142
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelWorkList.h | 124
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/IRTranslator.h | 714
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InlineAsmLowering.h | 78
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelect.h | 63
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelector.h | 558
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h | 1163
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h | 1022
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Legalizer.h | 88
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h | 409
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h | 1498
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Localizer.h | 108
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LostDebugLocObserver.h | 61
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h | 501
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h | 1817
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/RegBankSelect.h | 680
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/RegisterBank.h | 109
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h | 786
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Utils.h | 293
27 files changed, 11989 insertions, 0 deletions
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CSEInfo.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CSEInfo.h
new file mode 100644
index 0000000000..83db4418b3
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CSEInfo.h
@@ -0,0 +1,253 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/CodeGen/GlobalISel/CSEInfo.h ------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// Provides analysis for continuously CSEing during GISel passes.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CODEGEN_GLOBALISEL_CSEINFO_H
+#define LLVM_CODEGEN_GLOBALISEL_CSEINFO_H
+
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/CodeGen/CSEConfigBase.h"
+#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
+#include "llvm/CodeGen/GlobalISel/GISelWorkList.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/CodeGen.h"
+
+namespace llvm {
+class MachineBasicBlock;
+
+/// A class that wraps MachineInstrs and derives from FoldingSetNode in order to
+/// be uniqued in a CSEMap. The tradeoff here is extra memory allocations for
+/// UniqueMachineInstr vs making MachineInstr bigger.
+class UniqueMachineInstr : public FoldingSetNode {
+ friend class GISelCSEInfo;
+ const MachineInstr *MI;
+ explicit UniqueMachineInstr(const MachineInstr *MI) : MI(MI) {}
+
+public:
+ void Profile(FoldingSetNodeID &ID);
+};
+
+// A CSE config for fully optimized builds.
+class CSEConfigFull : public CSEConfigBase {
+public:
+ virtual ~CSEConfigFull() = default;
+ virtual bool shouldCSEOpc(unsigned Opc) override;
+};
+
+// Commonly used for O0 config.
+class CSEConfigConstantOnly : public CSEConfigBase {
+public:
+ virtual ~CSEConfigConstantOnly() = default;
+ virtual bool shouldCSEOpc(unsigned Opc) override;
+};
+
+// Returns the standard expected CSEConfig for the given optimization level.
+// We have this logic here so targets can make use of it from their derived
+// TargetPassConfig, but can't put this logic into TargetPassConfig directly
+// because the CodeGen library can't depend on GlobalISel.
+std::unique_ptr<CSEConfigBase>
+getStandardCSEConfigForOpt(CodeGenOpt::Level Level);
+
+/// The CSE Analysis object.
+/// This installs itself as a delegate to the MachineFunction to track
+/// new instructions as well as deletions. It will not, however, be able to
+/// track instruction mutations. In such cases, recordNewInstruction should be
+/// called (e.g. inside MachineIRBuilder::recordInsertion).
+/// Also, because an instruction can be inserted before any operands are added
+/// to it, instructions are uniqued and inserted lazily.
+/// CSEInfo should assert when trying to enter an incomplete instruction into
+/// the CSEMap. CSE-ability is decided at Opcode granularity, and for now only
+/// Generic instructions are CSEable.
+class GISelCSEInfo : public GISelChangeObserver {
+ // Make it accessible only to CSEMIRBuilder.
+ friend class CSEMIRBuilder;
+
+ BumpPtrAllocator UniqueInstrAllocator;
+ FoldingSet<UniqueMachineInstr> CSEMap;
+ MachineRegisterInfo *MRI = nullptr;
+ MachineFunction *MF = nullptr;
+ std::unique_ptr<CSEConfigBase> CSEOpt;
+  /// Keep a cache of UniqueInstrs for each MachineInstr. In GISel,
+  /// instructions are often mutated (which completely changes their ID).
+  /// Whenever a mutation happens, invalidate the UniqueMachineInstr for the
+  /// MachineInstr.
+ DenseMap<const MachineInstr *, UniqueMachineInstr *> InstrMapping;
+
+ /// Store instructions that are not fully formed in TemporaryInsts.
+ /// Also because CSE insertion happens lazily, we can remove insts from this
+ /// list and avoid inserting and then removing from the CSEMap.
+ GISelWorkList<8> TemporaryInsts;
+
+ // Only used in asserts.
+ DenseMap<unsigned, unsigned> OpcodeHitTable;
+
+ bool isUniqueMachineInstValid(const UniqueMachineInstr &UMI) const;
+
+ void invalidateUniqueMachineInstr(UniqueMachineInstr *UMI);
+
+ UniqueMachineInstr *getNodeIfExists(FoldingSetNodeID &ID,
+ MachineBasicBlock *MBB, void *&InsertPos);
+
+  /// Allocate and construct a new UniqueMachineInstr for MI and return it.
+ UniqueMachineInstr *getUniqueInstrForMI(const MachineInstr *MI);
+
+ void insertNode(UniqueMachineInstr *UMI, void *InsertPos = nullptr);
+
+  /// Get the (unique) MachineInstr if it already exists in the CSEMap and in
+  /// the same MachineBasicBlock.
+ MachineInstr *getMachineInstrIfExists(FoldingSetNodeID &ID,
+ MachineBasicBlock *MBB,
+ void *&InsertPos);
+
+  /// Use this method to allocate a new UniqueMachineInstr for MI and insert it
+  /// into the CSEMap. shouldCSE(MI->getOpcode()) should return true for MI.
+ void insertInstr(MachineInstr *MI, void *InsertPos = nullptr);
+
+public:
+ GISelCSEInfo() = default;
+
+ virtual ~GISelCSEInfo();
+
+ void setMF(MachineFunction &MF);
+
+ Error verify();
+
+  /// Records a newly created inst in a list and lazily inserts it into the
+  /// CSEMap. Sometimes this method might be called with a partially
+  /// constructed MachineInstr (right after BuildMI, before any operands are
+  /// added); in such cases, defer hashing the instruction to a later stage.
+ void recordNewInstruction(MachineInstr *MI);
+
+ /// Use this callback to inform CSE about a newly fully created instruction.
+ void handleRecordedInst(MachineInstr *MI);
+
+ /// Use this callback to insert all the recorded instructions. At this point,
+ /// all of these insts need to be fully constructed and should not be missing
+ /// any operands.
+ void handleRecordedInsts();
+
+  /// Remove this inst from the CSE map. If this inst has not been inserted yet,
+  /// it will be removed from the TemporaryInsts list if present.
+ void handleRemoveInst(MachineInstr *MI);
+
+ void releaseMemory();
+
+ void setCSEConfig(std::unique_ptr<CSEConfigBase> Opt) {
+ CSEOpt = std::move(Opt);
+ }
+
+ bool shouldCSE(unsigned Opc) const;
+
+ void analyze(MachineFunction &MF);
+
+ void countOpcodeHit(unsigned Opc);
+
+ void print();
+
+ // Observer API
+ void erasingInstr(MachineInstr &MI) override;
+ void createdInstr(MachineInstr &MI) override;
+ void changingInstr(MachineInstr &MI) override;
+ void changedInstr(MachineInstr &MI) override;
+};
+
+class TargetRegisterClass;
+class RegisterBank;
+
+// Simple builder class to easily profile properties about MIs.
+class GISelInstProfileBuilder {
+ FoldingSetNodeID &ID;
+ const MachineRegisterInfo &MRI;
+
+public:
+ GISelInstProfileBuilder(FoldingSetNodeID &ID, const MachineRegisterInfo &MRI)
+ : ID(ID), MRI(MRI) {}
+ // Profiling methods.
+ const GISelInstProfileBuilder &addNodeIDOpcode(unsigned Opc) const;
+ const GISelInstProfileBuilder &addNodeIDRegType(const LLT Ty) const;
+ const GISelInstProfileBuilder &addNodeIDRegType(const Register) const;
+
+ const GISelInstProfileBuilder &
+ addNodeIDRegType(const TargetRegisterClass *RC) const;
+ const GISelInstProfileBuilder &addNodeIDRegType(const RegisterBank *RB) const;
+
+ const GISelInstProfileBuilder &addNodeIDRegNum(Register Reg) const;
+
+ const GISelInstProfileBuilder &addNodeIDReg(Register Reg) const;
+
+ const GISelInstProfileBuilder &addNodeIDImmediate(int64_t Imm) const;
+ const GISelInstProfileBuilder &
+ addNodeIDMBB(const MachineBasicBlock *MBB) const;
+
+ const GISelInstProfileBuilder &
+ addNodeIDMachineOperand(const MachineOperand &MO) const;
+
+ const GISelInstProfileBuilder &addNodeIDFlag(unsigned Flag) const;
+ const GISelInstProfileBuilder &addNodeID(const MachineInstr *MI) const;
+};
+
+/// Simple wrapper that does the following:
+/// 1) Lazily evaluates the MachineFunction to compute CSEable instructions.
+/// 2) Allows configuring which instructions are CSEd through a CSEConfig
+///    object, via a get method that takes a CSEConfig object.
+class GISelCSEAnalysisWrapper {
+ GISelCSEInfo Info;
+ MachineFunction *MF = nullptr;
+ bool AlreadyComputed = false;
+
+public:
+  /// Takes a CSEConfigBase object that defines what opcodes get CSEd.
+  /// If CSEConfig is already set, and the CSE Analysis has been preserved,
+  /// it will not use the new CSEOpt (pass ReCompute = true to force using the
+  /// new CSEOpt).
+ GISelCSEInfo &get(std::unique_ptr<CSEConfigBase> CSEOpt,
+ bool ReCompute = false);
+ void setMF(MachineFunction &MFunc) { MF = &MFunc; }
+ void setComputed(bool Computed) { AlreadyComputed = Computed; }
+ void releaseMemory() { Info.releaseMemory(); }
+};
+
+/// The actual analysis pass wrapper.
+class GISelCSEAnalysisWrapperPass : public MachineFunctionPass {
+ GISelCSEAnalysisWrapper Wrapper;
+
+public:
+ static char ID;
+ GISelCSEAnalysisWrapperPass();
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ const GISelCSEAnalysisWrapper &getCSEWrapper() const { return Wrapper; }
+ GISelCSEAnalysisWrapper &getCSEWrapper() { return Wrapper; }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ void releaseMemory() override {
+ Wrapper.releaseMemory();
+ Wrapper.setComputed(false);
+ }
+};
+
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
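
A minimal sketch of how a machine pass might consume the CSE analysis declared above. This is an editor's illustration, not part of the imported header: the pass name MyGISelPass and the CodeGenOpt::Default level are assumptions, while GISelCSEAnalysisWrapperPass, getStandardCSEConfigForOpt, and GISelCSEInfo are the entities from CSEInfo.h.

#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"

using namespace llvm;

namespace {
// Hypothetical pass; only the CSE-related plumbing is shown.
class MyGISelPass : public MachineFunctionPass {
public:
  static char ID;
  MyGISelPass() : MachineFunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<GISelCSEAnalysisWrapperPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    GISelCSEAnalysisWrapper &Wrapper =
        getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
    // getStandardCSEConfigForOpt returns CSEConfigConstantOnly at -O0 and
    // CSEConfigFull otherwise; the opt level used here is an assumption.
    GISelCSEInfo &Info =
        Wrapper.get(getStandardCSEConfigForOpt(CodeGenOpt::Default));
    (void)Info; // typically handed to a CSEMIRBuilder (see CSEMIRBuilder.h)
    return false;
  }
};
} // end anonymous namespace

char MyGISelPass::ID = 0;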
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CSEMIRBuilder.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CSEMIRBuilder.h
new file mode 100644
index 0000000000..9873131ab3
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CSEMIRBuilder.h
@@ -0,0 +1,120 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- llvm/CodeGen/GlobalISel/CSEMIRBuilder.h --*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements a version of MachineIRBuilder which CSEs insts within
+/// a MachineBasicBlock.
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CODEGEN_GLOBALISEL_CSEMIRBUILDER_H
+#define LLVM_CODEGEN_GLOBALISEL_CSEMIRBUILDER_H
+
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+
+namespace llvm {
+
+/// Defines a builder that does CSE of MachineInstructions using GISelCSEInfo.
+/// Example usage:
+///
+///   GISelCSEInfo *Info =
+///       &getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEInfo();
+///   CSEMIRBuilder CB(Builder.getState());
+///   CB.setCSEInfo(Info);
+///   auto A = CB.buildConstant(s32, 42);
+///   auto B = CB.buildConstant(s32, 42);
+///   assert(A == B);
+///   unsigned CReg = MRI.createGenericVirtualRegister(s32);
+///   auto C = CB.buildConstant(CReg, 42);
+///   assert(C->getOpcode() == TargetOpcode::COPY);
+///
+/// Explicitly passing in a register would materialize a copy if possible.
+/// CSEMIRBuilder also does trivial constant folding for binary ops.
+class CSEMIRBuilder : public MachineIRBuilder {
+
+ /// Returns true if A dominates B (within the same basic block).
+ /// Both iterators must be in the same basic block.
+ //
+ // TODO: Another approach for checking dominance is having two iterators and
+ // making them go towards each other until they meet or reach begin/end. Which
+ // approach is better? Should this even change dynamically? For G_CONSTANTS
+ // most of which will be at the top of the BB, the top down approach would be
+ // a better choice. Does IRTranslator placing constants at the beginning still
+ // make sense? Should this change based on Opcode?
+ bool dominates(MachineBasicBlock::const_iterator A,
+ MachineBasicBlock::const_iterator B) const;
+
+  /// For given ID, find a MachineInstr in the CSE Map. If found, check if it
+  /// dominates the current insertion point and if not, move it just before the
+  /// current insertion point and return it. If not found, return a null
+  /// MachineInstrBuilder.
+ MachineInstrBuilder getDominatingInstrForID(FoldingSetNodeID &ID,
+ void *&NodeInsertPos);
+  /// Simple check for whether we can CSE at all (we have the CSEInfo) and
+  /// whether this Opcode is safe to CSE.
+ bool canPerformCSEForOpc(unsigned Opc) const;
+
+ void profileDstOp(const DstOp &Op, GISelInstProfileBuilder &B) const;
+
+ void profileDstOps(ArrayRef<DstOp> Ops, GISelInstProfileBuilder &B) const {
+ for (const DstOp &Op : Ops)
+ profileDstOp(Op, B);
+ }
+
+ void profileSrcOp(const SrcOp &Op, GISelInstProfileBuilder &B) const;
+
+ void profileSrcOps(ArrayRef<SrcOp> Ops, GISelInstProfileBuilder &B) const {
+ for (const SrcOp &Op : Ops)
+ profileSrcOp(Op, B);
+ }
+
+ void profileMBBOpcode(GISelInstProfileBuilder &B, unsigned Opc) const;
+
+ void profileEverything(unsigned Opc, ArrayRef<DstOp> DstOps,
+ ArrayRef<SrcOp> SrcOps, Optional<unsigned> Flags,
+ GISelInstProfileBuilder &B) const;
+
+ // Takes a MachineInstrBuilder and inserts it into the CSEMap using the
+ // NodeInsertPos.
+ MachineInstrBuilder memoizeMI(MachineInstrBuilder MIB, void *NodeInsertPos);
+
+  // If we can CSE an instruction, but still need to materialize to a VReg,
+ // we emit a copy from the CSE'd inst to the VReg.
+ MachineInstrBuilder generateCopiesIfRequired(ArrayRef<DstOp> DstOps,
+ MachineInstrBuilder &MIB);
+
+  // If we can CSE an instruction, but still need to materialize to a VReg,
+ // check if we can generate copies. It's not possible to return a single MIB,
+ // while emitting copies to multiple vregs.
+ bool checkCopyToDefsPossible(ArrayRef<DstOp> DstOps);
+
+public:
+ // Pull in base class constructors.
+ using MachineIRBuilder::MachineIRBuilder;
+ // Unhide buildInstr
+ MachineInstrBuilder buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
+ ArrayRef<SrcOp> SrcOps,
+ Optional<unsigned> Flag = None) override;
+ // Bring in the other overload from the base class.
+ using MachineIRBuilder::buildConstant;
+
+ MachineInstrBuilder buildConstant(const DstOp &Res,
+ const ConstantInt &Val) override;
+
+ // Bring in the other overload from the base class.
+ using MachineIRBuilder::buildFConstant;
+ MachineInstrBuilder buildFConstant(const DstOp &Res,
+ const ConstantFP &Val) override;
+};
+} // namespace llvm
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
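
A short sketch of the CSE and constant-folding behavior documented above, assuming MF, MBB, and the Info reference from the previous sketch are in scope:

CSEMIRBuilder B(MF);
B.setCSEInfo(&Info);
B.setInsertPt(MBB, MBB.begin());
LLT s32 = LLT::scalar(32);
auto A = B.buildConstant(s32, 42);
auto C = B.buildConstant(s32, 42);
assert(A == C && "second buildConstant is CSE'd to the first G_CONSTANT");
// Binary ops on G_CONSTANT operands are folded instead of being emitted:
auto Sum = B.buildAdd(s32, B.buildConstant(s32, 2), B.buildConstant(s32, 3));
// Sum defines a G_CONSTANT of 5 rather than a G_ADD.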
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CallLowering.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CallLowering.h
new file mode 100644
index 0000000000..549f200269
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -0,0 +1,474 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/CodeGen/GlobalISel/CallLowering.h - Call lowering ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file describes how to lower LLVM calls to machine code calls.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_CALLLOWERING_H
+#define LLVM_CODEGEN_GLOBALISEL_CALLLOWERING_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/TargetCallingConv.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MachineValueType.h"
+#include <cstdint>
+#include <functional>
+
+namespace llvm {
+
+class CallBase;
+class DataLayout;
+class Function;
+class FunctionLoweringInfo;
+class MachineIRBuilder;
+struct MachinePointerInfo;
+class MachineRegisterInfo;
+class TargetLowering;
+class Value;
+
+class CallLowering {
+ const TargetLowering *TLI;
+
+ virtual void anchor();
+public:
+ struct BaseArgInfo {
+ Type *Ty;
+ SmallVector<ISD::ArgFlagsTy, 4> Flags;
+ bool IsFixed;
+
+ BaseArgInfo(Type *Ty,
+ ArrayRef<ISD::ArgFlagsTy> Flags = ArrayRef<ISD::ArgFlagsTy>(),
+ bool IsFixed = true)
+ : Ty(Ty), Flags(Flags.begin(), Flags.end()), IsFixed(IsFixed) {}
+
+ BaseArgInfo() : Ty(nullptr), IsFixed(false) {}
+ };
+
+ struct ArgInfo : public BaseArgInfo {
+ SmallVector<Register, 4> Regs;
+ // If the argument had to be split into multiple parts according to the
+ // target calling convention, then this contains the original vregs
+ // if the argument was an incoming arg.
+ SmallVector<Register, 2> OrigRegs;
+
+ ArgInfo(ArrayRef<Register> Regs, Type *Ty,
+ ArrayRef<ISD::ArgFlagsTy> Flags = ArrayRef<ISD::ArgFlagsTy>(),
+ bool IsFixed = true)
+ : BaseArgInfo(Ty, Flags, IsFixed), Regs(Regs.begin(), Regs.end()) {
+ if (!Regs.empty() && Flags.empty())
+ this->Flags.push_back(ISD::ArgFlagsTy());
+ // FIXME: We should have just one way of saying "no register".
+ assert(((Ty->isVoidTy() || Ty->isEmptyTy()) ==
+ (Regs.empty() || Regs[0] == 0)) &&
+ "only void types should have no register");
+ }
+
+ ArgInfo() : BaseArgInfo() {}
+ };
+
+ struct CallLoweringInfo {
+ /// Calling convention to be used for the call.
+ CallingConv::ID CallConv = CallingConv::C;
+
+ /// Destination of the call. It should be either a register, globaladdress,
+ /// or externalsymbol.
+ MachineOperand Callee = MachineOperand::CreateImm(0);
+
+ /// Descriptor for the return type of the function.
+ ArgInfo OrigRet;
+
+ /// List of descriptors of the arguments passed to the function.
+ SmallVector<ArgInfo, 8> OrigArgs;
+
+ /// Valid if the call has a swifterror inout parameter, and contains the
+ /// vreg that the swifterror should be copied into after the call.
+ Register SwiftErrorVReg;
+
+ MDNode *KnownCallees = nullptr;
+
+ /// True if the call must be tail call optimized.
+ bool IsMustTailCall = false;
+
+ /// True if the call passes all target-independent checks for tail call
+ /// optimization.
+ bool IsTailCall = false;
+
+ /// True if the call was lowered as a tail call. This is consumed by the
+ /// legalizer. This allows the legalizer to lower libcalls as tail calls.
+ bool LoweredTailCall = false;
+
+ /// True if the call is to a vararg function.
+ bool IsVarArg = false;
+
+ /// True if the function's return value can be lowered to registers.
+ bool CanLowerReturn = true;
+
+ /// VReg to hold the hidden sret parameter.
+ Register DemoteRegister;
+
+ /// The stack index for sret demotion.
+ int DemoteStackIndex;
+ };
+
+ /// Argument handling is mostly uniform between the four places that
+ /// make these decisions: function formal arguments, call
+ /// instruction args, call instruction returns and function
+ /// returns. However, once a decision has been made on where an
+ /// argument should go, exactly what happens can vary slightly. This
+ /// class abstracts the differences.
+ struct ValueHandler {
+ ValueHandler(bool IsIncoming, MachineIRBuilder &MIRBuilder,
+ MachineRegisterInfo &MRI, CCAssignFn *AssignFn)
+ : MIRBuilder(MIRBuilder), MRI(MRI), AssignFn(AssignFn),
+ IsIncomingArgumentHandler(IsIncoming) {}
+
+ virtual ~ValueHandler() = default;
+
+ /// Returns true if the handler is dealing with incoming arguments,
+ /// i.e. those that move values from some physical location to vregs.
+ bool isIncomingArgumentHandler() const {
+ return IsIncomingArgumentHandler;
+ }
+
+ /// Materialize a VReg containing the address of the specified
+ /// stack-based object. This is either based on a FrameIndex or
+ /// direct SP manipulation, depending on the context. \p MPO
+ /// should be initialized to an appropriate description of the
+ /// address created.
+ virtual Register getStackAddress(uint64_t Size, int64_t Offset,
+ MachinePointerInfo &MPO) = 0;
+
+ /// The specified value has been assigned to a physical register,
+ /// handle the appropriate COPY (either to or from) and mark any
+ /// relevant uses/defines as needed.
+ virtual void assignValueToReg(Register ValVReg, Register PhysReg,
+ CCValAssign &VA) = 0;
+
+ /// The specified value has been assigned to a stack
+ /// location. Load or store it there, with appropriate extension
+ /// if necessary.
+ virtual void assignValueToAddress(Register ValVReg, Register Addr,
+ uint64_t Size, MachinePointerInfo &MPO,
+ CCValAssign &VA) = 0;
+
+ /// An overload which takes an ArgInfo if additional information about
+ /// the arg is needed.
+ virtual void assignValueToAddress(const ArgInfo &Arg, Register Addr,
+ uint64_t Size, MachinePointerInfo &MPO,
+ CCValAssign &VA) {
+ assert(Arg.Regs.size() == 1);
+ assignValueToAddress(Arg.Regs[0], Addr, Size, MPO, VA);
+ }
+
+ /// Handle custom values, which may be passed into one or more of \p VAs.
+ /// \return The number of \p VAs that have been assigned after the first
+ /// one, and which should therefore be skipped from further
+ /// processing.
+ virtual unsigned assignCustomValue(const ArgInfo &Arg,
+ ArrayRef<CCValAssign> VAs) {
+ // This is not a pure virtual method because not all targets need to worry
+ // about custom values.
+ llvm_unreachable("Custom values not supported");
+ }
+
+    /// Extend a register to the location type given in VA, capped at extending
+    /// to at most MaxSizeBits bits. If MaxSizeBits is 0 then no maximum is set.
+ Register extendRegister(Register ValReg, CCValAssign &VA,
+ unsigned MaxSizeBits = 0);
+
+ virtual bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
+ CCValAssign::LocInfo LocInfo, const ArgInfo &Info,
+ ISD::ArgFlagsTy Flags, CCState &State) {
+ return AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
+ }
+
+ MachineIRBuilder &MIRBuilder;
+ MachineRegisterInfo &MRI;
+ CCAssignFn *AssignFn;
+
+ private:
+ bool IsIncomingArgumentHandler;
+ virtual void anchor();
+ };
+
+ struct IncomingValueHandler : public ValueHandler {
+ IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
+ CCAssignFn *AssignFn)
+ : ValueHandler(true, MIRBuilder, MRI, AssignFn) {}
+ };
+
+ struct OutgoingValueHandler : public ValueHandler {
+ OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
+ CCAssignFn *AssignFn)
+ : ValueHandler(false, MIRBuilder, MRI, AssignFn) {}
+ };
+
+protected:
+ /// Getter for generic TargetLowering class.
+ const TargetLowering *getTLI() const {
+ return TLI;
+ }
+
+ /// Getter for target specific TargetLowering class.
+ template <class XXXTargetLowering>
+ const XXXTargetLowering *getTLI() const {
+ return static_cast<const XXXTargetLowering *>(TLI);
+ }
+
+ /// \returns Flags corresponding to the attributes on the \p ArgIdx-th
+ /// parameter of \p Call.
+ ISD::ArgFlagsTy getAttributesForArgIdx(const CallBase &Call,
+ unsigned ArgIdx) const;
+
+ /// Adds flags to \p Flags based off of the attributes in \p Attrs.
+ /// \p OpIdx is the index in \p Attrs to add flags from.
+ void addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags,
+ const AttributeList &Attrs,
+ unsigned OpIdx) const;
+
+ template <typename FuncInfoTy>
+ void setArgFlags(ArgInfo &Arg, unsigned OpIdx, const DataLayout &DL,
+ const FuncInfoTy &FuncInfo) const;
+
+ /// Generate instructions for packing \p SrcRegs into one big register
+ /// corresponding to the aggregate type \p PackedTy.
+ ///
+ /// \param SrcRegs should contain one virtual register for each base type in
+ /// \p PackedTy, as returned by computeValueLLTs.
+ ///
+ /// \return The packed register.
+ Register packRegs(ArrayRef<Register> SrcRegs, Type *PackedTy,
+ MachineIRBuilder &MIRBuilder) const;
+
+ /// Generate instructions for unpacking \p SrcReg into the \p DstRegs
+ /// corresponding to the aggregate type \p PackedTy.
+ ///
+ /// \param DstRegs should contain one virtual register for each base type in
+ /// \p PackedTy, as returned by computeValueLLTs.
+ void unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg, Type *PackedTy,
+ MachineIRBuilder &MIRBuilder) const;
+
+ /// Invoke Handler::assignArg on each of the given \p Args and then use
+ /// \p Handler to move them to the assigned locations.
+ ///
+ /// \return True if everything has succeeded, false otherwise.
+ bool handleAssignments(MachineIRBuilder &MIRBuilder,
+ SmallVectorImpl<ArgInfo> &Args,
+ ValueHandler &Handler) const;
+ bool handleAssignments(CCState &CCState,
+ SmallVectorImpl<CCValAssign> &ArgLocs,
+ MachineIRBuilder &MIRBuilder,
+ SmallVectorImpl<ArgInfo> &Args,
+ ValueHandler &Handler) const;
+
+ /// Analyze passed or returned values from a call, supplied in \p ArgInfo,
+ /// incorporating info about the passed values into \p CCState.
+ ///
+ /// Used to check if arguments are suitable for tail call lowering.
+ bool analyzeArgInfo(CCState &CCState, SmallVectorImpl<ArgInfo> &Args,
+ CCAssignFn &AssignFnFixed,
+ CCAssignFn &AssignFnVarArg) const;
+
+ /// Check whether parameters to a call that are passed in callee saved
+ /// registers are the same as from the calling function. This needs to be
+ /// checked for tail call eligibility.
+ bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
+ const uint32_t *CallerPreservedMask,
+ const SmallVectorImpl<CCValAssign> &ArgLocs,
+ const SmallVectorImpl<ArgInfo> &OutVals) const;
+
+  /// \returns True if the calling conventions of the callee and the caller
+  /// pass results in the same way. Typically used for tail call eligibility
+  /// checks.
+ ///
+ /// \p Info is the CallLoweringInfo for the call.
+ /// \p MF is the MachineFunction for the caller.
+ /// \p InArgs contains the results of the call.
+ /// \p CalleeAssignFnFixed is the CCAssignFn to be used for the callee for
+ /// fixed arguments.
+ /// \p CalleeAssignFnVarArg is similar, but for varargs.
+ /// \p CallerAssignFnFixed is the CCAssignFn to be used for the caller for
+ /// fixed arguments.
+ /// \p CallerAssignFnVarArg is similar, but for varargs.
+ bool resultsCompatible(CallLoweringInfo &Info, MachineFunction &MF,
+ SmallVectorImpl<ArgInfo> &InArgs,
+ CCAssignFn &CalleeAssignFnFixed,
+ CCAssignFn &CalleeAssignFnVarArg,
+ CCAssignFn &CallerAssignFnFixed,
+ CCAssignFn &CallerAssignFnVarArg) const;
+
+public:
+ CallLowering(const TargetLowering *TLI) : TLI(TLI) {}
+ virtual ~CallLowering() = default;
+
+ /// \return true if the target is capable of handling swifterror values that
+ /// have been promoted to a specified register. The extended versions of
+ /// lowerReturn and lowerCall should be implemented.
+ virtual bool supportSwiftError() const {
+ return false;
+ }
+
+ /// Load the returned value from the stack into virtual registers in \p VRegs.
+ /// It uses the frame index \p FI and the start offset from \p DemoteReg.
+ /// The loaded data size will be determined from \p RetTy.
+ void insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy,
+ ArrayRef<Register> VRegs, Register DemoteReg,
+ int FI) const;
+
+ /// Store the return value given by \p VRegs into stack starting at the offset
+ /// specified in \p DemoteReg.
+ void insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy,
+ ArrayRef<Register> VRegs, Register DemoteReg) const;
+
+ /// Insert the hidden sret ArgInfo to the beginning of \p SplitArgs.
+ /// This function should be called from the target specific
+ /// lowerFormalArguments when \p F requires the sret demotion.
+ void insertSRetIncomingArgument(const Function &F,
+ SmallVectorImpl<ArgInfo> &SplitArgs,
+ Register &DemoteReg, MachineRegisterInfo &MRI,
+ const DataLayout &DL) const;
+
+ /// For the call-base described by \p CB, insert the hidden sret ArgInfo to
+ /// the OrigArgs field of \p Info.
+ void insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder,
+ const CallBase &CB,
+ CallLoweringInfo &Info) const;
+
+ /// \return True if the return type described by \p Outs can be returned
+ /// without performing sret demotion.
+ bool checkReturn(CCState &CCInfo, SmallVectorImpl<BaseArgInfo> &Outs,
+ CCAssignFn *Fn) const;
+
+ /// Get the type and the ArgFlags for the split components of \p RetTy as
+ /// returned by \c ComputeValueVTs.
+ void getReturnInfo(CallingConv::ID CallConv, Type *RetTy, AttributeList Attrs,
+ SmallVectorImpl<BaseArgInfo> &Outs,
+ const DataLayout &DL) const;
+
+ /// Toplevel function to check the return type based on the target calling
+ /// convention. \return True if the return value of \p MF can be returned
+ /// without performing sret demotion.
+ bool checkReturnTypeForCallConv(MachineFunction &MF) const;
+
+ /// This hook must be implemented to check whether the return values
+ /// described by \p Outs can fit into the return registers. If false
+ /// is returned, an sret-demotion is performed.
+ virtual bool canLowerReturn(MachineFunction &MF, CallingConv::ID CallConv,
+ SmallVectorImpl<BaseArgInfo> &Outs,
+ bool IsVarArg) const {
+ return true;
+ }
+
+ /// This hook must be implemented to lower outgoing return values, described
+ /// by \p Val, into the specified virtual registers \p VRegs.
+ /// This hook is used by GlobalISel.
+ ///
+ /// \p FLI is required for sret demotion.
+ ///
+ /// \p SwiftErrorVReg is non-zero if the function has a swifterror parameter
+ /// that needs to be implicitly returned.
+ ///
+ /// \return True if the lowering succeeds, false otherwise.
+ virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
+ ArrayRef<Register> VRegs, FunctionLoweringInfo &FLI,
+ Register SwiftErrorVReg) const {
+ if (!supportSwiftError()) {
+ assert(SwiftErrorVReg == 0 && "attempt to use unsupported swifterror");
+ return lowerReturn(MIRBuilder, Val, VRegs, FLI);
+ }
+ return false;
+ }
+
+ /// This hook behaves as the extended lowerReturn function, but for targets
+ /// that do not support swifterror value promotion.
+ virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
+ ArrayRef<Register> VRegs,
+ FunctionLoweringInfo &FLI) const {
+ return false;
+ }
+
+ virtual bool fallBackToDAGISel(const Function &F) const { return false; }
+
+ /// This hook must be implemented to lower the incoming (formal)
+ /// arguments, described by \p VRegs, for GlobalISel. Each argument
+ /// must end up in the related virtual registers described by \p VRegs.
+ /// In other words, the first argument should end up in \c VRegs[0],
+ /// the second in \c VRegs[1], and so on. For each argument, there will be one
+ /// register for each non-aggregate type, as returned by \c computeValueLLTs.
+  /// \p MIRBuilder is set to the proper insertion point for the argument
+  /// lowering. \p FLI is required for sret demotion.
+ ///
+ /// \return True if the lowering succeeded, false otherwise.
+ virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder,
+ const Function &F,
+ ArrayRef<ArrayRef<Register>> VRegs,
+ FunctionLoweringInfo &FLI) const {
+ return false;
+ }
+
+ /// This hook must be implemented to lower the given call instruction,
+ /// including argument and return value marshalling.
+  ///
+ /// \return true if the lowering succeeded, false otherwise.
+ virtual bool lowerCall(MachineIRBuilder &MIRBuilder,
+ CallLoweringInfo &Info) const {
+ return false;
+ }
+
+ /// Lower the given call instruction, including argument and return value
+ /// marshalling.
+ ///
+ /// \p CI is the call/invoke instruction.
+ ///
+ /// \p ResRegs are the registers where the call's return value should be
+ /// stored (or 0 if there is no return value). There will be one register for
+ /// each non-aggregate type, as returned by \c computeValueLLTs.
+ ///
+ /// \p ArgRegs is a list of lists of virtual registers containing each
+ /// argument that needs to be passed (argument \c i should be placed in \c
+ /// ArgRegs[i]). For each argument, there will be one register for each
+ /// non-aggregate type, as returned by \c computeValueLLTs.
+ ///
+ /// \p SwiftErrorVReg is non-zero if the call has a swifterror inout
+ /// parameter, and contains the vreg that the swifterror should be copied into
+ /// after the call.
+ ///
+ /// \p GetCalleeReg is a callback to materialize a register for the callee if
+ /// the target determines it cannot jump to the destination based purely on \p
+ /// CI. This might be because \p CI is indirect, or because of the limited
+ /// range of an immediate jump.
+ ///
+ /// \return true if the lowering succeeded, false otherwise.
+ bool lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &Call,
+ ArrayRef<Register> ResRegs,
+ ArrayRef<ArrayRef<Register>> ArgRegs, Register SwiftErrorVReg,
+ std::function<unsigned()> GetCalleeReg) const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_CALLLOWERING_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
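
The ValueHandler interface above is the extension point targets implement. The following is an editor's sketch of the shape such a handler takes for incoming formal arguments; the 64-bit address-space-0 pointer type and the frame-index handling are illustrative assumptions, not any particular target's code, and inferAlignFromPtrInfo comes from GlobalISel/Utils.h.

struct MyFormalArgHandler : public CallLowering::IncomingValueHandler {
  MyFormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                     CCAssignFn *AssignFn)
      : IncomingValueHandler(MIRBuilder, MRI, AssignFn) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    // Describe the incoming stack slot with a fixed frame index and
    // materialize its address (64-bit pointers assumed).
    MachineFunction &MF = MIRBuilder.getMF();
    int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset,
                                                 /*IsImmutable=*/true);
    MPO = MachinePointerInfo::getFixedStack(MF, FI);
    return MIRBuilder.buildFrameIndex(LLT::pointer(0, 64), FI).getReg(0);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    // Mark the physical register live-in and copy it into the arg's vreg.
    MIRBuilder.getMBB().addLiveIn(PhysReg);
    MIRBuilder.buildCopy(ValVReg, PhysReg);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    // Load the stack-passed value from the address built above.
    auto *MMO = MIRBuilder.getMF().getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad, Size,
        inferAlignFromPtrInfo(MIRBuilder.getMF(), MPO));
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }
};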
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Combiner.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Combiner.h
new file mode 100644
index 0000000000..79feab0f8f
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Combiner.h
@@ -0,0 +1,57 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//== ----- llvm/CodeGen/GlobalISel/Combiner.h -------------------*- C++ -*-== //
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// This contains common code to drive combines. Combiner Passes will need to
+/// set up a CombinerInfo and call combineMachineInstrs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINER_H
+#define LLVM_CODEGEN_GLOBALISEL_COMBINER_H
+
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+class MachineRegisterInfo;
+class CombinerInfo;
+class GISelCSEInfo;
+class TargetPassConfig;
+class MachineFunction;
+
+class Combiner {
+public:
+ Combiner(CombinerInfo &CombinerInfo, const TargetPassConfig *TPC);
+
+  /// If CSEInfo is not null, then the Combiner will set up an observer for
+  /// CSEInfo and instantiate a CSEMIRBuilder. Pass nullptr if CSE is not
+  /// needed.
+ bool combineMachineInstrs(MachineFunction &MF, GISelCSEInfo *CSEInfo);
+
+protected:
+ CombinerInfo &CInfo;
+
+ MachineRegisterInfo *MRI = nullptr;
+ const TargetPassConfig *TPC;
+ std::unique_ptr<MachineIRBuilder> Builder;
+};
+
+} // End namespace llvm.
+
+#endif // LLVM_CODEGEN_GLOBALISEL_COMBINER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
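
An editor's sketch of the driver pattern this header supports, assuming a hypothetical MyCombinerPass that has required TargetPassConfig and GISelCSEAnalysisWrapperPass, and a hypothetical MyCombinerInfo subclass of CombinerInfo whose combine() callback invokes CombinerHelper routines:

bool MyCombinerPass::runOnMachineFunction(MachineFunction &MF) {
  const auto *TPC = &getAnalysis<TargetPassConfig>();
  GISelCSEAnalysisWrapper &Wrapper =
      getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
  GISelCSEInfo *CSEInfo =
      &Wrapper.get(getStandardCSEConfigForOpt(CodeGenOpt::Default));

  MyCombinerInfo CInfo; // hypothetical CombinerInfo subclass
  Combiner C(CInfo, TPC);
  // With a non-null CSEInfo the Combiner installs a CSE observer and builds
  // instructions through a CSEMIRBuilder; pass nullptr to opt out.
  return C.combineMachineInstrs(MF, CSEInfo);
}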
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
new file mode 100644
index 0000000000..1d29a2ddc8
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -0,0 +1,555 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- llvm/CodeGen/GlobalISel/CombinerHelper.h --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===--------------------------------------------------------------------===//
+//
+/// This contains common combine transformations that may be used in a combine
+/// pass, or by the target elsewhere.
+/// Targets can pick individual opcode transformations from the helper or use
+/// tryCombine which invokes all transformations. All of the transformations
+/// return true if the MachineInstr changed and false otherwise.
+//
+//===--------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINER_HELPER_H
+#define LLVM_CODEGEN_GLOBALISEL_COMBINER_HELPER_H
+
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/LowLevelType.h"
+#include "llvm/CodeGen/Register.h"
+#include "llvm/Support/Alignment.h"
+
+namespace llvm {
+
+class GISelChangeObserver;
+class MachineIRBuilder;
+class MachineInstrBuilder;
+class MachineRegisterInfo;
+class MachineInstr;
+class MachineOperand;
+class GISelKnownBits;
+class MachineDominatorTree;
+class LegalizerInfo;
+struct LegalityQuery;
+class TargetLowering;
+
+struct PreferredTuple {
+ LLT Ty; // The result type of the extend.
+ unsigned ExtendOpcode; // G_ANYEXT/G_SEXT/G_ZEXT
+ MachineInstr *MI;
+};
+
+struct IndexedLoadStoreMatchInfo {
+ Register Addr;
+ Register Base;
+ Register Offset;
+ bool IsPre;
+};
+
+struct PtrAddChain {
+ int64_t Imm;
+ Register Base;
+};
+
+struct RegisterImmPair {
+ Register Reg;
+ int64_t Imm;
+};
+
+struct ShiftOfShiftedLogic {
+ MachineInstr *Logic;
+ MachineInstr *Shift2;
+ Register LogicNonShiftReg;
+ uint64_t ValSum;
+};
+
+using OperandBuildSteps =
+ SmallVector<std::function<void(MachineInstrBuilder &)>, 4>;
+struct InstructionBuildSteps {
+  unsigned Opcode = 0;          ///< The opcode for the produced instruction.
+  OperandBuildSteps OperandFns; ///< Operands to be added to the instruction.
+ InstructionBuildSteps() = default;
+ InstructionBuildSteps(unsigned Opcode, const OperandBuildSteps &OperandFns)
+ : Opcode(Opcode), OperandFns(OperandFns) {}
+};
+
+struct InstructionStepsMatchInfo {
+ /// Describes instructions to be built during a combine.
+ SmallVector<InstructionBuildSteps, 2> InstrsToBuild;
+ InstructionStepsMatchInfo() = default;
+ InstructionStepsMatchInfo(
+ std::initializer_list<InstructionBuildSteps> InstrsToBuild)
+ : InstrsToBuild(InstrsToBuild) {}
+};
+
+class CombinerHelper {
+protected:
+ MachineIRBuilder &Builder;
+ MachineRegisterInfo &MRI;
+ GISelChangeObserver &Observer;
+ GISelKnownBits *KB;
+ MachineDominatorTree *MDT;
+ const LegalizerInfo *LI;
+
+public:
+ CombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B,
+ GISelKnownBits *KB = nullptr,
+ MachineDominatorTree *MDT = nullptr,
+ const LegalizerInfo *LI = nullptr);
+
+ GISelKnownBits *getKnownBits() const {
+ return KB;
+ }
+
+ const TargetLowering &getTargetLowering() const;
+
+ /// \return true if the combine is running prior to legalization, or if \p
+ /// Query is legal on the target.
+ bool isLegalOrBeforeLegalizer(const LegalityQuery &Query) const;
+
+  /// Call MachineRegisterInfo::replaceRegWith() and inform the observer of
+  /// the changes.
+  void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
+                      Register ToReg) const;
+
+ /// Replace a single register operand with a new register and inform the
+ /// observer of the changes.
+ void replaceRegOpWith(MachineRegisterInfo &MRI, MachineOperand &FromRegOp,
+ Register ToReg) const;
+
+ /// If \p MI is COPY, try to combine it.
+ /// Returns true if MI changed.
+ bool tryCombineCopy(MachineInstr &MI);
+ bool matchCombineCopy(MachineInstr &MI);
+ void applyCombineCopy(MachineInstr &MI);
+
+ /// Returns true if \p DefMI precedes \p UseMI or they are the same
+ /// instruction. Both must be in the same basic block.
+ bool isPredecessor(const MachineInstr &DefMI, const MachineInstr &UseMI);
+
+ /// Returns true if \p DefMI dominates \p UseMI. By definition an
+ /// instruction dominates itself.
+ ///
+ /// If we haven't been provided with a MachineDominatorTree during
+ /// construction, this function returns a conservative result that tracks just
+ /// a single basic block.
+ bool dominates(const MachineInstr &DefMI, const MachineInstr &UseMI);
+
+  /// If \p MI is an extend that consumes the result of a load, try to combine it.
+ /// Returns true if MI changed.
+ bool tryCombineExtendingLoads(MachineInstr &MI);
+ bool matchCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo);
+ void applyCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo);
+
+ /// Combine \p MI into a pre-indexed or post-indexed load/store operation if
+ /// legal and the surrounding code makes it useful.
+ bool tryCombineIndexedLoadStore(MachineInstr &MI);
+ bool matchCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo);
+ void applyCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo);
+
+ bool matchSextTruncSextLoad(MachineInstr &MI);
+ bool applySextTruncSextLoad(MachineInstr &MI);
+
+ /// Match sext_inreg(load p), imm -> sextload p
+ bool matchSextInRegOfLoad(MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo);
+ bool applySextInRegOfLoad(MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo);
+
+ /// If a brcond's true block is not the fallthrough, make it so by inverting
+ /// the condition and swapping operands.
+ bool matchOptBrCondByInvertingCond(MachineInstr &MI);
+ void applyOptBrCondByInvertingCond(MachineInstr &MI);
+
+ /// If \p MI is G_CONCAT_VECTORS, try to combine it.
+ /// Returns true if MI changed.
+ /// Right now, we support:
+ /// - concat_vector(undef, undef) => undef
+ /// - concat_vector(build_vector(A, B), build_vector(C, D)) =>
+ /// build_vector(A, B, C, D)
+ ///
+ /// \pre MI.getOpcode() == G_CONCAT_VECTORS.
+ bool tryCombineConcatVectors(MachineInstr &MI);
+ /// Check if the G_CONCAT_VECTORS \p MI is undef or if it
+ /// can be flattened into a build_vector.
+ /// In the first case \p IsUndef will be true.
+ /// In the second case \p Ops will contain the operands needed
+ /// to produce the flattened build_vector.
+ ///
+ /// \pre MI.getOpcode() == G_CONCAT_VECTORS.
+ bool matchCombineConcatVectors(MachineInstr &MI, bool &IsUndef,
+ SmallVectorImpl<Register> &Ops);
+ /// Replace \p MI with a flattened build_vector with \p Ops or an
+ /// implicit_def if IsUndef is true.
+ void applyCombineConcatVectors(MachineInstr &MI, bool IsUndef,
+ const ArrayRef<Register> Ops);
+
+ /// Try to combine G_SHUFFLE_VECTOR into G_CONCAT_VECTORS.
+ /// Returns true if MI changed.
+ ///
+ /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
+ bool tryCombineShuffleVector(MachineInstr &MI);
+ /// Check if the G_SHUFFLE_VECTOR \p MI can be replaced by a
+ /// concat_vectors.
+ /// \p Ops will contain the operands needed to produce the flattened
+ /// concat_vectors.
+ ///
+ /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
+ bool matchCombineShuffleVector(MachineInstr &MI,
+ SmallVectorImpl<Register> &Ops);
+ /// Replace \p MI with a concat_vectors with \p Ops.
+ void applyCombineShuffleVector(MachineInstr &MI,
+ const ArrayRef<Register> Ops);
+
+ /// Optimize memcpy intrinsics et al, e.g. constant len calls.
+  /// \p MaxLen, if non-zero, specifies the max length of a mem libcall to inline.
+ ///
+ /// For example (pre-indexed):
+ ///
+ /// $addr = G_PTR_ADD $base, $offset
+ /// [...]
+ /// $val = G_LOAD $addr
+ /// [...]
+ /// $whatever = COPY $addr
+ ///
+ /// -->
+ ///
+ /// $val, $addr = G_INDEXED_LOAD $base, $offset, 1 (IsPre)
+ /// [...]
+ /// $whatever = COPY $addr
+ ///
+ /// or (post-indexed):
+ ///
+ /// G_STORE $val, $base
+ /// [...]
+ /// $addr = G_PTR_ADD $base, $offset
+ /// [...]
+ /// $whatever = COPY $addr
+ ///
+ /// -->
+ ///
+ /// $addr = G_INDEXED_STORE $val, $base, $offset
+ /// [...]
+ /// $whatever = COPY $addr
+ bool tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen = 0);
+
+ bool matchPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo);
+ bool applyPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo);
+
+  /// Fold (shift (shift base, x), y) -> (shift base, (x+y))
+ bool matchShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo);
+ bool applyShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo);
+
+ /// If we have a shift-by-constant of a bitwise logic op that itself has a
+ /// shift-by-constant operand with identical opcode, we may be able to convert
+ /// that into 2 independent shifts followed by the logic op.
+ bool matchShiftOfShiftedLogic(MachineInstr &MI,
+ ShiftOfShiftedLogic &MatchInfo);
+ bool applyShiftOfShiftedLogic(MachineInstr &MI,
+ ShiftOfShiftedLogic &MatchInfo);
+
+ /// Transform a multiply by a power-of-2 value to a left shift.
+ bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);
+ bool applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);
+
+  /// Transform a G_SHL with an extended source into a narrower shift if
+  /// possible.
+ bool matchCombineShlOfExtend(MachineInstr &MI, RegisterImmPair &MatchData);
+ bool applyCombineShlOfExtend(MachineInstr &MI,
+ const RegisterImmPair &MatchData);
+
+ /// Reduce a shift by a constant to an unmerge and a shift on a half sized
+ /// type. This will not produce a shift smaller than \p TargetShiftSize.
+ bool matchCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftSize,
+ unsigned &ShiftVal);
+ bool applyCombineShiftToUnmerge(MachineInstr &MI, const unsigned &ShiftVal);
+ bool tryCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftAmount);
+
+ /// Transform <ty,...> G_UNMERGE(G_MERGE ty X, Y, Z) -> ty X, Y, Z.
+ bool
+ matchCombineUnmergeMergeToPlainValues(MachineInstr &MI,
+ SmallVectorImpl<Register> &Operands);
+ bool
+ applyCombineUnmergeMergeToPlainValues(MachineInstr &MI,
+ SmallVectorImpl<Register> &Operands);
+
+ /// Transform G_UNMERGE Constant -> Constant1, Constant2, ...
+ bool matchCombineUnmergeConstant(MachineInstr &MI,
+ SmallVectorImpl<APInt> &Csts);
+ bool applyCombineUnmergeConstant(MachineInstr &MI,
+ SmallVectorImpl<APInt> &Csts);
+
+ /// Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
+ bool matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);
+ bool applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);
+
+ /// Transform X, Y = G_UNMERGE(G_ZEXT(Z)) -> X = G_ZEXT(Z); Y = G_CONSTANT 0
+ bool matchCombineUnmergeZExtToZExt(MachineInstr &MI);
+ bool applyCombineUnmergeZExtToZExt(MachineInstr &MI);
+
+ /// Transform fp_instr(cst) to constant result of the fp operation.
+ bool matchCombineConstantFoldFpUnary(MachineInstr &MI,
+ Optional<APFloat> &Cst);
+ bool applyCombineConstantFoldFpUnary(MachineInstr &MI,
+ Optional<APFloat> &Cst);
+
+ /// Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address space.
+ bool matchCombineI2PToP2I(MachineInstr &MI, Register &Reg);
+ bool applyCombineI2PToP2I(MachineInstr &MI, Register &Reg);
+
+ /// Transform PtrToInt(IntToPtr(x)) to x.
+ bool matchCombineP2IToI2P(MachineInstr &MI, Register &Reg);
+ bool applyCombineP2IToI2P(MachineInstr &MI, Register &Reg);
+
+ /// Transform G_ADD (G_PTRTOINT x), y -> G_PTRTOINT (G_PTR_ADD x, y)
+ /// Transform G_ADD y, (G_PTRTOINT x) -> G_PTRTOINT (G_PTR_ADD x, y)
+ bool matchCombineAddP2IToPtrAdd(MachineInstr &MI,
+ std::pair<Register, bool> &PtrRegAndCommute);
+ bool applyCombineAddP2IToPtrAdd(MachineInstr &MI,
+ std::pair<Register, bool> &PtrRegAndCommute);
+
+  /// Transform G_PTR_ADD (G_PTRTOINT C1), C2 -> C1 + C2
+ bool matchCombineConstPtrAddToI2P(MachineInstr &MI, int64_t &NewCst);
+ bool applyCombineConstPtrAddToI2P(MachineInstr &MI, int64_t &NewCst);
+
+ /// Transform anyext(trunc(x)) to x.
+ bool matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);
+ bool applyCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);
+
+ /// Transform [asz]ext([asz]ext(x)) to [asz]ext x.
+ bool matchCombineExtOfExt(MachineInstr &MI,
+ std::tuple<Register, unsigned> &MatchInfo);
+ bool applyCombineExtOfExt(MachineInstr &MI,
+ std::tuple<Register, unsigned> &MatchInfo);
+
+ /// Transform fneg(fneg(x)) to x.
+ bool matchCombineFNegOfFNeg(MachineInstr &MI, Register &Reg);
+
+ /// Match fabs(fabs(x)) to fabs(x).
+ bool matchCombineFAbsOfFAbs(MachineInstr &MI, Register &Src);
+ bool applyCombineFAbsOfFAbs(MachineInstr &MI, Register &Src);
+
+ /// Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
+ bool matchCombineTruncOfExt(MachineInstr &MI,
+ std::pair<Register, unsigned> &MatchInfo);
+ bool applyCombineTruncOfExt(MachineInstr &MI,
+ std::pair<Register, unsigned> &MatchInfo);
+
+  /// Transform trunc (shl x, K) to shl (trunc x), K, where
+  /// K < VT.getScalarSizeInBits().
+ bool matchCombineTruncOfShl(MachineInstr &MI,
+ std::pair<Register, Register> &MatchInfo);
+ bool applyCombineTruncOfShl(MachineInstr &MI,
+ std::pair<Register, Register> &MatchInfo);
+
+ /// Transform G_MUL(x, -1) to G_SUB(0, x)
+ bool applyCombineMulByNegativeOne(MachineInstr &MI);
+
+ /// Return true if any explicit use operand on \p MI is defined by a
+ /// G_IMPLICIT_DEF.
+ bool matchAnyExplicitUseIsUndef(MachineInstr &MI);
+
+ /// Return true if all register explicit use operands on \p MI are defined by
+ /// a G_IMPLICIT_DEF.
+ bool matchAllExplicitUsesAreUndef(MachineInstr &MI);
+
+ /// Return true if a G_SHUFFLE_VECTOR instruction \p MI has an undef mask.
+ bool matchUndefShuffleVectorMask(MachineInstr &MI);
+
+ /// Return true if a G_STORE instruction \p MI is storing an undef value.
+ bool matchUndefStore(MachineInstr &MI);
+
+ /// Return true if a G_SELECT instruction \p MI has an undef comparison.
+ bool matchUndefSelectCmp(MachineInstr &MI);
+
+ /// Return true if a G_SELECT instruction \p MI has a constant comparison. If
+ /// true, \p OpIdx will store the operand index of the known selected value.
+ bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx);
+
+ /// Replace an instruction with a G_FCONSTANT with value \p C.
+ bool replaceInstWithFConstant(MachineInstr &MI, double C);
+
+ /// Replace an instruction with a G_CONSTANT with value \p C.
+ bool replaceInstWithConstant(MachineInstr &MI, int64_t C);
+
+ /// Replace an instruction with a G_IMPLICIT_DEF.
+ bool replaceInstWithUndef(MachineInstr &MI);
+
+ /// Delete \p MI and replace all of its uses with its \p OpIdx-th operand.
+ bool replaceSingleDefInstWithOperand(MachineInstr &MI, unsigned OpIdx);
+
+ /// Delete \p MI and replace all of its uses with \p Replacement.
+ bool replaceSingleDefInstWithReg(MachineInstr &MI, Register Replacement);
+
+  /// Return true if \p MOP1 and \p MOP2 are register operands that are defined
+  /// by equivalent instructions.
+ bool matchEqualDefs(const MachineOperand &MOP1, const MachineOperand &MOP2);
+
+ /// Return true if \p MOP is defined by a G_CONSTANT with a value equal to
+ /// \p C.
+ bool matchConstantOp(const MachineOperand &MOP, int64_t C);
+
+ /// Optimize (cond ? x : x) -> x
+ bool matchSelectSameVal(MachineInstr &MI);
+
+ /// Optimize (x op x) -> x
+ bool matchBinOpSameVal(MachineInstr &MI);
+
+ /// Check if operand \p OpIdx is zero.
+ bool matchOperandIsZero(MachineInstr &MI, unsigned OpIdx);
+
+ /// Check if operand \p OpIdx is undef.
+ bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx);
+
+ /// Check if operand \p OpIdx is known to be a power of 2.
+ bool matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI, unsigned OpIdx);
+
+ /// Erase \p MI
+ bool eraseInst(MachineInstr &MI);
+
+ /// Return true if MI is a G_ADD which can be simplified to a G_SUB.
+ bool matchSimplifyAddToSub(MachineInstr &MI,
+ std::tuple<Register, Register> &MatchInfo);
+ bool applySimplifyAddToSub(MachineInstr &MI,
+ std::tuple<Register, Register> &MatchInfo);
+
+ /// Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
+ bool
+ matchHoistLogicOpWithSameOpcodeHands(MachineInstr &MI,
+ InstructionStepsMatchInfo &MatchInfo);
+
+ /// Replace \p MI with a series of instructions described in \p MatchInfo.
+ bool applyBuildInstructionSteps(MachineInstr &MI,
+ InstructionStepsMatchInfo &MatchInfo);
+
+ /// Match ashr (shl x, C), C -> sext_inreg (C)
+ bool matchAshrShlToSextInreg(MachineInstr &MI,
+ std::tuple<Register, int64_t> &MatchInfo);
+ bool applyAshShlToSextInreg(MachineInstr &MI,
+ std::tuple<Register, int64_t> &MatchInfo);
+ /// \return true if \p MI is a G_AND instruction whose operands are x and y
+  /// where x & y == x or x & y == y. (E.g., one of the operands is an all-ones
+  /// value.)
+ ///
+ /// \param [in] MI - The G_AND instruction.
+ /// \param [out] Replacement - A register the G_AND should be replaced with on
+ /// success.
+ bool matchRedundantAnd(MachineInstr &MI, Register &Replacement);
+
+ /// \return true if \p MI is a G_OR instruction whose operands are x and y
+  /// where x | y == x or x | y == y. (E.g., one of the operands is an
+  /// all-zeros value.)
+ ///
+ /// \param [in] MI - The G_OR instruction.
+ /// \param [out] Replacement - A register the G_OR should be replaced with on
+ /// success.
+ bool matchRedundantOr(MachineInstr &MI, Register &Replacement);
+
+ /// \return true if \p MI is a G_SEXT_INREG that can be erased.
+ bool matchRedundantSExtInReg(MachineInstr &MI);
+
+ /// Combine inverting a result of a compare into the opposite cond code.
+ bool matchNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);
+ bool applyNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);
+
+ /// Fold (xor (and x, y), y) -> (and (not x), y)
+ ///{
+ bool matchXorOfAndWithSameReg(MachineInstr &MI,
+ std::pair<Register, Register> &MatchInfo);
+ bool applyXorOfAndWithSameReg(MachineInstr &MI,
+ std::pair<Register, Register> &MatchInfo);
+ ///}
+
+ /// Combine G_PTR_ADD with nullptr to G_INTTOPTR
+ bool matchPtrAddZero(MachineInstr &MI);
+ bool applyPtrAddZero(MachineInstr &MI);
+
+ /// Combine G_UREM x, (known power of 2) to an add and bitmasking.
+ bool applySimplifyURemByPow2(MachineInstr &MI);
+
+ bool matchCombineInsertVecElts(MachineInstr &MI,
+ SmallVectorImpl<Register> &MatchInfo);
+
+ bool applyCombineInsertVecElts(MachineInstr &MI,
+ SmallVectorImpl<Register> &MatchInfo);
+
+ /// Match expression trees of the form
+ ///
+ /// \code
+ /// sN *a = ...
+ /// sM val = a[0] | (a[1] << N) | (a[2] << 2N) | (a[3] << 3N) ...
+ /// \endcode
+ ///
+  /// And check if the tree can be replaced with an M-bit load + possibly a
+ /// bswap.
+ bool matchLoadOrCombine(MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo);
+ bool applyLoadOrCombine(MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo);
+
+ /// Try to transform \p MI by using all of the above
+ /// combine functions. Returns true if changed.
+ bool tryCombine(MachineInstr &MI);
+
+private:
+ // Memcpy family optimization helpers.
+ bool optimizeMemcpy(MachineInstr &MI, Register Dst, Register Src,
+ unsigned KnownLen, Align DstAlign, Align SrcAlign,
+ bool IsVolatile);
+ bool optimizeMemmove(MachineInstr &MI, Register Dst, Register Src,
+ unsigned KnownLen, Align DstAlign, Align SrcAlign,
+ bool IsVolatile);
+ bool optimizeMemset(MachineInstr &MI, Register Dst, Register Val,
+ unsigned KnownLen, Align DstAlign, bool IsVolatile);
+
+ /// Given a non-indexed load or store instruction \p MI, find an offset that
+ /// can be usefully and legally folded into it as a post-indexing operation.
+ ///
+ /// \returns true if a candidate is found.
+ bool findPostIndexCandidate(MachineInstr &MI, Register &Addr, Register &Base,
+ Register &Offset);
+
+ /// Given a non-indexed load or store instruction \p MI, find an offset that
+ /// can be usefully and legally folded into it as a pre-indexing operation.
+ ///
+ /// \returns true if a candidate is found.
+ bool findPreIndexCandidate(MachineInstr &MI, Register &Addr, Register &Base,
+ Register &Offset);
+
+ /// Helper function for matchLoadOrCombine. Searches for Registers
+ /// which may have been produced by a load instruction + some arithmetic.
+ ///
+ /// \param [in] Root - The search root.
+ ///
+ /// \returns The Registers found during the search.
+ Optional<SmallVector<Register, 8>>
+ findCandidatesForLoadOrCombine(const MachineInstr *Root) const;
+
+ /// Helper function for matchLoadOrCombine.
+ ///
+ /// Checks if every register in \p RegsToVisit is defined by a load
+ /// instruction + some arithmetic.
+ ///
+ /// \param [out] MemOffset2Idx - Maps the byte positions each load ends up
+ /// at to the index of the load.
+ /// \param [in] MemSizeInBits - The number of bits each load should produce.
+ ///
+  /// \returns On success, the lowest-index load found and its index.
+ Optional<std::pair<MachineInstr *, int64_t>> findLoadOffsetsForLoadOrCombine(
+ SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
+ const SmallVector<Register, 8> &RegsToVisit,
+ const unsigned MemSizeInBits);
+};
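+
+/// Illustrative sketch (not part of the original header): generated combiners
+/// typically pair each match* entry point with its apply* counterpart, along
+/// these lines; `Helper` and `MI` are assumed to be in scope.
+/// \code
+///   std::tuple<Register, Register> MatchInfo;
+///   if (Helper.matchSimplifyAddToSub(MI, MatchInfo))
+///     return Helper.applySimplifyAddToSub(MI, MatchInfo);
+/// \endcode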
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CombinerInfo.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CombinerInfo.h
new file mode 100644
index 0000000000..8b9b79a88b
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CombinerInfo.h
@@ -0,0 +1,83 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/CodeGen/GlobalISel/CombinerInfo.h ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// Interface for targets to specify which operations are combined, and how
+/// and when they are combined.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINER_INFO_H
+#define LLVM_CODEGEN_GLOBALISEL_COMBINER_INFO_H
+
+#include <cassert>
+namespace llvm {
+
+class GISelChangeObserver;
+class LegalizerInfo;
+class MachineInstr;
+class MachineIRBuilder;
+class MachineRegisterInfo;
+
+// Contains information relevant to enabling/disabling various combines for a
+// pass.
+class CombinerInfo {
+public:
+ CombinerInfo(bool AllowIllegalOps, bool ShouldLegalizeIllegal,
+ const LegalizerInfo *LInfo, bool OptEnabled, bool OptSize,
+ bool MinSize)
+ : IllegalOpsAllowed(AllowIllegalOps),
+ LegalizeIllegalOps(ShouldLegalizeIllegal), LInfo(LInfo),
+ EnableOpt(OptEnabled), EnableOptSize(OptSize), EnableMinSize(MinSize) {
+ assert(((AllowIllegalOps || !LegalizeIllegalOps) || LInfo) &&
+ "Expecting legalizerInfo when illegalops not allowed");
+ }
+ virtual ~CombinerInfo() = default;
+ /// If \p IllegalOpsAllowed is false, the CombinerHelper will make use of
+ /// the legalizerInfo to check for legality before each transformation.
+ bool IllegalOpsAllowed; // TODO: Make use of this.
+
+ /// If \p LegalizeIllegalOps is true, the Combiner will also legalize the
+ /// illegal ops that are created.
+ bool LegalizeIllegalOps; // TODO: Make use of this.
+ const LegalizerInfo *LInfo;
+
+  /// Whether optimizations should be enabled. This distinguishes between uses
+  /// of the combiner that run unconditionally and those that run only when
+  /// optimizations are specifically enabled.
+ bool EnableOpt;
+ /// Whether we're optimizing for size.
+ bool EnableOptSize;
+ /// Whether we're optimizing for minsize (-Oz).
+ bool EnableMinSize;
+
+ /// Attempt to combine instructions using MI as the root.
+ ///
+ /// Use Observer to report the creation, modification, and erasure of
+ /// instructions. GISelChangeObserver will automatically report certain
+ /// kinds of operations. These operations are:
+ /// * Instructions that are newly inserted into the MachineFunction
+ /// * Instructions that are erased from the MachineFunction.
+ ///
+  /// Instruction modifications, however, must be reported explicitly; this is
+  /// not automatic.
+ virtual bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
+ MachineIRBuilder &B) const = 0;
+};
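+
+/// Illustrative sketch: a minimal CombinerInfo subclass, roughly as a target
+/// combiner pass might define one. `MyCombinerInfo` is hypothetical.
+/// \code
+///   class MyCombinerInfo : public CombinerInfo {
+///   public:
+///     MyCombinerInfo(bool EnableOpt, bool OptSize, bool MinSize)
+///         : CombinerInfo(/*AllowIllegalOps*/ true,
+///                        /*ShouldLegalizeIllegal*/ false, /*LInfo*/ nullptr,
+///                        EnableOpt, OptSize, MinSize) {}
+///     bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
+///                  MachineIRBuilder &B) const override;
+///   };
+/// \endcode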
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h
new file mode 100644
index 0000000000..149c5eb7c4
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h
@@ -0,0 +1,83 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h --*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements a version of MachineIRBuilder which does trivial
+/// constant folding.
+//===----------------------------------------------------------------------===//
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+
+namespace llvm {
+
+/// An MIRBuilder which does trivial constant folding of binary ops.
+/// Calls to buildInstr will also try to constant fold binary ops.
+class ConstantFoldingMIRBuilder : public MachineIRBuilder {
+public:
+ // Pull in base class constructors.
+ using MachineIRBuilder::MachineIRBuilder;
+
+ virtual ~ConstantFoldingMIRBuilder() = default;
+
+ // Try to provide an overload for buildInstr for binary ops in order to
+ // constant fold.
+ MachineInstrBuilder buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
+ ArrayRef<SrcOp> SrcOps,
+ Optional<unsigned> Flags = None) override {
+ switch (Opc) {
+ default:
+ break;
+ case TargetOpcode::G_ADD:
+ case TargetOpcode::G_AND:
+ case TargetOpcode::G_ASHR:
+ case TargetOpcode::G_LSHR:
+ case TargetOpcode::G_MUL:
+ case TargetOpcode::G_OR:
+ case TargetOpcode::G_SHL:
+ case TargetOpcode::G_SUB:
+ case TargetOpcode::G_XOR:
+ case TargetOpcode::G_UDIV:
+ case TargetOpcode::G_SDIV:
+ case TargetOpcode::G_UREM:
+ case TargetOpcode::G_SREM: {
+ assert(DstOps.size() == 1 && "Invalid dst ops");
+ assert(SrcOps.size() == 2 && "Invalid src ops");
+ const DstOp &Dst = DstOps[0];
+ const SrcOp &Src0 = SrcOps[0];
+ const SrcOp &Src1 = SrcOps[1];
+ if (auto MaybeCst =
+ ConstantFoldBinOp(Opc, Src0.getReg(), Src1.getReg(), *getMRI()))
+ return buildConstant(Dst, MaybeCst->getSExtValue());
+ break;
+ }
+ case TargetOpcode::G_SEXT_INREG: {
+ assert(DstOps.size() == 1 && "Invalid dst ops");
+ assert(SrcOps.size() == 2 && "Invalid src ops");
+ const DstOp &Dst = DstOps[0];
+ const SrcOp &Src0 = SrcOps[0];
+ const SrcOp &Src1 = SrcOps[1];
+ if (auto MaybeCst =
+ ConstantFoldExtOp(Opc, Src0.getReg(), Src1.getImm(), *getMRI()))
+ return buildConstant(Dst, MaybeCst->getSExtValue());
+ break;
+ }
+ }
+ return MachineIRBuilder::buildInstr(Opc, DstOps, SrcOps);
+ }
+};
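+
+/// Illustrative sketch: folding happens transparently through buildInstr;
+/// `MF` and `MBB` are assumed to be in scope.
+/// \code
+///   ConstantFoldingMIRBuilder Builder(MF);
+///   Builder.setInsertPt(MBB, MBB.end());
+///   auto LHS = Builder.buildConstant(LLT::scalar(32), 2);
+///   auto RHS = Builder.buildConstant(LLT::scalar(32), 3);
+///   // Emits G_CONSTANT 5 rather than a G_ADD of two constants.
+///   auto Sum = Builder.buildInstr(TargetOpcode::G_ADD, {LLT::scalar(32)},
+///                                 {LHS.getReg(0), RHS.getReg(0)});
+/// \endcode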
+} // namespace llvm
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h
new file mode 100644
index 0000000000..0833e960fe
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h
@@ -0,0 +1,150 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===----- llvm/CodeGen/GlobalISel/GISelChangeObserver.h --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// This contains common code to allow clients to notify changes to machine
+/// instr.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CODEGEN_GLOBALISEL_GISELCHANGEOBSERVER_H
+#define LLVM_CODEGEN_GLOBALISEL_GISELCHANGEOBSERVER_H
+
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/CodeGen/MachineFunction.h"
+
+namespace llvm {
+class MachineInstr;
+class MachineRegisterInfo;
+
+/// Abstract class that contains various methods for clients to notify about
+/// changes. This should be the preferred way for APIs to notify changes.
+/// Typically calling erasingInstr/createdInstr multiple times should not affect
+/// the result. The observer would likely need to check if it was already
+/// notified earlier (consider using GISelWorkList).
+class GISelChangeObserver {
+ SmallPtrSet<MachineInstr *, 4> ChangingAllUsesOfReg;
+
+public:
+ virtual ~GISelChangeObserver() {}
+
+ /// An instruction is about to be erased.
+ virtual void erasingInstr(MachineInstr &MI) = 0;
+
+ /// An instruction has been created and inserted into the function.
+ /// Note that the instruction might not be a fully fledged instruction at this
+ /// point and won't be if the MachineFunction::Delegate is calling it. This is
+ /// because the delegate only sees the construction of the MachineInstr before
+ /// operands have been added.
+ virtual void createdInstr(MachineInstr &MI) = 0;
+
+ /// This instruction is about to be mutated in some way.
+ virtual void changingInstr(MachineInstr &MI) = 0;
+
+ /// This instruction was mutated in some way.
+ virtual void changedInstr(MachineInstr &MI) = 0;
+
+ /// All the instructions using the given register are being changed.
+ /// For convenience, finishedChangingAllUsesOfReg() will report the completion
+ /// of the changes. The use list may change between this call and
+ /// finishedChangingAllUsesOfReg().
+ void changingAllUsesOfReg(const MachineRegisterInfo &MRI, Register Reg);
+ /// All instructions reported as changing by changingAllUsesOfReg() have
+ /// finished being changed.
+ void finishedChangingAllUsesOfReg();
+
+};
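+
+/// Illustrative sketch: a trivial observer that only counts events. A real
+/// client (e.g. a combiner) would typically re-add created or changed
+/// instructions to a worklist instead. `CountingObserver` is hypothetical.
+/// \code
+///   struct CountingObserver : public GISelChangeObserver {
+///     unsigned Created = 0, Erased = 0;
+///     void createdInstr(MachineInstr &MI) override { ++Created; }
+///     void erasingInstr(MachineInstr &MI) override { ++Erased; }
+///     void changingInstr(MachineInstr &MI) override {}
+///     void changedInstr(MachineInstr &MI) override {}
+///   };
+/// \endcode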
+
+/// Simple wrapper observer that takes several observers, and calls
+/// each one for each event. If there are multiple observers (say CSE,
+/// Legalizer, Combiner), it's sufficient to register this to the machine
+/// function as the delegate.
+class GISelObserverWrapper : public MachineFunction::Delegate,
+ public GISelChangeObserver {
+ SmallVector<GISelChangeObserver *, 4> Observers;
+
+public:
+ GISelObserverWrapper() = default;
+ GISelObserverWrapper(ArrayRef<GISelChangeObserver *> Obs)
+ : Observers(Obs.begin(), Obs.end()) {}
+ // Adds an observer.
+ void addObserver(GISelChangeObserver *O) { Observers.push_back(O); }
+ // Removes an observer from the list and does nothing if observer is not
+ // present.
+ void removeObserver(GISelChangeObserver *O) {
+ auto It = std::find(Observers.begin(), Observers.end(), O);
+ if (It != Observers.end())
+ Observers.erase(It);
+ }
+ // API for Observer.
+ void erasingInstr(MachineInstr &MI) override {
+ for (auto &O : Observers)
+ O->erasingInstr(MI);
+ }
+ void createdInstr(MachineInstr &MI) override {
+ for (auto &O : Observers)
+ O->createdInstr(MI);
+ }
+ void changingInstr(MachineInstr &MI) override {
+ for (auto &O : Observers)
+ O->changingInstr(MI);
+ }
+ void changedInstr(MachineInstr &MI) override {
+ for (auto &O : Observers)
+ O->changedInstr(MI);
+ }
+ // API for MachineFunction::Delegate
+ void MF_HandleInsertion(MachineInstr &MI) override { createdInstr(MI); }
+ void MF_HandleRemoval(MachineInstr &MI) override { erasingInstr(MI); }
+};
+
+/// A simple RAII based Delegate installer.
+/// Use this in a scope to install a delegate to the MachineFunction and reset
+/// it at the end of the scope.
+class RAIIDelegateInstaller {
+ MachineFunction &MF;
+ MachineFunction::Delegate *Delegate;
+
+public:
+ RAIIDelegateInstaller(MachineFunction &MF, MachineFunction::Delegate *Del);
+ ~RAIIDelegateInstaller();
+};
+
+/// A simple RAII based Observer installer.
+/// Use this in a scope to install the Observer to the MachineFunction and reset
+/// it at the end of the scope.
+class RAIIMFObserverInstaller {
+ MachineFunction &MF;
+
+public:
+ RAIIMFObserverInstaller(MachineFunction &MF, GISelChangeObserver &Observer);
+ ~RAIIMFObserverInstaller();
+};
+
+/// Class to install both of the above.
+class RAIIMFObsDelInstaller {
+ RAIIDelegateInstaller DelI;
+ RAIIMFObserverInstaller ObsI;
+
+public:
+ RAIIMFObsDelInstaller(MachineFunction &MF, GISelObserverWrapper &Wrapper)
+ : DelI(MF, &Wrapper), ObsI(MF, Wrapper) {}
+ ~RAIIMFObsDelInstaller() = default;
+};
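+
+/// Illustrative sketch: a pass typically aggregates its observers in a
+/// GISelObserverWrapper and installs it for the duration of a scope;
+/// `WorkListObserver` and `CSEInfo` are assumed to exist in the pass.
+/// \code
+///   GISelObserverWrapper WrapperObserver(&WorkListObserver);
+///   WrapperObserver.addObserver(CSEInfo);
+///   RAIIMFObsDelInstaller Installer(MF, WrapperObserver);
+///   // ... mutate MF; every registered observer is notified ...
+/// \endcode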
+
+} // namespace llvm
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelKnownBits.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelKnownBits.h
new file mode 100644
index 0000000000..452ddd17c0
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelKnownBits.h
@@ -0,0 +1,142 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/CodeGen/GlobalISel/GISelKnownBits.h ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// Provides analysis for querying information about KnownBits during GISel
+/// passes.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CODEGEN_GLOBALISEL_KNOWNBITSINFO_H
+#define LLVM_CODEGEN_GLOBALISEL_KNOWNBITSINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/Register.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Support/KnownBits.h"
+
+namespace llvm {
+
+class TargetLowering;
+class DataLayout;
+
+class GISelKnownBits : public GISelChangeObserver {
+ MachineFunction &MF;
+ MachineRegisterInfo &MRI;
+ const TargetLowering &TL;
+ const DataLayout &DL;
+ unsigned MaxDepth;
+ /// Cache maintained during a computeKnownBits request.
+ SmallDenseMap<Register, KnownBits, 16> ComputeKnownBitsCache;
+
+ void computeKnownBitsMin(Register Src0, Register Src1, KnownBits &Known,
+ const APInt &DemandedElts,
+ unsigned Depth = 0);
+
+ unsigned computeNumSignBitsMin(Register Src0, Register Src1,
+ const APInt &DemandedElts, unsigned Depth = 0);
+
+public:
+ GISelKnownBits(MachineFunction &MF, unsigned MaxDepth = 6);
+ virtual ~GISelKnownBits() = default;
+
+ const MachineFunction &getMachineFunction() const {
+ return MF;
+ }
+
+ const DataLayout &getDataLayout() const {
+ return DL;
+ }
+
+ virtual void computeKnownBitsImpl(Register R, KnownBits &Known,
+ const APInt &DemandedElts,
+ unsigned Depth = 0);
+
+ unsigned computeNumSignBits(Register R, const APInt &DemandedElts,
+ unsigned Depth = 0);
+ unsigned computeNumSignBits(Register R, unsigned Depth = 0);
+
+ // KnownBitsAPI
+ KnownBits getKnownBits(Register R);
+ KnownBits getKnownBits(Register R, const APInt &DemandedElts,
+ unsigned Depth = 0);
+
+ // Calls getKnownBits for first operand def of MI.
+ KnownBits getKnownBits(MachineInstr &MI);
+ APInt getKnownZeroes(Register R);
+ APInt getKnownOnes(Register R);
+
+ /// \return true if 'V & Mask' is known to be zero in DemandedElts. We use
+ /// this predicate to simplify operations downstream.
+ /// Mask is known to be zero for bits that V cannot have.
+ bool maskedValueIsZero(Register Val, const APInt &Mask) {
+ return Mask.isSubsetOf(getKnownBits(Val).Zero);
+ }
+
+ /// \return true if the sign bit of Op is known to be zero. We use this
+ /// predicate to simplify operations downstream.
+ bool signBitIsZero(Register Op);
+
+ static void computeKnownBitsForAlignment(KnownBits &Known,
+ Align Alignment) {
+ // The low bits are known zero if the pointer is aligned.
+ Known.Zero.setLowBits(Log2(Alignment));
+ }
+
+ /// \return The known alignment for the pointer-like value \p R.
+ Align computeKnownAlignment(Register R, unsigned Depth = 0);
+
+ // Observer API. No-op for non-caching implementation.
+  void erasingInstr(MachineInstr &MI) override {}
+  void createdInstr(MachineInstr &MI) override {}
+  void changingInstr(MachineInstr &MI) override {}
+  void changedInstr(MachineInstr &MI) override {}
+
+protected:
+ unsigned getMaxDepth() const { return MaxDepth; }
+};
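+
+/// Illustrative sketch: querying known bits during selection or combining;
+/// `KB` and the 32-bit register `Reg` are assumed to be in scope.
+/// \code
+///   KnownBits Known = KB.getKnownBits(Reg);
+///   unsigned SignBits = KB.computeNumSignBits(Reg);
+///   if (KB.maskedValueIsZero(Reg, APInt::getLowBitsSet(32, 2))) {
+///     // The low two bits are known zero: Reg is a multiple of 4.
+///   }
+/// \endcode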
+
+/// To use the known-bits analysis in a pass:
+///   GISelKnownBits &Info = getAnalysis<GISelKnownBitsAnalysis>().get(MF);
+/// If the implementation is caching, also register it with the observer:
+///   WrapperObserver.addObserver(&Info);
+///
+/// Eventually this may grow other features, such as caching and
+/// serializing/deserializing to MIR. Such implementations can derive from
+/// GISelKnownBits and override computeKnownBitsImpl.
+class GISelKnownBitsAnalysis : public MachineFunctionPass {
+ std::unique_ptr<GISelKnownBits> Info;
+
+public:
+ static char ID;
+ GISelKnownBitsAnalysis() : MachineFunctionPass(ID) {
+ initializeGISelKnownBitsAnalysisPass(*PassRegistry::getPassRegistry());
+ }
+ GISelKnownBits &get(MachineFunction &MF) {
+ if (!Info)
+ Info = std::make_unique<GISelKnownBits>(MF);
+ return *Info.get();
+ }
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ bool runOnMachineFunction(MachineFunction &MF) override;
+ void releaseMemory() override { Info.reset(); }
+};
+} // namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_KNOWNBITSINFO_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelWorkList.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelWorkList.h
new file mode 100644
index 0000000000..1507a64b05
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelWorkList.h
@@ -0,0 +1,124 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- GISelWorkList.h - Worklist for GISel passes ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_GISEL_WORKLIST_H
+#define LLVM_GISEL_WORKLIST_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+
+class MachineInstr;
+class MachineFunction;
+
+// Worklist which works similarly to InstCombineWorkList, but on
+// MachineInstrs. The main difference from something like a SetVector is that
+// erasing an element doesn't move all the later elements over one place -
+// instead it just nulls out the element's slot in the vector.
+//
+// FIXME: Does it make sense to factor out common code with the
+// InstCombineWorkList?
+template<unsigned N>
+class GISelWorkList {
+ SmallVector<MachineInstr *, N> Worklist;
+ DenseMap<MachineInstr *, unsigned> WorklistMap;
+
+#ifndef NDEBUG
+ bool Finalized = true;
+#endif
+
+public:
+ GISelWorkList() : WorklistMap(N) {}
+
+ bool empty() const { return WorklistMap.empty(); }
+
+ unsigned size() const { return WorklistMap.size(); }
+
+  // Since we don't know ahead of time how many instructions we're going to add
+  // to the worklist, and migrating the DenseMap's elements is quite expensive
+  // every time we resize, only insert into the SmallVector (typically during
+  // the initial phase of populating lists). Before the worklist can be used,
+  // finalize should be called; in asserts builds, using the list without
+  // finalizing triggers an assertion. Note that unlike insert, this won't
+  // check for duplicates - so the ideal place to use this is during the
+  // initial prepopulating phase of most passes.
+ void deferred_insert(MachineInstr *I) {
+ Worklist.push_back(I);
+#ifndef NDEBUG
+ Finalized = false;
+#endif
+ }
+
+  // This should only be called when using deferred_insert.
+  // It asserts that the WorklistMap is empty, then inserts all the Worklist
+  // elements into the map, and diagnoses any duplicate elements it finds.
+ void finalize() {
+ assert(WorklistMap.empty() && "Expecting empty worklistmap");
+ if (Worklist.size() > N)
+ WorklistMap.reserve(Worklist.size());
+ for (unsigned i = 0; i < Worklist.size(); ++i)
+ if (!WorklistMap.try_emplace(Worklist[i], i).second)
+ llvm_unreachable("Duplicate elements in the list");
+#ifndef NDEBUG
+ Finalized = true;
+#endif
+ }
+
+ /// Add the specified instruction to the worklist if it isn't already in it.
+ void insert(MachineInstr *I) {
+ assert(Finalized && "GISelWorkList used without finalizing");
+ if (WorklistMap.try_emplace(I, Worklist.size()).second)
+ Worklist.push_back(I);
+ }
+
+ /// Remove I from the worklist if it exists.
+ void remove(const MachineInstr *I) {
+ assert((Finalized || WorklistMap.empty()) && "Neither finalized nor empty");
+ auto It = WorklistMap.find(I);
+ if (It == WorklistMap.end())
+ return; // Not in worklist.
+
+ // Don't bother moving everything down, just null out the slot.
+ Worklist[It->second] = nullptr;
+
+ WorklistMap.erase(It);
+ }
+
+ void clear() {
+ Worklist.clear();
+ WorklistMap.clear();
+ }
+
+ MachineInstr *pop_back_val() {
+ assert(Finalized && "GISelWorkList used without finalizing");
+ MachineInstr *I;
+ do {
+ I = Worklist.pop_back_val();
+ } while(!I);
+ assert(I && "Pop back on empty worklist");
+ WorklistMap.erase(I);
+ return I;
+ }
+};
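+
+/// Illustrative sketch of the populate/finalize/drain protocol described
+/// above, roughly as a pass over a MachineFunction `MF` might use it.
+/// \code
+///   GISelWorkList<512> WorkList;
+///   for (MachineBasicBlock *MBB : post_order(&MF))
+///     for (MachineInstr &MI : *MBB)
+///       WorkList.deferred_insert(&MI);
+///   WorkList.finalize();
+///   while (!WorkList.empty()) {
+///     MachineInstr *CurrInst = WorkList.pop_back_val();
+///     // ... process CurrInst, calling insert()/remove() as the IR changes.
+///   }
+/// \endcode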
+
+} // end namespace llvm.
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/IRTranslator.h
new file mode 100644
index 0000000000..6c1ac8e115
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -0,0 +1,714 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/CodeGen/GlobalISel/IRTranslator.h - IRTranslator ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares the IRTranslator pass.
+/// This pass is responsible for translating LLVM IR into MachineInstrs.
+/// It uses target hooks to lower the ABI, but aside from that the generated
+/// code is generic. This is the default translator used for GlobalISel.
+///
+/// \todo Replace the comments with actual doxygen comments.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
+#define LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
+#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/SwiftErrorValueTracking.h"
+#include "llvm/CodeGen/SwitchLoweringUtils.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/CodeGen.h"
+#include <memory>
+#include <utility>
+
+namespace llvm {
+
+class AllocaInst;
+class BasicBlock;
+class CallInst;
+class CallLowering;
+class Constant;
+class ConstrainedFPIntrinsic;
+class DataLayout;
+class Instruction;
+class MachineBasicBlock;
+class MachineFunction;
+class MachineInstr;
+class MachineRegisterInfo;
+class OptimizationRemarkEmitter;
+class PHINode;
+class TargetPassConfig;
+class User;
+class Value;
+
+// Technically the pass should run on a hypothetical MachineModule,
+// since it should translate Global into some sort of MachineGlobal.
+// The MachineGlobal should ultimately just be a transfer of ownership of
+// the interesting bits that are relevant to represent a global value.
+// That being said, we could investigate what it would cost to just duplicate
+// the information from the LLVM IR.
+// The idea is that ultimately we would be able to free up the memory used
+// by the LLVM IR as soon as the translation is over.
+class IRTranslator : public MachineFunctionPass {
+public:
+ static char ID;
+
+private:
+  /// Interface used to lower everything related to calls.
+ const CallLowering *CLI;
+
+ /// This class contains the mapping between the Values to vreg related data.
+ class ValueToVRegInfo {
+ public:
+ ValueToVRegInfo() = default;
+
+ using VRegListT = SmallVector<Register, 1>;
+ using OffsetListT = SmallVector<uint64_t, 1>;
+
+ using const_vreg_iterator =
+ DenseMap<const Value *, VRegListT *>::const_iterator;
+ using const_offset_iterator =
+ DenseMap<const Value *, OffsetListT *>::const_iterator;
+
+ inline const_vreg_iterator vregs_end() const { return ValToVRegs.end(); }
+
+ VRegListT *getVRegs(const Value &V) {
+ auto It = ValToVRegs.find(&V);
+ if (It != ValToVRegs.end())
+ return It->second;
+
+ return insertVRegs(V);
+ }
+
+ OffsetListT *getOffsets(const Value &V) {
+ auto It = TypeToOffsets.find(V.getType());
+ if (It != TypeToOffsets.end())
+ return It->second;
+
+ return insertOffsets(V);
+ }
+
+ const_vreg_iterator findVRegs(const Value &V) const {
+ return ValToVRegs.find(&V);
+ }
+
+ bool contains(const Value &V) const {
+ return ValToVRegs.find(&V) != ValToVRegs.end();
+ }
+
+ void reset() {
+ ValToVRegs.clear();
+ TypeToOffsets.clear();
+ VRegAlloc.DestroyAll();
+ OffsetAlloc.DestroyAll();
+ }
+
+ private:
+ VRegListT *insertVRegs(const Value &V) {
+ assert(ValToVRegs.find(&V) == ValToVRegs.end() && "Value already exists");
+
+ // We placement new using our fast allocator since we never try to free
+ // the vectors until translation is finished.
+ auto *VRegList = new (VRegAlloc.Allocate()) VRegListT();
+ ValToVRegs[&V] = VRegList;
+ return VRegList;
+ }
+
+ OffsetListT *insertOffsets(const Value &V) {
+ assert(TypeToOffsets.find(V.getType()) == TypeToOffsets.end() &&
+ "Type already exists");
+
+ auto *OffsetList = new (OffsetAlloc.Allocate()) OffsetListT();
+ TypeToOffsets[V.getType()] = OffsetList;
+ return OffsetList;
+ }
+ SpecificBumpPtrAllocator<VRegListT> VRegAlloc;
+ SpecificBumpPtrAllocator<OffsetListT> OffsetAlloc;
+
+ // We store pointers to vectors here since references may be invalidated
+ // while we hold them if we stored the vectors directly.
+ DenseMap<const Value *, VRegListT*> ValToVRegs;
+ DenseMap<const Type *, OffsetListT*> TypeToOffsets;
+ };
+
+ /// Mapping of the values of the current LLVM IR function to the related
+ /// virtual registers and offsets.
+ ValueToVRegInfo VMap;
+
+  // N.b. it's not completely obvious that this will be sufficient for every
+  // LLVM IR construct (with "invoke" being the obvious candidate to mess up
+  // our lives).
+ DenseMap<const BasicBlock *, MachineBasicBlock *> BBToMBB;
+
+ // One BasicBlock can be translated to multiple MachineBasicBlocks. For such
+ // BasicBlocks translated to multiple MachineBasicBlocks, MachinePreds retains
+ // a mapping between the edges arriving at the BasicBlock to the corresponding
+ // created MachineBasicBlocks. Some BasicBlocks that get translated to a
+ // single MachineBasicBlock may also end up in this Map.
+ using CFGEdge = std::pair<const BasicBlock *, const BasicBlock *>;
+ DenseMap<CFGEdge, SmallVector<MachineBasicBlock *, 1>> MachinePreds;
+
+ // List of stubbed PHI instructions, for values and basic blocks to be filled
+ // in once all MachineBasicBlocks have been created.
+ SmallVector<std::pair<const PHINode *, SmallVector<MachineInstr *, 1>>, 4>
+ PendingPHIs;
+
+ /// Record of what frame index has been allocated to specified allocas for
+ /// this function.
+ DenseMap<const AllocaInst *, int> FrameIndices;
+
+ SwiftErrorValueTracking SwiftError;
+
+  /// \name Methods for translating from LLVM IR to MachineInstr.
+ /// \see ::translate for general information on the translate methods.
+ /// @{
+
+ /// Translate \p Inst into its corresponding MachineInstr instruction(s).
+ /// Insert the newly translated instruction(s) right where the CurBuilder
+ /// is set.
+ ///
+  /// The general algorithm is:
+  /// 1. Look for a virtual register for each operand, or
+  ///    create one.
+  /// 2. Update the VMap accordingly.
+  /// 2.alt. For constant arguments, if they are compile-time constants,
+  ///   produce an immediate in the right operand and do not touch
+  ///   ValToReg. In practice we go with a virtual register for each
+  ///   constant, because it may be expensive to actually materialize the
+  ///   constant. Moreover, if the constant spans several instructions,
+  ///   CSE may not catch them.
+  ///   => Update ValToVReg and remember that we saw a constant in Constants.
+  ///   We will materialize all the constants in finalize.
+  /// Note: we would need to do something so that we can recognize such
+  /// operands as constants.
+  /// 3. Create the generic instruction.
+ ///
+ /// \return true if the translation succeeded.
+ bool translate(const Instruction &Inst);
+
+ /// Materialize \p C into virtual-register \p Reg. The generic instructions
+ /// performing this materialization will be inserted into the entry block of
+ /// the function.
+ ///
+ /// \return true if the materialization succeeded.
+ bool translate(const Constant &C, Register Reg);
+
+ // Translate U as a copy of V.
+ bool translateCopy(const User &U, const Value &V,
+ MachineIRBuilder &MIRBuilder);
+
+ /// Translate an LLVM bitcast into generic IR. Either a COPY or a G_BITCAST is
+ /// emitted.
+ bool translateBitCast(const User &U, MachineIRBuilder &MIRBuilder);
+
+ /// Translate an LLVM load instruction into generic IR.
+ bool translateLoad(const User &U, MachineIRBuilder &MIRBuilder);
+
+ /// Translate an LLVM store instruction into generic IR.
+ bool translateStore(const User &U, MachineIRBuilder &MIRBuilder);
+
+ /// Translate an LLVM string intrinsic (memcpy, memset, ...).
+ bool translateMemFunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
+ unsigned Opcode);
+
+ void getStackGuard(Register DstReg, MachineIRBuilder &MIRBuilder);
+
+ bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
+ MachineIRBuilder &MIRBuilder);
+ bool translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
+ MachineIRBuilder &MIRBuilder);
+
+ /// Helper function for translateSimpleIntrinsic.
+ /// \return The generic opcode for \p IntrinsicID if \p IntrinsicID is a
+ /// simple intrinsic (ceil, fabs, etc.). Otherwise, returns
+ /// Intrinsic::not_intrinsic.
+ unsigned getSimpleIntrinsicOpcode(Intrinsic::ID ID);
+
+ /// Translates the intrinsics defined in getSimpleIntrinsicOpcode.
+ /// \return true if the translation succeeded.
+ bool translateSimpleIntrinsic(const CallInst &CI, Intrinsic::ID ID,
+ MachineIRBuilder &MIRBuilder);
+
+ bool translateConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI,
+ MachineIRBuilder &MIRBuilder);
+
+ bool translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
+ MachineIRBuilder &MIRBuilder);
+
+ bool translateInlineAsm(const CallBase &CB, MachineIRBuilder &MIRBuilder);
+
+ /// Returns true if the value should be split into multiple LLTs.
+ /// If \p Offsets is given then the split type's offsets will be stored in it.
+ /// If \p Offsets is not empty it will be cleared first.
+ bool valueIsSplit(const Value &V,
+ SmallVectorImpl<uint64_t> *Offsets = nullptr);
+
+ /// Common code for translating normal calls or invokes.
+ bool translateCallBase(const CallBase &CB, MachineIRBuilder &MIRBuilder);
+
+ /// Translate call instruction.
+ /// \pre \p U is a call instruction.
+ bool translateCall(const User &U, MachineIRBuilder &MIRBuilder);
+
+ /// When an invoke or a cleanupret unwinds to the next EH pad, there are
+ /// many places it could ultimately go. In the IR, we have a single unwind
+ /// destination, but in the machine CFG, we enumerate all the possible blocks.
+ /// This function skips over imaginary basic blocks that hold catchswitch
+ /// instructions, and finds all the "real" machine
+ /// basic block destinations. As those destinations may not be successors of
+ /// EHPadBB, here we also calculate the edge probability to those
+ /// destinations. The passed-in Prob is the edge probability to EHPadBB.
+ bool findUnwindDestinations(
+ const BasicBlock *EHPadBB, BranchProbability Prob,
+ SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
+ &UnwindDests);
+
+ bool translateInvoke(const User &U, MachineIRBuilder &MIRBuilder);
+
+ bool translateCallBr(const User &U, MachineIRBuilder &MIRBuilder);
+
+ bool translateLandingPad(const User &U, MachineIRBuilder &MIRBuilder);
+
+ /// Translate one of LLVM's cast instructions into MachineInstrs, with the
+ /// given generic Opcode.
+ bool translateCast(unsigned Opcode, const User &U,
+ MachineIRBuilder &MIRBuilder);
+
+ /// Translate a phi instruction.
+ bool translatePHI(const User &U, MachineIRBuilder &MIRBuilder);
+
+ /// Translate a comparison (icmp or fcmp) instruction or constant.
+ bool translateCompare(const User &U, MachineIRBuilder &MIRBuilder);
+
+ /// Translate an integer compare instruction (or constant).
+ bool translateICmp(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCompare(U, MIRBuilder);
+ }
+
+ /// Translate a floating-point compare instruction (or constant).
+ bool translateFCmp(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCompare(U, MIRBuilder);
+ }
+
+ /// Add remaining operands onto phis we've translated. Executed after all
+ /// MachineBasicBlocks for the function have been created.
+ void finishPendingPhis();
+
+ /// Translate \p Inst into a unary operation \p Opcode.
+ /// \pre \p U is a unary operation.
+ bool translateUnaryOp(unsigned Opcode, const User &U,
+ MachineIRBuilder &MIRBuilder);
+
+ /// Translate \p Inst into a binary operation \p Opcode.
+ /// \pre \p U is a binary operation.
+ bool translateBinaryOp(unsigned Opcode, const User &U,
+ MachineIRBuilder &MIRBuilder);
+
+ /// If the set of cases should be emitted as a series of branches, return
+ /// true. If we should emit this as a bunch of and/or'd together conditions,
+ /// return false.
+ bool shouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases);
+ /// Helper method for findMergedConditions.
+ /// This function emits a branch and is used at the leaves of an OR or an
+ /// AND operator tree.
+ void emitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ MachineBasicBlock *CurBB,
+ MachineBasicBlock *SwitchBB,
+ BranchProbability TProb,
+ BranchProbability FProb, bool InvertCond);
+ /// Used during condbr translation to find trees of conditions that can be
+ /// optimized.
+ void findMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
+ MachineBasicBlock *SwitchBB,
+ Instruction::BinaryOps Opc, BranchProbability TProb,
+ BranchProbability FProb, bool InvertCond);
+
+ /// Translate branch (br) instruction.
+ /// \pre \p U is a branch instruction.
+ bool translateBr(const User &U, MachineIRBuilder &MIRBuilder);
+
+ // Begin switch lowering functions.
+ bool emitJumpTableHeader(SwitchCG::JumpTable &JT,
+ SwitchCG::JumpTableHeader &JTH,
+ MachineBasicBlock *HeaderBB);
+ void emitJumpTable(SwitchCG::JumpTable &JT, MachineBasicBlock *MBB);
+
+ void emitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB,
+ MachineIRBuilder &MIB);
+
+  /// Generate code for the BitTest header block, which precedes each sequence
+  /// of BitTestCases.
+ void emitBitTestHeader(SwitchCG::BitTestBlock &BTB,
+ MachineBasicBlock *SwitchMBB);
+  /// Generate code to produce one "bit test" for a given BitTestCase \p B.
+ void emitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB,
+ BranchProbability BranchProbToNext, Register Reg,
+ SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);
+
+ bool lowerJumpTableWorkItem(
+ SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
+ MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
+ MachineIRBuilder &MIB, MachineFunction::iterator BBI,
+ BranchProbability UnhandledProbs, SwitchCG::CaseClusterIt I,
+ MachineBasicBlock *Fallthrough, bool FallthroughUnreachable);
+
+ bool lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I, Value *Cond,
+ MachineBasicBlock *Fallthrough,
+ bool FallthroughUnreachable,
+ BranchProbability UnhandledProbs,
+ MachineBasicBlock *CurMBB,
+ MachineIRBuilder &MIB,
+ MachineBasicBlock *SwitchMBB);
+
+ bool lowerBitTestWorkItem(
+ SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
+ MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
+ MachineIRBuilder &MIB, MachineFunction::iterator BBI,
+ BranchProbability DefaultProb, BranchProbability UnhandledProbs,
+ SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
+ bool FallthroughUnreachable);
+
+ bool lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond,
+ MachineBasicBlock *SwitchMBB,
+ MachineBasicBlock *DefaultMBB,
+ MachineIRBuilder &MIB);
+
+ bool translateSwitch(const User &U, MachineIRBuilder &MIRBuilder);
+ // End switch lowering section.
+
+ bool translateIndirectBr(const User &U, MachineIRBuilder &MIRBuilder);
+
+ bool translateExtractValue(const User &U, MachineIRBuilder &MIRBuilder);
+
+ bool translateInsertValue(const User &U, MachineIRBuilder &MIRBuilder);
+
+ bool translateSelect(const User &U, MachineIRBuilder &MIRBuilder);
+
+ bool translateGetElementPtr(const User &U, MachineIRBuilder &MIRBuilder);
+
+ bool translateAlloca(const User &U, MachineIRBuilder &MIRBuilder);
+
+ /// Translate return (ret) instruction.
+ /// The target needs to implement CallLowering::lowerReturn for
+ /// this to succeed.
+ /// \pre \p U is a return instruction.
+ bool translateRet(const User &U, MachineIRBuilder &MIRBuilder);
+
+ bool translateFNeg(const User &U, MachineIRBuilder &MIRBuilder);
+
+ bool translateAdd(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_ADD, U, MIRBuilder);
+ }
+ bool translateSub(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_SUB, U, MIRBuilder);
+ }
+ bool translateAnd(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_AND, U, MIRBuilder);
+ }
+ bool translateMul(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_MUL, U, MIRBuilder);
+ }
+ bool translateOr(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_OR, U, MIRBuilder);
+ }
+ bool translateXor(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_XOR, U, MIRBuilder);
+ }
+
+ bool translateUDiv(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_UDIV, U, MIRBuilder);
+ }
+ bool translateSDiv(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_SDIV, U, MIRBuilder);
+ }
+ bool translateURem(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_UREM, U, MIRBuilder);
+ }
+ bool translateSRem(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_SREM, U, MIRBuilder);
+ }
+ bool translateIntToPtr(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCast(TargetOpcode::G_INTTOPTR, U, MIRBuilder);
+ }
+ bool translatePtrToInt(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCast(TargetOpcode::G_PTRTOINT, U, MIRBuilder);
+ }
+ bool translateTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCast(TargetOpcode::G_TRUNC, U, MIRBuilder);
+ }
+ bool translateFPTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCast(TargetOpcode::G_FPTRUNC, U, MIRBuilder);
+ }
+ bool translateFPExt(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCast(TargetOpcode::G_FPEXT, U, MIRBuilder);
+ }
+ bool translateFPToUI(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCast(TargetOpcode::G_FPTOUI, U, MIRBuilder);
+ }
+ bool translateFPToSI(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCast(TargetOpcode::G_FPTOSI, U, MIRBuilder);
+ }
+ bool translateUIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCast(TargetOpcode::G_UITOFP, U, MIRBuilder);
+ }
+ bool translateSIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCast(TargetOpcode::G_SITOFP, U, MIRBuilder);
+ }
+ bool translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder) {
+ return true;
+ }
+ bool translateSExt(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCast(TargetOpcode::G_SEXT, U, MIRBuilder);
+ }
+
+ bool translateZExt(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCast(TargetOpcode::G_ZEXT, U, MIRBuilder);
+ }
+
+ bool translateShl(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_SHL, U, MIRBuilder);
+ }
+ bool translateLShr(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_LSHR, U, MIRBuilder);
+ }
+ bool translateAShr(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_ASHR, U, MIRBuilder);
+ }
+
+ bool translateFAdd(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_FADD, U, MIRBuilder);
+ }
+ bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
+ }
+ bool translateFMul(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_FMUL, U, MIRBuilder);
+ }
+ bool translateFDiv(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_FDIV, U, MIRBuilder);
+ }
+ bool translateFRem(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_FREM, U, MIRBuilder);
+ }
+
+ bool translateVAArg(const User &U, MachineIRBuilder &MIRBuilder);
+
+ bool translateInsertElement(const User &U, MachineIRBuilder &MIRBuilder);
+
+ bool translateExtractElement(const User &U, MachineIRBuilder &MIRBuilder);
+
+ bool translateShuffleVector(const User &U, MachineIRBuilder &MIRBuilder);
+
+ bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder);
+ bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder);
+ bool translateFence(const User &U, MachineIRBuilder &MIRBuilder);
+ bool translateFreeze(const User &U, MachineIRBuilder &MIRBuilder);
+
+ // Stubs to keep the compiler happy while we implement the rest of the
+ // translation.
+ bool translateResume(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+ bool translateCleanupRet(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+ bool translateCatchRet(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+ bool translateCatchSwitch(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+ bool translateAddrSpaceCast(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCast(TargetOpcode::G_ADDRSPACE_CAST, U, MIRBuilder);
+ }
+ bool translateCleanupPad(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+ bool translateCatchPad(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+ bool translateUserOp1(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+ bool translateUserOp2(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+
+ /// @}
+
+  // Builder for machine instructions, a la IRBuilder.
+  // I.e., compared to the regular MIBuilder, this one also inserts the
+  // instruction in the current block, can create blocks, etc. - basically a
+  // kind of IRBuilder, but for Machine IR.
+ // CSEMIRBuilder CurBuilder;
+ std::unique_ptr<MachineIRBuilder> CurBuilder;
+
+ // Builder set to the entry block (just after ABI lowering instructions). Used
+ // as a convenient location for Constants.
+ // CSEMIRBuilder EntryBuilder;
+ std::unique_ptr<MachineIRBuilder> EntryBuilder;
+
+ // The MachineFunction currently being translated.
+ MachineFunction *MF;
+
+ /// MachineRegisterInfo used to create virtual registers.
+ MachineRegisterInfo *MRI = nullptr;
+
+ const DataLayout *DL;
+
+ /// Current target configuration. Controls how the pass handles errors.
+ const TargetPassConfig *TPC;
+
+ CodeGenOpt::Level OptLevel;
+
+ /// Current optimization remark emitter. Used to report failures.
+ std::unique_ptr<OptimizationRemarkEmitter> ORE;
+
+ FunctionLoweringInfo FuncInfo;
+
+  // True when the Target Machine specifies optimizations and the function does
+  // not have the optnone attribute.
+ bool EnableOpts = false;
+
+ /// True when the block contains a tail call. This allows the IRTranslator to
+ /// stop translating such blocks early.
+ bool HasTailCall = false;
+
+ /// Switch analysis and optimization.
+ class GISelSwitchLowering : public SwitchCG::SwitchLowering {
+ public:
+ GISelSwitchLowering(IRTranslator *irt, FunctionLoweringInfo &funcinfo)
+ : SwitchLowering(funcinfo), IRT(irt) {
+ assert(irt && "irt is null!");
+ }
+
+ virtual void addSuccessorWithProb(
+ MachineBasicBlock *Src, MachineBasicBlock *Dst,
+ BranchProbability Prob = BranchProbability::getUnknown()) override {
+ IRT->addSuccessorWithProb(Src, Dst, Prob);
+ }
+
+ virtual ~GISelSwitchLowering() = default;
+
+ private:
+ IRTranslator *IRT;
+ };
+
+ std::unique_ptr<GISelSwitchLowering> SL;
+
+ // * Insert all the code needed to materialize the constants
+ // at the proper place. E.g., Entry block or dominator block
+ // of each constant depending on how fancy we want to be.
+ // * Clear the different maps.
+ void finalizeFunction();
+
+ // Handle emitting jump tables for each basic block.
+ void finalizeBasicBlock();
+
+ /// Get the VRegs that represent \p Val.
+ /// Non-aggregate types have just one corresponding VReg and the list can be
+ /// used as a single "unsigned". Aggregates get flattened. If such VRegs do
+ /// not exist, they are created.
+ ArrayRef<Register> getOrCreateVRegs(const Value &Val);
+
+ Register getOrCreateVReg(const Value &Val) {
+ auto Regs = getOrCreateVRegs(Val);
+ if (Regs.empty())
+ return 0;
+ assert(Regs.size() == 1 &&
+ "attempt to get single VReg for aggregate or void");
+ return Regs[0];
+ }
+
+ /// Allocate some vregs and offsets in the VMap. Then populate just the
+ /// offsets while leaving the vregs empty.
+ ValueToVRegInfo::VRegListT &allocateVRegs(const Value &Val);
+
+ /// Get the frame index that represents \p Val.
+ /// If such VReg does not exist, it is created.
+ int getOrCreateFrameIndex(const AllocaInst &AI);
+
+ /// Get the alignment of the given memory operation instruction. This will
+ /// either be the explicitly specified value or the ABI-required alignment for
+ /// the type being accessed (according to the Module's DataLayout).
+ Align getMemOpAlign(const Instruction &I);
+
+ /// Get the MachineBasicBlock that represents \p BB. Specifically, the block
+ /// returned will be the head of the translated block (suitable for branch
+ /// destinations).
+ MachineBasicBlock &getMBB(const BasicBlock &BB);
+
+ /// Record \p NewPred as a Machine predecessor to `Edge.second`, corresponding
+ /// to `Edge.first` at the IR level. This is used when IRTranslation creates
+ /// multiple MachineBasicBlocks for a given IR block and the CFG is no longer
+ /// represented simply by the IR-level CFG.
+ void addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred);
+
+ /// Returns the Machine IR predecessors for the given IR CFG edge. Usually
+ /// this is just the single MachineBasicBlock corresponding to the predecessor
+ /// in the IR. More complex lowering can result in multiple MachineBasicBlocks
+ /// preceding the original though (e.g. switch instructions).
+ SmallVector<MachineBasicBlock *, 1> getMachinePredBBs(CFGEdge Edge) {
+ auto RemappedEdge = MachinePreds.find(Edge);
+ if (RemappedEdge != MachinePreds.end())
+ return RemappedEdge->second;
+    return SmallVector<MachineBasicBlock *, 1>(1, &getMBB(*Edge.first));
+ }
+
+ /// Return branch probability calculated by BranchProbabilityInfo for IR
+ /// blocks.
+ BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
+ const MachineBasicBlock *Dst) const;
+
+ void addSuccessorWithProb(
+ MachineBasicBlock *Src, MachineBasicBlock *Dst,
+ BranchProbability Prob = BranchProbability::getUnknown());
+
+public:
+ IRTranslator(CodeGenOpt::Level OptLevel = CodeGenOpt::None);
+
+ StringRef getPassName() const override { return "IRTranslator"; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ // Algo:
+ // CallLowering = MF.subtarget.getCallLowering()
+ // F = MF.getParent()
+ // MIRBuilder.reset(MF)
+ // getMBB(F.getEntryBB())
+ // CallLowering->translateArguments(MIRBuilder, F, ValToVReg)
+ // for each bb in F
+ // getMBB(bb)
+ // for each inst in bb
+ // if (!translate(MIRBuilder, inst, ValToVReg, ConstantToSequence))
+ // report_fatal_error("Don't know how to translate input");
+ // finalize()
+ bool runOnMachineFunction(MachineFunction &MF) override;
+};
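+
+/// Illustrative sketch: a target's pass configuration schedules this pass via
+/// its addIRTranslator() hook; `MyPassConfig` is hypothetical.
+/// \code
+///   bool MyPassConfig::addIRTranslator() {
+///     addPass(new IRTranslator(getOptLevel()));
+///     return false;
+///   }
+/// \endcode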
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InlineAsmLowering.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InlineAsmLowering.h
new file mode 100644
index 0000000000..99c36ac51d
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InlineAsmLowering.h
@@ -0,0 +1,78 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/CodeGen/GlobalISel/InlineAsmLowering.h --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file describes how to lower LLVM inline asm to machine code INLINEASM.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_INLINEASMLOWERING_H
+#define LLVM_CODEGEN_GLOBALISEL_INLINEASMLOWERING_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include <functional>
+
+namespace llvm {
+class CallBase;
+class MachineIRBuilder;
+class MachineOperand;
+class Register;
+class TargetLowering;
+class Value;
+
+class InlineAsmLowering {
+ const TargetLowering *TLI;
+
+ virtual void anchor();
+
+public:
+ /// Lower the given inline asm call instruction
+ /// \p GetOrCreateVRegs is a callback to materialize a register for the
+ /// input and output operands of the inline asm
+ /// \return True if the lowering succeeds, false otherwise.
+ bool lowerInlineAsm(MachineIRBuilder &MIRBuilder, const CallBase &CB,
+ std::function<ArrayRef<Register>(const Value &Val)>
+ GetOrCreateVRegs) const;
+
+ /// Lower the specified operand into the Ops vector.
+ /// \p Val is the IR input value to be lowered
+ /// \p Constraint is the user supplied constraint string
+ /// \p Ops is the vector to be filled with the lowered operands
+ /// \return True if the lowering succeeds, false otherwise.
+ virtual bool lowerAsmOperandForConstraint(Value *Val, StringRef Constraint,
+ std::vector<MachineOperand> &Ops,
+ MachineIRBuilder &MIRBuilder) const;
+
+protected:
+ /// Getter for generic TargetLowering class.
+ const TargetLowering *getTLI() const { return TLI; }
+
+ /// Getter for target specific TargetLowering class.
+ template <class XXXTargetLowering> const XXXTargetLowering *getTLI() const {
+ return static_cast<const XXXTargetLowering *>(TLI);
+ }
+
+public:
+ InlineAsmLowering(const TargetLowering *TLI) : TLI(TLI) {}
+ virtual ~InlineAsmLowering() = default;
+};
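+
+/// Illustrative sketch: an IR translator-like client invokes the lowering,
+/// resolving IR values to vregs through the callback; `ILI`, `MIRBuilder`,
+/// `CB`, and getOrCreateVRegs are assumed to be in scope.
+/// \code
+///   bool Lowered = ILI->lowerInlineAsm(
+///       MIRBuilder, CB,
+///       [&](const Value &Val) { return getOrCreateVRegs(Val); });
+/// \endcode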
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_INLINEASMLOWERING_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelect.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelect.h
new file mode 100644
index 0000000000..9117a0bf36
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelect.h
@@ -0,0 +1,63 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//== llvm/CodeGen/GlobalISel/InstructionSelect.h -----------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file This file describes the interface of the MachineFunctionPass
+/// responsible for selecting (possibly generic) machine instructions to
+/// target-specific instructions.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECT_H
+#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECT_H
+
+#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+/// This pass is responsible for selecting generic machine instructions to
+/// target-specific instructions. It relies on the InstructionSelector provided
+/// by the target.
+/// Selection is done by examining blocks in post-order, and instructions in
+/// reverse order.
+///
+/// \post for all inst in MF: not isPreISelGenericOpcode(inst.opcode)
+class InstructionSelect : public MachineFunctionPass {
+public:
+ static char ID;
+ StringRef getPassName() const override { return "InstructionSelect"; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties()
+ .set(MachineFunctionProperties::Property::IsSSA)
+ .set(MachineFunctionProperties::Property::Legalized)
+ .set(MachineFunctionProperties::Property::RegBankSelected);
+ }
+
+ MachineFunctionProperties getSetProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::Selected);
+ }
+
+ InstructionSelect();
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+};
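+
+/// Illustrative sketch: targets schedule this pass from their pass
+/// configuration; `MyPassConfig` is hypothetical.
+/// \code
+///   bool MyPassConfig::addGlobalInstructionSelect() {
+///     addPass(new InstructionSelect());
+///     return false;
+///   }
+/// \endcode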
+} // End namespace llvm.
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelector.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
new file mode 100644
index 0000000000..61123ff85f
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
@@ -0,0 +1,558 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/CodeGen/GlobalISel/InstructionSelector.h ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file declares the API for the instruction selector.
+/// This class is responsible for selecting machine instructions.
+/// It's implemented by the target. It's used by the InstructionSelect pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H
+#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/CodeGenCoverage.h"
+#include "llvm/Support/LowLevelTypeImpl.h"
+#include <bitset>
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <initializer_list>
+#include <vector>
+
+namespace llvm {
+
+class APInt;
+class APFloat;
+class GISelKnownBits;
+class MachineInstr;
+class MachineInstrBuilder;
+class MachineFunction;
+class MachineOperand;
+class MachineRegisterInfo;
+class RegisterBankInfo;
+class TargetInstrInfo;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+
+/// Container class for CodeGen predicate results.
+/// This is convenient because std::bitset does not have a constructor
+/// with an initializer list of set bits.
+///
+/// Each InstructionSelector subclass should define a PredicateBitset class
+/// with:
+/// const unsigned MAX_SUBTARGET_PREDICATES = 192;
+/// using PredicateBitset = PredicateBitsetImpl<MAX_SUBTARGET_PREDICATES>;
+/// and update the constant to suit the target. TableGen provides a suitable
+/// definition for the predicates in use in <Target>GenGlobalISel.inc when
+/// GET_GLOBALISEL_PREDICATE_BITSET is defined.
+template <std::size_t MaxPredicates>
+class PredicateBitsetImpl : public std::bitset<MaxPredicates> {
+public:
+  // Cannot inherit constructors because it's not supported by VC++.
+ PredicateBitsetImpl() = default;
+
+ PredicateBitsetImpl(const std::bitset<MaxPredicates> &B)
+ : std::bitset<MaxPredicates>(B) {}
+
+ PredicateBitsetImpl(std::initializer_list<unsigned> Init) {
+ for (auto I : Init)
+ std::bitset<MaxPredicates>::set(I);
+ }
+};
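+
+// A minimal usage sketch (the Feature_* enumerator names are hypothetical;
+// the real ones are emitted by TableGen into <Target>GenGlobalISel.inc):
+//
+//   constexpr unsigned MAX_SUBTARGET_PREDICATES = 192;
+//   using PredicateBitset = PredicateBitsetImpl<MAX_SUBTARGET_PREDICATES>;
+//   // An initializer list sets individual predicate bits by index.
+//   PredicateBitset Features = {Feature_HasFP64Bit, Feature_Is64BitBit};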
+
+enum {
+ /// Begin a try-block to attempt a match and jump to OnFail if it is
+ /// unsuccessful.
+ /// - OnFail - The MatchTable entry at which to resume if the match fails.
+ ///
+ /// FIXME: This ought to take an argument indicating the number of try-blocks
+ /// to exit on failure. It's usually one but the last match attempt of
+ /// a block will need more. The (implemented) alternative is to tack a
+ /// GIM_Reject on the end of each try-block which is simpler but
+ /// requires an extra opcode and iteration in the interpreter on each
+ /// failed match.
+ GIM_Try,
+
+ /// Switch over the opcode on the specified instruction
+ /// - InsnID - Instruction ID
+ /// - LowerBound - numerically minimum opcode supported
+ /// - UpperBound - numerically maximum + 1 opcode supported
+ /// - Default - failure jump target
+ /// - JumpTable... - (UpperBound - LowerBound) (at least 2) jump targets
+ GIM_SwitchOpcode,
+
+ /// Switch over the LLT on the specified instruction operand
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - LowerBound - numerically minimum Type ID supported
+ /// - UpperBound - numerically maximum + 1 Type ID supported
+ /// - Default - failure jump target
+ /// - JumpTable... - (UpperBound - LowerBound) (at least 2) jump targets
+ GIM_SwitchType,
+
+  /// Record the instruction defining the specified operand
+ /// - NewInsnID - Instruction ID to define
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ GIM_RecordInsn,
+
+ /// Check the feature bits
+ /// - Expected features
+ GIM_CheckFeatures,
+
+ /// Check the opcode on the specified instruction
+ /// - InsnID - Instruction ID
+ /// - Expected opcode
+ GIM_CheckOpcode,
+
+ /// Check the opcode on the specified instruction, checking 2 acceptable
+ /// alternatives.
+ /// - InsnID - Instruction ID
+ /// - Expected opcode
+ /// - Alternative expected opcode
+ GIM_CheckOpcodeIsEither,
+
+ /// Check the instruction has the right number of operands
+ /// - InsnID - Instruction ID
+ /// - Expected number of operands
+ GIM_CheckNumOperands,
+ /// Check an immediate predicate on the specified instruction
+ /// - InsnID - Instruction ID
+ /// - The predicate to test
+ GIM_CheckI64ImmPredicate,
+ /// Check an immediate predicate on the specified instruction via an APInt.
+ /// - InsnID - Instruction ID
+ /// - The predicate to test
+ GIM_CheckAPIntImmPredicate,
+ /// Check a floating point immediate predicate on the specified instruction.
+ /// - InsnID - Instruction ID
+ /// - The predicate to test
+ GIM_CheckAPFloatImmPredicate,
+ /// Check a memory operation has the specified atomic ordering.
+ /// - InsnID - Instruction ID
+ /// - Ordering - The AtomicOrdering value
+ GIM_CheckAtomicOrdering,
+  /// Check a memory operation's ordering is at least as strong as the given
+  /// ordering.
+  GIM_CheckAtomicOrderingOrStrongerThan,
+  /// Check a memory operation's ordering is weaker than the given ordering.
+  GIM_CheckAtomicOrderingWeakerThan,
+ /// Check the size of the memory access for the given machine memory operand.
+ /// - InsnID - Instruction ID
+ /// - MMOIdx - MMO index
+ /// - Size - The size in bytes of the memory access
+ GIM_CheckMemorySizeEqualTo,
+
+ /// Check the address space of the memory access for the given machine memory
+ /// operand.
+ /// - InsnID - Instruction ID
+ /// - MMOIdx - MMO index
+ /// - NumAddrSpace - Number of valid address spaces
+ /// - AddrSpaceN - An allowed space of the memory access
+ /// - AddrSpaceN+1 ...
+ GIM_CheckMemoryAddressSpace,
+
+ /// Check the minimum alignment of the memory access for the given machine
+ /// memory operand.
+ /// - InsnID - Instruction ID
+ /// - MMOIdx - MMO index
+ /// - MinAlign - Minimum acceptable alignment
+ GIM_CheckMemoryAlignment,
+
+ /// Check the size of the memory access for the given machine memory operand
+ /// against the size of an operand.
+ /// - InsnID - Instruction ID
+ /// - MMOIdx - MMO index
+ /// - OpIdx - The operand index to compare the MMO against
+ GIM_CheckMemorySizeEqualToLLT,
+ GIM_CheckMemorySizeLessThanLLT,
+ GIM_CheckMemorySizeGreaterThanLLT,
+
+  /// Check if this is a vector that can be treated as a vector splat
+  /// constant. This is valid for both G_BUILD_VECTOR as well as
+  /// G_BUILD_VECTOR_TRUNC. AllOnes refers to the individual bits, i.e. an
+  /// element whose bits are all set (-1).
+ /// - InsnID - Instruction ID
+ GIM_CheckIsBuildVectorAllOnes,
+ GIM_CheckIsBuildVectorAllZeros,
+
+ /// Check a generic C++ instruction predicate
+ /// - InsnID - Instruction ID
+ /// - PredicateID - The ID of the predicate function to call
+ GIM_CheckCxxInsnPredicate,
+
+ /// Check the type for the specified operand
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - Expected type
+ GIM_CheckType,
+ /// Check the type of a pointer to any address space.
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - SizeInBits - The size of the pointer value in bits.
+ GIM_CheckPointerToAny,
+ /// Check the register bank for the specified operand
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - Expected register bank (specified as a register class)
+ GIM_CheckRegBankForClass,
+
+ /// Check the operand matches a complex predicate
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - RendererID - The renderer to hold the result
+ /// - Complex predicate ID
+ GIM_CheckComplexPattern,
+
+ /// Check the operand is a specific integer
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - Expected integer
+ GIM_CheckConstantInt,
+ /// Check the operand is a specific literal integer (i.e. MO.isImm() or
+ /// MO.isCImm() is true).
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - Expected integer
+ GIM_CheckLiteralInt,
+ /// Check the operand is a specific intrinsic ID
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - Expected Intrinsic ID
+ GIM_CheckIntrinsicID,
+
+ /// Check the operand is a specific predicate
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - Expected predicate
+ GIM_CheckCmpPredicate,
+
+ /// Check the specified operand is an MBB
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ GIM_CheckIsMBB,
+
+ /// Check the specified operand is an Imm
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ GIM_CheckIsImm,
+
+ /// Check if the specified operand is safe to fold into the current
+ /// instruction.
+ /// - InsnID - Instruction ID
+ GIM_CheckIsSafeToFold,
+
+ /// Check the specified operands are identical.
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - OtherInsnID - Other instruction ID
+ /// - OtherOpIdx - Other operand index
+ GIM_CheckIsSameOperand,
+
+  /// Predicates with 'let PredicateCodeUsesOperands = 1' need to examine some
+  /// named operands that will be recorded in RecordedOperands. The names of
+  /// these operands are referenced in the predicate argument list, and the
+  /// emitter determines StoreIdx (which corresponds to the order in which the
+  /// names appear in that list).
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - StoreIdx - Store location in RecordedOperands.
+ GIM_RecordNamedOperand,
+
+ /// Fail the current try-block, or completely fail to match if there is no
+ /// current try-block.
+ GIM_Reject,
+
+ //=== Renderers ===
+
+ /// Mutate an instruction
+  /// - OldInsnID - Instruction ID to mutate
+  /// - NewInsnID - Instruction ID to define
+ /// - NewOpcode - The new opcode to use
+ GIR_MutateOpcode,
+
+ /// Build a new instruction
+ /// - InsnID - Instruction ID to define
+ /// - Opcode - The new opcode to use
+ GIR_BuildMI,
+
+ /// Copy an operand to the specified instruction
+ /// - NewInsnID - Instruction ID to modify
+ /// - OldInsnID - Instruction ID to copy from
+ /// - OpIdx - The operand to copy
+ GIR_Copy,
+
+ /// Copy an operand to the specified instruction or add a zero register if the
+ /// operand is a zero immediate.
+ /// - NewInsnID - Instruction ID to modify
+ /// - OldInsnID - Instruction ID to copy from
+ /// - OpIdx - The operand to copy
+ /// - ZeroReg - The zero register to use
+ GIR_CopyOrAddZeroReg,
+ /// Copy an operand to the specified instruction
+ /// - NewInsnID - Instruction ID to modify
+ /// - OldInsnID - Instruction ID to copy from
+ /// - OpIdx - The operand to copy
+ /// - SubRegIdx - The subregister to copy
+ GIR_CopySubReg,
+
+ /// Add an implicit register def to the specified instruction
+ /// - InsnID - Instruction ID to modify
+ /// - RegNum - The register to add
+ GIR_AddImplicitDef,
+ /// Add an implicit register use to the specified instruction
+ /// - InsnID - Instruction ID to modify
+ /// - RegNum - The register to add
+ GIR_AddImplicitUse,
+  /// Add a register to the specified instruction
+ /// - InsnID - Instruction ID to modify
+ /// - RegNum - The register to add
+ GIR_AddRegister,
+
+ /// Add a temporary register to the specified instruction
+ /// - InsnID - Instruction ID to modify
+ /// - TempRegID - The temporary register ID to add
+ /// - TempRegFlags - The register flags to set
+ GIR_AddTempRegister,
+
+ /// Add a temporary register to the specified instruction
+ /// - InsnID - Instruction ID to modify
+ /// - TempRegID - The temporary register ID to add
+ /// - TempRegFlags - The register flags to set
+ /// - SubRegIndex - The subregister index to set
+ GIR_AddTempSubRegister,
+
+ /// Add an immediate to the specified instruction
+ /// - InsnID - Instruction ID to modify
+ /// - Imm - The immediate to add
+ GIR_AddImm,
+ /// Render complex operands to the specified instruction
+ /// - InsnID - Instruction ID to modify
+ /// - RendererID - The renderer to call
+ GIR_ComplexRenderer,
+
+ /// Render sub-operands of complex operands to the specified instruction
+ /// - InsnID - Instruction ID to modify
+ /// - RendererID - The renderer to call
+ /// - RenderOpID - The suboperand to render.
+ GIR_ComplexSubOperandRenderer,
+ /// Render operands to the specified instruction using a custom function
+ /// - InsnID - Instruction ID to modify
+ /// - OldInsnID - Instruction ID to get the matched operand from
+ /// - RendererFnID - Custom renderer function to call
+ GIR_CustomRenderer,
+
+ /// Render operands to the specified instruction using a custom function,
+ /// reading from a specific operand.
+ /// - InsnID - Instruction ID to modify
+ /// - OldInsnID - Instruction ID to get the matched operand from
+  /// - OpIdx - Operand index in OldInsnID the render function should read from.
+ /// - RendererFnID - Custom renderer function to call
+ GIR_CustomOperandRenderer,
+
+  /// Render a G_CONSTANT operation as a sign-extended immediate.
+ /// - NewInsnID - Instruction ID to modify
+ /// - OldInsnID - Instruction ID to copy from
+ /// The operand index is implicitly 1.
+ GIR_CopyConstantAsSImm,
+
+  /// Render a G_FCONSTANT operation as a floating-point immediate.
+ /// - NewInsnID - Instruction ID to modify
+ /// - OldInsnID - Instruction ID to copy from
+ /// The operand index is implicitly 1.
+ GIR_CopyFConstantAsFPImm,
+
+ /// Constrain an instruction operand to a register class.
+ /// - InsnID - Instruction ID to modify
+ /// - OpIdx - Operand index
+ /// - RCEnum - Register class enumeration value
+ GIR_ConstrainOperandRC,
+
+  /// Constrain an instruction's operands according to the instruction
+ /// description.
+ /// - InsnID - Instruction ID to modify
+ GIR_ConstrainSelectedInstOperands,
+
+ /// Merge all memory operands into instruction.
+ /// - InsnID - Instruction ID to modify
+  /// - MergeInsnID... - One or more instruction IDs to merge into the result.
+ /// - GIU_MergeMemOperands_EndOfList - Terminates the list of instructions to
+ /// merge.
+ GIR_MergeMemOperands,
+
+ /// Erase from parent.
+ /// - InsnID - Instruction ID to erase
+ GIR_EraseFromParent,
+
+ /// Create a new temporary register that's not constrained.
+ /// - TempRegID - The temporary register ID to initialize.
+ /// - Expected type
+ GIR_MakeTempReg,
+
+ /// A successful emission
+ GIR_Done,
+
+ /// Increment the rule coverage counter.
+ /// - RuleID - The ID of the rule that was covered.
+ GIR_Coverage,
+
+  /// Keeps track of the number of GI opcodes. Must be the last entry.
+ GIU_NumOpcodes,
+};
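+
+// For illustration only: a single selection rule is encoded as a flat
+// sequence of these opcodes and their arguments. The opcode and type IDs
+// below are made up (MYTGT_ADD is a hypothetical target instruction);
+// TableGen emits the real tables:
+//
+//   static const int64_t MatchTable[] = {
+//     GIM_Try, /*OnFail*/ 16,
+//       GIM_CheckOpcode, /*InsnID*/ 0, TargetOpcode::G_ADD,
+//       GIM_CheckType, /*InsnID*/ 0, /*OpIdx*/ 0, /*TypeID*/ 0,
+//       GIR_MutateOpcode, /*OldInsnID*/ 0, /*NewInsnID*/ 0,
+//                         /*NewOpcode*/ MYTGT_ADD,
+//       GIR_ConstrainSelectedInstOperands, /*InsnID*/ 0,
+//       GIR_Done,
+//     GIM_Reject,
+//   };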
+
+enum {
+ /// Indicates the end of the variable-length MergeInsnID list in a
+ /// GIR_MergeMemOperands opcode.
+ GIU_MergeMemOperands_EndOfList = -1,
+};
+
+/// Provides the logic to select generic machine instructions.
+class InstructionSelector {
+public:
+ virtual ~InstructionSelector() = default;
+
+ /// Select the (possibly generic) instruction \p I to only use target-specific
+ /// opcodes. It is OK to insert multiple instructions, but they cannot be
+ /// generic pre-isel instructions.
+ ///
+ /// \returns whether selection succeeded.
+ /// \pre I.getParent() && I.getParent()->getParent()
+ /// \post
+ /// if returns true:
+ /// for I in all mutated/inserted instructions:
+ /// !isPreISelGenericOpcode(I.getOpcode())
+ virtual bool select(MachineInstr &I) = 0;
+
+ CodeGenCoverage *CoverageInfo = nullptr;
+ GISelKnownBits *KnownBits = nullptr;
+ MachineFunction *MF = nullptr;
+
+ virtual void setupGeneratedPerFunctionState(MachineFunction &MF) {
+ llvm_unreachable("TableGen should have emitted implementation");
+ }
+
+ /// Setup per-MF selector state.
+ virtual void setupMF(MachineFunction &mf,
+ GISelKnownBits &KB,
+ CodeGenCoverage &covinfo) {
+ CoverageInfo = &covinfo;
+ KnownBits = &KB;
+ MF = &mf;
+ setupGeneratedPerFunctionState(mf);
+ }
+
+protected:
+ using ComplexRendererFns =
+ Optional<SmallVector<std::function<void(MachineInstrBuilder &)>, 4>>;
+ using RecordedMIVector = SmallVector<MachineInstr *, 4>;
+ using NewMIVector = SmallVector<MachineInstrBuilder, 4>;
+
+ struct MatcherState {
+ std::vector<ComplexRendererFns::value_type> Renderers;
+ RecordedMIVector MIs;
+ DenseMap<unsigned, unsigned> TempRegisters;
+    /// Named operands that predicates with 'let PredicateCodeUsesOperands =
+    /// 1' reference in their argument lists. Operands are inserted at the
+    /// index set by the emitter, which corresponds to the order in which the
+    /// names appear in the argument list. Currently such predicates don't
+    /// have more than 3 arguments.
+ std::array<const MachineOperand *, 3> RecordedOperands;
+
+ MatcherState(unsigned MaxRenderers);
+ };
+
+public:
+ template <class PredicateBitset, class ComplexMatcherMemFn,
+ class CustomRendererFn>
+ struct ISelInfoTy {
+ ISelInfoTy(const LLT *TypeObjects, size_t NumTypeObjects,
+ const PredicateBitset *FeatureBitsets,
+ const ComplexMatcherMemFn *ComplexPredicates,
+ const CustomRendererFn *CustomRenderers)
+ : TypeObjects(TypeObjects),
+ FeatureBitsets(FeatureBitsets),
+ ComplexPredicates(ComplexPredicates),
+ CustomRenderers(CustomRenderers) {
+
+ for (size_t I = 0; I < NumTypeObjects; ++I)
+ TypeIDMap[TypeObjects[I]] = I;
+ }
+ const LLT *TypeObjects;
+ const PredicateBitset *FeatureBitsets;
+ const ComplexMatcherMemFn *ComplexPredicates;
+ const CustomRendererFn *CustomRenderers;
+
+ SmallDenseMap<LLT, unsigned, 64> TypeIDMap;
+ };
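+
+  // Sketch of how a TableGen-erated selector typically instantiates this
+  // (the array names are illustrative; the real definitions live in
+  // <Target>GenGlobalISel.inc):
+  //
+  //   ISelInfoTy<PredicateBitset, ComplexMatcherMemFn, CustomRendererFn>
+  //       ISelInfo(TypeObjects, NumTypeObjects, FeatureBitsets,
+  //                ComplexPredicates, CustomRenderers);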
+
+protected:
+ InstructionSelector();
+
+ /// Execute a given matcher table and return true if the match was successful
+ /// and false otherwise.
+ template <class TgtInstructionSelector, class PredicateBitset,
+ class ComplexMatcherMemFn, class CustomRendererFn>
+ bool executeMatchTable(
+ TgtInstructionSelector &ISel, NewMIVector &OutMIs, MatcherState &State,
+ const ISelInfoTy<PredicateBitset, ComplexMatcherMemFn, CustomRendererFn>
+ &ISelInfo,
+ const int64_t *MatchTable, const TargetInstrInfo &TII,
+ MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
+ const RegisterBankInfo &RBI, const PredicateBitset &AvailableFeatures,
+ CodeGenCoverage &CoverageInfo) const;
+
+ virtual const int64_t *getMatchTable() const {
+ llvm_unreachable("Should have been overridden by tablegen if used");
+ }
+
+ virtual bool testImmPredicate_I64(unsigned, int64_t) const {
+ llvm_unreachable(
+ "Subclasses must override this with a tablegen-erated function");
+ }
+ virtual bool testImmPredicate_APInt(unsigned, const APInt &) const {
+ llvm_unreachable(
+ "Subclasses must override this with a tablegen-erated function");
+ }
+ virtual bool testImmPredicate_APFloat(unsigned, const APFloat &) const {
+ llvm_unreachable(
+ "Subclasses must override this with a tablegen-erated function");
+ }
+ virtual bool testMIPredicate_MI(
+ unsigned, const MachineInstr &,
+ const std::array<const MachineOperand *, 3> &Operands) const {
+ llvm_unreachable(
+ "Subclasses must override this with a tablegen-erated function");
+ }
+
+ bool isOperandImmEqual(const MachineOperand &MO, int64_t Value,
+ const MachineRegisterInfo &MRI) const;
+
+ /// Return true if the specified operand is a G_PTR_ADD with a G_CONSTANT on the
+ /// right-hand side. GlobalISel's separation of pointer and integer types
+ /// means that we don't need to worry about G_OR with equivalent semantics.
+ bool isBaseWithConstantOffset(const MachineOperand &Root,
+ const MachineRegisterInfo &MRI) const;
+
+ /// Return true if MI can obviously be folded into IntoMI.
+  /// MI and IntoMI do not need to be in the same basic block, but MI must
+  /// precede IntoMI.
+ bool isObviouslySafeToFold(MachineInstr &MI, MachineInstr &IntoMI) const;
+};
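+
+// A minimal sketch of a target subclass (MyTargetInstructionSelector and
+// selectSpecialCases are hypothetical names; selectImpl is conventionally
+// the TableGen-erated match table entry point declared in the target's
+// selector):
+//
+//   class MyTargetInstructionSelector : public InstructionSelector {
+//   public:
+//     bool select(MachineInstr &I) override {
+//       // Prefer the generated patterns; fall back to hand-written C++.
+//       if (selectImpl(I, *CoverageInfo))
+//         return true;
+//       return selectSpecialCases(I);
+//     }
+//   };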
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
new file mode 100644
index 0000000000..e3202fc976
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
@@ -0,0 +1,1163 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file implements the match table interpreter
+/// (InstructionSelector::executeMatchTable) used by the instruction selector.
+/// The selector itself is implemented by the target and used by the
+/// InstructionSelect pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
+#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
+#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+
+namespace llvm {
+
+/// GlobalISel PatFrag Predicates
+enum {
+ GIPFP_I64_Invalid = 0,
+ GIPFP_APInt_Invalid = 0,
+ GIPFP_APFloat_Invalid = 0,
+ GIPFP_MI_Invalid = 0,
+};
+
+template <class TgtInstructionSelector, class PredicateBitset,
+ class ComplexMatcherMemFn, class CustomRendererFn>
+bool InstructionSelector::executeMatchTable(
+ TgtInstructionSelector &ISel, NewMIVector &OutMIs, MatcherState &State,
+ const ISelInfoTy<PredicateBitset, ComplexMatcherMemFn, CustomRendererFn>
+ &ISelInfo,
+ const int64_t *MatchTable, const TargetInstrInfo &TII,
+ MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
+ const RegisterBankInfo &RBI, const PredicateBitset &AvailableFeatures,
+ CodeGenCoverage &CoverageInfo) const {
+
+ uint64_t CurrentIdx = 0;
+ SmallVector<uint64_t, 4> OnFailResumeAt;
+
+ // Bypass the flag check on the instruction, and only look at the MCInstrDesc.
+ bool NoFPException = !State.MIs[0]->getDesc().mayRaiseFPException();
+
+ const uint16_t Flags = State.MIs[0]->getFlags();
+
+ enum RejectAction { RejectAndGiveUp, RejectAndResume };
+ auto handleReject = [&]() -> RejectAction {
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": Rejected\n");
+ if (OnFailResumeAt.empty())
+ return RejectAndGiveUp;
+ CurrentIdx = OnFailResumeAt.pop_back_val();
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": Resume at " << CurrentIdx << " ("
+ << OnFailResumeAt.size() << " try-blocks remain)\n");
+ return RejectAndResume;
+ };
+
+ auto propagateFlags = [=](NewMIVector &OutMIs) {
+ for (auto MIB : OutMIs) {
+ // Set the NoFPExcept flag when no original matched instruction could
+ // raise an FP exception, but the new instruction potentially might.
+ uint16_t MIBFlags = Flags;
+ if (NoFPException && MIB->mayRaiseFPException())
+ MIBFlags |= MachineInstr::NoFPExcept;
+ MIB.setMIFlags(MIBFlags);
+ }
+
+ return true;
+ };
+
+ while (true) {
+ assert(CurrentIdx != ~0u && "Invalid MatchTable index");
+ int64_t MatcherOpcode = MatchTable[CurrentIdx++];
+ switch (MatcherOpcode) {
+ case GIM_Try: {
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": Begin try-block\n");
+ OnFailResumeAt.push_back(MatchTable[CurrentIdx++]);
+ break;
+ }
+
+ case GIM_RecordInsn: {
+ int64_t NewInsnID = MatchTable[CurrentIdx++];
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+
+ // As an optimisation we require that MIs[0] is always the root. Refuse
+ // any attempt to modify it.
+ assert(NewInsnID != 0 && "Refusing to modify MIs[0]");
+
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+ if (!MO.isReg()) {
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": Not a register\n");
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ if (Register::isPhysicalRegister(MO.getReg())) {
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": Is a physical register\n");
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+
+ MachineInstr *NewMI = MRI.getVRegDef(MO.getReg());
+ if ((size_t)NewInsnID < State.MIs.size())
+ State.MIs[NewInsnID] = NewMI;
+ else {
+ assert((size_t)NewInsnID == State.MIs.size() &&
+ "Expected to store MIs in order");
+ State.MIs.push_back(NewMI);
+ }
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": MIs[" << NewInsnID
+ << "] = GIM_RecordInsn(" << InsnID << ", " << OpIdx
+ << ")\n");
+ break;
+ }
+
+ case GIM_CheckFeatures: {
+ int64_t ExpectedBitsetID = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx
+ << ": GIM_CheckFeatures(ExpectedBitsetID="
+ << ExpectedBitsetID << ")\n");
+ if ((AvailableFeatures & ISelInfo.FeatureBitsets[ExpectedBitsetID]) !=
+ ISelInfo.FeatureBitsets[ExpectedBitsetID]) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+
+ case GIM_CheckOpcode:
+ case GIM_CheckOpcodeIsEither: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t Expected0 = MatchTable[CurrentIdx++];
+ int64_t Expected1 = -1;
+ if (MatcherOpcode == GIM_CheckOpcodeIsEither)
+ Expected1 = MatchTable[CurrentIdx++];
+
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ unsigned Opcode = State.MIs[InsnID]->getOpcode();
+
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckOpcode(MIs[" << InsnID
+ << "], ExpectedOpcode=" << Expected0;
+ if (MatcherOpcode == GIM_CheckOpcodeIsEither)
+ dbgs() << " || " << Expected1;
+ dbgs() << ") // Got=" << Opcode << "\n";
+ );
+
+ if (Opcode != Expected0 && Opcode != Expected1) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+ case GIM_SwitchOpcode: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t LowerBound = MatchTable[CurrentIdx++];
+ int64_t UpperBound = MatchTable[CurrentIdx++];
+ int64_t Default = MatchTable[CurrentIdx++];
+
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ const int64_t Opcode = State.MIs[InsnID]->getOpcode();
+
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(), {
+ dbgs() << CurrentIdx << ": GIM_SwitchOpcode(MIs[" << InsnID << "], ["
+ << LowerBound << ", " << UpperBound << "), Default=" << Default
+ << ", JumpTable...) // Got=" << Opcode << "\n";
+ });
+ if (Opcode < LowerBound || UpperBound <= Opcode) {
+ CurrentIdx = Default;
+ break;
+ }
+ CurrentIdx = MatchTable[CurrentIdx + (Opcode - LowerBound)];
+ if (!CurrentIdx) {
+ CurrentIdx = Default;
+ break;
+ }
+ OnFailResumeAt.push_back(Default);
+ break;
+ }
+
+ case GIM_SwitchType: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t LowerBound = MatchTable[CurrentIdx++];
+ int64_t UpperBound = MatchTable[CurrentIdx++];
+ int64_t Default = MatchTable[CurrentIdx++];
+
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(), {
+ dbgs() << CurrentIdx << ": GIM_SwitchType(MIs[" << InsnID
+ << "]->getOperand(" << OpIdx << "), [" << LowerBound << ", "
+ << UpperBound << "), Default=" << Default
+ << ", JumpTable...) // Got=";
+ if (!MO.isReg())
+ dbgs() << "Not a VReg\n";
+ else
+ dbgs() << MRI.getType(MO.getReg()) << "\n";
+ });
+ if (!MO.isReg()) {
+ CurrentIdx = Default;
+ break;
+ }
+ const LLT Ty = MRI.getType(MO.getReg());
+ const auto TyI = ISelInfo.TypeIDMap.find(Ty);
+ if (TyI == ISelInfo.TypeIDMap.end()) {
+ CurrentIdx = Default;
+ break;
+ }
+ const int64_t TypeID = TyI->second;
+ if (TypeID < LowerBound || UpperBound <= TypeID) {
+ CurrentIdx = Default;
+ break;
+ }
+ CurrentIdx = MatchTable[CurrentIdx + (TypeID - LowerBound)];
+ if (!CurrentIdx) {
+ CurrentIdx = Default;
+ break;
+ }
+ OnFailResumeAt.push_back(Default);
+ break;
+ }
+
+ case GIM_CheckNumOperands: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t Expected = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckNumOperands(MIs["
+ << InsnID << "], Expected=" << Expected << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (State.MIs[InsnID]->getNumOperands() != Expected) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+ case GIM_CheckI64ImmPredicate: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t Predicate = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs()
+ << CurrentIdx << ": GIM_CheckI64ImmPredicate(MIs["
+ << InsnID << "], Predicate=" << Predicate << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ assert(State.MIs[InsnID]->getOpcode() == TargetOpcode::G_CONSTANT &&
+ "Expected G_CONSTANT");
+ assert(Predicate > GIPFP_I64_Invalid && "Expected a valid predicate");
+ int64_t Value = 0;
+ if (State.MIs[InsnID]->getOperand(1).isCImm())
+ Value = State.MIs[InsnID]->getOperand(1).getCImm()->getSExtValue();
+ else if (State.MIs[InsnID]->getOperand(1).isImm())
+ Value = State.MIs[InsnID]->getOperand(1).getImm();
+ else
+ llvm_unreachable("Expected Imm or CImm operand");
+
+ if (!testImmPredicate_I64(Predicate, Value))
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIM_CheckAPIntImmPredicate: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t Predicate = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs()
+ << CurrentIdx << ": GIM_CheckAPIntImmPredicate(MIs["
+ << InsnID << "], Predicate=" << Predicate << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ assert(State.MIs[InsnID]->getOpcode() == TargetOpcode::G_CONSTANT &&
+ "Expected G_CONSTANT");
+ assert(Predicate > GIPFP_APInt_Invalid && "Expected a valid predicate");
+ APInt Value;
+ if (State.MIs[InsnID]->getOperand(1).isCImm())
+ Value = State.MIs[InsnID]->getOperand(1).getCImm()->getValue();
+ else
+ llvm_unreachable("Expected Imm or CImm operand");
+
+ if (!testImmPredicate_APInt(Predicate, Value))
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIM_CheckAPFloatImmPredicate: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t Predicate = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs()
+ << CurrentIdx << ": GIM_CheckAPFloatImmPredicate(MIs["
+ << InsnID << "], Predicate=" << Predicate << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ assert(State.MIs[InsnID]->getOpcode() == TargetOpcode::G_FCONSTANT &&
+ "Expected G_FCONSTANT");
+ assert(State.MIs[InsnID]->getOperand(1).isFPImm() && "Expected FPImm operand");
+ assert(Predicate > GIPFP_APFloat_Invalid && "Expected a valid predicate");
+ APFloat Value = State.MIs[InsnID]->getOperand(1).getFPImm()->getValueAPF();
+
+ if (!testImmPredicate_APFloat(Predicate, Value))
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIM_CheckIsBuildVectorAllOnes:
+ case GIM_CheckIsBuildVectorAllZeros: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx
+ << ": GIM_CheckBuildVectorAll{Zeros|Ones}(MIs["
+ << InsnID << "])\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+ const MachineInstr *MI = State.MIs[InsnID];
+ assert((MI->getOpcode() == TargetOpcode::G_BUILD_VECTOR ||
+ MI->getOpcode() == TargetOpcode::G_BUILD_VECTOR_TRUNC) &&
+ "Expected G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC");
+
+ if (MatcherOpcode == GIM_CheckIsBuildVectorAllOnes) {
+ if (!isBuildVectorAllOnes(*MI, MRI)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ } else {
+ if (!isBuildVectorAllZeros(*MI, MRI)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ }
+
+ break;
+ }
+ case GIM_CheckCxxInsnPredicate: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t Predicate = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs()
+ << CurrentIdx << ": GIM_CheckCxxPredicate(MIs["
+ << InsnID << "], Predicate=" << Predicate << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ assert(Predicate > GIPFP_MI_Invalid && "Expected a valid predicate");
+
+ if (!testMIPredicate_MI(Predicate, *State.MIs[InsnID],
+ State.RecordedOperands))
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIM_CheckAtomicOrdering: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckAtomicOrdering(MIs["
+ << InsnID << "], " << (uint64_t)Ordering << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (!State.MIs[InsnID]->hasOneMemOperand())
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+
+ for (const auto &MMO : State.MIs[InsnID]->memoperands())
+ if (MMO->getOrdering() != Ordering)
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIM_CheckAtomicOrderingOrStrongerThan: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx
+ << ": GIM_CheckAtomicOrderingOrStrongerThan(MIs["
+ << InsnID << "], " << (uint64_t)Ordering << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (!State.MIs[InsnID]->hasOneMemOperand())
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+
+ for (const auto &MMO : State.MIs[InsnID]->memoperands())
+ if (!isAtLeastOrStrongerThan(MMO->getOrdering(), Ordering))
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIM_CheckAtomicOrderingWeakerThan: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx
+ << ": GIM_CheckAtomicOrderingWeakerThan(MIs["
+ << InsnID << "], " << (uint64_t)Ordering << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (!State.MIs[InsnID]->hasOneMemOperand())
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+
+ for (const auto &MMO : State.MIs[InsnID]->memoperands())
+ if (!isStrongerThan(Ordering, MMO->getOrdering()))
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIM_CheckMemoryAddressSpace: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t MMOIdx = MatchTable[CurrentIdx++];
+ // This accepts a list of possible address spaces.
+ const int NumAddrSpace = MatchTable[CurrentIdx++];
+
+ if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+
+      // We still need to jump to the end of the list of address spaces even
+      // if we find a match earlier.
+ const uint64_t LastIdx = CurrentIdx + NumAddrSpace;
+
+ const MachineMemOperand *MMO
+ = *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);
+ const unsigned MMOAddrSpace = MMO->getAddrSpace();
+
+ bool Success = false;
+ for (int I = 0; I != NumAddrSpace; ++I) {
+ unsigned AddrSpace = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(
+ TgtInstructionSelector::getName(),
+ dbgs() << "addrspace(" << MMOAddrSpace << ") vs "
+ << AddrSpace << '\n');
+
+ if (AddrSpace == MMOAddrSpace) {
+ Success = true;
+ break;
+ }
+ }
+
+ CurrentIdx = LastIdx;
+ if (!Success && handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIM_CheckMemoryAlignment: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t MMOIdx = MatchTable[CurrentIdx++];
+ unsigned MinAlign = MatchTable[CurrentIdx++];
+
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+ if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+
+ MachineMemOperand *MMO
+ = *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckMemoryAlignment"
+ << "(MIs[" << InsnID << "]->memoperands() + " << MMOIdx
+ << ")->getAlignment() >= " << MinAlign << ")\n");
+ if (MMO->getAlign() < MinAlign && handleReject() == RejectAndGiveUp)
+ return false;
+
+ break;
+ }
+ case GIM_CheckMemorySizeEqualTo: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t MMOIdx = MatchTable[CurrentIdx++];
+ uint64_t Size = MatchTable[CurrentIdx++];
+
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx
+ << ": GIM_CheckMemorySizeEqual(MIs[" << InsnID
+ << "]->memoperands() + " << MMOIdx
+ << ", Size=" << Size << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+ if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+
+ MachineMemOperand *MMO = *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);
+
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << MMO->getSize() << " bytes vs " << Size
+ << " bytes\n");
+ if (MMO->getSize() != Size)
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+
+ break;
+ }
+ case GIM_CheckMemorySizeEqualToLLT:
+ case GIM_CheckMemorySizeLessThanLLT:
+ case GIM_CheckMemorySizeGreaterThanLLT: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t MMOIdx = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+
+ DEBUG_WITH_TYPE(
+ TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckMemorySize"
+ << (MatcherOpcode == GIM_CheckMemorySizeEqualToLLT
+ ? "EqualTo"
+ : MatcherOpcode == GIM_CheckMemorySizeGreaterThanLLT
+ ? "GreaterThan"
+ : "LessThan")
+ << "LLT(MIs[" << InsnID << "]->memoperands() + " << MMOIdx
+ << ", OpIdx=" << OpIdx << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+ if (!MO.isReg()) {
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": Not a register\n");
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+
+ if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+
+ MachineMemOperand *MMO = *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);
+
+ unsigned Size = MRI.getType(MO.getReg()).getSizeInBits();
+ if (MatcherOpcode == GIM_CheckMemorySizeEqualToLLT &&
+ MMO->getSizeInBits() != Size) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ } else if (MatcherOpcode == GIM_CheckMemorySizeLessThanLLT &&
+ MMO->getSizeInBits() >= Size) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ } else if (MatcherOpcode == GIM_CheckMemorySizeGreaterThanLLT &&
+ MMO->getSizeInBits() <= Size)
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+
+ break;
+ }
+ case GIM_CheckType: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t TypeID = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckType(MIs[" << InsnID
+ << "]->getOperand(" << OpIdx
+ << "), TypeID=" << TypeID << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+ if (!MO.isReg() ||
+ MRI.getType(MO.getReg()) != ISelInfo.TypeObjects[TypeID]) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+ case GIM_CheckPointerToAny: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t SizeInBits = MatchTable[CurrentIdx++];
+
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckPointerToAny(MIs["
+ << InsnID << "]->getOperand(" << OpIdx
+ << "), SizeInBits=" << SizeInBits << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+ const LLT Ty = MRI.getType(MO.getReg());
+
+ // iPTR must be looked up in the target.
+ if (SizeInBits == 0) {
+ MachineFunction *MF = State.MIs[InsnID]->getParent()->getParent();
+ const unsigned AddrSpace = Ty.getAddressSpace();
+ SizeInBits = MF->getDataLayout().getPointerSizeInBits(AddrSpace);
+ }
+
+ assert(SizeInBits != 0 && "Pointer size must be known");
+
+ if (MO.isReg()) {
+ if (!Ty.isPointer() || Ty.getSizeInBits() != SizeInBits)
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ } else if (handleReject() == RejectAndGiveUp)
+ return false;
+
+ break;
+ }
+ case GIM_RecordNamedOperand: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ uint64_t StoreIdx = MatchTable[CurrentIdx++];
+
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_RecordNamedOperand(MIs["
+ << InsnID << "]->getOperand(" << OpIdx
+ << "), StoreIdx=" << StoreIdx << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ assert(StoreIdx < State.RecordedOperands.size() && "Index out of range");
+ State.RecordedOperands[StoreIdx] = &State.MIs[InsnID]->getOperand(OpIdx);
+ break;
+ }
+ case GIM_CheckRegBankForClass: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t RCEnum = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckRegBankForClass(MIs["
+ << InsnID << "]->getOperand(" << OpIdx
+ << "), RCEnum=" << RCEnum << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+ if (!MO.isReg() ||
+ &RBI.getRegBankFromRegClass(*TRI.getRegClass(RCEnum),
+ MRI.getType(MO.getReg())) !=
+ RBI.getRegBank(MO.getReg(), MRI, TRI)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+
+ case GIM_CheckComplexPattern: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t RendererID = MatchTable[CurrentIdx++];
+ int64_t ComplexPredicateID = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": State.Renderers[" << RendererID
+ << "] = GIM_CheckComplexPattern(MIs[" << InsnID
+ << "]->getOperand(" << OpIdx
+ << "), ComplexPredicateID=" << ComplexPredicateID
+ << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ // FIXME: Use std::invoke() when it's available.
+ ComplexRendererFns Renderer =
+ (ISel.*ISelInfo.ComplexPredicates[ComplexPredicateID])(
+ State.MIs[InsnID]->getOperand(OpIdx));
+ if (Renderer.hasValue())
+ State.Renderers[RendererID] = Renderer.getValue();
+ else
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+
+ case GIM_CheckConstantInt: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t Value = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckConstantInt(MIs["
+ << InsnID << "]->getOperand(" << OpIdx
+ << "), Value=" << Value << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+ if (MO.isReg()) {
+ // isOperandImmEqual() will sign-extend to 64-bits, so should we.
+ LLT Ty = MRI.getType(MO.getReg());
+ Value = SignExtend64(Value, Ty.getSizeInBits());
+
+ if (!isOperandImmEqual(MO, Value, MRI)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ } else if (handleReject() == RejectAndGiveUp)
+ return false;
+
+ break;
+ }
+
+ case GIM_CheckLiteralInt: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t Value = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckLiteralInt(MIs["
+ << InsnID << "]->getOperand(" << OpIdx
+ << "), Value=" << Value << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+ if (MO.isImm() && MO.getImm() == Value)
+ break;
+
+ if (MO.isCImm() && MO.getCImm()->equalsInt(Value))
+ break;
+
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+
+ break;
+ }
+
+ case GIM_CheckIntrinsicID: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t Value = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckIntrinsicID(MIs["
+ << InsnID << "]->getOperand(" << OpIdx
+ << "), Value=" << Value << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+ if (!MO.isIntrinsicID() || MO.getIntrinsicID() != Value)
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIM_CheckCmpPredicate: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t Value = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckCmpPredicate(MIs["
+ << InsnID << "]->getOperand(" << OpIdx
+ << "), Value=" << Value << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+ if (!MO.isPredicate() || MO.getPredicate() != Value)
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIM_CheckIsMBB: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckIsMBB(MIs[" << InsnID
+ << "]->getOperand(" << OpIdx << "))\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (!State.MIs[InsnID]->getOperand(OpIdx).isMBB()) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+ case GIM_CheckIsImm: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckIsImm(MIs[" << InsnID
+ << "]->getOperand(" << OpIdx << "))\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (!State.MIs[InsnID]->getOperand(OpIdx).isImm()) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+ case GIM_CheckIsSafeToFold: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckIsSafeToFold(MIs["
+ << InsnID << "])\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (!isObviouslySafeToFold(*State.MIs[InsnID], *State.MIs[0])) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+ case GIM_CheckIsSameOperand: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t OtherInsnID = MatchTable[CurrentIdx++];
+ int64_t OtherOpIdx = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckIsSameOperand(MIs["
+ << InsnID << "][" << OpIdx << "], MIs["
+ << OtherInsnID << "][" << OtherOpIdx << "])\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ assert(State.MIs[OtherInsnID] != nullptr && "Used insn before defined");
+ if (!State.MIs[InsnID]->getOperand(OpIdx).isIdenticalTo(
+ State.MIs[OtherInsnID]->getOperand(OtherOpIdx))) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+ case GIM_Reject:
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_Reject\n");
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+
+ case GIR_MutateOpcode: {
+ int64_t OldInsnID = MatchTable[CurrentIdx++];
+ uint64_t NewInsnID = MatchTable[CurrentIdx++];
+ int64_t NewOpcode = MatchTable[CurrentIdx++];
+ if (NewInsnID >= OutMIs.size())
+ OutMIs.resize(NewInsnID + 1);
+
+ OutMIs[NewInsnID] = MachineInstrBuilder(*State.MIs[OldInsnID]->getMF(),
+ State.MIs[OldInsnID]);
+ OutMIs[NewInsnID]->setDesc(TII.get(NewOpcode));
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_MutateOpcode(OutMIs["
+ << NewInsnID << "], MIs[" << OldInsnID << "], "
+ << NewOpcode << ")\n");
+ break;
+ }
+
+ case GIR_BuildMI: {
+ uint64_t NewInsnID = MatchTable[CurrentIdx++];
+ int64_t Opcode = MatchTable[CurrentIdx++];
+ if (NewInsnID >= OutMIs.size())
+ OutMIs.resize(NewInsnID + 1);
+
+ OutMIs[NewInsnID] = BuildMI(*State.MIs[0]->getParent(), State.MIs[0],
+ State.MIs[0]->getDebugLoc(), TII.get(Opcode));
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_BuildMI(OutMIs["
+ << NewInsnID << "], " << Opcode << ")\n");
+ break;
+ }
+
+ case GIR_Copy: {
+ int64_t NewInsnID = MatchTable[CurrentIdx++];
+ int64_t OldInsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+ OutMIs[NewInsnID].add(State.MIs[OldInsnID]->getOperand(OpIdx));
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs()
+ << CurrentIdx << ": GIR_Copy(OutMIs[" << NewInsnID
+ << "], MIs[" << OldInsnID << "], " << OpIdx << ")\n");
+ break;
+ }
+
+ case GIR_CopyOrAddZeroReg: {
+ int64_t NewInsnID = MatchTable[CurrentIdx++];
+ int64_t OldInsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t ZeroReg = MatchTable[CurrentIdx++];
+ assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+ MachineOperand &MO = State.MIs[OldInsnID]->getOperand(OpIdx);
+ if (isOperandImmEqual(MO, 0, MRI))
+ OutMIs[NewInsnID].addReg(ZeroReg);
+ else
+ OutMIs[NewInsnID].add(MO);
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_CopyOrAddZeroReg(OutMIs["
+ << NewInsnID << "], MIs[" << OldInsnID << "], "
+ << OpIdx << ", " << ZeroReg << ")\n");
+ break;
+ }
+
+ case GIR_CopySubReg: {
+ int64_t NewInsnID = MatchTable[CurrentIdx++];
+ int64_t OldInsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t SubRegIdx = MatchTable[CurrentIdx++];
+ assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+ OutMIs[NewInsnID].addReg(State.MIs[OldInsnID]->getOperand(OpIdx).getReg(),
+ 0, SubRegIdx);
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_CopySubReg(OutMIs["
+ << NewInsnID << "], MIs[" << OldInsnID << "], "
+ << OpIdx << ", " << SubRegIdx << ")\n");
+ break;
+ }
+
+ case GIR_AddImplicitDef: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t RegNum = MatchTable[CurrentIdx++];
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ OutMIs[InsnID].addDef(RegNum, RegState::Implicit);
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_AddImplicitDef(OutMIs["
+ << InsnID << "], " << RegNum << ")\n");
+ break;
+ }
+
+ case GIR_AddImplicitUse: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t RegNum = MatchTable[CurrentIdx++];
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ OutMIs[InsnID].addUse(RegNum, RegState::Implicit);
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_AddImplicitUse(OutMIs["
+ << InsnID << "], " << RegNum << ")\n");
+ break;
+ }
+
+ case GIR_AddRegister: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t RegNum = MatchTable[CurrentIdx++];
+ uint64_t RegFlags = MatchTable[CurrentIdx++];
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ OutMIs[InsnID].addReg(RegNum, RegFlags);
+ DEBUG_WITH_TYPE(
+ TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_AddRegister(OutMIs["
+ << InsnID << "], " << RegNum << ", " << RegFlags << ")\n");
+ break;
+ }
+
+ case GIR_AddTempRegister:
+ case GIR_AddTempSubRegister: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t TempRegID = MatchTable[CurrentIdx++];
+ uint64_t TempRegFlags = MatchTable[CurrentIdx++];
+ unsigned SubReg = 0;
+ if (MatcherOpcode == GIR_AddTempSubRegister)
+ SubReg = MatchTable[CurrentIdx++];
+
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+
+ OutMIs[InsnID].addReg(State.TempRegisters[TempRegID], TempRegFlags, SubReg);
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_AddTempRegister(OutMIs["
+ << InsnID << "], TempRegisters[" << TempRegID
+ << "]";
+ if (SubReg)
+ dbgs() << '.' << TRI.getSubRegIndexName(SubReg);
+ dbgs() << ", " << TempRegFlags << ")\n");
+ break;
+ }
+
+ case GIR_AddImm: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t Imm = MatchTable[CurrentIdx++];
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ OutMIs[InsnID].addImm(Imm);
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_AddImm(OutMIs[" << InsnID
+ << "], " << Imm << ")\n");
+ break;
+ }
+
+ case GIR_ComplexRenderer: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t RendererID = MatchTable[CurrentIdx++];
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ for (const auto &RenderOpFn : State.Renderers[RendererID])
+ RenderOpFn(OutMIs[InsnID]);
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_ComplexRenderer(OutMIs["
+ << InsnID << "], " << RendererID << ")\n");
+ break;
+ }
+ case GIR_ComplexSubOperandRenderer: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t RendererID = MatchTable[CurrentIdx++];
+ int64_t RenderOpID = MatchTable[CurrentIdx++];
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ State.Renderers[RendererID][RenderOpID](OutMIs[InsnID]);
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx
+ << ": GIR_ComplexSubOperandRenderer(OutMIs["
+ << InsnID << "], " << RendererID << ", "
+ << RenderOpID << ")\n");
+ break;
+ }
+
+ case GIR_CopyConstantAsSImm: {
+ int64_t NewInsnID = MatchTable[CurrentIdx++];
+ int64_t OldInsnID = MatchTable[CurrentIdx++];
+ assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+ assert(State.MIs[OldInsnID]->getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
+ if (State.MIs[OldInsnID]->getOperand(1).isCImm()) {
+ OutMIs[NewInsnID].addImm(
+ State.MIs[OldInsnID]->getOperand(1).getCImm()->getSExtValue());
+ } else if (State.MIs[OldInsnID]->getOperand(1).isImm())
+ OutMIs[NewInsnID].add(State.MIs[OldInsnID]->getOperand(1));
+ else
+ llvm_unreachable("Expected Imm or CImm operand");
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_CopyConstantAsSImm(OutMIs["
+ << NewInsnID << "], MIs[" << OldInsnID << "])\n");
+ break;
+ }
+
+ // TODO: Needs a test case once we have a pattern that uses this.
+ case GIR_CopyFConstantAsFPImm: {
+ int64_t NewInsnID = MatchTable[CurrentIdx++];
+ int64_t OldInsnID = MatchTable[CurrentIdx++];
+ assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+ assert(State.MIs[OldInsnID]->getOpcode() == TargetOpcode::G_FCONSTANT && "Expected G_FCONSTANT");
+ if (State.MIs[OldInsnID]->getOperand(1).isFPImm())
+ OutMIs[NewInsnID].addFPImm(
+ State.MIs[OldInsnID]->getOperand(1).getFPImm());
+ else
+ llvm_unreachable("Expected FPImm operand");
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_CopyFConstantAsFPImm(OutMIs["
+ << NewInsnID << "], MIs[" << OldInsnID << "])\n");
+ break;
+ }
+
+ case GIR_CustomRenderer: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OldInsnID = MatchTable[CurrentIdx++];
+ int64_t RendererFnID = MatchTable[CurrentIdx++];
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_CustomRenderer(OutMIs["
+ << InsnID << "], MIs[" << OldInsnID << "], "
+ << RendererFnID << ")\n");
+ (ISel.*ISelInfo.CustomRenderers[RendererFnID])(
+ OutMIs[InsnID], *State.MIs[OldInsnID],
+ -1); // Not a source operand of the old instruction.
+ break;
+ }
+ case GIR_CustomOperandRenderer: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OldInsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t RendererFnID = MatchTable[CurrentIdx++];
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+
+ DEBUG_WITH_TYPE(
+ TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_CustomOperandRenderer(OutMIs["
+ << InsnID << "], MIs[" << OldInsnID << "]->getOperand("
+ << OpIdx << "), "
+ << RendererFnID << ")\n");
+ (ISel.*ISelInfo.CustomRenderers[RendererFnID])(OutMIs[InsnID],
+ *State.MIs[OldInsnID],
+ OpIdx);
+ break;
+ }
+ case GIR_ConstrainOperandRC: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t RCEnum = MatchTable[CurrentIdx++];
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ MachineInstr &I = *OutMIs[InsnID].getInstr();
+ MachineFunction &MF = *I.getParent()->getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const TargetRegisterClass &RC = *TRI.getRegClass(RCEnum);
+ MachineOperand &MO = I.getOperand(OpIdx);
+ constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, RC, MO);
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_ConstrainOperandRC(OutMIs["
+ << InsnID << "], " << OpIdx << ", " << RCEnum
+ << ")\n");
+ break;
+ }
+
+ case GIR_ConstrainSelectedInstOperands: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ constrainSelectedInstRegOperands(*OutMIs[InsnID].getInstr(), TII, TRI,
+ RBI);
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx
+ << ": GIR_ConstrainSelectedInstOperands(OutMIs["
+ << InsnID << "])\n");
+ break;
+ }
+
+ case GIR_MergeMemOperands: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_MergeMemOperands(OutMIs["
+ << InsnID << "]");
+ int64_t MergeInsnID = GIU_MergeMemOperands_EndOfList;
+ while ((MergeInsnID = MatchTable[CurrentIdx++]) !=
+ GIU_MergeMemOperands_EndOfList) {
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << ", MIs[" << MergeInsnID << "]");
+ for (const auto &MMO : State.MIs[MergeInsnID]->memoperands())
+ OutMIs[InsnID].addMemOperand(MMO);
+ }
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(), dbgs() << ")\n");
+ break;
+ }
+
+ case GIR_EraseFromParent: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ assert(State.MIs[InsnID] &&
+ "Attempted to erase an undefined instruction");
+ State.MIs[InsnID]->eraseFromParent();
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_EraseFromParent(MIs["
+ << InsnID << "])\n");
+ break;
+ }
+
+ case GIR_MakeTempReg: {
+ int64_t TempRegID = MatchTable[CurrentIdx++];
+ int64_t TypeID = MatchTable[CurrentIdx++];
+
+ State.TempRegisters[TempRegID] =
+ MRI.createGenericVirtualRegister(ISelInfo.TypeObjects[TypeID]);
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": TempRegs[" << TempRegID
+ << "] = GIR_MakeTempReg(" << TypeID << ")\n");
+ break;
+ }
+
+ case GIR_Coverage: {
+ int64_t RuleID = MatchTable[CurrentIdx++];
+ CoverageInfo.setCovered(RuleID);
+
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs()
+                          << CurrentIdx << ": GIR_Coverage(" << RuleID << ")\n");
+ break;
+ }
+
+ case GIR_Done:
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_Done\n");
+ propagateFlags(OutMIs);
+ return true;
+
+ default:
+ llvm_unreachable("Unexpected command");
+ }
+ }
+}
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
new file mode 100644
index 0000000000..9bed6d039e
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
@@ -0,0 +1,1022 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h -----*- C++ -*-//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This file contains some helper functions which try to clean up artifacts
+// such as G_TRUNC and G_[ZSA]EXT instructions that were created during
+// legalization to make the types match. It also contains some combines of
+// merges that happen at the end of legalization.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/GlobalISel/Legalizer.h"
+#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
+#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "legalizer"
+using namespace llvm::MIPatternMatch;
+
+namespace llvm {
+class LegalizationArtifactCombiner {
+ MachineIRBuilder &Builder;
+ MachineRegisterInfo &MRI;
+ const LegalizerInfo &LI;
+
+ static bool isArtifactCast(unsigned Opc) {
+ switch (Opc) {
+ case TargetOpcode::G_TRUNC:
+ case TargetOpcode::G_SEXT:
+ case TargetOpcode::G_ZEXT:
+ case TargetOpcode::G_ANYEXT:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+public:
+ LegalizationArtifactCombiner(MachineIRBuilder &B, MachineRegisterInfo &MRI,
+ const LegalizerInfo &LI)
+ : Builder(B), MRI(MRI), LI(LI) {}
+
+ bool tryCombineAnyExt(MachineInstr &MI,
+ SmallVectorImpl<MachineInstr *> &DeadInsts,
+ SmallVectorImpl<Register> &UpdatedDefs) {
+ assert(MI.getOpcode() == TargetOpcode::G_ANYEXT);
+
+ Builder.setInstrAndDebugLoc(MI);
+ Register DstReg = MI.getOperand(0).getReg();
+ Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
+
+    // aext(trunc x) -> aext/copy/trunc x
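+    // Illustrative MIR (hypothetical types, not from a specific target):
+    //   %1:_(s16) = G_TRUNC %0:_(s32)
+    //   %2:_(s64) = G_ANYEXT %1(s16)
+    // becomes
+    //   %2:_(s64) = G_ANYEXT %0(s32)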
+ Register TruncSrc;
+ if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
+ LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
+ Builder.buildAnyExtOrTrunc(DstReg, TruncSrc);
+ UpdatedDefs.push_back(DstReg);
+ markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
+ return true;
+ }
+
+ // aext([asz]ext x) -> [asz]ext x
+ Register ExtSrc;
+ MachineInstr *ExtMI;
+ if (mi_match(SrcReg, MRI,
+ m_all_of(m_MInstr(ExtMI), m_any_of(m_GAnyExt(m_Reg(ExtSrc)),
+ m_GSExt(m_Reg(ExtSrc)),
+ m_GZExt(m_Reg(ExtSrc)))))) {
+ Builder.buildInstr(ExtMI->getOpcode(), {DstReg}, {ExtSrc});
+ UpdatedDefs.push_back(DstReg);
+ markInstAndDefDead(MI, *ExtMI, DeadInsts);
+ return true;
+ }
+
+ // Try to fold aext(g_constant) when the larger constant type is legal.
+ // Can't use MIPattern because we don't have a specific constant in mind.
+ auto *SrcMI = MRI.getVRegDef(SrcReg);
+ if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
+ const LLT DstTy = MRI.getType(DstReg);
+ if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
+ auto &CstVal = SrcMI->getOperand(1);
+ Builder.buildConstant(
+ DstReg, CstVal.getCImm()->getValue().sext(DstTy.getSizeInBits()));
+ UpdatedDefs.push_back(DstReg);
+ markInstAndDefDead(MI, *SrcMI, DeadInsts);
+ return true;
+ }
+ }
+ return tryFoldImplicitDef(MI, DeadInsts, UpdatedDefs);
+ }
+
+ bool tryCombineZExt(MachineInstr &MI,
+ SmallVectorImpl<MachineInstr *> &DeadInsts,
+ SmallVectorImpl<Register> &UpdatedDefs,
+ GISelObserverWrapper &Observer) {
+ assert(MI.getOpcode() == TargetOpcode::G_ZEXT);
+
+ Builder.setInstrAndDebugLoc(MI);
+ Register DstReg = MI.getOperand(0).getReg();
+ Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
+
+    // zext(trunc x) -> and (aext/copy/trunc x), mask
+ // zext(sext x) -> and (sext x), mask
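+    // Illustrative MIR (hypothetical types; assumes s32 G_AND and G_CONSTANT
+    // are legal on the target):
+    //   %1:_(s8)  = G_TRUNC %0:_(s32)
+    //   %2:_(s32) = G_ZEXT %1(s8)
+    // becomes, modulo an intermediate COPY (the mask keeps the low 8 bits):
+    //   %3:_(s32) = G_CONSTANT i32 255
+    //   %2:_(s32) = G_AND %0, %3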
+ Register TruncSrc;
+ Register SextSrc;
+ if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))) ||
+ mi_match(SrcReg, MRI, m_GSExt(m_Reg(SextSrc)))) {
+ LLT DstTy = MRI.getType(DstReg);
+ if (isInstUnsupported({TargetOpcode::G_AND, {DstTy}}) ||
+ isConstantUnsupported(DstTy))
+ return false;
+ LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
+ LLT SrcTy = MRI.getType(SrcReg);
+ APInt MaskVal = APInt::getAllOnesValue(SrcTy.getScalarSizeInBits());
+ auto Mask = Builder.buildConstant(
+ DstTy, MaskVal.zext(DstTy.getScalarSizeInBits()));
+ auto Extended = SextSrc ? Builder.buildSExtOrTrunc(DstTy, SextSrc) :
+ Builder.buildAnyExtOrTrunc(DstTy, TruncSrc);
+ Builder.buildAnd(DstReg, Extended, Mask);
+ markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
+ return true;
+ }
+
+ // zext(zext x) -> (zext x)
+ Register ZextSrc;
+ if (mi_match(SrcReg, MRI, m_GZExt(m_Reg(ZextSrc)))) {
+ LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI);
+ Observer.changingInstr(MI);
+ MI.getOperand(1).setReg(ZextSrc);
+ Observer.changedInstr(MI);
+ UpdatedDefs.push_back(DstReg);
+ markDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
+ return true;
+ }
+
+ // Try to fold zext(g_constant) when the larger constant type is legal.
+ // Can't use MIPattern because we don't have a specific constant in mind.
+ auto *SrcMI = MRI.getVRegDef(SrcReg);
+ if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
+ const LLT DstTy = MRI.getType(DstReg);
+ if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
+ auto &CstVal = SrcMI->getOperand(1);
+ Builder.buildConstant(
+ DstReg, CstVal.getCImm()->getValue().zext(DstTy.getSizeInBits()));
+ UpdatedDefs.push_back(DstReg);
+ markInstAndDefDead(MI, *SrcMI, DeadInsts);
+ return true;
+ }
+ }
+ return tryFoldImplicitDef(MI, DeadInsts, UpdatedDefs);
+ }
+
+ bool tryCombineSExt(MachineInstr &MI,
+ SmallVectorImpl<MachineInstr *> &DeadInsts,
+ SmallVectorImpl<Register> &UpdatedDefs) {
+ assert(MI.getOpcode() == TargetOpcode::G_SEXT);
+
+ Builder.setInstrAndDebugLoc(MI);
+ Register DstReg = MI.getOperand(0).getReg();
+ Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
+
+    // sext(trunc x) -> (sext_inreg (aext/copy/trunc x), c)
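+    // Illustrative MIR (hypothetical types; assumes G_SEXT_INREG is supported
+    // for s32):
+    //   %1:_(s8)  = G_TRUNC %0:_(s32)
+    //   %2:_(s32) = G_SEXT %1(s8)
+    // becomes, modulo an intermediate COPY:
+    //   %2:_(s32) = G_SEXT_INREG %0, 8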
+ Register TruncSrc;
+ if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
+ LLT DstTy = MRI.getType(DstReg);
+ if (isInstUnsupported({TargetOpcode::G_SEXT_INREG, {DstTy}}))
+ return false;
+ LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
+ LLT SrcTy = MRI.getType(SrcReg);
+ uint64_t SizeInBits = SrcTy.getScalarSizeInBits();
+ Builder.buildInstr(
+ TargetOpcode::G_SEXT_INREG, {DstReg},
+ {Builder.buildAnyExtOrTrunc(DstTy, TruncSrc), SizeInBits});
+ markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
+ return true;
+ }
+
+ // sext(zext x) -> (zext x)
+ // sext(sext x) -> (sext x)
+ Register ExtSrc;
+ MachineInstr *ExtMI;
+ if (mi_match(SrcReg, MRI,
+ m_all_of(m_MInstr(ExtMI), m_any_of(m_GZExt(m_Reg(ExtSrc)),
+ m_GSExt(m_Reg(ExtSrc)))))) {
+ LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI);
+ Builder.buildInstr(ExtMI->getOpcode(), {DstReg}, {ExtSrc});
+ UpdatedDefs.push_back(DstReg);
+ markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
+ return true;
+ }
+
+ return tryFoldImplicitDef(MI, DeadInsts, UpdatedDefs);
+ }
+
+ bool tryCombineTrunc(MachineInstr &MI,
+ SmallVectorImpl<MachineInstr *> &DeadInsts,
+ SmallVectorImpl<Register> &UpdatedDefs,
+ GISelObserverWrapper &Observer) {
+ assert(MI.getOpcode() == TargetOpcode::G_TRUNC);
+
+ Builder.setInstr(MI);
+ Register DstReg = MI.getOperand(0).getReg();
+ Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
+
+ // Try to fold trunc(g_constant) when the smaller constant type is legal.
+ // Can't use MIPattern because we don't have a specific constant in mind.
+ auto *SrcMI = MRI.getVRegDef(SrcReg);
+ if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
+ const LLT DstTy = MRI.getType(DstReg);
+ if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
+ auto &CstVal = SrcMI->getOperand(1);
+ Builder.buildConstant(
+ DstReg, CstVal.getCImm()->getValue().trunc(DstTy.getSizeInBits()));
+ UpdatedDefs.push_back(DstReg);
+ markInstAndDefDead(MI, *SrcMI, DeadInsts);
+ return true;
+ }
+ }
+
+ // Try to fold trunc(merge) to directly use the source of the merge.
+    // This gets rid of large, difficult-to-legalize merges.
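+    // Illustrative MIR (hypothetical types; the DstSize == MergeSrcSize case
+    // below):
+    //   %2:_(s64) = G_MERGE_VALUES %0:_(s32), %1:_(s32)
+    //   %3:_(s32) = G_TRUNC %2(s64)
+    // becomes
+    //   %3:_(s32) = COPY %0(s32)  (or %3 is simply replaced by %0)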
+ if (SrcMI->getOpcode() == TargetOpcode::G_MERGE_VALUES) {
+ const Register MergeSrcReg = SrcMI->getOperand(1).getReg();
+ const LLT MergeSrcTy = MRI.getType(MergeSrcReg);
+ const LLT DstTy = MRI.getType(DstReg);
+
+ // We can only fold if the types are scalar
+ const unsigned DstSize = DstTy.getSizeInBits();
+ const unsigned MergeSrcSize = MergeSrcTy.getSizeInBits();
+ if (!DstTy.isScalar() || !MergeSrcTy.isScalar())
+ return false;
+
+ if (DstSize < MergeSrcSize) {
+ // When the merge source is larger than the destination, we can just
+ // truncate the merge source directly
+ if (isInstUnsupported({TargetOpcode::G_TRUNC, {DstTy, MergeSrcTy}}))
+ return false;
+
+ LLVM_DEBUG(dbgs() << "Combining G_TRUNC(G_MERGE_VALUES) to G_TRUNC: "
+ << MI);
+
+ Builder.buildTrunc(DstReg, MergeSrcReg);
+ UpdatedDefs.push_back(DstReg);
+ } else if (DstSize == MergeSrcSize) {
+ // If the sizes match we can simply try to replace the register
+ LLVM_DEBUG(
+ dbgs() << "Replacing G_TRUNC(G_MERGE_VALUES) with merge input: "
+ << MI);
+ replaceRegOrBuildCopy(DstReg, MergeSrcReg, MRI, Builder, UpdatedDefs,
+ Observer);
+ } else if (DstSize % MergeSrcSize == 0) {
+ // If the trunc size is a multiple of the merge source size we can use
+ // a smaller merge instead
+ if (isInstUnsupported(
+ {TargetOpcode::G_MERGE_VALUES, {DstTy, MergeSrcTy}}))
+ return false;
+
+ LLVM_DEBUG(
+ dbgs() << "Combining G_TRUNC(G_MERGE_VALUES) to G_MERGE_VALUES: "
+ << MI);
+
+ const unsigned NumSrcs = DstSize / MergeSrcSize;
+ assert(NumSrcs < SrcMI->getNumOperands() - 1 &&
+               "trunc(merge) should require fewer inputs than the merge");
+ SmallVector<Register, 8> SrcRegs(NumSrcs);
+ for (unsigned i = 0; i < NumSrcs; ++i)
+ SrcRegs[i] = SrcMI->getOperand(i + 1).getReg();
+
+ Builder.buildMerge(DstReg, SrcRegs);
+ UpdatedDefs.push_back(DstReg);
+ } else {
+ // Unable to combine
+ return false;
+ }
+
+ markInstAndDefDead(MI, *SrcMI, DeadInsts);
+ return true;
+ }
+
+ // trunc(trunc) -> trunc
+ Register TruncSrc;
+ if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
+ // Always combine trunc(trunc) since the eventual resulting trunc must be
+ // legal anyway as it must be legal for all outputs of the consumer type
+ // set.
+ LLVM_DEBUG(dbgs() << ".. Combine G_TRUNC(G_TRUNC): " << MI);
+
+ Builder.buildTrunc(DstReg, TruncSrc);
+ UpdatedDefs.push_back(DstReg);
+ markInstAndDefDead(MI, *MRI.getVRegDef(TruncSrc), DeadInsts);
+ return true;
+ }
+
+ return false;
+ }
+
+ /// Try to fold G_[ASZ]EXT (G_IMPLICIT_DEF).
+ bool tryFoldImplicitDef(MachineInstr &MI,
+ SmallVectorImpl<MachineInstr *> &DeadInsts,
+ SmallVectorImpl<Register> &UpdatedDefs) {
+ unsigned Opcode = MI.getOpcode();
+ assert(Opcode == TargetOpcode::G_ANYEXT || Opcode == TargetOpcode::G_ZEXT ||
+ Opcode == TargetOpcode::G_SEXT);
+
+ if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF,
+ MI.getOperand(1).getReg(), MRI)) {
+ Builder.setInstr(MI);
+ Register DstReg = MI.getOperand(0).getReg();
+ LLT DstTy = MRI.getType(DstReg);
+
+ if (Opcode == TargetOpcode::G_ANYEXT) {
+ // G_ANYEXT (G_IMPLICIT_DEF) -> G_IMPLICIT_DEF
+ if (!isInstLegal({TargetOpcode::G_IMPLICIT_DEF, {DstTy}}))
+ return false;
+ LLVM_DEBUG(dbgs() << ".. Combine G_ANYEXT(G_IMPLICIT_DEF): " << MI;);
+ Builder.buildInstr(TargetOpcode::G_IMPLICIT_DEF, {DstReg}, {});
+ UpdatedDefs.push_back(DstReg);
+ } else {
+ // G_[SZ]EXT (G_IMPLICIT_DEF) -> G_CONSTANT 0 because the top
+ // bits will be 0 for G_ZEXT and 0/1 for the G_SEXT.
+ if (isConstantUnsupported(DstTy))
+ return false;
+ LLVM_DEBUG(dbgs() << ".. Combine G_[SZ]EXT(G_IMPLICIT_DEF): " << MI;);
+ Builder.buildConstant(DstReg, 0);
+ UpdatedDefs.push_back(DstReg);
+ }
+
+ markInstAndDefDead(MI, *DefMI, DeadInsts);
+ return true;
+ }
+ return false;
+ }
+
+ bool tryFoldUnmergeCast(MachineInstr &MI, MachineInstr &CastMI,
+ SmallVectorImpl<MachineInstr *> &DeadInsts,
+ SmallVectorImpl<Register> &UpdatedDefs) {
+
+ assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);
+
+ const unsigned CastOpc = CastMI.getOpcode();
+
+ if (!isArtifactCast(CastOpc))
+ return false;
+
+ const unsigned NumDefs = MI.getNumOperands() - 1;
+
+ const Register CastSrcReg = CastMI.getOperand(1).getReg();
+ const LLT CastSrcTy = MRI.getType(CastSrcReg);
+ const LLT DestTy = MRI.getType(MI.getOperand(0).getReg());
+ const LLT SrcTy = MRI.getType(MI.getOperand(NumDefs).getReg());
+
+ const unsigned CastSrcSize = CastSrcTy.getSizeInBits();
+ const unsigned DestSize = DestTy.getSizeInBits();
+
+ if (CastOpc == TargetOpcode::G_TRUNC) {
+ if (SrcTy.isVector() && SrcTy.getScalarType() == DestTy.getScalarType()) {
+ // %1:_(<4 x s8>) = G_TRUNC %0(<4 x s32>)
+ // %2:_(s8), %3:_(s8), %4:_(s8), %5:_(s8) = G_UNMERGE_VALUES %1
+ // =>
+ // %6:_(s32), %7:_(s32), %8:_(s32), %9:_(s32) = G_UNMERGE_VALUES %0
+ // %2:_(s8) = G_TRUNC %6
+ // %3:_(s8) = G_TRUNC %7
+ // %4:_(s8) = G_TRUNC %8
+ // %5:_(s8) = G_TRUNC %9
+
+ unsigned UnmergeNumElts =
+ DestTy.isVector() ? CastSrcTy.getNumElements() / NumDefs : 1;
+ LLT UnmergeTy = CastSrcTy.changeNumElements(UnmergeNumElts);
+
+ if (isInstUnsupported(
+ {TargetOpcode::G_UNMERGE_VALUES, {UnmergeTy, CastSrcTy}}))
+ return false;
+
+ Builder.setInstr(MI);
+ auto NewUnmerge = Builder.buildUnmerge(UnmergeTy, CastSrcReg);
+
+ for (unsigned I = 0; I != NumDefs; ++I) {
+ Register DefReg = MI.getOperand(I).getReg();
+ UpdatedDefs.push_back(DefReg);
+ Builder.buildTrunc(DefReg, NewUnmerge.getReg(I));
+ }
+
+ markInstAndDefDead(MI, CastMI, DeadInsts);
+ return true;
+ }
+
+ if (CastSrcTy.isScalar() && SrcTy.isScalar() && !DestTy.isVector()) {
+ // %1:_(s16) = G_TRUNC %0(s32)
+ // %2:_(s8), %3:_(s8) = G_UNMERGE_VALUES %1
+ // =>
+ // %2:_(s8), %3:_(s8), %4:_(s8), %5:_(s8) = G_UNMERGE_VALUES %0
+
+ // Unmerge(trunc) can be combined if the trunc source size is a multiple
+ // of the unmerge destination size
+ if (CastSrcSize % DestSize != 0)
+ return false;
+
+ // Check if the new unmerge is supported
+ if (isInstUnsupported(
+ {TargetOpcode::G_UNMERGE_VALUES, {DestTy, CastSrcTy}}))
+ return false;
+
+ // Gather the original destination registers and create new ones for the
+ // unused bits
+ const unsigned NewNumDefs = CastSrcSize / DestSize;
+ SmallVector<Register, 8> DstRegs(NewNumDefs);
+ for (unsigned Idx = 0; Idx < NewNumDefs; ++Idx) {
+ if (Idx < NumDefs)
+ DstRegs[Idx] = MI.getOperand(Idx).getReg();
+ else
+ DstRegs[Idx] = MRI.createGenericVirtualRegister(DestTy);
+ }
+
+ // Build new unmerge
+ Builder.setInstr(MI);
+ Builder.buildUnmerge(DstRegs, CastSrcReg);
+ UpdatedDefs.append(DstRegs.begin(), DstRegs.begin() + NewNumDefs);
+ markInstAndDefDead(MI, CastMI, DeadInsts);
+ return true;
+ }
+ }
+
+ // TODO: support combines with other casts as well
+ return false;
+ }
+
+ static bool canFoldMergeOpcode(unsigned MergeOp, unsigned ConvertOp,
+ LLT OpTy, LLT DestTy) {
+ // Check if we found a definition that is like G_MERGE_VALUES.
+ switch (MergeOp) {
+ default:
+ return false;
+ case TargetOpcode::G_BUILD_VECTOR:
+ case TargetOpcode::G_MERGE_VALUES:
+ // The convert operation that we will need to insert is
+ // going to convert the input of that type of instruction (scalar)
+ // to the destination type (DestTy).
+ // The conversion needs to stay in the same domain (scalar to scalar
+      // and vector to vector), so if we were to allow folding the merge
+ // we would need to insert some bitcasts.
+ // E.g.,
+ // <2 x s16> = build_vector s16, s16
+ // <2 x s32> = zext <2 x s16>
+ // <2 x s16>, <2 x s16> = unmerge <2 x s32>
+ //
+      // As-is, the folding would produce:
+ // <2 x s16> = zext s16 <-- scalar to vector
+ // <2 x s16> = zext s16 <-- scalar to vector
+ // Which is invalid.
+ // Instead we would want to generate:
+ // s32 = zext s16
+ // <2 x s16> = bitcast s32
+ // s32 = zext s16
+ // <2 x s16> = bitcast s32
+ //
+ // That is not done yet.
+ if (ConvertOp == 0)
+ return true;
+ return !DestTy.isVector() && OpTy.isVector();
+ case TargetOpcode::G_CONCAT_VECTORS: {
+ if (ConvertOp == 0)
+ return true;
+ if (!DestTy.isVector())
+ return false;
+
+ const unsigned OpEltSize = OpTy.getElementType().getSizeInBits();
+
+ // Don't handle scalarization with a cast that isn't in the same
+ // direction as the vector cast. This could be handled, but it would
+ // require more intermediate unmerges.
+ if (ConvertOp == TargetOpcode::G_TRUNC)
+ return DestTy.getSizeInBits() <= OpEltSize;
+ return DestTy.getSizeInBits() >= OpEltSize;
+ }
+ }
+ }
+
+ /// Try to replace DstReg with SrcReg or build a COPY instruction
+ /// depending on the register constraints.
+ static void replaceRegOrBuildCopy(Register DstReg, Register SrcReg,
+ MachineRegisterInfo &MRI,
+ MachineIRBuilder &Builder,
+ SmallVectorImpl<Register> &UpdatedDefs,
+ GISelChangeObserver &Observer) {
+ if (!llvm::canReplaceReg(DstReg, SrcReg, MRI)) {
+ Builder.buildCopy(DstReg, SrcReg);
+ UpdatedDefs.push_back(DstReg);
+ return;
+ }
+ SmallVector<MachineInstr *, 4> UseMIs;
+ // Get the users and notify the observer before replacing.
+ for (auto &UseMI : MRI.use_instructions(DstReg)) {
+ UseMIs.push_back(&UseMI);
+ Observer.changingInstr(UseMI);
+ }
+ // Replace the registers.
+ MRI.replaceRegWith(DstReg, SrcReg);
+ UpdatedDefs.push_back(SrcReg);
+ // Notify the observer that we changed the instructions.
+ for (auto *UseMI : UseMIs)
+ Observer.changedInstr(*UseMI);
+ }
+
+  /// Return the operand index in \p MI that defines \p SearchDef
+ static unsigned getDefIndex(const MachineInstr &MI, Register SearchDef) {
+ unsigned DefIdx = 0;
+ for (const MachineOperand &Def : MI.defs()) {
+ if (Def.getReg() == SearchDef)
+ break;
+ ++DefIdx;
+ }
+
+ return DefIdx;
+ }
+
+ bool tryCombineUnmergeValues(MachineInstr &MI,
+ SmallVectorImpl<MachineInstr *> &DeadInsts,
+ SmallVectorImpl<Register> &UpdatedDefs,
+ GISelChangeObserver &Observer) {
+ assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);
+
+ unsigned NumDefs = MI.getNumOperands() - 1;
+ Register SrcReg = MI.getOperand(NumDefs).getReg();
+ MachineInstr *SrcDef = getDefIgnoringCopies(SrcReg, MRI);
+ if (!SrcDef)
+ return false;
+
+ LLT OpTy = MRI.getType(MI.getOperand(NumDefs).getReg());
+ LLT DestTy = MRI.getType(MI.getOperand(0).getReg());
+
+ if (SrcDef->getOpcode() == TargetOpcode::G_UNMERGE_VALUES) {
+ // %0:_(<4 x s16>) = G_FOO
+ // %1:_(<2 x s16>), %2:_(<2 x s16>) = G_UNMERGE_VALUES %0
+ // %3:_(s16), %4:_(s16) = G_UNMERGE_VALUES %1
+ //
+ // %3:_(s16), %4:_(s16), %5:_(s16), %6:_(s16) = G_UNMERGE_VALUES %0
+ const unsigned NumSrcOps = SrcDef->getNumOperands();
+ Register SrcUnmergeSrc = SrcDef->getOperand(NumSrcOps - 1).getReg();
+ LLT SrcUnmergeSrcTy = MRI.getType(SrcUnmergeSrc);
+
+ // If we need to decrease the number of vector elements in the result type
+ // of an unmerge, this would involve the creation of an equivalent unmerge
+ // to copy back to the original result registers.
+ LegalizeActionStep ActionStep = LI.getAction(
+ {TargetOpcode::G_UNMERGE_VALUES, {OpTy, SrcUnmergeSrcTy}});
+ switch (ActionStep.Action) {
+ case LegalizeActions::Lower:
+ case LegalizeActions::Unsupported:
+ break;
+ case LegalizeActions::FewerElements:
+ case LegalizeActions::NarrowScalar:
+ if (ActionStep.TypeIdx == 1)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ Builder.setInstrAndDebugLoc(MI);
+ auto NewUnmerge = Builder.buildUnmerge(DestTy, SrcUnmergeSrc);
+
+ // TODO: Should we try to process out the other defs now? If the other
+ // defs of the source unmerge are also unmerged, we end up with a separate
+ // unmerge for each one.
+ unsigned SrcDefIdx = getDefIndex(*SrcDef, SrcReg);
+ for (unsigned I = 0; I != NumDefs; ++I) {
+ Register Def = MI.getOperand(I).getReg();
+ replaceRegOrBuildCopy(Def, NewUnmerge.getReg(SrcDefIdx * NumDefs + I),
+ MRI, Builder, UpdatedDefs, Observer);
+ }
+
+ markInstAndDefDead(MI, *SrcDef, DeadInsts, SrcDefIdx);
+ return true;
+ }
+
+ MachineInstr *MergeI = SrcDef;
+ unsigned ConvertOp = 0;
+
+ // Handle intermediate conversions
+ unsigned SrcOp = SrcDef->getOpcode();
+ if (isArtifactCast(SrcOp)) {
+ ConvertOp = SrcOp;
+ MergeI = getDefIgnoringCopies(SrcDef->getOperand(1).getReg(), MRI);
+ }
+
+ if (!MergeI || !canFoldMergeOpcode(MergeI->getOpcode(),
+ ConvertOp, OpTy, DestTy)) {
+ // We might have a chance to combine later by trying to combine
+ // unmerge(cast) first
+ return tryFoldUnmergeCast(MI, *SrcDef, DeadInsts, UpdatedDefs);
+ }
+
+ const unsigned NumMergeRegs = MergeI->getNumOperands() - 1;
+
+ if (NumMergeRegs < NumDefs) {
+ if (NumDefs % NumMergeRegs != 0)
+ return false;
+
+ Builder.setInstr(MI);
+ // Transform to UNMERGEs, for example
+ // %1 = G_MERGE_VALUES %4, %5
+ // %9, %10, %11, %12 = G_UNMERGE_VALUES %1
+ // to
+ // %9, %10 = G_UNMERGE_VALUES %4
+ // %11, %12 = G_UNMERGE_VALUES %5
+
+ const unsigned NewNumDefs = NumDefs / NumMergeRegs;
+ for (unsigned Idx = 0; Idx < NumMergeRegs; ++Idx) {
+ SmallVector<Register, 8> DstRegs;
+ for (unsigned j = 0, DefIdx = Idx * NewNumDefs; j < NewNumDefs;
+ ++j, ++DefIdx)
+ DstRegs.push_back(MI.getOperand(DefIdx).getReg());
+
+ if (ConvertOp) {
+ LLT MergeSrcTy = MRI.getType(MergeI->getOperand(1).getReg());
+
+          // This is a vector that is being split and cast. Extract to the
+ // element type, and do the conversion on the scalars (or smaller
+ // vectors).
+ LLT MergeEltTy = MergeSrcTy.divide(NewNumDefs);
+
+ // Handle split to smaller vectors, with conversions.
+ // %2(<8 x s8>) = G_CONCAT_VECTORS %0(<4 x s8>), %1(<4 x s8>)
+ // %3(<8 x s16>) = G_SEXT %2
+ // %4(<2 x s16>), %5(<2 x s16>), %6(<2 x s16>), %7(<2 x s16>) = G_UNMERGE_VALUES %3
+ //
+ // =>
+ //
+ // %8(<2 x s8>), %9(<2 x s8>) = G_UNMERGE_VALUES %0
+ // %10(<2 x s8>), %11(<2 x s8>) = G_UNMERGE_VALUES %1
+ // %4(<2 x s16>) = G_SEXT %8
+ // %5(<2 x s16>) = G_SEXT %9
+ // %6(<2 x s16>) = G_SEXT %10
+          // %7(<2 x s16>) = G_SEXT %11
+
+ SmallVector<Register, 4> TmpRegs(NewNumDefs);
+ for (unsigned k = 0; k < NewNumDefs; ++k)
+ TmpRegs[k] = MRI.createGenericVirtualRegister(MergeEltTy);
+
+ Builder.buildUnmerge(TmpRegs, MergeI->getOperand(Idx + 1).getReg());
+
+ for (unsigned k = 0; k < NewNumDefs; ++k)
+ Builder.buildInstr(ConvertOp, {DstRegs[k]}, {TmpRegs[k]});
+ } else {
+ Builder.buildUnmerge(DstRegs, MergeI->getOperand(Idx + 1).getReg());
+ }
+ UpdatedDefs.append(DstRegs.begin(), DstRegs.end());
+ }
+
+ } else if (NumMergeRegs > NumDefs) {
+ if (ConvertOp != 0 || NumMergeRegs % NumDefs != 0)
+ return false;
+
+ Builder.setInstr(MI);
+ // Transform to MERGEs
+ // %6 = G_MERGE_VALUES %17, %18, %19, %20
+ // %7, %8 = G_UNMERGE_VALUES %6
+ // to
+ // %7 = G_MERGE_VALUES %17, %18
+ // %8 = G_MERGE_VALUES %19, %20
+
+ const unsigned NumRegs = NumMergeRegs / NumDefs;
+ for (unsigned DefIdx = 0; DefIdx < NumDefs; ++DefIdx) {
+ SmallVector<Register, 8> Regs;
+ for (unsigned j = 0, Idx = NumRegs * DefIdx + 1; j < NumRegs;
+ ++j, ++Idx)
+ Regs.push_back(MergeI->getOperand(Idx).getReg());
+
+ Register DefReg = MI.getOperand(DefIdx).getReg();
+ Builder.buildMerge(DefReg, Regs);
+ UpdatedDefs.push_back(DefReg);
+ }
+
+ } else {
+ LLT MergeSrcTy = MRI.getType(MergeI->getOperand(1).getReg());
+
+ if (!ConvertOp && DestTy != MergeSrcTy)
+ ConvertOp = TargetOpcode::G_BITCAST;
+
+ if (ConvertOp) {
+ Builder.setInstr(MI);
+
+ for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
+ Register MergeSrc = MergeI->getOperand(Idx + 1).getReg();
+ Register DefReg = MI.getOperand(Idx).getReg();
+ Builder.buildInstr(ConvertOp, {DefReg}, {MergeSrc});
+ UpdatedDefs.push_back(DefReg);
+ }
+
+ markInstAndDefDead(MI, *MergeI, DeadInsts);
+ return true;
+ }
+
+ assert(DestTy == MergeSrcTy &&
+ "Bitcast and the other kinds of conversions should "
+ "have happened earlier");
+
+ Builder.setInstr(MI);
+ for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
+ Register DstReg = MI.getOperand(Idx).getReg();
+ Register SrcReg = MergeI->getOperand(Idx + 1).getReg();
+ replaceRegOrBuildCopy(DstReg, SrcReg, MRI, Builder, UpdatedDefs,
+ Observer);
+ }
+ }
+
+ markInstAndDefDead(MI, *MergeI, DeadInsts);
+ return true;
+ }
+
+ static bool isMergeLikeOpcode(unsigned Opc) {
+ switch (Opc) {
+ case TargetOpcode::G_MERGE_VALUES:
+ case TargetOpcode::G_BUILD_VECTOR:
+ case TargetOpcode::G_CONCAT_VECTORS:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool tryCombineExtract(MachineInstr &MI,
+ SmallVectorImpl<MachineInstr *> &DeadInsts,
+ SmallVectorImpl<Register> &UpdatedDefs) {
+ assert(MI.getOpcode() == TargetOpcode::G_EXTRACT);
+
+ // Try to use the source registers from a G_MERGE_VALUES
+ //
+ // %2 = G_MERGE_VALUES %0, %1
+ // %3 = G_EXTRACT %2, N
+ // =>
+ //
+ // for N < %2.getSizeInBits() / 2
+ // %3 = G_EXTRACT %0, N
+ //
+ // for N >= %2.getSizeInBits() / 2
+    // %3 = G_EXTRACT %1, (N - %0.getSizeInBits())
+
+ Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
+ MachineInstr *MergeI = MRI.getVRegDef(SrcReg);
+ if (!MergeI || !isMergeLikeOpcode(MergeI->getOpcode()))
+ return false;
+
+ Register DstReg = MI.getOperand(0).getReg();
+ LLT DstTy = MRI.getType(DstReg);
+ LLT SrcTy = MRI.getType(SrcReg);
+
+ // TODO: Do we need to check if the resulting extract is supported?
+ unsigned ExtractDstSize = DstTy.getSizeInBits();
+ unsigned Offset = MI.getOperand(2).getImm();
+ unsigned NumMergeSrcs = MergeI->getNumOperands() - 1;
+ unsigned MergeSrcSize = SrcTy.getSizeInBits() / NumMergeSrcs;
+ unsigned MergeSrcIdx = Offset / MergeSrcSize;
+
+ // Compute the offset of the last bit the extract needs.
+ unsigned EndMergeSrcIdx = (Offset + ExtractDstSize - 1) / MergeSrcSize;
+
+ // Can't handle the case where the extract spans multiple inputs.
+ if (MergeSrcIdx != EndMergeSrcIdx)
+ return false;
+
+ // TODO: We could modify MI in place in most cases.
+ Builder.setInstr(MI);
+ Builder.buildExtract(DstReg, MergeI->getOperand(MergeSrcIdx + 1).getReg(),
+ Offset - MergeSrcIdx * MergeSrcSize);
+ UpdatedDefs.push_back(DstReg);
+ markInstAndDefDead(MI, *MergeI, DeadInsts);
+ return true;
+ }
+
+ /// Try to combine away MI.
+ /// Returns true if it combined away the MI.
+ /// Adds instructions that are dead as a result of the combine
+ /// into DeadInsts, which can include MI.
+ bool tryCombineInstruction(MachineInstr &MI,
+ SmallVectorImpl<MachineInstr *> &DeadInsts,
+ GISelObserverWrapper &WrapperObserver) {
+ // This might be a recursive call, and we might have DeadInsts already
+ // populated. To avoid bad things happening later with multiple vreg defs
+ // etc, process the dead instructions now if any.
+ if (!DeadInsts.empty())
+ deleteMarkedDeadInsts(DeadInsts, WrapperObserver);
+
+ // Put here every vreg that was redefined in such a way that it's at least
+ // possible that one (or more) of its users (immediate or COPY-separated)
+ // could become artifact combinable with the new definition (or the
+ // instruction reachable from it through a chain of copies if any).
+ SmallVector<Register, 4> UpdatedDefs;
+ bool Changed = false;
+ switch (MI.getOpcode()) {
+ default:
+ return false;
+ case TargetOpcode::G_ANYEXT:
+ Changed = tryCombineAnyExt(MI, DeadInsts, UpdatedDefs);
+ break;
+ case TargetOpcode::G_ZEXT:
+ Changed = tryCombineZExt(MI, DeadInsts, UpdatedDefs, WrapperObserver);
+ break;
+ case TargetOpcode::G_SEXT:
+ Changed = tryCombineSExt(MI, DeadInsts, UpdatedDefs);
+ break;
+ case TargetOpcode::G_UNMERGE_VALUES:
+ Changed =
+ tryCombineUnmergeValues(MI, DeadInsts, UpdatedDefs, WrapperObserver);
+ break;
+ case TargetOpcode::G_MERGE_VALUES:
+ case TargetOpcode::G_BUILD_VECTOR:
+ case TargetOpcode::G_CONCAT_VECTORS:
+ // If any of the users of this merge are an unmerge, then add them to the
+ // artifact worklist in case there's folding that can be done looking up.
+ for (MachineInstr &U : MRI.use_instructions(MI.getOperand(0).getReg())) {
+ if (U.getOpcode() == TargetOpcode::G_UNMERGE_VALUES ||
+ U.getOpcode() == TargetOpcode::G_TRUNC) {
+ UpdatedDefs.push_back(MI.getOperand(0).getReg());
+ break;
+ }
+ }
+ break;
+ case TargetOpcode::G_EXTRACT:
+ Changed = tryCombineExtract(MI, DeadInsts, UpdatedDefs);
+ break;
+ case TargetOpcode::G_TRUNC:
+ Changed = tryCombineTrunc(MI, DeadInsts, UpdatedDefs, WrapperObserver);
+ if (!Changed) {
+ // Try to combine truncates away even if they are legal. As all artifact
+ // combines at the moment look only "up" the def-use chains, we achieve
+ // that by throwing truncates' users (with look through copies) into the
+ // ArtifactList again.
+ UpdatedDefs.push_back(MI.getOperand(0).getReg());
+ }
+ break;
+ }
+ // If the main loop through the ArtifactList found at least one combinable
+ // pair of artifacts, not only combine it away (as done above), but also
+ // follow the def-use chain from there to combine everything that can be
+ // combined within this def-use chain of artifacts.
+ while (!UpdatedDefs.empty()) {
+ Register NewDef = UpdatedDefs.pop_back_val();
+ assert(NewDef.isVirtual() && "Unexpected redefinition of a physreg");
+ for (MachineInstr &Use : MRI.use_instructions(NewDef)) {
+ switch (Use.getOpcode()) {
+ // Keep this list in sync with the list of all artifact combines.
+ case TargetOpcode::G_ANYEXT:
+ case TargetOpcode::G_ZEXT:
+ case TargetOpcode::G_SEXT:
+ case TargetOpcode::G_UNMERGE_VALUES:
+ case TargetOpcode::G_EXTRACT:
+ case TargetOpcode::G_TRUNC:
+ // Adding Use to ArtifactList.
+ WrapperObserver.changedInstr(Use);
+ break;
+ case TargetOpcode::COPY: {
+ Register Copy = Use.getOperand(0).getReg();
+ if (Copy.isVirtual())
+ UpdatedDefs.push_back(Copy);
+ break;
+ }
+ default:
+ // If we do not have an artifact combine for the opcode, there is no
+ // point in adding it to the ArtifactList as nothing interesting will
+ // be done to it anyway.
+ break;
+ }
+ }
+ }
+ return Changed;
+ }
+
+private:
+ static Register getArtifactSrcReg(const MachineInstr &MI) {
+ switch (MI.getOpcode()) {
+ case TargetOpcode::COPY:
+ case TargetOpcode::G_TRUNC:
+ case TargetOpcode::G_ZEXT:
+ case TargetOpcode::G_ANYEXT:
+ case TargetOpcode::G_SEXT:
+ case TargetOpcode::G_EXTRACT:
+ return MI.getOperand(1).getReg();
+ case TargetOpcode::G_UNMERGE_VALUES:
+ return MI.getOperand(MI.getNumOperands() - 1).getReg();
+ default:
+      llvm_unreachable("Not a legalization artifact");
+ }
+ }
+
+ /// Mark a def of one of MI's original operands, DefMI, as dead if changing MI
+ /// (either by killing it or changing operands) results in DefMI being dead
+ /// too. In-between COPYs or artifact-casts are also collected if they are
+ /// dead.
+ /// MI is not marked dead.
+ void markDefDead(MachineInstr &MI, MachineInstr &DefMI,
+ SmallVectorImpl<MachineInstr *> &DeadInsts,
+ unsigned DefIdx = 0) {
+ // Collect all the copy instructions that are made dead, due to deleting
+ // this instruction. Collect all of them until the Trunc(DefMI).
+ // Eg,
+ // %1(s1) = G_TRUNC %0(s32)
+ // %2(s1) = COPY %1(s1)
+ // %3(s1) = COPY %2(s1)
+ // %4(s32) = G_ANYEXT %3(s1)
+ // In this case, we would have replaced %4 with a copy of %0,
+ // and as a result, %3, %2, %1 are dead.
+ MachineInstr *PrevMI = &MI;
+ while (PrevMI != &DefMI) {
+ Register PrevRegSrc = getArtifactSrcReg(*PrevMI);
+
+ MachineInstr *TmpDef = MRI.getVRegDef(PrevRegSrc);
+ if (MRI.hasOneUse(PrevRegSrc)) {
+ if (TmpDef != &DefMI) {
+ assert((TmpDef->getOpcode() == TargetOpcode::COPY ||
+ isArtifactCast(TmpDef->getOpcode())) &&
+ "Expecting copy or artifact cast here");
+
+ DeadInsts.push_back(TmpDef);
+ }
+ } else
+ break;
+ PrevMI = TmpDef;
+ }
+
+ if (PrevMI == &DefMI) {
+ unsigned I = 0;
+ bool IsDead = true;
+ for (MachineOperand &Def : DefMI.defs()) {
+ if (I != DefIdx) {
+ if (!MRI.use_empty(Def.getReg())) {
+ IsDead = false;
+ break;
+ }
+ } else {
+ if (!MRI.hasOneUse(DefMI.getOperand(DefIdx).getReg()))
+ break;
+ }
+
+ ++I;
+ }
+
+ if (IsDead)
+ DeadInsts.push_back(&DefMI);
+ }
+ }
+
+ /// Mark MI as dead. If a def of one of MI's operands, DefMI, would also be
+ /// dead due to MI being killed, then mark DefMI as dead too.
+  /// Some of the combines (e.g. extend(trunc)) try to walk through redundant
+  /// copies between the extend and the trunc, and this attempts to collect
+  /// the in-between copies if they are dead.
+ void markInstAndDefDead(MachineInstr &MI, MachineInstr &DefMI,
+ SmallVectorImpl<MachineInstr *> &DeadInsts,
+ unsigned DefIdx = 0) {
+ DeadInsts.push_back(&MI);
+ markDefDead(MI, DefMI, DeadInsts, DefIdx);
+ }
+
+ /// Erase the dead instructions in the list and call the observer hooks.
+ /// Normally the Legalizer will deal with erasing instructions that have been
+ /// marked dead. However, for the trunc(ext(x)) cases we can end up trying to
+ /// process instructions which have been marked dead, but otherwise break the
+ /// MIR by introducing multiple vreg defs. For those cases, allow the combines
+ /// to explicitly delete the instructions before we run into trouble.
+ void deleteMarkedDeadInsts(SmallVectorImpl<MachineInstr *> &DeadInsts,
+ GISelObserverWrapper &WrapperObserver) {
+ for (auto *DeadMI : DeadInsts) {
+ LLVM_DEBUG(dbgs() << *DeadMI << "Is dead, eagerly deleting\n");
+ WrapperObserver.erasingInstr(*DeadMI);
+ DeadMI->eraseFromParentAndMarkDBGValuesForRemoval();
+ }
+ DeadInsts.clear();
+ }
+
+ /// Checks if the target legalizer info has specified anything about the
+ /// instruction, or if unsupported.
+ bool isInstUnsupported(const LegalityQuery &Query) const {
+ using namespace LegalizeActions;
+ auto Step = LI.getAction(Query);
+ return Step.Action == Unsupported || Step.Action == NotFound;
+ }
+
+ bool isInstLegal(const LegalityQuery &Query) const {
+ return LI.getAction(Query).Action == LegalizeActions::Legal;
+ }
+
+ bool isConstantUnsupported(LLT Ty) const {
+ if (!Ty.isVector())
+ return isInstUnsupported({TargetOpcode::G_CONSTANT, {Ty}});
+
+ LLT EltTy = Ty.getElementType();
+ return isInstUnsupported({TargetOpcode::G_CONSTANT, {EltTy}}) ||
+ isInstUnsupported({TargetOpcode::G_BUILD_VECTOR, {Ty, EltTy}});
+ }
+
+ /// Looks through copy instructions and returns the actual
+ /// source register.
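+  /// E.g. (illustrative): given %2 = COPY %1 and %1 = COPY %0, passing %2
+  /// returns %0, provided the intermediate registers have valid types.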
+ Register lookThroughCopyInstrs(Register Reg) {
+ Register TmpReg;
+ while (mi_match(Reg, MRI, m_Copy(m_Reg(TmpReg)))) {
+ if (MRI.getType(TmpReg).isValid())
+ Reg = TmpReg;
+ else
+ break;
+ }
+ return Reg;
+ }
+};
+
+} // namespace llvm
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Legalizer.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Legalizer.h
new file mode 100644
index 0000000000..666876eb69
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Legalizer.h
@@ -0,0 +1,88 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//== llvm/CodeGen/GlobalISel/Legalizer.h ---------------- -*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file A pass to convert the target-illegal operations created by IR -> MIR
+/// translation into ones the target expects to be able to select. This may
+/// occur in multiple phases, for example G_ADD <2 x i8> -> G_ADD <2 x i16> ->
+/// G_ADD <4 x i16>.
+///
+/// The LegalizeHelper class is where most of the work happens, and is designed
+/// to be callable from other passes that find themselves with an illegal
+/// instruction.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_LEGALIZEMACHINEIRPASS_H
+#define LLVM_CODEGEN_GLOBALISEL_LEGALIZEMACHINEIRPASS_H
+
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+
+class MachineRegisterInfo;
+class LostDebugLocObserver;
+
+class Legalizer : public MachineFunctionPass {
+public:
+ static char ID;
+
+ struct MFResult {
+ bool Changed;
+ const MachineInstr *FailedOn;
+ };
+
+private:
+ /// Initialize the field members using \p MF.
+ void init(MachineFunction &MF);
+
+public:
+ // Ctor, nothing fancy.
+ Legalizer();
+
+ StringRef getPassName() const override { return "Legalizer"; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::IsSSA);
+ }
+
+ MachineFunctionProperties getSetProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::Legalized);
+ }
+
+ MachineFunctionProperties getClearedProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::NoPHIs);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ static MFResult
+ legalizeMachineFunction(MachineFunction &MF, const LegalizerInfo &LI,
+ ArrayRef<GISelChangeObserver *> AuxObservers,
+ LostDebugLocObserver &LocObserver,
+ MachineIRBuilder &MIRBuilder);
+};
+} // End namespace llvm.
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
new file mode 100644
index 0000000000..bf45c5e673
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -0,0 +1,409 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//== llvm/CodeGen/GlobalISel/LegalizerHelper.h ---------------- -*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file A pass to convert the target-illegal operations created by IR -> MIR
+/// translation into ones the target expects to be able to select. This may
+/// occur in multiple phases, for example G_ADD <2 x i8> -> G_ADD <2 x i16> ->
+/// G_ADD <4 x i16>.
+///
+/// The LegalizerHelper class is where most of the work happens, and is
+/// designed to be callable from other passes that find themselves with an
+/// illegal instruction.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_MACHINELEGALIZEHELPER_H
+#define LLVM_CODEGEN_GLOBALISEL_MACHINELEGALIZEHELPER_H
+
+#include "llvm/CodeGen/GlobalISel/CallLowering.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/LowLevelType.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/RuntimeLibcalls.h"
+
+namespace llvm {
+// Forward declarations.
+class LegalizerInfo;
+class Legalizer;
+class MachineRegisterInfo;
+class GISelChangeObserver;
+class TargetLowering;
+
+class LegalizerHelper {
+public:
+  /// Expose MIRBuilder so clients can set their own RecordInsertInstruction
+  /// functions.
+ MachineIRBuilder &MIRBuilder;
+
+ /// To keep track of changes made by the LegalizerHelper.
+ GISelChangeObserver &Observer;
+
+private:
+ MachineRegisterInfo &MRI;
+ const LegalizerInfo &LI;
+ const TargetLowering &TLI;
+
+public:
+ enum LegalizeResult {
+ /// Instruction was already legal and no change was made to the
+ /// MachineFunction.
+ AlreadyLegal,
+
+ /// Instruction has been legalized and the MachineFunction changed.
+ Legalized,
+
+ /// Some kind of error has occurred and we could not legalize this
+ /// instruction.
+ UnableToLegalize,
+ };
+
+ /// Expose LegalizerInfo so the clients can re-use.
+ const LegalizerInfo &getLegalizerInfo() const { return LI; }
+ const TargetLowering &getTargetLowering() const { return TLI; }
+
+ LegalizerHelper(MachineFunction &MF, GISelChangeObserver &Observer,
+ MachineIRBuilder &B);
+ LegalizerHelper(MachineFunction &MF, const LegalizerInfo &LI,
+ GISelChangeObserver &Observer, MachineIRBuilder &B);
+
+ /// Replace \p MI by a sequence of legal instructions that can implement the
+ /// same operation. Note that this means \p MI may be deleted, so any iterator
+ /// steps should be performed before calling this function. \p Helper should
+ /// be initialized to the MachineFunction containing \p MI.
+ ///
+ /// Considered as an opaque blob, the legal code will use and define the same
+ /// registers as \p MI.
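+  ///
+  /// A minimal driver sketch (illustrative only; error handling elided, and
+  /// Observer/MIRBuilder are assumed to be set up by the caller):
+  ///   LegalizerHelper Helper(MF, Observer, MIRBuilder);
+  ///   if (Helper.legalizeInstrStep(MI) == LegalizerHelper::UnableToLegalize)
+  ///     report_fatal_error("unable to legalize instruction");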
+ LegalizeResult legalizeInstrStep(MachineInstr &MI);
+
+  /// Legalize an instruction by emitting a runtime library call instead.
+ LegalizeResult libcall(MachineInstr &MI);
+
+ /// Legalize an instruction by reducing the width of the underlying scalar
+ /// type.
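+  /// E.g. (illustrative): an s64 G_ADD can be narrowed to a pair of s32
+  /// additions chained through a carry (G_UADDO for the low half, G_UADDE
+  /// for the high half).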
+ LegalizeResult narrowScalar(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy);
+
+ /// Legalize an instruction by performing the operation on a wider scalar type
+  /// (for example, a 16-bit addition can be safely performed at 32-bit
+  /// precision, ignoring the unused bits).
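+  /// Illustrative MIR (hypothetical types):
+  ///   %2:_(s16) = G_ADD %0:_(s16), %1:_(s16)
+  /// becomes
+  ///   %3:_(s32) = G_ANYEXT %0(s16)
+  ///   %4:_(s32) = G_ANYEXT %1(s16)
+  ///   %5:_(s32) = G_ADD %3, %4
+  ///   %2:_(s16) = G_TRUNC %5(s32)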
+ LegalizeResult widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
+
+ /// Legalize an instruction by replacing the value type
+ LegalizeResult bitcast(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+
+ /// Legalize an instruction by splitting it into simpler parts, hopefully
+ /// understood by the target.
+ LegalizeResult lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+
+ /// Legalize a vector instruction by splitting into multiple components, each
+ /// acting on the same scalar type as the original but with fewer elements.
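+  /// E.g. (illustrative): a <4 x s32> G_ADD can be split into two <2 x s32>
+  /// G_ADDs whose results are recombined, e.g. with G_CONCAT_VECTORS.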
+ LegalizeResult fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
+ LLT NarrowTy);
+
+ /// Legalize a vector instruction by increasing the number of vector elements
+ /// involved and ignoring the added elements later.
+ LegalizeResult moreElementsVector(MachineInstr &MI, unsigned TypeIdx,
+ LLT MoreTy);
+
+ /// Cast the given value to an LLT::scalar with an equivalent size. Returns
+ /// the register to use if an instruction was inserted. Returns the original
+ /// register if no coercion was necessary.
+ //
+ // This may also fail and return Register() if there is no legal way to cast.
+ Register coerceToScalar(Register Val);
+
+ /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
+ /// Use by extending the operand's type to \p WideTy using the specified \p
+ /// ExtOpcode for the extension instruction, and replacing the vreg of the
+ /// operand in place.
+ void widenScalarSrc(MachineInstr &MI, LLT WideTy, unsigned OpIdx,
+ unsigned ExtOpcode);
+
+ /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
+ /// Use by truncating the operand's type to \p NarrowTy using G_TRUNC, and
+ /// replacing the vreg of the operand in place.
+ void narrowScalarSrc(MachineInstr &MI, LLT NarrowTy, unsigned OpIdx);
+
+ /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
+ /// Def by extending the operand's type to \p WideTy and truncating it back
+ /// with the \p TruncOpcode, and replacing the vreg of the operand in place.
+ void widenScalarDst(MachineInstr &MI, LLT WideTy, unsigned OpIdx = 0,
+ unsigned TruncOpcode = TargetOpcode::G_TRUNC);
+
+ // Legalize a single operand \p OpIdx of the machine instruction \p MI as a
+ // Def by truncating the operand's type to \p NarrowTy, replacing in place and
+ // extending back with \p ExtOpcode.
+ void narrowScalarDst(MachineInstr &MI, LLT NarrowTy, unsigned OpIdx,
+ unsigned ExtOpcode);
+ /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
+ /// Def by performing it with additional vector elements and extracting the
+ /// result elements, and replacing the vreg of the operand in place.
+ void moreElementsVectorDst(MachineInstr &MI, LLT MoreTy, unsigned OpIdx);
+
+ /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
+ /// Use by producing a vector with undefined high elements, extracting the
+ /// original vector type, and replacing the vreg of the operand in place.
+ void moreElementsVectorSrc(MachineInstr &MI, LLT MoreTy, unsigned OpIdx);
+
+ /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
+ /// use by inserting a G_BITCAST to \p CastTy
+ void bitcastSrc(MachineInstr &MI, LLT CastTy, unsigned OpIdx);
+
+ /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
+ /// def by inserting a G_BITCAST from \p CastTy
+ void bitcastDst(MachineInstr &MI, LLT CastTy, unsigned OpIdx);
+
+ /// Widen \p OrigReg to \p WideTy by merging to a wider type, padding with
+ /// G_IMPLICIT_DEF, and producing dead results.
+ Register widenWithUnmerge(LLT WideTy, Register OrigReg);
+
+private:
+ LegalizeResult
+ widenScalarMergeValues(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
+ LegalizeResult
+ widenScalarUnmergeValues(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
+ LegalizeResult
+ widenScalarExtract(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
+ LegalizeResult
+ widenScalarInsert(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
+ LegalizeResult widenScalarAddoSubo(MachineInstr &MI, unsigned TypeIdx,
+ LLT WideTy);
+ LegalizeResult widenScalarAddSubShlSat(MachineInstr &MI, unsigned TypeIdx,
+ LLT WideTy);
+
+ /// Helper function to split a wide generic register into bitwise blocks with
+ /// the given Type (which implies the number of blocks needed). The generic
+  /// registers created are appended to \p VRegs, starting at bit 0 of Reg.
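+  /// E.g. (illustrative): extracting s32 parts from an s96 register appends
+  /// three s32 registers covering bits [0,32), [32,64) and [64,96).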
+ void extractParts(Register Reg, LLT Ty, int NumParts,
+ SmallVectorImpl<Register> &VRegs);
+
+ /// Version which handles irregular splits.
+ bool extractParts(Register Reg, LLT RegTy, LLT MainTy,
+ LLT &LeftoverTy,
+ SmallVectorImpl<Register> &VRegs,
+ SmallVectorImpl<Register> &LeftoverVRegs);
+
+ /// Helper function to build a wide generic register \p DstReg of type \p
+ /// RegTy from smaller parts. This will produce a G_MERGE_VALUES,
+ /// G_BUILD_VECTOR, G_CONCAT_VECTORS, or sequence of G_INSERT as appropriate
+ /// for the types.
+ ///
+ /// \p PartRegs must be registers of type \p PartTy.
+ ///
+ /// If \p ResultTy does not evenly break into \p PartTy sized pieces, the
+ /// remainder must be specified with \p LeftoverRegs of type \p LeftoverTy.
+ void insertParts(Register DstReg, LLT ResultTy,
+ LLT PartTy, ArrayRef<Register> PartRegs,
+ LLT LeftoverTy = LLT(), ArrayRef<Register> LeftoverRegs = {});
+
+ /// Unmerge \p SrcReg into smaller sized values, and append them to \p
+ /// Parts. The elements of \p Parts will be the greatest common divisor type
+ /// of \p DstTy, \p NarrowTy and the type of \p SrcReg. This will compute and
+ /// return the GCD type.
+ LLT extractGCDType(SmallVectorImpl<Register> &Parts, LLT DstTy,
+ LLT NarrowTy, Register SrcReg);
+
+ /// Unmerge \p SrcReg into \p GCDTy typed registers. This will append all of
+ /// the unpacked registers to \p Parts. This version is if the common unmerge
+ /// type is already known.
+ void extractGCDType(SmallVectorImpl<Register> &Parts, LLT GCDTy,
+ Register SrcReg);
+
+ /// Produce a merge of values in \p VRegs to define \p DstReg. Perform a merge
+ /// from the least common multiple type, and convert as appropriate to \p
+ /// DstReg.
+ ///
+ /// \p VRegs should each have type \p GCDTy. This type should be greatest
+ /// common divisor type of \p DstReg, \p NarrowTy, and an undetermined source
+ /// type.
+ ///
+ /// \p NarrowTy is the desired result merge source type. If the source value
+ /// needs to be widened to evenly cover \p DstReg, inserts high bits
+ /// corresponding to the extension opcode \p PadStrategy.
+ ///
+  /// \p VRegs will be cleared, and the result \p NarrowTy register pieces
+  /// will replace it. Returns the complete LCMTy that \p VRegs will cover when
+ /// merged.
+ LLT buildLCMMergePieces(LLT DstTy, LLT NarrowTy, LLT GCDTy,
+ SmallVectorImpl<Register> &VRegs,
+ unsigned PadStrategy = TargetOpcode::G_ANYEXT);
+
+ /// Merge the values in \p RemergeRegs to an \p LCMTy typed value. Extract the
+ /// low bits into \p DstReg. This is intended to use the outputs from
+ /// buildLCMMergePieces after processing.
+ void buildWidenedRemergeToDst(Register DstReg, LLT LCMTy,
+ ArrayRef<Register> RemergeRegs);
+
+ /// Perform generic multiplication of values held in multiple registers.
+ /// Generated instructions use only types NarrowTy and i1.
+ /// Destination can be same or two times size of the source.
+ void multiplyRegisters(SmallVectorImpl<Register> &DstRegs,
+ ArrayRef<Register> Src1Regs,
+ ArrayRef<Register> Src2Regs, LLT NarrowTy);
+
+ void changeOpcode(MachineInstr &MI, unsigned NewOpcode);
+
+public:
+ /// Return the alignment to use for a stack temporary object with the given
+ /// type.
+ Align getStackTemporaryAlignment(LLT Type, Align MinAlign = Align()) const;
+
+ /// Create a stack temporary based on the size in bytes and the alignment
+ MachineInstrBuilder createStackTemporary(TypeSize Bytes, Align Alignment,
+ MachinePointerInfo &PtrInfo);
+
+ /// Get a pointer to vector element \p Index located in memory for a vector of
+ /// type \p VecTy starting at a base address of \p VecPtr. If \p Index is out
+ /// of bounds the returned pointer is unspecified, but will be within the
+ /// vector bounds.
+ Register getVectorElementPointer(Register VecPtr, LLT VecTy, Register Index);
+
+ LegalizeResult fewerElementsVectorImplicitDef(MachineInstr &MI,
+ unsigned TypeIdx, LLT NarrowTy);
+
+  /// Legalize an instruction with a vector type where each operand may have a
+ /// different element type. All type indexes must have the same number of
+ /// elements.
+ LegalizeResult fewerElementsVectorMultiEltType(MachineInstr &MI,
+ unsigned TypeIdx, LLT NarrowTy);
+
+ LegalizeResult fewerElementsVectorCasts(MachineInstr &MI, unsigned TypeIdx,
+ LLT NarrowTy);
+
+ LegalizeResult
+ fewerElementsVectorCmp(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy);
+
+ LegalizeResult
+ fewerElementsVectorSelect(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy);
+
+ LegalizeResult fewerElementsVectorPhi(MachineInstr &MI,
+ unsigned TypeIdx, LLT NarrowTy);
+
+ LegalizeResult moreElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx,
+ LLT MoreTy);
+
+ LegalizeResult fewerElementsVectorUnmergeValues(MachineInstr &MI,
+ unsigned TypeIdx,
+ LLT NarrowTy);
+ LegalizeResult fewerElementsVectorMerge(MachineInstr &MI, unsigned TypeIdx,
+ LLT NarrowTy);
+ LegalizeResult fewerElementsVectorExtractInsertVectorElt(MachineInstr &MI,
+ unsigned TypeIdx,
+ LLT NarrowTy);
+
+ LegalizeResult
+ reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy);
+
+ /// Legalize an instruction by reducing the operation width, either by
+ /// narrowing the type of the operation or by reducing the number of elements
+ /// of a vector.
+  /// The strategy used (narrowScalar vs. fewerElements) is decided by \p
+  /// NarrowTy: narrowScalar is used if the scalar types of \p NarrowTy and \p
+  /// DstTy differ; fewerElements is used when the scalar type is the same but
+  /// the number of elements in \p NarrowTy and \p DstTy differs.
+ LegalizeResult reduceOperationWidth(MachineInstr &MI, unsigned TypeIdx,
+ LLT NarrowTy);
+
+ LegalizeResult fewerElementsVectorSextInReg(MachineInstr &MI, unsigned TypeIdx,
+ LLT NarrowTy);
+
+ LegalizeResult narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt,
+ LLT HalfTy, LLT ShiftAmtTy);
+
+ LegalizeResult narrowScalarShift(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+ LegalizeResult narrowScalarMul(MachineInstr &MI, LLT Ty);
+ LegalizeResult narrowScalarFPTOI(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+ LegalizeResult narrowScalarExtract(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+ LegalizeResult narrowScalarInsert(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+
+ LegalizeResult narrowScalarBasic(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+ LegalizeResult narrowScalarExt(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+ LegalizeResult narrowScalarSelect(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+ LegalizeResult narrowScalarCTLZ(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+ LegalizeResult narrowScalarCTTZ(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+ LegalizeResult narrowScalarCTPOP(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+
+ /// Perform Bitcast legalize action on G_EXTRACT_VECTOR_ELT.
+ LegalizeResult bitcastExtractVectorElt(MachineInstr &MI, unsigned TypeIdx,
+ LLT CastTy);
+
+ /// Perform Bitcast legalize action on G_INSERT_VECTOR_ELT.
+ LegalizeResult bitcastInsertVectorElt(MachineInstr &MI, unsigned TypeIdx,
+ LLT CastTy);
+
+ LegalizeResult lowerBitcast(MachineInstr &MI);
+ LegalizeResult lowerLoad(MachineInstr &MI);
+ LegalizeResult lowerStore(MachineInstr &MI);
+ LegalizeResult lowerBitCount(MachineInstr &MI);
+
+ LegalizeResult lowerU64ToF32BitOps(MachineInstr &MI);
+ LegalizeResult lowerUITOFP(MachineInstr &MI);
+ LegalizeResult lowerSITOFP(MachineInstr &MI);
+ LegalizeResult lowerFPTOUI(MachineInstr &MI);
+ LegalizeResult lowerFPTOSI(MachineInstr &MI);
+
+ LegalizeResult lowerFPTRUNC_F64_TO_F16(MachineInstr &MI);
+ LegalizeResult lowerFPTRUNC(MachineInstr &MI);
+ LegalizeResult lowerFPOWI(MachineInstr &MI);
+
+ LegalizeResult lowerMinMax(MachineInstr &MI);
+ LegalizeResult lowerFCopySign(MachineInstr &MI);
+ LegalizeResult lowerFMinNumMaxNum(MachineInstr &MI);
+ LegalizeResult lowerFMad(MachineInstr &MI);
+ LegalizeResult lowerIntrinsicRound(MachineInstr &MI);
+ LegalizeResult lowerFFloor(MachineInstr &MI);
+ LegalizeResult lowerMergeValues(MachineInstr &MI);
+ LegalizeResult lowerUnmergeValues(MachineInstr &MI);
+ LegalizeResult lowerExtractInsertVectorElt(MachineInstr &MI);
+ LegalizeResult lowerShuffleVector(MachineInstr &MI);
+ LegalizeResult lowerDynStackAlloc(MachineInstr &MI);
+ LegalizeResult lowerExtract(MachineInstr &MI);
+ LegalizeResult lowerInsert(MachineInstr &MI);
+ LegalizeResult lowerSADDO_SSUBO(MachineInstr &MI);
+ LegalizeResult lowerAddSubSatToMinMax(MachineInstr &MI);
+ LegalizeResult lowerAddSubSatToAddoSubo(MachineInstr &MI);
+ LegalizeResult lowerShlSat(MachineInstr &MI);
+ LegalizeResult lowerBswap(MachineInstr &MI);
+ LegalizeResult lowerBitreverse(MachineInstr &MI);
+ LegalizeResult lowerReadWriteRegister(MachineInstr &MI);
+ LegalizeResult lowerSMULH_UMULH(MachineInstr &MI);
+ LegalizeResult lowerSelect(MachineInstr &MI);
+
+};
+
+/// Helper function that creates a libcall to the given \p Name using the given
+/// calling convention \p CC.
+LegalizerHelper::LegalizeResult
+createLibcall(MachineIRBuilder &MIRBuilder, const char *Name,
+ const CallLowering::ArgInfo &Result,
+ ArrayRef<CallLowering::ArgInfo> Args, CallingConv::ID CC);
+
+/// Helper function that creates the given libcall.
+LegalizerHelper::LegalizeResult
+createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall,
+ const CallLowering::ArgInfo &Result,
+ ArrayRef<CallLowering::ArgInfo> Args);
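+// Illustrative sketch (DstReg, LHSReg, RHSReg and the IR type I64 are assumed
+// to exist in the caller): a 64-bit signed remainder could be expanded
+// roughly as:
+//   createLibcall(MIRBuilder, RTLIB::SREM_I64,
+//                 CallLowering::ArgInfo{DstReg, I64},
+//                 {CallLowering::ArgInfo{LHSReg, I64},
+//                  CallLowering::ArgInfo{RHSReg, I64}});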
+
+/// Create a libcall to memcpy et al.
+LegalizerHelper::LegalizeResult createMemLibcall(MachineIRBuilder &MIRBuilder,
+ MachineRegisterInfo &MRI,
+ MachineInstr &MI);
+
+} // End namespace llvm.
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
new file mode 100644
index 0000000000..0ae41c1a8d
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
@@ -0,0 +1,1498 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/CodeGen/GlobalISel/LegalizerInfo.h ------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// Interface for Targets to specify which operations they can successfully
+/// select and how the others should be expanded most efficiently.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_LEGALIZERINFO_H
+#define LLVM_CODEGEN_GLOBALISEL_LEGALIZERINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/LowLevelTypeImpl.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstdint>
+#include <tuple>
+#include <unordered_map>
+#include <utility>
+
+namespace llvm {
+
+extern cl::opt<bool> DisableGISelLegalityCheck;
+
+class LegalizerHelper;
+class MachineInstr;
+class MachineRegisterInfo;
+class MCInstrInfo;
+class GISelChangeObserver;
+
+namespace LegalizeActions {
+enum LegalizeAction : std::uint8_t {
+ /// The operation is expected to be selectable directly by the target, and
+ /// no transformation is necessary.
+ Legal,
+
+ /// The operation should be synthesized from multiple instructions acting on
+ /// a narrower scalar base-type. For example a 64-bit add might be
+ /// implemented in terms of 32-bit add-with-carry.
+ NarrowScalar,
+
+ /// The operation should be implemented in terms of a wider scalar
+ /// base-type. For example a <2 x s8> add could be implemented as a <2
+ /// x s32> add (ignoring the high bits).
+ WidenScalar,
+
+ /// The (vector) operation should be implemented by splitting it into
+ /// sub-vectors where the operation is legal. For example a <8 x s64> add
+ /// might be implemented as 4 separate <2 x s64> adds.
+ FewerElements,
+
+ /// The (vector) operation should be implemented by widening the input
+ /// vector and ignoring the lanes added by doing so. For example <2 x i8> is
+ /// rarely legal, but you might perform an <8 x i8> and then only look at
+ /// the first two results.
+ MoreElements,
+
+ /// Perform the operation on a different, but equivalently sized type.
+ Bitcast,
+
+ /// The operation itself must be expressed in terms of simpler actions on
+ /// this target. E.g. a SREM replaced by an SDIV and subtraction.
+ Lower,
+
+ /// The operation should be implemented as a call to some kind of runtime
+ /// support library. For example this usually happens on machines that don't
+ /// support floating-point operations natively.
+ Libcall,
+
+ /// The target wants to do something special with this combination of
+ /// operand and type. A callback will be issued when it is needed.
+ Custom,
+
+ /// This operation is completely unsupported on the target. A programming
+ /// error has occurred.
+ Unsupported,
+
+ /// Sentinel value for when no action was found in the specified table.
+ NotFound,
+
+ /// Fall back onto the old rules.
+ /// TODO: Remove this once we've migrated
+ UseLegacyRules,
+};
+} // end namespace LegalizeActions
+raw_ostream &operator<<(raw_ostream &OS, LegalizeActions::LegalizeAction Action);
+
+using LegalizeActions::LegalizeAction;
+
+/// Legalization is decided based on an instruction's opcode, which type slot
+/// we're considering, and what the existing type is. These aspects are gathered
+/// together for convenience in the InstrAspect class.
+struct InstrAspect {
+ unsigned Opcode;
+ unsigned Idx = 0;
+ LLT Type;
+
+ InstrAspect(unsigned Opcode, LLT Type) : Opcode(Opcode), Type(Type) {}
+ InstrAspect(unsigned Opcode, unsigned Idx, LLT Type)
+ : Opcode(Opcode), Idx(Idx), Type(Type) {}
+
+ bool operator==(const InstrAspect &RHS) const {
+ return Opcode == RHS.Opcode && Idx == RHS.Idx && Type == RHS.Type;
+ }
+};
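+// Illustrative sketch: the aspect "type index 0 of G_ADD is s32" would be
+// written as:
+//   InstrAspect Aspect(TargetOpcode::G_ADD, 0, LLT::scalar(32));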
+
+/// The LegalityQuery object bundles together all the information that's needed
+/// to decide whether a given operation is legal or not.
+/// For efficiency, it doesn't make a copy of Types, so care must be taken not
+/// to free them before using the query.
+struct LegalityQuery {
+ unsigned Opcode;
+ ArrayRef<LLT> Types;
+
+ struct MemDesc {
+ uint64_t SizeInBits;
+ uint64_t AlignInBits;
+ AtomicOrdering Ordering;
+ };
+
+ /// Operations which require memory can use this to place requirements on the
+ /// memory type for each MMO.
+ ArrayRef<MemDesc> MMODescrs;
+
+ constexpr LegalityQuery(unsigned Opcode, const ArrayRef<LLT> Types,
+ const ArrayRef<MemDesc> MMODescrs)
+ : Opcode(Opcode), Types(Types), MMODescrs(MMODescrs) {}
+ constexpr LegalityQuery(unsigned Opcode, const ArrayRef<LLT> Types)
+ : LegalityQuery(Opcode, Types, {}) {}
+
+ raw_ostream &print(raw_ostream &OS) const;
+};
+
+/// The result of a query. It either indicates a final answer of Legal or
+/// Unsupported or describes an action that must be taken to make an operation
+/// more legal.
+struct LegalizeActionStep {
+ /// The action to take or the final answer.
+ LegalizeAction Action;
+ /// If describing an action, the type index to change. Otherwise zero.
+ unsigned TypeIdx;
+ /// If describing an action, the new type for TypeIdx. Otherwise LLT{}.
+ LLT NewType;
+
+ LegalizeActionStep(LegalizeAction Action, unsigned TypeIdx,
+ const LLT NewType)
+ : Action(Action), TypeIdx(TypeIdx), NewType(NewType) {}
+
+ bool operator==(const LegalizeActionStep &RHS) const {
+ return std::tie(Action, TypeIdx, NewType) ==
+ std::tie(RHS.Action, RHS.TypeIdx, RHS.NewType);
+ }
+};
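+// Illustrative sketch (LI is an assumed LegalizerInfo reference): querying a
+// G_ADD whose single type index is s64 and inspecting the resulting step:
+//   LegalizeActionStep Step = LI.getAction({TargetOpcode::G_ADD,
+//                                           {LLT::scalar(64)}});
+//   // On a target where only s32 is directly supported, Step could be
+//   // {NarrowScalar, 0, LLT::scalar(32)}.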
+
+using LegalityPredicate = std::function<bool (const LegalityQuery &)>;
+using LegalizeMutation =
+ std::function<std::pair<unsigned, LLT>(const LegalityQuery &)>;
+
+namespace LegalityPredicates {
+struct TypePairAndMemDesc {
+ LLT Type0;
+ LLT Type1;
+ uint64_t MemSize;
+ uint64_t Align;
+
+ bool operator==(const TypePairAndMemDesc &Other) const {
+ return Type0 == Other.Type0 && Type1 == Other.Type1 &&
+ Align == Other.Align &&
+ MemSize == Other.MemSize;
+ }
+
+  /// \returns true if this memory access is legal for the access described
+  /// by \p Other (the alignment is sufficient for the size and result type).
+ bool isCompatible(const TypePairAndMemDesc &Other) const {
+ return Type0 == Other.Type0 && Type1 == Other.Type1 &&
+ Align >= Other.Align &&
+ MemSize == Other.MemSize;
+ }
+};
+
+/// True iff P0 and P1 are true.
+template<typename Predicate>
+Predicate all(Predicate P0, Predicate P1) {
+ return [=](const LegalityQuery &Query) {
+ return P0(Query) && P1(Query);
+ };
+}
+/// True iff all given predicates are true.
+template<typename Predicate, typename... Args>
+Predicate all(Predicate P0, Predicate P1, Args... args) {
+ return all(all(P0, P1), args...);
+}
+
+/// True iff P0 or P1 are true.
+template<typename Predicate>
+Predicate any(Predicate P0, Predicate P1) {
+ return [=](const LegalityQuery &Query) {
+ return P0(Query) || P1(Query);
+ };
+}
+/// True iff any given predicates are true.
+template<typename Predicate, typename... Args>
+Predicate any(Predicate P0, Predicate P1, Args... args) {
+ return any(any(P0, P1), args...);
+}
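+// Illustrative sketch: with the predicates declared below, "type index 0 is
+// a vector of s8 elements" can be composed as:
+//   all(isVector(0), elementTypeIs(0, LLT::scalar(8)))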
+
+/// True iff the given type index is the specified type.
+LegalityPredicate typeIs(unsigned TypeIdx, LLT TypesInit);
+/// True iff the given type index is one of the specified types.
+LegalityPredicate typeInSet(unsigned TypeIdx,
+ std::initializer_list<LLT> TypesInit);
+
+/// True iff the given type index is not the specified type.
+inline LegalityPredicate typeIsNot(unsigned TypeIdx, LLT Type) {
+ return [=](const LegalityQuery &Query) {
+ return Query.Types[TypeIdx] != Type;
+ };
+}
+
+/// True iff the given types for the given pair of type indexes form one of the
+/// specified type pairs.
+LegalityPredicate
+typePairInSet(unsigned TypeIdx0, unsigned TypeIdx1,
+ std::initializer_list<std::pair<LLT, LLT>> TypesInit);
+/// True iff the given types for the given pair of type indexes, together with
+/// the memory described by the given MMO index, match one of the specified
+/// (type, type, memory descriptor) triples.
+LegalityPredicate typePairAndMemDescInSet(
+ unsigned TypeIdx0, unsigned TypeIdx1, unsigned MMOIdx,
+ std::initializer_list<TypePairAndMemDesc> TypesAndMemDescInit);
+/// True iff the specified type index is a scalar.
+LegalityPredicate isScalar(unsigned TypeIdx);
+/// True iff the specified type index is a vector.
+LegalityPredicate isVector(unsigned TypeIdx);
+/// True iff the specified type index is a pointer (with any address space).
+LegalityPredicate isPointer(unsigned TypeIdx);
+/// True iff the specified type index is a pointer with the specified address
+/// space.
+LegalityPredicate isPointer(unsigned TypeIdx, unsigned AddrSpace);
+
+/// True if the type index is a vector with element type \p EltTy
+LegalityPredicate elementTypeIs(unsigned TypeIdx, LLT EltTy);
+
+/// True iff the specified type index is a scalar that's narrower than the given
+/// size.
+LegalityPredicate scalarNarrowerThan(unsigned TypeIdx, unsigned Size);
+
+/// True iff the specified type index is a scalar that's wider than the given
+/// size.
+LegalityPredicate scalarWiderThan(unsigned TypeIdx, unsigned Size);
+
+/// True iff the specified type index is a scalar or vector with an element type
+/// that's narrower than the given size.
+LegalityPredicate scalarOrEltNarrowerThan(unsigned TypeIdx, unsigned Size);
+
+/// True iff the specified type index is a scalar or a vector with an element
+/// type that's wider than the given size.
+LegalityPredicate scalarOrEltWiderThan(unsigned TypeIdx, unsigned Size);
+
+/// True iff the specified type index is a scalar whose size is not a power of
+/// 2.
+LegalityPredicate sizeNotPow2(unsigned TypeIdx);
+
+/// True iff the specified type index is a scalar or vector whose element size
+/// is not a power of 2.
+LegalityPredicate scalarOrEltSizeNotPow2(unsigned TypeIdx);
+
+/// True if the total bitwidth of the specified type index is \p Size bits.
+LegalityPredicate sizeIs(unsigned TypeIdx, unsigned Size);
+
+/// True iff the specified type indices are both the same bit size.
+LegalityPredicate sameSize(unsigned TypeIdx0, unsigned TypeIdx1);
+
+/// True iff the first type index has a larger total bit size than the second
+/// type index.
+LegalityPredicate largerThan(unsigned TypeIdx0, unsigned TypeIdx1);
+
+/// True iff the first type index has a smaller total bit size than the second
+/// type index.
+LegalityPredicate smallerThan(unsigned TypeIdx0, unsigned TypeIdx1);
+
+/// True iff the specified MMO index has a size that is not a power of 2.
+LegalityPredicate memSizeInBytesNotPow2(unsigned MMOIdx);
+/// True iff the specified type index is a vector whose element count is not a
+/// power of 2.
+LegalityPredicate numElementsNotPow2(unsigned TypeIdx);
+/// True iff the specified MMO index has an atomic ordering that is at least as
+/// strong as \p Ordering.
+LegalityPredicate atomicOrderingAtLeastOrStrongerThan(unsigned MMOIdx,
+ AtomicOrdering Ordering);
+} // end namespace LegalityPredicates
+
+namespace LegalizeMutations {
+/// Select this specific type for the given type index.
+LegalizeMutation changeTo(unsigned TypeIdx, LLT Ty);
+
+/// Keep the same type as the given type index.
+LegalizeMutation changeTo(unsigned TypeIdx, unsigned FromTypeIdx);
+
+/// Keep the same scalar or element type as the given type index.
+LegalizeMutation changeElementTo(unsigned TypeIdx, unsigned FromTypeIdx);
+
+/// Keep the same scalar or element type as the given type.
+LegalizeMutation changeElementTo(unsigned TypeIdx, LLT Ty);
+
+/// Change the scalar size or element size to have the same scalar size as type
+/// index \p FromTypeIdx. Unlike changeElementTo, this discards pointer types
+/// and only changes the size.
+LegalizeMutation changeElementSizeTo(unsigned TypeIdx, unsigned FromTypeIdx);
+
+/// Widen the scalar type or vector element type for the given type index to the
+/// next power of 2.
+LegalizeMutation widenScalarOrEltToNextPow2(unsigned TypeIdx, unsigned Min = 0);
+
+/// Add more elements to the type for the given type index to the next power of
+/// 2.
+LegalizeMutation moreElementsToNextPow2(unsigned TypeIdx, unsigned Min = 0);
+/// Break up the vector type for the given type index into the element type.
+LegalizeMutation scalarize(unsigned TypeIdx);
+} // end namespace LegalizeMutations
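+// Illustrative sketch: a predicate/mutation pair as consumed by the rule-set
+// methods below, widening any scalar narrower than 32 bits to s32:
+//   widenScalarIf(LegalityPredicates::scalarNarrowerThan(0, 32),
+//                 LegalizeMutations::changeTo(0, LLT::scalar(32)));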
+
+/// A single rule in a legalizer info ruleset.
+/// The specified action is chosen when the predicate is true. Where appropriate
+/// for the action (e.g. for WidenScalar) the new type is selected using the
+/// given mutator.
+class LegalizeRule {
+ LegalityPredicate Predicate;
+ LegalizeAction Action;
+ LegalizeMutation Mutation;
+
+public:
+ LegalizeRule(LegalityPredicate Predicate, LegalizeAction Action,
+ LegalizeMutation Mutation = nullptr)
+ : Predicate(Predicate), Action(Action), Mutation(Mutation) {}
+
+ /// Test whether the LegalityQuery matches.
+ bool match(const LegalityQuery &Query) const {
+ return Predicate(Query);
+ }
+
+ LegalizeAction getAction() const { return Action; }
+
+ /// Determine the change to make.
+ std::pair<unsigned, LLT> determineMutation(const LegalityQuery &Query) const {
+ if (Mutation)
+ return Mutation(Query);
+ return std::make_pair(0, LLT{});
+ }
+};
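+// Illustrative sketch (Query is an assumed LegalityQuery): a rule that widens
+// an s1 at type index 0 to s32, and how it would be evaluated:
+//   LegalizeRule Rule(LegalityPredicates::typeIs(0, LLT::scalar(1)),
+//                     LegalizeAction::WidenScalar,
+//                     LegalizeMutations::changeTo(0, LLT::scalar(32)));
+//   if (Rule.match(Query)) {
+//     // Mutation is {0, s32}: change type index 0 to s32.
+//     std::pair<unsigned, LLT> Mutation = Rule.determineMutation(Query);
+//   }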
+
+class LegalizeRuleSet {
+ /// When non-zero, the opcode we are an alias of
+ unsigned AliasOf;
+ /// If true, there is another opcode that aliases this one
+ bool IsAliasedByAnother;
+ SmallVector<LegalizeRule, 2> Rules;
+
+#ifndef NDEBUG
+  /// If bit I is set, this rule set contains a rule that may handle (predicate
+  /// on, perform an action upon, or both) the type index I. The uncertainty
+ /// comes from free-form rules executing user-provided lambda functions. We
+ /// conservatively assume such rules do the right thing and cover all type
+ /// indices. The bitset is intentionally 1 bit wider than it absolutely needs
+ /// to be to distinguish such cases from the cases where all type indices are
+ /// individually handled.
+ SmallBitVector TypeIdxsCovered{MCOI::OPERAND_LAST_GENERIC -
+ MCOI::OPERAND_FIRST_GENERIC + 2};
+ SmallBitVector ImmIdxsCovered{MCOI::OPERAND_LAST_GENERIC_IMM -
+ MCOI::OPERAND_FIRST_GENERIC_IMM + 2};
+#endif
+
+ unsigned typeIdx(unsigned TypeIdx) {
+ assert(TypeIdx <=
+ (MCOI::OPERAND_LAST_GENERIC - MCOI::OPERAND_FIRST_GENERIC) &&
+ "Type Index is out of bounds");
+#ifndef NDEBUG
+ TypeIdxsCovered.set(TypeIdx);
+#endif
+ return TypeIdx;
+ }
+
+ unsigned immIdx(unsigned ImmIdx) {
+ assert(ImmIdx <= (MCOI::OPERAND_LAST_GENERIC_IMM -
+ MCOI::OPERAND_FIRST_GENERIC_IMM) &&
+ "Imm Index is out of bounds");
+#ifndef NDEBUG
+ ImmIdxsCovered.set(ImmIdx);
+#endif
+ return ImmIdx;
+ }
+
+ void markAllIdxsAsCovered() {
+#ifndef NDEBUG
+ TypeIdxsCovered.set();
+ ImmIdxsCovered.set();
+#endif
+ }
+
+ void add(const LegalizeRule &Rule) {
+ assert(AliasOf == 0 &&
+ "RuleSet is aliased, change the representative opcode instead");
+ Rules.push_back(Rule);
+ }
+
+ static bool always(const LegalityQuery &) { return true; }
+
+ /// Use the given action when the predicate is true.
+ /// Action should not be an action that requires mutation.
+ LegalizeRuleSet &actionIf(LegalizeAction Action,
+ LegalityPredicate Predicate) {
+ add({Predicate, Action});
+ return *this;
+ }
+ /// Use the given action when the predicate is true.
+ /// Action should be an action that requires mutation.
+ LegalizeRuleSet &actionIf(LegalizeAction Action, LegalityPredicate Predicate,
+ LegalizeMutation Mutation) {
+ add({Predicate, Action, Mutation});
+ return *this;
+ }
+ /// Use the given action when type index 0 is any type in the given list.
+ /// Action should not be an action that requires mutation.
+ LegalizeRuleSet &actionFor(LegalizeAction Action,
+ std::initializer_list<LLT> Types) {
+ using namespace LegalityPredicates;
+ return actionIf(Action, typeInSet(typeIdx(0), Types));
+ }
+ /// Use the given action when type index 0 is any type in the given list.
+ /// Action should be an action that requires mutation.
+ LegalizeRuleSet &actionFor(LegalizeAction Action,
+ std::initializer_list<LLT> Types,
+ LegalizeMutation Mutation) {
+ using namespace LegalityPredicates;
+ return actionIf(Action, typeInSet(typeIdx(0), Types), Mutation);
+ }
+  /// Use the given action when type indexes 0 and 1 form any type pair in the
+  /// given list.
+ /// Action should not be an action that requires mutation.
+ LegalizeRuleSet &actionFor(LegalizeAction Action,
+ std::initializer_list<std::pair<LLT, LLT>> Types) {
+ using namespace LegalityPredicates;
+ return actionIf(Action, typePairInSet(typeIdx(0), typeIdx(1), Types));
+ }
+  /// Use the given action when type indexes 0 and 1 form any type pair in the
+  /// given list.
+ /// Action should be an action that requires mutation.
+ LegalizeRuleSet &actionFor(LegalizeAction Action,
+ std::initializer_list<std::pair<LLT, LLT>> Types,
+ LegalizeMutation Mutation) {
+ using namespace LegalityPredicates;
+ return actionIf(Action, typePairInSet(typeIdx(0), typeIdx(1), Types),
+ Mutation);
+ }
+ /// Use the given action when type index 0 is any type in the given list and
+ /// imm index 0 is anything. Action should not be an action that requires
+ /// mutation.
+ LegalizeRuleSet &actionForTypeWithAnyImm(LegalizeAction Action,
+ std::initializer_list<LLT> Types) {
+ using namespace LegalityPredicates;
+ immIdx(0); // Inform verifier imm idx 0 is handled.
+ return actionIf(Action, typeInSet(typeIdx(0), Types));
+ }
+
+ LegalizeRuleSet &actionForTypeWithAnyImm(
+ LegalizeAction Action, std::initializer_list<std::pair<LLT, LLT>> Types) {
+ using namespace LegalityPredicates;
+ immIdx(0); // Inform verifier imm idx 0 is handled.
+ return actionIf(Action, typePairInSet(typeIdx(0), typeIdx(1), Types));
+ }
+
+ /// Use the given action when type indexes 0 and 1 are both in the given list.
+ /// That is, the type pair is in the cartesian product of the list.
+ /// Action should not be an action that requires mutation.
+ LegalizeRuleSet &actionForCartesianProduct(LegalizeAction Action,
+ std::initializer_list<LLT> Types) {
+ using namespace LegalityPredicates;
+ return actionIf(Action, all(typeInSet(typeIdx(0), Types),
+ typeInSet(typeIdx(1), Types)));
+ }
+ /// Use the given action when type indexes 0 and 1 are both in their
+ /// respective lists.
+ /// That is, the type pair is in the cartesian product of the lists
+ /// Action should not be an action that requires mutation.
+ LegalizeRuleSet &
+ actionForCartesianProduct(LegalizeAction Action,
+ std::initializer_list<LLT> Types0,
+ std::initializer_list<LLT> Types1) {
+ using namespace LegalityPredicates;
+ return actionIf(Action, all(typeInSet(typeIdx(0), Types0),
+ typeInSet(typeIdx(1), Types1)));
+ }
+ /// Use the given action when type indexes 0, 1, and 2 are all in their
+ /// respective lists.
+ /// That is, the type triple is in the cartesian product of the lists
+ /// Action should not be an action that requires mutation.
+ LegalizeRuleSet &actionForCartesianProduct(
+ LegalizeAction Action, std::initializer_list<LLT> Types0,
+ std::initializer_list<LLT> Types1, std::initializer_list<LLT> Types2) {
+ using namespace LegalityPredicates;
+ return actionIf(Action, all(typeInSet(typeIdx(0), Types0),
+ all(typeInSet(typeIdx(1), Types1),
+ typeInSet(typeIdx(2), Types2))));
+ }
+
+public:
+ LegalizeRuleSet() : AliasOf(0), IsAliasedByAnother(false), Rules() {}
+
+ bool isAliasedByAnother() { return IsAliasedByAnother; }
+ void setIsAliasedByAnother() { IsAliasedByAnother = true; }
+ void aliasTo(unsigned Opcode) {
+ assert((AliasOf == 0 || AliasOf == Opcode) &&
+ "Opcode is already aliased to another opcode");
+ assert(Rules.empty() && "Aliasing will discard rules");
+ AliasOf = Opcode;
+ }
+ unsigned getAlias() const { return AliasOf; }
+
+ /// The instruction is legal if predicate is true.
+ LegalizeRuleSet &legalIf(LegalityPredicate Predicate) {
+    // We have no choice but to conservatively assume that the free-form
+ // user-provided Predicate properly handles all type indices:
+ markAllIdxsAsCovered();
+ return actionIf(LegalizeAction::Legal, Predicate);
+ }
+ /// The instruction is legal when type index 0 is any type in the given list.
+ LegalizeRuleSet &legalFor(std::initializer_list<LLT> Types) {
+ return actionFor(LegalizeAction::Legal, Types);
+ }
+  /// The instruction is legal when type indexes 0 and 1 form any type pair in
+  /// the given list.
+ LegalizeRuleSet &legalFor(std::initializer_list<std::pair<LLT, LLT>> Types) {
+ return actionFor(LegalizeAction::Legal, Types);
+ }
+ /// The instruction is legal when type index 0 is any type in the given list
+ /// and imm index 0 is anything.
+ LegalizeRuleSet &legalForTypeWithAnyImm(std::initializer_list<LLT> Types) {
+ markAllIdxsAsCovered();
+ return actionForTypeWithAnyImm(LegalizeAction::Legal, Types);
+ }
+
+ LegalizeRuleSet &legalForTypeWithAnyImm(
+ std::initializer_list<std::pair<LLT, LLT>> Types) {
+ markAllIdxsAsCovered();
+ return actionForTypeWithAnyImm(LegalizeAction::Legal, Types);
+ }
+
+  /// The instruction is legal when type indexes 0 and 1, along with the memory
+  /// size and minimum alignment, match any type-and-size tuple in the given
+  /// list.
+ LegalizeRuleSet &legalForTypesWithMemDesc(
+ std::initializer_list<LegalityPredicates::TypePairAndMemDesc>
+ TypesAndMemDesc) {
+ return actionIf(LegalizeAction::Legal,
+ LegalityPredicates::typePairAndMemDescInSet(
+ typeIdx(0), typeIdx(1), /*MMOIdx*/ 0, TypesAndMemDesc));
+ }
+ /// The instruction is legal when type indexes 0 and 1 are both in the given
+ /// list. That is, the type pair is in the cartesian product of the list.
+ LegalizeRuleSet &legalForCartesianProduct(std::initializer_list<LLT> Types) {
+ return actionForCartesianProduct(LegalizeAction::Legal, Types);
+ }
+  /// The instruction is legal when type indexes 0 and 1 are both in their
+  /// respective lists.
+ LegalizeRuleSet &legalForCartesianProduct(std::initializer_list<LLT> Types0,
+ std::initializer_list<LLT> Types1) {
+ return actionForCartesianProduct(LegalizeAction::Legal, Types0, Types1);
+ }
+  /// The instruction is legal when type indexes 0, 1, and 2 are all in their
+  /// respective lists.
+ LegalizeRuleSet &legalForCartesianProduct(std::initializer_list<LLT> Types0,
+ std::initializer_list<LLT> Types1,
+ std::initializer_list<LLT> Types2) {
+ return actionForCartesianProduct(LegalizeAction::Legal, Types0, Types1,
+ Types2);
+ }
+
+ LegalizeRuleSet &alwaysLegal() {
+ using namespace LegalizeMutations;
+ markAllIdxsAsCovered();
+ return actionIf(LegalizeAction::Legal, always);
+ }
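+  // Illustrative sketch of typical target code combining the rules above
+  // with the clamping helpers declared further below (S32 and S64 are
+  // assumed LLT locals):
+  //   getActionDefinitionsBuilder(TargetOpcode::G_ADD)
+  //       .legalFor({S32, S64})
+  //       .widenScalarToNextPow2(0)
+  //       .clampScalar(0, S32, S64);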
+
+ /// The specified type index is coerced if predicate is true.
+ LegalizeRuleSet &bitcastIf(LegalityPredicate Predicate,
+ LegalizeMutation Mutation) {
+    // We have no choice but to conservatively assume that bitcasting with a
+    // free-form user-provided Predicate properly handles all type indices:
+ markAllIdxsAsCovered();
+ return actionIf(LegalizeAction::Bitcast, Predicate, Mutation);
+ }
+
+ /// The instruction is lowered.
+ LegalizeRuleSet &lower() {
+ using namespace LegalizeMutations;
+    // We have no choice but to conservatively assume that predicate-less
+    // lowering properly handles all type indices by design:
+ markAllIdxsAsCovered();
+ return actionIf(LegalizeAction::Lower, always);
+ }
+ /// The instruction is lowered if predicate is true. Keep type index 0 as the
+ /// same type.
+ LegalizeRuleSet &lowerIf(LegalityPredicate Predicate) {
+ using namespace LegalizeMutations;
+    // We have no choice but to conservatively assume that lowering with a
+    // free-form user-provided Predicate properly handles all type indices:
+ markAllIdxsAsCovered();
+ return actionIf(LegalizeAction::Lower, Predicate);
+ }
+ /// The instruction is lowered if predicate is true.
+ LegalizeRuleSet &lowerIf(LegalityPredicate Predicate,
+ LegalizeMutation Mutation) {
+    // We have no choice but to conservatively assume that lowering with a
+    // free-form user-provided Predicate properly handles all type indices:
+ markAllIdxsAsCovered();
+ return actionIf(LegalizeAction::Lower, Predicate, Mutation);
+ }
+ /// The instruction is lowered when type index 0 is any type in the given
+ /// list. Keep type index 0 as the same type.
+ LegalizeRuleSet &lowerFor(std::initializer_list<LLT> Types) {
+ return actionFor(LegalizeAction::Lower, Types);
+ }
+ /// The instruction is lowered when type index 0 is any type in the given
+ /// list.
+ LegalizeRuleSet &lowerFor(std::initializer_list<LLT> Types,
+ LegalizeMutation Mutation) {
+ return actionFor(LegalizeAction::Lower, Types, Mutation);
+ }
+  /// The instruction is lowered when type indexes 0 and 1 form any type pair
+  /// in the given list. Keep type index 0 as the same type.
+ LegalizeRuleSet &lowerFor(std::initializer_list<std::pair<LLT, LLT>> Types) {
+ return actionFor(LegalizeAction::Lower, Types);
+ }
+  /// The instruction is lowered when type indexes 0 and 1 form any type pair
+  /// in the given list.
+ LegalizeRuleSet &lowerFor(std::initializer_list<std::pair<LLT, LLT>> Types,
+ LegalizeMutation Mutation) {
+ return actionFor(LegalizeAction::Lower, Types, Mutation);
+ }
+ /// The instruction is lowered when type indexes 0 and 1 are both in their
+ /// respective lists.
+ LegalizeRuleSet &lowerForCartesianProduct(std::initializer_list<LLT> Types0,
+ std::initializer_list<LLT> Types1) {
+ using namespace LegalityPredicates;
+ return actionForCartesianProduct(LegalizeAction::Lower, Types0, Types1);
+ }
+  /// The instruction is lowered when type indexes 0, 1, and 2 are all in
+  /// their respective lists.
+ LegalizeRuleSet &lowerForCartesianProduct(std::initializer_list<LLT> Types0,
+ std::initializer_list<LLT> Types1,
+ std::initializer_list<LLT> Types2) {
+ using namespace LegalityPredicates;
+ return actionForCartesianProduct(LegalizeAction::Lower, Types0, Types1,
+ Types2);
+ }
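+  // Illustrative sketch: a target without native saturating arithmetic
+  // might expand it unconditionally:
+  //   getActionDefinitionsBuilder({TargetOpcode::G_SADDSAT,
+  //                                TargetOpcode::G_SSUBSAT})
+  //       .lower();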
+
+ /// The instruction is emitted as a library call.
+ LegalizeRuleSet &libcall() {
+ using namespace LegalizeMutations;
+    // We have no choice but to conservatively assume that a predicate-less
+    // libcall rule properly handles all type indices by design:
+ markAllIdxsAsCovered();
+ return actionIf(LegalizeAction::Libcall, always);
+ }
+
+ /// Like legalIf, but for the Libcall action.
+ LegalizeRuleSet &libcallIf(LegalityPredicate Predicate) {
+    // We have no choice but to conservatively assume that a libcall with a
+    // free-form user-provided Predicate properly handles all type indices:
+ markAllIdxsAsCovered();
+ return actionIf(LegalizeAction::Libcall, Predicate);
+ }
+ LegalizeRuleSet &libcallFor(std::initializer_list<LLT> Types) {
+ return actionFor(LegalizeAction::Libcall, Types);
+ }
+ LegalizeRuleSet &
+ libcallFor(std::initializer_list<std::pair<LLT, LLT>> Types) {
+ return actionFor(LegalizeAction::Libcall, Types);
+ }
+ LegalizeRuleSet &
+ libcallForCartesianProduct(std::initializer_list<LLT> Types) {
+ return actionForCartesianProduct(LegalizeAction::Libcall, Types);
+ }
+ LegalizeRuleSet &
+ libcallForCartesianProduct(std::initializer_list<LLT> Types0,
+ std::initializer_list<LLT> Types1) {
+ return actionForCartesianProduct(LegalizeAction::Libcall, Types0, Types1);
+ }
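+  // Illustrative sketch: floating-point remainder is commonly emitted as a
+  // call to fmodf/fmod (S32 and S64 are assumed LLT locals):
+  //   getActionDefinitionsBuilder(TargetOpcode::G_FREM)
+  //       .libcallFor({S32, S64});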
+
+ /// Widen the scalar to the one selected by the mutation if the predicate is
+ /// true.
+ LegalizeRuleSet &widenScalarIf(LegalityPredicate Predicate,
+ LegalizeMutation Mutation) {
+    // We have no choice but to conservatively assume that an action with a
+    // free-form user-provided Predicate properly handles all type indices:
+ markAllIdxsAsCovered();
+ return actionIf(LegalizeAction::WidenScalar, Predicate, Mutation);
+ }
+ /// Narrow the scalar to the one selected by the mutation if the predicate is
+ /// true.
+ LegalizeRuleSet &narrowScalarIf(LegalityPredicate Predicate,
+ LegalizeMutation Mutation) {
+    // We have no choice but to conservatively assume that an action with a
+    // free-form user-provided Predicate properly handles all type indices:
+ markAllIdxsAsCovered();
+ return actionIf(LegalizeAction::NarrowScalar, Predicate, Mutation);
+ }
+  /// Narrow the scalar, as specified by the mutation, when type indexes 0 and
+  /// 1 form any type pair in the given list.
+ LegalizeRuleSet &
+ narrowScalarFor(std::initializer_list<std::pair<LLT, LLT>> Types,
+ LegalizeMutation Mutation) {
+ return actionFor(LegalizeAction::NarrowScalar, Types, Mutation);
+ }
+
+ /// Add more elements to reach the type selected by the mutation if the
+ /// predicate is true.
+ LegalizeRuleSet &moreElementsIf(LegalityPredicate Predicate,
+ LegalizeMutation Mutation) {
+    // We have no choice but to conservatively assume that an action with a
+    // free-form user-provided Predicate properly handles all type indices:
+ markAllIdxsAsCovered();
+ return actionIf(LegalizeAction::MoreElements, Predicate, Mutation);
+ }
+ /// Remove elements to reach the type selected by the mutation if the
+ /// predicate is true.
+ LegalizeRuleSet &fewerElementsIf(LegalityPredicate Predicate,
+ LegalizeMutation Mutation) {
+    // We have no choice but to conservatively assume that an action with a
+    // free-form user-provided Predicate properly handles all type indices:
+ markAllIdxsAsCovered();
+ return actionIf(LegalizeAction::FewerElements, Predicate, Mutation);
+ }
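+  // Illustrative sketch combining the two conditional actions above (S32 is
+  // an assumed LLT local):
+  //   getActionDefinitionsBuilder(TargetOpcode::G_AND)
+  //       .widenScalarIf(LegalityPredicates::scalarNarrowerThan(0, 32),
+  //                      LegalizeMutations::changeTo(0, S32))
+  //       .fewerElementsIf(LegalityPredicates::numElementsNotPow2(0),
+  //                        LegalizeMutations::scalarize(0));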
+
+ /// The instruction is unsupported.
+ LegalizeRuleSet &unsupported() {
+ markAllIdxsAsCovered();
+ return actionIf(LegalizeAction::Unsupported, always);
+ }
+ LegalizeRuleSet &unsupportedIf(LegalityPredicate Predicate) {
+ return actionIf(LegalizeAction::Unsupported, Predicate);
+ }
+
+ LegalizeRuleSet &unsupportedFor(std::initializer_list<LLT> Types) {
+ return actionFor(LegalizeAction::Unsupported, Types);
+ }
+
+ LegalizeRuleSet &unsupportedIfMemSizeNotPow2() {
+ return actionIf(LegalizeAction::Unsupported,
+ LegalityPredicates::memSizeInBytesNotPow2(0));
+ }
+ LegalizeRuleSet &lowerIfMemSizeNotPow2() {
+ return actionIf(LegalizeAction::Lower,
+ LegalityPredicates::memSizeInBytesNotPow2(0));
+ }
+
+ LegalizeRuleSet &customIf(LegalityPredicate Predicate) {
+    // We have no choice but to conservatively assume that a custom action
+    // with a free-form user-provided Predicate properly handles all type
+    // indices:
+ markAllIdxsAsCovered();
+ return actionIf(LegalizeAction::Custom, Predicate);
+ }
+ LegalizeRuleSet &customFor(std::initializer_list<LLT> Types) {
+ return actionFor(LegalizeAction::Custom, Types);
+ }
+
+  /// The instruction is custom when type indexes 0 and 1 form any type pair in
+  /// the given list.
+ LegalizeRuleSet &customFor(std::initializer_list<std::pair<LLT, LLT>> Types) {
+ return actionFor(LegalizeAction::Custom, Types);
+ }
+
+ LegalizeRuleSet &customForCartesianProduct(std::initializer_list<LLT> Types) {
+ return actionForCartesianProduct(LegalizeAction::Custom, Types);
+ }
+ LegalizeRuleSet &
+ customForCartesianProduct(std::initializer_list<LLT> Types0,
+ std::initializer_list<LLT> Types1) {
+ return actionForCartesianProduct(LegalizeAction::Custom, Types0, Types1);
+ }
+
+ /// Unconditionally custom lower.
+ LegalizeRuleSet &custom() {
+ return customIf(always);
+ }
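+  // Illustrative sketch: marking an operation Custom routes it to the
+  // target's legalizeCustom override on LegalizerInfo (S64 is an assumed
+  // LLT local):
+  //   getActionDefinitionsBuilder(TargetOpcode::G_FCONSTANT).customFor({S64});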
+
+  /// Widen the scalar to the next power of two that is at least MinSize.
+  /// No effect if the type is not a scalar or its size is already a power of
+  /// two.
+ LegalizeRuleSet &widenScalarToNextPow2(unsigned TypeIdx,
+ unsigned MinSize = 0) {
+ using namespace LegalityPredicates;
+ return actionIf(
+ LegalizeAction::WidenScalar, sizeNotPow2(typeIdx(TypeIdx)),
+ LegalizeMutations::widenScalarOrEltToNextPow2(TypeIdx, MinSize));
+ }
+
+ /// Widen the scalar or vector element type to the next power of two that is
+ /// at least MinSize. No effect if the scalar size is a power of two.
+ LegalizeRuleSet &widenScalarOrEltToNextPow2(unsigned TypeIdx,
+ unsigned MinSize = 0) {
+ using namespace LegalityPredicates;
+ return actionIf(
+ LegalizeAction::WidenScalar, scalarOrEltSizeNotPow2(typeIdx(TypeIdx)),
+ LegalizeMutations::widenScalarOrEltToNextPow2(TypeIdx, MinSize));
+ }
+
+ LegalizeRuleSet &narrowScalar(unsigned TypeIdx, LegalizeMutation Mutation) {
+ using namespace LegalityPredicates;
+ return actionIf(LegalizeAction::NarrowScalar, isScalar(typeIdx(TypeIdx)),
+ Mutation);
+ }
+
+ LegalizeRuleSet &scalarize(unsigned TypeIdx) {
+ using namespace LegalityPredicates;
+ return actionIf(LegalizeAction::FewerElements, isVector(typeIdx(TypeIdx)),
+ LegalizeMutations::scalarize(TypeIdx));
+ }
+
+ LegalizeRuleSet &scalarizeIf(LegalityPredicate Predicate, unsigned TypeIdx) {
+ using namespace LegalityPredicates;
+ return actionIf(LegalizeAction::FewerElements,
+ all(Predicate, isVector(typeIdx(TypeIdx))),
+ LegalizeMutations::scalarize(TypeIdx));
+ }
+
+ /// Ensure the scalar or element is at least as wide as Ty.
+ LegalizeRuleSet &minScalarOrElt(unsigned TypeIdx, const LLT Ty) {
+ using namespace LegalityPredicates;
+ using namespace LegalizeMutations;
+ return actionIf(LegalizeAction::WidenScalar,
+ scalarOrEltNarrowerThan(TypeIdx, Ty.getScalarSizeInBits()),
+ changeElementTo(typeIdx(TypeIdx), Ty));
+ }
+
+ /// Ensure the scalar or element is at least as wide as Ty.
+ LegalizeRuleSet &minScalarOrEltIf(LegalityPredicate Predicate,
+ unsigned TypeIdx, const LLT Ty) {
+ using namespace LegalityPredicates;
+ using namespace LegalizeMutations;
+ return actionIf(LegalizeAction::WidenScalar,
+ all(Predicate, scalarOrEltNarrowerThan(
+ TypeIdx, Ty.getScalarSizeInBits())),
+ changeElementTo(typeIdx(TypeIdx), Ty));
+ }
+
+ /// Ensure the scalar is at least as wide as Ty.
+ LegalizeRuleSet &minScalar(unsigned TypeIdx, const LLT Ty) {
+ using namespace LegalityPredicates;
+ using namespace LegalizeMutations;
+ return actionIf(LegalizeAction::WidenScalar,
+ scalarNarrowerThan(TypeIdx, Ty.getSizeInBits()),
+ changeTo(typeIdx(TypeIdx), Ty));
+ }
+
+ /// Ensure the scalar is at most as wide as Ty.
+ LegalizeRuleSet &maxScalarOrElt(unsigned TypeIdx, const LLT Ty) {
+ using namespace LegalityPredicates;
+ using namespace LegalizeMutations;
+ return actionIf(LegalizeAction::NarrowScalar,
+ scalarOrEltWiderThan(TypeIdx, Ty.getScalarSizeInBits()),
+ changeElementTo(typeIdx(TypeIdx), Ty));
+ }
+
+ /// Ensure the scalar is at most as wide as Ty.
+ LegalizeRuleSet &maxScalar(unsigned TypeIdx, const LLT Ty) {
+ using namespace LegalityPredicates;
+ using namespace LegalizeMutations;
+ return actionIf(LegalizeAction::NarrowScalar,
+ scalarWiderThan(TypeIdx, Ty.getSizeInBits()),
+ changeTo(typeIdx(TypeIdx), Ty));
+ }
+
+ /// Conditionally limit the maximum size of the scalar.
+ /// For example, when the maximum size of one type depends on the size of
+  /// another, such as extracting N bits from an M-bit container.
+ LegalizeRuleSet &maxScalarIf(LegalityPredicate Predicate, unsigned TypeIdx,
+ const LLT Ty) {
+ using namespace LegalityPredicates;
+ using namespace LegalizeMutations;
+ return actionIf(
+ LegalizeAction::NarrowScalar,
+ [=](const LegalityQuery &Query) {
+ const LLT QueryTy = Query.Types[TypeIdx];
+ return QueryTy.isScalar() &&
+ QueryTy.getSizeInBits() > Ty.getSizeInBits() &&
+ Predicate(Query);
+ },
+ changeElementTo(typeIdx(TypeIdx), Ty));
+ }
+
+ /// Limit the range of scalar sizes to MinTy and MaxTy.
+ LegalizeRuleSet &clampScalar(unsigned TypeIdx, const LLT MinTy,
+ const LLT MaxTy) {
+ assert(MinTy.isScalar() && MaxTy.isScalar() && "Expected scalar types");
+ return minScalar(TypeIdx, MinTy).maxScalar(TypeIdx, MaxTy);
+ }
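+  // Illustrative sketch: clampScalar(0, S32, S64) widens an s16 operation
+  // to s32 and narrows an s128 operation to s64, while leaving s32, s48 and
+  // s64 untouched (hence it is often paired with widenScalarToNextPow2).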
+
+ /// Limit the range of scalar sizes to MinTy and MaxTy.
+ LegalizeRuleSet &clampScalarOrElt(unsigned TypeIdx, const LLT MinTy,
+ const LLT MaxTy) {
+ return minScalarOrElt(TypeIdx, MinTy).maxScalarOrElt(TypeIdx, MaxTy);
+ }
+
+ /// Widen the scalar to match the size of another.
+ LegalizeRuleSet &minScalarSameAs(unsigned TypeIdx, unsigned LargeTypeIdx) {
+ typeIdx(TypeIdx);
+ return widenScalarIf(
+ [=](const LegalityQuery &Query) {
+ return Query.Types[LargeTypeIdx].getScalarSizeInBits() >
+ Query.Types[TypeIdx].getSizeInBits();
+ },
+ LegalizeMutations::changeElementSizeTo(TypeIdx, LargeTypeIdx));
+ }
+
+ /// Narrow the scalar to match the size of another.
+ LegalizeRuleSet &maxScalarSameAs(unsigned TypeIdx, unsigned NarrowTypeIdx) {
+ typeIdx(TypeIdx);
+ return narrowScalarIf(
+ [=](const LegalityQuery &Query) {
+ return Query.Types[NarrowTypeIdx].getScalarSizeInBits() <
+ Query.Types[TypeIdx].getSizeInBits();
+ },
+ LegalizeMutations::changeElementSizeTo(TypeIdx, NarrowTypeIdx));
+ }
+
+ /// Change the type \p TypeIdx to have the same scalar size as type \p
+ /// SameSizeIdx.
+ LegalizeRuleSet &scalarSameSizeAs(unsigned TypeIdx, unsigned SameSizeIdx) {
+ return minScalarSameAs(TypeIdx, SameSizeIdx)
+ .maxScalarSameAs(TypeIdx, SameSizeIdx);
+ }
+
+ /// Conditionally widen the scalar or elt to match the size of another.
+ LegalizeRuleSet &minScalarEltSameAsIf(LegalityPredicate Predicate,
+ unsigned TypeIdx, unsigned LargeTypeIdx) {
+ typeIdx(TypeIdx);
+ return widenScalarIf(
+ [=](const LegalityQuery &Query) {
+ return Query.Types[LargeTypeIdx].getScalarSizeInBits() >
+ Query.Types[TypeIdx].getScalarSizeInBits() &&
+ Predicate(Query);
+ },
+ [=](const LegalityQuery &Query) {
+ LLT T = Query.Types[LargeTypeIdx];
+ return std::make_pair(TypeIdx, T);
+ });
+ }
+
+ /// Add more elements to the vector to reach the next power of two.
+ /// No effect if the type is not a vector or the element count is a power of
+ /// two.
+ LegalizeRuleSet &moreElementsToNextPow2(unsigned TypeIdx) {
+ using namespace LegalityPredicates;
+ return actionIf(LegalizeAction::MoreElements,
+ numElementsNotPow2(typeIdx(TypeIdx)),
+ LegalizeMutations::moreElementsToNextPow2(TypeIdx));
+ }
+
+ /// Limit the number of elements in EltTy vectors to at least MinElements.
+ LegalizeRuleSet &clampMinNumElements(unsigned TypeIdx, const LLT EltTy,
+ unsigned MinElements) {
+ // Mark the type index as covered:
+ typeIdx(TypeIdx);
+ return actionIf(
+ LegalizeAction::MoreElements,
+ [=](const LegalityQuery &Query) {
+ LLT VecTy = Query.Types[TypeIdx];
+ return VecTy.isVector() && VecTy.getElementType() == EltTy &&
+ VecTy.getNumElements() < MinElements;
+ },
+ [=](const LegalityQuery &Query) {
+ LLT VecTy = Query.Types[TypeIdx];
+ return std::make_pair(
+ TypeIdx, LLT::vector(MinElements, VecTy.getElementType()));
+ });
+ }
+ /// Limit the number of elements in EltTy vectors to at most MaxElements.
+ LegalizeRuleSet &clampMaxNumElements(unsigned TypeIdx, const LLT EltTy,
+ unsigned MaxElements) {
+ // Mark the type index as covered:
+ typeIdx(TypeIdx);
+ return actionIf(
+ LegalizeAction::FewerElements,
+ [=](const LegalityQuery &Query) {
+ LLT VecTy = Query.Types[TypeIdx];
+ return VecTy.isVector() && VecTy.getElementType() == EltTy &&
+ VecTy.getNumElements() > MaxElements;
+ },
+ [=](const LegalityQuery &Query) {
+ LLT VecTy = Query.Types[TypeIdx];
+ LLT NewTy = LLT::scalarOrVector(MaxElements, VecTy.getElementType());
+ return std::make_pair(TypeIdx, NewTy);
+ });
+ }
+ /// Limit the number of elements for the given vectors to at least MinTy's
+ /// number of elements and at most MaxTy's number of elements.
+ ///
+ /// No effect if the type is not a vector or does not have the same element
+ /// type as the constraints.
+ /// The element type of MinTy and MaxTy must match.
+ LegalizeRuleSet &clampNumElements(unsigned TypeIdx, const LLT MinTy,
+ const LLT MaxTy) {
+ assert(MinTy.getElementType() == MaxTy.getElementType() &&
+ "Expected element types to agree");
+
+ const LLT EltTy = MinTy.getElementType();
+ return clampMinNumElements(TypeIdx, EltTy, MinTy.getNumElements())
+ .clampMaxNumElements(TypeIdx, EltTy, MaxTy.getNumElements());
+ }
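+  // Illustrative sketch (V4S16 and V8S16 are assumed LLT locals):
+  // clampNumElements(0, V4S16, V8S16) widens a <2 x s16> operation to
+  // <4 x s16> (MoreElements) and reduces a <16 x s16> operation to <8 x s16>
+  // (FewerElements); vectors with a different element type are unaffected.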
+
+  /// Fall back on the previous implementation. This should only be used while
+  /// porting a rule.
+ LegalizeRuleSet &fallback() {
+ add({always, LegalizeAction::UseLegacyRules});
+ return *this;
+ }
+
+ /// Check if there is no type index which is obviously not handled by the
+ /// LegalizeRuleSet in any way at all.
+ /// \pre Type indices of the opcode form a dense [0, \p NumTypeIdxs) set.
+ bool verifyTypeIdxsCoverage(unsigned NumTypeIdxs) const;
+ /// Check if there is no imm index which is obviously not handled by the
+ /// LegalizeRuleSet in any way at all.
+  /// \pre Imm indices of the opcode form a dense [0, \p NumImmIdxs) set.
+ bool verifyImmIdxsCoverage(unsigned NumImmIdxs) const;
+
+ /// Apply the ruleset to the given LegalityQuery.
+ LegalizeActionStep apply(const LegalityQuery &Query) const;
+};
+
+class LegalizerInfo {
+public:
+ LegalizerInfo();
+ virtual ~LegalizerInfo() = default;
+
+ unsigned getOpcodeIdxForOpcode(unsigned Opcode) const;
+ unsigned getActionDefinitionsIdx(unsigned Opcode) const;
+
+ /// Compute any ancillary tables needed to quickly decide how an operation
+  /// should be handled. This must be called after all "set*Action" methods but
+ /// before any query is made or incorrect results may be returned.
+ void computeTables();
+
+ /// Perform simple self-diagnostic and assert if there is anything obviously
+ /// wrong with the actions set up.
+ void verify(const MCInstrInfo &MII) const;
+
+ static bool needsLegalizingToDifferentSize(const LegalizeAction Action) {
+ using namespace LegalizeActions;
+ switch (Action) {
+ case NarrowScalar:
+ case WidenScalar:
+ case FewerElements:
+ case MoreElements:
+ case Unsupported:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ using SizeAndAction = std::pair<uint16_t, LegalizeAction>;
+ using SizeAndActionsVec = std::vector<SizeAndAction>;
+ using SizeChangeStrategy =
+ std::function<SizeAndActionsVec(const SizeAndActionsVec &v)>;
+
+ /// More friendly way to set an action for common types that have an LLT
+ /// representation.
+ /// The LegalizeAction must be one for which NeedsLegalizingToDifferentSize
+ /// returns false.
+ void setAction(const InstrAspect &Aspect, LegalizeAction Action) {
+ assert(!needsLegalizingToDifferentSize(Action));
+ TablesInitialized = false;
+ const unsigned OpcodeIdx = Aspect.Opcode - FirstOp;
+ if (SpecifiedActions[OpcodeIdx].size() <= Aspect.Idx)
+ SpecifiedActions[OpcodeIdx].resize(Aspect.Idx + 1);
+ SpecifiedActions[OpcodeIdx][Aspect.Idx][Aspect.Type] = Action;
+ }
+
+ /// The setAction calls record the non-size-changing legalization actions
+  /// to take on specifically-sized types. The SizeChangeStrategy defines what
+ /// to do when the size of the type needs to be changed to reach a legally
+ /// sized type (i.e., one that was defined through a setAction call).
+ /// e.g.
+ /// setAction ({G_ADD, 0, LLT::scalar(32)}, Legal);
+ /// setLegalizeScalarToDifferentSizeStrategy(
+ /// G_ADD, 0, widenToLargerTypesAndNarrowToLargest);
+ /// will end up defining getAction({G_ADD, 0, T}) to return the following
+ /// actions for different scalar types T:
+ /// LLT::scalar(1)..LLT::scalar(31): {WidenScalar, 0, LLT::scalar(32)}
+ /// LLT::scalar(32): {Legal, 0, LLT::scalar(32)}
+ /// LLT::scalar(33)..: {NarrowScalar, 0, LLT::scalar(32)}
+ ///
+  /// If no SizeChangeStrategy gets defined through this function,
+  /// the default is unsupportedForDifferentSizes.
+ void setLegalizeScalarToDifferentSizeStrategy(const unsigned Opcode,
+ const unsigned TypeIdx,
+ SizeChangeStrategy S) {
+ const unsigned OpcodeIdx = Opcode - FirstOp;
+ if (ScalarSizeChangeStrategies[OpcodeIdx].size() <= TypeIdx)
+ ScalarSizeChangeStrategies[OpcodeIdx].resize(TypeIdx + 1);
+ ScalarSizeChangeStrategies[OpcodeIdx][TypeIdx] = S;
+ }
+
+ /// See also setLegalizeScalarToDifferentSizeStrategy.
+  /// This function allows setting the SizeChangeStrategy for vector elements.
+ void setLegalizeVectorElementToDifferentSizeStrategy(const unsigned Opcode,
+ const unsigned TypeIdx,
+ SizeChangeStrategy S) {
+ const unsigned OpcodeIdx = Opcode - FirstOp;
+ if (VectorElementSizeChangeStrategies[OpcodeIdx].size() <= TypeIdx)
+ VectorElementSizeChangeStrategies[OpcodeIdx].resize(TypeIdx + 1);
+ VectorElementSizeChangeStrategies[OpcodeIdx][TypeIdx] = S;
+ }
+
+ /// A SizeChangeStrategy for the common case where legalization for a
+ /// particular operation consists of only supporting a specific set of type
+ /// sizes. E.g.
+ /// setAction ({G_DIV, 0, LLT::scalar(32)}, Legal);
+ /// setAction ({G_DIV, 0, LLT::scalar(64)}, Legal);
+ /// setLegalizeScalarToDifferentSizeStrategy(
+ /// G_DIV, 0, unsupportedForDifferentSizes);
+ /// will result in getAction({G_DIV, 0, T}) to return Legal for s32 and s64,
+ /// and Unsupported for all other scalar types T.
+ static SizeAndActionsVec
+ unsupportedForDifferentSizes(const SizeAndActionsVec &v) {
+ using namespace LegalizeActions;
+ return increaseToLargerTypesAndDecreaseToLargest(v, Unsupported,
+ Unsupported);
+ }
+
+  /// A SizeChangeStrategy for the common case where legalization for a
+  /// particular operation consists of widening the type to a larger legal
+  /// type, unless no such type exists, in which case it should instead be
+  /// narrowed to the largest legal type.
+ static SizeAndActionsVec
+ widenToLargerTypesAndNarrowToLargest(const SizeAndActionsVec &v) {
+ using namespace LegalizeActions;
+ assert(v.size() > 0 &&
+ "At least one size that can be legalized towards is needed"
+ " for this SizeChangeStrategy");
+ return increaseToLargerTypesAndDecreaseToLargest(v, WidenScalar,
+ NarrowScalar);
+ }
+
+ static SizeAndActionsVec
+ widenToLargerTypesUnsupportedOtherwise(const SizeAndActionsVec &v) {
+ using namespace LegalizeActions;
+ return increaseToLargerTypesAndDecreaseToLargest(v, WidenScalar,
+ Unsupported);
+ }
+
+ static SizeAndActionsVec
+ narrowToSmallerAndUnsupportedIfTooSmall(const SizeAndActionsVec &v) {
+ using namespace LegalizeActions;
+ return decreaseToSmallerTypesAndIncreaseToSmallest(v, NarrowScalar,
+ Unsupported);
+ }
+
+ static SizeAndActionsVec
+ narrowToSmallerAndWidenToSmallest(const SizeAndActionsVec &v) {
+ using namespace LegalizeActions;
+ assert(v.size() > 0 &&
+ "At least one size that can be legalized towards is needed"
+ " for this SizeChangeStrategy");
+ return decreaseToSmallerTypesAndIncreaseToSmallest(v, NarrowScalar,
+ WidenScalar);
+ }
+
+  /// A SizeChangeStrategy for the common case where legalization for a
+  /// particular vector operation consists of adding more elements to the
+  /// vector until a legal type is reached, unless no such type exists, in
+  /// which case it should instead be legalized towards the widest vector
+  /// that is still legal. E.g.
+ /// setAction({G_ADD, LLT::vector(8, 8)}, Legal);
+ /// setAction({G_ADD, LLT::vector(16, 8)}, Legal);
+ /// setAction({G_ADD, LLT::vector(2, 32)}, Legal);
+ /// setAction({G_ADD, LLT::vector(4, 32)}, Legal);
+ /// setLegalizeVectorElementToDifferentSizeStrategy(
+ /// G_ADD, 0, moreToWiderTypesAndLessToWidest);
+ /// will result in the following getAction results:
+ /// * getAction({G_ADD, LLT::vector(8,8)}) returns
+ /// (Legal, vector(8,8)).
+ /// * getAction({G_ADD, LLT::vector(9,8)}) returns
+ /// (MoreElements, vector(16,8)).
+ /// * getAction({G_ADD, LLT::vector(8,32)}) returns
+ /// (FewerElements, vector(4,32)).
+ static SizeAndActionsVec
+ moreToWiderTypesAndLessToWidest(const SizeAndActionsVec &v) {
+ using namespace LegalizeActions;
+ return increaseToLargerTypesAndDecreaseToLargest(v, MoreElements,
+ FewerElements);
+ }
+
+ /// Helper function to implement many typical SizeChangeStrategy functions.
+ static SizeAndActionsVec
+ increaseToLargerTypesAndDecreaseToLargest(const SizeAndActionsVec &v,
+ LegalizeAction IncreaseAction,
+ LegalizeAction DecreaseAction);
+ /// Helper function to implement many typical SizeChangeStrategy functions.
+ static SizeAndActionsVec
+ decreaseToSmallerTypesAndIncreaseToSmallest(const SizeAndActionsVec &v,
+ LegalizeAction DecreaseAction,
+ LegalizeAction IncreaseAction);
+
+ /// Get the action definitions for the given opcode. Use this to run a
+ /// LegalityQuery through the definitions.
+ const LegalizeRuleSet &getActionDefinitions(unsigned Opcode) const;
+
+ /// Get the action definition builder for the given opcode. Use this to define
+ /// the action definitions.
+ ///
+ /// It is an error to request an opcode that has already been requested by the
+ /// multiple-opcode variant.
+ LegalizeRuleSet &getActionDefinitionsBuilder(unsigned Opcode);
+
+ /// Get the action definition builder for the given set of opcodes. Use this
+ /// to define the action definitions for multiple opcodes at once. The first
+ /// opcode given will be considered the representative opcode and will hold
+ /// the definitions whereas the other opcodes will be configured to refer to
+ /// the representative opcode. This lowers memory requirements and very
+ /// slightly improves performance.
+ ///
+ /// It would be very easy to introduce unexpected side-effects as a result of
+ /// this aliasing if it were permitted to request different but intersecting
+ /// sets of opcodes but that is difficult to keep track of. It is therefore an
+ /// error to request the same opcode twice using this API, to request an
+ /// opcode that already has definitions, or to use the single-opcode API on an
+ /// opcode that has already been requested by this API.
+ LegalizeRuleSet &
+ getActionDefinitionsBuilder(std::initializer_list<unsigned> Opcodes);
+ void aliasActionDefinitions(unsigned OpcodeTo, unsigned OpcodeFrom);
+
+ /// Determine what action should be taken to legalize the described
+ /// instruction. Requires computeTables to have been called.
+ ///
+ /// \returns a description of the next legalization step to perform.
+ LegalizeActionStep getAction(const LegalityQuery &Query) const;
+
+ /// Determine what action should be taken to legalize the given generic
+ /// instruction.
+ ///
+ /// \returns a description of the next legalization step to perform.
+ LegalizeActionStep getAction(const MachineInstr &MI,
+ const MachineRegisterInfo &MRI) const;
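+  // Illustrative sketch (LI is an assumed LegalizerInfo reference): a
+  // legalization driver inspects the returned step and applies it until the
+  // instruction becomes Legal or legalization fails:
+  //   LegalizeActionStep Step = LI.getAction(MI, MRI);
+  //   switch (Step.Action) {
+  //   case LegalizeAction::Legal:
+  //     break; // nothing to do
+  //   case LegalizeAction::WidenScalar:
+  //     // widen type index Step.TypeIdx to Step.NewType, then re-query
+  //     break;
+  //   // ... remaining actions ...
+  //   }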
+
+ bool isLegal(const LegalityQuery &Query) const {
+ return getAction(Query).Action == LegalizeAction::Legal;
+ }
+
+ bool isLegalOrCustom(const LegalityQuery &Query) const {
+ auto Action = getAction(Query).Action;
+ return Action == LegalizeAction::Legal || Action == LegalizeAction::Custom;
+ }
+
+ bool isLegal(const MachineInstr &MI, const MachineRegisterInfo &MRI) const;
+ bool isLegalOrCustom(const MachineInstr &MI,
+ const MachineRegisterInfo &MRI) const;
+
+ /// Called for instructions with the Custom LegalizationAction.
+ virtual bool legalizeCustom(LegalizerHelper &Helper,
+ MachineInstr &MI) const {
+ llvm_unreachable("must implement this if custom action is used");
+ }
+
+  /// \returns true if MI is either legal or has been legalized and false if
+  /// not legal.
+ virtual bool legalizeIntrinsic(LegalizerHelper &Helper,
+ MachineInstr &MI) const {
+ return true;
+ }
+
+  /// Return the opcode (SEXT/ZEXT/ANYEXT) that should be performed while
+  /// widening a constant of type SmallTy; targets can override this.
+  /// E.g. the DAG does (SmallTy.isByteSized() ? G_SEXT : G_ZEXT), which is
+  /// also the default here.
+ virtual unsigned getExtOpcodeForWideningConstant(LLT SmallTy) const;
+
+private:
+ /// Determine what action should be taken to legalize the given generic
+ /// instruction opcode, type-index and type. Requires computeTables to have
+ /// been called.
+ ///
+ /// \returns a pair consisting of the kind of legalization that should be
+ /// performed and the destination type.
+ std::pair<LegalizeAction, LLT>
+ getAspectAction(const InstrAspect &Aspect) const;
+
+ /// The SizeAndActionsVec is a representation mapping between all natural
+ /// numbers and an Action. The natural number represents the bit size of
+ /// the InstrAspect. For example, for a target with native support for 32-bit
+ /// and 64-bit additions, you'd express that as:
+ /// setScalarAction(G_ADD, 0,
+  ///                     {{1, WidenScalar},  // bit sizes [ 1, 32[
+ /// {32, Legal}, // bit sizes [32, 33[
+ /// {33, WidenScalar}, // bit sizes [33, 64[
+ /// {64, Legal}, // bit sizes [64, 65[
+ /// {65, NarrowScalar} // bit sizes [65, +inf[
+ /// });
+ /// It may be that only 64-bit pointers are supported on your target:
+  ///   setPointerAction(G_PTR_ADD, 0, /*AddrSpace=*/1,
+  ///                     {{1, Unsupported},  // bit sizes [ 1, 64[
+ /// {64, Legal}, // bit sizes [64, 65[
+ /// {65, Unsupported}, // bit sizes [65, +inf[
+ /// });
+ void setScalarAction(const unsigned Opcode, const unsigned TypeIndex,
+ const SizeAndActionsVec &SizeAndActions) {
+ const unsigned OpcodeIdx = Opcode - FirstOp;
+ SmallVector<SizeAndActionsVec, 1> &Actions = ScalarActions[OpcodeIdx];
+ setActions(TypeIndex, Actions, SizeAndActions);
+ }
+ void setPointerAction(const unsigned Opcode, const unsigned TypeIndex,
+ const unsigned AddressSpace,
+ const SizeAndActionsVec &SizeAndActions) {
+ const unsigned OpcodeIdx = Opcode - FirstOp;
+ if (AddrSpace2PointerActions[OpcodeIdx].find(AddressSpace) ==
+ AddrSpace2PointerActions[OpcodeIdx].end())
+ AddrSpace2PointerActions[OpcodeIdx][AddressSpace] = {{}};
+ SmallVector<SizeAndActionsVec, 1> &Actions =
+ AddrSpace2PointerActions[OpcodeIdx].find(AddressSpace)->second;
+ setActions(TypeIndex, Actions, SizeAndActions);
+ }
+
+ /// If an operation on a given vector type (say <M x iN>) isn't explicitly
+ /// specified, we proceed in 2 stages. First we legalize the underlying scalar
+ /// (so that there's at least one legal vector with that scalar), then we
+ /// adjust the number of elements in the vector so that it is legal. The
+ /// desired action in the first step is controlled by this function.
+ void setScalarInVectorAction(const unsigned Opcode, const unsigned TypeIndex,
+ const SizeAndActionsVec &SizeAndActions) {
+ unsigned OpcodeIdx = Opcode - FirstOp;
+ SmallVector<SizeAndActionsVec, 1> &Actions =
+ ScalarInVectorActions[OpcodeIdx];
+ setActions(TypeIndex, Actions, SizeAndActions);
+ }
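+
+  // A minimal illustrative sketch (the opcode and size breakpoints are
+  // assumptions, not any real target's rules): first make the vector
+  // *element* size legal, e.g. widen sub-32-bit elements:
+  //     setScalarInVectorAction(G_ADD, 0,
+  //       {{1, WidenScalar},  // element sizes [ 1, 32[
+  //        {32, Legal},       // element sizes [32, 65[
+  //        {65, NarrowScalar} // element sizes [65, +inf[
+  //       });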
+
+  /// See also setScalarInVectorAction.
+  /// This function lets you specify the number of elements in a vector that
+  /// are legal for a legal element size.
+ void setVectorNumElementAction(const unsigned Opcode,
+ const unsigned TypeIndex,
+ const unsigned ElementSize,
+ const SizeAndActionsVec &SizeAndActions) {
+ const unsigned OpcodeIdx = Opcode - FirstOp;
+ if (NumElements2Actions[OpcodeIdx].find(ElementSize) ==
+ NumElements2Actions[OpcodeIdx].end())
+ NumElements2Actions[OpcodeIdx][ElementSize] = {{}};
+ SmallVector<SizeAndActionsVec, 1> &Actions =
+ NumElements2Actions[OpcodeIdx].find(ElementSize)->second;
+ setActions(TypeIndex, Actions, SizeAndActions);
+ }
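+
+  // Continuing the illustrative sketch above (all values are assumptions):
+  // once 32-bit elements are legal, constrain the element *count*, e.g. only
+  // 4-element vectors of s32 are legal:
+  //     setVectorNumElementAction(G_ADD, 0, /*ElementSize=*/32,
+  //       {{1, MoreElements},  // number of elements [1, 4[
+  //        {4, Legal},         // number of elements [4, 5[
+  //        {5, FewerElements}  // number of elements [5, +inf[
+  //       });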
+
+ /// A partial SizeAndActionsVec potentially doesn't cover all bit sizes,
+ /// i.e. it's OK if it doesn't start from size 1.
+ static void checkPartialSizeAndActionsVector(const SizeAndActionsVec& v) {
+ using namespace LegalizeActions;
+#ifndef NDEBUG
+ // The sizes should be in increasing order
+ int prev_size = -1;
+ for(auto SizeAndAction: v) {
+ assert(SizeAndAction.first > prev_size);
+ prev_size = SizeAndAction.first;
+ }
+ // - for every Widen action, there should be a larger bitsize that
+ // can be legalized towards (e.g. Legal, Lower, Libcall or Custom
+ // action).
+ // - for every Narrow action, there should be a smaller bitsize that
+ // can be legalized towards.
+ int SmallestNarrowIdx = -1;
+ int LargestWidenIdx = -1;
+ int SmallestLegalizableToSameSizeIdx = -1;
+ int LargestLegalizableToSameSizeIdx = -1;
+ for(size_t i=0; i<v.size(); ++i) {
+ switch (v[i].second) {
+ case FewerElements:
+ case NarrowScalar:
+ if (SmallestNarrowIdx == -1)
+ SmallestNarrowIdx = i;
+ break;
+ case WidenScalar:
+ case MoreElements:
+ LargestWidenIdx = i;
+ break;
+ case Unsupported:
+ break;
+ default:
+ if (SmallestLegalizableToSameSizeIdx == -1)
+ SmallestLegalizableToSameSizeIdx = i;
+ LargestLegalizableToSameSizeIdx = i;
+ }
+ }
+ if (SmallestNarrowIdx != -1) {
+ assert(SmallestLegalizableToSameSizeIdx != -1);
+ assert(SmallestNarrowIdx > SmallestLegalizableToSameSizeIdx);
+ }
+ if (LargestWidenIdx != -1)
+ assert(LargestWidenIdx < LargestLegalizableToSameSizeIdx);
+#endif
+ }
+
+  /// A full SizeAndActionsVec must cover all bit sizes, i.e. it must start
+  /// from size 1.
+ static void checkFullSizeAndActionsVector(const SizeAndActionsVec& v) {
+#ifndef NDEBUG
+ // Data structure invariant: The first bit size must be size 1.
+ assert(v.size() >= 1);
+ assert(v[0].first == 1);
+ checkPartialSizeAndActionsVector(v);
+#endif
+ }
+
+ /// Sets actions for all bit sizes on a particular generic opcode, type
+ /// index and scalar or pointer type.
+ void setActions(unsigned TypeIndex,
+ SmallVector<SizeAndActionsVec, 1> &Actions,
+ const SizeAndActionsVec &SizeAndActions) {
+ checkFullSizeAndActionsVector(SizeAndActions);
+ if (Actions.size() <= TypeIndex)
+ Actions.resize(TypeIndex + 1);
+ Actions[TypeIndex] = SizeAndActions;
+ }
+
+ static SizeAndAction findAction(const SizeAndActionsVec &Vec,
+ const uint32_t Size);
+
+  /// Returns the next action needed to get the scalar or pointer type closer
+  /// to being legal.
+  /// E.g. findLegalAction({G_REM, 13}) should return
+  /// (WidenScalar, 32). After that, findLegalAction({G_REM, 32}) will
+  /// probably be called, which should return (Lower, 32).
+  /// This is assuming the setScalarAction on G_REM was something like:
+  ///     setScalarAction(G_REM, 0,
+  ///       {{1, WidenScalar},  // bit sizes [ 1, 32[
+  ///        {32, Lower},       // bit sizes [32, 33[
+  ///        {33, NarrowScalar} // bit sizes [33, +inf[
+  ///       });
+ std::pair<LegalizeAction, LLT>
+ findScalarLegalAction(const InstrAspect &Aspect) const;
+
+ /// Returns the next action needed towards legalizing the vector type.
+ std::pair<LegalizeAction, LLT>
+ findVectorLegalAction(const InstrAspect &Aspect) const;
+
+ static const int FirstOp = TargetOpcode::PRE_ISEL_GENERIC_OPCODE_START;
+ static const int LastOp = TargetOpcode::PRE_ISEL_GENERIC_OPCODE_END;
+
+ // Data structures used temporarily during construction of legality data:
+ using TypeMap = DenseMap<LLT, LegalizeAction>;
+ SmallVector<TypeMap, 1> SpecifiedActions[LastOp - FirstOp + 1];
+ SmallVector<SizeChangeStrategy, 1>
+ ScalarSizeChangeStrategies[LastOp - FirstOp + 1];
+ SmallVector<SizeChangeStrategy, 1>
+ VectorElementSizeChangeStrategies[LastOp - FirstOp + 1];
+ bool TablesInitialized;
+
+ // Data structures used by getAction:
+ SmallVector<SizeAndActionsVec, 1> ScalarActions[LastOp - FirstOp + 1];
+ SmallVector<SizeAndActionsVec, 1> ScalarInVectorActions[LastOp - FirstOp + 1];
+ std::unordered_map<uint16_t, SmallVector<SizeAndActionsVec, 1>>
+ AddrSpace2PointerActions[LastOp - FirstOp + 1];
+ std::unordered_map<uint16_t, SmallVector<SizeAndActionsVec, 1>>
+ NumElements2Actions[LastOp - FirstOp + 1];
+
+ LegalizeRuleSet RulesForOpcode[LastOp - FirstOp + 1];
+};
+
+#ifndef NDEBUG
+/// Checks that MIR is fully legal; returns an illegal instruction if it is
+/// not, nullptr otherwise.
+const MachineInstr *machineFunctionIsIllegal(const MachineFunction &MF);
+#endif
+
+} // end namespace llvm.
+
+#endif // LLVM_CODEGEN_GLOBALISEL_LEGALIZERINFO_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Localizer.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Localizer.h
new file mode 100644
index 0000000000..d1716931e6
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Localizer.h
@@ -0,0 +1,108 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//== llvm/CodeGen/GlobalISel/Localizer.h - Localizer -------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file describes the interface of the Localizer pass.
+/// This pass moves/duplicates constant-like instructions close to their uses.
+/// Its primary goal is to work around the deficiencies of the fast register
+/// allocator.
+/// With GlobalISel, constants are all materialized in the entry block of
+/// a function. However, the fast allocator cannot rematerialize constants;
+/// it therefore has many more live ranges to deal with and will most likely
+/// end up spilling a lot.
+/// By pushing the constants close to their use, we only create small
+/// live-ranges.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_LOCALIZER_H
+#define LLVM_CODEGEN_GLOBALISEL_LOCALIZER_H
+
+#include "llvm/ADT/SetVector.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+// Forward declarations.
+class MachineRegisterInfo;
+class TargetTransformInfo;
+
+/// This pass implements the localization mechanism described at the
+/// top of this file. One specificity of the implementation is that
+/// it will materialize one and only one instance of a constant per
+/// basic block, thus enabling reuse of that constant within that block.
+/// Moreover, it only materializes constants in blocks where they
+/// are used. PHI uses are considered happening at the end of the
+/// related predecessor.
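+///
+/// An illustrative MIR sketch (register names are made up): a constant
+/// materialized once in the entry block,
+///   bb.0:  %c:_(s32) = G_CONSTANT i32 4
+///   ...
+///   bb.3:  %u:_(s32) = G_ADD %x:_(s32), %c:_(s32)
+/// becomes, after localization, a per-use-block copy:
+///   bb.3:  %c3:_(s32) = G_CONSTANT i32 4
+///          %u:_(s32)  = G_ADD %x:_(s32), %c3:_(s32)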
+class Localizer : public MachineFunctionPass {
+public:
+ static char ID;
+
+private:
+ /// An input function to decide if the pass should run or not
+ /// on the given MachineFunction.
+ std::function<bool(const MachineFunction &)> DoNotRunPass;
+
+ /// MRI contains all the register class/bank information that this
+ /// pass uses and updates.
+ MachineRegisterInfo *MRI;
+ /// TTI used for getting remat costs for instructions.
+ TargetTransformInfo *TTI;
+
+ /// Check if \p MOUse is used in the same basic block as \p Def.
+ /// If the use is in the same block, we say it is local.
+  /// When the use is not local, \p InsertMBB will contain the basic
+  /// block where \p Def should be inserted to get a local use.
+ static bool isLocalUse(MachineOperand &MOUse, const MachineInstr &Def,
+ MachineBasicBlock *&InsertMBB);
+
+ /// Initialize the field members using \p MF.
+ void init(MachineFunction &MF);
+
+ typedef SmallSetVector<MachineInstr *, 32> LocalizedSetVecT;
+
+ /// If \p Op is a phi operand and not unique in that phi, that is,
+ /// there are other operands in the phi with the same register,
+ /// return true.
+ bool isNonUniquePhiValue(MachineOperand &Op) const;
+
+ /// Do inter-block localization from the entry block.
+ bool localizeInterBlock(MachineFunction &MF,
+ LocalizedSetVecT &LocalizedInstrs);
+
+ /// Do intra-block localization of already localized instructions.
+ bool localizeIntraBlock(LocalizedSetVecT &LocalizedInstrs);
+
+public:
+ Localizer();
+ Localizer(std::function<bool(const MachineFunction &)>);
+
+ StringRef getPassName() const override { return "Localizer"; }
+
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties()
+ .set(MachineFunctionProperties::Property::IsSSA);
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+};
+
+} // End namespace llvm.
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LostDebugLocObserver.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LostDebugLocObserver.h
new file mode 100644
index 0000000000..ec2092ccbc
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LostDebugLocObserver.h
@@ -0,0 +1,61 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===----- llvm/CodeGen/GlobalISel/LostDebugLocObserver.h -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// Tracks DebugLocs between checkpoints and verifies that they are transferred.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CODEGEN_GLOBALISEL_LOSTDEBUGLOCOBSERVER_H
+#define LLVM_CODEGEN_GLOBALISEL_LOSTDEBUGLOCOBSERVER_H
+
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
+
+namespace llvm {
+class LostDebugLocObserver : public GISelChangeObserver {
+ StringRef DebugType;
+ SmallSet<DebugLoc, 4> LostDebugLocs;
+ SmallPtrSet<MachineInstr *, 4> PotentialMIsForDebugLocs;
+ unsigned NumLostDebugLocs = 0;
+
+public:
+ LostDebugLocObserver(StringRef DebugType) : DebugType(DebugType) {}
+
+ unsigned getNumLostDebugLocs() const { return NumLostDebugLocs; }
+
+ /// Call this to indicate that it's a good point to assess whether locations
+ /// have been lost. Typically this will be when a logical change has been
+ /// completed such as the caller has finished replacing some instructions with
+ /// alternatives. When CheckDebugLocs is true, the locations will be checked
+ /// to see if any have been lost since the last checkpoint. When
+ /// CheckDebugLocs is false, it will just reset ready for the next checkpoint
+ /// without checking anything. This can be helpful to limit the detection to
+ /// easy-to-fix portions of an algorithm before allowing more difficult ones.
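+  ///
+  /// An illustrative sketch of typical use (the DebugType string and the
+  /// surrounding rewrite loop are assumptions):
+  /// \code
+  ///   LostDebugLocObserver Observer("legalizer");
+  ///   Observer.checkpoint(false); // Start a fresh window; don't check yet.
+  ///   // ... replace instructions, notifying Observer along the way ...
+  ///   Observer.checkpoint();      // Verify no DebugLoc was dropped.
+  /// \endcode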
+ void checkpoint(bool CheckDebugLocs = true);
+
+ void createdInstr(MachineInstr &MI) override;
+ void erasingInstr(MachineInstr &MI) override;
+ void changingInstr(MachineInstr &MI) override;
+ void changedInstr(MachineInstr &MI) override;
+
+private:
+ void analyzeDebugLocations();
+};
+
+} // namespace llvm
+#endif // ifndef LLVM_CODEGEN_GLOBALISEL_LOSTDEBUGLOCOBSERVER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
new file mode 100644
index 0000000000..223f61ccc5
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
@@ -0,0 +1,501 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//==------ llvm/CodeGen/GlobalISel/MIPatternMatch.h -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// Contains matchers for matching SSA Machine Instructions.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_GMIR_PATTERNMATCH_H
+#define LLVM_GMIR_PATTERNMATCH_H
+
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/InstrTypes.h"
+
+namespace llvm {
+namespace MIPatternMatch {
+
+template <typename Reg, typename Pattern>
+bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P) {
+ return P.match(MRI, R);
+}
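+
+// An illustrative sketch of typical use (`DstReg` and `MRI` stand in for a
+// register and the MachineRegisterInfo of the surrounding code):
+//   Register Src;
+//   int64_t Cst;
+//   if (mi_match(DstReg, MRI, m_GAdd(m_Reg(Src), m_ICst(Cst)))) {
+//     // DstReg is defined by a G_ADD; Src and Cst are now bound.
+//   }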
+
+// TODO: Extend for N use.
+template <typename SubPatternT> struct OneUse_match {
+ SubPatternT SubPat;
+ OneUse_match(const SubPatternT &SP) : SubPat(SP) {}
+
+ bool match(const MachineRegisterInfo &MRI, Register Reg) {
+ return MRI.hasOneUse(Reg) && SubPat.match(MRI, Reg);
+ }
+};
+
+template <typename SubPat>
+inline OneUse_match<SubPat> m_OneUse(const SubPat &SP) {
+ return SP;
+}
+
+template <typename SubPatternT> struct OneNonDBGUse_match {
+ SubPatternT SubPat;
+ OneNonDBGUse_match(const SubPatternT &SP) : SubPat(SP) {}
+
+ bool match(const MachineRegisterInfo &MRI, Register Reg) {
+ return MRI.hasOneNonDBGUse(Reg) && SubPat.match(MRI, Reg);
+ }
+};
+
+template <typename SubPat>
+inline OneNonDBGUse_match<SubPat> m_OneNonDBGUse(const SubPat &SP) {
+ return SP;
+}
+
+struct ConstantMatch {
+ int64_t &CR;
+ ConstantMatch(int64_t &C) : CR(C) {}
+ bool match(const MachineRegisterInfo &MRI, Register Reg) {
+ if (auto MaybeCst = getConstantVRegSExtVal(Reg, MRI)) {
+ CR = *MaybeCst;
+ return true;
+ }
+ return false;
+ }
+};
+
+inline ConstantMatch m_ICst(int64_t &Cst) { return ConstantMatch(Cst); }
+
+/// Matcher for a specific constant value.
+struct SpecificConstantMatch {
+ int64_t RequestedVal;
+ SpecificConstantMatch(int64_t RequestedVal) : RequestedVal(RequestedVal) {}
+ bool match(const MachineRegisterInfo &MRI, Register Reg) {
+ int64_t MatchedVal;
+ return mi_match(Reg, MRI, m_ICst(MatchedVal)) && MatchedVal == RequestedVal;
+ }
+};
+
+/// Matches a constant equal to \p RequestedValue.
+inline SpecificConstantMatch m_SpecificICst(int64_t RequestedValue) {
+ return SpecificConstantMatch(RequestedValue);
+}
+
+///{
+/// Convenience matchers for specific integer values.
+inline SpecificConstantMatch m_ZeroInt() { return SpecificConstantMatch(0); }
+inline SpecificConstantMatch m_AllOnesInt() {
+ return SpecificConstantMatch(-1);
+}
+///}
+
+// TODO: Rework this for different kinds of MachineOperand.
+// Currently assumes the Src for a match is a register.
+// We might want to support taking in some MachineOperands and calling getReg
+// on them.
+
+struct operand_type_match {
+ bool match(const MachineRegisterInfo &MRI, Register Reg) { return true; }
+ bool match(const MachineRegisterInfo &MRI, MachineOperand *MO) {
+ return MO->isReg();
+ }
+};
+
+inline operand_type_match m_Reg() { return operand_type_match(); }
+
+/// Matching combinators.
+template <typename... Preds> struct And {
+ template <typename MatchSrc>
+ bool match(const MachineRegisterInfo &MRI, MatchSrc &&src) {
+ return true;
+ }
+};
+
+template <typename Pred, typename... Preds>
+struct And<Pred, Preds...> : And<Preds...> {
+ Pred P;
+ And(Pred &&p, Preds &&... preds)
+ : And<Preds...>(std::forward<Preds>(preds)...), P(std::forward<Pred>(p)) {
+ }
+ template <typename MatchSrc>
+ bool match(const MachineRegisterInfo &MRI, MatchSrc &&src) {
+ return P.match(MRI, src) && And<Preds...>::match(MRI, src);
+ }
+};
+
+template <typename... Preds> struct Or {
+ template <typename MatchSrc>
+ bool match(const MachineRegisterInfo &MRI, MatchSrc &&src) {
+ return false;
+ }
+};
+
+template <typename Pred, typename... Preds>
+struct Or<Pred, Preds...> : Or<Preds...> {
+ Pred P;
+ Or(Pred &&p, Preds &&... preds)
+ : Or<Preds...>(std::forward<Preds>(preds)...), P(std::forward<Pred>(p)) {}
+ template <typename MatchSrc>
+ bool match(const MachineRegisterInfo &MRI, MatchSrc &&src) {
+ return P.match(MRI, src) || Or<Preds...>::match(MRI, src);
+ }
+};
+
+template <typename... Preds> And<Preds...> m_all_of(Preds &&... preds) {
+ return And<Preds...>(std::forward<Preds>(preds)...);
+}
+
+template <typename... Preds> Or<Preds...> m_any_of(Preds &&... preds) {
+ return Or<Preds...>(std::forward<Preds>(preds)...);
+}
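+
+// An illustrative sketch (names are assumptions): combinators wrap other
+// matchers, so one register can be required to satisfy several patterns:
+//   Register Src;
+//   if (mi_match(Reg, MRI, m_all_of(m_SpecificType(LLT::scalar(64)),
+//                                   m_Reg(Src))))
+//     ; // Reg has type s64 and is additionally bound to Src.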
+
+template <typename BindTy> struct bind_helper {
+ static bool bind(const MachineRegisterInfo &MRI, BindTy &VR, BindTy &V) {
+ VR = V;
+ return true;
+ }
+};
+
+template <> struct bind_helper<MachineInstr *> {
+ static bool bind(const MachineRegisterInfo &MRI, MachineInstr *&MI,
+ Register Reg) {
+ MI = MRI.getVRegDef(Reg);
+ if (MI)
+ return true;
+ return false;
+ }
+};
+
+template <> struct bind_helper<LLT> {
+  // Bind by reference so the matched type is actually visible to the caller.
+  static bool bind(const MachineRegisterInfo &MRI, LLT &Ty, Register Reg) {
+    Ty = MRI.getType(Reg);
+    if (Ty.isValid())
+      return true;
+    return false;
+  }
+};
+
+template <> struct bind_helper<const ConstantFP *> {
+ static bool bind(const MachineRegisterInfo &MRI, const ConstantFP *&F,
+ Register Reg) {
+ F = getConstantFPVRegVal(Reg, MRI);
+ if (F)
+ return true;
+ return false;
+ }
+};
+
+template <typename Class> struct bind_ty {
+ Class &VR;
+
+ bind_ty(Class &V) : VR(V) {}
+
+ template <typename ITy> bool match(const MachineRegisterInfo &MRI, ITy &&V) {
+ return bind_helper<Class>::bind(MRI, VR, V);
+ }
+};
+
+inline bind_ty<Register> m_Reg(Register &R) { return R; }
+inline bind_ty<MachineInstr *> m_MInstr(MachineInstr *&MI) { return MI; }
+inline bind_ty<LLT> m_Type(LLT &Ty) { return Ty; }
+inline bind_ty<CmpInst::Predicate> m_Pred(CmpInst::Predicate &P) { return P; }
+inline operand_type_match m_Pred() { return operand_type_match(); }
+
+// Helper for matching G_FCONSTANT
+inline bind_ty<const ConstantFP *> m_GFCst(const ConstantFP *&C) { return C; }
+
+// General helper for all the binary generic MI such as G_ADD/G_SUB etc
+template <typename LHS_P, typename RHS_P, unsigned Opcode,
+ bool Commutable = false>
+struct BinaryOp_match {
+ LHS_P L;
+ RHS_P R;
+
+ BinaryOp_match(const LHS_P &LHS, const RHS_P &RHS) : L(LHS), R(RHS) {}
+ template <typename OpTy>
+ bool match(const MachineRegisterInfo &MRI, OpTy &&Op) {
+ MachineInstr *TmpMI;
+ if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
+ if (TmpMI->getOpcode() == Opcode && TmpMI->getNumOperands() == 3) {
+ return (L.match(MRI, TmpMI->getOperand(1).getReg()) &&
+ R.match(MRI, TmpMI->getOperand(2).getReg())) ||
+ (Commutable && (R.match(MRI, TmpMI->getOperand(1).getReg()) &&
+ L.match(MRI, TmpMI->getOperand(2).getReg())));
+ }
+ }
+ return false;
+ }
+};
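+
+// Illustrative: when Commutable is true, the operand patterns are tried in
+// both orders, so (with made-up registers)
+//   mi_match(Reg, MRI, m_GAdd(m_Reg(Src), m_SpecificICst(1)))
+// matches both `G_ADD %src, 1` and `G_ADD 1, %src`; the non-commutable
+// G_SUB matcher below accepts its operands in order only.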
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_ADD, true>
+m_GAdd(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, TargetOpcode::G_ADD, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_PTR_ADD, true>
+m_GPtrAdd(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, TargetOpcode::G_PTR_ADD, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_SUB> m_GSub(const LHS &L,
+ const RHS &R) {
+ return BinaryOp_match<LHS, RHS, TargetOpcode::G_SUB>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_MUL, true>
+m_GMul(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, TargetOpcode::G_MUL, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_FADD, true>
+m_GFAdd(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, TargetOpcode::G_FADD, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_FMUL, true>
+m_GFMul(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, TargetOpcode::G_FMUL, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_FSUB, false>
+m_GFSub(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, TargetOpcode::G_FSUB, false>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_AND, true>
+m_GAnd(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, TargetOpcode::G_AND, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_XOR, true>
+m_GXor(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, TargetOpcode::G_XOR, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_OR, true> m_GOr(const LHS &L,
+ const RHS &R) {
+ return BinaryOp_match<LHS, RHS, TargetOpcode::G_OR, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_SHL, false>
+m_GShl(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, TargetOpcode::G_SHL, false>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_LSHR, false>
+m_GLShr(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, TargetOpcode::G_LSHR, false>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_ASHR, false>
+m_GAShr(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, TargetOpcode::G_ASHR, false>(L, R);
+}
+
+// Helper for unary instructions (G_[ZSA]EXT/G_TRUNC) etc
+template <typename SrcTy, unsigned Opcode> struct UnaryOp_match {
+ SrcTy L;
+
+ UnaryOp_match(const SrcTy &LHS) : L(LHS) {}
+ template <typename OpTy>
+ bool match(const MachineRegisterInfo &MRI, OpTy &&Op) {
+ MachineInstr *TmpMI;
+ if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
+ if (TmpMI->getOpcode() == Opcode && TmpMI->getNumOperands() == 2) {
+ return L.match(MRI, TmpMI->getOperand(1).getReg());
+ }
+ }
+ return false;
+ }
+};
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_ANYEXT>
+m_GAnyExt(const SrcTy &Src) {
+ return UnaryOp_match<SrcTy, TargetOpcode::G_ANYEXT>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_SEXT> m_GSExt(const SrcTy &Src) {
+ return UnaryOp_match<SrcTy, TargetOpcode::G_SEXT>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_ZEXT> m_GZExt(const SrcTy &Src) {
+ return UnaryOp_match<SrcTy, TargetOpcode::G_ZEXT>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_FPEXT> m_GFPExt(const SrcTy &Src) {
+ return UnaryOp_match<SrcTy, TargetOpcode::G_FPEXT>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_TRUNC> m_GTrunc(const SrcTy &Src) {
+ return UnaryOp_match<SrcTy, TargetOpcode::G_TRUNC>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_BITCAST>
+m_GBitcast(const SrcTy &Src) {
+ return UnaryOp_match<SrcTy, TargetOpcode::G_BITCAST>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_PTRTOINT>
+m_GPtrToInt(const SrcTy &Src) {
+ return UnaryOp_match<SrcTy, TargetOpcode::G_PTRTOINT>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_INTTOPTR>
+m_GIntToPtr(const SrcTy &Src) {
+ return UnaryOp_match<SrcTy, TargetOpcode::G_INTTOPTR>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_FPTRUNC>
+m_GFPTrunc(const SrcTy &Src) {
+ return UnaryOp_match<SrcTy, TargetOpcode::G_FPTRUNC>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_FABS> m_GFabs(const SrcTy &Src) {
+ return UnaryOp_match<SrcTy, TargetOpcode::G_FABS>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_FNEG> m_GFNeg(const SrcTy &Src) {
+ return UnaryOp_match<SrcTy, TargetOpcode::G_FNEG>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::COPY> m_Copy(SrcTy &&Src) {
+ return UnaryOp_match<SrcTy, TargetOpcode::COPY>(std::forward<SrcTy>(Src));
+}
+
+// General helper for generic MI compares, i.e. G_ICMP and G_FCMP
+// TODO: Allow checking a specific predicate.
+template <typename Pred_P, typename LHS_P, typename RHS_P, unsigned Opcode>
+struct CompareOp_match {
+ Pred_P P;
+ LHS_P L;
+ RHS_P R;
+
+ CompareOp_match(const Pred_P &Pred, const LHS_P &LHS, const RHS_P &RHS)
+ : P(Pred), L(LHS), R(RHS) {}
+
+ template <typename OpTy>
+ bool match(const MachineRegisterInfo &MRI, OpTy &&Op) {
+ MachineInstr *TmpMI;
+ if (!mi_match(Op, MRI, m_MInstr(TmpMI)) || TmpMI->getOpcode() != Opcode)
+ return false;
+
+ auto TmpPred =
+ static_cast<CmpInst::Predicate>(TmpMI->getOperand(1).getPredicate());
+ if (!P.match(MRI, TmpPred))
+ return false;
+
+ return L.match(MRI, TmpMI->getOperand(2).getReg()) &&
+ R.match(MRI, TmpMI->getOperand(3).getReg());
+ }
+};
+
+template <typename Pred, typename LHS, typename RHS>
+inline CompareOp_match<Pred, LHS, RHS, TargetOpcode::G_ICMP>
+m_GICmp(const Pred &P, const LHS &L, const RHS &R) {
+ return CompareOp_match<Pred, LHS, RHS, TargetOpcode::G_ICMP>(P, L, R);
+}
+
+template <typename Pred, typename LHS, typename RHS>
+inline CompareOp_match<Pred, LHS, RHS, TargetOpcode::G_FCMP>
+m_GFCmp(const Pred &P, const LHS &L, const RHS &R) {
+ return CompareOp_match<Pred, LHS, RHS, TargetOpcode::G_FCMP>(P, L, R);
+}
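+
+// An illustrative sketch: match an integer compare and capture its predicate
+// and operands (all names are assumptions):
+//   CmpInst::Predicate Pred;
+//   Register LHS, RHS;
+//   if (mi_match(Reg, MRI, m_GICmp(m_Pred(Pred), m_Reg(LHS), m_Reg(RHS))))
+//     ; // Reg is defined by G_ICMP Pred, LHS, RHS.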
+
+// Helper for checking if a Reg is of specific type.
+struct CheckType {
+ LLT Ty;
+ CheckType(const LLT Ty) : Ty(Ty) {}
+
+ bool match(const MachineRegisterInfo &MRI, Register Reg) {
+ return MRI.getType(Reg) == Ty;
+ }
+};
+
+inline CheckType m_SpecificType(LLT Ty) { return Ty; }
+
+template <typename Src0Ty, typename Src1Ty, typename Src2Ty, unsigned Opcode>
+struct TernaryOp_match {
+ Src0Ty Src0;
+ Src1Ty Src1;
+ Src2Ty Src2;
+
+ TernaryOp_match(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2)
+ : Src0(Src0), Src1(Src1), Src2(Src2) {}
+ template <typename OpTy>
+ bool match(const MachineRegisterInfo &MRI, OpTy &&Op) {
+ MachineInstr *TmpMI;
+ if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
+ if (TmpMI->getOpcode() == Opcode && TmpMI->getNumOperands() == 4) {
+ return (Src0.match(MRI, TmpMI->getOperand(1).getReg()) &&
+ Src1.match(MRI, TmpMI->getOperand(2).getReg()) &&
+ Src2.match(MRI, TmpMI->getOperand(3).getReg()));
+ }
+ }
+ return false;
+ }
+};
+template <typename Src0Ty, typename Src1Ty, typename Src2Ty>
+inline TernaryOp_match<Src0Ty, Src1Ty, Src2Ty,
+ TargetOpcode::G_INSERT_VECTOR_ELT>
+m_GInsertVecElt(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2) {
+ return TernaryOp_match<Src0Ty, Src1Ty, Src2Ty,
+ TargetOpcode::G_INSERT_VECTOR_ELT>(Src0, Src1, Src2);
+}
+
+/// Matches a register negated by a G_SUB.
+/// G_SUB 0, %negated_reg
+template <typename SrcTy>
+inline BinaryOp_match<SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB>
+m_Neg(const SrcTy &&Src) {
+ return m_GSub(m_ZeroInt(), Src);
+}
+
+/// Matches a register not-ed by a G_XOR.
+/// G_XOR %not_reg, -1
+template <typename SrcTy>
+inline BinaryOp_match<SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true>
+m_Not(const SrcTy &&Src) {
+ return m_GXor(Src, m_AllOnesInt());
+}
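+
+// An illustrative sketch: these compose like any other matcher, e.g. a
+// double negation (register names are assumptions):
+//   Register Src;
+//   if (mi_match(Reg, MRI, m_Neg(m_Neg(m_Reg(Src)))))
+//     ; // Reg = G_SUB 0, (G_SUB 0, Src), so Reg could be replaced by Src.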
+
+} // namespace MIPatternMatch
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
new file mode 100644
index 0000000000..6e3f7cdc26
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -0,0 +1,1817 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.h - MIBuilder --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares the MachineIRBuilder class.
+/// This is a helper class to build MachineInstr.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_MACHINEIRBUILDER_H
+#define LLVM_CODEGEN_GLOBALISEL_MACHINEIRBUILDER_H
+
+#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
+#include "llvm/CodeGen/LowLevelType.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/Module.h"
+
+namespace llvm {
+
+// Forward declarations.
+class MachineFunction;
+class MachineInstr;
+class TargetInstrInfo;
+class GISelChangeObserver;
+
+/// Class which stores all the state required in a MachineIRBuilder.
+/// Since MachineIRBuilders will only store state in this object, it allows
+/// transferring BuilderState between different kinds of MachineIRBuilders.
+struct MachineIRBuilderState {
+ /// MachineFunction under construction.
+ MachineFunction *MF = nullptr;
+ /// Information used to access the description of the opcodes.
+ const TargetInstrInfo *TII = nullptr;
+ /// Information used to verify types are consistent and to create virtual registers.
+ MachineRegisterInfo *MRI = nullptr;
+ /// Debug location to be set to any instruction we create.
+ DebugLoc DL;
+
+ /// \name Fields describing the insertion point.
+ /// @{
+ MachineBasicBlock *MBB = nullptr;
+ MachineBasicBlock::iterator II;
+ /// @}
+
+ GISelChangeObserver *Observer = nullptr;
+
+ GISelCSEInfo *CSEInfo = nullptr;
+};
+
+class DstOp {
+ union {
+ LLT LLTTy;
+ Register Reg;
+ const TargetRegisterClass *RC;
+ };
+
+public:
+ enum class DstType { Ty_LLT, Ty_Reg, Ty_RC };
+ DstOp(unsigned R) : Reg(R), Ty(DstType::Ty_Reg) {}
+ DstOp(Register R) : Reg(R), Ty(DstType::Ty_Reg) {}
+ DstOp(const MachineOperand &Op) : Reg(Op.getReg()), Ty(DstType::Ty_Reg) {}
+ DstOp(const LLT T) : LLTTy(T), Ty(DstType::Ty_LLT) {}
+ DstOp(const TargetRegisterClass *TRC) : RC(TRC), Ty(DstType::Ty_RC) {}
+
+ void addDefToMIB(MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) const {
+ switch (Ty) {
+ case DstType::Ty_Reg:
+ MIB.addDef(Reg);
+ break;
+ case DstType::Ty_LLT:
+ MIB.addDef(MRI.createGenericVirtualRegister(LLTTy));
+ break;
+ case DstType::Ty_RC:
+ MIB.addDef(MRI.createVirtualRegister(RC));
+ break;
+ }
+ }
+
+ LLT getLLTTy(const MachineRegisterInfo &MRI) const {
+ switch (Ty) {
+ case DstType::Ty_RC:
+ return LLT{};
+ case DstType::Ty_LLT:
+ return LLTTy;
+ case DstType::Ty_Reg:
+ return MRI.getType(Reg);
+ }
+ llvm_unreachable("Unrecognised DstOp::DstType enum");
+ }
+
+ Register getReg() const {
+ assert(Ty == DstType::Ty_Reg && "Not a register");
+ return Reg;
+ }
+
+ const TargetRegisterClass *getRegClass() const {
+ switch (Ty) {
+ case DstType::Ty_RC:
+ return RC;
+ default:
+ llvm_unreachable("Not a RC Operand");
+ }
+ }
+
+ DstType getDstOpKind() const { return Ty; }
+
+private:
+ DstType Ty;
+};
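+
+// Illustrative: all three DstOp kinds are built implicitly at call sites
+// (the builder `B` and the operands below are assumptions):
+//   B.buildCopy(ExistingVReg, Src);    // Ty_Reg: write to an existing vreg.
+//   B.buildCopy(LLT::scalar(32), Src); // Ty_LLT: create a fresh s32 vreg.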
+
+class SrcOp {
+ union {
+ MachineInstrBuilder SrcMIB;
+ Register Reg;
+ CmpInst::Predicate Pred;
+ int64_t Imm;
+ };
+
+public:
+ enum class SrcType { Ty_Reg, Ty_MIB, Ty_Predicate, Ty_Imm };
+ SrcOp(Register R) : Reg(R), Ty(SrcType::Ty_Reg) {}
+ SrcOp(const MachineOperand &Op) : Reg(Op.getReg()), Ty(SrcType::Ty_Reg) {}
+ SrcOp(const MachineInstrBuilder &MIB) : SrcMIB(MIB), Ty(SrcType::Ty_MIB) {}
+ SrcOp(const CmpInst::Predicate P) : Pred(P), Ty(SrcType::Ty_Predicate) {}
+ /// Use of registers held in unsigned integer variables (or more rarely signed
+ /// integers) is no longer permitted to avoid ambiguity with upcoming support
+ /// for immediates.
+ SrcOp(unsigned) = delete;
+ SrcOp(int) = delete;
+ SrcOp(uint64_t V) : Imm(V), Ty(SrcType::Ty_Imm) {}
+ SrcOp(int64_t V) : Imm(V), Ty(SrcType::Ty_Imm) {}
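+
+  // An illustrative consequence of the deleted overloads (names assumed):
+  //   SrcOp A(Register(VRegNum)); // OK: explicitly a register use.
+  //   SrcOp B(int64_t(42));       // OK: explicitly an immediate.
+  //   SrcOp C(VRegNum);           // Ill-formed when VRegNum is unsigned/int.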
+
+ void addSrcToMIB(MachineInstrBuilder &MIB) const {
+ switch (Ty) {
+ case SrcType::Ty_Predicate:
+ MIB.addPredicate(Pred);
+ break;
+ case SrcType::Ty_Reg:
+ MIB.addUse(Reg);
+ break;
+ case SrcType::Ty_MIB:
+ MIB.addUse(SrcMIB->getOperand(0).getReg());
+ break;
+ case SrcType::Ty_Imm:
+ MIB.addImm(Imm);
+ break;
+ }
+ }
+
+ LLT getLLTTy(const MachineRegisterInfo &MRI) const {
+ switch (Ty) {
+ case SrcType::Ty_Predicate:
+ case SrcType::Ty_Imm:
+ llvm_unreachable("Not a register operand");
+ case SrcType::Ty_Reg:
+ return MRI.getType(Reg);
+ case SrcType::Ty_MIB:
+ return MRI.getType(SrcMIB->getOperand(0).getReg());
+ }
+ llvm_unreachable("Unrecognised SrcOp::SrcType enum");
+ }
+
+ Register getReg() const {
+ switch (Ty) {
+ case SrcType::Ty_Predicate:
+ case SrcType::Ty_Imm:
+ llvm_unreachable("Not a register operand");
+ case SrcType::Ty_Reg:
+ return Reg;
+ case SrcType::Ty_MIB:
+ return SrcMIB->getOperand(0).getReg();
+ }
+ llvm_unreachable("Unrecognised SrcOp::SrcType enum");
+ }
+
+ CmpInst::Predicate getPredicate() const {
+ switch (Ty) {
+ case SrcType::Ty_Predicate:
+ return Pred;
+ default:
+ llvm_unreachable("Not a register operand");
+ }
+ }
+
+ int64_t getImm() const {
+ switch (Ty) {
+ case SrcType::Ty_Imm:
+ return Imm;
+ default:
+ llvm_unreachable("Not an immediate");
+ }
+ }
+
+ SrcType getSrcOpKind() const { return Ty; }
+
+private:
+ SrcType Ty;
+};
+
+class FlagsOp {
+ Optional<unsigned> Flags;
+
+public:
+ explicit FlagsOp(unsigned F) : Flags(F) {}
+ FlagsOp() : Flags(None) {}
+ Optional<unsigned> getFlags() const { return Flags; }
+};
+/// Helper class to build MachineInstr.
+/// It keeps internally the insertion point and debug location for all
+/// the new instructions we want to create.
+/// This information can be modified via the related setters.
+class MachineIRBuilder {
+
+ MachineIRBuilderState State;
+
+protected:
+ void validateTruncExt(const LLT Dst, const LLT Src, bool IsExtend);
+
+ void validateUnaryOp(const LLT Res, const LLT Op0);
+ void validateBinaryOp(const LLT Res, const LLT Op0, const LLT Op1);
+ void validateShiftOp(const LLT Res, const LLT Op0, const LLT Op1);
+
+ void validateSelectOp(const LLT ResTy, const LLT TstTy, const LLT Op0Ty,
+ const LLT Op1Ty);
+
+ void recordInsertion(MachineInstr *InsertedInstr) const {
+ if (State.Observer)
+ State.Observer->createdInstr(*InsertedInstr);
+ }
+
+public:
+ /// Some constructors for easy use.
+ MachineIRBuilder() = default;
+ MachineIRBuilder(MachineFunction &MF) { setMF(MF); }
+
+ MachineIRBuilder(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt) {
+ setMF(*MBB.getParent());
+ setInsertPt(MBB, InsPt);
+ }
+
+ MachineIRBuilder(MachineInstr &MI) :
+ MachineIRBuilder(*MI.getParent(), MI.getIterator()) {
+ setInstr(MI);
+ setDebugLoc(MI.getDebugLoc());
+ }
+
+ MachineIRBuilder(MachineInstr &MI, GISelChangeObserver &Observer) :
+ MachineIRBuilder(MI) {
+ setChangeObserver(Observer);
+ }
+
+ virtual ~MachineIRBuilder() = default;
+
+ MachineIRBuilder(const MachineIRBuilderState &BState) : State(BState) {}
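+
+  // An illustrative sketch of typical setup (`MI` is an assumption):
+  //   MachineIRBuilder B(MI); // Insert before MI, reusing MI's DebugLoc.
+  //   auto Four = B.buildConstant(LLT::scalar(32), 4);
+  //   // New instructions now appear immediately before MI.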
+
+ const TargetInstrInfo &getTII() {
+ assert(State.TII && "TargetInstrInfo is not set");
+ return *State.TII;
+ }
+
+ /// Getter for the function we currently build.
+ MachineFunction &getMF() {
+ assert(State.MF && "MachineFunction is not set");
+ return *State.MF;
+ }
+
+ const MachineFunction &getMF() const {
+ assert(State.MF && "MachineFunction is not set");
+ return *State.MF;
+ }
+
+ const DataLayout &getDataLayout() const {
+ return getMF().getFunction().getParent()->getDataLayout();
+ }
+
+ /// Getter for DebugLoc
+ const DebugLoc &getDL() { return State.DL; }
+
+ /// Getter for MRI
+ MachineRegisterInfo *getMRI() { return State.MRI; }
+ const MachineRegisterInfo *getMRI() const { return State.MRI; }
+
+ /// Getter for the State
+ MachineIRBuilderState &getState() { return State; }
+
+ /// Getter for the basic block we currently build.
+ const MachineBasicBlock &getMBB() const {
+ assert(State.MBB && "MachineBasicBlock is not set");
+ return *State.MBB;
+ }
+
+ MachineBasicBlock &getMBB() {
+ return const_cast<MachineBasicBlock &>(
+ const_cast<const MachineIRBuilder *>(this)->getMBB());
+ }
+
+ GISelCSEInfo *getCSEInfo() { return State.CSEInfo; }
+ const GISelCSEInfo *getCSEInfo() const { return State.CSEInfo; }
+
+ /// Current insertion point for new instructions.
+ MachineBasicBlock::iterator getInsertPt() { return State.II; }
+
+ /// Set the insertion point before the specified position.
+ /// \pre MBB must be in getMF().
+ /// \pre II must be a valid iterator in MBB.
+ void setInsertPt(MachineBasicBlock &MBB, MachineBasicBlock::iterator II) {
+ assert(MBB.getParent() == &getMF() &&
+ "Basic block is in a different function");
+ State.MBB = &MBB;
+ State.II = II;
+ }
+
+ /// @}
+
+ void setCSEInfo(GISelCSEInfo *Info) { State.CSEInfo = Info; }
+
+ /// \name Setters for the insertion point.
+ /// @{
+ /// Set the MachineFunction where to build instructions.
+ void setMF(MachineFunction &MF);
+
+ /// Set the insertion point to the end of \p MBB.
+ /// \pre \p MBB must be contained by getMF().
+ void setMBB(MachineBasicBlock &MBB) {
+ State.MBB = &MBB;
+ State.II = MBB.end();
+ assert(&getMF() == MBB.getParent() &&
+ "Basic block is in a different function");
+ }
+
+ /// Set the insertion point to before MI.
+ /// \pre MI must be in getMF().
+ void setInstr(MachineInstr &MI) {
+ assert(MI.getParent() && "Instruction is not part of a basic block");
+ setMBB(*MI.getParent());
+ State.II = MI.getIterator();
+ }
+ /// @}
+
+ /// Set the insertion point to before MI, and set the debug loc to MI's loc.
+ /// \pre MI must be in getMF().
+ void setInstrAndDebugLoc(MachineInstr &MI) {
+ setInstr(MI);
+ setDebugLoc(MI.getDebugLoc());
+ }
+
+ void setChangeObserver(GISelChangeObserver &Observer) {
+ State.Observer = &Observer;
+ }
+
+ void stopObservingChanges() { State.Observer = nullptr; }
+ /// @}
+
+ /// Set the debug location to \p DL for all the next build instructions.
+ void setDebugLoc(const DebugLoc &DL) { this->State.DL = DL; }
+
+ /// Get the current instruction's debug location.
+ DebugLoc getDebugLoc() { return State.DL; }
+
+ /// Build and insert <empty> = \p Opcode <empty>.
+ /// The insertion point is the one set by the last call of either
+ /// setBasicBlock or setMI.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildInstr(unsigned Opcode) {
+ return insertInstr(buildInstrNoInsert(Opcode));
+ }
+
+ /// Build but don't insert <empty> = \p Opcode <empty>.
+ ///
+ /// \pre setMF, setBasicBlock or setMI must have been called.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildInstrNoInsert(unsigned Opcode);
+
+ /// Insert an existing instruction at the insertion point.
+ MachineInstrBuilder insertInstr(MachineInstrBuilder MIB);
+
+ /// Build and insert a DBG_VALUE instruction expressing the fact that the
+ /// associated \p Variable lives in \p Reg (suitably modified by \p Expr).
+ MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable,
+ const MDNode *Expr);
+
+ /// Build and insert a DBG_VALUE instruction expressing the fact that the
+ /// associated \p Variable lives in memory at \p Reg (suitably modified by \p
+ /// Expr).
+ MachineInstrBuilder buildIndirectDbgValue(Register Reg,
+ const MDNode *Variable,
+ const MDNode *Expr);
+
+ /// Build and insert a DBG_VALUE instruction expressing the fact that the
+ /// associated \p Variable lives in the stack slot specified by \p FI
+ /// (suitably modified by \p Expr).
+ MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable,
+ const MDNode *Expr);
+
+  /// Build and insert a DBG_VALUE instruction specifying that \p Variable is
+ /// given by \p C (suitably modified by \p Expr).
+ MachineInstrBuilder buildConstDbgValue(const Constant &C,
+ const MDNode *Variable,
+ const MDNode *Expr);
+
+  /// Build and insert a DBG_LABEL instruction specifying that \p Label is
+  /// given. Convert "llvm.dbg.label Label" to "DBG_LABEL Label".
+ MachineInstrBuilder buildDbgLabel(const MDNode *Label);
+
+ /// Build and insert \p Res = G_DYN_STACKALLOC \p Size, \p Align
+ ///
+ /// G_DYN_STACKALLOC does a dynamic stack allocation and writes the address of
+ /// the allocated memory into \p Res.
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with pointer type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size,
+ Align Alignment);
+
+ /// Build and insert \p Res = G_FRAME_INDEX \p Idx
+ ///
+ /// G_FRAME_INDEX materializes the address of an alloca value or other
+ /// stack-based object.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with pointer type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx);
+
+ /// Build and insert \p Res = G_GLOBAL_VALUE \p GV
+ ///
+ /// G_GLOBAL_VALUE materializes the address of the specified global
+ /// into \p Res.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with pointer type
+ /// in the same address space as \p GV.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildGlobalValue(const DstOp &Res, const GlobalValue *GV);
+
+ /// Build and insert \p Res = G_PTR_ADD \p Op0, \p Op1
+ ///
+  /// G_PTR_ADD adds \p Op1 addressable units to the pointer specified by
+  /// \p Op0, storing the resulting pointer in \p Res. Addressable units are
+  /// typically bytes, but this can vary between targets.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res and \p Op0 must be generic virtual registers with pointer
+ /// type.
+ /// \pre \p Op1 must be a generic virtual register with scalar type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
+ const SrcOp &Op1);
+
+ /// Materialize and insert \p Res = G_PTR_ADD \p Op0, (G_CONSTANT \p Value)
+ ///
+ /// G_PTR_ADD adds \p Value bytes to the pointer specified by \p Op0,
+ /// storing the resulting pointer in \p Res. If \p Value is zero then no
+  /// G_PTR_ADD or G_CONSTANT will be created and \p Op0 will be assigned to
+ /// \p Res.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Op0 must be a generic virtual register with pointer type.
+ /// \pre \p ValueTy must be a scalar type.
+ /// \pre \p Res must be 0. This is to detect confusion between
+ /// materializePtrAdd() and buildPtrAdd().
+ /// \post \p Res will either be a new generic virtual register of the same
+ /// type as \p Op0 or \p Op0 itself.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ Optional<MachineInstrBuilder> materializePtrAdd(Register &Res, Register Op0,
+ const LLT ValueTy,
+ uint64_t Value);
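+
+  // An illustrative sketch (the builder `B`, `Base` and `Offset` are
+  // assumptions):
+  //   Register Res = 0; // Must be 0; see the precondition above.
+  //   auto MIB = B.materializePtrAdd(Res, Base, LLT::scalar(64), Offset);
+  //   // Res is now Base itself when Offset == 0, else a fresh G_PTR_ADD.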
+
+ /// Build and insert \p Res = G_PTRMASK \p Op0, \p Op1
+ MachineInstrBuilder buildPtrMask(const DstOp &Res, const SrcOp &Op0,
+ const SrcOp &Op1) {
+ return buildInstr(TargetOpcode::G_PTRMASK, {Res}, {Op0, Op1});
+ }
+
+  /// Build and insert \p Res = G_PTRMASK \p Op0, G_CONSTANT ~((1 << NumBits) - 1)
+ ///
+ /// This clears the low bits of a pointer operand without destroying its
+ /// pointer properties. This has the effect of rounding the address *down* to
+ /// a specified alignment in bits.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res and \p Op0 must be generic virtual registers with pointer
+ /// type.
+ /// \pre \p NumBits must be an integer representing the number of low bits to
+ /// be cleared in \p Op0.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildMaskLowPtrBits(const DstOp &Res, const SrcOp &Op0,
+ uint32_t NumBits);
+
+ /// Build and insert \p Res, \p CarryOut = G_UADDO \p Op0, \p Op1
+ ///
+ /// G_UADDO sets \p Res to \p Op0 + \p Op1 (truncated to the bit width) and
+ /// sets \p CarryOut to 1 if the result overflowed in unsigned arithmetic.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers with the
+ /// same scalar type.
+  /// \pre \p CarryOut must be a generic virtual register with scalar type
+  /// (typically s1).
+ ///
+ /// \return The newly created instruction.
+ MachineInstrBuilder buildUAddo(const DstOp &Res, const DstOp &CarryOut,
+ const SrcOp &Op0, const SrcOp &Op1) {
+ return buildInstr(TargetOpcode::G_UADDO, {Res, CarryOut}, {Op0, Op1});
+ }
+
+ /// Build and insert \p Res, \p CarryOut = G_USUBO \p Op0, \p Op1
+ MachineInstrBuilder buildUSubo(const DstOp &Res, const DstOp &CarryOut,
+ const SrcOp &Op0, const SrcOp &Op1) {
+ return buildInstr(TargetOpcode::G_USUBO, {Res, CarryOut}, {Op0, Op1});
+ }
+
+ /// Build and insert \p Res, \p CarryOut = G_SADDO \p Op0, \p Op1
+ MachineInstrBuilder buildSAddo(const DstOp &Res, const DstOp &CarryOut,
+ const SrcOp &Op0, const SrcOp &Op1) {
+ return buildInstr(TargetOpcode::G_SADDO, {Res, CarryOut}, {Op0, Op1});
+ }
+
+  /// Build and insert \p Res, \p CarryOut = G_SSUBO \p Op0, \p Op1
+ MachineInstrBuilder buildSSubo(const DstOp &Res, const DstOp &CarryOut,
+ const SrcOp &Op0, const SrcOp &Op1) {
+ return buildInstr(TargetOpcode::G_SSUBO, {Res, CarryOut}, {Op0, Op1});
+ }
+
+ /// Build and insert \p Res, \p CarryOut = G_UADDE \p Op0,
+ /// \p Op1, \p CarryIn
+ ///
+ /// G_UADDE sets \p Res to \p Op0 + \p Op1 + \p CarryIn (truncated to the bit
+ /// width) and sets \p CarryOut to 1 if the result overflowed in unsigned
+ /// arithmetic.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+ /// with the same scalar type.
+ /// \pre \p CarryOut and \p CarryIn must be generic virtual
+ /// registers with the same scalar type (typically s1)
+ ///
+ /// \return The newly created instruction.
+ MachineInstrBuilder buildUAdde(const DstOp &Res, const DstOp &CarryOut,
+ const SrcOp &Op0, const SrcOp &Op1,
+ const SrcOp &CarryIn) {
+ return buildInstr(TargetOpcode::G_UADDE, {Res, CarryOut},
+ {Op0, Op1, CarryIn});
+ }
+
+  /// Build and insert \p Res, \p CarryOut = G_USUBE \p Op0, \p Op1, \p CarryIn
+ MachineInstrBuilder buildUSube(const DstOp &Res, const DstOp &CarryOut,
+ const SrcOp &Op0, const SrcOp &Op1,
+ const SrcOp &CarryIn) {
+ return buildInstr(TargetOpcode::G_USUBE, {Res, CarryOut},
+ {Op0, Op1, CarryIn});
+ }
+
+  /// Build and insert \p Res, \p CarryOut = G_SADDE \p Op0, \p Op1, \p CarryIn
+ MachineInstrBuilder buildSAdde(const DstOp &Res, const DstOp &CarryOut,
+ const SrcOp &Op0, const SrcOp &Op1,
+ const SrcOp &CarryIn) {
+ return buildInstr(TargetOpcode::G_SADDE, {Res, CarryOut},
+ {Op0, Op1, CarryIn});
+ }
+
+  /// Build and insert \p Res, \p CarryOut = G_SSUBE \p Op0, \p Op1, \p CarryIn
+ MachineInstrBuilder buildSSube(const DstOp &Res, const DstOp &CarryOut,
+ const SrcOp &Op0, const SrcOp &Op1,
+ const SrcOp &CarryIn) {
+ return buildInstr(TargetOpcode::G_SSUBE, {Res, CarryOut},
+ {Op0, Op1, CarryIn});
+ }
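+
+  // An illustrative sketch: a 128-bit addition assembled from two s64 legs
+  // via the carry chain (all registers and types are assumptions):
+  //   auto Lo = B.buildUAddo(S64, S1, Lo0, Lo1);
+  //   auto Hi = B.buildUAdde(S64, S1, Hi0, Hi1, Lo.getReg(1));
+  //   // Lo.getReg(0) and Hi.getReg(0) hold the sum; Hi.getReg(1) the carry.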
+
+ /// Build and insert \p Res = G_ANYEXT \p Op0
+ ///
+ /// G_ANYEXT produces a register of the specified width, with bits 0 to
+ /// sizeof(\p Ty) * 8 set to \p Op. The remaining bits are unspecified
+ /// (i.e. this is neither zero nor sign-extension). For a vector register,
+ /// each element is extended individually.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with scalar or vector type.
+ /// \pre \p Op must be a generic virtual register with scalar or vector type.
+ /// \pre \p Op must be smaller than \p Res
+ ///
+ /// \return The newly created instruction.
+  MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op);
+
+ /// Build and insert \p Res = G_SEXT \p Op
+ ///
+ /// G_SEXT produces a register of the specified width, with bits 0 to
+ /// sizeof(\p Ty) * 8 set to \p Op. The remaining bits are duplicated from the
+ /// high bit of \p Op (i.e. 2s-complement sign extended).
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with scalar or vector type.
+ /// \pre \p Op must be a generic virtual register with scalar or vector type.
+ /// \pre \p Op must be smaller than \p Res
+ ///
+ /// \return The newly created instruction.
+ MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op);
+
+ /// Build and insert \p Res = G_SEXT_INREG \p Op, ImmOp
+ MachineInstrBuilder buildSExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp) {
+ return buildInstr(TargetOpcode::G_SEXT_INREG, {Res}, {Op, SrcOp(ImmOp)});
+ }
+
+ /// Build and insert \p Res = G_FPEXT \p Op
+ MachineInstrBuilder buildFPExt(const DstOp &Res, const SrcOp &Op,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_FPEXT, {Res}, {Op}, Flags);
+ }
+
+ /// Build and insert a G_PTRTOINT instruction.
+ MachineInstrBuilder buildPtrToInt(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_PTRTOINT, {Dst}, {Src});
+ }
+
+ /// Build and insert a G_INTTOPTR instruction.
+ MachineInstrBuilder buildIntToPtr(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_INTTOPTR, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Dst = G_BITCAST \p Src
+ MachineInstrBuilder buildBitcast(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_BITCAST, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Dst = G_ADDRSPACE_CAST \p Src
+ MachineInstrBuilder buildAddrSpaceCast(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_ADDRSPACE_CAST, {Dst}, {Src});
+ }
+
+ /// \return The opcode of the extension the target wants to use for boolean
+ /// values.
+ unsigned getBoolExtOp(bool IsVec, bool IsFP) const;
+
+  /// Build and insert \p Res = G_ANYEXT \p Op, \p Res = G_SEXT \p Op, or
+  /// \p Res = G_ZEXT \p Op depending on how the target wants to extend
+  /// boolean values.
+ MachineInstrBuilder buildBoolExt(const DstOp &Res, const SrcOp &Op,
+ bool IsFP);
+
+ /// Build and insert \p Res = G_ZEXT \p Op
+ ///
+ /// G_ZEXT produces a register of the specified width, with bits 0 to
+ /// sizeof(\p Ty) * 8 set to \p Op. The remaining bits are 0. For a vector
+ /// register, each element is extended individually.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with scalar or vector type.
+ /// \pre \p Op must be a generic virtual register with scalar or vector type.
+ /// \pre \p Op must be smaller than \p Res
+ ///
+ /// \return The newly created instruction.
+ MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op);
+
+ /// Build and insert \p Res = G_SEXT \p Op, \p Res = G_TRUNC \p Op, or
+ /// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
+  ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with scalar or vector type.
+ /// \pre \p Op must be a generic virtual register with scalar or vector type.
+ ///
+ /// \return The newly created instruction.
+ MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op);
+
+ /// Build and insert \p Res = G_ZEXT \p Op, \p Res = G_TRUNC \p Op, or
+ /// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
+  ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with scalar or vector type.
+ /// \pre \p Op must be a generic virtual register with scalar or vector type.
+ ///
+ /// \return The newly created instruction.
+ MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op);
+
+  /// Build and insert \p Res = G_ANYEXT \p Op, \p Res = G_TRUNC \p Op, or
+  /// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
+  ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with scalar or vector type.
+ /// \pre \p Op must be a generic virtual register with scalar or vector type.
+ ///
+ /// \return The newly created instruction.
+ MachineInstrBuilder buildAnyExtOrTrunc(const DstOp &Res, const SrcOp &Op);
+
+ /// Build and insert \p Res = \p ExtOpc, \p Res = G_TRUNC \p
+ /// Op, or \p Res = COPY \p Op depending on the differing sizes of \p Res and
+ /// \p Op.
+  ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with scalar or vector type.
+ /// \pre \p Op must be a generic virtual register with scalar or vector type.
+ ///
+ /// \return The newly created instruction.
+ MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, const DstOp &Res,
+ const SrcOp &Op);
+
+ /// Build and insert an appropriate cast between two registers of equal size.
+ MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src);
+
+ /// Build and insert G_BR \p Dest
+ ///
+ /// G_BR is an unconditional branch to \p Dest.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildBr(MachineBasicBlock &Dest);
+
+ /// Build and insert G_BRCOND \p Tst, \p Dest
+ ///
+ /// G_BRCOND is a conditional branch to \p Dest.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Tst must be a generic virtual register with scalar
+ /// type. At the beginning of legalization, this will be a single
+ /// bit (s1). Targets with interesting flags registers may change
+ /// this. For a wider type, whether the branch is taken must only
+ /// depend on bit 0 (for now).
+ ///
+ /// \return The newly created instruction.
+ MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest);
+
+ /// Build and insert G_BRINDIRECT \p Tgt
+ ///
+ /// G_BRINDIRECT is an indirect branch to \p Tgt.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Tgt must be a generic virtual register with pointer type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildBrIndirect(Register Tgt);
+
+ /// Build and insert G_BRJT \p TablePtr, \p JTI, \p IndexReg
+ ///
+ /// G_BRJT is a jump table branch using a table base pointer \p TablePtr,
+ /// jump table index \p JTI and index \p IndexReg
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p TablePtr must be a generic virtual register with pointer type.
+  /// \pre \p JTI must be a jump table index.
+ /// \pre \p IndexReg must be a generic virtual register with pointer type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI,
+ Register IndexReg);
+
+ /// Build and insert \p Res = G_CONSTANT \p Val
+ ///
+ /// G_CONSTANT is an integer constant with the specified size and value. \p
+ /// Val will be extended or truncated to the size of \p Reg.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with scalar or pointer
+ /// type.
+ ///
+ /// \return The newly created instruction.
+ virtual MachineInstrBuilder buildConstant(const DstOp &Res,
+ const ConstantInt &Val);
+
+ /// Build and insert \p Res = G_CONSTANT \p Val
+ ///
+ /// G_CONSTANT is an integer constant with the specified size and value.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with scalar type.
+ ///
+ /// \return The newly created instruction.
+ MachineInstrBuilder buildConstant(const DstOp &Res, int64_t Val);
+ MachineInstrBuilder buildConstant(const DstOp &Res, const APInt &Val);
+
+ /// Build and insert \p Res = G_FCONSTANT \p Val
+ ///
+ /// G_FCONSTANT is a floating-point constant with the specified size and
+ /// value.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with scalar type.
+ ///
+ /// \return The newly created instruction.
+ virtual MachineInstrBuilder buildFConstant(const DstOp &Res,
+ const ConstantFP &Val);
+
+ MachineInstrBuilder buildFConstant(const DstOp &Res, double Val);
+ MachineInstrBuilder buildFConstant(const DstOp &Res, const APFloat &Val);
+
+  /// Build and insert \p Res = COPY \p Op
+ ///
+ /// Register-to-register COPY sets \p Res to \p Op.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op);
+
+ /// Build and insert `Res = G_LOAD Addr, MMO`.
+ ///
+ /// Loads the value stored at \p Addr. Puts the result in \p Res.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr,
+ MachineMemOperand &MMO) {
+ return buildLoadInstr(TargetOpcode::G_LOAD, Res, Addr, MMO);
+ }
+
+ /// Build and insert a G_LOAD instruction, while constructing the
+ /// MachineMemOperand.
+ MachineInstrBuilder
+ buildLoad(const DstOp &Res, const SrcOp &Addr, MachinePointerInfo PtrInfo,
+ Align Alignment,
+ MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
+ const AAMDNodes &AAInfo = AAMDNodes());
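+
+  // Example usage (editorial sketch, not part of the original header):
+  // `MF`, `Addr`, and the frame index `FI` are assumed to exist.
+  //
+  //   MachineMemOperand *MMO = MF.getMachineMemOperand(
+  //       MachinePointerInfo::getFixedStack(MF, FI),
+  //       MachineMemOperand::MOLoad, /*Size=*/4, Align(4));
+  //   auto Val = B.buildLoad(LLT::scalar(32), Addr, *MMO);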
+
+ /// Build and insert `Res = <opcode> Addr, MMO`.
+ ///
+ /// Loads the value stored at \p Addr. Puts the result in \p Res.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildLoadInstr(unsigned Opcode, const DstOp &Res,
+ const SrcOp &Addr, MachineMemOperand &MMO);
+
+  /// Helper to create a load from a constant offset given a base address.
+  /// Loads a value of the type of \p Dst at \p Offset from the given base
+  /// address and memory operand.
+ MachineInstrBuilder buildLoadFromOffset(const DstOp &Dst,
+ const SrcOp &BasePtr,
+ MachineMemOperand &BaseMMO,
+ int64_t Offset);
+
+ /// Build and insert `G_STORE Val, Addr, MMO`.
+ ///
+ /// Stores the value \p Val to \p Addr.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Val must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr,
+ MachineMemOperand &MMO);
+
+ /// Build and insert a G_STORE instruction, while constructing the
+ /// MachineMemOperand.
+ MachineInstrBuilder
+ buildStore(const SrcOp &Val, const SrcOp &Addr, MachinePointerInfo PtrInfo,
+ Align Alignment,
+ MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
+ const AAMDNodes &AAInfo = AAMDNodes());
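+
+  // Example usage (editorial sketch, not part of the original header):
+  // the PtrInfo/Align overload constructs the MachineMemOperand itself;
+  // `Addr` is an assumed pointer-typed register.
+  //
+  //   auto Seven = B.buildConstant(LLT::scalar(32), 7);
+  //   B.buildStore(Seven, Addr, MachinePointerInfo(), Align(4));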
+
+  /// Build and insert `Res = G_EXTRACT Src, Index`.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res and \p Src must be generic virtual registers.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src,
+                                   uint64_t Index);
+
+ /// Build and insert \p Res = IMPLICIT_DEF.
+ MachineInstrBuilder buildUndef(const DstOp &Res);
+
+  /// Build and insert instructions to put \p Ops together at the specified \p
+  /// Indices to form a larger register.
+  ///
+  /// If the types of the input registers are uniform and cover the entirety of
+  /// \p Res then a G_MERGE_VALUES will be produced. Otherwise an IMPLICIT_DEF
+  /// followed by a sequence of G_INSERT instructions will be produced.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre The final element of the sequence must not extend past the end of the
+ /// destination register.
+ /// \pre The bits defined by each Op (derived from index and scalar size) must
+ /// not overlap.
+ /// \pre \p Indices must be in ascending order of bit position.
+ void buildSequence(Register Res, ArrayRef<Register> Ops,
+ ArrayRef<uint64_t> Indices);
+
+ /// Build and insert \p Res = G_MERGE_VALUES \p Op0, ...
+ ///
+ /// G_MERGE_VALUES combines the input elements contiguously into a larger
+ /// register.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre The entire register \p Res (and no more) must be covered by the input
+ /// registers.
+ /// \pre The type of all \p Ops registers must be identical.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildMerge(const DstOp &Res, ArrayRef<Register> Ops);
+ MachineInstrBuilder buildMerge(const DstOp &Res,
+ std::initializer_list<SrcOp> Ops);
+
+ /// Build and insert \p Res0, ... = G_UNMERGE_VALUES \p Op
+ ///
+  /// G_UNMERGE_VALUES splits contiguous bits of the input into multiple
+  /// registers.
+  ///
+ /// \pre setBasicBlock or setMI must have been called.
+  /// \pre The entire register \p Op (and no more) must be covered by the
+  /// output \p Res registers.
+ /// \pre The type of all \p Res registers must be identical.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildUnmerge(ArrayRef<LLT> Res, const SrcOp &Op);
+ MachineInstrBuilder buildUnmerge(ArrayRef<Register> Res, const SrcOp &Op);
+
+  /// Build and insert an unmerge of pieces of type \p Res to cover \p Op.
+ MachineInstrBuilder buildUnmerge(LLT Res, const SrcOp &Op);
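+
+  // Example usage (editorial sketch, not part of the original header):
+  // merging two s32 values into an s64 and splitting it back.
+  //
+  //   LLT S32 = LLT::scalar(32), S64 = LLT::scalar(64);
+  //   auto Lo = B.buildConstant(S32, 1);
+  //   auto Hi = B.buildConstant(S32, 2);
+  //   auto Wide = B.buildMerge(S64, {Lo, Hi});  // initializer_list<SrcOp>
+  //   auto Parts = B.buildUnmerge(S32, Wide);   // defines two s32 registers
+  //   Register Lo2 = Parts.getReg(0), Hi2 = Parts.getReg(1);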
+
+ /// Build and insert \p Res = G_BUILD_VECTOR \p Op0, ...
+ ///
+ /// G_BUILD_VECTOR creates a vector value from multiple scalar registers.
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre The entire register \p Res (and no more) must be covered by the
+ /// input scalar registers.
+ /// \pre The type of all \p Ops registers must be identical.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildBuildVector(const DstOp &Res,
+ ArrayRef<Register> Ops);
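+
+  // Example usage (editorial sketch, not part of the original header):
+  // `E0`..`E3` are assumed s32 registers.
+  //
+  //   LLT V4S32 = LLT::vector(4, 32);
+  //   auto Vec = B.buildBuildVector(V4S32, {E0, E1, E2, E3});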
+
+  /// Build and insert \p Res = G_BUILD_VECTOR with \p Src replicated to fill
+  /// the number of elements of \p Res.
+ MachineInstrBuilder buildSplatVector(const DstOp &Res,
+ const SrcOp &Src);
+
+ /// Build and insert \p Res = G_BUILD_VECTOR_TRUNC \p Op0, ...
+ ///
+ /// G_BUILD_VECTOR_TRUNC creates a vector value from multiple scalar registers
+ /// which have types larger than the destination vector element type, and
+ /// truncates the values to fit.
+ ///
+  /// If the operands given are already the same size as the vector element
+  /// type, then this method will instead create a G_BUILD_VECTOR instruction.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre The type of all \p Ops registers must be identical.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res,
+ ArrayRef<Register> Ops);
+
+ /// Build and insert a vector splat of a scalar \p Src using a
+ /// G_INSERT_VECTOR_ELT and G_SHUFFLE_VECTOR idiom.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Src must have the same type as the element type of \p Dst.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildShuffleSplat(const DstOp &Res, const SrcOp &Src);
+
+ /// Build and insert \p Res = G_SHUFFLE_VECTOR \p Src1, \p Src2, \p Mask
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1,
+ const SrcOp &Src2, ArrayRef<int> Mask);
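+
+  // Example usage (editorial sketch, not part of the original header):
+  // `Scal`, `Vec1`, and `Vec2` are assumed registers of suitable types.
+  //
+  //   LLT V4S32 = LLT::vector(4, 32);
+  //   auto Splat = B.buildShuffleSplat(V4S32, Scal); // broadcast one scalar
+  //   auto Zip = B.buildShuffleVector(V4S32, Vec1, Vec2, {0, 4, 1, 5});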
+
+ /// Build and insert \p Res = G_CONCAT_VECTORS \p Op0, ...
+ ///
+ /// G_CONCAT_VECTORS creates a vector from the concatenation of 2 or more
+ /// vectors.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre The entire register \p Res (and no more) must be covered by the input
+ /// registers.
+ /// \pre The type of all source operands must be identical.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildConcatVectors(const DstOp &Res,
+ ArrayRef<Register> Ops);
+
+ MachineInstrBuilder buildInsert(const DstOp &Res, const SrcOp &Src,
+ const SrcOp &Op, unsigned Index);
+
+ /// Build and insert either a G_INTRINSIC (if \p HasSideEffects is false) or
+ /// G_INTRINSIC_W_SIDE_EFFECTS instruction. Its first operand will be the
+  /// result register definition unless \p Res is empty. The second
+ /// operand will be the intrinsic's ID.
+ ///
+ /// Callers are expected to add the required definitions and uses afterwards.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef<Register> Res,
+ bool HasSideEffects);
+ MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef<DstOp> Res,
+ bool HasSideEffects);
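+
+  // Example usage (editorial sketch, not part of the original header):
+  // the intrinsic's operands are appended by the caller afterwards;
+  // `SrcReg` is an assumed s32 register.
+  //
+  //   auto MIB = B.buildIntrinsic(Intrinsic::fabs, {LLT::scalar(32)},
+  //                               /*HasSideEffects=*/false);
+  //   MIB.addUse(SrcReg);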
+
+ /// Build and insert \p Res = G_FPTRUNC \p Op
+ ///
+ /// G_FPTRUNC converts a floating-point value into one with a smaller type.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with scalar or vector type.
+ /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  /// \pre \p Res must be smaller than \p Op.
+ ///
+ /// \return The newly created instruction.
+ MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op,
+ Optional<unsigned> Flags = None);
+
+ /// Build and insert \p Res = G_TRUNC \p Op
+ ///
+ /// G_TRUNC extracts the low bits of a type. For a vector type each element is
+ /// truncated independently before being packed into the destination.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with scalar or vector type.
+ /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  /// \pre \p Res must be smaller than \p Op.
+ ///
+ /// \return The newly created instruction.
+ MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op);
+
+ /// Build and insert a \p Res = G_ICMP \p Pred, \p Op0, \p Op1
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+  ///
+ /// \pre \p Res must be a generic virtual register with scalar or
+ /// vector type. Typically this starts as s1 or <N x s1>.
+ /// \pre \p Op0 and Op1 must be generic virtual registers with the
+ /// same number of elements as \p Res. If \p Res is a scalar,
+ /// \p Op0 must be either a scalar or pointer.
+ /// \pre \p Pred must be an integer predicate.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res,
+ const SrcOp &Op0, const SrcOp &Op1);
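+
+  // Example usage (editorial sketch, not part of the original header):
+  // compare two values and branch on the s1 result; `Lhs`, `Rhs`, and
+  // `TrueMBB` are assumptions.
+  //
+  //   LLT S1 = LLT::scalar(1);
+  //   auto Cond = B.buildICmp(CmpInst::ICMP_ULT, S1, Lhs, Rhs);
+  //   B.buildBrCond(Cond, *TrueMBB);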
+
+  /// Build and insert a \p Res = G_FCMP \p Pred, \p Op0, \p Op1
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+  ///
+ /// \pre \p Res must be a generic virtual register with scalar or
+ /// vector type. Typically this starts as s1 or <N x s1>.
+ /// \pre \p Op0 and Op1 must be generic virtual registers with the
+ /// same number of elements as \p Res (or scalar, if \p Res is
+ /// scalar).
+ /// \pre \p Pred must be a floating-point predicate.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res,
+ const SrcOp &Op0, const SrcOp &Op1,
+ Optional<unsigned> Flags = None);
+
+ /// Build and insert a \p Res = G_SELECT \p Tst, \p Op0, \p Op1
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+ /// with the same type.
+ /// \pre \p Tst must be a generic virtual register with scalar, pointer or
+ /// vector type. If vector then it must have the same number of
+ /// elements as the other parameters.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst,
+ const SrcOp &Op0, const SrcOp &Op1,
+ Optional<unsigned> Flags = None);
+
+ /// Build and insert \p Res = G_INSERT_VECTOR_ELT \p Val,
+ /// \p Elt, \p Idx
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res and \p Val must be generic virtual registers
+  ///      with the same vector type.
+  /// \pre \p Elt and \p Idx must be generic virtual registers
+  ///      with scalar type.
+ ///
+ /// \return The newly created instruction.
+ MachineInstrBuilder buildInsertVectorElement(const DstOp &Res,
+ const SrcOp &Val,
+ const SrcOp &Elt,
+ const SrcOp &Idx);
+
+ /// Build and insert \p Res = G_EXTRACT_VECTOR_ELT \p Val, \p Idx
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with scalar type.
+ /// \pre \p Val must be a generic virtual register with vector type.
+ /// \pre \p Idx must be a generic virtual register with scalar type.
+ ///
+ /// \return The newly created instruction.
+ MachineInstrBuilder buildExtractVectorElement(const DstOp &Res,
+ const SrcOp &Val,
+ const SrcOp &Idx);
+
+ /// Build and insert `OldValRes<def>, SuccessRes<def> =
+ /// G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, CmpVal, NewVal, MMO`.
+ ///
+  /// Atomically replace the value at \p Addr with \p NewVal if it is currently
+  /// \p CmpVal; otherwise leave it unchanged. Puts the original value from \p
+  /// Addr in \p OldValRes, and an s1 in \p SuccessRes indicating whether the
+  /// value was replaced.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register of scalar type.
+ /// \pre \p SuccessRes must be a generic virtual register of scalar type. It
+ /// will be assigned 0 on failure and 1 on success.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ /// \pre \p OldValRes, \p CmpVal, and \p NewVal must be generic virtual
+ /// registers of the same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder
+ buildAtomicCmpXchgWithSuccess(Register OldValRes, Register SuccessRes,
+ Register Addr, Register CmpVal, Register NewVal,
+ MachineMemOperand &MMO);
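+
+  // Example usage (editorial sketch, not part of the original header):
+  // `MRI`, `Addr`, `CmpVal`, `NewVal`, and `MMO` are assumed to exist.
+  //
+  //   Register OldVal = MRI.createGenericVirtualRegister(LLT::scalar(32));
+  //   Register Success = MRI.createGenericVirtualRegister(LLT::scalar(1));
+  //   B.buildAtomicCmpXchgWithSuccess(OldVal, Success, Addr, CmpVal, NewVal,
+  //                                   *MMO);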
+
+ /// Build and insert `OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal,
+ /// MMO`.
+ ///
+  /// Atomically replace the value at \p Addr with \p NewVal if it is currently
+  /// \p CmpVal; otherwise leave it unchanged. Puts the original value from \p
+  /// Addr in \p OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register of scalar type.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ /// \pre \p OldValRes, \p CmpVal, and \p NewVal must be generic virtual
+ /// registers of the same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicCmpXchg(Register OldValRes, Register Addr,
+ Register CmpVal, Register NewVal,
+ MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO`.
+ ///
+  /// Atomically read-modify-write the value at \p Addr with \p Val. Puts the
+ /// original value from \p Addr in \p OldValRes. The modification is
+ /// determined by the opcode.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes,
+ const SrcOp &Addr, const SrcOp &Val,
+ MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_XCHG Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with \p Val. Puts the original
+ /// value from \p Addr in \p OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWXchg(Register OldValRes, Register Addr,
+ Register Val, MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_ADD Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with the addition of \p Val and
+ /// the original value. Puts the original value from \p Addr in \p OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWAdd(Register OldValRes, Register Addr,
+ Register Val, MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_SUB Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with the subtraction of \p Val and
+ /// the original value. Puts the original value from \p Addr in \p OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWSub(Register OldValRes, Register Addr,
+ Register Val, MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_AND Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with the bitwise and of \p Val and
+ /// the original value. Puts the original value from \p Addr in \p OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWAnd(Register OldValRes, Register Addr,
+ Register Val, MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_NAND Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with the bitwise nand of \p Val
+ /// and the original value. Puts the original value from \p Addr in \p
+ /// OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWNand(Register OldValRes, Register Addr,
+ Register Val, MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_OR Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with the bitwise or of \p Val and
+ /// the original value. Puts the original value from \p Addr in \p OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWOr(Register OldValRes, Register Addr,
+ Register Val, MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_XOR Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with the bitwise xor of \p Val and
+ /// the original value. Puts the original value from \p Addr in \p OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWXor(Register OldValRes, Register Addr,
+ Register Val, MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_MAX Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with the signed maximum of \p
+ /// Val and the original value. Puts the original value from \p Addr in \p
+ /// OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWMax(Register OldValRes, Register Addr,
+ Register Val, MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_MIN Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with the signed minimum of \p
+ /// Val and the original value. Puts the original value from \p Addr in \p
+ /// OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWMin(Register OldValRes, Register Addr,
+ Register Val, MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_UMAX Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with the unsigned maximum of \p
+ /// Val and the original value. Puts the original value from \p Addr in \p
+ /// OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWUmax(Register OldValRes, Register Addr,
+ Register Val, MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_UMIN Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with the unsigned minimum of \p
+ /// Val and the original value. Puts the original value from \p Addr in \p
+ /// OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWUmin(Register OldValRes, Register Addr,
+ Register Val, MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_FADD Addr, Val, MMO`.
+ MachineInstrBuilder buildAtomicRMWFAdd(
+ const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
+ MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_FSUB Addr, Val, MMO`.
+ MachineInstrBuilder buildAtomicRMWFSub(
+ const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
+ MachineMemOperand &MMO);
+
+ /// Build and insert `G_FENCE Ordering, Scope`.
+ MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope);
+
+ /// Build and insert \p Dst = G_FREEZE \p Src
+ MachineInstrBuilder buildFreeze(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_FREEZE, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_BLOCK_ADDR \p BA
+ ///
+ /// G_BLOCK_ADDR computes the address of a basic block.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register of a pointer type.
+ ///
+ /// \return The newly created instruction.
+ MachineInstrBuilder buildBlockAddress(Register Res, const BlockAddress *BA);
+
+ /// Build and insert \p Res = G_ADD \p Op0, \p Op1
+ ///
+ /// G_ADD sets \p Res to the sum of integer parameters \p Op0 and \p Op1,
+ /// truncated to their width.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+ /// with the same (scalar or vector) type).
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAdd(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_ADD, {Dst}, {Src0, Src1}, Flags);
+ }
+
+ /// Build and insert \p Res = G_SUB \p Op0, \p Op1
+ ///
+  /// G_SUB sets \p Res to the difference of integer parameters \p Op0 and
+  /// \p Op1, truncated to their width.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+ /// with the same (scalar or vector) type).
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildSub(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_SUB, {Dst}, {Src0, Src1}, Flags);
+ }
+
+ /// Build and insert \p Res = G_MUL \p Op0, \p Op1
+ ///
+  /// G_MUL sets \p Res to the product of integer parameters \p Op0 and \p Op1,
+  /// truncated to their width.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+ /// with the same (scalar or vector) type).
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildMul(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_MUL, {Dst}, {Src0, Src1}, Flags);
+ }
+
+ MachineInstrBuilder buildUMulH(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_UMULH, {Dst}, {Src0, Src1}, Flags);
+ }
+
+ MachineInstrBuilder buildSMulH(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_SMULH, {Dst}, {Src0, Src1}, Flags);
+ }
+
+ MachineInstrBuilder buildFMul(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_FMUL, {Dst}, {Src0, Src1}, Flags);
+ }
+
+ MachineInstrBuilder buildFMinNum(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_FMINNUM, {Dst}, {Src0, Src1}, Flags);
+ }
+
+ MachineInstrBuilder buildFMaxNum(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_FMAXNUM, {Dst}, {Src0, Src1}, Flags);
+ }
+
+ MachineInstrBuilder buildFMinNumIEEE(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_FMINNUM_IEEE, {Dst}, {Src0, Src1}, Flags);
+ }
+
+ MachineInstrBuilder buildFMaxNumIEEE(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_FMAXNUM_IEEE, {Dst}, {Src0, Src1}, Flags);
+ }
+
+ MachineInstrBuilder buildShl(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_SHL, {Dst}, {Src0, Src1}, Flags);
+ }
+
+ MachineInstrBuilder buildLShr(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_LSHR, {Dst}, {Src0, Src1}, Flags);
+ }
+
+ MachineInstrBuilder buildAShr(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_ASHR, {Dst}, {Src0, Src1}, Flags);
+ }
+
+ /// Build and insert \p Res = G_AND \p Op0, \p Op1
+ ///
+ /// G_AND sets \p Res to the bitwise and of integer parameters \p Op0 and \p
+ /// Op1.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+ /// with the same (scalar or vector) type).
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1) {
+ return buildInstr(TargetOpcode::G_AND, {Dst}, {Src0, Src1});
+ }
+
+ /// Build and insert \p Res = G_OR \p Op0, \p Op1
+ ///
+ /// G_OR sets \p Res to the bitwise or of integer parameters \p Op0 and \p
+ /// Op1.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+ /// with the same (scalar or vector) type).
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildOr(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1) {
+ return buildInstr(TargetOpcode::G_OR, {Dst}, {Src0, Src1});
+ }
+
+ /// Build and insert \p Res = G_XOR \p Op0, \p Op1
+ MachineInstrBuilder buildXor(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1) {
+ return buildInstr(TargetOpcode::G_XOR, {Dst}, {Src0, Src1});
+ }
+
+ /// Build and insert a bitwise not,
+ /// \p NegOne = G_CONSTANT -1
+  /// \p Res = G_XOR \p Op0, NegOne
+ MachineInstrBuilder buildNot(const DstOp &Dst, const SrcOp &Src0) {
+ auto NegOne = buildConstant(Dst.getLLTTy(*getMRI()), -1);
+ return buildInstr(TargetOpcode::G_XOR, {Dst}, {Src0, NegOne});
+ }
+
+  /// Build and insert \p Res = G_CTPOP \p Src0
+ MachineInstrBuilder buildCTPOP(const DstOp &Dst, const SrcOp &Src0) {
+ return buildInstr(TargetOpcode::G_CTPOP, {Dst}, {Src0});
+ }
+
+  /// Build and insert \p Res = G_CTLZ \p Src0
+ MachineInstrBuilder buildCTLZ(const DstOp &Dst, const SrcOp &Src0) {
+ return buildInstr(TargetOpcode::G_CTLZ, {Dst}, {Src0});
+ }
+
+  /// Build and insert \p Res = G_CTLZ_ZERO_UNDEF \p Src0
+ MachineInstrBuilder buildCTLZ_ZERO_UNDEF(const DstOp &Dst, const SrcOp &Src0) {
+ return buildInstr(TargetOpcode::G_CTLZ_ZERO_UNDEF, {Dst}, {Src0});
+ }
+
+  /// Build and insert \p Res = G_CTTZ \p Src0
+ MachineInstrBuilder buildCTTZ(const DstOp &Dst, const SrcOp &Src0) {
+ return buildInstr(TargetOpcode::G_CTTZ, {Dst}, {Src0});
+ }
+
+  /// Build and insert \p Res = G_CTTZ_ZERO_UNDEF \p Src0
+ MachineInstrBuilder buildCTTZ_ZERO_UNDEF(const DstOp &Dst, const SrcOp &Src0) {
+ return buildInstr(TargetOpcode::G_CTTZ_ZERO_UNDEF, {Dst}, {Src0});
+ }
+
+ /// Build and insert \p Dst = G_BSWAP \p Src0
+ MachineInstrBuilder buildBSwap(const DstOp &Dst, const SrcOp &Src0) {
+ return buildInstr(TargetOpcode::G_BSWAP, {Dst}, {Src0});
+ }
+
+ /// Build and insert \p Res = G_FADD \p Op0, \p Op1
+ MachineInstrBuilder buildFAdd(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_FADD, {Dst}, {Src0, Src1}, Flags);
+ }
+
+ /// Build and insert \p Res = G_FSUB \p Op0, \p Op1
+ MachineInstrBuilder buildFSub(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_FSUB, {Dst}, {Src0, Src1}, Flags);
+ }
+
+ /// Build and insert \p Res = G_FDIV \p Op0, \p Op1
+ MachineInstrBuilder buildFDiv(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_FDIV, {Dst}, {Src0, Src1}, Flags);
+ }
+
+ /// Build and insert \p Res = G_FMA \p Op0, \p Op1, \p Op2
+ MachineInstrBuilder buildFMA(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1, const SrcOp &Src2,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_FMA, {Dst}, {Src0, Src1, Src2}, Flags);
+ }
+
+ /// Build and insert \p Res = G_FMAD \p Op0, \p Op1, \p Op2
+ MachineInstrBuilder buildFMAD(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1, const SrcOp &Src2,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_FMAD, {Dst}, {Src0, Src1, Src2}, Flags);
+ }
+
+ /// Build and insert \p Res = G_FNEG \p Op0
+ MachineInstrBuilder buildFNeg(const DstOp &Dst, const SrcOp &Src0,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_FNEG, {Dst}, {Src0}, Flags);
+ }
+
+ /// Build and insert \p Res = G_FABS \p Op0
+ MachineInstrBuilder buildFAbs(const DstOp &Dst, const SrcOp &Src0,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_FABS, {Dst}, {Src0}, Flags);
+ }
+
+ /// Build and insert \p Dst = G_FCANONICALIZE \p Src0
+ MachineInstrBuilder buildFCanonicalize(const DstOp &Dst, const SrcOp &Src0,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_FCANONICALIZE, {Dst}, {Src0}, Flags);
+ }
+
+ /// Build and insert \p Dst = G_INTRINSIC_TRUNC \p Src0
+ MachineInstrBuilder buildIntrinsicTrunc(const DstOp &Dst, const SrcOp &Src0,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_INTRINSIC_TRUNC, {Dst}, {Src0}, Flags);
+ }
+
+  /// Build and insert \p Res = G_FFLOOR \p Src0
+ MachineInstrBuilder buildFFloor(const DstOp &Dst, const SrcOp &Src0,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_FFLOOR, {Dst}, {Src0}, Flags);
+ }
+
+ /// Build and insert \p Dst = G_FLOG \p Src
+ MachineInstrBuilder buildFLog(const DstOp &Dst, const SrcOp &Src,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_FLOG, {Dst}, {Src}, Flags);
+ }
+
+ /// Build and insert \p Dst = G_FLOG2 \p Src
+ MachineInstrBuilder buildFLog2(const DstOp &Dst, const SrcOp &Src,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_FLOG2, {Dst}, {Src}, Flags);
+ }
+
+ /// Build and insert \p Dst = G_FEXP2 \p Src
+ MachineInstrBuilder buildFExp2(const DstOp &Dst, const SrcOp &Src,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_FEXP2, {Dst}, {Src}, Flags);
+ }
+
+ /// Build and insert \p Dst = G_FPOW \p Src0, \p Src1
+ MachineInstrBuilder buildFPow(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_FPOW, {Dst}, {Src0, Src1}, Flags);
+ }
+
+ /// Build and insert \p Res = G_FCOPYSIGN \p Op0, \p Op1
+ MachineInstrBuilder buildFCopysign(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1) {
+ return buildInstr(TargetOpcode::G_FCOPYSIGN, {Dst}, {Src0, Src1});
+ }
+
+ /// Build and insert \p Res = G_UITOFP \p Src0
+ MachineInstrBuilder buildUITOFP(const DstOp &Dst, const SrcOp &Src0) {
+ return buildInstr(TargetOpcode::G_UITOFP, {Dst}, {Src0});
+ }
+
+ /// Build and insert \p Res = G_SITOFP \p Src0
+ MachineInstrBuilder buildSITOFP(const DstOp &Dst, const SrcOp &Src0) {
+ return buildInstr(TargetOpcode::G_SITOFP, {Dst}, {Src0});
+ }
+
+ /// Build and insert \p Res = G_FPTOUI \p Src0
+ MachineInstrBuilder buildFPTOUI(const DstOp &Dst, const SrcOp &Src0) {
+ return buildInstr(TargetOpcode::G_FPTOUI, {Dst}, {Src0});
+ }
+
+ /// Build and insert \p Res = G_FPTOSI \p Src0
+ MachineInstrBuilder buildFPTOSI(const DstOp &Dst, const SrcOp &Src0) {
+ return buildInstr(TargetOpcode::G_FPTOSI, {Dst}, {Src0});
+ }
+
+ /// Build and insert \p Res = G_SMIN \p Op0, \p Op1
+ MachineInstrBuilder buildSMin(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1) {
+ return buildInstr(TargetOpcode::G_SMIN, {Dst}, {Src0, Src1});
+ }
+
+ /// Build and insert \p Res = G_SMAX \p Op0, \p Op1
+ MachineInstrBuilder buildSMax(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1) {
+ return buildInstr(TargetOpcode::G_SMAX, {Dst}, {Src0, Src1});
+ }
+
+ /// Build and insert \p Res = G_UMIN \p Op0, \p Op1
+ MachineInstrBuilder buildUMin(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1) {
+ return buildInstr(TargetOpcode::G_UMIN, {Dst}, {Src0, Src1});
+ }
+
+ /// Build and insert \p Res = G_UMAX \p Op0, \p Op1
+ MachineInstrBuilder buildUMax(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1) {
+ return buildInstr(TargetOpcode::G_UMAX, {Dst}, {Src0, Src1});
+ }
+
+ /// Build and insert \p Dst = G_ABS \p Src
+ MachineInstrBuilder buildAbs(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_ABS, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_JUMP_TABLE \p JTI
+ ///
+ /// G_JUMP_TABLE sets \p Res to the address of the jump table specified by
+ /// the jump table index \p JTI.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI);
+
+ /// Build and insert \p Res = G_VECREDUCE_SEQ_FADD \p ScalarIn, \p VecIn
+ ///
+ /// \p ScalarIn is the scalar accumulator input to start the sequential
+ /// reduction operation of \p VecIn.
+ MachineInstrBuilder buildVecReduceSeqFAdd(const DstOp &Dst,
+ const SrcOp &ScalarIn,
+ const SrcOp &VecIn) {
+    return buildInstr(TargetOpcode::G_VECREDUCE_SEQ_FADD, {Dst},
+                      {ScalarIn, VecIn});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_SEQ_FMUL \p ScalarIn, \p VecIn
+ ///
+ /// \p ScalarIn is the scalar accumulator input to start the sequential
+ /// reduction operation of \p VecIn.
+ MachineInstrBuilder buildVecReduceSeqFMul(const DstOp &Dst,
+ const SrcOp &ScalarIn,
+ const SrcOp &VecIn) {
+    return buildInstr(TargetOpcode::G_VECREDUCE_SEQ_FMUL, {Dst},
+                      {ScalarIn, VecIn});
+ }
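+
+  // Example usage (editorial sketch, not part of the original header):
+  // `Acc` (an assumed s32 register) seeds a strictly ordered fadd
+  // reduction of the vector register `Vec`.
+  //
+  //   auto Red = B.buildVecReduceSeqFAdd(LLT::scalar(32), Acc, Vec);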
+
+  /// Build and insert \p Res = G_VECREDUCE_FADD \p ScalarIn, \p VecIn
+ ///
+ /// \p ScalarIn is the scalar accumulator input to the reduction operation of
+ /// \p VecIn.
+ MachineInstrBuilder buildVecReduceFAdd(const DstOp &Dst,
+ const SrcOp &ScalarIn,
+ const SrcOp &VecIn) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_FADD, {Dst}, {ScalarIn, VecIn});
+ }
+
+  /// Build and insert \p Res = G_VECREDUCE_FMUL \p ScalarIn, \p VecIn
+ ///
+ /// \p ScalarIn is the scalar accumulator input to the reduction operation of
+ /// \p VecIn.
+ MachineInstrBuilder buildVecReduceFMul(const DstOp &Dst,
+ const SrcOp &ScalarIn,
+ const SrcOp &VecIn) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_FMUL, {Dst}, {ScalarIn, VecIn});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_FMAX \p Src
+ MachineInstrBuilder buildVecReduceFMax(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_FMAX, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_FMIN \p Src
+ MachineInstrBuilder buildVecReduceFMin(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_FMIN, {Dst}, {Src});
+  }
+
+ /// Build and insert \p Res = G_VECREDUCE_ADD \p Src
+ MachineInstrBuilder buildVecReduceAdd(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_ADD, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_MUL \p Src
+ MachineInstrBuilder buildVecReduceMul(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_MUL, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_AND \p Src
+ MachineInstrBuilder buildVecReduceAnd(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_AND, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_OR \p Src
+ MachineInstrBuilder buildVecReduceOr(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_OR, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_XOR \p Src
+ MachineInstrBuilder buildVecReduceXor(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_XOR, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_SMAX \p Src
+ MachineInstrBuilder buildVecReduceSMax(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_SMAX, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_SMIN \p Src
+ MachineInstrBuilder buildVecReduceSMin(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_SMIN, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_UMAX \p Src
+ MachineInstrBuilder buildVecReduceUMax(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_UMAX, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_UMIN \p Src
+ MachineInstrBuilder buildVecReduceUMin(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_UMIN, {Dst}, {Src});
+  }
+
+ virtual MachineInstrBuilder buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
+ ArrayRef<SrcOp> SrcOps,
+ Optional<unsigned> Flags = None);
+};
+
+} // End namespace llvm.
+#endif // LLVM_CODEGEN_GLOBALISEL_MACHINEIRBUILDER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/RegBankSelect.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
new file mode 100644
index 0000000000..099537d6d7
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
@@ -0,0 +1,680 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//=- llvm/CodeGen/GlobalISel/RegBankSelect.h - Reg Bank Selector --*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file describes the interface of the MachineFunctionPass
+/// responsible for assigning generic virtual registers to register banks.
+///
+/// By default, the reg bank selector relies on local decisions to
+/// assign the register bank. In other words, it looks at one instruction
+/// at a time to decide where the operand of that instruction should live.
+///
+/// At higher optimization levels, we could imagine that the reg bank selector
+/// would use more global analysis and do crazier things like duplicating
+/// instructions and so on. This is future work.
+///
+/// For now, the pass uses a greedy algorithm to decide where the operand
+/// of an instruction should live. It asks the target which banks may be
+/// used for each operand of the instruction and what the cost is. Then,
+/// it chooses the solution which minimizes the cost of the instruction plus
+/// the cost of any move that may be needed to bring the values into the
+/// right register bank.
+/// In other words, the cost for an instruction on a register bank RegBank
+/// is: Cost of I on RegBank plus the sum of the cost for bringing the
+/// input operands from their current register bank to RegBank.
+/// Thus, the cost is given by the following formula:
+/// cost(I, RegBank) = cost(I.Opcode, RegBank) +
+/// sum(for each arg in I.arguments: costCrossCopy(arg.RegBank, RegBank))
+///
+/// E.g., let's say we are assigning the register bank for the instruction
+/// defining v2.
+/// v0(A_REGBANK) = ...
+/// v1(A_REGBANK) = ...
+/// v2 = G_ADD i32 v0, v1 <-- MI
+///
+/// The target may say it can generate G_ADD i32 on register banks A and B
+/// with costs of 5 and 1, respectively.
+/// Then, let's say the cost of a cross-register-bank copy from A to B is 1.
+/// The reg bank selector would compare the following two costs:
+/// cost(MI, A_REGBANK) = cost(G_ADD, A_REGBANK) + cost(v0.RegBank, A_REGBANK) +
+/// cost(v1.RegBank, A_REGBANK)
+/// = 5 + cost(A_REGBANK, A_REGBANK) + cost(A_REGBANK,
+/// A_REGBANK)
+/// = 5 + 0 + 0 = 5
+/// cost(MI, B_REGBANK) = cost(G_ADD, B_REGBANK) + cost(v0.RegBank, B_REGBANK) +
+/// cost(v1.RegBank, B_REGBANK)
+/// = 1 + cost(A_REGBANK, B_REGBANK) + cost(A_REGBANK,
+/// B_REGBANK)
+/// = 1 + 1 + 1 = 3
+/// Therefore, in this specific example, the reg bank selector would choose
+/// bank B for MI.
+/// v0(A_REGBANK) = ...
+/// v1(A_REGBANK) = ...
+/// tmp0(B_REGBANK) = COPY v0
+/// tmp1(B_REGBANK) = COPY v1
+/// v2(B_REGBANK) = G_ADD i32 tmp0, tmp1
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_REGBANKSELECT_H
+#define LLVM_CODEGEN_GLOBALISEL_REGBANKSELECT_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
+#include <cassert>
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+
+class BlockFrequency;
+class MachineBlockFrequencyInfo;
+class MachineBranchProbabilityInfo;
+class MachineOperand;
+class MachineRegisterInfo;
+class Pass;
+class raw_ostream;
+class TargetPassConfig;
+class TargetRegisterInfo;
+
+/// This pass implements the reg bank selector pass used in the GlobalISel
+/// pipeline. At the end of this pass, all register operands have been
+/// assigned to a register bank.
+class RegBankSelect : public MachineFunctionPass {
+public:
+ static char ID;
+
+ /// List of the modes supported by the RegBankSelect pass.
+ enum Mode {
+ /// Assign the register banks as fast as possible (default).
+ Fast,
+ /// Greedily minimize the cost of assigning register banks.
+ /// This should produce code of greater quality, but will
+ /// require more compile time.
+ Greedy
+ };
+
+ /// Abstract class used to represent an insertion point in a CFG.
+ /// This class records an insertion point and materializes it on
+ /// demand.
+  /// It allows reasoning about the frequency of this insertion point,
+ /// without having to logically materialize it (e.g., on an edge),
+ /// before we actually need to insert something.
+ class InsertPoint {
+ protected:
+ /// Tell if the insert point has already been materialized.
+ bool WasMaterialized = false;
+
+ /// Materialize the insertion point.
+ ///
+ /// If isSplit() is true, this involves actually splitting
+ /// the block or edge.
+ ///
+ /// \post getPointImpl() returns a valid iterator.
+ /// \post getInsertMBBImpl() returns a valid basic block.
+ /// \post isSplit() == false ; no more splitting should be required.
+ virtual void materialize() = 0;
+
+ /// Return the materialized insertion basic block.
+ /// Code will be inserted into that basic block.
+ ///
+ /// \pre ::materialize has been called.
+ virtual MachineBasicBlock &getInsertMBBImpl() = 0;
+
+ /// Return the materialized insertion point.
+ /// Code will be inserted before that point.
+ ///
+ /// \pre ::materialize has been called.
+ virtual MachineBasicBlock::iterator getPointImpl() = 0;
+
+ public:
+ virtual ~InsertPoint() = default;
+
+ /// The first call to this method will cause the splitting to
+    /// happen if need be, then subsequent calls just return
+ /// the iterator to that point. I.e., no more splitting will
+ /// occur.
+ ///
+ /// \return The iterator that should be used with
+ /// MachineBasicBlock::insert. I.e., additional code happens
+ /// before that point.
+ MachineBasicBlock::iterator getPoint() {
+ if (!WasMaterialized) {
+ WasMaterialized = true;
+ assert(canMaterialize() && "Impossible to materialize this point");
+ materialize();
+ }
+ // When we materialized the point we should have done the splitting.
+ assert(!isSplit() && "Wrong pre-condition");
+ return getPointImpl();
+ }
+
+ /// The first call to this method will cause the splitting to
+    /// happen if need be, then subsequent calls just return
+ /// the basic block that contains the insertion point.
+ /// I.e., no more splitting will occur.
+ ///
+    /// \return The basic block that should be used with
+ /// MachineBasicBlock::insert and ::getPoint. The new code should
+ /// happen before that point.
+ MachineBasicBlock &getInsertMBB() {
+ if (!WasMaterialized) {
+ WasMaterialized = true;
+ assert(canMaterialize() && "Impossible to materialize this point");
+ materialize();
+ }
+ // When we materialized the point we should have done the splitting.
+ assert(!isSplit() && "Wrong pre-condition");
+ return getInsertMBBImpl();
+ }
+
+    /// Insert \p MI just before ::getPoint().
+ MachineBasicBlock::iterator insert(MachineInstr &MI) {
+ return getInsertMBB().insert(getPoint(), &MI);
+ }
+
+ /// Does this point involve splitting an edge or block?
+    /// As soon as ::getPoint is called, and thus the point is
+    /// materialized, the point will not require splitting anymore,
+ /// i.e., this will return false.
+ virtual bool isSplit() const { return false; }
+
+ /// Frequency of the insertion point.
+    /// \p P is used to access the various analyses that will help to
+    /// get that information, like MachineBlockFrequencyInfo. If \p P
+    /// does not contain enough information to return the actual frequency,
+ /// this returns 1.
+ virtual uint64_t frequency(const Pass &P) const { return 1; }
+
+ /// Check whether this insertion point can be materialized.
+    /// As soon as ::getPoint is called, and thus the point is materialized,
+    /// calling this method does not make sense.
+ virtual bool canMaterialize() const { return false; }
+ };
+
+ /// Insertion point before or after an instruction.
+ class InstrInsertPoint : public InsertPoint {
+ private:
+ /// Insertion point.
+ MachineInstr &Instr;
+
+    /// Whether the insertion point is before or after Instr.
+ bool Before;
+
+ void materialize() override;
+
+ MachineBasicBlock::iterator getPointImpl() override {
+ if (Before)
+ return Instr;
+ return Instr.getNextNode() ? *Instr.getNextNode()
+ : Instr.getParent()->end();
+ }
+
+ MachineBasicBlock &getInsertMBBImpl() override {
+ return *Instr.getParent();
+ }
+
+ public:
+ /// Create an insertion point before (\p Before=true) or after \p Instr.
+ InstrInsertPoint(MachineInstr &Instr, bool Before = true);
+
+ bool isSplit() const override;
+ uint64_t frequency(const Pass &P) const override;
+
+    // Worst case, we need to split the basic block, but that is still doable.
+ bool canMaterialize() const override { return true; }
+ };
+
+ /// Insertion point at the beginning or end of a basic block.
+ class MBBInsertPoint : public InsertPoint {
+ private:
+ /// Insertion point.
+ MachineBasicBlock &MBB;
+
+    /// Whether the insertion point is at the beginning or end of MBB.
+ bool Beginning;
+
+    void materialize() override { /* Nothing to do to materialize. */ }
+
+ MachineBasicBlock::iterator getPointImpl() override {
+ return Beginning ? MBB.begin() : MBB.end();
+ }
+
+ MachineBasicBlock &getInsertMBBImpl() override { return MBB; }
+
+ public:
+ MBBInsertPoint(MachineBasicBlock &MBB, bool Beginning = true)
+ : InsertPoint(), MBB(MBB), Beginning(Beginning) {
+ // If we try to insert before phis, we should use the insertion
+ // points on the incoming edges.
+ assert((!Beginning || MBB.getFirstNonPHI() == MBB.begin()) &&
+ "Invalid beginning point");
+ // If we try to insert after the terminators, we should use the
+      // points on the outgoing edges.
+ assert((Beginning || MBB.getFirstTerminator() == MBB.end()) &&
+ "Invalid end point");
+ }
+
+ bool isSplit() const override { return false; }
+ uint64_t frequency(const Pass &P) const override;
+    bool canMaterialize() const override { return true; }
+ };
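+
+  // Example usage (editorial sketch, not part of the original header):
+  // materialize an insertion point at the start of `MBB` and insert an
+  // assumed repairing instruction `RepairMI` there; MBBInsertPoint never
+  // requires splitting, so isSplit() stays false.
+  //
+  //   RegBankSelect::MBBInsertPoint IP(MBB, /*Beginning=*/true);
+  //   IP.insert(*RepairMI);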
+
+ /// Insertion point on an edge.
+ class EdgeInsertPoint : public InsertPoint {
+ private:
+ /// Source of the edge.
+ MachineBasicBlock &Src;
+
+ /// Destination of the edge.
+    /// After the materialization is done, this holds the basic block
+ /// that resulted from the splitting.
+ MachineBasicBlock *DstOrSplit;
+
+ /// P is used to update the analysis passes as applicable.
+ Pass &P;
+
+ void materialize() override;
+
+ MachineBasicBlock::iterator getPointImpl() override {
+ // DstOrSplit should be the Split block at this point.
+ // I.e., it should have one predecessor, Src, and one successor,
+ // the original Dst.
+ assert(DstOrSplit && DstOrSplit->isPredecessor(&Src) &&
+ DstOrSplit->pred_size() == 1 && DstOrSplit->succ_size() == 1 &&
+ "Did not split?!");
+ return DstOrSplit->begin();
+ }
+
+ MachineBasicBlock &getInsertMBBImpl() override { return *DstOrSplit; }
+
+ public:
+ EdgeInsertPoint(MachineBasicBlock &Src, MachineBasicBlock &Dst, Pass &P)
+ : InsertPoint(), Src(Src), DstOrSplit(&Dst), P(P) {}
+
+ bool isSplit() const override {
+ return Src.succ_size() > 1 && DstOrSplit->pred_size() > 1;
+ }
+
+ uint64_t frequency(const Pass &P) const override;
+ bool canMaterialize() const override;
+ };
+
+ /// Struct used to represent the placement of a repairing point for
+ /// a given operand.
+ class RepairingPlacement {
+ public:
+ /// Define the kind of action this repairing needs.
+ enum RepairingKind {
+ /// Nothing to repair, just drop this action.
+ None,
+      /// Repairing code needs to happen before InsertPoints.
+ Insert,
+ /// (Re)assign the register bank of the operand.
+ Reassign,
+ /// Mark this repairing placement as impossible.
+ Impossible
+ };
+
+ /// \name Convenient types for a list of insertion points.
+ /// @{
+ using InsertionPoints = SmallVector<std::unique_ptr<InsertPoint>, 2>;
+ using insertpt_iterator = InsertionPoints::iterator;
+ using const_insertpt_iterator = InsertionPoints::const_iterator;
+ /// @}
+
+ private:
+ /// Kind of repairing.
+ RepairingKind Kind;
+ /// Index of the operand that will be repaired.
+ unsigned OpIdx;
+    /// Are all the insert points materializable?
+ bool CanMaterialize;
+    /// Do any of the insert points need splitting?
+ bool HasSplit = false;
+ /// Insertion point for the repair code.
+ /// The repairing code needs to happen just before these points.
+ InsertionPoints InsertPoints;
+ /// Some insertion points may need to update the liveness and such.
+ Pass &P;
+
+ public:
+ /// Create a repairing placement for the \p OpIdx-th operand of
+ /// \p MI. \p TRI is used to make some checks on the register aliases
+ /// if the machine operand is a physical register. \p P is used to
+    /// update liveness information and such when materializing the
+ /// points.
+ RepairingPlacement(MachineInstr &MI, unsigned OpIdx,
+ const TargetRegisterInfo &TRI, Pass &P,
+ RepairingKind Kind = RepairingKind::Insert);
+
+ /// \name Getters.
+ /// @{
+ RepairingKind getKind() const { return Kind; }
+ unsigned getOpIdx() const { return OpIdx; }
+ bool canMaterialize() const { return CanMaterialize; }
+ bool hasSplit() { return HasSplit; }
+ /// @}
+
+ /// \name Overloaded methods to add an insertion point.
+ /// @{
+ /// Add a MBBInsertionPoint to the list of InsertPoints.
+ void addInsertPoint(MachineBasicBlock &MBB, bool Beginning);
+ /// Add a InstrInsertionPoint to the list of InsertPoints.
+ void addInsertPoint(MachineInstr &MI, bool Before);
+ /// Add an EdgeInsertionPoint (\p Src, \p Dst) to the list of InsertPoints.
+ void addInsertPoint(MachineBasicBlock &Src, MachineBasicBlock &Dst);
+ /// Add an InsertPoint to the list of insert points.
+ /// This method takes the ownership of &\p Point.
+ void addInsertPoint(InsertPoint &Point);
+ /// @}
+
+ /// \name Accessors related to the insertion points.
+ /// @{
+ insertpt_iterator begin() { return InsertPoints.begin(); }
+ insertpt_iterator end() { return InsertPoints.end(); }
+
+ const_insertpt_iterator begin() const { return InsertPoints.begin(); }
+ const_insertpt_iterator end() const { return InsertPoints.end(); }
+
+ unsigned getNumInsertPoints() const { return InsertPoints.size(); }
+ /// @}
+
+ /// Change the type of this repairing placement to \p NewKind.
+ /// It is not possible to switch a repairing placement to the
+ /// RepairingKind::Insert. There is no fundamental problem with
+    /// that, but there are no uses either, so it is not supported for now.
+ ///
+ /// \pre NewKind != RepairingKind::Insert
+ /// \post getKind() == NewKind
+ void switchTo(RepairingKind NewKind) {
+ assert(NewKind != Kind && "Already of the right Kind");
+ Kind = NewKind;
+ InsertPoints.clear();
+ CanMaterialize = NewKind != RepairingKind::Impossible;
+ HasSplit = false;
+ assert(NewKind != RepairingKind::Insert &&
+ "We would need more MI to switch to Insert");
+ }
+ };
+
+private:
+ /// Helper class used to represent the cost for mapping an instruction.
+ /// When mapping an instruction, we may introduce some repairing code.
+ /// In most cases, the repairing code is local to the instruction,
+ /// thus, we can omit the basic block frequency from the cost.
+ /// However, some alternatives may produce non-local cost, e.g., when
+ /// repairing a phi, and thus we then need to scale the local cost
+ /// to the non-local cost. This class does this for us.
+ /// \note: We could simply always scale the cost. The problem is that
+ /// the cost would then saturate more easily, and we would end up
+ /// with the same cost for genuinely different alternatives.
+ /// Another option would be to use APInt everywhere.
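+ ///
+ /// As an illustration only (Freq and BestCost below are hypothetical
+ /// variables), a typical use could look like:
+ /// \code
+ /// MappingCost Cost(MBFI->getBlockFreq(MI.getParent()));
+ /// Cost.addLocalCost(1);       // A copy next to MI: local frequency.
+ /// Cost.addNonLocalCost(Freq); // Repairing code in another block,
+ ///                             // already scaled by that block's frequency.
+ /// if (Cost < BestCost)
+ ///   BestCost = Cost;
+ /// \endcode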
+ class MappingCost {
+ private:
+ /// Cost of the local instructions.
+ /// This cost is free of basic block frequency.
+ uint64_t LocalCost = 0;
+ /// Cost of the non-local instructions.
+ /// This cost should include the frequency of the related blocks.
+ uint64_t NonLocalCost = 0;
+ /// Frequency of the block where the local instructions live.
+ uint64_t LocalFreq;
+
+ MappingCost(uint64_t LocalCost, uint64_t NonLocalCost, uint64_t LocalFreq)
+ : LocalCost(LocalCost), NonLocalCost(NonLocalCost),
+ LocalFreq(LocalFreq) {}
+
+ /// Check if this cost is saturated.
+ bool isSaturated() const;
+
+ public:
+ /// Create a MappingCost assuming that most of the instructions
+ /// will occur in a basic block with \p LocalFreq frequency.
+ MappingCost(const BlockFrequency &LocalFreq);
+
+ /// Add \p Cost to the local cost.
+ /// \return true if this cost is saturated, false otherwise.
+ bool addLocalCost(uint64_t Cost);
+
+ /// Add \p Cost to the non-local cost.
+ /// Non-local costs should reflect the frequency of their placement.
+ /// \return true if this cost is saturated, false otherwise.
+ bool addNonLocalCost(uint64_t Cost);
+
+ /// Saturate the cost to the maximal representable value.
+ void saturate();
+
+ /// Return an instance of MappingCost that represents an
+ /// impossible mapping.
+ static MappingCost ImpossibleCost();
+
+ /// Check if this is less than \p Cost.
+ bool operator<(const MappingCost &Cost) const;
+ /// Check if this is equal to \p Cost.
+ bool operator==(const MappingCost &Cost) const;
+ /// Check if this is not equal to \p Cost.
+ bool operator!=(const MappingCost &Cost) const { return !(*this == Cost); }
+ /// Check if this is greater than \p Cost.
+ bool operator>(const MappingCost &Cost) const {
+ return *this != Cost && Cost < *this;
+ }
+
+ /// Print this on dbgs() stream.
+ void dump() const;
+
+ /// Print this on \p OS.
+ void print(raw_ostream &OS) const;
+
+ /// Overload the stream operator for easy debug printing.
+ friend raw_ostream &operator<<(raw_ostream &OS, const MappingCost &Cost) {
+ Cost.print(OS);
+ return OS;
+ }
+ };
+
+ /// Interface to the target lowering info related
+ /// to register banks.
+ const RegisterBankInfo *RBI = nullptr;
+
+ /// MRI contains all the register class/bank information that this
+ /// pass uses and updates.
+ MachineRegisterInfo *MRI = nullptr;
+
+ /// Information on the register classes for the current function.
+ const TargetRegisterInfo *TRI = nullptr;
+
+ /// Get the frequency of blocks.
+ /// This is required for non-fast mode.
+ MachineBlockFrequencyInfo *MBFI = nullptr;
+
+ /// Get the frequency of the edges.
+ /// This is required for non-fast mode.
+ MachineBranchProbabilityInfo *MBPI = nullptr;
+
+ /// Current optimization remark emitter. Used to report failures.
+ std::unique_ptr<MachineOptimizationRemarkEmitter> MORE;
+
+ /// Helper class used for every code morphing.
+ MachineIRBuilder MIRBuilder;
+
+ /// Optimization mode of the pass.
+ Mode OptMode;
+
+ /// Current target configuration. Controls how the pass handles errors.
+ const TargetPassConfig *TPC;
+
+ /// Assign the register bank of each operand of \p MI.
+ /// \return True on success, false otherwise.
+ bool assignInstr(MachineInstr &MI);
+
+ /// Initialize the field members using \p MF.
+ void init(MachineFunction &MF);
+
+ /// Check if \p Reg is already assigned what is described by \p ValMapping.
+ /// \p OnlyAssign == true means that \p Reg just needs to be assigned a
+ /// register bank. I.e., no repairing is necessary to have the
+ /// assignment match.
+ bool assignmentMatch(Register Reg,
+ const RegisterBankInfo::ValueMapping &ValMapping,
+ bool &OnlyAssign) const;
+
+ /// Insert repairing code for \p Reg as specified by \p ValMapping.
+ /// The repairing placement is specified by \p RepairPt.
+ /// \p NewVRegs contains all the registers required to remap \p Reg.
+ /// In other words, the number of registers in NewVRegs must be equal
+ /// to ValMapping.BreakDown.size().
+ ///
+ /// The transformation could be sketched as:
+ /// \code
+ /// ... = op Reg
+ /// \endcode
+ /// Becomes
+ /// \code
+ /// <NewRegs> = COPY or extract Reg
+ /// ... = op Reg
+ /// \endcode
+ ///
+ /// and
+ /// \code
+ /// Reg = op ...
+ /// \endcode
+ /// Becomes
+ /// \code
+ /// Reg = op ...
+ /// Reg = COPY or build_sequence <NewRegs>
+ /// \endcode
+ ///
+ /// \pre NewVRegs.size() == ValMapping.BreakDown.size()
+ ///
+ /// \note The caller is supposed to do the rewriting of op if need be.
+ /// I.e., Reg = op ... => <NewRegs> = NewOp ...
+ ///
+ /// \return True if the repairing worked, false otherwise.
+ bool repairReg(MachineOperand &MO,
+ const RegisterBankInfo::ValueMapping &ValMapping,
+ RegBankSelect::RepairingPlacement &RepairPt,
+ const iterator_range<SmallVectorImpl<Register>::const_iterator>
+ &NewVRegs);
+
+ /// Return the cost of the instruction needed to map \p MO to \p ValMapping.
+ /// The cost is free of basic block frequencies.
+ /// \pre MO.isReg()
+ /// \pre MO is assigned to a register bank.
+ /// \pre ValMapping is a valid mapping for MO.
+ uint64_t
+ getRepairCost(const MachineOperand &MO,
+ const RegisterBankInfo::ValueMapping &ValMapping) const;
+
+ /// Find the best mapping for \p MI from \p PossibleMappings.
+ /// \return a reference to the best mapping in \p PossibleMappings.
+ const RegisterBankInfo::InstructionMapping &
+ findBestMapping(MachineInstr &MI,
+ RegisterBankInfo::InstructionMappings &PossibleMappings,
+ SmallVectorImpl<RepairingPlacement> &RepairPts);
+
+ /// Compute the cost of mapping \p MI with \p InstrMapping and
+ /// compute the repairing placement for such mapping in \p
+ /// RepairPts.
+ /// \p BestCost is used to specify when the cost becomes too high
+ /// and thus it is not worth computing the RepairPts. Moreover, if
+ /// \p BestCost == nullptr, the mapping cost is not computed at all.
+ MappingCost
+ computeMapping(MachineInstr &MI,
+ const RegisterBankInfo::InstructionMapping &InstrMapping,
+ SmallVectorImpl<RepairingPlacement> &RepairPts,
+ const MappingCost *BestCost = nullptr);
+
+ /// When \p RepairPt involves splitting to repair \p MO for the
+ /// given \p ValMapping, try to change the way we repair such that
+ /// the splitting is not required anymore.
+ ///
+ /// \pre \p RepairPt.hasSplit()
+ /// \pre \p MO == MO.getParent()->getOperand(\p RepairPt.getOpIdx())
+ /// \pre \p ValMapping is the mapping of \p MO for MO.getParent()
+ /// that implied \p RepairPt.
+ void tryAvoidingSplit(RegBankSelect::RepairingPlacement &RepairPt,
+ const MachineOperand &MO,
+ const RegisterBankInfo::ValueMapping &ValMapping) const;
+
+ /// Apply \p InstrMapping to \p MI. \p RepairPts represents the different
+ /// mapping actions that need to happen for the mapping to be
+ /// applied.
+ /// \return True if the mapping was applied successfully, false otherwise.
+ bool applyMapping(MachineInstr &MI,
+ const RegisterBankInfo::InstructionMapping &InstrMapping,
+ SmallVectorImpl<RepairingPlacement> &RepairPts);
+
+public:
+ /// Create a RegBankSelect pass with the specified \p RunningMode.
+ RegBankSelect(Mode RunningMode = Fast);
+
+ StringRef getPassName() const override { return "RegBankSelect"; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties()
+ .set(MachineFunctionProperties::Property::IsSSA)
+ .set(MachineFunctionProperties::Property::Legalized);
+ }
+
+ MachineFunctionProperties getSetProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::RegBankSelected);
+ }
+
+ MachineFunctionProperties getClearedProperties() const override {
+ return MachineFunctionProperties()
+ .set(MachineFunctionProperties::Property::NoPHIs);
+ }
+
+ /// Walk through \p MF and assign a register bank to every virtual register
+ /// that is still mapped to nothing.
+ /// The target needs to provide a RegisterBankInfo and in particular
+ /// override RegisterBankInfo::getInstrMapping.
+ ///
+ /// Simplified algo:
+ /// \code
+ /// RBI = MF.subtarget.getRegBankInfo()
+ /// MIRBuilder.setMF(MF)
+ /// for each bb in MF
+ /// for each inst in bb
+ /// MIRBuilder.setInstr(inst)
+ /// MappingCosts = RBI.getMapping(inst);
+ /// Idx = findIdxOfMinCost(MappingCosts)
+ /// CurRegBank = MappingCosts[Idx].RegBank
+ /// MRI.setRegBank(inst.getOperand(0).getReg(), CurRegBank)
+ /// for each argument in inst
+ /// if (CurRegBank != argument.RegBank)
+ /// ArgReg = argument.getReg()
+ /// Tmp = MRI.createNewVirtual(MRI.getSize(ArgReg), CurRegBank)
+ /// MIRBuilder.buildInstr(COPY, Tmp, ArgReg)
+ /// inst.getOperand(argument.getOperandNo()).setReg(Tmp)
+ /// \endcode
+ bool runOnMachineFunction(MachineFunction &MF) override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_REGBANKSELECT_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/RegisterBank.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/RegisterBank.h
new file mode 100644
index 0000000000..3ea1c4ab26
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/RegisterBank.h
@@ -0,0 +1,109 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//==-- llvm/CodeGen/GlobalISel/RegisterBank.h - Register Bank ----*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file declares the API of register banks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_REGBANK_H
+#define LLVM_CODEGEN_GLOBALISEL_REGBANK_H
+
+#include "llvm/ADT/BitVector.h"
+
+namespace llvm {
+// Forward declarations.
+class RegisterBankInfo;
+class raw_ostream;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+
+/// This class implements the register bank concept.
+/// Two instances of RegisterBank must have different IDs.
+/// This property is enforced by the RegisterBankInfo class.
+class RegisterBank {
+private:
+ unsigned ID;
+ const char *Name;
+ unsigned Size;
+ BitVector ContainedRegClasses;
+
+ /// Sentinel value used to recognize a register bank that has not been
+ /// properly initialized yet.
+ static const unsigned InvalidID;
+
+ /// Only the RegisterBankInfo can initialize RegisterBank properly.
+ friend RegisterBankInfo;
+
+public:
+ RegisterBank(unsigned ID, const char *Name, unsigned Size,
+ const uint32_t *CoveredClasses, unsigned NumRegClasses);
+
+ /// Get the identifier of this register bank.
+ unsigned getID() const { return ID; }
+
+ /// Get a user-friendly name for this register bank.
+ /// Should be used only for debugging purposes.
+ const char *getName() const { return Name; }
+
+ /// Get the maximal size in bits that fits in this register bank.
+ unsigned getSize() const { return Size; }
+
+ /// Check whether this instance is ready to be used.
+ bool isValid() const;
+
+ /// Check if this register bank is valid. In other words,
+ /// whether it has been properly constructed.
+ ///
+ /// \note This method does not check anything when assertions are disabled.
+ ///
+ /// \return True if the check was successful.
+ bool verify(const TargetRegisterInfo &TRI) const;
+
+ /// Check whether this register bank covers \p RC.
+ /// In other words, check if this register bank fully covers
+ /// the registers that \p RC contains.
+ /// \pre isValid()
+ bool covers(const TargetRegisterClass &RC) const;
+
+ /// Check whether \p OtherRB is the same as this.
+ bool operator==(const RegisterBank &OtherRB) const;
+ bool operator!=(const RegisterBank &OtherRB) const {
+ return !this->operator==(OtherRB);
+ }
+
+ /// Dump the register mask on dbgs() stream.
+ /// The dump is verbose.
+ void dump(const TargetRegisterInfo *TRI = nullptr) const;
+
+ /// Print the register mask on OS.
+ /// If IsForDebug is false, then only the name of the register bank
+ /// is printed. Otherwise, all the fields are printed.
+ /// TRI is then used to print the name of the register classes that
+ /// this register bank covers.
+ void print(raw_ostream &OS, bool IsForDebug = false,
+ const TargetRegisterInfo *TRI = nullptr) const;
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const RegisterBank &RegBank) {
+ RegBank.print(OS);
+ return OS;
+}
+} // End namespace llvm.
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
new file mode 100644
index 0000000000..0dbd1ecffe
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
@@ -0,0 +1,786 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/CodeGen/GlobalISel/RegisterBankInfo.h ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file declares the API for the register bank info.
+/// This API is responsible for handling the register banks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_REGISTERBANKINFO_H
+#define LLVM_CODEGEN_GLOBALISEL_REGISTERBANKINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/CodeGen/Register.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/LowLevelTypeImpl.h"
+#include <cassert>
+#include <initializer_list>
+#include <memory>
+
+namespace llvm {
+
+class MachineInstr;
+class MachineRegisterInfo;
+class raw_ostream;
+class RegisterBank;
+class TargetInstrInfo;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+
+/// Holds all the information related to register banks.
+class RegisterBankInfo {
+public:
+ /// Helper struct that represents how a value is partially mapped
+ /// into a register.
+ /// The StartIdx and Length represent what region of the original
+ /// value this partial mapping covers.
+ /// This can be represented as a mask of contiguous bits starting
+ /// at the StartIdx-th bit and spanning Length bits.
+ /// StartIdx is counted from the least significant bit.
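+ ///
+ /// For illustration (a sketch; GPRBank is a hypothetical bank), a 64-bit
+ /// value split into two 32-bit halves would be described as:
+ /// \code
+ /// PartialMapping Lo(/*StartIdx*/ 0, /*Length*/ 32, GPRBank);
+ /// PartialMapping Hi(/*StartIdx*/ 32, /*Length*/ 32, GPRBank);
+ /// assert(Hi.getHighBitIdx() == 63);
+ /// \endcode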
+ struct PartialMapping {
+ /// Number of bits at which this partial mapping starts in the
+ /// original value. The bits are counted from least significant
+ /// bits to most significant bits.
+ unsigned StartIdx;
+
+ /// Length of this mapping in bits. This is how many bits this
+ /// partial mapping covers in the original value:
+ /// from StartIdx to StartIdx + Length - 1.
+ unsigned Length;
+
+ /// Register bank where the partial value lives.
+ const RegisterBank *RegBank;
+
+ PartialMapping() = default;
+
+ /// Provide a shortcut for quickly building PartialMapping.
+ PartialMapping(unsigned StartIdx, unsigned Length,
+ const RegisterBank &RegBank)
+ : StartIdx(StartIdx), Length(Length), RegBank(&RegBank) {}
+
+ /// \return the index, in the original value, of the most
+ /// significant bit that this partial mapping covers.
+ unsigned getHighBitIdx() const { return StartIdx + Length - 1; }
+
+ /// Print this partial mapping on dbgs() stream.
+ void dump() const;
+
+ /// Print this partial mapping on \p OS.
+ void print(raw_ostream &OS) const;
+
+ /// Check that the Mask is compatible with the RegBank.
+ /// Indeed, if the RegBank cannot accommodate the "active bits" of the mask,
+ /// there is no way this mapping is valid.
+ ///
+ /// \note This method does not check anything when assertions are disabled.
+ ///
+ /// \return True if the check was successful.
+ bool verify() const;
+ };
+
+ /// Helper struct that represents how a value is mapped through
+ /// different register banks.
+ ///
+ /// \note: So far we do not have any users of the complex mappings
+ /// (mappings with more than one partial mapping), but when we do,
+ /// we will need to duplicate partial mappings.
+ /// The alternative could be to use an array of pointers of partial
+ /// mapping (i.e., PartialMapping **BreakDown) and duplicate the
+ /// pointers instead.
+ ///
+ /// E.g.,
+ /// Let's say we have a 32-bit add and a <2 x 32-bit> vadd. We can
+ /// expand the <2 x 32-bit> vadd into 2 x 32-bit adds.
+ ///
+ /// Currently the TableGen-like file would look like:
+ /// \code
+ /// PartialMapping[] = {
+ /// /*32-bit add*/ {0, 32, GPR}, // Scalar entry repeated for first
+ /// // vec elt.
+ /// /*2x32-bit add*/ {0, 32, GPR}, {32, 32, GPR},
+ /// /*<2x32-bit> vadd*/ {0, 64, VPR}
+ /// }; // PartialMapping duplicated.
+ ///
+ /// ValueMapping[] {
+ /// /*plain 32-bit add*/ {&PartialMapping[0], 1},
+ /// /*expanded vadd on 2xadd*/ {&PartialMapping[1], 2},
+ /// /*plain <2x32-bit> vadd*/ {&PartialMapping[3], 1}
+ /// };
+ /// \endcode
+ ///
+ /// With the array of pointer, we would have:
+ /// \code
+ /// PartialMapping[] = {
+ /// /*32-bit add lower */ { 0, 32, GPR},
+ /// /*32-bit add upper */ {32, 32, GPR},
+ /// /*<2x32-bit> vadd */ { 0, 64, VPR}
+ /// }; // No more duplication.
+ ///
+ /// BreakDowns[] = {
+ /// /*AddBreakDown*/ &PartialMapping[0],
+ /// /*2xAddBreakDown*/ &PartialMapping[0], &PartialMapping[1],
+ /// /*VAddBreakDown*/ &PartialMapping[2]
+ /// }; // Addresses of PartialMapping duplicated (smaller).
+ ///
+ /// ValueMapping[] {
+ /// /*plain 32-bit add*/ {&BreakDowns[0], 1},
+ /// /*expanded vadd on 2xadd*/ {&BreakDowns[1], 2},
+ /// /*plain <2x32-bit> vadd*/ {&BreakDowns[3], 1}
+ /// };
+ /// \endcode
+ ///
+ /// Given that a PartialMapping is small, the code size
+ /// impact would actually be a degradation. Moreover, compile time
+ /// would be hit by the additional indirection.
+ /// If PartialMapping gets bigger we may reconsider.
+ struct ValueMapping {
+ /// How the value is broken down between the different register banks.
+ const PartialMapping *BreakDown;
+
+ /// Number of partial mappings needed to break down this value.
+ unsigned NumBreakDowns;
+
+ /// The default constructor creates an invalid (isValid() == false)
+ /// instance.
+ ValueMapping() : ValueMapping(nullptr, 0) {}
+
+ /// Initialize a ValueMapping with the given parameters.
+ /// \p BreakDown needs to have a lifetime at least as long
+ /// as this instance.
+ ValueMapping(const PartialMapping *BreakDown, unsigned NumBreakDowns)
+ : BreakDown(BreakDown), NumBreakDowns(NumBreakDowns) {}
+
+ /// Iterators through the PartialMappings.
+ const PartialMapping *begin() const { return BreakDown; }
+ const PartialMapping *end() const { return BreakDown + NumBreakDowns; }
+
+ /// \return true if all partial mappings are the same size and register
+ /// bank.
+ bool partsAllUniform() const;
+
+ /// Check if this ValueMapping is valid.
+ bool isValid() const { return BreakDown && NumBreakDowns; }
+
+ /// Verify that this mapping makes sense for a value of
+ /// \p MeaningfulBitWidth.
+ /// \note This method does not check anything when assertions are disabled.
+ ///
+ /// \return True if the check was successful.
+ bool verify(unsigned MeaningfulBitWidth) const;
+
+ /// Print this on dbgs() stream.
+ void dump() const;
+
+ /// Print this on \p OS.
+ void print(raw_ostream &OS) const;
+ };
+
+ /// Helper class that represents how the value of an instruction may be
+ /// mapped and what is the related cost of such mapping.
+ class InstructionMapping {
+ /// Identifier of the mapping.
+ /// This is used to communicate between the target and the optimizers
+ /// which mapping should be realized.
+ unsigned ID = InvalidMappingID;
+
+ /// Cost of this mapping.
+ unsigned Cost = 0;
+
+ /// Mapping of all the operands.
+ const ValueMapping *OperandsMapping = nullptr;
+
+ /// Number of operands.
+ unsigned NumOperands = 0;
+
+ const ValueMapping &getOperandMapping(unsigned i) {
+ assert(i < getNumOperands() && "Out of bound operand");
+ return OperandsMapping[i];
+ }
+
+ public:
+ /// Constructor for the mapping of an instruction.
+ /// \p NumOperands must be equal to the number of all the operands of
+ /// the related instruction.
+ /// The rationale is that it is more efficient for the optimizers
+ /// to be able to assume that the mapping of the ith operand is
+ /// at the index i.
+ InstructionMapping(unsigned ID, unsigned Cost,
+ const ValueMapping *OperandsMapping,
+ unsigned NumOperands)
+ : ID(ID), Cost(Cost), OperandsMapping(OperandsMapping),
+ NumOperands(NumOperands) {
+ }
+
+ /// Default constructor.
+ /// Use this constructor to express that the mapping is invalid.
+ InstructionMapping() = default;
+
+ /// Get the cost.
+ unsigned getCost() const { return Cost; }
+
+ /// Get the ID.
+ unsigned getID() const { return ID; }
+
+ /// Get the number of operands.
+ unsigned getNumOperands() const { return NumOperands; }
+
+ /// Get the value mapping of the ith operand.
+ /// \pre The mapping for the ith operand has been set.
+ /// \pre The ith operand is a register.
+ const ValueMapping &getOperandMapping(unsigned i) const {
+ const ValueMapping &ValMapping =
+ const_cast<InstructionMapping *>(this)->getOperandMapping(i);
+ return ValMapping;
+ }
+
+ /// Set the mapping for all the operands.
+ /// In other words, OpdsMapping should hold at least getNumOperands
+ /// ValueMappings.
+ void setOperandsMapping(const ValueMapping *OpdsMapping) {
+ OperandsMapping = OpdsMapping;
+ }
+
+ /// Check whether this object is valid.
+ /// This is a lightweight check for an obviously wrong instance.
+ bool isValid() const {
+ return getID() != InvalidMappingID && OperandsMapping;
+ }
+
+ /// Verify that this mapping makes sense for \p MI.
+ /// \pre \p MI must be connected to a MachineFunction.
+ ///
+ /// \note This method does not check anything when assertions are disabled.
+ ///
+ /// \return True if the check was successful.
+ bool verify(const MachineInstr &MI) const;
+
+ /// Print this on dbgs() stream.
+ void dump() const;
+
+ /// Print this on \p OS.
+ void print(raw_ostream &OS) const;
+ };
+
+ /// Convenient type to represent the alternatives for mapping an
+ /// instruction.
+ /// \todo When we move to TableGen this should be an array ref.
+ using InstructionMappings = SmallVector<const InstructionMapping *, 4>;
+
+ /// Helper class used to get/create the virtual registers that will be used
+ /// to replace the MachineOperand when applying a mapping.
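+ ///
+ /// A simplified usage sketch, assuming \c RBI is the target's
+ /// RegisterBankInfo and \c InstrMapping has been verified for \c MI:
+ /// \code
+ /// RegisterBankInfo::OperandsMapper OpdMapper(MI, InstrMapping, MRI);
+ /// for (unsigned OpIdx = 0, E = MI.getNumOperands(); OpIdx != E; ++OpIdx)
+ ///   if (MI.getOperand(OpIdx).isReg() && MI.getOperand(OpIdx).getReg())
+ ///     OpdMapper.createVRegs(OpIdx);
+ /// RBI.applyMapping(OpdMapper);
+ /// \endcode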
+ class OperandsMapper {
+ /// The OpIdx-th cell contains the index in NewVRegs where the VRegs of the
+ /// OpIdx-th operand start. -1 means we do not have such a mapping yet.
+ /// Note: We use a SmallVector to avoid heap allocation for most cases.
+ SmallVector<int, 8> OpToNewVRegIdx;
+
+ /// Hold the registers that will be used to map MI with InstrMapping.
+ SmallVector<Register, 8> NewVRegs;
+
+ /// Current MachineRegisterInfo, used to create new virtual registers.
+ MachineRegisterInfo &MRI;
+
+ /// Instruction being remapped.
+ MachineInstr &MI;
+
+ /// New mapping of the instruction.
+ const InstructionMapping &InstrMapping;
+
+ /// Constant value identifying that the index in OpToNewVRegIdx
+ /// for an operand has not been set yet.
+ static const int DontKnowIdx;
+
+ /// Get the range in NewVRegs to store all the partial
+ /// values for the \p OpIdx-th operand.
+ ///
+ /// \return The iterator range for the space created.
+ ///
+ /// \pre getMI().getOperand(OpIdx).isReg()
+ iterator_range<SmallVectorImpl<Register>::iterator>
+ getVRegsMem(unsigned OpIdx);
+
+ /// Get the end iterator for a range starting at \p StartIdx and
+ /// spanning \p NumVal values in NewVRegs.
+ /// \pre StartIdx + NumVal <= NewVRegs.size()
+ SmallVectorImpl<Register>::const_iterator
+ getNewVRegsEnd(unsigned StartIdx, unsigned NumVal) const;
+ SmallVectorImpl<Register>::iterator getNewVRegsEnd(unsigned StartIdx,
+ unsigned NumVal);
+
+ public:
+ /// Create an OperandsMapper that will hold the information to apply \p
+ /// InstrMapping to \p MI.
+ /// \pre InstrMapping.verify(MI)
+ OperandsMapper(MachineInstr &MI, const InstructionMapping &InstrMapping,
+ MachineRegisterInfo &MRI);
+
+ /// \name Getters.
+ /// @{
+ /// The MachineInstr being remapped.
+ MachineInstr &getMI() const { return MI; }
+
+ /// The final mapping of the instruction.
+ const InstructionMapping &getInstrMapping() const { return InstrMapping; }
+
+ /// The MachineRegisterInfo we used to realize the mapping.
+ MachineRegisterInfo &getMRI() const { return MRI; }
+ /// @}
+
+ /// Create as many new virtual registers as needed for the mapping of the \p
+ /// OpIdx-th operand.
+ /// The number of registers is determined by the number of breakdowns for the
+ /// related operand in the instruction mapping.
+ /// The type of the new registers is a plain scalar of the right size.
+ /// The proper type is expected to be set when the mapping is applied to
+ /// the instruction(s) that realizes the mapping.
+ ///
+ /// \pre getMI().getOperand(OpIdx).isReg()
+ ///
+ /// \post All the partial mapping of the \p OpIdx-th operand have been
+ /// assigned a new virtual register.
+ void createVRegs(unsigned OpIdx);
+
+ /// Set the virtual register of the \p PartialMapIdx-th partial mapping of
+ /// the OpIdx-th operand to \p NewVReg.
+ ///
+ /// \pre getMI().getOperand(OpIdx).isReg()
+ /// \pre getInstrMapping().getOperandMapping(OpIdx).BreakDown.size() >
+ /// PartialMapIdx
+ /// \pre NewReg != 0
+ ///
+ /// \post the \p PartialMapIdx-th register of the value mapping of the \p
+ /// OpIdx-th operand has been set.
+ void setVRegs(unsigned OpIdx, unsigned PartialMapIdx, Register NewVReg);
+
+ /// Get all the virtual registers required to map the \p OpIdx-th operand of
+ /// the instruction.
+ ///
+ /// This returns an empty range when createVRegs or setVRegs has not been
+ /// called.
+ /// The iterator may be invalidated by a call to setVRegs or createVRegs.
+ ///
+ /// When \p ForDebug is true, we will not check that the list of new virtual
+ /// registers does not contain uninitialized values.
+ ///
+ /// \pre getMI().getOperand(OpIdx).isReg()
+ /// \pre ForDebug || all partial mappings have been assigned a register
+ iterator_range<SmallVectorImpl<Register>::const_iterator>
+ getVRegs(unsigned OpIdx, bool ForDebug = false) const;
+
+ /// Print this operands mapper on dbgs() stream.
+ void dump() const;
+
+ /// Print this operands mapper on \p OS stream.
+ void print(raw_ostream &OS, bool ForDebug = false) const;
+ };
+
+protected:
+ /// Hold the set of supported register banks.
+ RegisterBank **RegBanks;
+
+ /// Total number of register banks.
+ unsigned NumRegBanks;
+
+ /// Keep dynamically allocated PartialMapping in a separate map.
+ /// This shouldn't be needed when everything gets TableGen'ed.
+ mutable DenseMap<unsigned, std::unique_ptr<const PartialMapping>>
+ MapOfPartialMappings;
+
+ /// Keep dynamically allocated ValueMapping in a separate map.
+ /// This shouldn't be needed when everything gets TableGen'ed.
+ mutable DenseMap<unsigned, std::unique_ptr<const ValueMapping>>
+ MapOfValueMappings;
+
+ /// Keep dynamically allocated array of ValueMapping in a separate map.
+ /// This shouldn't be needed when everything gets TableGen'ed.
+ mutable DenseMap<unsigned, std::unique_ptr<ValueMapping[]>>
+ MapOfOperandsMappings;
+
+ /// Keep dynamically allocated InstructionMapping in a separate map.
+ /// This shouldn't be needed when everything gets TableGen'ed.
+ mutable DenseMap<unsigned, std::unique_ptr<const InstructionMapping>>
+ MapOfInstructionMappings;
+
+ /// Getting the minimal register class of a physreg is expensive.
+ /// Cache this information as we get it.
+ mutable DenseMap<unsigned, const TargetRegisterClass *> PhysRegMinimalRCs;
+
+ /// Create a RegisterBankInfo that can accommodate up to \p NumRegBanks
+ /// RegisterBank instances.
+ RegisterBankInfo(RegisterBank **RegBanks, unsigned NumRegBanks);
+
+ /// This constructor is meaningless.
+ /// It just provides a default constructor that can be used at link time
+ /// when GlobalISel is not built.
+ /// That way, targets can still inherit from this class without doing
+ /// crazy gymnastics to avoid link-time failures.
+ /// \note That works because the constructor is inlined.
+ RegisterBankInfo() {
+ llvm_unreachable("This constructor should not be executed");
+ }
+
+ /// Get the register bank identified by \p ID.
+ RegisterBank &getRegBank(unsigned ID) {
+ assert(ID < getNumRegBanks() && "Accessing an unknown register bank");
+ return *RegBanks[ID];
+ }
+
+ /// Get the MinimalPhysRegClass for Reg.
+ /// \pre Reg is a physical register.
+ const TargetRegisterClass &
+ getMinimalPhysRegClass(Register Reg, const TargetRegisterInfo &TRI) const;
+
+ /// Try to get the mapping of \p MI.
+ /// See getInstrMapping for more details on what a mapping represents.
+ ///
+ /// Unlike getInstrMapping the returned InstructionMapping may be invalid
+ /// (isValid() == false).
+ /// This means that the target independent code is not smart enough
+ /// to get the mapping of \p MI and thus, the target has to provide the
+ /// information for \p MI.
+ ///
+ /// This implementation is able to get the mapping of:
+ /// - Target specific instructions by looking at the encoding constraints.
+ /// - Any instruction if all the register operands have already been assigned
+ /// a register, a register class, or a register bank.
+ /// - Copies and phis if at least one of the operands has been assigned a
+ /// register, a register class, or a register bank.
+ /// In other words, this method will likely fail to find a mapping for
+ /// any generic opcode that has not been lowered by target specific code.
+ const InstructionMapping &getInstrMappingImpl(const MachineInstr &MI) const;
+
+ /// Get the uniquely generated PartialMapping for the
+ /// given arguments.
+ const PartialMapping &getPartialMapping(unsigned StartIdx, unsigned Length,
+ const RegisterBank &RegBank) const;
+
+ /// \name Methods to get a uniquely generated ValueMapping.
+ /// @{
+
+ /// The most common ValueMapping consists of a single PartialMapping.
+ /// Provide a dedicated method for that.
+ const ValueMapping &getValueMapping(unsigned StartIdx, unsigned Length,
+ const RegisterBank &RegBank) const;
+
+ /// Get the ValueMapping for the given arguments.
+ const ValueMapping &getValueMapping(const PartialMapping *BreakDown,
+ unsigned NumBreakDowns) const;
+ /// @}
+
+ /// \name Methods to get a uniquely generated array of ValueMapping.
+ /// @{
+
+ /// Get the uniquely generated array of ValueMapping for the
+ /// elements between \p Begin and \p End.
+ ///
+ /// Elements that are nullptr will be replaced by
+ /// invalid ValueMapping (ValueMapping::isValid == false).
+ ///
+ /// \pre The pointers on ValueMapping between \p Begin and \p End
+ /// must uniquely identify a ValueMapping. Otherwise, there is no
+ /// guarantee that the return instance will be unique, i.e., another
+ /// OperandsMapping could have the same content.
+ template <typename Iterator>
+ const ValueMapping *getOperandsMapping(Iterator Begin, Iterator End) const;
+
+ /// Get the uniquely generated array of ValueMapping for the
+ /// elements of \p OpdsMapping.
+ ///
+ /// Elements of \p OpdsMapping that are nullptr will be replaced by
+ /// invalid ValueMapping (ValueMapping::isValid == false).
+ const ValueMapping *getOperandsMapping(
+ const SmallVectorImpl<const ValueMapping *> &OpdsMapping) const;
+
+ /// Get the uniquely generated array of ValueMapping for the
+ /// given arguments.
+ ///
+ /// Arguments that are nullptr will be replaced by invalid
+ /// ValueMapping (ValueMapping::isValid == false).
+ const ValueMapping *getOperandsMapping(
+ std::initializer_list<const ValueMapping *> OpdsMapping) const;
+ /// @}
+
+ /// \name Methods to get a uniquely generated InstructionMapping.
+ /// @{
+
+private:
+ /// Method to get a uniquely generated InstructionMapping.
+ const InstructionMapping &
+ getInstructionMappingImpl(bool IsInvalid, unsigned ID = InvalidMappingID,
+ unsigned Cost = 0,
+ const ValueMapping *OperandsMapping = nullptr,
+ unsigned NumOperands = 0) const;
+
+public:
+ /// Method to get a uniquely generated InstructionMapping.
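+ /// For instance, a target's getInstrMapping could end with (a sketch;
+ /// OpdsMapping stands for an operands mapping built elsewhere):
+ /// \code
+ /// return getInstructionMapping(DefaultMappingID, /*Cost*/ 1, OpdsMapping,
+ ///                              MI.getNumOperands());
+ /// \endcode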
+ const InstructionMapping &
+ getInstructionMapping(unsigned ID, unsigned Cost,
+ const ValueMapping *OperandsMapping,
+ unsigned NumOperands) const {
+ return getInstructionMappingImpl(/*IsInvalid*/ false, ID, Cost,
+ OperandsMapping, NumOperands);
+ }
+
+ /// Method to get a uniquely generated invalid InstructionMapping.
+ const InstructionMapping &getInvalidInstructionMapping() const {
+ return getInstructionMappingImpl(/*IsInvalid*/ true);
+ }
+ /// @}
+
+ /// Get the register bank for the \p OpIdx-th operand of \p MI from
+ /// the encoding constraints, if any.
+ ///
+ /// \return A register bank that covers the register class of the
+ /// related encoding constraints or nullptr if \p MI did not provide
+ /// enough information to deduce it.
+ const RegisterBank *
+ getRegBankFromConstraints(const MachineInstr &MI, unsigned OpIdx,
+ const TargetInstrInfo &TII,
+ const MachineRegisterInfo &MRI) const;
+
+ /// Helper method to apply something that is like the default mapping.
+ /// Basically, that means that \p OpdMapper.getMI() is left untouched
+ /// aside from the reassignment of the register operands that have been
+ /// remapped.
+ ///
+ /// The type of all the new registers that have been created by the
+ /// mapper are properly remapped to the type of the original registers
+ /// they replace. In other words, the semantic of the instruction does
+ /// not change, only the register banks.
+ ///
+ /// If the mapping of one of the operands spans several registers, this
+ /// method will abort as this is not like a default mapping anymore.
+ ///
+ /// \pre For OpIdx in {0..\p OpdMapper.getMI().getNumOperands())
+ /// the range OpdMapper.getVRegs(OpIdx) is empty or of size 1.
+ static void applyDefaultMapping(const OperandsMapper &OpdMapper);
+
+ /// See ::applyMapping.
+ virtual void applyMappingImpl(const OperandsMapper &OpdMapper) const {
+ llvm_unreachable("The target has to implement that part");
+ }
+
+public:
+ virtual ~RegisterBankInfo() = default;
+
+ /// Get the register bank identified by \p ID.
+ const RegisterBank &getRegBank(unsigned ID) const {
+ return const_cast<RegisterBankInfo *>(this)->getRegBank(ID);
+ }
+
+ /// Get the register bank of \p Reg.
+ /// If Reg has not been assigned a register, a register class,
+ /// or a register bank, then this returns nullptr.
+ ///
+ /// \pre Reg != 0 (NoRegister)
+ const RegisterBank *getRegBank(Register Reg, const MachineRegisterInfo &MRI,
+ const TargetRegisterInfo &TRI) const;
+
+ /// Get the total number of register banks.
+ unsigned getNumRegBanks() const { return NumRegBanks; }
+
+ /// Get a register bank that covers \p RC.
+ ///
+ /// \pre \p RC is a user-defined register class (as opposed to one
+ /// generated by TableGen).
+ ///
+ /// \note The mapping RC -> RegBank could be built while adding the
+ /// coverage for the register banks. However, we do not do it, because,
+ /// at least for now, we only need this information for register classes
+ /// that are used in the description of instructions. In other words,
+ /// there are just a handful of them and we do not want to waste space.
+ ///
+ /// \todo This should be TableGen'ed.
+ virtual const RegisterBank &
+ getRegBankFromRegClass(const TargetRegisterClass &RC, LLT Ty) const {
+ llvm_unreachable("The target must override this method");
+ }
+
+ /// Get the cost of a copy from \p B to \p A, or put differently,
+ /// get the cost of A = COPY B. Since register banks may cover
+ /// different size, \p Size specifies what will be the size in bits
+ /// that will be copied around.
+ ///
+ /// \note Since this is a copy, both registers have the same size.
+ virtual unsigned copyCost(const RegisterBank &A, const RegisterBank &B,
+ unsigned Size) const {
+ // Optimistically assume that copies are coalesced. I.e., when
+ // they are on the same bank, they are free.
+ // Otherwise assume a non-zero cost of 1. The targets are supposed
+ // to override that properly anyway if they care.
+ return &A != &B;
+ }
+
+ /// \returns true if emitting a copy from \p Src to \p Dst is impossible.
+ bool cannotCopy(const RegisterBank &Dst, const RegisterBank &Src,
+ unsigned Size) const {
+ return copyCost(Dst, Src, Size) == std::numeric_limits<unsigned>::max();
+ }
+
+ /// Get the cost of using \p ValMapping to decompose a register. This is
+ /// similar to ::copyCost, except for cases where multiple copy-like
+ /// operations need to be inserted. If the register is used as a source
+ /// operand and already has a bank assigned, \p CurBank is non-null.
+ virtual unsigned getBreakDownCost(const ValueMapping &ValMapping,
+ const RegisterBank *CurBank = nullptr) const {
+ return std::numeric_limits<unsigned>::max();
+ }
+
+ /// Constrain the (possibly generic) virtual register \p Reg to \p RC.
+ ///
+ /// \pre \p Reg is a virtual register that either has a bank or a class.
+ /// \returns The constrained register class, or nullptr if there is none.
+ /// \note This is a generic variant of MachineRegisterInfo::constrainRegClass
+ /// \note Use MachineRegisterInfo::constrainRegAttrs instead for any non-isel
+ /// purpose, including non-select passes of GlobalISel
+ static const TargetRegisterClass *
+ constrainGenericRegister(Register Reg, const TargetRegisterClass &RC,
+ MachineRegisterInfo &MRI);
+
+ /// Identifier used when the related instruction mapping instance
+ /// is generated by target independent code.
+ /// Make sure not to use that identifier to avoid possible collisions.
+ static const unsigned DefaultMappingID;
+
+ /// Identifier used when the related instruction mapping instance
+ /// is generated by the default constructor.
+ /// Make sure not to use that identifier.
+ static const unsigned InvalidMappingID;
+
+ /// Get the mapping of the different operands of \p MI
+ /// on the register bank.
+ /// This mapping should be the direct translation of \p MI.
+ /// In other words, when \p MI is mapped with the returned mapping,
+ /// only the register banks of the operands of \p MI need to be updated.
+ /// In particular, neither the opcode nor the type of \p MI needs to be
+ /// updated for this direct mapping.
+ ///
+ /// The target independent implementation gives a mapping based on
+ /// the register classes for the target specific opcode.
+ /// It uses the ID RegisterBankInfo::DefaultMappingID for that mapping.
+ /// Make sure you do not use that ID for the alternative mapping
+ /// for MI. See getInstrAlternativeMappings for the alternative
+ /// mappings.
+ ///
+ /// For instance, if \p MI is a vector add, the mapping should
+ /// not be a scalarization of the add.
+ ///
+ /// \post returnedVal.verify(MI).
+ ///
+ /// \note If returnedVal does not verify MI, this would probably mean
+ /// that the target does not support that instruction.
+ virtual const InstructionMapping &
+ getInstrMapping(const MachineInstr &MI) const;
+
+ /// Get the alternative mappings for \p MI.
+ /// Alternative in the sense different from getInstrMapping.
+ virtual InstructionMappings
+ getInstrAlternativeMappings(const MachineInstr &MI) const;
+
+ /// Get the possible mapping for \p MI.
+ /// A mapping defines where the different operands may live and at what cost.
+ /// For instance, let us consider:
+ /// v0(16) = G_ADD <2 x i8> v1, v2
+ /// The possible mapping could be:
+ ///
+ /// {/*ID*/VectorAdd, /*Cost*/1, /*v0*/{(0xFFFF, VPR)}, /*v1*/{(0xFFFF, VPR)},
+ /// /*v2*/{(0xFFFF, VPR)}}
+ /// {/*ID*/ScalarAddx2, /*Cost*/2, /*v0*/{(0x00FF, GPR),(0xFF00, GPR)},
+ /// /*v1*/{(0x00FF, GPR),(0xFF00, GPR)},
+ /// /*v2*/{(0x00FF, GPR),(0xFF00, GPR)}}
+ ///
+ /// \note The first alternative of the returned mapping should be the
+ /// direct translation of \p MI's current form.
+ ///
+ /// \post !returnedVal.empty().
+ InstructionMappings getInstrPossibleMappings(const MachineInstr &MI) const;
+
+ /// Apply \p OpdMapper.getInstrMapping() to \p OpdMapper.getMI().
+ /// After this call \p OpdMapper.getMI() may not be valid anymore.
+ /// \p OpdMapper.getInstrMapping().getID() carries the information of
+ /// what has been chosen to map \p OpdMapper.getMI(). This ID is set
+ /// by the various getInstrXXXMapping methods.
+ ///
+ /// Therefore, getting the mapping and applying it should be kept in
+ /// sync.
+ void applyMapping(const OperandsMapper &OpdMapper) const {
+ // The only mapping we know how to handle is the default mapping.
+ if (OpdMapper.getInstrMapping().getID() == DefaultMappingID)
+ return applyDefaultMapping(OpdMapper);
+ // For other mapping, the target needs to do the right thing.
+ // If that means calling applyDefaultMapping, fine, but this
+ // must be explicitly stated.
+ applyMappingImpl(OpdMapper);
+ }
+
+ /// Get the size in bits of \p Reg.
+ /// Utility method to get the size of any register. Unlike
+ /// MachineRegisterInfo::getSize, the register does not need to be a
+ /// virtual register.
+ ///
+ /// \pre \p Reg != 0 (NoRegister).
+ unsigned getSizeInBits(Register Reg, const MachineRegisterInfo &MRI,
+ const TargetRegisterInfo &TRI) const;
+
+ /// Check that the information held by this instance makes sense for the
+ /// given \p TRI.
+ ///
+ /// \note This method does not check anything when assertions are disabled.
+ ///
+ /// \return True if the check was successful.
+ bool verify(const TargetRegisterInfo &TRI) const;
+};
+
+inline raw_ostream &
+operator<<(raw_ostream &OS,
+ const RegisterBankInfo::PartialMapping &PartMapping) {
+ PartMapping.print(OS);
+ return OS;
+}
+
+inline raw_ostream &
+operator<<(raw_ostream &OS, const RegisterBankInfo::ValueMapping &ValMapping) {
+ ValMapping.print(OS);
+ return OS;
+}
+
+inline raw_ostream &
+operator<<(raw_ostream &OS,
+ const RegisterBankInfo::InstructionMapping &InstrMapping) {
+ InstrMapping.print(OS);
+ return OS;
+}
+
+inline raw_ostream &
+operator<<(raw_ostream &OS, const RegisterBankInfo::OperandsMapper &OpdMapper) {
+ OpdMapper.print(OS, /*ForDebug*/ false);
+ return OS;
+}
+
+/// Hashing function for PartialMapping.
+/// It is required for the hashing of ValueMapping.
+hash_code hash_value(const RegisterBankInfo::PartialMapping &PartMapping);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_REGISTERBANKINFO_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Utils.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Utils.h
new file mode 100644
index 0000000000..d07bcae8e7
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -0,0 +1,293 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//==-- llvm/CodeGen/GlobalISel/Utils.h ---------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file declares the API of helper functions used throughout the
+/// GlobalISel pipeline.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_UTILS_H
+#define LLVM_CODEGEN_GLOBALISEL_UTILS_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/Register.h"
+#include "llvm/Support/Alignment.h"
+#include "llvm/Support/LowLevelTypeImpl.h"
+#include <cstdint>
+
+namespace llvm {
+
+class AnalysisUsage;
+class GISelKnownBits;
+class MachineFunction;
+class MachineInstr;
+class MachineOperand;
+class MachineOptimizationRemarkEmitter;
+class MachineOptimizationRemarkMissed;
+struct MachinePointerInfo;
+class MachineRegisterInfo;
+class MCInstrDesc;
+class RegisterBankInfo;
+class TargetInstrInfo;
+class TargetLowering;
+class TargetPassConfig;
+class TargetRegisterInfo;
+class TargetRegisterClass;
+class ConstantFP;
+class APFloat;
+
+/// Try to constrain Reg to the specified register class. If this fails,
+/// create a new virtual register in the correct class.
+///
+/// \return The virtual register constrained to the right register class.
+Register constrainRegToClass(MachineRegisterInfo &MRI,
+ const TargetInstrInfo &TII,
+ const RegisterBankInfo &RBI, Register Reg,
+ const TargetRegisterClass &RegClass);
+
+/// Constrain the Register operand OpIdx, so that it is now constrained to the
+/// TargetRegisterClass passed as an argument (RegClass).
+/// If this fails, create a new virtual register in the correct class and insert
+/// a COPY before \p InsertPt if it is a use or after if it is a definition.
+/// In both cases, the function also updates the register of RegMO. The debug
+/// location of \p InsertPt is used for the new copy.
+///
+/// \return The virtual register constrained to the right register class.
+Register constrainOperandRegClass(const MachineFunction &MF,
+ const TargetRegisterInfo &TRI,
+ MachineRegisterInfo &MRI,
+ const TargetInstrInfo &TII,
+ const RegisterBankInfo &RBI,
+ MachineInstr &InsertPt,
+ const TargetRegisterClass &RegClass,
+ MachineOperand &RegMO);
+
+/// Try to constrain Reg so that it is usable by argument OpIdx of the provided
+/// MCInstrDesc \p II. If this fails, create a new virtual register in the
+/// correct class and insert a COPY before \p InsertPt if it is a use or after
+/// if it is a definition. In both cases, the function also updates the register
+/// of RegMO.
+/// This is equivalent to constrainOperandRegClass(..., RegClass, ...)
+/// with RegClass obtained from the MCInstrDesc. The debug location of \p
+/// InsertPt is used for the new copy.
+///
+/// \return The virtual register constrained to the right register class.
+Register constrainOperandRegClass(const MachineFunction &MF,
+ const TargetRegisterInfo &TRI,
+ MachineRegisterInfo &MRI,
+ const TargetInstrInfo &TII,
+ const RegisterBankInfo &RBI,
+ MachineInstr &InsertPt, const MCInstrDesc &II,
+ MachineOperand &RegMO, unsigned OpIdx);
+
+/// Mutate the newly-selected instruction \p I to constrain its (possibly
+/// generic) virtual register operands to the instruction's register class.
+/// This could involve inserting COPYs before (for uses) or after (for defs).
+/// This requires the number of operands to match the instruction description.
+/// \returns whether operand regclass constraining succeeded.
+///
+// FIXME: Not all instructions have the same number of operands. We should
+// probably expose a constrain helper per operand and let the target selector
+// constrain individual registers, like fast-isel.
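+/// A typical use right after emitting a selected instruction might look like
+/// the following sketch (opcode and registers are placeholders):
+/// \code
+/// auto MIB = BuildMI(MBB, InsertPt, DL, TII.get(TargetOpc), DstReg)
+///                .addUse(SrcReg);
+/// if (!constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI))
+///   return false; // Could not constrain; selection failed.
+/// \endcode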
+bool constrainSelectedInstRegOperands(MachineInstr &I,
+ const TargetInstrInfo &TII,
+ const TargetRegisterInfo &TRI,
+ const RegisterBankInfo &RBI);
+
+/// Check if DstReg can be replaced with SrcReg depending on the register
+/// constraints.
+bool canReplaceReg(Register DstReg, Register SrcReg, MachineRegisterInfo &MRI);
+
+/// Check whether an instruction \p MI is dead: it only defines dead virtual
+/// registers, and doesn't have other side effects.
+bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI);
+
+/// Report an ISel error as a missed optimization remark to the LLVMContext's
+/// diagnostic stream. Set the FailedISel MachineFunction property.
+void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
+ MachineOptimizationRemarkEmitter &MORE,
+ MachineOptimizationRemarkMissed &R);
+
+void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
+ MachineOptimizationRemarkEmitter &MORE,
+ const char *PassName, StringRef Msg,
+ const MachineInstr &MI);
+
+/// Report an ISel warning as a missed optimization remark to the LLVMContext's
+/// diagnostic stream.
+void reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
+ MachineOptimizationRemarkEmitter &MORE,
+ MachineOptimizationRemarkMissed &R);
+
+/// If \p VReg is defined by a G_CONSTANT, return the corresponding value.
+Optional<APInt> getConstantVRegVal(Register VReg,
+ const MachineRegisterInfo &MRI);
+
+/// If \p VReg is defined by a G_CONSTANT whose value fits in an int64_t,
+/// returns it.
+Optional<int64_t> getConstantVRegSExtVal(Register VReg,
+ const MachineRegisterInfo &MRI);
+
+/// Simple struct used to hold a constant integer value and a virtual
+/// register.
+struct ValueAndVReg {
+ APInt Value;
+ Register VReg;
+};
+/// If \p VReg is defined by a statically evaluable chain of
+/// instructions rooted on a G_F/CONSTANT (\p LookThroughInstrs == true)
+/// and that constant fits in int64_t, returns its value as well as the
+/// virtual register defined by this G_F/CONSTANT.
+/// When \p LookThroughInstrs == false this function behaves like
+/// getConstantVRegVal.
+/// When \p HandleFConstants == false the function bails on G_FCONSTANTs.
+/// When \p LookThroughAnyExt == true the function treats G_ANYEXT the same as
+/// G_SEXT.
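+///
+/// A typical use is a sketch like:
+/// \code
+/// if (auto VRegAndVal = getConstantVRegValWithLookThrough(Reg, MRI))
+///   int64_t Imm = VRegAndVal->Value.getSExtValue();
+/// \endcode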
+Optional<ValueAndVReg>
+getConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI,
+ bool LookThroughInstrs = true,
+ bool HandleFConstants = true,
+ bool LookThroughAnyExt = false);
+const ConstantFP* getConstantFPVRegVal(Register VReg,
+ const MachineRegisterInfo &MRI);
+
+/// See if Reg is defined by a single def instruction that is
+/// Opcode. Also try to do trivial folding if it's a COPY with
+/// same types. Returns null otherwise.
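+/// For example, a sketch of looking through a truncate:
+/// \code
+/// if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_TRUNC, Reg, MRI))
+///   Register Src = DefMI->getOperand(1).getReg();
+/// \endcode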
+MachineInstr *getOpcodeDef(unsigned Opcode, Register Reg,
+ const MachineRegisterInfo &MRI);
+
+/// Simple struct used to hold a Register value and the instruction which
+/// defines it.
+struct DefinitionAndSourceRegister {
+ MachineInstr *MI;
+ Register Reg;
+};
+
+/// Find the def instruction for \p Reg and the underlying value Register,
+/// folding away any copies.
+Optional<DefinitionAndSourceRegister>
+getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);
+
+/// Find the def instruction for \p Reg, folding away any trivial copies. May
+/// return nullptr if \p Reg is not a generic virtual register.
+MachineInstr *getDefIgnoringCopies(Register Reg,
+ const MachineRegisterInfo &MRI);
+
+/// Find the source register for \p Reg, folding away any trivial copies. It
+/// will be an output register of the instruction that getDefIgnoringCopies
+/// returns. May return an invalid register if \p Reg is not a generic virtual
+/// register.
+Register getSrcRegIgnoringCopies(Register Reg,
+ const MachineRegisterInfo &MRI);
+
+/// Returns an APFloat from Val converted to the appropriate size.
+APFloat getAPFloatFromSize(double Val, unsigned Size);
+
+/// Modify analysis usage so it preserves passes required for the SelectionDAG
+/// fallback.
+void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU);
+
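+/// Tries to constant fold a binary operation \p Opcode on \p Op1 and \p Op2,
+/// assuming both registers are defined by G_CONSTANTs; returns None otherwise.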
+Optional<APInt> ConstantFoldBinOp(unsigned Opcode, const Register Op1,
+ const Register Op2,
+ const MachineRegisterInfo &MRI);
+
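+/// Tries to constant fold an extension-like operation \p Opcode (e.g.
+/// G_SEXT_INREG) of \p Op1 with immediate \p Imm; returns None when \p Op1 is
+/// not defined by a G_CONSTANT.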
+Optional<APInt> ConstantFoldExtOp(unsigned Opcode, const Register Op1,
+ uint64_t Imm, const MachineRegisterInfo &MRI);
+
+/// Test if the given value is known to have exactly one bit set. This differs
+/// from computeKnownBits in that it doesn't necessarily determine which bit is
+/// set.
+bool isKnownToBeAPowerOfTwo(Register Val, const MachineRegisterInfo &MRI,
+ GISelKnownBits *KnownBits = nullptr);
+
+/// Returns true if \p Val can be assumed to never be a NaN. If \p SNaN is true,
+/// this returns if \p Val can be assumed to never be a signaling NaN.
+bool isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
+ bool SNaN = false);
+
+/// Returns true if \p Val can be assumed to never be a signaling NaN.
+inline bool isKnownNeverSNaN(Register Val, const MachineRegisterInfo &MRI) {
+ return isKnownNeverNaN(Val, MRI, true);
+}
+
+Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO);
+
+/// Return a virtual register corresponding to the incoming argument register
+/// \p PhysReg. This register is expected to have class \p RC, and optional
+/// type \p RegTy. This assumes all references to the register will use the
+/// same type.
+///
+/// If there is an existing live-in argument register, it will be returned.
+/// This will also ensure there is a valid copy.
+Register getFunctionLiveInPhysReg(MachineFunction &MF, const TargetInstrInfo &TII,
+ MCRegister PhysReg,
+ const TargetRegisterClass &RC,
+ LLT RegTy = LLT());
+
+/// Return the least common multiple type of \p OrigTy and \p TargetTy, by
+/// changing the number of vector elements or the scalar bitwidth. The intent
+/// is that a G_MERGE_VALUES, G_BUILD_VECTOR, or G_CONCAT_VECTORS can be
+/// constructed from \p OrigTy elements, and unmerged into \p TargetTy.
+LLVM_READNONE
+LLT getLCMType(LLT OrigTy, LLT TargetTy);
+
+/// Return a type where the total size is the greatest common divisor of \p
+/// OrigTy and \p TargetTy. This will try to either change the number of vector
+/// elements, or bitwidth of scalars. The intent is the result type can be used
+/// as the result of a G_UNMERGE_VALUES from \p OrigTy, and then some
+/// combination of G_MERGE_VALUES, G_BUILD_VECTOR and G_CONCAT_VECTORS (possibly
+/// with intermediate casts) can re-form \p TargetTy.
+///
+/// If these are vectors with different element types, this will try to produce
+/// a vector with a compatible total size, but the element type of \p OrigTy. If
+/// this can't be satisfied, this will produce a scalar smaller than the
+/// original vector elements.
+///
+/// In the worst case, this returns LLT::scalar(1).
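+///
+/// For a purely scalar illustration (vector cases are subtler):
+/// \code
+/// LLT S32 = LLT::scalar(32), S64 = LLT::scalar(64);
+/// LLT Lcm = getLCMType(S32, S64); // s64: two s32 pieces merge into it.
+/// LLT Gcd = getGCDType(S64, S32); // s32: an s64 unmerges into two s32.
+/// \endcode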
+LLVM_READNONE
+LLT getGCDType(LLT OrigTy, LLT TargetTy);
+
+/// \returns The splat index of a G_SHUFFLE_VECTOR \p MI when \p MI is a splat.
+/// If \p MI is not a splat, returns None.
+Optional<int> getSplatIndex(MachineInstr &MI);
+
+/// Returns a scalar constant of a G_BUILD_VECTOR splat if it exists.
+Optional<int64_t> getBuildVectorConstantSplat(const MachineInstr &MI,
+ const MachineRegisterInfo &MRI);
+
+/// Return true if the specified instruction is a G_BUILD_VECTOR or
+/// G_BUILD_VECTOR_TRUNC where all of the elements are 0 or undef.
+bool isBuildVectorAllZeros(const MachineInstr &MI,
+ const MachineRegisterInfo &MRI);
+
+/// Return true if the specified instruction is a G_BUILD_VECTOR or
+/// G_BUILD_VECTOR_TRUNC where all of the elements are ~0 or undef.
+bool isBuildVectorAllOnes(const MachineInstr &MI,
+ const MachineRegisterInfo &MRI);
+
+/// Returns true if given the TargetLowering's boolean contents information,
+/// the value \p Val contains a true value.
+bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
+ bool IsFP);
+
+/// Returns an integer representing true, as defined by the
+/// TargetBooleanContents.
+int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP);
+} // End namespace llvm.
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif